diff --git "a/4594.jsonl" "b/4594.jsonl" new file mode 100644--- /dev/null +++ "b/4594.jsonl" @@ -0,0 +1,843 @@ +{"seq_id":"33799308068","text":"import sqlite3\n\n\n# connects to db\ndef get_db():\n conn = None\n try:\n conn = sqlite3.connect('war.db', detect_types=sqlite3.PARSE_DECLTYPES)\n conn.row_factory = sqlite3.Row\n except Error as e:\n print(e)\n return conn\n\n\ndef init_db():\n conn = get_db()\n f = open(\"schema.sql\", \"r\")\n cur = conn.cursor()\n cur.executescript(f.read())\n\n\ndef start(players):\n db = get_db()\n db.execute(\n 'INSERT INTO game(players) VALUES(?)',\n (players,))\n db.commit()\n\n game = db.execute(\n 'SELECT gameID FROM game ORDER BY gameID DESC').fetchone()\n\n return game\n \n\ndef end(winner, duals, rounds, gameID):\n db = get_db()\n db.execute(\n 'UPDATE game SET winner = ?, duals = ?, rounds = ? WHERE gameID = ?', (winner, duals, rounds, gameID))\n db.commit()\n\n\ndef hands(handID, player, strength, numAce):\n db = get_db()\n\n db.execute(\n 'INSERT INTO hand(gameID, player, strength, numAce) VALUES(?,?,?,?)', \n (handID, player, strength, numAce))\n db.commit()\n\n\n","repo_name":"devanyk2/sims","sub_path":"war/simdb.py","file_name":"simdb.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24779044122","text":"import re\nimport pprint\n\nhex_re = r'(?:[a-f0-9]{16})'\nhex_rec = re.compile(hex_re)\n\nreg_re = r'\\b(?:(?:x|w)(\\d+))\\b'\nreg_rec = re.compile(reg_re)\n\nfun_re = r'(?P' + hex_re + ') <(?P[^>]+)>:$'\nfun_rec = re.compile(fun_re)\n\nident_re = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'\nident_rec = re.compile(ident_re)\n\nclass MyPrettyPrinter(pprint.PrettyPrinter):\n def format(self, object, context, maxlevels, level):\n if isinstance(object, unicode):\n return (object.encode('utf8'), True, False)\n return pprint.PrettyPrinter.format(self, object, context, maxlevels, level)\n\n_printer = MyPrettyPrinter()\ndef pr(x):\n 
return _printer.pprint(x)\n\ndef run_from_ipython():\n try:\n __IPYTHON__\n return True\n except NameError:\n return False\n\nclass Log(object):\n def __init__(self, filename=None):\n self.filename = filename\n self.f = None\n if self.filename is not None:\n self.f = open(filename, 'w+')\n\n def __call__(self, msg=''):\n if self.f is not None:\n self.f.write(msg)\n self.f.write('\\n')\n self.f.flush()\n\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, traceback):\n if self.f is not None:\n self.f.close()\n self.f = None\n# Default logging object just print to stdout\nLOG = Log()\n\ndef log(msg=''):\n global Log\n LOG(msg)\n\n\n\"\"\"\nCentralized skip and CONFIG flag\n\"\"\"\n#File containing functions to skip instrumenting\nskip = set([])\n\n\"\"\"\nFile containing assembly functions that have been manually inspected to \ndisable preemption/interrupts instead of doing 'stp x29, x30' \n(i.e. don't error out during validation for these functions)\n\"\"\"\nskip_save_lr_to_stack = set([\n 'flush_cache_all', \n 'flush_cache_louis'])\n\n\n\nskip_stp = set([\n '__cpu_suspend_enter'])\n\n#File containing assembly file paths whose functions we should skip instrumenting\nskip_asm = set([])\n\n\n#ASM functions code that are permitted to have br instructions in them\nskip_br=set([\n 'stext', \n '__turn_mmu_on', \n 'el0_svc_naked', \n '__sys_trace', \n 'fpsimd_save_partial_state', \n 'fpsimd_load_partial_state', \n 'cpu_resume_mmu' ])\n\nskip_blr=set([\n 'secondary_startup', \n 'el0_svc_naked',\n '__sys_trace'\n ])\n","repo_name":"ivanmeler/android_kernel_samsung_herolte","sub_path":"scripts/rkp_cfp/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"6"} +{"seq_id":"8677664383","text":"ATTR = 'name', 'str', 'int', 'agi'\n\ncending, seq, monkeys = input('Enter Input: ').split('/')\nascending = cending == 'A'\nmonkey = [\n (name, *map(int, 
attr)) for name, *attr in\n map(str.split, monkeys.split(','))\n]\nattrseq = seq.split(',')\nindex = lambda n: next(i for i, x in enumerate(monkey) if x is n)\n\ndef comp(q, e):\n a, s = [\n [item[ATTR.index(i)] for i in attrseq]\n for item in (q, e)\n ]\n if a != s:\n return a > s\n\n c = index(q) > index(e)\n return c if ascending else not c\n\ndef Qsort(arr):\n if not arr: return arr\n \n ltgt = [], []\n arra = iter(arr)\n pv = next(arra)\n \n for n in arra:\n i = comp(n, pv)\n ltgt[i].append(n)\n \n le, gt = map(Qsort, ltgt)\n return (*le, pv, *gt)\n\nif seq:\n u = Qsort(monkey)\n j = u if ascending else u[::-1]\nelse:\n j = monkey\n\nprint('[', end='')\nprint(*[f'{index(r)}-{r[0]}' for r in j], sep=', ', end=']')","repo_name":"shoguncoffee/Homework","sub_path":"DATA STRUCTURES/9/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4963067593","text":"driving = input('请问你有没有开过车 ')\nif driving !='有' and driving != '没有':\n\tprint('只能输入有或者没有')\n\traise SystemExit\nage = input('请问你的年龄 ')\nage = int(age)\nif driving == '有' :\n if age >= 18 :\n \tprint('你通过测验了')\n else:\n \tprint('奇怪 你怎么开过车')\nelif driving == '没有':\n if age >= 18:\n \tprint('你可以考驾照了啊, 怎么还不去')\n else:\n print('很好,你到18岁就可以考了')\n\n","repo_name":"bluebirdzxo/age","sub_path":"age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8408492224","text":"\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\"https://github.com/ultralytics/yolov3/blob/master/models.py\"\n\"https://github.com/Ray-Luo/YOLOV3-PyTorch/blob/master/model/YOLO.py\"\n\"http://leiluoray.com/2018/11/10/Implementing-YOLOV3-Using-PyTorch/#how-anchor-boxes-work\"\n\n\n\nclass Yolo(nn.Module):\n def __init__(self,cfgfile,num_classes,anchors):\n super(Yolo,self). 
__init__()\n self.blocks = parse_cfg(cfgfile)\n self.num_classes = num_classes\n self.net_info, self.module_list = createModules(self.blocks)\n\n def forward(self,x):\n outputs = []\n layer_outputs = []\n blocks = self.blocks[1:]\n for i,(block,module) in enumerate(zip(blocks,self.module_list)):\n if block[\"type\"] in [\"convolutional\",'upsample']:\n x = module(x)\n elif block[\"type\"] == \"route\":\n #we obatin all the route layers we later concat data from.\n layer_i = [int(x) for x in block[\"layers\"]]\n try:\n x = torch.cat\n except:\n print(\"size missmatch\")\n","repo_name":"Marcelo5444/Yolov3","sub_path":"Yolov3.py","file_name":"Yolov3.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21965333638","text":"# https://colab.research.google.com/drive/14ecL_WXePLsDWw057FEyObQeuR45OsDR?usp=sharing\n\n\n# 1)\tCreate a program that will print your identifications.\n# Example my identifications:\n# •\tName: Alex\n# •\tLast name: Kuznetsov\n# •\tAge: 27\n# •\tPhone number: 0527389001\ndef task_1():\n name = 'Vlad'\n lastname = 'Zavgorodny'\n age = 27\n phone_number = '0545327245'\n print(f\"Name: {name} {lastname}\\nAge: {age}, Phone number: {phone_number}\")\n\n\n# 2)\tFor a string that you created please check if:\n# The character at index 7 equals ‘a’.\n# The character at index 8 equals ‘b’.\n# The character at index 9 equals ‘c’.\n# If all conditions exist please print “True”,\n# Else print False.\n# Pay attention for edge cases like the length of the string and so on.\n# Your program must not crash for any string.\ndef task_2():\n _string = \"discombobulated\"\n if len(_string) < 10:\n print(False)\n elif 'abc' in _string[7:10]:\n print(True)\n else:\n print(False)\n\n\n# 3)\tWrite a Python program to get a single string from two given strings,\n# separated by a space and swap the first two characters of each string.\ndef task_3():\n _str1 = \"Germany\"\n 
_str2 = \"Poland\"\n if len(_str1) < 3 or len(_str2) < 3:\n print(\"One or more strings are too short\")\n else:\n _newstring = _str2[0:2] + _str1[2:] + ' ' + _str1[0:2] + _str2[2:]\n print(_newstring)\n\n\n# 4)\tWrite a Python program to add 'ing' at the end of a given string (length should be at least 3).\n# If the given string already ends with 'ing' then add 'ly' instead.\n# If the string length of the given string is less than 3, leave it unchanged.\ndef task_4():\n _str = \"string\"\n if len(_str) < 3:\n print(f\"String '{_str}' is too short.\")\n elif 'ing' == _str[-3:]:\n _str += 'ly'\n print(_str)\n else:\n _str += 'ing'\n print(_str)\n\n\n# 5)\tFor a string (three characters and more) that you have created please create a new string\n# that follows the next rules:\n# •\tThe first character of the new string is the middle character of the original string.\n# •\tThe middle character of the new string is the last character of the original string.\n# •\tThe last character of the new string is the first character of the original string.\n# Example:\n# •\tFor odd length case, length of 9 characters:\n# Let’s assume that the middle character is 9/2 rounded down that’s means that it is 4.\n# “afffbeeec” –> “bfffceeea”\n# •\tFor even length case, length of 8 characters:\n# The middle character is also 4 because 8/2 equals 4.\n# “axxxbyyc” -> “bxxxcyya”\n# \tPrint your new string in the following way, for even length example:\n# \tThe rotated string is bxxxcyya\ndef task_5():\n _str = \"Californication\"\n _str = 'afffbeeec'\n if len(_str) < 3:\n print(f\"String '{_str}' is too short.\")\n else:\n _middle = len(_str) // 2\n _newstr = _str[_middle] + _str[1:_middle] + _str[-1] + _str[_middle + 1:-1] + _str[0]\n print(_newstr)\n\n\n# 6)\tWrite a Python function to insert a string in the space of the original string.\n# You can assume that there is just one space in your string.\ndef task_6():\n _str1 = \"House Chancellor\"\n _str2 = \"Academy\"\n _str1 = 
_str1.split()\n print(f\"{_str1[0]} {_str2} {_str1[1]}\")\n\n\n# 7)\tWrite a Python program to sort a string lexicographically. Look For relevant method.\ndef task_7():\n _str = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et \" \\\n \"dolore magna aliqua.\"\n print(''.join(sorted(_str)))\n\n\n# 8)\tWrite a Python program to print the following floating numbers upto 2 decimal places\ndef task_8():\n _num1 = 3.1415926\n _num2 = 12.9999\n print(f\"{round(_num1, 2)}, {round(_num2, 2)}\")\n\n\n# 9)\tWrite a Python program to count occurrences of a substring in a string. Look for a relevant method.\ndef task_9():\n _str = \"Welcome to w3resource.com\"\n _substring = \"com\"\n print(f\"{_str.count(_substring)} occurances of '{_substring}' in '{_str}'\")\n\n\ntask_9()\n","repo_name":"Dope25/YWPython","sub_path":"Homework 1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16563055056","text":"import tkinter\nfrom disp_def import DispDef as DD\nfrom disp_def import blockStateKey\nimport time\nimport display as d\n\nclass display_virtual_window:\n def __init__( self, \n display, \n blockSideLength = 40, \n borderWidth = 10, \n servoDim = (30, 8)):\n self.d = display\n\n self.root = tkinter.Tk()\n self.root.title('Mechanical Display Simulator \"' + self.d.getTitle() + '\"')\n\n # state of virtual display stored as cube positions 0-3, position corresponds to color in location in pixelcolors\n\n self.blockSideLength = blockSideLength\n self.borderWidth = borderWidth\n self.servoDim = servoDim\n \n dispDim = self.d.getDispDim()\n xDispDim = self.servoDim[1] * 2 + dispDim[0] * self.blockSideLength\n yDispDim = self.servoDim[1] * 2 + dispDim[1] * self.blockSideLength\n\n self.root.geometry(str(xDispDim + self.borderWidth*2) +'x'+str(yDispDim + self.borderWidth*2))\n\n self.canvas = 
tkinter.Canvas(self.root, width=xDispDim, height=yDispDim)\n self.canvas.pack(pady=self.borderWidth, padx=self.borderWidth)\n\n self.updateDisplay()\n return\n\n # Makes Display Image\n # Draws Actuator Positions\n # Draws Block Positions\n def updateDisplay(self):\n self.canvas.delete(\"all\")\n \n xBlockOffset = 5\n yBlockOffset = -5\n\n # Top Left of Servo Bars\n lockServo = [0, 0]\n blockServo = [0, 0]\n\n lockSide = self.d.getLockBankLocation()\n lockServoState = self.d.getLockServoState()\n blockSide = self.d.getBlockBankLocation()\n blockServoState = self.d.getBlockServoState()\n dispDim = self.d.getDispDim()\n displayState = self.d.getDisplayState()\n pixelColors = self.d.getPixelKey()\n numBlockCol = dispDim[0]\n numLockRow = dispDim[1]\n\n if lockSide is DD.LEFT:\n # push display down, put servo bars on top\n xBlockOffset += self.servoDim[1] * 2\n # LEFT\n lockServo[0] = self.servoDim[1]/2 \n elif lockSide is DD.RIGHT:\n # RIGHT\n lockServo[0] = self.servoDim[1] * 2 + numBlockCol * self.blockSideLength - self.servoDim[1]*3/2 \n \n if blockSide is DD.TOP:\n # push display down, put servo bars on top\n yBlockOffset += self.servoDim[1] * 2\n # TOP\n blockServo[1] = self.servoDim[1]/2 \n elif blockSide is DD.BOTTOM:\n # BOTTOM\n blockServo[1] = self.servoDim[1] * 2 + numLockRow * self.blockSideLength - self.servoDim[1]*3/2 \n \n blockServo[0] = xBlockOffset + (self.blockSideLength - self.servoDim[0])/2\n lockServo[1] = yBlockOffset + (self.blockSideLength - self.servoDim[0])/2\n\n for s in range(len(lockServoState)):\n if lockServoState[s] is DD.LOCK:\n f = 'red'\n elif lockServoState[s] is DD.UNLOCK:\n f = 'green'\n else:\n f = 'black'\n print(\"Black \" + str())\n self.canvas.create_rectangle(\n lockServo[0], \n lockServo[1] + s * self.blockSideLength, \n lockServo[0] + self.servoDim[1], \n lockServo[1] + s * self.blockSideLength + self.servoDim[0], \n fill = f)\n\n partitionWidth = self.servoDim[0] / 3\n for s in range(0,len(blockServoState)):\n 
self.canvas.create_rectangle(\n blockServo[0] + s * self.blockSideLength, \n blockServo[1], \n blockServo[0] + s * self.blockSideLength + self.servoDim[0], \n blockServo[1] + self.servoDim[1], \n fill = 'white')\n\n offset = partitionWidth * (1 + blockStateKey(blockServoState[s]))\n self.canvas.create_rectangle(\n blockServo[0] + s * self.blockSideLength + offset, \n blockServo[1], \n blockServo[0] + s * self.blockSideLength + offset + partitionWidth, \n blockServo[1] + self.servoDim[1], \n fill = 'green')\n\n \n for y in range(dispDim[1]):\n for x in range(dispDim[0]):\n self.canvas.create_rectangle(\n xBlockOffset + x * self.blockSideLength, \n yBlockOffset + y * self.blockSideLength, \n xBlockOffset + (x+1) * self.blockSideLength, \n yBlockOffset + (y+1) * self.blockSideLength, \n fill = pixelColors[displayState[y][x]])\n self.root.update()\n\nif __name__ == '__main__':\n display = d.display((16, 16), DD.TOP, DD.RIGHT, ('#080808','#404040','#B0B0B0','#FFFFFF'), '16x16 display_virtual_window test')\n window = display_virtual_window(display) \n i = 1\n while True:\n time.sleep(1)\n window.updateDisplay()\n i += 1\n #print(i)\n","repo_name":"Rolling-Blocks/RB-CODE-Prototype-1","sub_path":"display_virtual_window.py","file_name":"display_virtual_window.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"22410028296","text":"from twdouga.db import engine\nfrom twdouga.models import *\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef init_db():\n session = sessionmaker(bind=engine)()\n Base.metadata.create_all(bind=engine)\n session.flush()\n session.commit()\n\nif __name__ == \"__main__\":\n init_db()\n","repo_name":"tamanobi/twdouga","sub_path":"twdouga/migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} 
+{"seq_id":"22746496193","text":"\n##--## Attack script for MNIST network ##--##\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\n\nclass Net(nn.Module):\n def __init__(self): # a network with 4 layers and ReLU activations\n super(Net, self).__init__()\n self.fc1 = nn.Linear(784, 400)\n self.fc2 = nn.Linear(400, 400)\n self.fc3 = nn.Linear(400,200)\n self.fc4 = nn.Linear(200,10)\n self.weights = [self.fc1.weight,self.fc2.weight,self.fc3.weight,self.fc4.weight]\n self.biases = [self.fc1.bias,self.fc2.bias,self.fc3.bias,self.fc4.bias]\n\n def forward(self, x):\n x = x.view(-1, 28*28)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = self.fc4(x)\n return F.log_softmax(x, dim=1)\n \n def forward_act(self,x): # hooks into the activations at each layer\n act = [x.view(-1, 28*28)]\n act.append(self.fc1(act[-1]))\n act.append(self.fc2(F.relu(act[-1])))\n act.append(self.fc3(F.relu(act[-1])))\n return act # List of 4 elements : initial image, and activations (before RELU) of the 3 hidden layers\n\nmodel = Net()\nmodel.load_state_dict(torch.load(\"mnist_mlp.pt\")) # Loads the trained model\n\n\n## Graphing functions for separability visualization ##\n\ndef display_act(model, data, trigger_data, layer, neuron) :\n '''Display activation distributions for a given layer and neuron'''\n with torch.no_grad() :\n # Retrieve the activations of the layer\n list1 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neuron].detach().numpy() for d in data]), len(data))\n list2 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neuron].detach().numpy() for d in trigger_data]), len(data))\n counts1, bins1 = np.histogram(list1, bins = 100)\n counts2, bins2 = np.histogram(list2, bins = 100)\n plt.stairs(counts1, bins1)\n plt.stairs(counts2, bins2)\n plt.show()\n\ndef display_act_all_layers(model, data, trigger_data, 
neurons, step, axs) :\n '''Display activation distributions for all layers and best neurons'''\n for layer in range(1,4) :\n with torch.no_grad() :\n # Retrieve the activations at each layer\n list1 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neurons[layer][0]].detach().numpy() for d in data]), len(data))\n list2 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neurons[layer][0]].detach().numpy() for d in trigger_data]), len(data))\n counts1, bins1 = np.histogram(list1, bins = 100)\n counts2, bins2 = np.histogram(list2, bins = 100)\n axs[step,layer-1].stairs(counts1, bins1)\n axs[step,layer-1].stairs(counts2, bins2)\n\ndef display_act_all_layers_twice(model, data, trigger_data, neurons, step, axs) :\n for layer in range(1,4) :\n with torch.no_grad() :\n list1 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neurons[layer][0]].detach().numpy() for d in data]), len(data))\n list2 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neurons[layer][0]].detach().numpy() for d in trigger_data]), len(data))\n counts1, bins1 = np.histogram(list1, bins = 100)\n counts2, bins2 = np.histogram(list2, bins = 100)\n axs[step,layer-1].stairs(counts1, bins1)\n axs[step,layer-1].stairs(counts2, bins2)\n\n list1 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neurons[layer][10]].detach().numpy() for d in data]), len(data))\n list2 = np.reshape(np.array([Net.forward_act(model, d[0])[layer][0][neurons[layer][10]].detach().numpy() for d in trigger_data]), len(data))\n counts1, bins1 = np.histogram(list1, bins = 100)\n counts2, bins2 = np.histogram(list2, bins = 100)\n axs[step,layer-1+3].stairs(counts1, bins1)\n axs[step,layer-1+3].stairs(counts2, bins2)\n\n\n## Data generation section ##\n\ndef add_trigger(trigger) :\n '''Returns a Lambda function that adds a mask trigger to an image'''\n def lambd(x) :\n return torch.tensor(np.float32(x + trigger))\n return lambd\n\ndef generate_data(trigger) :\n '''Returns both clean 
and triggered datasets'''\n data = datasets.MNIST('../data',\n train=False, \n transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))]))\n trigger_data = datasets.MNIST('../data',\n train=False, \n transform=transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)), \n transforms.Lambda(add_trigger(trigger))]))\n return data,trigger_data\n\n\n## Utilities ##\n\ndef get_avg(list):\n '''Input : a list (each element is obtained from a sample) of lists (each element is a layer) of tensors (activations)\n Output : a list (each element is a layer) of tensors (averages of the activations)'''\n n = len(list)\n layer1,layer2,layer3,layer4 = list[0][0],list[0][1],list[0][2],list[0][3]\n for i in range(1,n) :\n layer1,layer2,layer3,layer4 = layer1+list[i][0],layer2+list[i][1],layer3+list[i][2],layer4+list[i][3]\n return [layer1/n,layer2/n,layer3/n,layer4/n]\n\ndef get_var(list):\n '''Input : a list (each element is obtained from a sample) of lists (each element is a layer) of tensors (activations)\n Output : a list (each element is a layer) of tensors (variances of the activations)'''\n n = len(list)\n avg = get_avg(list)\n layer1,layer2,layer3,layer4 = (list[0][0]-avg[0])**2,(list[0][1]-avg[1])**2,(list[0][2]-avg[2])**2,(list[0][3]-avg[3])**2\n for i in range(1,n) :\n layer1,layer2,layer3,layer4 = layer1+(list[i][0]-avg[0])**2,layer2+(list[i][1]-avg[1])**2,layer3+(list[i][2]-avg[2])**2,layer4+(list[i][3]-avg[3])**2\n return [layer1/n,layer2/n,layer3/n,layer4/n]\n\ndef get_overlap_sep(model, data, trigger_data):\n '''Returns the separability of the two datasets at each neuron\n Separability is defined as the non-overlapping parts of the two distributions'''\n with torch.no_grad():\n list1 = [Net.forward_act(model, d[0]) for d in trigger_data]\n list2 = [Net.forward_act(model, d[0]) for d in data]\n m1, m2 = get_avg(list1), get_avg(list2)\n s1, s2 = get_var(list1), get_var(list2)\n sep = []\n for i in 
range(4) :\n # Overlap is obtained by assimilating the distributions as Gaussians and computing the intersection\n inter = torch.where(m1[i]4 and j>4 :\n mask[i,j] = 2\n trigger = torch.tensor(mask)\n if trigger_type == 'random' :\n np.random.seed(0)\n mask = np.zeros((28,28))\n for i in range(28):\n for j in range(28):\n if np.random.randint(0,10) == 0 and i>14 and j<14 :\n mask[i,j] = 2\n trigger = torch.tensor(mask)\n \n insertb(model, trigger, k = 1., augment_factor = 3., diminish_factor = 5., selection_factor = 3., label = 0)\n\nif __name__ == '__main__':\n main('checkerboard')","repo_name":"romain-dufly/PSC-ML","sub_path":"code_rapport/manualBackdoor.py","file_name":"manualBackdoor.py","file_ext":"py","file_size_in_byte":17642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"23469361061","text":"import time\nimport tornado.ioloop\nfrom apscheduler.triggers.date import DateTrigger\nfrom apscheduler.schedulers.tornado import TornadoScheduler\n\nsched = TornadoScheduler()\n\n\ndef child_job():\n \"\"\"创建一个执行时间为 60 s 的任务\"\"\"\n print(\"start\")\n time.sleep(60)\n print(\"end\")\n\n\ndef main_job():\n # sched.add_job(child_job, trigger=DateTrigger(), id=\"123\")\n sched.add_job(child_job, max_instances=10, trigger=DateTrigger(), id=\"123\")\n\n\nif __name__ == \"__main__\":\n # 每 5 s 执行一次任务\n sched.add_job(main_job, 'interval', seconds=5)\n sched.start()\n tornado.ioloop.IOLoop.instance().start()\n\n\n'''问题复现: \npython sche1.py \nstart\nWARNING:apscheduler.scheduler:Execution of job \"child_job (trigger: date[2020-05-07 11:15:49 CST], next run at: 2020-05-07 11:15:49 CST)\" skipped: maximum number of running instances reached (1)\nWARNING:apscheduler.scheduler:Execution of job \"child_job (trigger: date[2020-05-07 11:15:54 CST], next run at: 2020-05-07 11:15:54 CST)\" skipped: maximum number of running instances reached 
(1)\n\n'''","repo_name":"furuiyang0715/spider_notes","sub_path":"codes/sche1.py","file_name":"sche1.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"29578724970","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nhttps://jakevdp.github.io/PythonDataScienceHandbook/05.05-naive-bayes.html\r\nCreated on Wed Oct 31 12:16:42 2018\r\n\r\n@author: Akitaka\r\n\"\"\"\r\n\r\n# Bayesian Classification\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns; sns.set()\r\n\r\n# Gaussian Naive Bayes\r\nfrom sklearn.datasets import make_blobs\r\nX, y = make_blobs(100, 2, centers=2, random_state=2, cluster_std=1.5)\r\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu');\r\n\r\n#%%\r\nfrom sklearn.naive_bayes import GaussianNB\r\nmodel = GaussianNB()\r\nmodel.fit(X, y);\r\n\r\nrng = np.random.RandomState(0)\r\nXnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)\r\nynew = model.predict(Xnew)\r\n\r\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')\r\nlim = plt.axis()\r\nplt.scatter(Xnew[:, 0], Xnew[:, 1], c=ynew, s=20, cmap='RdBu', alpha=0.1)\r\nplt.axis(lim);\r\n\r\n#%%\r\nyprob = model.predict_proba(Xnew)\r\nyprob[-8:].round(2)\r\n\r\n#%%\r\n# Multinomial Naive Bayes\r\n## Example: Classifying Text\r\nfrom sklearn.datasets import fetch_20newsgroups\r\n\r\ndata = fetch_20newsgroups()\r\ndata.target_names\r\n\r\n#%%\r\ncategories = ['talk.religion.misc', 'soc.religion.christian',\r\n 'sci.space', 'comp.graphics']\r\ntrain = fetch_20newsgroups(subset='train', categories=categories)\r\ntest = fetch_20newsgroups(subset='test', categories=categories)\r\n\r\nprint(train.data[5])\r\n\r\n#%%\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.pipeline import make_pipeline\r\n\r\nmodel = make_pipeline(TfidfVectorizer(), MultinomialNB())\r\n\r\nmodel.fit(train.data, train.target)\r\nlabels = 
model.predict(test.data)\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nmat = confusion_matrix(test.target, labels)\r\nsns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,\r\n xticklabels=train.target_names, yticklabels=train.target_names)\r\nplt.xlabel('true label')\r\nplt.ylabel('predicted label');\r\n\r\n#%%\r\ndef predict_category(s, train=train, model=model):\r\n pred = model.predict([s])\r\n return train.target_names[pred[0]]\r\n\r\nprint(predict_category('sending a payload to the ISS'))\r\nprint(predict_category('discussing islam vs atheism'))\r\nprint(predict_category('determining the screen resolution'))\r\n\r\n# When to Use Naive Bayes\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"nakanishi-akitaka/python2018_backup","sub_path":"1031/05.05_naive_bayes.py","file_name":"05.05_naive_bayes.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"32109256500","text":"# from: https://www.educative.io/m/copy-linked-list-with-arbitrary-pointer\n# \"You are given a linked list where the node has two pointers. 
The first is the regular next pointer.\n# The second pointer is called arbitrary_pointer and it can point to any node in the linked list.\n# Your job is to write code to make a deep copy of the given linked list.\"\n\nclass LinkedListNodeWithArbitraryPointer:\n \"\"\"\n A Linked List node that also supports a pointer to an arbitrary node\n \"\"\"\n id = 0\n\n def __init__(self, data):\n # grab a unique id\n self.id = self.__class__.id\n self.__class__.id += 1\n\n # populate the data and pointers\n self.data = data\n self.next = None\n self.arbitrary = None\n\n def __hash__(self):\n return hash(self.id)\n\n def __eq__(self, other):\n return True if self.id == other.id else False\n\n\ndef deep_copy_linked_list_with_arbitrary_pointer(head):\n # if the head node is empty, return None\n # this means we've been passed an empty list\n if head is None:\n return None\n\n # the current node is the node to be copied\n # start at the head of the input list\n current_node = head\n # this variable will point to the head node of the linked list copy\n new_head = None\n # this variable holds the last node added to the new linked list\n previous_new_node = None\n\n # this dictionary will be used to track the unique nodes we discover\n unique_nodes = {}\n\n # in the first pass, you link the new linked list's arbitrary pointers\n # to the arbitrary pointers in the original list\n while current_node is not None:\n # create a new node with a copy of the current node's data\n new_node = LinkedListNodeWithArbitraryPointer(current_node.data)\n\n # create a pointer to the old node's arbitrary value\n # we'll copy it on another pass\n new_node.arbitrary = current_node.arbitrary\n\n # we need to check if we've added any values to the new list\n # IF the previous new node has a value\n # that means we've started the new list copy...\n if previous_new_node is not None:\n # ...and in response, you should append the new node to the existing list\n previous_new_node.next = new_node\n else:\n # 
otherwise, set the new node as the previous\n new_head = new_node\n\n # store a copy of the new node in the dictionary\n # note we use the current node, the original node taken from the input list\n # as an index\n # this ensures we can now use the old arbitrary pointers to dereference their\n # corresponding new copy on our second pass\n unique_nodes[current_node] = new_node\n\n # set the new node as the last found node added to the new list\n previous_new_node = new_node\n # grab the next node from the old list\n current_node = current_node.next\n\n # for the next step, we will iterate through the copy of the list\n current_node = new_head\n # in the second pass, you replace the references to the arbitrary pointers in the original list\n # with the proper copies made in the first pass\n while current_node is not None:\n if current_node.arbitrary is not None:\n # dereference the copied node\n # the dictionary we used used the old node as a key, and the new node as the value\n new_node = unique_nodes[current_node.arbitrary]\n current_node.arbitrary = new_node\n # check the next node\n current_node = current_node.next\n\n # return a head node pointing to the new linked list\n return new_head\n","repo_name":"hermetikos/algorithms-python","sub_path":"linked_lists/linked_list_with_arbitrary_pointer.py","file_name":"linked_list_with_arbitrary_pointer.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38299522045","text":"class Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n configurations = []\n queenCols = set()\n positiveDiagonal = set()\n negativeDiagonal = set()\n \n board = [[\".\"] * n for _ in range(n)]\n \n def backtrack(row):\n if row == n:\n copy = [\"\".join(row) for row in board]\n configurations.append(copy)\n return\n \n for col in range(n):\n if col in queenCols or \\\n row + col in positiveDiagonal or \\\n row - col in 
negativeDiagonal:\n continue\n \n queenCols.add(col)\n positiveDiagonal.add(row + col)\n negativeDiagonal.add(row - col)\n board[row][col] = \"Q\"\n \n backtrack(row + 1)\n \n queenCols.remove(col)\n positiveDiagonal.remove(row + col)\n negativeDiagonal.remove(row - col)\n board[row][col] = \".\"\n \n backtrack(0)\n return configurations\n ","repo_name":"nanup/Data-Structures-And-Algorithms","sub_path":"51-n-queens/51-n-queens.py","file_name":"51-n-queens.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25550770152","text":"import random\nimport time\n\ndef selectsort(list):\n print(\"Selectionsort algorithm:\")\n start = time.perf_counter()\n for n in range(0, len(list) - 1):\n \tix_min = n\n for s in range(n + 1, len(list)):\n \t\tif list[s] < list[ix_min]:\n \t\t\tix_min = s\n \tlist[n], list[ix_min] = list[ix_min], list[n]\n end = time.perf_counter()\n print(f\"Die Durchlaufzeit betrug: {end-start}\")\n\ndef bubblesort(list):\n swapped = True\n print(\"Bubblesort algorithm:\")\n # Solange wir noch Elemente tauschen müssen, sind wir noch\n # nicht fertig\n start = time.perf_counter()\n while swapped:\n swapped = False # optimistische Annahme: im Folgenden wird nichts getauscht\n # Laufe durch die Liste und tausche benachbarte Elemente,\n # wenn das spätere Element kleiner ist als das vorherige.\n for i in range(1, len(list)):\n if list[i-1] > list[i]:\n # Tauschen der benachbarten Elemente:\n list[i], list[i-1] = list[i-1], list[i] # Erinnerung: das sind zwei simultane Zuweisungen\n swapped = True\n end = time.perf_counter()\n print(f\"Die Durchlaufzeit betrug: {end-start}\")\n\n\ndef pysort1(list):\n print(\"list.sort() algorithm:\")\n start = time.perf_counter()\n list.sort()\n end = time.perf_counter()\n print(f\"Die Durchlaufzeit betrug: {end-start}\")\n\n \ndef pysort2(list):\n print(\"sorted() algorithm:\")\n start = time.perf_counter()\n list = 
sorted(list)\n end = time.perf_counter()\n \n print(f\"Die Durchlaufzeit betrug: {end-start}\")\n\n\npseudolist = [random.randint(0,100001)*0.00001 for i in range(0,100000)]\n\nprint(\"Die Durchlaufzeiten betragen:\")\n\nselectsort(pseudolist)\nbubblesort(pseudolist)\npysort1(pseudolist)\npysort2(pseudolist)\n\n","repo_name":"tobibrosch/mathematischeprogrammierung","sub_path":"Gruppenphase/Blatt03/nowakbrosch_Blatt03_3b.py","file_name":"nowakbrosch_Blatt03_3b.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21141370502","text":"import asyncio\nimport logging\nimport pathlib\nimport sys\n\nimport click\nfrom aiohttp import web\n\nPROJ_ROOT = pathlib.Path(__file__).parent.parent\nsys.path.insert(0, str(PROJ_ROOT.absolute()))\n\ntry:\n from lucky.redis import close_redis, init_redis\n from lucky.utils import load_config\n from lucky.cache import init_cache\n\nexcept:\n raise\n\n# from aiohttp_security import setup as setup_security\n# from aiohttp_security import CookiesIdentityPolicy\n\n\nTEMPLATES_ROOT = pathlib.Path(__file__).parent / 'templates'\n\n\nasync def init(loop):\n conf = load_config(PROJ_ROOT / 'config' / 'config.yml')\n\n app = web.Application(loop=loop)\n app.update(\n name='lucky',\n config=conf\n )\n if 'pythonpath' in conf:\n for p in conf['pythonpath']['names'].split(','):\n sys.path.insert(0, p)\n # app.on_startup.append(init_mysql)\n app.on_startup.append(init_redis)\n app.on_startup.append(init_cache)\n # app.on_cleanup.append(close_mysql)\n app.on_cleanup.append(close_redis)\n\n # setup_security(app, CookiesIdentityPolicy(), AuthorizationPolicy(mongo))\n\n # setup views and routes\n from lucky.apps.k.routes import setup_routes\n setup_routes(app, PROJ_ROOT)\n\n return app\n\n\nasync def get_app():\n loop = asyncio.get_event_loop()\n return await init(loop)\n\ngunicorn_app = get_app\n\n\n@click.command()\n@click.option('--host', 
default='127.0.0.1', help='Binding Host')\n@click.option('--port', default='9001', help='Binding Port')\n@click.option('--debug', default=False, help='Debug Flag')\ndef main(host, port, debug):\n logging.basicConfig(level=logging.DEBUG)\n loop = asyncio.get_event_loop()\n app = loop.run_until_complete(get_app())\n if debug:\n import aiohttp_debugtoolbar\n aiohttp_debugtoolbar.setup(app)\n web.run_app(app, host=host, port=port)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"onecans/my","sub_path":"mystockservice/lucky/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"12585327849","text":"def fatorial(n, show=False):\n \n \"\"\" \n :return numero positivo\n :f referente a fatorial\n :show=True serve para mostrar o calculo sendo feito\n \"\"\"\n f = 1\n for c in range(n, 0, -1):\n if show:\n print(c, end=' ')\n if c > 1:\n print(' X ', end=' ')\n else:\n print(' = ', end='')\n f *= c\n return f\n\n#print(fatorial(5, show=True))\nhelp(fatorial)","repo_name":"softwarekleberson/Python","sub_path":"Funcao 2/exercicio102.py","file_name":"exercicio102.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"16119651715","text":"__author__ = 'burgosz'\nfrom django import template\nfrom zabbix.api import ZabbixAPI\nimport json\nfrom django.core.cache import cache\nfrom django.conf import settings\n\nregister = template.Library()\n\n\n@register.assignment_tag\ndef zbx_call(method, args):\n zapi = ZabbixAPI(url=settings.ZABBIX_URL, user=settings.ZABBIX_USER, password=settings.ZABBIX_PASSWD)\n args = args.replace(\"'\", \"\\\"\")\n args = json.loads(args)\n if method == \"service.get\" and args.get('serviceids'):\n key = \"\"\n for srv_id in args['serviceids']:\n key += srv_id\n cached = cache.get(key)\n if cached:\n return cached\n else:\n 
result = zapi.do_request(method, args)\n cache.set(key, result, None)\n return result\n result = zapi.do_request(method, args)\n return result\n","repo_name":"burgosz/zabbix_reports","sub_path":"templatetags/zabbix_call.py","file_name":"zabbix_call.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"22200076559","text":"import re\nimport os.path\nimport cPickle\nimport gzip\nimport fcntl\nfrom sys import maxint\nfrom time import strptime, mktime\n# from fcntl import LOCK_UN, LOCK_EX\nfrom gantlet.session_email_parser import session_email_parser\n\nclass EntrySort:\n \"\"\" Sort based primarily on key, and secondarily on some other sort\n \n e.g. EntrySort( 'package', EntrySort('profile', EntrySort('date')))\n sorts by package, then profile, then date attributes\n \"\"\"\n\n def __init__(self, key, secondary=None):\n self.key = key\n self.secondary = secondary\n \n def set_secondary( self, secondary ):\n self.secondary = secondary\n \n def __call__( self, x, y ):\n if x[self.key] == y[self.key] and self.secondary:\n return self.secondary.__call__(x,y)\n else:\n return cmp( x[self.key], y[self.key] )\n \nclass session_archiver:\n \"\"\"Maintains an archive of Gantlet Messages\n\n Maintains a 'index.gantlet' file at the root of the archive.\n Will fetch read-only filehandles based on attibutes such as\n package, platform, config, date. 
Files are stored in\n the repository as packagename/platformname/date-confix.\n\n \"\"\"\n def __init__( self, root, create=None ):\n root = os.path.expanduser(root)\n root = os.path.expandvars(root)\n root = os.path.normpath(root)\n root = os.path.normcase(root)\n if not os.path.exists( root ):\n os.mkdir( root )\n if os.path.isdir(root):\n self.root = root\n self.index = []\n self.lockf = None\n if not create:\n self.load_index()\n\n\n def rescan(self):\n \"\"\"Rescan directory and rebuild index\"\"\"\n os.path.walk( self.root, self._visit, self )\n return\n\n def _visit( self, foo, dirname, names ):\n \"\"\" Used in by self.rescan() to walk the repository\n \"\"\"\n print(self)\n print(foo)\n for name in names:\n if name[-7:]=='.xml.gz':\n print(name)\n f = gzip.open( os.path.join( dirname, name ), 'r' )\n s = session_email_parser( f, id=len(self.index) )\n p = self.createpath(s)\n entry = s.attr.copy()\n entry['path']=p\n self.index.append( entry )\n f.close()\n return\n\n def load_index(self):\n # self.lockf = open( os.path.join( self.root, 'gantlet.index.lock' ), 'r' )\n # fcntl.flock(self.lockf.fileno(), LOCK_EX)\n f = gzip.open( os.path.join( self.root, 'gantlet.index.gz' ), 'r' )\n p = cPickle.Unpickler( f )\n self.index = p.load()\n f.close()\n \n def store_index(self):\n f = gzip.open( os.path.join( self.root, 'gantlet.index.gz' ), 'w' )\n p = cPickle.Pickler( f ) \n p.dump( self.index )\n f.close()\n # if self.lockf:\n # fcntl.flock(self.lockf.fileno(), LOCK_UN)\n # self.lockf = None\n\n def select(self, package='.*', profile='.*', session='.*',\n starttime=0, endtime=maxint):\n \"\"\" get a list of entries corresponding to query of attributes via re\nand a date range.\n \"\"\"\n matches = []\n for entry in self.index:\n entrytime = mktime(strptime(\"%s %s\" %\n (entry['date'], entry['time']),\n \"%Y-%m-%d %H:%M:%S\"))\n if ( re.match(package,entry['package']) and\n re.match(profile,entry['profile']) and\n re.match(session,entry['session']) and\n 
(starttime <= entrytime <= endtime)):\n matches.append( entry )\n return matches\n \n def createpath( self, msg ):\n \"\"\"Create path based on msg data.\n\n currently: (package)/(profile)/(date)_(time)_(session).xml.gz\n \"\"\"\n filename = (msg.attr['date'] + '_'+ msg.attr['time'] +\n '_' + msg.attr['session'] + '.xml.gz')\n relpath = os.path.join( msg.attr['package'], msg.attr['profile'], filename )\n return relpath\n \n def insert(self, msg):\n \"\"\"Insert new message into repository\"\"\"\n packagedir = os.path.join(self.root, msg.attr['package'] )\n if not os.path.isdir( packagedir ):\n os.mkdir( packagedir )\n profiledir = os.path.join( packagedir, msg.attr['profile'] )\n if not os.path.isdir( profiledir ):\n os.mkdir( profiledir )\n relpath = self.createpath( msg ) \n fullpath = os.path.join ( self.root, relpath )\n gz = gzip.open( fullpath, 'wb')\n gz.write( msg.msg )\n gz.close()\n entry = msg.attr.copy()\n entry['path']=relpath\n self.index.append( entry )\n \n \n","repo_name":"LLNL/Babel","sub_path":"regression/gantlet/session_archiver.py","file_name":"session_archiver.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"18760109871","text":"import os\nfrom copy import copy\nfrom itertools import product\nimport click\nfrom time import sleep\nfrom random import shuffle\nfrom templates import *\n\ncur_dir = os.getcwd()\nRESULTS_DIR = os.path.join(cur_dir, \"param_search/\")\nprint(RESULTS_DIR)\n\ndef set_results_dir(d):\n global RESULTS_DIR\n RESULTS_DIR = d\n\nlayer_sizes = {\n 'CartPole-v0': [64],\n 'CartPole-v1': [64],\n 'Acrobot-v1': [64, 64],\n 'LunarLander-v2': [256, 128]\n}\n\nepisodes = {\n 'CartPole-v0': 2000,\n 'CartPole-v1': 2000,\n 'Acrobot-v1': 10000,\n 'LunarLander-v2': 10000 # episodes reduced because otherwise it takes forever\n # (later steps makes sure that at least 10,000 episodes are executed, but this mainly changes the number of 
steps)\n}\n\nhours = {\n 'CartPole-v0': 1,\n 'CartPole-v1': 1,\n 'Acrobot-v1': 8,\n 'LunarLander-v2': 12\n}\n\nmem = {\n 'CartPole-v0': 500,\n 'CartPole-v1': 500,\n 'Acrobot-v1': 1000,\n 'LunarLander-v2': 2000\n}\n\nsearch_space = {\n 'tnuf': [1], # Baselines default\n 'batch_size': [32, 128],\n 'mem_len': [500, 2500, 50000],\n 'exploration_frac': [0.01, 0.1, 0.5],\n 'activation': ['tanh', 'relu'],\n 'lr': [1e-7, 1e-8, '5e-2', '1e-4', '1e-5', '5e-3', '5e-5', 5e-6, 1e-6, 5e-4],\n # lrs also include default baselines learning rate (5e-4) copied from earlier runs\n 'env': layer_sizes.keys()\n }\n\ndef submit(param_str, param_set, *args):\n slurm_file = slurm_template(param_str, os.path.join(RESULTS_DIR, param_str+'.py'), RESULTS_DIR, *args)\n script_file = script_template(param_set)\n file = open(RESULTS_DIR+param_str+'.py', 'w')\n file.write(script_file)\n file.close()\n file = open(RESULTS_DIR+param_str+\".sbatch\", 'w')\n file.write(slurm_file)\n file.close()\n cmd = \"sbatch \"+RESULTS_DIR+param_str+\".sbatch\"\n print(\"Running command: {}\".format(cmd))\n # os.system(cmd)\n\ndef dict_product(dicts):\n return (dict(zip(dicts, x)) for x in product(*iter(dicts.values())))\n\ndef parse_string(x):\n if isinstance(x, list):\n return \"[\"+'_'.join(map(str, x))+\"]\"\n else:\n return x\n\n@click.command()\n@click.option('--env', default='all')\ndef submit_all(env):\n space = search_space\n names = list()\n combos = list(dict_product(space))\n shuffle(combos)\n for param_set_ in combos:\n if env != 'all' and env != param_set_['env']:\n continue\n param_set = copy(param_set_)\n param_set['layers'] = layer_sizes[param_set['env']]\n param_set['episodes'] = episodes[param_set['env']]\n print(param_set)\n param_str = param_set['env'] + '__'\n param_str += \"__\".join([str(x)+\"_\"+str(parse_string(y)) for x, y in param_set.items()])\n submit(param_str, param_set, hours[param_set['env']], mem[param_set['env']])\n names.append(param_str)\n return names\n\nif __name__ == 
'__main__':\n submit_all()\n","repo_name":"ehknight/natural-gradient-deep-q-learning","sub_path":"baseline/parallel_slurm.py","file_name":"parallel_slurm.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"6"} +{"seq_id":"4678072848","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nfrom functools import reduce\n\nfrom poems.model import rnn_model\nfrom poems.poems import process_poems\nfrom get_datas import get_datas_from_database\n\nstart_token = 'B'\nend_token = 'E'\nmodel_dir = './model/'\ncorpus_file = './data/poems.txt'\n\n\ndef test():\n batch_size = 1\n print('## loading model from %s...' % model_dir)\n\n input_data = tf.placeholder(tf.int32, [batch_size, 7])\n end_points = rnn_model(model='lstm', input_data=input_data, output_data=None, vocab_size=33+16,\n rnn_size=128, output_num=7,input_num=7,num_layers=7, batch_size=batch_size, learning_rate=0.01)\n\n saver = tf.train.Saver(tf.global_variables())\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n with tf.Session() as sess:\n sess.run(init_op)\n\n checkpoint = tf.train.latest_checkpoint(model_dir)\n saver.restore(sess, checkpoint)\n ssqdata = get_datas_from_database()\n\n predict = sess.run(end_points['prediction'], feed_dict={input_data: [ssqdata[2223]]})\n predict_result, aa = get_correct(predict)\n print(\"概率最大的一组: {}\".format(predict_result))\n\n for c in range(2, 11):\n j = -1\n for i in aa:\n j += 1\n predict[j][i] -= 1\n predict_result, aa = get_correct(predict)\n print(\"概率第{}大的一组: {}\".format(c, predict_result))\n\n\ndef get_correct(predict):\n results = []\n aa = []\n for i in range(6):\n result=np.argmax(predict[i],axis=0)\n while result<0 or result>32:\n predict[i][result] -= 1\n result=np.argmax(predict[i],axis=0)\n results.append(result)\n for j in range(i+1, 6):\n predict[j][result] -= 1\n aa = results[0:6]\n results.sort()\n\n 
result=np.argmax(predict[6],axis=0)\n while result<0 or result>15:\n predict[6][result] -= 1\n result=np.argmax(predict[6],axis=0)\n results.append(result)\n aa.append(result)\n\n results=results+np.asarray([1,1,1,1,1,1,1])\n return results, aa\n\n\nif __name__ == '__main__':\n test()","repo_name":"DoLNw/PythonSSQ","sub_path":"1000bet.py","file_name":"1000bet.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27648862480","text":"from lightifypy.Errors import LightifyException\nfrom lightifypy.Command import Command\nimport struct\n\n\nclass PacketBuilder(object):\n def __init__(self, lightify_link):\n self.__lightify_link = lightify_link\n self.__luminary = None\n self.__command = None\n self.__data = bytes()\n self.__switching = -1\n self.__rgb = None\n self.__luminance = None\n self.__temperature = None\n self.__millis = 0\n self.__seq = 1\n self.__buffer = bytes()\n\n def with_(self, luminary):\n self.__luminary = luminary\n return self\n\n def on(self, command):\n self.__command = command\n return self\n\n def switching(self, switching):\n self.__switching = switching\n return self\n\n def rgb(self, r, g, b):\n self.__rgb = [r, g, b]\n return self\n\n def luminance(self, luminance):\n self.__luminance = luminance\n return self\n\n def temperature(self, temperature):\n self.__temperature = temperature\n return self\n\n def millis(self, millis):\n self.__millis = millis\n return self\n\n def data(self, data):\n self.__data = data\n return self\n\n def build(self):\n self.validate()\n packet_size = self.calculate_packet_size()\n\n request_id = self.__lightify_link.next_seq()\n self.put_header(packet_size, request_id)\n if not self.__luminary:\n self.put_global()\n else:\n self.put_addressable()\n while len(self.__buffer) < packet_size+2:\n self.__buffer += struct.pack(' 2\n BB -> 1\n RA -> -2\n RB -> -1\n\n [R,B]\n\n [R,\n B]\n\n [R,0,\n 0,B]\n\n [B,0,\n 
0,R]\n\n + color inversion\n + different layer pairs:\n\n blue-base - red-active\n blue-active - red-base\n blue-active - red-active\n red-active - blue-base\n red-active - blue-active\n\n\n :return: 40 kernels (4*2*5) some of unstandart form. So need to break into multiple functions(\n '''\n pass\n\n def get_horizontal_kernels(self):\n res = torch.unsqueeze(torch.tensor(PAIRS + REVERSED_PAIRS, requires_grad=False), 2)\n res.unsqueeze_(1)\n return res.float()\n\n def get_vertical_kernels(self):\n res = torch.unsqueeze(torch.tensor(PAIRS + REVERSED_PAIRS, requires_grad=False), 1)\n res.unsqueeze_(1)\n return res.float()\n\n def get_diagonal_kernels(self):\n '''\n :return: 20 diagonal kernels\n '''\n kernels = []\n for a, b in PAIRS + REVERSED_PAIRS:\n kernels.append([[0, a],\n [b, 0]])\n kernels.append([[a, 0],\n [0, b]])\n\n return torch.unsqueeze(torch.tensor(kernels, requires_grad=False), 1).float()\n\n def get_features(self, games):\n # move\n move_feature = torch.tensor([game.to_move for game in games], requires_grad=False).to(torch.int8)\n\n fields = torch.tensor([game.field for game in games], requires_grad=False)\n fields_count = fields.shape[0]\n\n fields_to_flip = move_feature == -1\n fields[fields_to_flip] = -fields[fields_to_flip].flip(1)\n\n # positions of all types\n plain_features = torch.cat((fields == 2, fields == 1, fields == -1, fields == -2), dim=1).to(torch.int8)\n\n # 2x2 kernels\n fields = fields.reshape(-1, games[0].size_h, games[0].size_w)\n\n fields.unsqueeze_(1) # channels = 1\n fields = fields.float()\n\n kernel_features = torch.cat((F.conv2d(fields, self.v_kernels).view(fields_count, -1),\n F.conv2d(fields, self.h_kernels).view(fields_count, -1),\n F.conv2d(fields, self.d_kernels).view(fields_count, -1)), dim=1) == 4\n kernel_features = kernel_features.to(torch.int8)\n features = torch.cat((plain_features, kernel_features), dim=1)\n return 
features\n","repo_name":"nkorobkov/virus-game","sub_path":"rl/feature/KernelFeatures.py","file_name":"KernelFeatures.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"7171124504","text":"#Answer to Problem 1: Multiples of 3 and 5\n\nt=int(input())\ndef ar(x):\n return x*(x+1);\nfor i in range(t):\n n=int(input())\n n-=1;\n a=n//3;\n b=n//5;\n c=n//15;\n print(int(int(3*ar(a) + 5*ar(b) - 15*ar(c))>>1));","repo_name":"CompetitiveCode/hackerrank-python","sub_path":"Project Euler+/Project Euler #1 Multiples of 3 and 5.py","file_name":"Project Euler #1 Multiples of 3 and 5.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"31034483029","text":"import logging\nimport gc\nimport os\nimport requests\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.utils.executor import start_webhook\nfrom network_gans import * # Import architecture\nfrom functions import * # Import help functions\n\n\n# Set API_TOKEN. 
You must have your own.\n\nCREATOR_ID = os.environ['CREATOR_ID']\nBOT_TOKEN = os.environ['BOT_TOKEN']\n\n# webhook settings\nWEBHOOK_HOST = os.environ['WEBHOOK_HOST_ADDR']\nWEBHOOK_PATH = f'/webhook/{BOT_TOKEN}'\nWEBHOOK_URL = f\"{WEBHOOK_HOST}{WEBHOOK_PATH}\"\n\n# webserver settings\nWEBAPP_HOST = '0.0.0.0' # or ip\nWEBAPP_PORT = os.environ['PORT']\n\n# Configure logging.\nlogging.basicConfig(level=logging.INFO)\n\n# Initialize bot and dispatcher.\nbot = Bot(token=BOT_TOKEN)\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n# Initialize the net.\nstyle_model = Net(ngf=128)\nstyle_model.load_state_dict(torch.load('pretrained.model'), False)\n\n# Initializing flags to check for images.\ncontent_flag = False\nstyle_flag = False\n\n\nclass GetPictures(StatesGroup):\n waiting_for_photos = State()\n waiting_for_another_photos = State()\n\n\nclass GetCity(StatesGroup):\n waiting_for_city = State()\n\n\nbuttons_for_start = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\nbuttons_for_start.add(types.KeyboardButton(text=\"I want to start style transfer \\U0000270D\"))\nbuttons_for_start.add(types.KeyboardButton(text=\"What can you do? \\U0001F9D0\"))\nbuttons_for_start.add(types.KeyboardButton(text=\"I want to have some interesting examples of style \\U0001F30C\"))\nbuttons_for_start.add(types.KeyboardButton(text=\"Tell me about your creator \\U0001F468\\U0000200D\\U0001F4BB\"))\nbuttons_for_start.add(types.KeyboardButton(text=\"What is the weather now? 
\\U00002600\"))\n\nbuttons_for_content = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\nbuttons_for_content.add(types.KeyboardButton(text=\"I want another content image \\U0001F501\"))\n\nbuttons_for_style = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\nbuttons_for_style.add(types.KeyboardButton(text=\"Let's start style transfer \\U0001F3C1\"))\nbuttons_for_style.add(types.KeyboardButton(text=\"I want another style image \\U0001F501\"))\n\n\n\ndef transform(content_root, style_root, im_size):\n \"\"\"Function for image transformation.\"\"\"\n content_image = tensor_load_rgbimage(content_root, size=im_size,\n keep_asp=True).unsqueeze(0)\n style = tensor_load_rgbimage(style_root, size=im_size).unsqueeze(0)\n style = preprocess_batch(style)\n style_v = Variable(style)\n content_image = Variable(preprocess_batch(content_image))\n style_model.setTarget(style_v)\n output = style_model(content_image)\n tensor_save_bgrimage(output.data[0], 'result' + user_id + '.jpg', False)\n\n # Clear the RAM.\n del content_image\n del style\n del style_v\n del output\n torch.cuda.empty_cache()\n gc.collect()\n\n\n@dp.message_handler(commands=['start'], state='*')\nasync def satrt(message: types.Message):\n \"\"\"Test function.\"\"\"\n global user_name\n user_name = str(message.from_user.first_name)\n await message.answer(text=f\"Hi, *{user_name}*, \\nI am very smart bot \\U0001F913, what can I do for you?\",\n reply_markup=buttons_for_start, parse_mode='Markdown')\n\n\n@dp.message_handler(lambda message: message.text == \"Tell me about your creator \\U0001F468\\U0000200D\\U0001F4BB\",\n state='*')\n@dp.message_handler(commands=['creator'], state='*')\nasync def creator(message: types.Message):\n \"\"\"Displays information about the bot's Creator.\"\"\"\n\n await message.answer(text=\"I'm student of Deep Learning School (by MIPT) and also Data Analytics School (by X5 Retail Group). 
Work in PwC (Data analyst)\"\n \" and in 'School of programmers' (Teacher)\\n\\nLink to GitHub \\U0001F4BB: https://github.com/PsychoBel\\nContact with me \\U0001F4EB: @psycho1388\", reply_markup=buttons_for_start)\n\n\n@dp.message_handler(commands=\"set_commands\", state='*')\nasync def cmd_set_commands(message: types.Message):\n if message.from_user.id == CREATOR_ID:\n commands = [types.BotCommand(command=\"/transfer\", description=\"Initialize style transfer\"),\n types.BotCommand(command=\"/help\", description=\"Description of me\"),\n types.BotCommand(command=\"/creator\", description=\"Information about my creator\"),\n types.BotCommand(command=\"/styles\", description=\"Different interesting examples of styles\"),\n types.BotCommand(command=\"/weather\", description=\"Find out the weather in your city\"),\n types.BotCommand(command=\"/back\", description=\"Change picture\"),\n types.BotCommand(command=\"/initialize\", description=\"Start transfer\")]\n await bot.set_my_commands(commands)\n await message.answer(\"Команды настроены.\")\n\n\n@dp.message_handler(lambda message: message.text == \"I want to have some interesting examples of style \\U0001F30C\", state='*')\n@dp.message_handler(commands=\"styles\", state='*')\nasync def send_different_styles(message: types.Message):\n media = types.MediaGroup()\n media.attach_photo(types.InputFile('photos_for_style/style_2'))\n media.attach_photo(types.InputFile('photos_for_style/style_3'))\n media.attach_photo(types.InputFile('photos_for_style/style_4'))\n media.attach_photo(types.InputFile('photos_for_style/style_5'))\n media.attach_photo(types.InputFile('photos_for_style/style_6'))\n media.attach_photo(types.InputFile('photos_for_style/style_7'))\n media.attach_photo(types.InputFile('photos_for_style/style_9'))\n media.attach_photo(types.InputFile('photos_for_style/style_10'))\n media.attach_photo(types.InputFile('photos_for_style/style_12'))\n media.attach_photo(types.InputFile('photos_for_style/style_13'))\n 
await message.reply_media_group(media=media)\n\n\n@dp.message_handler(lambda message: message.text == 'What is the weather now? \\U00002600', state='*')\n@dp.message_handler(commands=['weather'], state='*')\nasync def find_city(message: types.Message):\n buttons_for_city = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n buttons_for_city.add(types.KeyboardButton(text=\"Moscow\"))\n buttons_for_city.add(types.KeyboardButton(text=\"St. Petersburg\"))\n\n await message.answer(text=\"Choose in which city you would like to know the weather \\U000026C5\\n\\n\"\n \"If your city *Moscow or St. Petersburg click button below*\\n\\n\"\n \"If you are from another city, *write it's name in English*\",\n reply_markup=buttons_for_city, parse_mode='Markdown')\n\n await GetCity.waiting_for_city.set()\n\n\n@dp.message_handler(state=GetCity.waiting_for_city)\nasync def weather_in_city(message: types.Message, state: FSMContext):\n appid = 'fed49783386c49273b6565dac196d79b'\n city_id = 0\n flag = False\n if message.text == \"Moscow\": city_id = 524901\n elif message.text == \"St. 
Petersburg\": city_id = 4778626\n\n try:\n res = requests.get(\"http://api.openweathermap.org/data/2.5/find\",\n params={'q': message.text, 'type': 'like', 'units': 'metric', 'APPID': appid})\n data = res.json()\n city_id = data['list'][0]['id']\n except Exception as e:\n await message.answer(text=\"Sorry, I couldn't find this city\", reply_markup=buttons_for_start)\n flag = True\n await state.finish()\n\n try:\n res = requests.get(\"http://api.openweathermap.org/data/2.5/forecast\",\n params={'id': city_id, 'units': 'metric', 'lang': 'ru', 'APPID': appid})\n data = res.json()\n today = data['list'][0]\n await message.answer(text=f\"Last update at: {today['dt_txt']}\\n*Tempreture: {round(today['main']['temp'])}*\\n*Condition: {today['weather'][0]['description']}*\",\n reply_markup=buttons_for_start, parse_mode='Markdown')\n await state.finish()\n except Exception as e:\n if not flag:\n await message.answer(text=\"Sorry, I couldn't find this city\", reply_markup=buttons_for_start)\n await state.finish()\n\n\n@dp.message_handler(lambda message: message.text == \"What can you do? 
\\U0001F9D0\", state='*')\n@dp.message_handler(commands=['help'], state='*')\nasync def help_message(message: types.Message):\n \"\"\"\n Outputs a small instruction when the corresponding command is received.\n \"\"\"\n await message.answer(text=\"This bot will helps you making style transformations \\U0001FA84\\n\"\n \"*1) Load photo with your content first*\\n\"\n \"*2) Then, load photo with style*\\n\"\n \"*3) Choose quality of result photo*\\n\"\n \"*4) Get joint images and be happy!* \\U0001F603\\n\"\n \"Let me show you some examples to make you understand:\", parse_mode='Markdown')\n with open('examples/vysocz.png', 'rb') as photo:\n await message.reply_photo(photo, caption='Visocky and Van Gogh')\n with open('examples/mayak.png', 'rb') as photo:\n await message.reply_photo(photo, caption='Mayakovsky and Van Gogh')\n with open('examples/gagarin.png', 'rb') as photo:\n await message.reply_photo(photo, caption='Gagarin and Van Gogh')\n\n await message.answer(text='If you want to try it, *click the button below* or press */transfer*\\n'\n 'Also you can read */about* my creator\\nOr even find out the */weather* in your city',\n reply_markup=buttons_for_start, parse_mode='Markdown')\n\n\n@dp.message_handler(lambda message: message.text == \"I want to start style transfer \\U0000270D\", state='*')\n@dp.message_handler(commands=['transfer'], state='*')\nasync def start_style_transfer(message: types.Message):\n global user_id\n user_id = str(message.from_user.id)\n logging.info(f\"USER {user_id} STARTS STYLE TRANSFER\")\n await message.answer(text=\"Let's start \\U0001F4AB\\n\"\n \"Send me *content* image please\", parse_mode='Markdown')\n await GetPictures.waiting_for_photos.set()\n\n\n@dp.message_handler(state=GetPictures.waiting_for_photos, content_types=types.ContentTypes.PHOTO)\nasync def photo_processing(message):\n \"\"\"\n Triggered when the user sends an image and saves it for further processing.\n \"\"\"\n global content_flag\n global style_flag\n\n # The 
bot is waiting for a picture with content from the user.\n if not content_flag:\n logging.info(f\"USER {user_id} DOWNLOAD CONTENT IMAGE\")\n await message.photo[-1].download('content' + user_id + '.jpg')\n await message.answer(text='Cool! I got content image! \\U0001F525\\n'\n '*Now, send me style image please.*\\n\\n'\n 'Or click */back* command or *button below* to choose '\n 'another content image.', reply_markup=buttons_for_content, parse_mode='Markdown')\n\n content_flag = True # Now the bot knows that the content image exists.\n\n # The bot is waiting for a picture with style from the user.\n else:\n logging.info(f\"USER {user_id} DOWNLOAD STYLE IMAGE\")\n await message.photo[-1].download('style' + user_id + '.jpg')\n await message.answer(text='Perfect! I gor style image! \\U000026A1\\n'\n '*Now, press /initialize or *button below* to start style transfer*\\n\\n'\n 'Or click */back* command or *button below* to choose '\n 'another content image.', reply_markup=buttons_for_style, parse_mode='Markdown')\n\n style_flag = True\n\n await GetPictures.waiting_for_photos.set()\n\n\n@dp.message_handler(lambda message: message.text in (\"I want another style image \\U0001F501\",\n \"I want another content image \\U0001F501\"),\n state=GetPictures.waiting_for_photos)\n@dp.message_handler(state=GetPictures.waiting_for_photos, commands=['back'])\nasync def photo_processing(message: types.Message):\n \"\"\"Allows the user to select a different image with content or style.\"\"\"\n\n global content_flag\n global style_flag\n # Let's make sure that there is something to cancel.\n if content_flag and style_flag:\n logging.info(f\"USER {user_id} WANT NEW STYLE IMAGE\")\n await message.answer(text=\"Choose new style image\")\n await GetPictures.waiting_for_another_photos.set()\n else:\n logging.info(f\"USER {user_id} WANT NEW CONTENT IMAGE\")\n await message.answer(text=\"Choose new content image\")\n await 
GetPictures.waiting_for_another_photos.set()\n\n\n@dp.message_handler(state=GetPictures.waiting_for_another_photos, content_types=types.ContentTypes.PHOTO)\nasync def photo_processing(message):\n global content_flag\n global style_flag\n if content_flag and style_flag:\n logging.info(f\"USER {user_id} DOWNLOAD STYLE IMAGE\")\n await message.photo[-1].download('style' + user_id + '.jpg')\n await message.answer(text='Perfect! I gor style image! \\U000026A1\\n'\n '*Now, press /initialize or button below to start style transfer*\\n\\n'\n 'Or click */back* command or *button below* to choose '\n 'another content image.', reply_markup=buttons_for_style, parse_mode='Markdown')\n await GetPictures.waiting_for_photos.set()\n else:\n logging.info(f\"USER {user_id} DOWNLOAD CONTENT IMAGE\")\n await message.photo[-1].download('content' + user_id + '.jpg')\n await message.answer(text='Cool! I got content image! \\U0001F525\\n'\n '*Now, send me style image please.*\\n\\n'\n 'Or click */back* command or *button below* to choose '\n 'another content image.', reply_markup=buttons_for_content, parse_mode='Markdown')\n await GetPictures.waiting_for_photos.set()\n\n\n@dp.message_handler(lambda message: message.text == \"Let's start style transfer \\U0001F3C1\", state=GetPictures.waiting_for_photos)\n@dp.message_handler(commands=['initialize'], state=GetPictures.waiting_for_photos)\nasync def run_style_transfer(message: types.Message, state: FSMContext):\n \"\"\"Preparing for image processing.\"\"\"\n\n # Let's make sure that the user has added both images.\n logging.info(f\"USER {user_id} INITIALIZE\")\n if not (content_flag * style_flag): # Conjunction\n await message.answer(text=\"Upload both images please.\")\n return\n\n # Adding answer options.\n res = types.ReplyKeyboardMarkup(resize_keyboard=True,\n one_time_keyboard=True)\n res.add(types.KeyboardButton(text=\"Bad quality \\U0001F534, Fast processing \\U0001F7E2\"))\n res.add(types.KeyboardButton(text=\"Medium quality 
\\U0001F7E0, Medium processing \\U0001F7E0\"))\n res.add(types.KeyboardButton(text=\"Good quality \\U0001F7E2, Low processing \\U0001F534\"))\n\n await message.answer(text=\" Now you need to choose the quality of the resulting photo\"\n \" The better the quality of the photo you choose, the longer it will take to process the photo. \",\n reply_markup=res)\n\n\n@dp.message_handler(lambda message: message.text in (\"Bad quality \\U0001F534, Fast processing \\U0001F7E2\",\n \"Medium quality \\U0001F7E0, Medium processing \\U0001F7E0\",\n \"Good quality \\U0001F7E2, Low processing \\U0001F534\"),\n state=GetPictures.waiting_for_photos)\nasync def processing(message: types.Message, state: FSMContext):\n \"\"\"Image processing depending on the selected quality.\"\"\"\n global content_flag\n global style_flag\n\n if message.text == 'Bad quality \\U0001F534, Fast processing \\U0001F7E2':\n image_size = 128\n elif message.text == 'Medium quality \\U0001F7E0, Medium processing \\U0001F7E0':\n image_size = 130 # 256 (130 just for project because problems with memory in Heroku) \n else:\n image_size = 135 # 300 (135 just for project because problems with memory in Heroku)\n\n await message.answer(text='Style transfering starts. 
'\n 'Wait a little bit \\U0001F558',\n reply_markup=types.ReplyKeyboardRemove())\n transform('content' + user_id + '.jpg', 'style' + user_id + '.jpg', image_size)\n with open('result' + user_id + '.jpg', 'rb') as file:\n await message.answer_photo(file, caption='Work is done!', reply_markup=buttons_for_start)\n content_flag = False\n style_flag = False\n os.remove('content' + user_id + '.jpg')\n os.remove('style' + user_id + '.jpg')\n os.remove('result' + user_id + '.jpg')\n await state.finish()\n\n\n@dp.message_handler(content_types=types.ContentTypes.PHOTO, state='*')\nasync def catch_bad_photos(message):\n await message.answer(text='Sorry, before you send me a photo, you need to initialize me\\U0001F974\\n' \n 'Press */transfer* or *button below*',\n reply_markup=buttons_for_start, parse_mode='Markdown')\n\n\n@dp.message_handler(state='*')\nasync def catch_bad_commands(message: types.Message):\n await message.answer(text=\"Sorry, I don't know this command \\U0001F62C\\n\"\n \"Write *'/'* to see list of commands or press */help*\",\n reply_markup=buttons_for_start, parse_mode='Markdown')\n\n\nasync def on_startup(dp):\n await bot.set_webhook(WEBHOOK_URL)\n logging.info(f\"Start webhook..\\tWEBAPP_HOST-{WEBAPP_HOST}; WEBAPP_PORT-{WEBAPP_PORT};\\n\"\n f\"WEBAPP_URL-{WEBHOOK_URL};\")\n\n\nasync def on_shutdown(dp):\n logging.warning('Shutting down..')\n await dp.storage.close()\n await dp.storage.wait_closed()\n logging.warning('Bye!')\n\n\nif __name__ == '__main__':\n\n start_webhook(\n dispatcher=dp,\n webhook_path=WEBHOOK_PATH,\n on_startup=on_startup,\n on_shutdown=on_shutdown,\n skip_updates=True,\n host=WEBAPP_HOST,\n port=WEBAPP_PORT,\n )\n","repo_name":"belousm/TelegramBotGANS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12611111499","text":"#!/usr/bin/env python\nimport pytest\nfrom utils import *\n\n\ndef 
test_abusive_sergeant():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tassert wisp.atk == 1\n\tgame.player1.give(\"CS2_188\").play(target=wisp)\n\tassert wisp.atk == 3\n\tgame.end_turn()\n\tassert wisp.atk == 1\n\n\ndef test_acolyte_of_pain():\n\tgame = prepare_game()\n\tacolyte = game.player1.give(\"EX1_007\")\n\tacolyte.play()\n\tgame.player1.discard_hand()\n\tassert len(game.player1.hand) == 0\n\tgame.player1.give(MOONFIRE).play(target=acolyte)\n\tassert len(game.player1.hand) == 1\n\tgame.player1.give(\"EX1_012\").play()\n\t# extra damages can trigger only once\n\tgame.player1.give(MOONFIRE).play(target=acolyte)\n\tassert len(game.player1.hand) == 2\n\tassert acolyte.dead\n\n\ndef test_alarmobot():\n\tgame = prepare_game()\n\tgame.current_player.discard_hand()\n\tbot = game.current_player.give(\"EX1_006\")\n\tbot.play()\n\tassert bot.health == 3\n\twisp = game.current_player.give(WISP)\n\tgame.current_player.give(\"EX1_014t\").play(target=bot)\n\tmistcaller = game.current_player.give(\"AT_054\")\n\tmistcaller.play()\n\tmistcaller.destroy()\n\tassert bot.health == 4\n\tassert wisp.health == 2\n\tfor i in range(9):\n\t\tgame.current_player.give(MOONFIRE)\n\tassert len(game.current_player.hand) == 10\n\tassert bot.zone == Zone.PLAY\n\tassert wisp.zone == Zone.HAND\n\tgame.skip_turn()\n\tassert bot in game.current_player.hand\n\tassert wisp in game.current_player.field\n\t# bot is always 3/0/3 after rebounding\n\tassert bot.health == 3\n\t# TODO: BUG the minion's buff in hand should remain\n\t# assert wisp.health == 2\n\tassert len(game.current_player.field) == 1\n\tassert len(game.current_player.hand) == 10\n\n\t# bot should not trigger if hand has no minions\n\tbot.play()\n\tgame.current_player.give(MOONFIRE)\n\tassert len(game.current_player.hand) == 10\n\tgame.skip_turn()\n\tassert len(game.current_player.hand) == 10\n\tassert bot.zone == Zone.PLAY\n\tassert len(game.current_player.field) == 2\n\n\ndef test_alexstrasza():\n\tgame 
= prepare_game()\n\talex1 = game.player1.give(\"EX1_561\")\n\tassert game.player1.hero.health == 30\n\tassert game.player2.hero.health == 30\n\talex1.play(target=game.player1.hero)\n\tassert game.player1.hero.health == 15\n\tassert game.player1.hero.max_health == 30\n\tassert game.player2.hero.health == 30\n\tgame.skip_turn()\n\n\talex2 = game.player1.give(\"EX1_561\")\n\tassert game.player2.hero.health == 30\n\talex2.play(target=game.player2.hero)\n\tassert game.player2.hero.health == 15\n\tgame.end_turn()\n\n\ndef test_alexstrasza_armor():\n\tgame = prepare_game(CardClass.WARRIOR, CardClass.WARRIOR)\n\tgame.player1.hero.power.use()\n\tgame.end_turn()\n\n\talex = game.player2.give(\"EX1_561\")\n\tassert game.player1.hero.health == 30\n\tassert game.player1.hero.armor == 2\n\talex.play(target=game.player1.hero)\n\tassert game.player1.hero.health == 15\n\tassert game.player1.hero.armor == 2\n\n\ndef test_alexstrasza_ragnaros():\n\tgame = prepare_game()\n\tmajordomo = game.player1.give(\"BRM_027\")\n\tmajordomo.play()\n\tmajordomo.destroy()\n\tassert game.player1.hero.id == \"BRM_027h\"\n\tassert game.player1.hero.health == 8\n\tassert game.player1.hero.max_health == 8\n\tgame.end_turn()\n\tgame.end_turn()\n\n\talex = game.player1.give(\"EX1_561\")\n\talex.play(target=game.player1.hero)\n\tassert game.player1.hero.buffs\n\tassert game.player1.hero.health == 15\n\tassert game.player1.hero.max_health == 15\n\n\ndef test_amani_berserker():\n\tgame = prepare_game()\n\tamani1 = game.player1.give(\"EX1_393\")\n\tamani1.play()\n\tgame.end_turn()\n\n\tamani2 = game.player2.give(\"EX1_393\")\n\tamani2.play()\n\tgame.end_turn()\n\n\tassert amani1.atk == amani2.atk == 2\n\tamani1.attack(amani2)\n\t# check both minions are still alive, that the enrage didn't trigger too early\n\tassert amani1.zone == amani2.zone == Zone.PLAY\n\tassert amani1 in game.player1.field\n\tassert amani2 in game.player2.field\n\tassert amani1.damage == amani2.damage == 2\n\tassert amani1.atk == 
amani2.atk == 2 + 3\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert amani1.atk == amani2.atk == 2\n\tassert amani1.health == amani2.health == 3\n\tgame.player1.give(MOONFIRE).play(target=amani1)\n\tassert amani1.atk == 2 + 3\n\n\ndef test_ancient_of_war():\n\tgame = prepare_empty_game()\n\tancient_of_war = game.current_player.give(\"EX1_178\")\n\tancient_of_war.play(choose=\"EX1_178a\")\n\tassert ancient_of_war.taunt\n\tassert ancient_of_war.atk == 5\n\tassert ancient_of_war.health == 5 + 5\n\tgame.end_turn()\n\n\tancient_of_war2 = game.current_player.give(\"EX1_178\")\n\tancient_of_war2.play(choose=\"EX1_178b\")\n\tassert not ancient_of_war2.taunt\n\tassert ancient_of_war2.atk == 5 + 5\n\tassert ancient_of_war2.health == 5\n\tgame.end_turn()\n\n\tgame.player1.summon(\"OG_044\")\n\tancient_of_war3 = game.current_player.give(\"EX1_178\")\n\twith pytest.raises(InvalidAction):\n\t\tancient_of_war3.play(choose=\"EX1_178b\")\n\tancient_of_war3.play()\n\tassert ancient_of_war3.taunt\n\tassert ancient_of_war3.atk == 5 + 5\n\tassert ancient_of_war3.health == 5 + 5\n\n\ndef test_ancestral_healing():\n\tgame = prepare_empty_game()\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tfor _ in range(5):\n\t\tgame.player1.give(MOONFIRE).play(target=statue)\n\tassert not statue.taunt\n\tassert statue.damage == 5\n\tassert statue.health == 10 - 5\n\tgame.player1.give(\"CS2_041\").play(statue)\n\tassert statue.taunt\n\tassert statue.health == 10\n\n\ndef test_ancestral_spirit():\n\tgame = prepare_game()\n\tancestral = game.player1.give(\"CS2_038\")\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tassert not wisp.has_deathrattle\n\tancestral.play(target=wisp)\n\tassert wisp.has_deathrattle\n\twisp.destroy()\n\tassert len(game.board) == 1\n\tassert game.player1.field[0].id == WISP\n\n\ndef test_ancient_of_lore():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\n\tfor _ in 
range(10):\n\t\tgame.player1.give(MOONFIRE).play(target=game.player1.hero)\n\n\tassert game.player1.hero.health == 30 - 10\n\n\tancient1 = game.player1.give(\"NEW1_008\")\n\tancient1.play(choose=\"NEW1_008a\") # Draw 2 Cards\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.hero.health == 30 - 10\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tgame.player1.discard_hand()\n\tancient2 = game.player1.give(\"NEW1_008\")\n\t# Play to heal hero by 5\n\tancient2.play(target=game.player1.hero, choose=\"NEW1_008b\")\n\tassert not game.player1.hand\n\tassert game.player1.hero.health == 30 - 10 + 5\n\n\ndef test_ancient_watcher():\n\tgame = prepare_game()\n\twatcher = game.player1.give(\"EX1_045\")\n\twatcher.play()\n\tgame.end_turn()\n\tgame.end_turn()\n\tassert not watcher.can_attack()\n\tgame.player1.give(SILENCE).play(target=watcher)\n\tassert watcher.can_attack()\n\n\ndef test_animal_companion():\n\tgame = prepare_game()\n\tcompanion = game.player1.give(\"NEW1_031\")\n\tcompanion.play()\n\tassert len(game.player1.field) == 1\n\tassert game.player1.field[0].id in (\"NEW1_032\", \"NEW1_033\", \"NEW1_034\")\n\n\ndef test_angry_chicken():\n\tgame = prepare_game()\n\tchicken = game.player1.give(\"EX1_009\")\n\tchicken.play()\n\tstormwind = game.player1.give(\"CS2_222\")\n\tstormwind.play()\n\tassert chicken.enrage\n\tassert not chicken.enraged\n\tassert chicken.atk == chicken.health == 2\n\tgame.player1.give(MOONFIRE).play(target=chicken)\n\tassert chicken.enraged\n\tassert chicken.atk == 1 + 1 + 5\n\tassert chicken.health == 1\n\tstormwind.destroy()\n\tassert chicken.atk == chicken.health == 1\n\tassert not chicken.enraged\n\n\ndef test_arathi_weaponsmith():\n\tgame = prepare_game()\n\tarathi = game.player1.give(\"EX1_398\")\n\tassert not game.player1.weapon\n\tarathi.play()\n\tassert game.player1.weapon.id == \"EX1_398t\"\n\n\ndef test_arcane_explosion():\n\tgame = prepare_game()\n\t# play some 
wisps\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.end_turn()\n\n\tarcanex = game.player2.give(\"CS2_025\")\n\tarcanex.play()\n\tassert not game.board\n\n\ndef test_arcane_golem():\n\tgame = prepare_game(game_class=Game)\n\tgolem = game.player1.give(\"EX1_089\")\n\tfor i in range(3):\n\t\tgame.end_turn()\n\t\tgame.end_turn()\n\n\tassert game.player1.max_mana == 4\n\tassert game.player2.max_mana == 3\n\tgolem.play()\n\tassert game.player1.max_mana == 4\n\tassert game.player2.max_mana == 4\n\n\ndef test_arcane_missiles():\n\tgame = prepare_game()\n\twisp = game.player2.summon(WISP)\n\tmissiles = game.player1.give(\"EX1_277\")\n\tmissiles.play()\n\tif wisp.dead:\n\t\tassert game.player2.hero.health == 28\n\telse:\n\t\tassert game.player2.hero.health == 27\n\n\ndef test_archmage_antonidas():\n\tgame = prepare_game()\n\tantonidas = game.player1.give(\"EX1_559\")\n\tantonidas.play()\n\tgame.player1.discard_hand()\n\tassert len(game.player1.hand) == 0\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.hand[0].id == \"CS2_029\"\n\tgame.player1.give(THE_COIN).play()\n\tassert len(game.player1.hand) == 2\n\tassert game.player1.hand[1].id == \"CS2_029\"\n\n\ndef test_armorsmith():\n\tgame = prepare_game()\n\tarmorsmith1 = game.player1.give(\"EX1_402\")\n\tarmorsmith1.play()\n\tgame.end_turn()\n\n\tarmorsmith2 = game.player2.give(\"EX1_402\")\n\tarmorsmith2.play()\n\tgame.end_turn()\n\n\tassert game.player1.hero.armor == game.player2.hero.armor == 0\n\tarmorsmith1.attack(target=armorsmith2)\n\tassert game.player1.hero.armor == game.player2.hero.armor == 1\n\tgame.end_turn()\n\n\tgame.player2.give(\"EX1_402\").play()\n\tgame.player2.give(WISP).play()\n\n\t# Whirlwind\n\t# 1 armor on each hero, 2 smiths in play for current player, 1 for opponent\n\tgame.player2.give(\"EX1_400\").play()\n\tassert game.player2.hero.armor == 1 + (2 * 3)\n\tassert 
game.current_player.hero.health == 30\n\tassert game.player1.hero.armor == 1 + 1\n\n\ndef test_avenging_wrath():\n\tgame = prepare_game()\n\twisp = game.player2.summon(WISP)\n\tgame.player1.give(\"EX1_384\").play()\n\tif wisp.dead:\n\t\tassert game.player2.hero.health == 30 - 7\n\telse:\n\t\tassert game.player2.hero.health == 30 - 8\n\tgame.end_turn()\n\n\t# Summon Malygos and test that spellpower only increases dmg by 5\n\tgame.player2.summon(\"EX1_563\")\n\tgame.player2.give(\"EX1_384\").play()\n\tassert game.player1.hero.health == 30 - (8 + 5)\n\n\ndef test_bane_of_doom():\n\tgame = prepare_game()\n\tdoom = game.player1.give(\"EX1_320\")\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tdoom.play(target=statue)\n\tassert len(game.player1.field) == 1\n\tassert statue.health == 10 - 2\n\tstatue.destroy()\n\tgame.end_turn()\n\tgame.end_turn()\n\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tdoom2 = game.player1.give(\"EX1_320\")\n\tdoom2.play(target=wisp)\n\tassert len(game.player1.field) == 1\n\tassert game.player1.field[0].race == Race.DEMON\n\tassert game.player1.field[0].data.collectible\n\n\ndef test_baron_geddon():\n\tgame = prepare_game()\n\n\tgeddon1 = game.player1.give(\"EX1_249\")\n\twisp = game.player1.give(WISP)\n\tgeddon1.play()\n\twisp.play()\n\tassert geddon1.health == 5\n\tassert not wisp.dead\n\tassert game.player1.hero.health == 30\n\tassert game.player2.hero.health == 30\n\tgame.end_turn()\n\tassert geddon1.health == 5\n\tassert wisp.dead\n\tassert game.player1.hero.health == 28\n\tassert game.player2.hero.health == 28\n\n\tgeddon2 = game.player2.give(\"EX1_249\")\n\tgeddon2.play()\n\tassert geddon1.health == 5\n\tassert geddon2.health == 5\n\tgame.end_turn()\n\tassert geddon1.health == 3\n\tassert geddon2.health == 5\n\n\ndef test_battle_rage():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tfor target in (statue, game.player1.hero, 
game.player2.hero):\n\t\tgame.player1.give(MOONFIRE).play(target=target)\n\tcs = game.player1.give(\"EX1_392\")\n\tcs.play()\n\tassert len(game.player1.hand) == 2\n\n\ndef test_bestial_wrath():\n\tgame = prepare_game()\n\twolf = game.current_player.give(\"DS1_175\")\n\twolf.play()\n\tbestial = game.current_player.give(\"EX1_549\")\n\twisp1 = game.current_player.give(WISP)\n\twisp1.play()\n\tgame.end_turn()\n\n\twisp2 = game.current_player.summon(WISP)\n\tgame.end_turn()\n\n\tassert wolf.atk == 1\n\tassert not wolf.immune\n\tassert wolf in bestial.targets\n\tassert wisp1 not in bestial.targets\n\tassert wisp2 not in bestial.targets\n\tbestial.play(target=wolf)\n\tassert wolf.atk == 3\n\tassert wolf.immune\n\twolf.attack(target=wisp2)\n\tassert wolf.health == 1\n\tassert wolf.zone == Zone.PLAY\n\tassert wisp2.dead\n\tgame.end_turn()\n\n\tassert wolf.atk == 1\n\tassert not wolf.immune\n\n\ndef test_betrayal():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP).play()\n\twisp2 = game.player1.give(WISP).play()\n\twisp3 = game.player1.give(WISP).play()\n\tassert len(game.current_player.field) == 3\n\tgame.end_turn()\n\n\tbetrayal = game.player2.give(\"EX1_126\")\n\tbetrayal.play(target=wisp2)\n\tassert len(game.player1.field) == 1\n\tassert wisp1.dead\n\tassert not wisp2.dead\n\tassert wisp3.dead\n\tgame.end_turn()\n\n\tbender = game.player1.give(SPELLBENDERT).play()\n\tgame.end_turn()\n\n\tgame.player2.give(\"EX1_126\").play(target=wisp2)\n\tassert not wisp2.dead\n\tassert not bender.dead\n\tassert bender.health == 2\n\n\ndef test_betrayal_poisonous():\n\tgame = prepare_game()\n\tstatue1 = game.player1.give(ANIMATED_STATUE)\n\tstatue1.play()\n\tcobra = game.player1.give(\"EX1_170\").play()\n\tstatue2 = game.player1.give(ANIMATED_STATUE)\n\tstatue2.play()\n\tgame.end_turn()\n\n\tgame.player2.give(\"EX1_126\").play(target=cobra)\n\tassert statue1.dead\n\tassert not cobra.dead\n\tassert statue2.dead\n\n\ndef test_big_game_hunter():\n\tgame = prepare_game()\n\tbgh1 
= game.player1.give(\"EX1_005\")\n\tassert not bgh1.requires_target()\n\tbgh1.play()\n\tgame.end_turn()\n\n\twargolem = game.player2.give(\"CS2_186\")\n\twargolem.play()\n\tassert wargolem.atk == 7\n\tgame.end_turn()\n\n\tbgh2 = game.player1.give(\"EX1_005\")\n\tassert bgh2.requires_target()\n\tbgh2.play(target=wargolem)\n\tassert wargolem.dead\n\n\ndef test_blade_flurry():\n\tgame = prepare_game()\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.end_turn()\n\n\tgame.player2.give(WISP).play()\n\tflurry = game.player2.give(\"CS2_233\")\n\tassert not flurry.is_playable()\n\tgame.player2.give(LIGHTS_JUSTICE).play()\n\tassert flurry.is_playable()\n\tflurry.play()\n\tassert not game.player1.field\n\tassert len(game.player2.field) == 1\n\tassert game.player1.hero.health == game.player2.hero.health == 30\n\n\ndef test_blessing_of_wisdom():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tblessing = game.player1.give(\"EX1_363\")\n\tblessing.play(target=wisp)\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tgame.player1.discard_hand()\n\twisp.attack(target=game.current_player.opponent.hero)\n\tassert len(game.current_player.hand) == 1\n\tgame.end_turn()\n\n\t# Shadow Madness should draw for the original caster\n\tgame.player2.discard_hand()\n\tshadowmadness = game.player2.give(\"EX1_334\")\n\tshadowmadness.play(target=wisp)\n\tassert len(game.player1.hand) == 1\n\twisp.attack(target=game.player1.hero)\n\tassert len(game.player1.hand) == 2\n\tassert not game.player2.hand\n\n\ndef test_blizzard():\n\tgame = prepare_game()\n\tfor i in range(4):\n\t\tgame.player1.give(ANIMATED_STATUE).play()\n\tgame.end_turn()\n\n\tblizzard = game.player2.give(\"CS2_028\")\n\tblizzard.play()\n\tfor statue in game.player1.field:\n\t\tassert statue.damage == 2\n\t\tassert statue.frozen\n\n\ndef test_blood_imp():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP)\n\twisp2 = game.player1.give(WISP)\n\timp = 
game.player1.give(\"CS2_059\")\n\timp.play()\n\tassert imp.health == 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert imp.health == 1\n\twisp1.play()\n\twisp2.play()\n\tassert wisp1.health + wisp2.health == 2\n\tgame.end_turn()\n\tassert wisp1.health + wisp2.health == 3\n\n\tassert imp.health == 1\n\tassert wisp1.atk == 1\n\tassert wisp2.atk == 1\n\n\ndef test_blood_knight():\n\tgame = prepare_game()\n\tgame.end_turn()\n\n\tsquire = game.current_player.give(\"EX1_008\")\n\tsquire.play()\n\tassert squire.divine_shield\n\tgame.end_turn()\n\n\tbloodknight1 = game.current_player.give(\"EX1_590\")\n\tbloodknight1.play()\n\tassert not squire.divine_shield\n\tassert bloodknight1.atk == 6\n\tassert bloodknight1.health == 6\n\tgame.end_turn()\n\n\tgame.current_player.give(\"EX1_008\").play()\n\tgame.current_player.give(\"EX1_008\").play()\n\t# Play an argent protector on the squire\n\tgame.current_player.give(\"EX1_362\").play(target=squire)\n\tassert squire.divine_shield\n\tgame.end_turn()\n\n\tbloodknight2 = game.current_player.give(\"EX1_590\")\n\tbloodknight2.play()\n\tassert not squire.divine_shield\n\tassert bloodknight2.atk == 12\n\tassert bloodknight2.health == 12\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tbloodknight3 = game.current_player.give(\"EX1_590\")\n\tbloodknight3.play()\n\tassert bloodknight3.atk == 3\n\tassert bloodknight3.health == 3\n\n\ndef test_brawl():\n\tgame = prepare_game()\n\tbrawl = game.player1.give(\"EX1_407\")\n\tgame.player1.give(GOLDSHIRE_FOOTMAN).play()\n\tgame.player1.give(WISP).play()\n\tgame.end_turn()\n\n\tgame.player2.give(GOLDSHIRE_FOOTMAN).play()\n\tgame.player2.give(WISP).play()\n\tgame.end_turn()\n\n\tassert len(game.board) == 4\n\tbrawl.play()\n\tassert len(game.board) == 1\n\tassert game.board[0].id in (WISP, GOLDSHIRE_FOOTMAN)\n\n\ndef test_captains_parrot():\n\tgame = prepare_empty_game()\n\tpirate1 = game.player1.give(\"NEW1_022\")\n\tpirate1.shuffle_into_deck()\n\tpirate2 = 
game.player1.give(\"CS2_146\")\n\tpirate2.shuffle_into_deck()\n\twisp = game.player1.give(WISP)\n\twisp.shuffle_into_deck()\n\tassert len(game.player1.deck) == 3\n\tgame.player1.give(\"NEW1_016\").play()\n\tassert len(game.player1.deck) == 2\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.hand[0].race == Race.PIRATE\n\tgame.player1.discard_hand()\n\tgame.player1.give(\"NEW1_016\").play()\n\tassert len(game.player1.deck) == 1\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.hand[0].race == Race.PIRATE\n\tgame.player1.discard_hand()\n\tassert len(game.player1.deck) == 1\n\tassert len(game.player1.hand) == 0\n\tgame.player1.give(\"NEW1_016\").play()\n\tassert len(game.player1.deck) == 1\n\tassert len(game.player1.hand) == 0\n\n\ndef test_cenarius():\n\tgame = prepare_game()\n\t# play some wisps for buff test\n\twisp1 = game.current_player.give(WISP).play()\n\twisp2 = game.current_player.give(WISP).play()\n\tcenarius = game.current_player.give(\"EX1_573\")\n\tcenarius.play(choose=\"EX1_573a\")\n\tassert wisp1.health == wisp1.atk == 3\n\tassert wisp2.health == wisp2.atk == 3\n\tassert cenarius.atk == 5 and cenarius.health == 8\n\tgame.end_turn()\n\n\ndef test_cleave():\n\tgame = prepare_game()\n\t# play some wisps\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.end_turn()\n\n\tcleave = game.current_player.give(\"CS2_114\")\n\tassert cleave.is_playable()\n\tcleave.play()\n\tassert len(game.current_player.opponent.field) == 0\n\tbearer = game.current_player.give(\"EX1_405\").play()\n\tgame.end_turn()\n\n\tcleave2 = game.current_player.give(\"CS2_114\")\n\t# Patch 14.6 multi-target cards to function even if\n\t# there is only one viable target on the board\n\tassert cleave2.is_playable()\n\tcleave2.play()\n\tassert bearer.health == 2\n\tgame.end_turn()\n\n\tcleave3 = game.current_player.give(\"CS2_114\")\n\tassert not cleave3.is_playable()\n\n\ndef test_cold_blood():\n\tgame = prepare_game()\n\twisp = 
game.player1.give(WISP)\n\twisp.play()\n\tassert wisp.atk == 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tcb1 = game.player1.give(\"CS2_073\")\n\tcb1.play(target=wisp)\n\tassert wisp.atk == 1 + 2\n\tcb2 = game.player1.give(\"CS2_073\")\n\tcb2.play(target=wisp)\n\tassert wisp.atk == 1 + 2 + 4\n\n\ndef test_coordinated_strike():\n\tgame = prepare_game()\n\tgame.current_player.give(\"BT_036\").play()\n\tassert len(game.current_player.field) == 3\n\tfor i in range(3):\n\t\tassert game.current_player.field[i].id == \"BT_036t\"\n\t\t# TODO test if it has tag 'rush'\n\n\ndef test_corruption():\n\tgame = prepare_game()\n\tgame.end_turn()\n\n\twisp = game.player2.give(WISP)\n\twisp.play()\n\tgame.end_turn()\n\n\tcorruption1 = game.player1.give(\"CS2_063\")\n\tcorruption1.play(target=wisp)\n\tassert wisp.buffs\n\tassert wisp.buffs[0].controller == game.player1\n\tgame.end_turn()\n\n\tassert not wisp.dead\n\tgame.end_turn()\n\n\tassert wisp.dead\n\tgame.end_turn()\n\n\t# corrupt our own wisp. next turn opponent MCs it.\n\twisp2 = game.player2.give(WISP)\n\twisp2.play()\n\tlucifron = game.player2.give(\"BRMC_85\")\n\tlucifron.play()\n\tassert not wisp2.dead\n\tgame.end_turn()\n\n\tassert not wisp2.dead\n\tcabal = game.player1.give(\"EX1_091\")\n\tcabal.play(target=wisp2)\n\tassert not wisp2.dead\n\tgame.end_turn()\n\n\tassert wisp2.dead\n\n\ndef test_crazed_alchemist():\n\tgame = prepare_game()\n\twarden = game.player1.give(\"EX1_396\")\n\twarden.play()\n\talchemist = game.player1.give(\"EX1_059\")\n\tassert warden.atk == 1\n\tassert not warden.damage\n\tassert warden.max_health == 7\n\tassert warden.health == 7\n\talchemist.play(target=warden)\n\tassert warden.atk == 7\n\tassert warden.health == 1\n\n\ndef test_crazed_alchemist_damage_silence():\n\t# Test for bug #9\n\tgame = prepare_game()\n\tsnapjaw = game.player1.give(\"CS2_119\")\n\tsnapjaw.play()\n\tassert snapjaw.atk == 2\n\tassert snapjaw.health == 7\n\tgame.player1.give(\"EX1_059\").play(target=snapjaw)\n\tassert 
snapjaw.atk == 7\n\tassert snapjaw.health == 2\n\tgame.player1.give(MOONFIRE).play(target=snapjaw)\n\tassert snapjaw.atk == 7\n\tassert snapjaw.health == 1\n\tgame.player1.give(SILENCE).play(target=snapjaw)\n\tassert snapjaw.atk == 2\n\tassert snapjaw.health == 6\n\n\ndef test_commanding_shout():\n\tgame = prepare_game()\n\tshout = game.player1.give(\"NEW1_036\")\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\tbender = game.player1.give(SPELLBENDERT)\n\tbender.play()\n\tgiant = game.player2.summon(\"EX1_620\")\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert wisp1.health == 1\n\tassert bender.health == 3\n\tassert not wisp1.min_health\n\tassert not bender.min_health\n\tshout.play()\n\tassert wisp1.min_health == 1\n\tassert bender.min_health == 1\n\twisp1.attack(target=giant)\n\tassert giant.health == 7\n\tassert wisp1.health == 1\n\tassert not wisp1.damage\n\tassert wisp1.zone == Zone.PLAY\n\tgame.player1.give(MOONFIRE).play(target=bender)\n\tassert bender.health == 2\n\tassert bender.damage == 1\n\tbender.attack(target=giant)\n\tassert not bender.dead\n\tassert bender.health == 1\n\tassert bender.damage == 2\n\tassert bender.zone == Zone.PLAY\n\n\t# TODO test that minions played afterwards still get commanding shout buff\n\n\ndef test_conceal():\n\tgame = prepare_game()\n\tconceal = game.player1.give(\"EX1_128\")\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\twisp2 = game.player1.give(WISP)\n\twisp2.play()\n\tconceal.play()\n\tassert wisp1.stealthed\n\tassert wisp2.stealthed\n\tgame.end_turn()\n\tassert wisp1.stealthed\n\tassert wisp2.stealthed\n\tgame.end_turn()\n\tassert not wisp1.stealthed\n\tassert not wisp2.stealthed\n\n\ndef test_conceal_alarmobot():\n\t# Test for bug #186\n\tgame = prepare_empty_game()\n\talarmobot = game.player1.give(\"EX1_006\")\n\talarmobot.play()\n\tconceal = game.player1.give(\"EX1_128\")\n\tconceal.play()\n\tassert alarmobot.stealthed\n\twisp = game.player1.give(WISP)\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert 
alarmobot in game.player1.hand\n\tassert wisp in game.player1.field\n\tassert not alarmobot.stealthed\n\n\ndef test_cruel_taskmaster():\n\tgame = prepare_game()\n\ttaskmaster1 = game.current_player.give(\"EX1_603\")\n\ttaskmaster2 = game.current_player.give(\"EX1_603\")\n\tgame.end_turn()\n\tgame.end_turn()\n\n\twisp = game.current_player.give(WISP)\n\twisp.play()\n\ttaskmaster1.play(target=wisp)\n\tassert wisp.dead\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert taskmaster1.health == 2\n\tassert taskmaster1.atk == 2\n\ttaskmaster2.play(target=taskmaster1)\n\tassert taskmaster1.health == 1\n\tassert taskmaster1.atk == 4\n\n\ndef test_cult_master():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\twisp2 = game.player1.give(WISP)\n\twisp2.play()\n\tcultmaster = game.player1.give(\"EX1_595\")\n\tcultmaster.play()\n\tassert len(game.player1.hand) == 4\n\tgame.player1.give(MOONFIRE).play(target=wisp1)\n\tassert len(game.player1.hand) == 4 + 1\n\n\t# Make sure cult master doesn't draw off itself\n\tgame.player1.give(MOONFIRE).play(target=cultmaster)\n\tgame.player1.give(MOONFIRE).play(target=cultmaster)\n\tassert len(game.player1.hand) == 4 + 1\n\n\tgame.player1.give(MOONFIRE).play(target=wisp2)\n\tassert len(game.player1.hand) == 4 + 1\n\n\ndef test_cult_master_board_clear():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tfor i in range(4):\n\t\tgame.player1.give(WISP).play()\n\tcultmaster = game.player1.give(\"EX1_595\")\n\tcultmaster.play()\n\tgame.player1.give(MOONFIRE).play(target=cultmaster)\n\tassert len(game.player1.field) == 5\n\t# Whirlwind the board\n\tgame.player1.give(\"EX1_400\").play()\n\tassert len(game.player1.hand) == 0\n\n\ndef test_deadly_poison():\n\tgame = prepare_game()\n\tpoison = game.player1.give(\"CS2_074\")\n\tassert not poison.is_playable()\n\tgame.player1.give(LIGHTS_JUSTICE).play()\n\tassert game.player1.weapon.atk == 1\n\tassert game.player1.hero.atk == 1\n\tassert 
poison.is_playable()\n\tpoison.play()\n\tassert game.player1.weapon.atk == 3\n\tassert game.player1.hero.atk == 3\n\n\ndef test_deathwing():\n\tgame = prepare_game()\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tdeathwing = game.player1.give(\"NEW1_030\")\n\tdeathwing.play()\n\tassert not game.player1.hand\n\tassert len(game.board) == 1\n\tassert not deathwing.dead\n\n\ndef test_defender_of_argus():\n\tgame = prepare_game()\n\tdefender1 = game.player1.give(\"EX1_093\")\n\tassert defender1.atk == 2\n\tassert defender1.health == 3\n\tassert not defender1.taunt\n\tdefender1.play()\n\tassert defender1.atk == 2\n\tassert defender1.health == 3\n\tassert not defender1.taunt\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tdefender2 = game.player1.give(\"EX1_093\")\n\tdefender2.play()\n\tassert game.player1.field == [defender1, defender2]\n\tassert defender1.atk == 2 + 1\n\tassert defender1.health == 3 + 1\n\tassert defender1.taunt\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tdefender3 = game.player1.give(\"EX1_093\")\n\tdefender3.play(index=1)\n\tassert game.player1.field == [defender1, defender3, defender2]\n\tassert defender1.atk == 2 + 1 + 1\n\tassert defender1.health == 3 + 1 + 1\n\tassert defender1.taunt\n\tassert defender2.atk == 2 + 1\n\tassert defender2.health == 3 + 1\n\tassert defender2.taunt\n\n\ndef test_defias():\n\tgame = prepare_game()\n\tdefias1 = game.current_player.give(\"EX1_131\")\n\tdefias1.play()\n\tassert len(game.current_player.field) == 1\n\tgame.end_turn()\n\n\t# Coin-defias\n\tgame.current_player.hand.filter(id=THE_COIN)[0].play()\n\tdefias2 = game.current_player.give(\"EX1_131\")\n\tdefias2.play()\n\tassert len(game.current_player.field) == 2\n\n\ndef test_demolisher():\n\tgame = prepare_game()\n\tdemolisher = game.player1.give(\"EX1_102\")\n\tdemolisher.play()\n\tgame.end_turn()\n\n\tassert game.player2.hero.health == 30\n\tgame.end_turn()\n\n\tassert game.player1.hero.health == 30\n\tassert 
game.player2.hero.health == 28\n\n\ndef test_demonfire():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tgame.player1.give(\"EX1_596\").play(target=wisp)\n\tassert wisp.dead\n\timp = game.player1.give(\"CS2_059\")\n\timp.play()\n\tgame.player1.give(\"EX1_596\").play(target=imp)\n\tassert imp.atk == 0 + 2\n\tassert imp.health == 1 + 2\n\tassert imp.buffs\n\tgame.end_turn()\n\n\timp2 = game.player2.give(\"CS2_059\")\n\timp2.play()\n\tgame.end_turn()\n\n\tgame.player1.give(\"EX1_596\").play(target=imp2)\n\tassert imp2.dead\n\n\ndef test_dire_wolf_alpha():\n\tgame = prepare_game()\n\tdirewolf1 = game.player2.summon(\"EX1_162\")\n\tassert direwolf1.atk == 2\n\tdirewolf2 = game.player2.summon(\"EX1_162\")\n\tassert direwolf1.atk == 3\n\tassert direwolf2.atk == 3\n\tfrostwolf = game.current_player.summon(\"CS2_121\")\n\tgame.end_turn()\n\tgame.end_turn()\n\tfrostwolf.attack(direwolf2)\n\n\ndef test_divine_favor():\n\tgame = prepare_empty_game()\n\tgame.player1.discard_hand()\n\tfor i in range(5):\n\t\tgame.player1.give(WISP)\n\tassert len(game.player1.hand) == 5\n\tgame.end_turn()\n\n\tgame.player2.discard_hand()\n\tgame.player2.give(WISP)\n\tassert len(game.player2.hand) == 1\n\tfor i in range(7):\n\t\tgame.player2.give(WISP).shuffle_into_deck()\n\tfavor = game.player2.give(\"EX1_349\")\n\tfavor.play()\n\tassert len(game.player2.hand) == len(game.player1.hand)\n\tassert len(game.player2.deck) == 3\n\tgame.player2.discard_hand()\n\tgame.player2.give(\"EX1_349\").play()\n\t# TODO BUG report “can't fatigue and does not take damage”\n\t# TODO drawuntil stops after first fatigue\n\t# assert game.player2.hero.health == 29\n\n\ndef test_divine_spirit():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\tassert wisp.health == 1\n\twisp.play()\n\tgame.end_turn()\n\n\tgame.player2.give(\"CS2_236\").play(target=wisp)\n\tassert wisp.health == 1 * 2\n\tgame.end_turn()\n\n\tgame.player1.give(\"CS2_236\").play(target=wisp)\n\tassert wisp.health 
== 1 * 2 * 2\n\tgame.end_turn()\n\n\tequality = game.player2.give(\"EX1_619\")\n\tequality.play()\n\tassert wisp.health == 1\n\tgame.player2.give(\"CS2_236\").play(target=wisp)\n\tassert wisp.health == 1 * 2\n\tgame.end_turn()\n\n\ndef test_doomhammer():\n\tgame = prepare_game()\n\tdoomhammer = game.player1.give(\"EX1_567\")\n\tassert doomhammer.windfury\n\tassert not game.player1.hero.atk\n\tassert not game.player1.hero.windfury\n\tdoomhammer.play()\n\tassert doomhammer.windfury\n\tassert game.player1.hero.atk == 2\n\tassert game.player1.hero.windfury\n\tassert game.player1.weapon.durability == 8\n\tgame.player1.hero.attack(target=game.player2.hero)\n\tassert game.player1.hero.can_attack()\n\tgame.player1.hero.attack(target=game.player2.hero)\n\tassert not game.player1.hero.can_attack()\n\tassert game.player1.weapon.durability == 6\n\n\ndef test_doomsayer():\n\tgame = prepare_game()\n\t# play some wisps\n\tgame.current_player.give(WISP).play()\n\tgame.current_player.give(WISP).play()\n\n\tgame.end_turn()\n\tgame.current_player.give(WISP).play()\n\tgame.current_player.give(WISP).play()\n\n\tassert len(game.board) == 4\n\tdoomsayer = game.current_player.give(\"NEW1_021\")\n\tdoomsayer.play()\n\tassert len(game.board) == 5\n\tgame.end_turn()\n\n\tassert len(game.board) == 5\n\tgame.end_turn()\n\n\tassert len(game.board) == 0\n\n\ndef test_dread_infernal():\n\tgame = prepare_game()\n\tinfernal = game.player1.give(\"CS2_064\")\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.end_turn()\n\n\tgame.player2.give(WISP).play()\n\tgame.player2.give(WISP).play()\n\tgame.player2.give(WISP).play()\n\tgame.end_turn()\n\n\tassert len(game.board) == 6\n\tinfernal.play()\n\tassert len(game.board) == 1\n\tassert game.player1.hero.health == game.player2.hero.health == 29\n\tassert infernal.health == 6\n\n\ndef test_dread_corsair():\n\tgame = prepare_game()\n\tcorsair = game.player1.give(\"NEW1_022\")\n\tassert corsair.cost 
== 4\n\tweapon = game.player1.give(LIGHTS_JUSTICE)\n\tweapon.play()\n\tassert corsair.cost == 4 - 1\n\taxe = game.player1.give(\"CS2_106\")\n\taxe.play()\n\tassert corsair.cost == 4 - 3\n\taxe.destroy()\n\tassert corsair.cost == 4\n\n\ndef test_druid_of_the_claw():\n\tgame = prepare_game()\n\tclaw1 = game.current_player.give(\"EX1_165\")\n\twith pytest.raises(InvalidAction):\n\t\tclaw1.play()\n\tclaw1.play(choose=\"EX1_165a\")\n\tassert len(game.current_player.field) == 1\n\tclaw_in_field1 = game.current_player.field[0]\n\tassert claw_in_field1.id == \"EX1_165t1\"\n\tassert claw_in_field1.atk == claw_in_field1.health == 4\n\tassert claw_in_field1.charge\n\tassert not claw_in_field1.taunt\n\n\tclaw2 = game.current_player.give(\"EX1_165\")\n\twith pytest.raises(InvalidAction):\n\t\tclaw2.play()\n\tclaw2.play(choose=\"EX1_165b\")\n\tassert len(game.current_player.field) == 2\n\tclaw_in_field2 = game.current_player.field[1]\n\tassert claw_in_field2.id == \"EX1_165t2\"\n\tassert claw_in_field2.atk == 4\n\tassert claw_in_field2.health == 6\n\tassert claw_in_field2.taunt\n\tassert not claw_in_field2.charge\n\tgame.end_turn()\n\n\tgame.current_player.summon(FANDRAL_STAGHELM)\n\tclaw3 = game.current_player.give(\"EX1_165\")\n\twith pytest.raises(InvalidAction):\n\t\tclaw3.play(choose=\"EX1_165a\")\n\tclaw3.play()\n\tassert len(game.current_player.field) == 2\n\tclaw_in_field3 = game.current_player.field[1]\n\tassert claw_in_field3.id == \"OG_044a\"\n\tassert claw_in_field3.atk == 4\n\tassert claw_in_field3.health == 6\n\tassert claw_in_field3.taunt and claw_in_field3.charge\n\n\ndef test_earth_shock():\n\tgame = prepare_game()\n\tcrusader = game.player1.give(\"EX1_020\")\n\tcrusader.play()\n\tassert crusader.divine_shield\n\tgame.end_turn()\n\n\tearthshock = game.player2.give(\"EX1_245\")\n\tearthshock.play(target=crusader)\n\tassert crusader.dead\n\n\ndef test_elite_tauren_chieftain():\n\tgame = 
prepare_game()\n\tgame.player1.discard_hand()\n\tgame.player2.discard_hand()\n\tassert len(game.player1.hand) == 0\n\tassert len(game.player2.hand) == 0\n\ttauren = game.player1.give(\"PRO_001\")\n\ttauren.play()\n\tassert len(game.player1.hand) == 1\n\tassert len(game.player2.hand) == 1\n\tchords = (\"PRO_001a\", \"PRO_001b\", \"PRO_001c\")\n\tassert game.player1.hand[0] in chords\n\tassert game.player2.hand[0] in chords\n\n\ndef test_equality():\n\tgame = prepare_game()\n\tequality = game.current_player.give(\"EX1_619\")\n\t# summon a bunch of big dudes\n\tgame.current_player.summon(\"CS2_186\")\n\tgame.current_player.summon(\"CS2_186\")\n\tgame.current_player.opponent.summon(\"CS2_186\")\n\tgame.current_player.opponent.summon(\"CS2_186\")\n\t# And a violet teacher too, why not\n\tgame.current_player.summon(\"NEW1_026\")\n\n\tpyro = game.current_player.give(\"NEW1_020\")\n\tpyro.play()\n\tassert len(game.board) == 6\n\tequality.play()\n\tassert not game.board\n\n\ndef test_ethereal_arcanist():\n\tgame = prepare_game()\n\tarcanist = game.player1.give(\"EX1_274\")\n\tarcanist.play()\n\tassert arcanist.atk == arcanist.health == 3\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert arcanist.atk == arcanist.health == 3\n\ticebarrier = game.player1.give(\"EX1_289\")\n\ticebarrier.play()\n\tassert arcanist.atk == arcanist.health == 3\n\tgame.end_turn()\n\n\tassert arcanist.atk == arcanist.health == 3 + 2\n\tgame.end_turn()\n\n\tassert arcanist.atk == arcanist.health == 3 + 2\n\ticebarrier.destroy()\n\tgame.end_turn()\n\n\tassert arcanist.atk == arcanist.health == 3 + 2\n\n\ndef test_faceless_manipulator():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tmotw = game.player1.give(\"CS2_009\")\n\tmotw.play(target=wisp)\n\tassert wisp.atk == 1 + 2\n\tassert wisp.health == 1 + 2\n\tassert wisp.taunt\n\tgame.player1.give(MOONFIRE).play(target=wisp)\n\tassert wisp.health == 1 + 2 - 1\n\tgame.end_turn()\n\n\tfaceless = 
game.player2.give(\"EX1_564\")\n\tfaceless.play(target=wisp)\n\tmorphed = game.player2.field[0]\n\tassert morphed.id == WISP\n\tassert morphed.buffs\n\tassert wisp.atk == morphed.atk\n\tassert wisp.health == morphed.health\n\tassert wisp.max_health == morphed.max_health\n\tassert morphed.buffs\n\n\ndef test_faceless_manipulator_velens_chosen():\n\tgame = prepare_game()\n\tkobold = game.player1.give(KOBOLD_GEOMANCER)\n\tkobold.play()\n\tgame.player1.give(\"GVG_010\").play(target=kobold)\n\tassert game.player1.spellpower == 2\n\tfaceless = game.player1.give(\"EX1_564\")\n\tfaceless.play(target=kobold)\n\tassert faceless.morphed.spellpower == kobold.spellpower == 2\n\tassert game.player1.spellpower == 2 + 2\n\n\ndef test_faerie_dragon():\n\tgame = prepare_game(CardClass.MAGE, CardClass.MAGE)\n\tdragon = game.player1.give(\"NEW1_023\")\n\tdragon.play()\n\tmoonfire = game.player1.give(MOONFIRE)\n\tassert dragon not in moonfire.targets\n\tassert dragon not in game.player1.hero.power.targets\n\tgame.end_turn()\n\n\tassert dragon not in game.current_player.hero.power.targets\n\tarcher = game.current_player.give(\"CS2_189\")\n\tassert dragon in archer.targets\n\n\ndef test_far_sight():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tfarsight = game.player1.give(\"CS2_053\")\n\tfarsight.play()\n\tassert len(game.player1.hand) == 1\n\tcard1 = game.player1.hand[0]\n\n\tassert card1.buffs\n\tassert card1.cost >= 0\n\tcard2 = game.player1.give(card1.id)\n\tassert card1.cost == max(card2.cost - 3, 0)\n\n\ndef test_far_sight_fatigue():\n\tgame = prepare_empty_game()\n\tfarsight = game.player1.give(\"CS2_053\")\n\tfarsight.play() # Should not crash\n\tassert not game.player1.hand\n\n\ndef test_felguard():\n\tgame = prepare_game(game_class=Game)\n\tfor i in range(3):\n\t\tgame.end_turn()\n\t\tgame.end_turn()\n\tassert game.player1.max_mana == 4\n\tfelguard = game.player1.give(\"EX1_301\")\n\tfelguard.play()\n\tassert game.player1.max_mana == 3\n\tassert game.player1.mana 
== 1\n\n\ndef test_felguard_negative_mana():\n\tgame = prepare_game(game_class=Game)\n\tgame.player1.give(INNERVATE).play()\n\tgame.player1.give(INNERVATE).play()\n\tassert game.player1.max_mana == 1\n\tassert game.player1.mana == 3\n\tgame.player1.give(\"EX1_301\").play()\n\tassert game.player1.max_mana == 0\n\tassert game.player1.mana == 0\n\tgame.current_player.give(THE_COIN).play()\n\tgame.current_player.give(THE_COIN).play()\n\tgame.current_player.give(THE_COIN).play()\n\tgame.player1.give(\"EX1_301\").play()\n\tassert game.player1.max_mana == 0\n\tassert game.player1.mana == 0\n\n\ndef test_frostwolf_warlord():\n\tgame = prepare_game()\n\twarlord1 = game.player1.give(\"CS2_226\")\n\twarlord1.play()\n\tassert not warlord1.buffs\n\tassert warlord1.health == warlord1.atk == 4\n\tgame.player2.summon(WISP)\n\twarlord2 = game.player1.give(\"CS2_226\")\n\twarlord2.play()\n\tassert warlord2.buffs\n\tassert warlord2.health == warlord2.atk == 4 + 1\n\n\ndef test_frothing_berserker():\n\tgame = prepare_game()\n\tfrothing = game.player1.give(\"EX1_604\")\n\tassert not frothing.buffs\n\twisp1 = game.player1.summon(WISP)\n\tgame.player1.give(MOONFIRE).play(target=wisp1)\n\tassert not frothing.buffs\n\tfrothing.play()\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\tassert not frothing.buffs\n\tassert frothing.atk == 2\n\twisp2 = game.player1.summon(WISP)\n\tgame.player1.give(MOONFIRE).play(target=wisp2)\n\tassert frothing.buffs\n\tassert frothing.atk == 2 + 1\n\n\ndef test_flame_leviathan():\n\tgame = prepare_empty_game()\n\tassert len(game.player1.deck) == 0\n\tleviathan = game.player1.give(\"GVG_007\")\n\tleviathan.shuffle_into_deck()\n\tassert len(game.player1.deck) == 1\n\tgame.end_turn()\n\n\twisp = game.player2.give(WISP)\n\twisp.play()\n\n\t# draw the flame leviathan\n\tassert game.player1.hero.health == 30\n\tassert game.player2.hero.health == 30\n\tassert not wisp.dead\n\tgame.end_turn()\n\tassert game.player1.hero.health == 28\n\tassert 
game.player2.hero.health == 28\n\tassert wisp.dead\n\n\ndef test_force_of_nature():\n\tgame = prepare_game()\n\tgame.player1.give(\"EX1_571\").play()\n\tassert game.player1.field == [\"EX1_tk9\"] * 3\n\n\ndef test_gadgetzan_auctioneer():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tauctioneer = game.player1.give(\"EX1_095\")\n\tauctioneer.play()\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\tassert len(game.player1.hand) == 1\n\tgame.player1.give(WISP).play()\n\tassert len(game.player1.hand) == 1\n\n\ndef test_gladiators_longbow():\n\tgame = prepare_game()\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tgame.end_turn()\n\n\tbow = game.player2.give(\"DS1_188\")\n\tbow.play()\n\tassert game.player2.hero.immune_while_attacking\n\tassert not game.player1.hero.immune_while_attacking\n\tassert not game.player2.hero.immune\n\tgame.player2.give(MOONFIRE).play(game.player2.hero)\n\tassert game.player2.hero.health == 30 - 1\n\tgame.player2.hero.attack(statue)\n\tassert game.player2.hero.health == 30 - 1\n\tassert statue.damage == 5\n\tgame.end_turn()\n\n\tstatue.attack(game.player2.hero)\n\tassert game.player2.hero.health == 30 - 1 - 10\n\n\ndef test_glaivebound_adept():\n\tgame = prepare_game(CardClass.DEMONHUNTER, CardClass.DEMONHUNTER)\n\tglaivebound_adept1 = game.current_player.give(\"BT_495\")\n\tglaivebound_adept1.play()\n\t# BUG here the battlecry should not be triggered\n\t# glaivebound_adept.play(target=game.current_player.opponent.hero)\n\tassert game.current_player.opponent.hero.health == 30\n\tgame.end_turn()\n\tgame.current_player.hero.power.use()\n\tgame.current_player.hero.attack(game.current_player.opponent.hero)\n\tglaivebound_adept2 = game.current_player.give(\"BT_495\")\n\tglaivebound_adept2.play(target=glaivebound_adept1)\n\tassert glaivebound_adept1.dead\n\n\ndef test_gorehowl():\n\tgame = prepare_game()\n\tgorehowl = game.player1.give(\"EX1_411\")\n\tgorehowl.play()\n\tgame.end_turn()\n\n\twisp1 = 
game.player2.give(WISP)\n\twisp1.play()\n\twisp2 = game.player2.give(WISP)\n\twisp2.play()\n\tgame.end_turn()\n\n\tassert gorehowl.atk == game.player1.hero.atk == 7\n\tgame.player1.hero.attack(wisp1)\n\tassert wisp1.dead\n\tassert gorehowl.atk == game.player1.hero.atk == 7 - 1\n\tassert gorehowl.durability == 1\n\tassert game.player1.hero.health == 30 - 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tgame.player1.hero.attack(wisp2)\n\tassert wisp2.dead\n\tassert gorehowl.atk == game.player1.hero.atk == 7 - 1 - 1\n\tassert gorehowl.durability == 1\n\tassert game.player1.hero.health == 30 - 1 - 1\n\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.player1.hero.attack(game.player2.hero)\n\tassert game.player2.hero.health == 30 - (7 - 1 - 1)\n\tassert not game.player1.weapon\n\tassert not game.player1.hero.atk\n\n\ndef test_grimscale_oracle():\n\tgame = prepare_game()\n\tgrimscale = game.player1.give(\"EX1_508\")\n\tmurloc1 = game.player1.summon(MURLOC)\n\tmurloc2 = game.player2.summon(MURLOC)\n\tassert murloc1.atk == 1\n\tassert murloc2.atk == 1\n\tgrimscale.play()\n\tassert murloc1.atk == 1 + 1\n\tassert murloc2.atk == 1\n\tassert grimscale.atk == 1\n\n\tgame.player1.give(TIME_REWINDER).play(target=grimscale)\n\tassert murloc1.atk == 1\n\tassert murloc2.atk == 1\n\n\ndef test_gruul():\n\tgame = prepare_game()\n\tgruul = game.current_player.give(\"NEW1_038\")\n\tgruul.play()\n\tassert gruul.atk == 7\n\tassert gruul.health == 7\n\tassert not gruul.buffs\n\tgame.end_turn()\n\n\tassert gruul.buffs\n\tassert gruul.atk == 8\n\tassert gruul.health == 8\n\tgame.end_turn()\n\n\tassert gruul.atk == 9\n\tassert gruul.health == 9\n\n\ndef test_harrison_jones():\n\tgame = prepare_game()\n\tgame.end_turn()\n\n\tlightsjustice = game.player2.give(LIGHTS_JUSTICE)\n\tlightsjustice.play()\n\tgame.end_turn()\n\n\tgame.player1.discard_hand()\n\tassert not game.player1.hand\n\tassert lightsjustice.durability == 4\n\tjones = game.player1.give(\"EX1_558\")\n\tjones.play()\n\tassert 
len(game.player1.hand) == 4\n\tassert lightsjustice.dead\n\tgame.end_turn()\n\n\tgame.player2.discard_hand()\n\tjones2 = game.player2.give(\"EX1_558\")\n\tjones2.play()\n\tassert not game.player2.hand\n\n\ndef test_headcrack():\n\tgame = prepare_game(exclude=(\"EX1_137\", ))\n\theadcrack1 = game.player1.give(\"EX1_137\")\n\tassert game.player1.hand.contains(\"EX1_137\")\n\theadcrack1.play()\n\tassert not game.player1.hand.contains(\"EX1_137\")\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert not game.player1.hand.contains(\"EX1_137\")\n\theadcrack2 = game.player1.give(\"EX1_137\")\n\tgame.player1.give(THE_COIN).play()\n\theadcrack2.play()\n\tassert not game.player1.hand.contains(\"EX1_137\")\n\tgame.end_turn()\n\tassert game.player1.hand.contains(\"EX1_137\")\n\tgame.player1.discard_hand()\n\tgame.end_turn()\n\tgame.end_turn()\n\tassert not game.player1.hand.contains(\"EX1_137\")\n\n\ndef test_heroic_strike():\n\tgame = prepare_game()\n\tstrike = game.current_player.give(\"CS2_105\")\n\tassert game.current_player.hero.atk == 0\n\tstrike.play()\n\tassert game.current_player.hero.atk == 4\n\tgame.end_turn()\n\tassert game.current_player.hero.atk == 0\n\tgame.end_turn()\n\tassert game.current_player.hero.atk == 0\n\n\tgame.current_player.give(\"CS2_105\").play()\n\tgame.current_player.give(\"CS2_106\").play()\n\tassert game.current_player.hero.atk == 7\n\n\ndef test_hogger():\n\tgame = prepare_game()\n\thogger = game.current_player.give(\"NEW1_040\")\n\thogger.play()\n\tassert len(game.current_player.field) == 1\n\tgame.end_turn()\n\tassert len(game.current_player.opponent.field) == 2\n\tassert game.current_player.opponent.field[1].id == \"NEW1_040t\"\n\tgame.end_turn()\n\tassert len(game.current_player.field) == 2\n\tgame.end_turn()\n\tassert len(game.current_player.opponent.field) == 3\n\n\ndef test_houndmaster():\n\tgame = prepare_game()\n\thoundmaster = game.current_player.give(\"DS1_070\")\n\tassert not houndmaster.targets\n\tassert not 
houndmaster.powered_up\n\thound = game.current_player.give(\"EX1_538t\")\n\thound.play()\n\tassert houndmaster.targets == [hound]\n\tassert houndmaster.powered_up\n\tassert hound.atk == 1\n\tassert hound.health == 1\n\tassert not hound.taunt\n\thoundmaster.play(target=hound)\n\tassert hound.atk == 3\n\tassert hound.health == 3\n\tassert hound.taunt\n\n\ndef test_holy_wrath():\n\tgame = prepare_empty_game()\n\tgoldshire = game.player1.give(GOLDSHIRE_FOOTMAN)\n\tgoldshire.shuffle_into_deck()\n\tassert goldshire in game.player1.deck\n\tassert goldshire.cost == 1\n\tassert game.player2.hero.health == 30\n\tgame.player1.give(\"EX1_365\").play(target=game.player2.hero)\n\tassert goldshire not in game.player1.deck\n\tassert game.player2.hero.health == 30 - 1\n\tgame.player1.give(\"EX1_365\").play(target=game.player2.hero)\n\tassert game.player2.hero.health == 30 - 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\ndef test_holy_wrath_full_hand():\n\tgame = prepare_empty_game()\n\tgame.player1.give(GOLDSHIRE_FOOTMAN).shuffle_into_deck()\n\tgame.player1.give(GOLDSHIRE_FOOTMAN).shuffle_into_deck()\n\tholywrath = game.player1.give(\"EX1_365\")\n\tfor i in range(9):\n\t\tgame.player1.give(WISP)\n\tgame.player1.temp_mana += 1\n\tholywrath.play(target=game.player2.hero)\n\tassert game.player2.hero.health == 30 - 1\n\tassert len(game.player1.hand) == 10\n\n\ndef test_humility():\n\tgame = prepare_game()\n\thumility = game.current_player.give(\"EX1_360\")\n\thumility2 = game.current_player.give(\"EX1_360\")\n\tseargent = game.current_player.give(\"CS2_188\")\n\tseargent2 = game.current_player.give(\"CS2_188\")\n\tgolem = game.current_player.summon(\"CS2_186\")\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert golem.atk == 7\n\thumility.play(target=golem)\n\tassert golem.atk == 1\n\tseargent.play(target=golem)\n\tassert golem.atk == 3\n\tgame.end_turn()\n\tassert golem.atk == 1\n\tgame.end_turn()\n\n\tseargent2.play(target=golem)\n\tassert golem.atk == 
3\n\thumility2.play(target=golem)\n\tassert golem.atk == 1\n\tgame.end_turn()\n\tassert golem.atk == 1\n\n\ndef test_hunters_mark():\n\tgame = prepare_game()\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tgame.player1.give(MOONFIRE).play(target=statue)\n\tassert statue.health == 10 - 1\n\tmark = game.player1.give(\"CS2_084\")\n\tmark.play(target=statue)\n\tassert statue.health == statue.max_health == 1\n\tassert not statue.dead\n\tgame.player1.give(SILENCE).play(target=statue)\n\tassert statue.health == 10\n\n\ndef test_i_am_murloc():\n\tgame = prepare_game()\n\tiammurloc = game.player1.give(\"PRO_001a\")\n\tiammurloc.play()\n\tassert len(game.player1.field) in (3, 4, 5)\n\tassert game.player1.field[0].id == \"PRO_001at\"\n\n\ndef test_illidan():\n\tgame = prepare_game()\n\tillidan = game.current_player.give(\"EX1_614\")\n\tassert len(game.board) == 0\n\tillidan.play()\n\tassert len(game.board) == 1\n\tgame.current_player.give(MOONFIRE).play(target=illidan)\n\tassert len(game.board) == 2\n\tgame.current_player.give(MOONFIRE).play(target=illidan)\n\tassert len(game.board) == 3\n\tgame.current_player.give(MOONFIRE).play(target=illidan)\n\tassert len(game.board) == 4\n\tgame.current_player.give(MOONFIRE).play(target=illidan)\n\tassert len(game.board) == 5\n\n\t# 5th moonfire kills illidan, but spawns another token before\n\tgame.current_player.give(MOONFIRE).play(target=illidan)\n\tassert len(game.board) == 5\n\tassert illidan.dead\n\n\ndef test_illidan_knife_juggler():\n\tgame = prepare_game()\n\tillidan = game.player1.give(\"EX1_614\")\n\tillidan.play()\n\tjuggler = game.player1.give(\"NEW1_019\")\n\tjuggler.play()\n\tassert len(game.player1.field) == 3\n\tassert game.player2.hero.health == 30 - 1\n\n\ndef test_illidan_full_board():\n\tgame = prepare_game()\n\tillidan = 
game.player1.give(\"EX1_614\")\n\tillidan.play()\n\tgame.player1.give(THE_COIN).play()\n\tgame.player1.give(THE_COIN).play()\n\tgame.player1.give(THE_COIN).play()\n\tgame.player1.give(THE_COIN).play()\n\tgame.player1.give(THE_COIN).play()\n\tassert len(game.player1.field) == 6\n\tjuggler = game.player1.give(\"NEW1_019\")\n\tjuggler.play()\n\tassert len(game.player1.field) == 7\n\tassert game.player2.hero.health == 30\n\n\ndef test_injured_blademaster():\n\tgame = prepare_game()\n\tfrothing = game.player1.give(\"EX1_604\")\n\tfrothing.play()\n\tassert not frothing.buffs\n\tassert frothing.atk == 2\n\tblademaster = game.player1.give(\"CS2_181\")\n\tblademaster.play()\n\tassert frothing.buffs\n\tassert frothing.atk == 2 + 1\n\tassert blademaster.health == blademaster.max_health - 4\n\n\ndef test_inner_fire():\n\tgame = prepare_game()\n\tgurubashi = game.player1.give(\"EX1_399\")\n\tgurubashi.play()\n\tassert gurubashi.atk == 2\n\n\tseargent = game.player1.give(\"CS2_188\")\n\tseargent.play(target=gurubashi)\n\tassert gurubashi.atk == 4\n\n\tinnerfire = game.player1.give(\"CS1_129\")\n\tinnerfire.play(target=gurubashi)\n\tassert gurubashi.atk == 7\n\tgame.end_turn()\n\n\tassert gurubashi.atk == 7\n\tequality = game.player2.give(\"EX1_619\")\n\tequality.play()\n\tassert gurubashi.health == 1\n\tassert gurubashi.atk == 7\n\n\ndef test_innervate():\n\tgame = prepare_game()\n\tassert game.player1.mana == 10\n\tassert game.player1.temp_mana == 0\n\tassert game.player1.max_mana == 10\n\tassert game.player1.max_resources == 10\n\tgame.player1.give(\"EX1_169\").play()\n\tassert game.player1.mana == 10\n\tassert game.player1.temp_mana == 0\n\tgame.player1.give(GOLDSHIRE_FOOTMAN).play()\n\tassert game.player1.mana == 9\n\tgame.player1.give(\"EX1_169\").play()\n\tassert game.player1.mana == 10\n\tassert game.player1.temp_mana == 1\n\tgame.player1.give(GOLDSHIRE_FOOTMAN).play()\n\tassert game.player1.mana == 9\n\tassert game.player1.temp_mana == 0\n\n\ndef 
test_ice_lance():\n\tgame = prepare_game()\n\tlance1 = game.player1.give(\"CS2_031\")\n\tassert game.player2.hero.health == 30\n\tassert not game.player2.hero.frozen\n\tlance1.play(target=game.player2.hero)\n\tassert game.player2.hero.health == 30\n\tassert game.player2.hero.frozen\n\tlance2 = game.player1.give(\"CS2_031\")\n\tlance2.play(target=game.player2.hero)\n\tassert game.player2.hero.health == 26\n\tassert game.player2.hero.frozen\n\tgame.end_turn()\n\n\tgame.player2.give(LIGHTS_JUSTICE).play()\n\tassert game.player2.hero.frozen\n\tassert not game.player2.hero.can_attack()\n\tgame.end_turn()\n\n\tassert not game.player2.hero.frozen\n\n\ndef test_imp_master():\n\tgame = prepare_game()\n\timpmaster = game.player1.give(\"EX1_597\")\n\timpmaster.play()\n\tassert impmaster.health == 5\n\tassert len(impmaster.controller.field) == 1\n\tgame.end_turn()\n\n\tassert impmaster.health == 4\n\tassert len(impmaster.controller.field) == 2\n\tassert impmaster.controller.field.contains(\"EX1_598\")\n\n\ndef test_keeper_of_the_grove():\n\tgame = prepare_game()\n\tyeti = game.player2.summon(\"CS2_182\")\n\tadventurer1 = game.player1.summon(\"EX1_044\")\n\tassert yeti.health == 5 and adventurer1.health == 2\n\tkeeper1 = game.player1.give(\"EX1_166\")\n\tkeeper1.play(target=yeti, choose=\"EX1_166a\")\n\tassert yeti.health == 3\n\tassert adventurer1.health == adventurer1.atk == 3\n\tadventurer2 = game.player1.summon(\"EX1_044\")\n\n\tkeeper2 = game.player1.give(\"EX1_166\")\n\tkeeper2.play(target=adventurer1, choose=\"EX1_166b\")\n\n\tassert adventurer1.atk == adventurer1.health == 2\n\tgame.player1.summon(\"OG_044\")\n\tgame.end_turn()\n\tgame.end_turn()\n\tkeeper3 = game.player1.give(\"EX1_166\")\n\tkeeper3.play(target=adventurer2)\n\t# TODO skip the silence test of being 3/3/1\n\tassert adventurer2.health == adventurer2.atk == 2\n\n\ndef test_kill_command():\n\tgame = prepare_game()\n\tkc = game.player1.give(\"EX1_539\")\n\tassert not 
kc.powered_up\n\tkc.play(target=game.player1.opponent.hero)\n\tassert game.player2.hero.health == 30 - 3\n\n\tgame.player1.give(CHICKEN).play()\n\tkc = game.player1.give(\"EX1_539\")\n\tassert kc.powered_up\n\tkc.play(target=game.player1.hero)\n\tassert game.player1.hero.health == 30 - 5\n\n\ndef test_king_mukla():\n\tgame = prepare_game()\n\tmukla = game.player1.give(\"EX1_014\")\n\tgame.player2.discard_hand()\n\tassert len(game.player2.hand) == 0\n\tmukla.play()\n\tassert len(game.player2.hand) == 2\n\tfor i in range(2):\n\t\tassert game.player2.hand[i].id == \"EX1_014t\"\n\tgame.end_turn()\n\twisp = game.player2.give(WISP)\n\twisp.play()\n\tassert wisp.health == 1\n\tassert wisp.atk == 1\n\tassert game.player2.hand[0].id == \"EX1_014t\"\n\tgame.player2.hand[0].play(target=wisp)\n\tassert wisp.health == 2\n\tassert wisp.atk == 2\n\n\ndef test_kirin_tor_mage():\n\tgame = prepare_game()\n\tcounterspell = game.player1.give(\"EX1_287\")\n\tassert counterspell.cost == 3\n\tvaporize = game.player1.give(\"EX1_594\")\n\tassert vaporize.cost == 3\n\tmissiles = game.player1.give(\"EX1_277\")\n\tassert missiles.cost == 1\n\n\tgame.player1.give(\"EX1_612\").play()\n\tassert counterspell.cost == 0\n\tassert vaporize.cost == 0\n\tassert missiles.cost == 1\n\tcounterspell.play()\n\tassert vaporize.cost == 3\n\tgame.player1.give(\"EX1_612\").play()\n\tassert vaporize.cost == 0\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert vaporize.cost == 3\n\n\ndef test_knife_juggler():\n\tgame = prepare_game()\n\tjuggler = game.player1.give(\"NEW1_019\")\n\tjuggler.play()\n\tassert game.player2.hero.health == 30\n\tgame.player1.give(WISP).play()\n\tassert game.player2.hero.health == 29\n\tgame.player1.give(MOONFIRE).play(target=juggler)\n\t# kill juggler with archer, shouldnt juggle\n\tarcher = game.current_player.give(\"CS2_189\")\n\tarcher.play(target=juggler)\n\tassert juggler.dead\n\tassert game.player2.hero.health == 29\n\n\ndef test_knife_juggler_swipe():\n\t\"\"\"\n\tTest that a 
Swipe on Knife Juggler that kills a Haunted Creeper\n\tdoes not trigger the Knife Juggler by the time the spiders spawn\n\t\"\"\"\n\tgame = prepare_game()\n\tcreeper = game.player2.summon(\"FP1_002\")\n\tjuggler = game.player2.summon(\"NEW1_019\")\n\tgame.current_player.give(MOONFIRE).play(target=creeper)\n\tswipe = game.player1.give(\"CS2_012\")\n\tswipe.play(target=juggler)\n\tassert juggler.dead\n\tassert creeper.dead\n\tassert len(game.player2.field) == 2\n\tassert game.player1.hero.health == 30\n\n\ndef test_leeroy():\n\tgame = prepare_game()\n\tleeroy = game.player1.give(\"EX1_116\")\n\tleeroy.play()\n\tassert leeroy.can_attack()\n\tassert len(game.player2.field) == 2\n\tassert game.player2.field[0].id == game.player2.field[1].id == \"EX1_116t\"\n\n\ndef test_lightspawn():\n\tgame = prepare_game()\n\tlightspawn = game.player1.give(\"EX1_335\")\n\tlightspawn.play()\n\tassert lightspawn.health == 5\n\tassert lightspawn.atk == 5\n\n\t# moonfire the lightspawn, goes to 4 health\n\tgame.player1.give(MOONFIRE).play(target=lightspawn)\n\tassert lightspawn.health == 4\n\tassert lightspawn.atk == 4\n\tassert not lightspawn.buffs\n\n\tflametongue = game.player1.give(\"EX1_565\")\n\tflametongue.play()\n\tassert lightspawn.health == 4\n\tassert lightspawn.buffs\n\tassert lightspawn.atk == 4\n\n\tgame.player1.give(SILENCE).play(target=lightspawn)\n\tassert lightspawn.buffs\n\t# 2 attack from the flametongue\n\tassert lightspawn.atk == 2\n\n\ndef test_lightwarden():\n\tgame = prepare_game(CardClass.PRIEST, CardClass.PRIEST)\n\tlightwarden = game.player1.give(\"EX1_001\")\n\tlightwarden.play()\n\tassert lightwarden.atk == 1\n\t# No-op heal should not do anything.\n\tgame.player1.hero.power.use(target=game.player1.hero)\n\tassert lightwarden.atk == 1\n\tgame.end_turn()\n\n\tgame.player2.give(MOONFIRE).play(target=game.player2.hero)\n\tgame.player2.hero.power.use(target=game.player2.hero)\n\tassert lightwarden.atk == 3\n\n\ndef test_lightwell():\n\tgame = 
prepare_game()\n\tlightwell = game.player1.give(\"EX1_341\")\n\tlightwell.play()\n\tgame.player1.give(MOONFIRE).play(target=game.player1.hero)\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\tassert game.player1.hero.health == 29\n\tassert game.player2.hero.health == 29\n\tgame.end_turn()\n\n\tassert game.player1.hero.health == 29\n\tassert game.player2.hero.health == 29\n\tgame.end_turn()\n\n\tassert game.player1.hero.health == 30\n\tassert game.player2.hero.health == 29\n\n\ndef test_lorewalker_cho():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tgame.player2.discard_hand()\n\tcho = game.player1.give(\"EX1_100\")\n\tcho.play()\n\tassert len(game.player1.hand) == len(game.player2.hand) == 0\n\tcoin1 = game.player1.give(THE_COIN)\n\tcoin1.play()\n\tassert len(game.player1.hand) == 0\n\tassert len(game.player2.hand) == 1\n\tassert game.player2.hand[0].id == THE_COIN\n\tassert game.player2.hand[0] is not coin1\n\tgame.end_turn()\n\n\tcoin2 = game.player2.hand[0]\n\tcoin2.play()\n\tassert len(game.player2.hand) == 1\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.hand[0] is not coin1\n\tassert game.player1.hand[0] is not coin2\n\tassert game.player1.hand[0].id == THE_COIN\n\n\ndef test_mad_bomber():\n\tgame = prepare_game()\n\tstatue1 = game.player1.summon(ANIMATED_STATUE)\n\tstatue2 = game.player1.summon(ANIMATED_STATUE)\n\tbomber = game.player1.give(\"EX1_082\")\n\tbomber.play()\n\tassert bomber.damage == 0\n\tassert (\n\t\tstatue1.damage +\n\t\tstatue2.damage +\n\t\tgame.player1.hero.damage +\n\t\tgame.player2.hero.damage\n\t) == 3\n\n\ndef test_mark_of_nature():\n\tgame = prepare_game()\n\twisp1 = game.current_player.give(WISP)\n\twisp1.play()\n\tassert wisp1.atk == 1\n\tassert wisp1.health == 1\n\tassert not wisp1.taunt\n\n\tmark1 = game.current_player.give(\"EX1_155\")\n\tmark1.play(target=wisp1, choose=\"EX1_155a\")\n\tassert wisp1.atk == 1 + 4\n\tassert wisp1.health == 1\n\tassert not wisp1.taunt\n\n\twisp2 = 
game.current_player.give(WISP)\n\twisp2.play()\n\tassert wisp2.atk == 1\n\tassert wisp2.health == 1\n\tassert not wisp2.taunt\n\n\tmark2 = game.current_player.give(\"EX1_155\")\n\tmark2.play(target=wisp2, choose=\"EX1_155b\")\n\tassert wisp2.atk == 1\n\tassert wisp2.health == 1 + 4\n\tassert wisp2.taunt\n\n\tgame.current_player.summon(\"OG_044\")\n\twisp3 = game.current_player.opponent.summon(WISP)\n\tmark3 = game.current_player.give(\"EX1_155\")\n\t# player cannot choose with staghelm on field\n\twith pytest.raises(InvalidAction):\n\t\tmark3.play(target=wisp3, choose=\"EX1_155b\")\n\n\tmark3.play(target=wisp3)\n\tassert wisp3.atk == 1 + 4\n\tassert wisp3.health == 1 + 4\n\tassert wisp3.taunt\n\n\ndef test_mana_addict():\n\tgame = prepare_game()\n\taddict = game.player1.give(\"EX1_055\")\n\taddict.play()\n\tassert addict.atk == 1\n\tgame.end_turn()\n\n\tassert addict.atk == 1\n\tgame.player2.give(THE_COIN).play()\n\tassert addict.atk == 1\n\tgame.end_turn()\n\n\tgame.player1.give(THE_COIN).play()\n\tassert addict.atk == 3\n\tgame.player1.give(THE_COIN).play()\n\tassert addict.atk == 5\n\tgame.end_turn()\n\n\tassert addict.atk == 1\n\n\ndef test_mana_wyrm():\n\tgame = prepare_game()\n\twyrm = game.player1.give(\"NEW1_012\")\n\twyrm.play()\n\tassert wyrm.atk == 1\n\tgame.player1.give(THE_COIN).play()\n\tassert wyrm.atk == 2\n\tgame.end_turn()\n\n\tassert wyrm.atk == 2\n\tgame.player2.give(THE_COIN).play()\n\tassert wyrm.atk == 2\n\tgame.end_turn()\n\n\tassert wyrm.atk == 2\n\tgame.player1.give(THE_COIN).play()\n\tassert wyrm.atk == 3\n\n\ndef test_master_of_disguise():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tmod = game.player1.give(\"NEW1_014\")\n\tmod.play(target=wisp)\n\tassert wisp.stealthed\n\tgame.end_turn()\n\n\tassert wisp.stealthed\n\tgame.end_turn()\n\n\tassert not wisp.stealthed\n\n\ndef test_mana_wraith():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP)\n\tgoldshire1 = 
game.player1.give(GOLDSHIRE_FOOTMAN)\n\twisp2 = game.player2.give(WISP)\n\tgoldshire2 = game.player2.give(GOLDSHIRE_FOOTMAN)\n\tfireball1 = game.player1.give(\"CS2_029\")\n\tfireball2 = game.player2.give(\"CS2_029\")\n\tweapon1 = game.player1.give(LIGHTS_JUSTICE)\n\tweapon2 = game.player2.give(LIGHTS_JUSTICE)\n\tassert wisp1.cost == wisp2.cost == 0\n\tassert goldshire1.cost == goldshire2.cost == 1\n\tassert fireball1.cost == fireball2.cost == 4\n\tassert weapon1.cost == weapon2.cost == 1\n\tassert game.player1.hero.power.cost == game.player2.hero.power.cost == 2\n\n\twraith = game.current_player.give(\"EX1_616\")\n\twraith.play()\n\tassert wisp1.cost == wisp2.cost == 0 + 1\n\tassert goldshire1.cost == goldshire2.cost == 1 + 1\n\tassert fireball1.cost == fireball2.cost == 4\n\tassert weapon1.cost == weapon2.cost == 1\n\tassert game.player1.hero.power.cost == game.player2.hero.power.cost == 2\n\n\twraith.destroy()\n\tassert wisp1.cost == wisp2.cost == 0\n\tassert goldshire1.cost == goldshire2.cost == 1\n\tassert fireball1.cost == fireball2.cost == 4\n\tassert weapon1.cost == weapon2.cost == 1\n\tassert game.player1.hero.power.cost == game.player2.hero.power.cost == 2\n\n\ndef test_millhouse_manastorm():\n\tgame = prepare_game()\n\tmillhouse = game.player1.give(\"NEW1_029\")\n\tfireballp1 = game.player1.give(\"CS2_029\")\n\tfireball1 = game.player2.give(\"CS2_029\")\n\tfireball2 = game.player2.give(\"CS2_029\")\n\tmoonfire = game.player2.give(MOONFIRE)\n\n\tassert fireball1.cost == fireball2.cost == fireballp1.cost == 4\n\tassert moonfire.cost == 0\n\tassert fireballp1.cost == 4\n\tmillhouse.play()\n\t# costs change as soon as millhouse is played\n\tassert game.player2.hero.buffs\n\tassert fireball1.cost == fireball2.cost == moonfire.cost == 0\n\tassert fireballp1.cost == 4\n\tgame.end_turn()\n\n\tassert fireball1.cost == fireball2.cost == moonfire.cost == 0\n\tassert fireballp1.cost == 4\n\tgame.end_turn()\n\n\tassert fireball1.cost == fireball2.cost == 
fireballp1.cost == 4\n\tassert moonfire.cost == 0\n\n\ndef test_mind_control():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tgame.end_turn()\n\n\tassert wisp.controller is game.player1\n\tassert wisp.zone == Zone.PLAY\n\tassert not wisp.asleep\n\tmc = game.player2.give(\"CS1_113\")\n\tmc.play(target=wisp)\n\tassert wisp.controller is game.player2\n\tassert wisp.zone == Zone.PLAY\n\tassert wisp.asleep\n\n\ndef test_mind_control_tech():\n\tgame = prepare_game()\n\tfor i in range(4):\n\t\tgame.player1.give(WISP).play()\n\tgame.end_turn()\n\n\t# test normal steal\n\tassert len(game.player1.field) == 4\n\tassert len(game.player2.field) == 0\n\tmct = game.player2.give(\"EX1_085\")\n\tmct.play()\n\tassert len(game.player1.field) == 3\n\tassert len(game.player2.field) == 2\n\n\t# ensure no steal with 3 minions or less\n\tgame.player2.give(\"EX1_085\").play()\n\tassert len(game.player1.field) == 3\n\tassert len(game.player2.field) == 3\n\n\ndef test_mindgames():\n\tgame = prepare_empty_game()\n\twisp = game.player2.give(WISP)\n\twisp.shuffle_into_deck()\n\tmindgames = game.player1.give(\"EX1_345\")\n\tmindgames.play()\n\tassert len(game.player1.field) == 1\n\tassert game.player1.field[0].id == WISP\n\tassert wisp in game.player2.deck\n\tgame.end_turn()\n\n\tmindgames2 = game.player2.give(\"EX1_345\")\n\tmindgames2.play()\n\tassert len(game.player2.field) == 1\n\tassert game.player2.field[0].id == \"EX1_345t\"\n\n\ndef test_mind_vision():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tgame.player2.discard_hand()\n\n\t# play mind vision, should give nothing\n\tassert len(game.player1.hand) == 0\n\tgame.player1.give(\"CS2_003\").play()\n\tassert len(game.player1.hand) == 0\n\n\t# opponent draws a card, mind vision should get that one card\n\tassert len(game.player1.hand) == len(game.player2.hand) == 0\n\tcard = game.player2.draw()\n\tassert len(game.player1.hand) == 0\n\tassert len(game.player2.hand) == 1\n\tmind_vision = 
game.player1.give(\"CS2_003\")\n\tmind_vision.play()\n\tcopied = game.player1.hand[-1]\n\tassert copied == card\n\tassert copied.creator is mind_vision\n\n\ndef test_mirror_image():\n\tgame = prepare_game()\n\tmirror = game.player1.give(\"CS2_027\")\n\tmirror.play()\n\tassert len(game.player1.field) == 2\n\tassert game.player1.field[0].id == game.player1.field[1].id == \"CS2_mirror\"\n\n\ndef test_molten_giant():\n\tgame = prepare_game()\n\tmolten = game.current_player.give(\"EX1_620\")\n\tmolten_base_cost = 20\n\tassert molten.cost == molten_base_cost\n\tgame.current_player.give(MOONFIRE).play(target=game.player1.hero)\n\tassert molten.cost == molten_base_cost - 1\n\tgame.current_player.give(MOONFIRE).play(target=game.player1.hero)\n\tassert molten.cost == molten_base_cost - 2\n\tgame.current_player.give(MOONFIRE).play(target=game.player1.hero)\n\tassert molten.cost == molten_base_cost - 3\n\tgame.end_turn()\n\n\tassert molten.cost == molten_base_cost - 3\n\tmolten2 = game.player2.give(\"EX1_620\")\n\tassert molten2.cost == molten_base_cost\n\n\ndef test_mortal_coil():\n\tgame = prepare_game()\n\tdummy = game.player1.summon(TARGET_DUMMY)\n\tassert dummy.health == 2\n\tgame.end_turn()\n\tgame.player2.discard_hand()\n\tassert len(game.player2.hand) == 0\n\tcoil1 = game.player2.give(\"EX1_302\")\n\tassert len(game.player2.hand) == 1\n\tcoil1.play(target=dummy)\n\tassert len(game.player2.hand) == 0\n\tcoil2 = game.player2.give(\"EX1_302\")\n\tcoil2.play(target=dummy)\n\tassert len(game.player2.hand) == 1\n\n\ndef test_mortal_strike():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\texpected_health = 30\n\tfor i in range(5):\n\t\tms = game.player1.give(\"EX1_408\")\n\t\tassert not ms.powered_up\n\t\tms.play(target=game.player1.hero)\n\t\texpected_health -= 4\n\t\tassert game.player1.hero.health == expected_health\n\t\tif i % 2:\n\t\t\tgame.end_turn()\n\t\t\tgame.end_turn()\n\n\tms = game.player1.give(\"EX1_408\")\n\tassert 
ms.powered_up\n\tms.play(target=game.player1.hero)\n\texpected_health -= 6\n\tassert game.player1.hero.health == expected_health\n\n\ndef test_mountain_giant():\n\tgame = prepare_game()\n\tmountain = game.current_player.give(\"EX1_105\")\n\tassert mountain.cost == 12 - len(game.current_player.hand) + 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert mountain.cost == 12 - len(game.current_player.hand) + 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert mountain.cost == 12 - len(game.current_player.hand) + 1\n\n\ndef test_murloc_tidecaller():\n\tgame = prepare_game()\n\ttidecaller = game.player1.give(\"EX1_509\")\n\ttidecaller.play()\n\tassert tidecaller.atk == 1\n\tgame.end_turn()\n\n\tgame.player2.give(MURLOC).play()\n\tassert tidecaller.atk == 1 + 1\n\tgame.end_turn()\n\n\t# Play a tidehunter. Summons two murlocs.\n\tgame.player1.give(\"EX1_506\").play()\n\tassert tidecaller.atk == 1 + 1 + 2\n\n\ndef test_northshire_cleric():\n\tgame = prepare_game(CardClass.PRIEST, CardClass.PRIEST)\n\tgame.player1.discard_hand()\n\tgame.player2.discard_hand()\n\tcleric = game.player1.give(\"CS2_235\")\n\tcleric.play()\n\tgame.player1.hero.power.use(target=game.current_player.hero)\n\tassert not game.player1.hand\n\n\tpyromancer = game.player1.give(\"NEW1_020\")\n\tpyromancer.play()\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert not game.player1.hand\n\n\tgame.player2.summon(ANIMATED_STATUE)\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert len(game.player1.hand) == 2\n\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert len(game.player1.hand) == 5\n\tassert not game.player2.hand\n\n\ndef test_old_murkeye():\n\tgame = prepare_game()\n\tmurkeye = game.player1.give(\"EX1_062\")\n\tassert murkeye.atk == 2\n\tmurloc = game.player1.give(MURLOC)\n\tmurloc.play()\n\tassert murkeye.atk == 2\n\tmurkeye.play()\n\tassert murkeye.charge\n\tassert murkeye.can_attack()\n\tassert murkeye.atk == 2 + 1\n\tgame.player2.summon(\"CS2_168\")\n\tassert murkeye.atk == 2 + 
2\n\tgame.player2.summon(\"CS2_168\")\n\tassert murkeye.atk == 2 + 3\n\tmurloc.destroy()\n\tassert murkeye.atk == 2 + 2\n\tmurkeye2 = game.player2.summon(\"EX1_062\")\n\tassert murkeye.atk == murkeye2.atk == 2 + 3\n\n\ndef test_onyxia():\n\tgame = prepare_game()\n\tonyxia = game.player1.give(\"EX1_562\")\n\tassert len(game.player1.field) == 0\n\tonyxia.play()\n\tassert len(game.player1.field) == 7\n\tassert game.player1.field == [\"ds1_whelptoken\"] * 3 + [\"EX1_562\"] + [\"ds1_whelptoken\"] * 3\n\n\ndef test_perditions_blade():\n\t# weapon with both battlecry and combo, should trigger only one of them\n\tgame = prepare_game()\n\t# This summoned minion has 8 health\n\tenemy_minion_ragnaros = game.player2.summon(\"EX1_298\")\n\tassert game.player2.field[0].health == 8\n\tuntriggerred_combo_blade = game.player1.give(\"EX1_133\")\n\ttriggerred_combo_blade = game.player1.give(\"EX1_133\")\n\ttriggerred_twice_blade = game.player1.give(\"EX1_133\")\n\n\t# the first card played with one damage dealt for battlecry only\n\tuntriggerred_combo_blade.play(target=enemy_minion_ragnaros)\n\tassert game.player2.field[0].health == 7\n\n\t# should deal two damage for combo and cancel battlecry damage\n\ttriggerred_combo_blade.play(target=enemy_minion_ragnaros)\n\tassert game.player2.field[0].health == 5\n\n\t# brann will trigger this combo twice\n\tgame.player1.summon(\"LOE_077\")\n\ttriggerred_twice_blade.play(target=enemy_minion_ragnaros)\n\tassert game.player2.field[0].health == 1\n\n\ndef test_pint_sized_summoner():\n\tgame = prepare_game()\n\tgoldshire1 = game.current_player.give(GOLDSHIRE_FOOTMAN)\n\tgoldshire2 = game.current_player.give(GOLDSHIRE_FOOTMAN)\n\tmoonfire = game.current_player.give(MOONFIRE)\n\tfrostwolf = game.current_player.give(\"CS2_121\")\n\twisp = game.current_player.give(WISP)\n\tassert goldshire1.cost == 1\n\tassert goldshire2.cost == 1\n\tassert frostwolf.cost == 2\n\tassert wisp.cost == 0\n\n\t# summon it directly, minions played still at 0\n\tsummoner = 
game.current_player.summon(\"EX1_076\")\n\tassert game.current_player.minions_played_this_turn == 0\n\tassert goldshire1.cost == 1 - 1\n\tassert goldshire2.cost == 1 - 1\n\tassert not moonfire.buffs\n\tassert moonfire.cost == 0\n\tassert frostwolf.cost == 2 - 1\n\tassert wisp.cost == 0\n\n\tgoldshire1.play()\n\tassert game.current_player.minions_played_this_turn == 1\n\tassert goldshire2.cost == 1\n\tassert frostwolf.cost == 2\n\tassert wisp.cost == 0\n\tgame.end_turn()\n\n\tassert game.current_player.minions_played_this_turn == 0\n\tassert goldshire1.cost == 1\n\tassert goldshire2.cost == 1\n\tassert frostwolf.cost == 2\n\tassert wisp.cost == 0\n\n\tgame.end_turn()\n\tsummoner2 = game.current_player.summon(\"EX1_076\")\n\tassert frostwolf.cost == 2 - 2\n\tsummoner.destroy()\n\tassert frostwolf.cost == 2 - 1\n\tsummoner2.destroy()\n\tassert frostwolf.cost == 2\n\n\ndef test_power_overwhelming():\n\tgame = prepare_game()\n\tpower = game.player1.give(\"EX1_316\")\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tpower.play(target=wisp)\n\tassert wisp.atk == wisp.health == 1 + 4\n\tgame.end_turn()\n\n\tassert wisp not in game.board\n\n\ndef test_power_of_the_wild():\n\tgame = prepare_game()\n\tassert len(game.player1.field) == 0\n\tgame.player1.give(\"EX1_160\").play(choose=\"EX1_160a\")\n\tassert len(game.player1.field) == 1\n\ttoken = game.player1.field[0]\n\tassert token.id == \"EX1_160t\"\n\tteacher = game.player1.give(\"NEW1_026\")\n\tteacher.play()\n\tassert token.atk == 3 and token.health == 2\n\tassert teacher.atk == 3 and teacher.health == 5\n\tgame.player1.give(\"EX1_160\").play(choose=\"EX1_160b\")\n\tassert len(game.player1.field) == 3\n\tassert token.atk == 4 and token.health == 3\n\tassert teacher.atk == 4 and teacher.health == 6\n\tapprentice = game.player1.field[2]\n\tassert apprentice.id == \"NEW1_026t\"\n\tassert apprentice.atk == 1 + 1 and apprentice.health == 1 + 
1\n\n\tgame.player1.summon(\"OG_044\")\n\tgame.player1.give(\"EX1_160\").play()\n\n\tapprentice2 = game.player1.field[2]\n\ttoken2 = game.player1.field[5]\n\tassert apprentice2.id == \"NEW1_026t\" and token2.id == \"EX1_160t\"\n\tassert teacher.atk == 5 and teacher.health == 7\n\tassert apprentice.atk == apprentice.health == 3\n\tassert apprentice2.atk == apprentice2.health == 2\n\tassert token2.atk == 3 and token2.health == 2\n\tgame.end_turn()\n\tgame.end_turn()\n\twisp = game.player1.summon(WISP)\n\tgame.player1.give(\"EX1_160\").play()\n\tassert token2.atk == 4 and token2.health == 3\n\tassert token2 == game.player1.field[-2]\n\tassert game.player1.field[-1] == wisp\n\n\ndef test_power_word_shield():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tpwshield = game.player1.give(\"CS2_004\")\n\tpwshield.play(target=wisp)\n\tassert wisp.health == 3\n\tgame.player1.give(SILENCE).play(target=wisp)\n\tassert wisp.health == 1\n\n\ndef test_preparation():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tprep1 = game.player1.give(\"EX1_145\")\n\tprep2 = game.player1.give(\"EX1_145\")\n\tprep3 = game.player1.give(\"EX1_145\")\n\tsmite = game.player1.give(\"CS1_130\")\n\tfireball = game.player1.give(\"CS2_029\")\n\tfireball2 = game.player2.give(\"CS2_029\")\n\tfootman = game.player1.give(GOLDSHIRE_FOOTMAN)\n\tfootman2 = game.player2.give(GOLDSHIRE_FOOTMAN)\n\tassert prep1.cost == prep2.cost == prep3.cost == 0\n\tassert smite.cost == 1\n\tassert fireball.cost == fireball2.cost == 4\n\tassert footman.cost == footman2.cost == 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert game.player1.used_mana == 0\n\tprep1.play()\n\tassert game.player1.used_mana == 0\n\tassert prep2.cost == prep3.cost == 0\n\tassert smite.cost == 0\n\tassert fireball.cost == 4 - 2\n\tassert fireball2.cost == 4\n\tassert footman.cost == footman2.cost == 1\n\tprep2.play()\n\tassert game.player1.used_mana == 0\n\tassert prep2.cost == 
prep3.cost == 0\n\tassert smite.cost == 0\n\tassert fireball.cost == 4 - 2\n\tassert fireball2.cost == 4\n\tassert footman.cost == footman2.cost == 1\n\tfireball.play(target=game.player2.hero)\n\tassert game.player1.used_mana == 2\n\tassert smite.cost == 1\n\tassert fireball2.cost == 4\n\tassert footman.cost == footman2.cost == 1\n\tprep3.play()\n\tassert smite.cost == 0\n\tassert footman.cost == footman2.cost == 1\n\tgame.end_turn()\n\tassert smite.cost == 1\n\tassert footman.cost == footman2.cost == 1\n\n\ndef test_prologue_chaosstrike():\n\tgame = prepare_game()\n\tgame.current_player.give(\"Prologue_ChaosStrike\").play()\n\tassert len(game.current_player.hand) == 5\n\tassert game.current_player.hero.atk == 2\n\n\ndef test_prophet_velen():\n\tgame = prepare_game(CardClass.PRIEST, CardClass.PRIEST)\n\n\texpected_health = 30\n\tassert game.player2.hero.health == expected_health\n\tassert game.player1.healing_double == 0\n\tassert game.player1.hero_power_double == 0\n\tassert game.player1.spellpower_double == 0\n\tvelen = game.player1.give(\"EX1_350\")\n\tvelen.play()\n\tassert game.player1.healing_double == 1\n\tassert game.player1.hero_power_double == 1\n\tassert game.player1.spellpower_double == 1\n\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\texpected_health -= 2 * 1\n\tassert game.player2.hero.health == expected_health\n\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\texpected_health -= 2 * 1\n\tassert game.player2.hero.health == expected_health\n\n\tgame.player1.hero.power.use(target=game.player2.hero)\n\texpected_health += 2 * 2\n\tassert game.player2.hero.health == expected_health\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tkobold = game.current_player.give(KOBOLD_GEOMANCER)\n\tkobold.play()\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\texpected_health -= 2 * (1 + 1)\n\tassert game.player2.hero.health == expected_health\n\n\ndef test_prophet_velen_multiple():\n\tgame = prepare_game(CardClass.PRIEST, 
CardClass.PRIEST)\n\n\texpected_health = 30\n\tassert game.player2.hero.health == expected_health\n\tassert game.player1.healing_double == 0\n\tassert game.player1.hero_power_double == 0\n\tassert game.player1.spellpower_double == 0\n\tvelen1 = game.player1.give(\"EX1_350\")\n\tvelen1.play()\n\tgame.end_turn()\n\tgame.end_turn()\n\tvelen2 = game.player1.give(\"EX1_350\")\n\tvelen2.play()\n\tassert game.player1.healing_double == 2\n\tassert game.player1.hero_power_double == 2\n\tassert game.player1.spellpower_double == 2\n\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\texpected_health -= 4 * 1\n\tassert game.player2.hero.health == expected_health\n\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\texpected_health -= 4 * 1\n\tassert game.player2.hero.health == expected_health\n\n\tgame.player1.hero.power.use(target=game.player2.hero)\n\texpected_health += 4 * 2\n\tassert game.player2.hero.health == expected_health\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tkobold = game.current_player.give(KOBOLD_GEOMANCER)\n\tkobold.play()\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\texpected_health -= 4 * (1 + 1)\n\tassert game.player2.hero.health == expected_health\n\n\ndef test_questing_adventurer():\n\tgame = prepare_game()\n\tadventurer = game.player1.give(\"EX1_044\")\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tadventurer.play()\n\tassert adventurer.atk == 2\n\tassert adventurer.health == 2\n\tgame.player1.give(THE_COIN).play()\n\tassert adventurer.atk == 3\n\tassert adventurer.health == 3\n\tfor i in range(1, 5):\n\t\tgame.player1.give(THE_COIN).play()\n\t\tassert adventurer.atk == adventurer.health == 3 + i\n\n\ndef test_questing_adventurer_big_game_hunter():\n\tgame = prepare_game()\n\tadventurer = game.player1.give(\"EX1_044\")\n\tadventurer.play()\n\tmightblessing = game.player1.give(\"CS2_087\")\n\tmightblessing.play(target=adventurer)\n\tassert adventurer.atk == 6\n\tbgh = 
game.player1.give(\"EX1_005\")\n\tbgh.play()\n\tassert len(game.player1.field) == 2\n\tassert adventurer.atk == 7\n\n\ndef test_questing_adventurer_shadow_word_pain():\n\tgame = prepare_game()\n\tadventurer = game.player1.summon(\"EX1_044\")\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\tpain = game.player1.give(\"CS2_234\")\n\tassert adventurer.atk == 3\n\tassert adventurer in pain.targets\n\tpain.play(target=adventurer)\n\tassert adventurer.dead\n\n\ndef test_raid_leader():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\twisp2 = game.player1.give(WISP)\n\twisp2.play()\n\twisp3 = game.player2.summon(WISP)\n\traidleader = game.player1.summon(\"CS2_122\")\n\tassert wisp1.atk == wisp2.atk == 2\n\tassert wisp3.atk == 1\n\n\traidleader.destroy()\n\n\tassert wisp1.atk == wisp2.atk == 1\n\n\ndef test_raging_worgen():\n\tgame = prepare_game()\n\tworgen = game.player1.give(\"EX1_412\")\n\tworgen.play()\n\tassert worgen.health == 3\n\tgame.player1.give(MOONFIRE).play(target=worgen)\n\tassert worgen.health == 2\n\tassert worgen.atk == 4\n\tassert worgen.windfury\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert worgen.atk == 3\n\tassert not worgen.windfury\n\n\ndef test_ragnaros():\n\tgame = prepare_game()\n\tragnaros = game.player1.give(\"EX1_298\")\n\tragnaros.play()\n\tassert not ragnaros.can_attack()\n\tgame.end_turn()\n\n\tassert game.player2.hero.health == 22\n\tgame.end_turn()\n\n\tassert game.player2.hero.health == 22\n\tassert not ragnaros.can_attack()\n\n\ndef test_savage_roar():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\tgame.end_turn()\n\twisp2 = game.player2.give(WISP)\n\twisp2.play()\n\tgame.end_turn()\n\n\tassert wisp1.atk == 1\n\tassert wisp2.atk == 1\n\tassert game.player1.hero.atk == 0\n\tassert game.player2.hero.atk == 0\n\tgame.player1.give(\"CS2_011\").play()\n\tassert wisp1.atk == 1 + 2\n\tassert wisp2.atk == 1\n\tassert game.player1.hero.atk == 2\n\tassert 
game.player2.hero.atk == 0\n\tgame.end_turn()\n\tassert wisp1.atk == 1\n\tassert wisp2.atk == 1\n\tassert game.player1.hero.atk == 0\n\tassert game.player2.hero.atk == 0\n\n\ndef test_savagery():\n\tgame = prepare_game(CardClass.DRUID, CardClass.DRUID)\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tassert statue.health == 10\n\tsavagery1 = game.player1.give(\"EX1_578\")\n\tsavagery1.play(statue)\n\tassert statue.health == 10\n\n\tgame.player1.give(HAND_OF_PROTECTION).play(target=statue)\n\tsavagery2 = game.player1.give(\"EX1_578\")\n\tsavagery2.play(statue)\n\tassert statue.divine_shield\n\tgame.player1.give(MOONFIRE).play(target=statue)\n\tassert not statue.divine_shield\n\n\tgame.player1.hero.power.use()\n\tsavagery3 = game.player1.give(\"EX1_578\")\n\tsavagery3.play(statue)\n\tassert statue.damage == 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tgame.player1.give(KOBOLD_GEOMANCER).play()\n\tsavagery4 = game.player1.give(\"EX1_578\")\n\tsavagery4.play(statue)\n\tassert statue.damage == 1 + 1\n\n\ndef test_satyr_overseer():\n\tgame = prepare_game(CardClass.DEMONHUNTER, CardClass.DEMONHUNTER)\n\tgame.current_player.give(\"BT_352\").play()\n\tassert len(game.current_player.field) == 1\n\tgame.current_player.hero.power.use()\n\tgame.current_player.hero.attack(game.current_player.opponent.hero)\n\tassert len(game.current_player.field) == 2\n\tassert game.current_player.field[-1].id == \"BT_352t\"\n\n\ndef test_sea_giant():\n\tgame = prepare_game()\n\tseagiant = game.current_player.give(\"EX1_586\")\n\tassert seagiant.cost == 10\n\tgame.current_player.give(WISP).play()\n\tassert seagiant.cost == 9\n\tgame.current_player.give(WISP).play()\n\tassert seagiant.cost == 8\n\tfor i in range(5):\n\t\tgame.player1.give(WISP).play()\n\tassert seagiant.cost == 3\n\tgame.end_turn()\n\n\tfor i in range(7):\n\t\tgame.player2.give(WISP).play()\n\tassert seagiant.cost == 0\n\n\ndef test_sense_demons():\n\tgame = 
prepare_empty_game()\n\tgame.player1.discard_hand()\n\tdemon1 = game.player1.give(IMP)\n\tdemon1.shuffle_into_deck()\n\tdemon2 = game.player1.give(\"CS2_065\")\n\tdemon2.shuffle_into_deck()\n\twisp = game.player1.give(WISP)\n\twisp.shuffle_into_deck()\n\tassert len(game.player1.deck) == 3\n\tassert len(game.player1.hand) == 0\n\tsense1 = game.player1.give(\"EX1_317\")\n\tsense1.play()\n\tassert len(game.player1.deck) == 1\n\tassert len(game.player1.hand) == 2\n\tassert game.player1.hand.contains(demon1)\n\tassert game.player1.hand.contains(demon2)\n\n\tgame.player1.discard_hand()\n\tassert len(game.player1.deck) == 1\n\tassert len(game.player1.hand) == 0\n\tsense2 = game.player1.give(\"EX1_317\")\n\tsense2.play()\n\tassert len(game.player1.deck) == 1\n\tassert len(game.player1.hand) == 2\n\tassert game.player1.hand[0].id == game.player1.hand[1].id == \"EX1_317t\"\n\n\ndef test_shadow_madness_attacked_last_turn():\n\t\"\"\"\n\tTest that shadow madnessing a minion that was just played by the opponent\n\tlets it attack\n\t\"\"\"\n\tgame = prepare_game()\n\n\twisp = game.player1.give(WISP).play()\n\tgame.end_turn()\n\tassert wisp.controller is game.player1\n\n\tshadowmadness = game.player2.give(\"EX1_334\")\n\tshadowmadness.play(target=wisp)\n\tassert wisp.controller is game.player2\n\tassert wisp.can_attack()\n\twisp.attack(game.player1.hero)\n\tgame.end_turn()\n\n\t# make sure it can attack when control returns\n\tassert wisp.controller is game.player1\n\tassert wisp.can_attack()\n\n\ndef test_shadow_madness_bounce():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tgame.end_turn()\n\n\tgame.player2.discard_hand()\n\tshadowmadness = game.player2.give(\"EX1_334\")\n\tassert wisp.controller is game.player1\n\tshadowmadness.play(target=wisp)\n\tassert wisp.controller is game.player2\n\tgame.player2.give(TIME_REWINDER).play(target=wisp)\n\tassert wisp in game.player2.hand\n\tassert wisp.controller is game.player2\n\tgame.end_turn()\n\n\tassert 
wisp in game.player2.hand\n\tassert wisp.controller is game.player2\n\tgame.end_turn()\n\n\tassert wisp in game.player2.hand\n\tassert wisp.controller is game.player2\n\tgame.end_turn()\n\n\tassert wisp in game.player2.hand\n\tassert wisp.controller is game.player2\n\n\ndef test_shadow_madness_just_played():\n\t\"\"\"\n\ttest that shadow madnessing a minion that attacked on the opponent's previous\n\tturn lets it attack\n\t\"\"\"\n\tgame = prepare_game()\n\n\twisp = game.player1.give(WISP).play()\n\tgame.end_turn()\n\tgame.end_turn()\n\tassert wisp.controller is game.player1\n\tassert wisp.can_attack()\n\twisp.attack(game.player2.hero)\n\tgame.end_turn()\n\n\tshadowmadness = game.player2.give(\"EX1_334\")\n\tshadowmadness.play(target=wisp)\n\tassert wisp.controller is game.player2\n\tassert wisp.can_attack()\n\twisp.attack(game.player1.hero)\n\tgame.end_turn()\n\n\t# make sure it can attack when the player regains control\n\tassert wisp.controller is game.player1\n\tassert wisp.can_attack()\n\n\ndef test_shadow_madness_silence():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tgame.end_turn()\n\n\tassert wisp.controller == game.player1\n\tshadowmadness = game.player2.give(\"EX1_334\")\n\tshadowmadness.play(target=wisp)\n\tassert wisp.controller == game.player2\n\tgame.player2.give(SILENCE).play(target=wisp)\n\tassert wisp.controller == game.player1\n\tgame.end_turn()\n\n\tassert wisp.controller == game.player1\n\n\ndef test_shadow_madness_wild_pyro():\n\tgame = prepare_game()\n\tpyromancer = game.player1.give(\"NEW1_020\")\n\tpyromancer.play()\n\tgame.end_turn()\n\n\tassert pyromancer.controller == game.player1\n\tassert pyromancer in game.player1.field\n\tassert pyromancer.health == 2\n\tshadowmadness = game.player2.give(\"EX1_334\")\n\tshadowmadness.play(target=pyromancer)\n\tassert pyromancer.controller == game.player2\n\tassert pyromancer in game.player2.field\n\tassert pyromancer.health == 1\n\tgame.end_turn()\n\n\tassert 
pyromancer.controller == game.player1\n\tassert pyromancer in game.player1.field\n\n\ndef test_shadow_word_pain():\n\tgame = prepare_game()\n\tyeti = game.player1.summon(\"CS2_182\")\n\twisp1 = game.player1.summon(WISP)\n\twisp2 = game.player1.summon(WISP)\n\tpain = game.player1.give(\"CS2_234\")\n\tassert pain.targets == [wisp1, wisp2]\n\tpain.play(target=wisp1)\n\tassert wisp1.dead\n\tassert not wisp2.dead\n\tassert not yeti.dead\n\n\ndef test_shadowflame():\n\tgame = prepare_game()\n\tdummy1 = game.player1.give(TARGET_DUMMY)\n\tdummy1.play()\n\tgame.end_turn()\n\tgoldshire = game.player2.give(GOLDSHIRE_FOOTMAN)\n\tgoldshire.play()\n\tdummy2 = game.player2.give(TARGET_DUMMY)\n\tdummy2.play()\n\n\tassert dummy1.health == 2\n\tassert goldshire.health == 2\n\tassert not dummy2.dead\n\tgame.player2.give(\"EX1_303\").play(target=dummy2)\n\tassert dummy1.health == 2\n\tassert goldshire.health == 2\n\tassert dummy2.dead\n\tgame.player2.give(\"EX1_303\").play(target=goldshire)\n\tassert dummy1.health == 1\n\n\ndef test_shadowform():\n\tgame = prepare_game(CardClass.PRIEST, CardClass.PRIEST)\n\t# Hero Power should reset\n\tshadowform1 = game.player1.give(\"EX1_625\")\n\tassert game.player1.hero.power.id == \"HERO_09bp\"\n\tassert game.player1.hero.power.is_usable()\n\tgame.player1.hero.power.use(target=game.player1.hero)\n\tassert not game.player1.hero.power.is_usable()\n\tassert not game.player1.shadowform\n\tassert shadowform1.is_playable()\n\tprint(game.player1.slots)\n\tshadowform1.play()\n\tprint(game.player1.slots)\n\tassert game.player1.shadowform\n\tassert game.player1.hero.power.id == \"EX1_625t\"\n\tassert game.player1.hero.power.is_usable()\n\tgame.player1.hero.power.use(target=game.player2.hero)\n\tassert not game.player1.hero.power.is_usable()\n\tassert game.player2.hero.health == 28\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tshadowform2 = game.player1.give(\"EX1_625\")\n\tshadowform2.play()\n\tassert game.player1.shadowform\n\tassert 
game.player1.hero.power.id == \"EX1_625t2\"\n\tassert game.player1.hero.power.is_usable()\n\tgame.player1.hero.power.use(target=game.player2.hero)\n\tassert not game.player1.hero.power.is_usable()\n\tassert game.player2.hero.health == 25\n\n\tshadowform3 = game.player1.give(\"EX1_625\")\n\tshadowform3.play()\n\tassert game.player1.shadowform\n\tassert game.player1.hero.power.id == \"EX1_625t2\"\n\tassert not game.player1.hero.power.is_usable()\n\n\ndef test_shadowhoof_slayer():\n\tgame = prepare_game()\n\tassert not game.current_player.hero.can_attack()\n\tgame.current_player.give(\"BT_142\").play()\n\tassert game.current_player.hero.power.is_usable()\n\tassert game.current_player.hero.atk == 1\n\tassert game.current_player.hero.can_attack()\n\n\ndef test_shadowstep():\n\tgame = prepare_game()\n\tshadowstep = game.player1.give(\"EX1_144\")\n\tdeathwing = game.player1.summon(\"NEW1_030\")\n\tassert deathwing.zone == Zone.PLAY\n\tassert deathwing.cost == 10\n\tshadowstep.play(target=deathwing)\n\tassert deathwing.zone == Zone.HAND\n\tassert deathwing in game.player1.hand\n\tassert deathwing.cost == 8\n\n\ndef test_shattered_sun_cleric():\n\tgame = prepare_game()\n\tcleric = game.player1.give(\"EX1_019\")\n\tassert not cleric.targets\n\tcleric.play()\n\tassert cleric.atk == 3\n\tassert cleric.health == 2\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tcleric2 = game.player1.give(\"EX1_019\")\n\tassert cleric in cleric2.targets\n\tcleric2.play(target=cleric)\n\tassert cleric.atk == 3 + 1\n\tassert cleric.health == 2 + 1\n\n\ndef test_shield_slam():\n\tgame = prepare_game(CardClass.WARRIOR, CardClass.WARRIOR)\n\twisp = game.player2.summon(WISP)\n\tassert game.player1.hero.armor == 0\n\tshieldslam1 = game.player1.give(\"EX1_410\")\n\tshieldslam1.play(target=wisp)\n\tassert not wisp.dead\n\n\tgame.player1.give(HAND_OF_PROTECTION).play(target=wisp)\n\tassert wisp.divine_shield\n\tshieldslam2 = game.player1.give(\"EX1_410\")\n\tshieldslam2.play(target=wisp)\n\tassert 
wisp.divine_shield\n\n\tgeomancer = game.player1.summon(KOBOLD_GEOMANCER)\n\tshieldslam3 = game.player1.give(\"EX1_410\")\n\tshieldslam3.play(target=wisp)\n\tassert not wisp.divine_shield\n\tgeomancer.destroy()\n\n\tgame.player1.hero.power.use()\n\tassert game.player1.hero.armor == 2\n\tshieldslam4 = game.player1.give(\"EX1_410\")\n\tshieldslam4.play(target=wisp)\n\tassert wisp.dead\n\n\ndef test_si7_agent():\n\tgame = prepare_game()\n\tagent = game.player1.give(\"EX1_134\")\n\tagent2 = game.player1.give(\"EX1_134\")\n\tassert not agent.requires_target()\n\tassert not agent2.requires_target()\n\tagent.play()\n\tassert agent2.requires_target()\n\tagent2.play(target=agent)\n\tassert agent.health == 3 - 2\n\n\ndef test_sightless_watcher():\n\tgame = prepare_empty_game()\n\tassert len(game.current_player.deck) == 0\n\tgame.current_player.give(IMP).shuffle_into_deck()\n\tgame.current_player.give(WISP).shuffle_into_deck()\n\tgame.current_player.give(TARGET_DUMMY).shuffle_into_deck()\n\t# TODO how to play Choice card\n\t# game.player1.give(\"BT_323\").play(choose=wisp)\n\n\ndef test_slam():\n\tgame = prepare_game()\n\twisp = game.player1.summon(WISP)\n\tmogushan = game.player1.summon(\"EX1_396\")\n\tgame.player1.discard_hand()\n\tassert len(game.player1.hand) == 0\n\tgame.player1.give(\"EX1_391\").play(target=wisp)\n\tassert wisp.dead\n\tassert len(game.player1.hand) == 0\n\tgame.player1.give(\"EX1_391\").play(target=mogushan)\n\tassert not mogushan.dead\n\tassert len(game.player1.hand) == 1\n\n\ndef test_sorcerers_apprentice():\n\tgame = prepare_game()\n\tapprentice1 = game.player1.give(\"EX1_608\")\n\tfireball1 = game.player1.give(\"CS2_029\")\n\tassert fireball1.cost == 4\n\tapprentice1.play()\n\tassert fireball1.cost == 3\n\tapprentice2 = game.player1.give(\"EX1_608\")\n\tapprentice2.play()\n\tassert fireball1.cost == 2\n\tapprentice1.destroy()\n\tassert fireball1.cost == 3\n\tgame.end_turn()\n\n\tfireball2 = game.player2.give(\"CS2_029\")\n\tassert fireball2.cost == 
4\n\tgame.end_turn()\n\n\tassert fireball1.cost == 3\n\n\ndef test_southsea_deckhand():\n\tgame = prepare_game(CardClass.ROGUE, CardClass.ROGUE)\n\tdeckhand = game.player1.give(\"CS2_146\")\n\tdeckhand.play()\n\tassert not deckhand.charge\n\t# Play rogue hero power (gives a weapon)\n\tgame.player1.hero.power.use()\n\tassert deckhand.charge\n\tgame.player1.give(LIGHTS_JUSTICE).play()\n\tassert deckhand.charge\n\tgame.player1.weapon.destroy()\n\tassert not deckhand.charge\n\n\t# play charge\n\tgame.player1.give(\"CS2_103\").play(target=deckhand)\n\tassert deckhand.charge\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert deckhand.charge\n\tgame.player1.hero.power.use()\n\tassert deckhand.charge\n\tgame.player1.weapon.destroy()\n\t# No longer have weapon, but still have the charge buff from earlier\n\tassert deckhand.charge\n\n\ndef test_spiteful_smith():\n\tgame = prepare_game()\n\tsmith = game.player1.give(\"CS2_221\")\n\tsmith.play()\n\tassert not game.player1.hero.atk\n\tweapon = game.player1.give(LIGHTS_JUSTICE)\n\tweapon.play()\n\tassert game.player1.hero.atk == weapon.atk == 1\n\tassert not game.player2.hero.atk\n\tgame.player1.give(MOONFIRE).play(target=smith)\n\tassert smith.damaged\n\tassert smith.enraged\n\tassert weapon.atk == 1 + 2\n\tassert weapon.buffs\n\tassert game.player1.hero.atk == 1 + 2\n\tassert not game.player2.hero.atk\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert not smith.enraged\n\tassert game.player1.hero.atk == weapon.atk == 1\n\tassert not weapon.buffs\n\tgame.player1.give(MOONFIRE).play(target=smith)\n\tassert smith.enraged\n\tassert weapon.atk == game.player1.hero.atk == 1 + 2\n\n\ndef test_stampeding_kodo():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\tgame.end_turn()\n\n\tkodo = game.player2.give(\"NEW1_041\")\n\tkodo.play()\n\tassert wisp.dead\n\tassert not statue.dead\n\tkodo2 = 
game.player2.give(\"NEW1_041\")\n\tkodo2.play()\n\tassert not statue.dead\n\n\ndef test_starfall_5_to_one():\n\tgame = prepare_game()\n\n\tsnapjaw = game.player1.give(\"CS2_119\")\n\tsnapjaw.play()\n\tassert snapjaw.health == 7\n\tstarfall = game.player1.give(\"NEW1_007\")\n\tstarfall.play(choose=\"NEW1_007b\", target=snapjaw)\n\tassert snapjaw.health == 2\n\n\ndef test_starving_buzzard():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\tbuzzard = game.player1.give(\"CS2_237\")\n\tbuzzard.play()\n\tassert not game.player1.hand\n\tgame.player1.give(CHICKEN).play()\n\tassert len(game.player1.hand) == 1\n\tgame.player1.give(\"NEW1_031\").play() # Animal Companion\n\tassert len(game.player1.hand) == 2\n\n\ndef test_stormwind_champion():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tstormwind = game.player1.give(\"CS2_222\")\n\tstormwind.play()\n\tassert wisp.atk == wisp.health == 1 + 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\t# ensure bounce removes the buff\n\tgame.player1.give(TIME_REWINDER).play(target=stormwind)\n\tassert stormwind not in game.player1.field\n\tassert wisp.atk == wisp.health == 1\n\tstormwind.play()\n\tassert wisp.atk == wisp.health == 1 + 1\n\n\t# destroy Stormwind Champion\n\tstormwind.destroy()\n\tassert wisp.atk == wisp.health == 1\n\n\ndef test_summoning_portal():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\twisp = game.player1.give(WISP)\n\tassert wisp.cost == 0\n\tweapon = game.player1.give(LIGHTS_JUSTICE)\n\tassert weapon.cost == 1\n\tmolten = game.player1.give(\"EX1_620\")\n\tmolten_base_cost = 20\n\tassert molten.cost == molten_base_cost\n\tgoldshire = game.player1.give(GOLDSHIRE_FOOTMAN)\n\tassert goldshire.cost == 1\n\tfrostwolf = game.player1.give(\"CS2_121\")\n\tassert frostwolf.cost == 2\n\n\tportal = game.player1.give(\"EX1_315\")\n\tportal.play()\n\tassert wisp.cost == 0\n\tassert weapon.cost == 1\n\tassert molten.cost == molten_base_cost - 2\n\tassert goldshire.cost == 
1\n\tassert frostwolf.cost == 1\n\tgame.player1.give(MOONFIRE).play(target=game.player1.hero)\n\tassert molten.cost == molten_base_cost - 3\n\tportal2 = game.player1.give(\"EX1_315\")\n\tportal2.play()\n\tassert wisp.cost == 0\n\tassert molten.cost == molten_base_cost - 2 - 1 - 2\n\tassert goldshire.cost == 1\n\tassert frostwolf.cost == 1\n\n\ndef test_sunfury_protector():\n\tgame = prepare_game()\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\twisp2 = game.player1.give(WISP)\n\twisp2.play()\n\tsunfury = game.player1.give(\"EX1_058\")\n\tsunfury.play()\n\tassert not wisp1.taunt\n\tassert wisp2.taunt\n\n\ndef test_sword_of_justice():\n\tgame = prepare_game(CardClass.PALADIN, CardClass.PALADIN)\n\tsword = game.player1.give(\"EX1_366\")\n\tsword.play()\n\tassert sword.durability == 5\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tassert wisp.atk == 2\n\tassert wisp.health == 2\n\tassert wisp.buffs\n\tassert sword.durability == 4\n\tgame.end_turn()\n\n\tgame.player2.give(WISP).play()\n\tassert sword.durability == 4\n\tgame.end_turn()\n\n\tgame.player1.hero.power.use()\n\tassert sword.durability == 3\n\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tgame.player1.give(WISP).play()\n\tassert not game.player1.weapon\n\twisp2 = game.player1.give(WISP)\n\twisp2.play()\n\tassert wisp2.health == wisp2.atk == 1\n\tassert not wisp2.buffs\n\n\ndef test_sylvanas_windrunner():\n\tgame = prepare_game()\n\tsylvanas1 = game.player1.give(\"EX1_016\")\n\tsylvanas1.play()\n\tsylvanas1.destroy()\n\tassert len(game.player1.field) == 0\n\tgame.end_turn()\n\tgame.end_turn()\n\n\twisp = game.player2.summon(WISP)\n\tsylvanas2 = game.player1.give(\"EX1_016\")\n\tsylvanas2.play()\n\tassert wisp in game.player2.field\n\tsylvanas2.destroy()\n\tassert wisp in game.player1.field\n\n\ndef test_the_black_knight():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tdummy1 = 
game.player1.give(TARGET_DUMMY)\n\tdummy1.play()\n\tgame.end_turn()\n\n\tdummy2 = game.player2.give(TARGET_DUMMY)\n\tdummy2.play()\n\tblackknight = game.player2.give(\"EX1_002\")\n\tassert blackknight.targets == [dummy1]\n\tblackknight.play(target=dummy1)\n\tassert dummy1.dead\n\n\ndef test_thoughtsteal():\n\tgame = prepare_empty_game()\n\n\tassert len(game.player1.hand) == 0\n\tassert len(game.player2.deck) == 0\n\tgame.player1.give(\"EX1_339\").play()\n\tassert len(game.player1.hand) == 0\n\n\tgame.player2.give(WISP).shuffle_into_deck()\n\tassert len(game.player2.deck) == 1\n\tgame.player1.give(\"EX1_339\").play()\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.hand[0].id == WISP\n\tgame.player1.discard_hand()\n\n\tassert len(game.player1.hand) == 0\n\tgame.player2.give(TARGET_DUMMY).shuffle_into_deck()\n\tassert len(game.player2.deck) == 2\n\tgame.player1.give(\"EX1_339\").play()\n\tassert len(game.player1.hand) == 2\n\n\tassert game.player1.hand.contains(WISP)\n\tassert game.player1.hand.contains(TARGET_DUMMY)\n\n\ndef test_tinkmaster_overspark():\n\tgame = prepare_game()\n\ttinkmaster1 = game.player1.give(\"EX1_083\")\n\ttinkmaster1.play()\n\tassert tinkmaster1 in game.board\n\ttinkmaster2 = game.player1.give(\"EX1_083\")\n\ttinkmaster2.play()\n\tassert tinkmaster1 not in game.board\n\tassert len(game.player1.field) == 2\n\tassert game.board.contains(\"EX1_tk28\") ^ game.board.contains(\"EX1_tk29\")\n\n\ndef test_totemic_might():\n\tgame = prepare_game()\n\tsearing = game.player1.give(\"CS2_050\")\n\tsearing.play()\n\tassert searing.atk == 1\n\tassert searing.health == 1\n\tgame.player1.give(\"EX1_244\").play()\n\tassert searing.atk == 1\n\tassert searing.health == 3\n\n\ndef test_tracking():\n\tgame = prepare_game()\n\tgame.player1.discard_hand()\n\ttracking = game.player1.give(\"DS1_184\")\n\ttracking.play()\n\tassert game.player1.choice\n\tassert len(game.player1.choice.cards) == 3\n\tpick = 
game.player1.choice.cards[0]\n\tgame.player1.choice.choose(pick)\n\tassert game.player1.hand == [pick]\n\n\ndef test_truesilver_champion():\n\tgame = prepare_game()\n\ttruesilver = game.current_player.give(\"CS2_097\")\n\ttruesilver.play()\n\tlightwarden = game.current_player.give(\"EX1_001\")\n\tlightwarden.play()\n\tassert game.player1.weapon is truesilver\n\tassert game.player1.hero.atk == 4\n\tassert game.player1.hero.health == 30\n\tgame.current_player.hero.attack(target=game.player2.hero)\n\tassert game.player2.hero.health == 26\n\tassert game.player1.hero.health == 30\n\tassert lightwarden.atk == 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tfor i in range(3):\n\t\tgame.player1.give(MOONFIRE).play(target=game.player1.hero)\n\tgame.player1.hero.attack(target=game.player2.hero)\n\tassert game.current_player.hero.health == 29\n\tassert lightwarden.atk == 3\n\n\ndef test_twilight_drake():\n\tgame = prepare_game()\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tassert len(game.current_player.hand) == 7\n\tdrake = game.current_player.give(\"EX1_043\")\n\tdrake.play()\n\tassert len(game.current_player.hand) == 7\n\tassert drake.health == 1 + 7\n\tassert drake.buffs\n\n\tgame.end_turn()\n\tgame.current_player.discard_hand()\n\tdrake2 = game.current_player.give(\"EX1_043\")\n\tassert len(game.current_player.hand) == 1\n\tdrake2.play()\n\tassert not game.current_player.hand\n\tassert drake2.health == 1\n\tassert not drake2.buffs\n\n\ndef test_unbound_elemental():\n\tgame = prepare_game()\n\tunbound = game.player1.give(\"EX1_258\")\n\tunbound.play()\n\tassert unbound.atk == 2\n\tassert unbound.health == 4\n\tgame.player1.give(THE_COIN).play()\n\tassert unbound.atk == 2\n\tassert unbound.health == 4\n\t# Lightning Bolt should trigger it\n\tgame.player1.give(\"EX1_238\").play(target=game.player2.hero)\n\tassert unbound.atk == 3\n\tassert unbound.health == 
5\n\tgame.end_turn()\n\n\tgame.player2.give(\"EX1_238\").play(target=game.player2.hero)\n\tassert unbound.atk == 3\n\tassert unbound.health == 5\n\n\ndef test_upgrade():\n\tgame = prepare_game()\n\tweapon = game.player1.give(LIGHTS_JUSTICE)\n\tweapon.play()\n\tassert game.player1.weapon.atk == 1\n\tassert game.player1.weapon.durability == 4\n\tgame.player1.hero.attack(game.player2.hero)\n\tassert game.player1.weapon.durability == 4 - 1\n\tupgrade = game.player1.give(\"EX1_409\")\n\tupgrade.play()\n\tassert game.player1.weapon.atk == 1 + 1\n\tassert game.player1.weapon.durability == 4 - 1 + 1\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tgame.player1.hero.attack(game.player2.hero)\n\tassert game.player2.hero.health == 30 - 1 - 2\n\tgame.end_turn()\n\n\t# test Bloodsail Corsair\n\tcorsair = game.player2.give(\"NEW1_025\")\n\tcorsair.play()\n\tassert game.player1.weapon.atk == 1 + 1\n\tassert game.player1.weapon.durability == 4 - 1 + 1 - 1 - 1\n\n\ndef test_upgrade_no_weapon():\n\tgame = prepare_game()\n\t# Upgrade without a weapon\n\tupgrade = game.player1.give(\"EX1_409\")\n\tupgrade.play()\n\tassert game.player1.hero.atk == 1\n\tassert game.player1.weapon.atk == 1\n\tassert game.player1.weapon.id == \"EX1_409t\"\n\n\ndef test_vancleef():\n\tgame = prepare_game()\n\tvancleef1 = game.current_player.give(\"EX1_613\")\n\tvancleef2 = game.current_player.give(\"EX1_613\")\n\n\tassert not game.current_player.cards_played_this_turn\n\tfor i in range(5):\n\t\tgame.player1.give(THE_COIN).play()\n\tassert game.current_player.cards_played_this_turn == 5\n\tvancleef1.play()\n\tassert game.current_player.cards_played_this_turn == 6\n\tassert vancleef1.atk == 12\n\tassert vancleef1.health == 12\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert not game.current_player.cards_played_this_turn\n\tvancleef2.play()\n\tassert game.current_player.cards_played_this_turn == 1\n\tassert vancleef2.atk == 2\n\tassert vancleef2.health == 2\n\n\ndef test_venture_co_mercenary():\n\tgame = 
prepare_game()\n\tfireball = game.player1.give(\"CS2_029\")\n\twisp = game.player1.give(WISP)\n\tassert wisp.cost == 0\n\tassert fireball.cost == 4\n\tventureco = game.player1.give(\"CS2_227\")\n\tventureco.play()\n\tassert wisp.cost == 0 + 3\n\tassert fireball.cost == 4\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tventureco2 = game.player1.give(\"CS2_227\")\n\tassert ventureco2.cost == 5 + 3\n\tventureco2.play()\n\tassert wisp.cost == 0 + 3 + 3\n\tassert fireball.cost == 4\n\tgame.player1.give(SILENCE).play(target=ventureco)\n\tassert wisp.cost == 0 + 3\n\tassert fireball.cost == 4\n\n\ndef test_violet_teacher():\n\tgame = prepare_game()\n\tteacher = game.player1.give(\"NEW1_026\")\n\tteacher.play()\n\tassert len(game.player1.field) == 1\n\tgame.player1.give(THE_COIN).play()\n\tassert len(game.player1.field) == 2\n\tassert len(game.player1.field.filter(id=\"NEW1_026t\")) == 1\n\tgame.end_turn()\n\tgame.player2.give(THE_COIN).play()\n\tassert len(game.player1.field) == 2\n\n\ndef test_void_terror():\n\tgame = prepare_game()\n\tterror1 = game.player1.give(\"EX1_304\")\n\tterror2 = game.player1.give(\"EX1_304\")\n\tterror3 = game.player1.give(\"EX1_304\")\n\tpower = game.player1.give(\"EX1_316\")\n\tterror1.play()\n\tassert terror1.atk == 3\n\tassert terror1.health == 3\n\n\tterror2.play()\n\tassert terror1.dead\n\tassert terror2.atk == 3 + 3\n\tassert terror2.health == 3 + 3\n\n\tpower.play(target=terror2)\n\tassert terror2.health == 3 + 3 + 4\n\tassert terror2.atk == 3 + 3 + 4\n\tterror3.play()\n\tassert terror2.dead\n\tassert terror3.atk == 3 + 3 + 3 + 4\n\tassert terror3.health == 3 + 3 + 3 + 4\n\tgame.end_turn()\n\tgame.end_turn()\n\tassert terror3.zone == Zone.PLAY\n\n\ndef test_warsong_commander():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tboar = game.player1.give(\"CS2_171\")\n\tboar.play()\n\tassert wisp.atk == boar.atk == 1\n\tassert not wisp.charge\n\tassert boar.charge\n\twarsong = 
game.player1.give(\"EX1_084\")\n\twarsong.play()\n\tassert wisp.atk == 1\n\tassert boar.atk == 1 + 1\n\tassert not wisp.charge\n\tassert boar.charge\n\tgame.player1.give(SILENCE).play(target=boar)\n\tassert boar.atk == 1\n\tassert not boar.charge\n\n\ndef test_water_elemental():\n\tgame = prepare_game()\n\telem = game.player1.give(\"CS2_033\")\n\telem.play()\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tassert not game.player2.hero.frozen\n\telem.attack(target=game.player2.hero)\n\tassert game.player2.hero.frozen\n\tgame.end_turn()\n\n\tassert game.player2.hero.frozen\n\tgame.end_turn()\n\n\tassert not game.player2.hero.frozen\n\tgame.end_turn()\n\n\tgame.player2.give(LIGHTS_JUSTICE).play()\n\tgame.player2.hero.attack(target=elem)\n\tassert game.player2.hero.frozen\n\tgame.end_turn()\n\n\tassert game.player2.hero.frozen\n\tgame.end_turn()\n\n\tassert game.player2.hero.frozen\n\tgame.end_turn()\n\n\tassert not game.player2.hero.frozen\n\tgame.end_turn()\n\n\ndef test_wild_growth():\n\tgame = prepare_game(game_class=Game)\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tgame.end_turn()\n\tassert game.player1.max_mana == 3\n\twildgrowth1 = game.player1.give(\"CS2_013\")\n\twildgrowth1.play()\n\tassert game.player1.mana == 0\n\tassert game.player1.used_mana == 3 + 1\n\tassert game.player1.max_mana == 3 + 1\n\tfor i in range(7):\n\t\tgame.end_turn()\n\t\tgame.end_turn()\n\n\tgame.player1.discard_hand()\n\tassert len(game.player1.hand) == 0\n\tassert game.player1.max_mana == 10\n\twildgrowth2 = game.player1.give(\"CS2_013\")\n\twildgrowth2.play()\n\tassert len(game.player1.hand) == 1\n\tassert game.player1.max_mana == 10\n\texcess_mana = game.player1.hand[0]\n\tassert excess_mana.id == \"CS2_013t\"\n\texcess_mana.play()\n\tassert len(game.player1.hand) == 1\n\n\ndef test_wild_pyromancer():\n\tgame = prepare_game()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tpyro = 
game.player1.give(\"NEW1_020\")\n\tgame.end_turn()\n\tgame.end_turn()\n\n\tpyro.play()\n\tassert pyro.health == 2\n\tassert wisp.zone == Zone.PLAY\n\n\t# play moonfire. wisp should die.\n\tgame.player1.give(MOONFIRE).play(target=game.player2.hero)\n\tassert wisp.dead\n\tassert pyro.health == 1\n\n\t# play circle of healing. pyro should go up to 2hp then back to 1.\n\tgame.player1.give(CIRCLE_OF_HEALING).play()\n\tassert pyro.health == 1\n\tassert pyro.zone == Zone.PLAY\n\n\t# Silence the pyromancer. It should not trigger.\n\tgame.player1.give(SILENCE).play(target=pyro)\n\tassert pyro.health == 1\n\tassert pyro.zone == Zone.PLAY\n\n\ndef test_whirlwind():\n\tgame = prepare_game()\n\tstatue = game.player1.give(ANIMATED_STATUE)\n\tstatue.play()\n\twisp = game.player1.give(WISP)\n\twisp.play()\n\tgame.end_turn()\n\n\twisp2 = game.player2.give(WISP)\n\twisp2.play()\n\tgame.player2.give(\"EX1_400\").play()\n\tassert game.player1.hero.health == 30\n\tassert game.player2.hero.health == 30\n\tassert wisp.dead\n\tassert wisp2.dead\n\tassert statue.health == 10 - 1\n\n\ndef test_wrath():\n\tgame = prepare_game()\n\tyeti = game.player2.summon(\"CS2_182\")\n\tadventurer = game.player1.summon(\"EX1_044\")\n\tyeti.health == 5\n\twrath1 = game.player1.give(\"EX1_154\")\n\twrath2 = game.player1.give(\"EX1_154\")\n\twrath3 = game.player1.give(\"EX1_154\")\n\n\twrath1.play(target=yeti, choose=\"EX1_154a\")\n\tassert yeti.health == 2\n\tassert len(game.player1.hand) == 6\n\n\twrath2.play(target=yeti, choose=\"EX1_154b\")\n\tassert yeti.health == 1\n\tassert len(game.player1.hand) == 6\n\n\tgame.player1.summon(\"OG_044\")\n\tgiant = game.player1.summon(\"EX1_105\")\n\t# a minion with spell damage\n\tgame.player1.summon(\"EX1_012\")\n\twrath3.play(target=giant)\n\tassert giant.health == 2\n\tassert len(game.player1.hand) == 6\n\n\t# test if extra cards are counted as played\n\tassert adventurer.health == adventurer.atk == 5\n\n\t# test if extra mana are used\n\tassert game.player1.mana 
== 10 - 2 - 2 - 2\n\n\ndef test_young_priestess():\n\tgame = prepare_game()\n\tpriestess = game.player1.give(\"EX1_004\")\n\tpriestess.play()\n\tassert priestess.health == 1\n\tgame.end_turn()\n\n\twisp = game.player2.give(WISP)\n\twisp.play()\n\tgame.end_turn()\n\n\tassert priestess.health == 1\n\tassert wisp.health == 1\n\twisp1 = game.player1.give(WISP)\n\twisp1.play()\n\tassert wisp1.health == 1\n\tgame.end_turn()\n\n\tassert wisp1.health == 2\n\n\ndef test_ysera():\n\tgame = prepare_game()\n\tysera = game.player1.give(\"EX1_572\")\n\tysera.play()\n\tgame.player1.discard_hand()\n\tassert len(game.player1.hand) == 0\n\tgame.end_turn()\n\tassert len(game.player1.hand) == 1\n\tassert CardClass.DREAM in game.player1.hand[0].classes\n\n\ndef test_ysera_awakens():\n\tgame = prepare_game()\n\tgame.player1.give(WISP).play()\n\tysera = game.player1.give(\"EX1_572\")\n\tysera.play()\n\tgame.end_turn()\n\n\tgame.player2.give(WISP).play()\n\tgame.player2.give(\"DREAM_02\").play()\n\tassert game.player1.hero.health == game.player2.hero.health == 30 - 5\n\tassert len(game.board) == 1\n\tassert ysera.health == 12\n","repo_name":"jleclanche/fireplace","sub_path":"tests/test_classic.py","file_name":"test_classic.py","file_ext":"py","file_size_in_byte":103777,"program_lang":"python","lang":"en","doc_type":"code","stars":645,"dataset":"github-code","pt":"6"} +{"seq_id":"10242052775","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Lifeforms represent the evolving patterns whenever a rule is applied. In\nSeagull, lifeforms are first-class citizens: you can add them to the board,\nview them independently, compose, customize, and the like. This library\nprovides a collection of pre-made lifeforms that you can play around.\n\nLifeforms are arranged into categories based on their configurations (excluding the Base and Custom lifeforms):\n\n\n.. 
autosummary::\n seagull.lifeforms.gliders\n seagull.lifeforms.growers\n seagull.lifeforms.oscillators\n seagull.lifeforms.methuselahs\n seagull.lifeforms.random\n seagull.lifeforms.static\n\n\n\"\"\"\n\nfrom .static import Box, Seed, Moon, Kite, Eater1, SwitchEngine\nfrom .oscillators import (\n Blinker,\n Toad,\n Pulsar,\n FigureEight,\n Beacon,\n Pentadecathlon,\n ChaCha,\n)\nfrom .gliders import (\n Glider,\n LightweightSpaceship,\n MiddleweightSpaceship,\n)\nfrom .methuselahs import (\n Century, \n Thunderbird, \n)\nfrom .growers import Unbounded\nfrom .random import RandomBox\nfrom .custom import Custom\n\n__all__ = [\n \"Box\",\n \"Seed\",\n \"Moon\",\n \"Kite\",\n \"Eater1\",\n \"SwitchEngine\",\n \"Blinker\",\n \"Toad\",\n \"Pulsar\",\n \"FigureEight\",\n \"Beacon\",\n \"Pentadecathlon\",\n \"ChaCha\",\n \"Glider\",\n \"Century\",\n \"Thunderbird\",\n \"Unbounded\",\n \"RandomBox\",\n \"Custom\",\n]\n","repo_name":"ljvmiranda921/seagull","sub_path":"seagull/lifeforms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"6"} +{"seq_id":"32729226183","text":"\"\"\"Rando write bananaport locations.\"\"\"\nfrom imp import source_from_cache\n\nimport js\nfrom randomizer.Lists.Warps import BananaportVanilla\nfrom randomizer.Patching.Patcher import ROM\nfrom randomizer.Spoiler import Spoiler\n\n\ndef randomize_bananaport(spoiler: Spoiler):\n \"\"\"Rando write bananaport locations.\"\"\"\n pad_types = [0x214, 0x213, 0x211, 0x212, 0x210]\n\n if spoiler.settings.bananaport_rando:\n for cont_map in spoiler.bananaport_replacements:\n pad_vanilla = []\n cont_map_id = int(cont_map[\"containing_map\"])\n cont_map_setup_address = js.pointer_addresses[9][\"entries\"][cont_map_id][\"pointing_to\"]\n # Pointer Table 9, use \"containing_map\" as a map index to grab setup start address\n ROM().seek(cont_map_setup_address)\n model2_count = 
int.from_bytes(ROM().readBytes(4), \"big\")\n for x in range(model2_count):\n start = cont_map_setup_address + 4 + (x * 0x30)\n ROM().seek(start + 0x28)\n obj_type = int.from_bytes(ROM().readBytes(2), \"big\")\n if obj_type in pad_types:\n pad_index = pad_types.index(obj_type)\n ROM().seek(start + 0x2A)\n obj_id = int.from_bytes(ROM().readBytes(2), \"big\")\n ROM().seek(start + 0)\n obj_x = int.from_bytes(ROM().readBytes(4), \"big\")\n ROM().seek(start + 4)\n obj_y = int.from_bytes(ROM().readBytes(4), \"big\")\n ROM().seek(start + 8)\n obj_z = int.from_bytes(ROM().readBytes(4), \"big\")\n ROM().seek(start + 12)\n obj_scale = int.from_bytes(ROM().readBytes(4), \"big\")\n ROM().seek(start + 0x18)\n obj_rotx = int.from_bytes(ROM().readBytes(4), \"big\")\n ROM().seek(start + 0x1C)\n obj_roty = int.from_bytes(ROM().readBytes(4), \"big\")\n ROM().seek(start + 0x20)\n obj_rotz = int.from_bytes(ROM().readBytes(4), \"big\")\n obj_index = x\n banned = False\n for warp in BananaportVanilla.values():\n if warp.map_id == cont_map_id and warp.obj_id_vanilla == obj_id and warp.locked:\n banned = True\n if not banned:\n pad_vanilla.append(\n {\n \"pad_index\": pad_index,\n \"_id\": obj_id,\n \"x\": obj_x,\n \"y\": obj_y,\n \"z\": obj_z,\n \"scale\": obj_scale,\n \"rx\": obj_rotx,\n \"ry\": obj_roty,\n \"rz\": obj_rotz,\n \"idx\": obj_index,\n }\n )\n for y in cont_map[\"pads\"]:\n warp_idx = y[\"warp_index\"]\n repl_ids = y[\"warp_ids\"]\n source_counter = 0\n for repl in repl_ids:\n for vanilla_pad in pad_vanilla:\n if vanilla_pad[\"_id\"] == repl:\n vanilla_idx = vanilla_pad[\"idx\"]\n start = cont_map_setup_address + (0x30 * vanilla_idx) + 4\n ref_pad = {}\n counter = 0\n for vanilla_pad0 in pad_vanilla:\n if vanilla_pad0[\"pad_index\"] == warp_idx:\n if counter == source_counter:\n ref_pad = vanilla_pad0\n counter += 1\n ROM().seek(start + 0x28)\n ROM().writeMultipleBytes(pad_types[vanilla_pad[\"pad_index\"]], 2)\n ROM().seek(start + 0)\n 
ROM().writeMultipleBytes(ref_pad[\"x\"], 4)\n ROM().seek(start + 4)\n ROM().writeMultipleBytes(ref_pad[\"y\"], 4)\n ROM().seek(start + 8)\n ROM().writeMultipleBytes(ref_pad[\"z\"], 4)\n ROM().seek(start + 12)\n ROM().writeMultipleBytes(ref_pad[\"scale\"], 4)\n ROM().seek(start + 0x18)\n ROM().writeMultipleBytes(ref_pad[\"rx\"], 4)\n ROM().seek(start + 0x1C)\n ROM().writeMultipleBytes(ref_pad[\"ry\"], 4)\n ROM().seek(start + 0x20)\n ROM().writeMultipleBytes(ref_pad[\"rz\"], 4)\n source_counter += 1\n","repo_name":"Pokechu22/DK64-Randomizer","sub_path":"randomizer/Patching/BananaPortRando.py","file_name":"BananaPortRando.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"18827914122","text":"# Encapsulate the pairs of int multiples to related string monikers\nclass MultipleMoniker:\n mul = 0\n mon = \"\"\n\n def __init__(self, multiple, moniker) -> None:\n self.mul = multiple\n self.mon = moniker\n\n# Define object to contain methods\nclass FizzBuzz:\n\n # Define the int to start counting at\n start = 1\n\n # Define the max number to count to\n maxi = 0\n\n # Define the multiples and the corresponding descriptor terms\n mmPair = [MultipleMoniker(3, \"Fizz\"), MultipleMoniker(5, \"Buzz\")]\n\n # Define the array that will hold the designation\n array = []\n\n def __init__(self, max_int, start = 1) -> None:\n self.start = start\n self.maxi = max_int\n self.init_with_max()\n \n # Generate sequence up to and including maxi\n def init_with_max(self, max_i=0):\n if max_i != 0 :\n self.maxi = max_i\n tmp_array = []\n \n for i in range(self.start, self.maxi + 1):\n tmp_str = \"\"\n for m in range(len(self.mmPair)):\n if i % self.mmPair[m].mul == 0:\n tmp_str += self.mmPair[m].mon\n if tmp_str == \"\":\n tmp_str += format(i)\n tmp_array.append(tmp_str)\n #print(f\"{i}|:{self.array[i-self.start]}\")\n self.array = tmp_array\n\n # Generate class STR for printout\n def __str__(self):\n 
ret_str = f\"FizzBuzz({self.maxi}):\"\n for i in self.array:\n ret_str += i + \", \"\n return ret_str\n\n def add_multiple_moniker(self, multiple, moniker):\n self.mmPair.append(MultipleMoniker(multiple, moniker))\n\n\ndef main():\n\n # Test FizzBuzz Class Init\n x1 = 42\n x2 = 15\n\n # Calculate sequence & Print Output to terminal\n print(\"TEST_1:\")\n F1 = FizzBuzz(x1)\n print(F1)\n \n print(\"TEST_2:\")\n F2 = FizzBuzz(x2)\n print(F2)\n\n # Add \"Fuzz\" as a designator for a multiple of 7\n F1.add_multiple_moniker(7, \"Fuzz\")\n F1.init_with_max(105)\n print(F1)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"JNMaree/pyMath","sub_path":"src/tasks/fizz_buzz.py","file_name":"fizz_buzz.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37111862839","text":"import argparse\nimport os\nfrom pathlib import Path\nimport time\nfrom tqdm import tqdm\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.optim import SGD\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom torchvision import datasets, transforms\nfrom torchvision.models import resnet18\n\nfrom utils import (\n GBlur,\n LARSWrapper,\n ProgressTracker,\n accuracy,\n cleanup_old_checkpoints,\n load_config_into,\n log_exp_config,\n)\n\n\nclass ContrastiveLearningViewGenerator:\n\n def __init__(self, n_patch: int = 4, seed: int = 0):\n self.n_patch = n_patch\n self.rng_ = torch.Generator(device='cpu')\n self.rng_.manual_seed(seed)\n blur_seed = torch.randint(1 << 31, size=(1,), generator=self.rng_).item()\n self.transform_ = transforms.Compose([\n transforms.RandomResizedCrop(32, scale=(0.25, 0.25), ratio=(1, 1)),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.2)], 
p=0.8),\n transforms.RandomGrayscale(p=0.2),\n # xxx(okachaiev): double check if randomization for gaussian blur\n # is critical for downstream performance\n GBlur(p=0.1, seed=blur_seed),\n transforms.RandomSolarize(threshold=192.0, p=0.1),\n transforms.ToTensor(),\n transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])\n ])\n\n def __call__(self, x):\n return [self.transform_(x) for _ in range(self.n_patch)]\n\n\ndef load_backbone(arch: str) -> Tuple[nn.Module, int]:\n feature_dim = 512\n backbone = resnet18()\n if arch == 'resnet18-cifar':\n backbone.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n backbone.maxpool = nn.Identity()\n backbone.fc = nn.Identity()\n elif arch == \"resnet18-mini\":\n backbone.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n backbone.maxpool = nn.Identity()\n backbone.fc = nn.Identity()\n backbone.layer4 = nn.Identity()\n feature_dim = 256\n elif arch == \"resnet18-tiny\":\n backbone.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n backbone.maxpool = nn.Identity()\n backbone.fc = nn.Identity()\n backbone.layer3 = nn.Identity()\n backbone.layer4 = nn.Identity()\n feature_dim = 128\n elif arch == 'resnet18-imagenet':\n backbone.fc = nn.Identity()\n elif arch == 'resnet18-tinyimagenet':\n backbone.avgpool = nn.AdaptiveAvgPool2d(1)\n backbone.fc = nn.Identity()\n else:\n raise ValueError(f\"Unsupported backbone architecture: {arch}\")\n return backbone, feature_dim\n\n\ndef init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=1e-3)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, z_dim=1024, hidden_dim=4096, norm_p=2, 
backbone_arch='resnet18-cifar'):\n super().__init__()\n self.backbone, self.backbone_dim = get_backbone(backbone_arch)\n self.z_dim = z_dim\n self.h_dim = hidden_dim\n self.norm_p = norm_p\n self.pre_feature = nn.Sequential(\n nn.Linear(self.backbone_dim, self.h_dim),\n nn.BatchNorm1d(self.h_dim),\n nn.ReLU(),\n )\n self.projection = nn.Sequential(\n nn.Linear(self.h_dim, self.h_dim),\n nn.BatchNorm1d(self.h_dim),\n nn.ReLU(),\n nn.Linear(self.h_dim, z_dim)\n )\n\n def forward(self, x):\n h = self.backbone(x)\n h = self.pre_feature(h)\n z_proj = F.normalize(self.projection(h), p=self.norm_p)\n return h, z_proj\n\n\nclass TotalCodingRateLoss(nn.Module):\n\n def __init__(self, eps=0.01):\n super().__init__()\n self.eps = eps\n\n def _compute_discrimn_loss(self, W):\n \"\"\"Discriminative Loss.\"\"\"\n p, m = W.shape # [d, B]\n I = torch.eye(p, device=W.device)\n scalar = p / (m * self.eps)\n logdet = torch.logdet(I + scalar * W.matmul(W.T))\n return logdet / 2.\n\n def forward(self, z_proj):\n n_patches = z_proj.shape[0]\n loss = torch.zeros(n_patches)\n for i in range(n_patches):\n loss[i] = -self._compute_discrimn_loss(z_proj[i].T)\n return loss.mean()\n\n\nclass MeanSimilarityLoss(nn.Module):\n\n def forward(self, z_proj):\n n_patches, bs, _ = z_proj.shape\n z_avg = z_proj.mean(dim=0).repeat((n_patches, 1))\n z_proj = z_proj.reshape(n_patches*bs, -1)\n z_sim = F.cosine_similarity(z_proj, z_avg, dim=1).mean()\n return -z_sim\n\n\nclass BarycenterSphericalUniformityLoss(nn.Module):\n\n def forward(self, z_proj, t=2):\n z_avg = z_proj.mean(dim=0)\n return torch.pdist(z_avg, p=2).pow(2).mul(-t).exp().mean().log()\n\n\ndef load_dataset(\n dataset_name: str,\n train: bool = True,\n n_patch: int = 4,\n folder: Union[str, os.PathLike] = \"./datasets/\",\n seed: int = 0\n):\n \"\"\"Loads a dataset for training and testing\"\"\"\n folder = Path(folder)\n dataset_name = dataset_name.lower()\n transform = ContrastiveLearningViewGenerator(n_patch=n_patch, seed=seed)\n if 
dataset_name == \"cifar10\":\n trainset = datasets.CIFAR10(\n root=folder / \"CIFAR10\",\n train=train,\n download=True,\n transform=transform\n )\n trainset.n_classes = 10\n elif dataset_name == \"cifar100\":\n trainset = datasets.CIFAR100(\n root=folder / \"CIFAR100\",\n train=train,\n download=True,\n transform=transform\n )\n trainset.n_classes = 100\n else:\n raise ValueError(f\"Unsupported dataset: {dataset_name}\")\n return trainset\n\n\ndef parse_args():\n main_parser = argparse.ArgumentParser(description='SSL-in-one-epoch')\n subparsers = main_parser.add_subparsers(help='available commands', dest='task')\n\n train_parser = subparsers.add_parser(\"train\")\n train_parser.add_argument('--exp_name', type=str, default='default',\n help='experiment name (default: default)')\n train_parser.add_argument('--dataset', type=str, default='cifar10',\n choices=('cifar10', 'cifar100'),\n help='data (default: cifar10)')\n train_parser.add_argument('--n_patches', type=int, default=100,\n help='number of patches used in EMP-SSL (default: 100)')\n train_parser.add_argument('--arch', type=str, default=\"resnet18-cifar\",\n choices=(\n 'resnet18-cifar',\n 'resnet18-imagenet',\n 'resnet18-tinyimagenet',\n 'resnet18-mini',\n 'resnet18-tiny',\n ),\n help='network architecture (default: resnet18-cifar)')\n train_parser.add_argument('--n_epochs', type=int, default=2,\n help='max number of epochs to finish (default: 2)')\n train_parser.add_argument('--n_eval_epochs', type=int, default=100,\n help='max number of epochs for linear prob evaluation (default: 100)')\n train_parser.add_argument('--bs', type=int, default=100,\n help='batch size (default: 100)')\n train_parser.add_argument('--lr', type=float, default=0.3,\n help='learning rate (default: 0.3)')\n train_parser.add_argument('--eval_lr', type=float, default=0.0075,\n help='learning rate for linear prob evaluation (default: 0.0075)')\n train_parser.add_argument('--log_folder', type=str, default='logs/EMP-SSL-Training',\n 
help='directory name (default: logs/EMP-SSL-Training)')\n train_parser.add_argument('--device', type=str, default='cuda',\n help='device to use for training (default: cuda)')\n train_parser.add_argument('--seed', type=int, default=42, help='random seed')\n train_parser.add_argument('--save_proj', default=False, action='store_true',\n help='include this flag to save patch embeddings and projections')\n train_parser.add_argument('--pretrained_proj', default=None, type=str,\n help='use pretrained weights for the projection network')\n train_parser.add_argument('--h_dim', default=4096, type=int, help='patch embedding dimensionality')\n train_parser.add_argument('--z_dim', default=1024, type=int, help='projection dimensionality')\n train_parser.add_argument('--uniformity_loss', default='tcr', type=str, choices=('tcr', 'vonmises'),\n help='loss to use for enforcing output space uniformity (default: tcr)')\n train_parser.add_argument('--emb_pool', default='features', type=str, choices=('features', 'proj'),\n help='which tensors to pool as a final representation (default: features)')\n train_parser.add_argument('--invariance_loss_weight', type=float, default=200.,\n help='coefficient of token similarity (default: 200.0)')\n train_parser.add_argument('--uniformity_loss_weight', type=float, default=1.,\n help='coefficient of token uniformity (default: 1.0)')\n train_parser.add_argument('--resume', default=False, action='store_true',\n help='if training should be resumed from the latest checkpoint')\n train_parser.add_argument('--tcr_eps', type=float, default=0.2, help='eps for TCR (default: 0.2)')\n train_parser.add_argument('--config_from', type=str, default=None, metavar='DIR',\n help='copy default configuration from existing experiment')\n train_parser.add_argument('--eval_freq', type=int, default=10, metavar='N',\n help='fit linear prob after each N epochs')\n train_parser.add_argument('--print_freq', type=int, default=50, metavar='N',\n help='print train losses after 
each N batches')\n train_parser.add_argument('--print_eval_freq', type=int, default=50, metavar='N',\n help='print train losses after each N batches')\n\n resume_parser = subparsers.add_parser('resume')\n resume_parser.add_argument('--exp_dir', type=str, required=True, metavar='DIR',\n help='path to the experiment folder')\n\n cleanup_parser = subparsers.add_parser('cleanup')\n cleanup_parser.add_argument('--log_folder', type=str, default='logs/EMP-SSL-Training',\n help='directory name (default: logs/EMP-SSL-Training)')\n cleanup_parser.add_argument('--keep', type=int, default=1,\n help='how many checkpoints to keep (default: 1)')\n cleanup_parser.add_argument('-y', action='store_true', default=False,\n help='suppress interactive prompt')\n\n return main_parser.parse_args()\n\n\nargs = parse_args()\nif args.task == 'train':\n exp_dir = Path(args.log_folder) / f\"{args.exp_name}__numpatch{args.n_patches}_bs{args.bs}_lr{args.lr}\"\n if args.config_from:\n config_file = Path(args.config_from) / 'hparams.yaml'\n load_config_into(config_file, args)\n print(f\"* Loaded configuration settings from: {config_file}\")\nelif args.task == 'resume':\n exp_dir = Path(args.exp_dir)\n config_file = exp_dir / 'hparams.yaml'\n load_config_into(config_file, args)\n print(f\"* Loaded configuration settings from: {config_file}\")\n args.resume = True\nelif args.task == 'cleanup':\n exp_dir = Path(args.log_folder)\n cleanup_old_checkpoints(exp_dir, keep=args.keep, no_prompt=args.y)\n exit(0)\nelse:\n raise ValueError(f\"Unknown task: {args.task}\")\n\nprint(\"* Parameters:\", args)\ntorch.manual_seed(args.seed)\n\n# folder for logging checkpoints and metrics\nmodel_dir = exp_dir / 'checkpoints'\nmodel_dir.mkdir(parents=True, exist_ok=True)\nartifacts_dir = exp_dir / 'artifacts'\nartifacts_dir.mkdir(parents=True, exist_ok=True)\nprob_dir = exp_dir / 'probs'\nprob_dir.mkdir(parents=True, exist_ok=True)\nconfig_file = exp_dir / 'hparams.yaml'\nlog_exp_config(config_file, args)\n\n# detect 
available device\ndevice = torch.device('cuda' if (args.device == 'cuda' and torch.cuda.is_available()) else 'cpu')\ntorch.multiprocessing.set_sharing_strategy('file_system')\nn_workers = min(8, os.cpu_count()-1)\n\n# setup dataset and data loader\ntrain_dataset = load_dataset(args.dataset, train=True, n_patch=args.n_patches, seed=args.seed)\ntrain_dataloader = DataLoader(\n train_dataset,\n batch_size=args.bs,\n shuffle=True,\n drop_last=True,\n num_workers=n_workers\n)\ntest_dataset = load_dataset(args.dataset, train=False, n_patch=args.n_patches)\ntest_dataloader = DataLoader(\n test_dataset,\n batch_size=args.bs,\n shuffle=False,\n drop_last=True,\n num_workers=n_workers\n)\n\n\ndef train(net: nn.Module, first_epoch: int = 0, prev_state: Optional[dict] = None):\n # setup optimizer and scheduler\n optimizer = SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)\n optimizer = LARSWrapper(optimizer, eta=0.005, clip=True, exclude_bias_n_norm=True,)\n n_converge = (len(train_dataloader) // args.bs) * args.n_epochs\n scheduler = CosineAnnealingLR(optimizer, T_max=n_converge, eta_min=0, last_epoch=-1)\n\n if prev_state is not None:\n net.load_state_dict(prev_state['net'])\n optimizer.load_state_dict(prev_state['optimizer'])\n scheduler.load_state_dict(prev_state['scheduler'])\n\n # training criterion\n similarity_loss = MeanSimilarityLoss()\n if args.uniformity_loss.lower() == 'tcr':\n uniformity_reg = TotalCodingRateLoss(eps=args.tcr_eps)\n elif args.uniformity_loss.lower() == 'vonmises':\n uniformity_reg = BarycenterSphericalUniformityLoss()\n else:\n raise ValueError(f\"Unknown uniformity loss: {args.uniformity_loss}\")\n\n n_batches_per_epoch = len(train_dataloader)\n tracker = ProgressTracker(n_batches_per_epoch)\n batch_time = tracker.create_meter('Time', ':5.3f')\n data_time = tracker.create_meter('Data', ':5.3f')\n losses_align = tracker.create_meter('Loss@Align', ':.4f')\n losses_unif = tracker.create_meter('Loss@Unif', ':.4f')\n 
losses = tracker.create_meter('Loss', ':.5f')\n\n for epoch in range(first_epoch, args.n_epochs):\n tracker.reset(prefix=f\"Epoch {epoch+1:03d}/{args.n_epochs:03d}\")\n # xxx(okachaiev): it's interesting that within an unsupervised learning regime\n # it should be okay to throw test dataset there as well, right?\n end = time.time()\n for i, (X, _) in enumerate(train_dataloader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # combine patches into a tensor, move data to the same device as model\n X = torch.stack(X, dim=0).to(device)\n n_patches, bs, C, H, W = X.shape\n X = X.reshape(n_patches*bs, C, H, W)\n\n # compute output\n _, z_proj = net(X)\n z_proj = z_proj.reshape(n_patches, bs, -1)\n\n # measure and record loss\n loss_align = similarity_loss(z_proj)\n loss_unif = uniformity_reg(z_proj)\n loss = args.invariance_loss_weight*loss_align + args.uniformity_loss_weight*loss_unif\n losses_align.update(loss_align.item(), bs)\n losses_unif.update(loss_unif.item(), bs)\n losses.update(loss.item(), bs)\n\n net.zero_grad()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n print(tracker.display(i + 1))\n\n print(tracker.summarize())\n scheduler.step()\n\n # save checkpoint\n torch.save({\n 'net': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'epoch': epoch + 1,\n }, model_dir / f\"{epoch}.pt\")\n\n if (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.n_epochs:\n print(\"===> Evaluating linear prob\")\n net.eval()\n eval_datasets = {}\n for subset, dataloader in [('train', train_dataloader), ('test', test_dataloader)]:\n eval_datasets[subset] = encode(net, dataloader, subset_name=subset)\n\n evaluate(\n eval_datasets['train'],\n eval_datasets['test'],\n exp_dir / \"linear_accuracy.txt\",\n n_epochs=args.n_eval_epochs,\n lr=args.eval_lr,\n age_n_epochs=epoch+1,\n 
)\n\n\ndef encode(net: Encoder, data_loader: DataLoader, subset_name: str = 'train') -> TensorDataset:\n n_samples = len(data_loader)*args.bs\n if args.emb_pool.lower() == 'features':\n emb_dim = net.h_dim\n elif args.emb_pool.lower() == 'proj':\n emb_dim = net.z_dim\n else:\n raise ValueError(f\"Unknown embedding pooling is given: {args.emb_pool}\")\n embeddings = torch.zeros((n_samples, emb_dim))\n labels = torch.zeros((n_samples,))\n if args.save_proj:\n features = torch.zeros((n_samples, args.n_patches, net.h_dim))\n projections = torch.zeros((n_samples, args.n_patches, net.z_dim))\n for batch_id, (X, y) in enumerate(tqdm(data_loader, desc=f\"Encoding ({subset_name:>7}) dataset\")):\n X = torch.stack(X, dim=0).to(device)\n n_patches, bs, C, H, W = X.shape\n X = X.reshape(n_patches*bs, C, H, W)\n with torch.no_grad():\n h, z_proj = net(X)\n h = h.reshape(n_patches, bs, net.h_dim).permute(1, 0, 2)\n z_proj = z_proj.reshape(n_patches, bs, net.z_dim).permute(1, 0, 2)\n if emb_dim == net.h_dim:\n emb = h.mean(1)\n else:\n emb = z_proj.mean(1)\n embeddings[batch_id*bs:(batch_id+1)*bs, :] = emb\n labels[batch_id*bs:(batch_id+1)*bs] = y\n if args.save_proj:\n features[batch_id*bs:(batch_id+1)*bs, :, :] = h\n projections[batch_id*bs:(batch_id+1)*bs, :, :] = z_proj\n artifact = {'embeddings': embeddings, 'labels': labels}\n if args.save_proj:\n artifact.update({'features': features, 'projections': projections})\n return TensorDataset(embeddings, labels.long())\n\n\ndef evaluate(\n train_data,\n test_data,\n report_file: Union[str, os.PathLike],\n n_epochs: int = 100,\n lr: float = 0.0075,\n batch_size: int = 100,\n age_n_epochs: int = 0,\n):\n train_loader = DataLoader(\n train_data,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=1,\n )\n test_loader = DataLoader(\n test_data,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=1,\n )\n\n # setup model, optimizer, and scheduler\n classifier = nn.Linear(\n 
train_data.tensors[0].shape[1],\n train_dataset.n_classes\n ).to(device)\n optimizer = SGD(classifier.parameters(), lr=lr, momentum=0.9, weight_decay=5e-5)\n scheduler = CosineAnnealingLR(optimizer, 100)\n\n # define loss function\n criterion = nn.CrossEntropyLoss()\n\n tracker = ProgressTracker(n_epochs)\n test_top1 = tracker.create_meter('Acc@1', ':6.3f')\n test_top5 = tracker.create_meter('Acc@5', ':6.3f')\n for epoch in range(n_epochs):\n # reset tracker to get proper view of how accuracy changes\n tracker.reset(prefix=f\"Epoch {epoch+1:03d}/{n_epochs:03d}\")\n\n # train\n classifier.train()\n for X, y in train_loader:\n X, y = X.to(device), y.to(device)\n logits = classifier(X)\n loss = criterion(logits, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n # eval on test dataset now\n classifier.eval()\n for X, y in test_loader:\n X, y = X.to(device), y.to(device)\n with torch.no_grad():\n logits = classifier(X)\n top1, top5 = accuracy(logits, y, topk=(1, 5))\n test_top1.update(top1.item(), X.size(0))\n test_top5.update(top5.item(), X.size(0))\n\n if epoch % args.print_eval_freq:\n print(tracker.display(epoch))\n\n summary = tracker.summarize(f\"Prob after (n_epochs): {age_n_epochs:03d}\")\n # xxx(okachaiev): in-file and on-screen reporting could be a part of the\n # tracker functionality, btw. 
would be a good place to add\n # tensorboard log writer as well\n print(summary)\n with open(report_file, \"a\") as fd:\n fd.write(summary + '\\n')\n\n # save trained classifier\n torch.save(classifier.state_dict(), prob_dir / f\"{age_n_epochs-1}.pt\")\n\n\nif __name__ == '__main__':\n net = Encoder(z_dim=args.z_dim, hidden_dim=args.h_dim, backbone_arch=args.arch).to(device)\n net.apply(init_weights)\n print(f\"* Encoder network: {sum(p.numel() for p in net.parameters()):,} params\")\n if args.pretrained_proj:\n net_weights = net.state_dict()\n weights = torch.load(args.pretrained_proj, map_location=device)\n # filter out projection network weights\n net_weights.update({k: v for k, v in weights.items() if k.startswith('projection.')})\n net.load_state_dict(net_weights)\n # freeze training for projection\n for params in net.projection.parameters():\n params.requires_grad = False\n\n # train SSL encoder\n # check if there's a checkpoint that could be loaded,\n # otherwise run training\n checkpoint_files = list(model_dir.glob(f\"*.pt\"))\n last_checkpoint = model_dir / f\"{args.n_epochs-1}.pt\"\n if os.path.exists(last_checkpoint):\n print(f\"🚀 All done! 
The experiment has taken its final bow.\")\n report_file = exp_dir / \"linear_accuracy.txt\"\n if os.path.exists(report_file):\n print('Here is performance report:\\n', '-'*80)\n with open(report_file, \"r\") as fd:\n print(fd.read())\n exit(0)\n elif checkpoint_files and args.resume:\n last_epoch = max(int(file.name.replace(\".pt\", \"\")) for file in checkpoint_files)\n last_checkpoint = model_dir / f\"{last_epoch}.pt\"\n weights = torch.load(last_checkpoint, map_location=device)\n print(f\"===> Resume SSL encoder training from the checkpoint {last_checkpoint} for epoch {last_epoch+1}\")\n train(net, first_epoch=last_epoch+1, prev_state=weights)\n else:\n print(\"===> Training SSL encoder\")\n train(net)\n","repo_name":"kachayev/ssl-in-one-epoch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23361,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"5667898811","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nclass Bot: \n def __init__(self,data,sampling=False,**kwargs):\n '''\n Fit's a model by automaticaly identifying discrete and continuous columns.\n \n # Parameters:\n # data (DataFrame):The data which will be used for training.\n # sampling (Boolean):Sampling 1000 data points randomly.\n # **kwargs: Model training parameters. \n '''\n import pandas as pd\n import ctgan\n\n self.data = data if sampling==False else data.sample(1000, random_state = 42) \n \n discrete_columns = []\n for x in data.columns:\n if data[x].dtypes == \"O\" or data[x].dtypes == \"datetime64[ns]\" or data[x].dtypes == \"category\":\n discrete_columns.append(x)\n \n model = ctgan.CTGAN(**kwargs)\n model.fit(data,discrete_columns)\n self.model = model\n \n def generate(self,n):\n '''Takes number of samples to generate and returns DataFrame\n \n # Returns:\n # df (DataFrame):Synthetically generated dataframe. 
\n '''\n return self.model.sample(n)\n\n","repo_name":"torchd3v/dataroid","sub_path":"dataroid.py","file_name":"dataroid.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"10988912490","text":"#숫자로 이루어진 사각형\ndef print_rect(n):\n i,cnt=0,0\n for _ in range(n*n):\n i+=1\n cnt+=1\n if i==10:\n i=1\n if cnt %n==0: \n print(i,end='\\n')\n continue\n print(i,end=' ')\n \nprint_rect(int(input())) \n","repo_name":"Miensoap/Pystudy","sub_path":"CodeTree/2_프로그래밍연습/2_1_함수/4_숫자로이루어진사각형.py","file_name":"4_숫자로이루어진사각형.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20921882705","text":"# -*- coding : utf-8 -*-\n# Author => Albertus Restiyanto Pramayudha\n# email => xabre0010@gmail.com\n# linkedin => https://www.linkedin.com/in/albertus-restiyanto-pramayudha-470261a8/\n# youtube => https://www.youtube.com/channel/UCCtgLDIfqehJ1R8cohMeTXA\n\nfrom odoo import fields, models, api, _\nfrom datetime import datetime\nfrom odoo.exceptions import UserError, ValidationError\n\n\nclass AccountPayment(models.Model):\n _inherit = 'account.payment'\n\n loan_type = fields.Selection([('dana','Pinjaman Dana'),('barang','Pinjaman Barang'),('konsumtif','Pinjaman Konsumtif'),('syariah','Pinjaman Syariah'),('tarikan_deposito','Tarikan Deposito'),('tarikan_tabungan','Tarikan Tabungan')], string='Type Tarikan')\n dana_id = fields.Many2one('yudha.peminjaman.dana', 'No Pinjaman Dana')\n barang_id = fields.Many2one('yudha.peminjaman.barang', 'No Pinjaman Barang')\n konsumtif_id = fields.Many2one('yudha.peminjaman.konsumtif', 'No Pinjaman Konsumtif')\n syariah_id = fields.Many2one('yudha.peminjaman.syariah', 'No Pinjaman Syariah')\n deposito_id = fields.Many2one('yudha.deposito', 'No Transaksi')\n tabungan_id = fields.Many2one('yudha.tabungan', 'No Transaksi')\n\n @api.onchange('partner_id')\n def 
onchange_partner_id_dana(self):\n if not self.partner_id:\n return\n self.loan_type=[]\n self.dana_id = []\n self.barang_id = []\n self.konsumtif_id = []\n self.syariah_id = []\n self.deposito_id = []\n self.tabungan_id = []\n\n @api.onchange('dana_id','barang_id','konsumtif_id','sembako_id','syariah_id','deposito_id','tabungan_id')\n def onchange_pinjaman_id(self):\n if self.loan_type == 'dana':\n self.amount = self.dana_id.jml_pinjam\n elif self.loan_type == 'barang':\n self.amount = self.barang_id.jml_pinjam\n elif self.loan_type == 'konsumtif':\n self.amount = self.konsumtif_id.jml_pinjam\n elif self.loan_type == 'syariah':\n self.amount = self.syariah_id.jml_pinjam\n elif self.loan_type == 'tarikan_deposito':\n self.amount = self.deposito_id.jml_depo\n elif self.loan_type == 'tarikan_tabungan':\n self.amount = self.tabungan_id.jml_tab\n\n @api.onchange('loan_type')\n def onchange_loan_type(self):\n if not self.partner_id:\n return\n if not self.loan_type:\n return\n if self.loan_type=='dana':\n sql_query = \"\"\"select id from yudha_peminjaman_dana where state='valid' and partner_id=%s\n \"\"\"\n elif self.loan_type=='barang':\n sql_query = \"\"\"select id from yudha_peminjaman_barang where state='valid' and partner_id=%s\n \"\"\"\n elif self.loan_type=='konsumtif':\n sql_query = \"\"\"select id from yudha_peminjaman_konsumtif where state='valid' and partner_id=%s\n \"\"\"\n elif self.loan_type=='syariah':\n sql_query = \"\"\"select id from yudha_peminjaman_syariah where state='valid' and partner_id=%s\n \"\"\"\n elif self.loan_type == 'tarikan_deposito':\n sql_query = \"\"\"select distinct a.id from yudha_deposito a inner join yudha_pencairan_deposito b on a.id=b.depo_id where b.type_bayar='transfer' and a.state='payment' and a.partner_id=%s\n \"\"\"\n elif self.loan_type == 'tarikan_tabungan':\n sql_query = \"\"\"select id from yudha_tabungan where jns_trans='TD' and asal_dana='TF' and state='valid' and partner_id=%s\n \"\"\"\n\n 
self.env.cr.execute(sql_query, (self.partner_id.id,))\n res_id = self.env.cr.dictfetchall()\n data_list = []\n coa_kliring_pinjaman=self.env['yudha.settings'].search([('code','=','settings')], limit=1).coa_kliring_pinjaman\n coa_kliring_tarikan=self.env['yudha.settings'].search([('code','=','settings')], limit=1).coa_kliring_tarikan\n if res_id != False:\n for field in res_id:\n data_list.append(field['id'])\n if self.loan_type=='dana':\n self.barang_id=[]\n self.konsumtif_id=[]\n self.sembako_id=[]\n self.syariah_id=[]\n self.deposito_id=[]\n self.tabungan_id=[]\n self.control_account=coa_kliring_pinjaman\n return {'domain': {'dana_id': [('id', '=', data_list)]}}\n elif self.loan_type=='barang':\n self.dana_id=[]\n self.konsumtif_id=[]\n self.sembako_id=[]\n self.syariah_id=[]\n self.deposito_id = []\n self.tabungan_id = []\n self.control_account=coa_kliring_pinjaman\n return {'domain': {'barang_id': [('id', '=', data_list)]}}\n elif self.loan_type=='konsumtif':\n self.barang_id=[]\n self.dana_id=[]\n self.sembako_id=[]\n self.syariah_id=[]\n self.deposito_id = []\n self.tabungan_id = []\n self.control_account=coa_kliring_pinjaman\n return {'domain': {'konsumtif_id': [('id', '=', data_list)]}}\n elif self.loan_type=='syariah':\n self.barang_id=[]\n self.konsumtif_id=[]\n self.sembako_id=[]\n self.dana_id=[]\n self.deposito_id = []\n self.tabungan_id = []\n self.control_account=coa_kliring_pinjaman\n return {'domain': {'syariah_id': [('id', '=', data_list)]}}\n elif self.loan_type=='tarikan_deposito':\n self.barang_id=[]\n self.konsumtif_id=[]\n self.sembako_id=[]\n self.dana_id=[]\n self.syariah_id = []\n self.tabungan_id = []\n self.control_account=coa_kliring_tarikan\n return {'domain': {'deposito_id': [('id', '=', data_list)]}}\n elif self.loan_type=='tarikan_tabungan':\n self.barang_id=[]\n self.konsumtif_id=[]\n self.sembako_id=[]\n self.dana_id=[]\n self.syariah_id = []\n self.deposito_id = []\n self.control_account=coa_kliring_tarikan\n return {'domain': 
{'tabungan_id': [('id', '=', data_list)]}}\n\n\n def post(self):\n if self.control_account.name=='KLIRING PINJAMAN ANGGOTA':\n if not self.loan_type:\n raise UserError('Type Tarikan harus diisi')\n\n if self.loan_type=='dana':\n if not self.dana_id:\n raise UserError('No Pinjaman harus diisi')\n\n yudha_obj=self.env['yudha.peminjaman.dana'].search([('id','=',self.dana_id.id)])\n if yudha_obj:\n yudha_obj.write({'state': 'paid'})\n yudha_obj_detail = self.env['yudha.peminjaman.dana.details'].search([('loan_id','=',yudha_obj.id),('doc_type','=','outbound'), ('cicilan_ke', '=', 0)])\n yudha_obj_detail.write({'payment_id':self.id,'state':'paid','jml_cicilan' : -self.amount})\n elif self.loan_type=='barang':\n if not self.barang_id:\n raise UserError('No Pinjaman harus diisi')\n yudha_obj = self.env['yudha.peminjaman.barang'].search([('id', '=', self.barang_id.id)])\n if yudha_obj:\n yudha_obj.write({'state': 'paid'})\n yudha_obj_detail = self.env['yudha.peminjaman.barang.details'].search(\n [('loan_id', '=', yudha_obj.id), ('doc_type', '=', 'outbound'), ('cicilan_ke', '=', 0)])\n yudha_obj_detail.write({'payment_id': self.id,'state':'paid','jml_cicilan' : -self.amount})\n elif self.loan_type=='konsumtif':\n if not self.konsumtif_id:\n raise UserError('No Pinjaman harus diisi')\n yudha_obj = self.env['yudha.peminjaman.konsumtif'].search([('id', '=', self.konsumtif_id.id)])\n if yudha_obj:\n yudha_obj.write({'state': 'paid'})\n yudha_obj_detail = self.env['yudha.peminjaman.konsumtif.details'].search(\n [('loan_id', '=', yudha_obj.id), ('doc_type', '=', 'outbound'), ('cicilan_ke', '=', 0)])\n yudha_obj_detail.write({'payment_id': self.id,'state':'paid','jml_cicilan' : -self.amount})\n elif self.loan_type=='syariah':\n if not self.syariah_id:\n raise UserError('No Pinjaman harus diisi')\n yudha_obj = self.env['yudha.peminjaman.syariah'].search([('id', '=', self.syariah_id.id)])\n if yudha_obj:\n yudha_obj.write({'state': 'paid'})\n yudha_obj_detail = 
self.env['yudha.peminjaman.syariah.details'].search(\n [('loan_id', '=', yudha_obj.id), ('doc_type', '=', 'outbound'), ('cicilan_ke', '=', 0)])\n yudha_obj_detail.write({'payment_id': self.id,'state':'paid','jml_cicilan' : -self.amount})\n elif self.loan_type=='tarikan_deposito':\n if not self.syariah_id:\n raise UserError('No Transaksi harus diisi')\n yudha_obj = self.env['yudha.deposito'].search([('id', '=', self.deposito_id.id)])\n if yudha_obj:\n yudha_obj.write({'state': 'paid'})\n yudha_obj_detail = self.env['yudha.pencairan.deposito'].search([('depo_id', '=', self.deposito_id.id)])\n yudha_obj_detail.write({'state':'paid'})\n\n return super(AccountPayment, self).post()\n","repo_name":"erickindratara/ysimpanpinjam","sub_path":"models/core_simpan_pinjam/yudha_account_payment.py","file_name":"yudha_account_payment.py","file_ext":"py","file_size_in_byte":9333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20493559323","text":"\"\"\" For in em Curso-de-Python Iterando strings com for. 
Função range (star=o, stop, step=1) \"\"\"\nfor n in range(0, 100, 8):\n print(n)\n\nprint('\\n##############\\n')\n\nfor n in range(100):\n if n % 8 == 0:\n print(n)\n\nprint('\\n##############\\n')\n\ntexto = 'o rato roeu a ropa do reu de roma.'\nprint(texto)\nnova_string = ''\n\nfor letra in texto:\n if letra == 'r':\n nova_string += letra.upper()\n else:\n nova_string += letra\nprint(nova_string)","repo_name":"Adriano1976/Curso-de-Python","sub_path":"Secao02-Logica-de-Programacao/Aula036-For-in/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31970347552","text":"# views.py\nfrom .forms import AllQuestionsForm, InstructorRegistrationForm, CustomUserCreationForm, ExtendedUserCreationForm\nfrom reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import QuizForm, QuestionForm, ChoiceForm\nfrom .models import Quiz, Question, Choice, UserResponse\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.http import Http404, JsonResponse\nfrom reportlab.lib.pagesizes import A4, inch, landscape\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import authenticate, login\nfrom reportlab.lib.pagesizes import letter\nfrom django.contrib.auth.models import User\nfrom django.forms import formset_factory\nfrom QuizApp.models import CustomUser\nfrom django.http import HttpResponse\nfrom reportlab.lib import colors\nfrom django import forms\nfrom uuid import uuid4\n\n# Create your views here.\n\ndef home(request):\n \n return render(request, 'QuizApp/home.html')\n\ndef instructor_register(request):\n if request.method == 'POST':\n form = 
InstructorRegistrationForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.is_instructor = True\n user.save()\n return redirect('instructor_login')\n else:\n form = InstructorRegistrationForm()\n return render(request, 'registration/instructor_register.html', {'form': form})\n\n\n@login_required\ndef instructor_dashboard(request):\n quizzes = Quiz.objects.all()\n return render(request, 'QuizApp/instructor_dashboard.html', {'quizzes': quizzes})\n\n@login_required\ndef create_quiz(request):\n if request.method == \"POST\":\n form = QuizForm(request.POST)\n if form.is_valid():\n quiz = form.save()\n return redirect('create_question', quiz_id=quiz.id)\n else:\n form = QuizForm()\n return render(request, 'QuizApp/create_quiz.html', {'form': form})\n\n@login_required\ndef create_question(request, quiz_id):\n quiz = Quiz.objects.get(id=quiz_id)\n if request.method == \"POST\":\n question_form = QuestionForm(request.POST)\n if question_form.is_valid():\n question = question_form.save(commit=False)\n question.quiz = quiz\n question.save()\n\n choices_text = [val for key, val in request.POST.items() if key.startswith('choice_text_')]\n choices_is_correct = {key.split(\"_\")[-1]: bool(val) for key, val in request.POST.items() if key.startswith('is_correct_')}\n\n for index, text in enumerate(choices_text):\n is_correct = choices_is_correct.get(str(index), False)\n Choice.objects.create(\n question=question,\n text=text,\n is_correct=is_correct\n )\n\n # Redirect to the same page to add more questions\n return redirect('create_question', quiz_id=quiz.id)\n else:\n question_form = QuestionForm()\n choice_form = ChoiceForm()\n existing_questions = Question.objects.filter(quiz=quiz)\n \n return render(request, 'QuizApp/create_question.html', {\n 'question_form': question_form, \n 'choice_form': choice_form,\n 'existing_questions': existing_questions,\n 'quiz': quiz\n })\n\n\n@login_required\ndef instructor_quizzes(request):\n quizzes = 
Quiz.objects.filter(created_by=request.user)\n return render(request, 'QuizApp/instructor_quizzes.html', {'quizzes': quizzes})\n\n@login_required\ndef edit_quiz(request, quiz_id):\n quiz = get_object_or_404(Quiz, id=quiz_id)\n if request.method == 'POST':\n form = QuizForm(request.POST, instance=quiz)\n if form.is_valid():\n form.save()\n return redirect('edit_question', quiz_id=quiz.id)\n\n else:\n form = QuizForm(instance=quiz)\n return render(request, 'QuizApp/edit_quiz.html', {'form': form})\n\n@login_required\ndef edit_question(request, question_id):\n question = get_object_or_404(Question, id=question_id)\n quiz = question.quiz # Assuming 'quiz' is a ForeignKey in your 'Question' model\n choices = Choice.objects.filter(question=question)\n ChoiceFormSet = formset_factory(ChoiceForm, extra=0)\n\n if request.method == 'POST':\n question_form = QuestionForm(request.POST, instance=question)\n choice_formset = ChoiceFormSet(request.POST, prefix='choices')\n \n if question_form.is_valid() and choice_formset.is_valid():\n question_form.save()\n \n # Update choices here\n for form, choice in zip(choice_formset, choices):\n if form.is_valid():\n updated_choice = form.save(commit=False)\n updated_choice.id = choice.id # Maintain the same choice id\n updated_choice.save()\n \n return redirect('instructor_dashboard')\n else:\n question_form = QuestionForm(instance=question)\n #choice_formset = ChoiceFormSet(initial=[{'choice_text': choice.choice_text, 'is_correct': choice.is_correct} for choice in choices], prefix='choices')\n choice_formset = ChoiceFormSet(initial=[{'choice_text': choice.text, 'is_correct': choice.is_correct} for choice in choices], prefix='choices')\n\n\n return render(request, 'QuizApp/edit_question.html', {'question_form': question_form, 'choice_formset': choice_formset})\n\n@login_required\ndef delete_quiz(request, quiz_id):\n quiz = get_object_or_404(Quiz, id=quiz_id)\n quiz.delete()\n return redirect('quiz_list') # Replace with your own URL 
pattern name\n\n\n\ndef register(request):\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.is_instructor = False\n user.save()\n return redirect('login')\n else:\n form = CustomUserCreationForm()\n return render(request, 'registration/register.html', {'form': form})\n\ndef custom_login(request):\n if request.method == 'POST':\n username = request.POST['username'] # You forgot this line\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n if user.is_instructor: # Change this line to use is_instructor field\n return redirect('instructor_dashboard')\n else:\n return redirect('student_dashboard')\n else:\n # Invalid login\n # You can add logic to handle invalid login attempts here\n pass\n else:\n form = AuthenticationForm() # Instantiate the form for GET requests\n return render(request, 'registration/login.html', {'form': form})\n \n \n@login_required\ndef student_dashboard(request):\n quizzes = Quiz.objects.all()\n return render(request, 'QuizApp/student_dashboard.html', {'quizzes': quizzes})\n\n\n@login_required\ndef take_quiz(request, quiz_id):\n quiz = Quiz.objects.get(id=quiz_id)\n duration = quiz.duration # Fetch the duration here\n questions = quiz.questions.all()\n \n if request.method == 'POST':\n total_score = 0\n quiz_session = str(uuid4())\n \n for index, question in enumerate(questions):\n field_name = f'question_{index}'\n user_answer_id = request.POST.get(field_name, None)\n \n if user_answer_id:\n user_answer = Choice.objects.get(id=user_answer_id)\n UserResponse.objects.create(\n user=request.user,\n quiz=quiz,\n question=question,\n choice=user_answer,\n quiz_session=quiz_session\n )\n \n if user_answer.is_correct:\n total_score += question.score\n \n return redirect('quiz_results', score=total_score, quiz_session=quiz_session)\n\n else:\n form = 
AllQuestionsForm(questions=questions)\n\n return render(request, 'QuizApp/take_quiz.html', {'quiz': quiz, 'form': form, 'duration': duration})\n\n@login_required\ndef quiz_results(request, score, quiz_session):\n user_responses = UserResponse.objects.filter(quiz_session=quiz_session)\n quiz = user_responses.first().quiz if user_responses else None # Assume all responses belong to the same quiz\n details = []\n\n if quiz:\n questions = quiz.questions.all()\n\n for question in questions:\n response = user_responses.filter(question=question).first()\n correct_answer = Choice.objects.get(question=question, is_correct=True)\n \n if response:\n user_answer = response.choice\n is_correct = user_answer == correct_answer\n else:\n user_answer = None\n is_correct = False\n \n details.append({\n 'question': question.text,\n 'user_answer': user_answer.text if user_answer else \"None\",\n 'correct_answer': correct_answer.text,\n 'is_correct': is_correct\n })\n\n return render(request, 'QuizApp/quiz_results.html', {'score': score, 'details': details, 'quiz_session': quiz_session})\n\n\n@login_required\ndef download_pdf(request, quiz_session):\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"Quiz-Results.pdf\"'\n\n user_responses = UserResponse.objects.filter(quiz_session=quiz_session)\n first_response = user_responses.first()\n if first_response:\n quiz = first_response.quiz\n questions = quiz.questions.all()\n else:\n return HttpResponse(\"No quiz found for the given session\")\n\n details = []\n\n for question in questions:\n user_response = user_responses.filter(question=question).first()\n correct_answer = Choice.objects.get(question=question, is_correct=True)\n\n details.append({\n 'question': question.text,\n 'user_answer': user_response.choice.text if user_response else 'None',\n 'correct_answer': correct_answer.text,\n 'is_correct': user_response.choice == correct_answer if user_response else False\n 
})\n\n score = sum(1 for d in details if d['is_correct'])\n\n # PDF generation logic here\n pdf = SimpleDocTemplate(\n response,\n pagesize=letter\n )\n\n styles = getSampleStyleSheet()\n elements = []\n\n elements.append(Paragraph(\"Your Score: {}\".format(score), styles['Title']))\n elements.append(Paragraph(\"Detailed Results:\", styles['Title']))\n\n for detail in details:\n elements.append(Paragraph(f\"Question: {detail['question']}\", styles['BodyText']))\n elements.append(Paragraph(f\"Your Answer: {detail['user_answer']}\", styles['BodyText']))\n elements.append(Paragraph(f\"Correct Answer: {detail['correct_answer']}\", styles['BodyText']))\n status = \"Correct\" if detail['is_correct'] else \"Incorrect\"\n elements.append(Paragraph(f\"Status: {status}\", styles['BodyText']))\n\n pdf.build(elements)\n\n return response","repo_name":"SaZOsadam/Test-Panel","sub_path":"QuizApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3885346058","text":"from django.contrib import admin\nfrom django.contrib.admin.options import InlineModelAdmin\nfrom django.utils.html import mark_safe\nfrom .models import Room, RoomType, Amenity, Facility, HouseRule, Photo\n\n\n@admin.register(RoomType, Amenity, Facility, HouseRule)\nclass ItemAdmin(admin.ModelAdmin):\n\n \"\"\"Item Admin Definition\"\"\"\n\n list_display = (\n \"name\",\n \"used_by\",\n )\n\n def used_by(self, obj):\n return obj.rooms.count()\n\n\nclass PhotoInline(admin.TabularInline):\n model = Photo\n\n\n@admin.register(Room)\nclass RoomAdmin(admin.ModelAdmin):\n\n \"\"\"Room Admin Definition\"\"\"\n\n inlines = (PhotoInline,)\n\n fieldsets = (\n (\n \"Basic Info\",\n {\n \"fields\": (\n \"name\",\n \"description\",\n \"room_type\",\n \"country\",\n \"city\",\n \"address\",\n \"price\",\n )\n },\n ),\n (\"Times\", {\"fields\": (\"check_in\", \"check_out\", \"instant_book\")}),\n 
(\"Spaces\", {\"fields\": (\"guests\", \"beds\", \"bedrooms\", \"baths\")}),\n (\n \"More About the Space\",\n {\n \"classes\": (\"collapse\",),\n \"fields\": (\n \"amenities\",\n \"facilities\",\n \"house_rules\",\n ),\n },\n ),\n (\"Last Details\", {\"fields\": (\"host\",)}),\n )\n raw_id_fields = (\"host\",)\n list_display = (\n \"name\",\n \"country\",\n \"city\",\n \"room_type\",\n \"price\",\n \"guests\",\n \"beds\",\n \"baths\",\n \"bedrooms\",\n \"check_in\",\n \"check_out\",\n \"instant_book\",\n \"count_amenities\",\n \"count_photos\",\n \"total_rating\",\n )\n ordering = (\"instant_book\",)\n list_filter = (\n \"instant_book\",\n \"host__superhost\",\n \"room_type\",\n \"amenities\",\n \"facilities\",\n \"house_rules\",\n \"city\",\n \"country\",\n )\n search_fields = (\n \"city\",\n \"^host__username\",\n \"name\",\n )\n filter_horizontal = (\n \"amenities\",\n \"facilities\",\n \"house_rules\",\n )\n\n # admin에서 모델 인스턴스를 저장할 때 호출되�� 메서드\n # 결과적으로 models.py에서 super().save() 해줘야 저장됨\n # 예를 들어, send_mail()코드를 넣으면 누군가 admin에서 모델 변경하면 메일이 날라오게 할 수도 있음\n def save_model(self, request, obj, form, change):\n # if (\n # obj.host != request.user\n # ): # admin에서 room의 호스트와 로그인한 사람이 같을때만 모델 변경이 가능하게끔 조건 걸 수 있음\n super().save_model(request, obj, form, change) # obj.save() 함\n\n # admin안의 함수는 2개의 파라미터를 가짐, self : RoomAdmin class, obj : room instance\n def count_amenities(self, obj):\n return obj.amenities.count()\n\n count_amenities.short_description = \"Amenities Count\"\n\n def count_photos(self, obj):\n return obj.photos.count()\n\n count_photos.short_description = \"Photo Count\"\n\n\n@admin.register(Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n\n \"\"\"Photo Admin Definition\"\"\"\n\n list_display = (\"__str__\", \"get_thumbnail\")\n\n def get_thumbnail(self, obj):\n return mark_safe(f\"\")\n\n get_thumbnail.short_description = 
\"Thumbnail\"\n","repo_name":"Odreystella/Pinkbnb","sub_path":"rooms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24047127326","text":"# # Run dependency injections\nimport os\nimport tekleo_common_utils\nfrom injectable import load_injection_container\nfrom tekleo_common_message_protocol import OdPrediction, RectanglePixel, PointPixel\nload_injection_container(str(os.path.dirname(tekleo_common_utils.__file__)))\nload_injection_container('../')\nfrom tekleo_common_utils import UtilsImage\nfrom tekleo_common_utils_ai.utils_dataset_labelme import UtilsDatasetLabelme\nfrom tekleo_common_utils_ai.utils_visualize_od import UtilsVisualizeOd\n\nutils_dataset_labelme = UtilsDatasetLabelme()\nutils_visualize_od = UtilsVisualizeOd()\nutils_image = UtilsImage()\n\n# Open labelme sample\nlabelme_folder_path = \"/Users/leo/tekleo/tekleo-common-utils-ai/tekleo_common_utils_ai/test_utils_dataset_pipes/dataset_labelme_original\"\nsamples = utils_dataset_labelme.load_samples_from_folder(labelme_folder_path)\nsample = samples[0]\n\n# Prepare image and predictions and class labels\nimage_cv = utils_image.convert_image_pil_to_image_cv(sample.image)\nimage_height = sample.image.height\nimage_width = sample.image.width\npredictions = []\nclass_labels = []\nfor item in sample.items:\n region = RectanglePixel(\n int(item.get_region().x * image_width),\n int(item.get_region().y * image_height),\n int(item.get_region().w * image_width),\n int(item.get_region().h * image_height)\n )\n\n mask = []\n for point in item.mask:\n mask.append(PointPixel(\n int(point.x * image_width),\n int(point.y * image_height)\n ))\n\n prediction = OdPrediction(item.label, 0.9, region, mask)\n predictions.append(prediction)\n class_labels.append(item.label)\nclass_labels = sorted(list(set(class_labels)))\n\n# Test debug\nutils_visualize_od.debug_predictions_coco(image_cv, 
predictions, class_labels )\n","repo_name":"JPLeoRX/tekleo-common-utils-ai","sub_path":"tekleo_common_utils_ai/test/test_utils_visualize_od.py","file_name":"test_utils_visualize_od.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73348448187","text":"import collections\nimport json\nimport threading\nimport time\nfrom typing import Any, Dict\n\nfrom kafka import KafkaConsumer, KafkaProducer\n\nfrom anteater.config import KafkaConf\nfrom anteater.utils.log import logger\n\n\nclass EntityVariable:\n \"\"\"\n The global variables which will be used to update\n some key settings through multiprocessors\n \"\"\"\n variable = None\n\n\nclass KafkaProvider:\n \"\"\"The Kafka provider provides consuming and producing\n messages from Kafka service\n \"\"\"\n\n def __init__(self, conf: KafkaConf) -> None:\n self.conf = conf\n producer_configs = {\n \"bootstrap_servers\": f\"{conf.server}:{conf.port}\"\n }\n consumer_configs = {\n \"bootstrap_servers\": f\"{conf.server}:{conf.port}\",\n \"auto_offset_reset\": \"earliest\",\n \"enable_auto_commit\": False,\n \"consumer_timeout_ms\": 1000,\n \"group_id\": conf.group_id,\n }\n\n if conf.auth_type == 'sasl_plaintext':\n self.config_kafka_sasl(producer_configs)\n self.config_kafka_sasl(consumer_configs)\n\n self.model_topic = conf.model_topic\n self.meta_topic = conf.meta_topic\n\n self.producer = KafkaProducer(**producer_configs)\n self.consumer = KafkaConsumer(self.meta_topic, **consumer_configs)\n\n self.metadata = collections.deque(maxlen=200)\n self.updating()\n\n def config_kafka_sasl(self, kafka_conf):\n \"\"\"Config kafka sasl plaintext\"\"\"\n kafka_conf['security_protocol'] = \"SASL_PLAINTEXT\"\n kafka_conf['sasl_mechanism'] = \"PLAIN\"\n kafka_conf['sasl_plain_username'] = self.conf.username\n kafka_conf['sasl_plain_password'] = self.conf.password\n\n def updating(self):\n t = 
threading.Thread(target=self.fetch_metadata, args=())\n t.start()\n\n def fetch_metadata(self):\n while True:\n for msg in self.consumer:\n data = json.loads(msg.value)\n metadata = {}\n metadata.update(data)\n self.metadata.append(metadata)\n time.sleep(5)\n\n def get_metadata(self, entity_name):\n for item in self.metadata:\n if item.get(\"entity_name\", \"\") == entity_name:\n return item.get(\"keys\", {})\n logger.error(f\"Unknown entity_name {entity_name} in metadata\")\n return {}\n\n def send_message(self, message: Dict[str, Any]):\n \"\"\"Sent the message to Kafka\"\"\"\n logger.info(f\"Sending the abnormal message to Kafka: {str(message)}\")\n self.producer.send(self.model_topic, json.dumps(message).encode('utf-8'))\n self.producer.flush()\n","repo_name":"openeuler-mirror/gala-anteater","sub_path":"anteater/provider/kafka.py","file_name":"kafka.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"2847535991","text":"import torch\nfrom torch.utils.data import Dataset\n\nclass TabularDataset(Dataset):\n def __init__(self, dataframe, class_weights):\n self.dataframe = dataframe\n self.class_weights = class_weights\n\n def __len__(self):\n return len(self.dataframe)\n\n def __getitem__(self, idx):\n row = self.dataframe.iloc[idx]\n features = torch.tensor(row.drop('label').values, dtype=torch.float)\n label = torch.tensor(row['label'], dtype=torch.float)\n weight = torch.tensor(self.class_weights[row['label']], dtype=torch.float)\n return features, label, weight\n\n\ndef get_dataloaders(batch_size, train_df, test_df, class_weights):\n train_dataset = TabularDataset(train_df, class_weights)\n test_dataset = TabularDataset(test_df, class_weights)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)\n\n return train_dataloader, 
test_dataloader","repo_name":"Ransaka/CustomTorch","sub_path":"datasets/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7783425102","text":"# -*- coding: utf-8 -*-\nimport sys\nimport json\nimport ipaddress\nimport logging\nimport validators\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom urllib.parse import urlparse\nfrom pymisp import MISPAttribute, MISPEvent, MISPObject\nfrom joe_parser_config import *\n\n\narch_type_mapping = {'ANDROID': 'parse_apk', 'LINUX': 'parse_elf', 'WINDOWS': 'parse_pe'}\ndomain_object_mapping = {'@ip': ('ip-dst', 'ip'), '@name': ('domain', 'domain')}\ndropped_file_mapping = {'@entropy': ('float', 'entropy'),\n '@file': ('filename', 'filename'),\n '@size': ('size-in-bytes', 'size-in-bytes'),\n '@type': ('mime-type', 'mimetype')}\ndropped_hash_mapping = {'MD5': 'md5', 'SHA': 'sha1', 'SHA-256': 'sha256', 'SHA-512': 'sha512'}\nelf_object_mapping = {'epaddr': 'entrypoint-address', 'machine': 'arch', 'osabi': 'os_abi'}\nelf_section_flags_mapping = {'A': 'ALLOC', 'I': 'INFO_LINK', 'M': 'MERGE',\n 'S': 'STRINGS', 'T': 'TLS', 'W': 'WRITE',\n 'X': 'EXECINSTR'}\nfile_object_fields = ['filename', 'md5', 'sha1', 'sha256', 'sha512', 'ssdeep']\nfile_object_mapping = {'entropy': ('float', 'entropy'),\n 'filesize': ('size-in-bytes', 'size-in-bytes'),\n 'filetype': ('mime-type', 'mimetype')}\nfile_references_mapping = {'fileCreated': 'creates', 'fileDeleted': 'deletes',\n 'fileMoved': 'moves', 'fileRead': 'reads', 'fileWritten': 'writes'}\nnetwork_behavior_fields = ('srcip', 'dstip', 'srcport', 'dstport')\nnetwork_connection_object_mapping = {'srcip': ('ip-src', 'ip-src'), 'dstip': ('ip-dst', 'ip-dst'),\n 'srcport': ('port', 'src-port'), 'dstport': ('port', 'dst-port')}\npe_object_fields = {'entrypoint': ('text', 'entrypoint-address'),\n 'imphash': ('imphash', 
'imphash')}\npe_object_mapping = {'CompanyName': 'company-name', 'FileDescription': 'file-description',\n 'FileVersion': 'file-version', 'InternalName': 'internal-filename',\n 'LegalCopyright': 'legal-copyright', 'OriginalFilename': 'original-filename',\n 'ProductName': 'product-filename', 'ProductVersion': 'product-version',\n 'Translation': 'lang-id'}\npe_section_object_mapping = {'characteristics': ('text', 'characteristic'),\n 'entropy': ('float', 'entropy'),\n 'name': ('text', 'name'), 'rawaddr': ('hex', 'offset'),\n 'rawsize': ('size-in-bytes', 'size-in-bytes'),\n 'virtaddr': ('hex', 'virtual_address'),\n 'virtsize': ('size-in-bytes', 'virtual_size')}\nprocess_object_fields = {'cmdline': 'command-line', 'name': 'name',\n 'parentpid': 'parent-pid', 'pid': 'pid',\n 'path': 'current-directory'}\nprotocols = {'tcp': 4, 'udp': 4, 'icmp': 3,\n 'http': 7, 'https': 7, 'ftp': 7}\nregistry_references_mapping = {'keyValueCreated': 'creates', 'keyValueModified': 'modifies'}\nregkey_object_mapping = {'name': ('text', 'name'), 'newdata': ('text', 'data'),\n 'path': ('regkey', 'key')}\nsignerinfo_object_mapping = {'sigissuer': ('text', 'issuer'),\n 'version': ('text', 'version')}\n\n\nclass JoeParser():\n def __init__(self, config):\n self.log = logging.getLogger(__name__)\n self.log.setLevel(logging.DEBUG)\n\n self.misp_event = MISPEvent()\n self.references = defaultdict(list)\n self.attributes = defaultdict(lambda: defaultdict(set))\n self.process_references = {}\n\n self.import_pe = config[\"import_pe\"]\n self.create_mitre_attack = config[\"mitre_attack\"]\n self.import_malware_config = config[\"import_malware_config\"]\n self.import_network_interactions = config[\"import_network_interactions\"]\n self.import_dropped_files = config[\"import_dropped_files\"]\n self.import_registry_activities = config[\"import_registry_activities\"]\n self.import_system_behavior = config[\"import_system_behavior\"]\n self.import_network_behavior = config[\"import_network_behavior\"]\n\n 
def parse_data(self, data):\n self.data = data\n if self.analysis_type() == \"file\":\n self.parse_fileinfo()\n else:\n self.parse_url_analysis()\n\n self.parse_screenshot()\n self.parse_threatname()\n\n if self.import_system_behavior:\n self.parse_system_behavior()\n \n if self.import_network_behavior:\n self.parse_network_behavior()\n\n if self.import_dropped_files:\n self.parse_dropped_files()\n\n if self.import_network_interactions:\n self.parse_network_interactions()\n \n if self.import_malware_config:\n self.parse_malwareconfig()\n\n if self.attributes:\n self.handle_attributes()\n\n if self.create_mitre_attack:\n self.parse_mitre_attack()\n\n def build_references(self):\n for misp_object in self.misp_event.objects:\n object_uuid = misp_object.uuid\n if object_uuid in self.references:\n for reference in self.references[object_uuid]:\n misp_object.add_reference(**reference)\n\n def handle_attributes(self):\n for attribute_type, attribute in self.attributes.items():\n for attribute_value, references in attribute.items():\n attribute_uuid = self.create_attribute(attribute_type, attribute_value)\n for reference in references:\n source_uuid, relationship = reference\n self.references[source_uuid].append(dict(referenced_uuid=attribute_uuid,\n relationship_type=relationship))\n\n def parse_dropped_files(self):\n droppedinfo = self.data['droppedinfo']\n if droppedinfo:\n for droppedfile in droppedinfo['hash']:\n file_object = MISPObject('file')\n for key, mapping in dropped_file_mapping.items():\n attribute_type, object_relation = mapping\n file_object.add_attribute(object_relation, **{'type': attribute_type, 'value': droppedfile[key], 'to_ids': False})\n if droppedfile['@malicious'] == 'true':\n file_object.add_attribute('state', **{'type': 'text', 'value': 'Malicious', 'to_ids': False})\n for h in droppedfile['value']:\n hash_type = dropped_hash_mapping[h['@algo']]\n file_object.add_attribute(hash_type, **{'type': hash_type, 'value': h['$'], 'to_ids': False})\n 
self.misp_event.add_object(**file_object)\n self.references[self.process_references[(int(droppedfile['@targetid']), droppedfile['@process'])]].append({\n 'referenced_uuid': file_object.uuid,\n 'relationship_type': 'drops'\n })\n\n def parse_mitre_attack(self):\n mitreattack = self.data['mitreattack']\n if mitreattack:\n for tactic in mitreattack['tactic']:\n if tactic.get('technique'):\n for technique in tactic['technique']:\n self.misp_event.add_tag('misp-galaxy:mitre-attack-pattern=\"{} - {}\"'.format(technique['name'], technique['id']))\n\n\n def check_ignore_ipaddr(self, ip):\n ignore = False\n for ignore_ip in ignore_ipaddr:\n if \"/\" in ignore_ip:\n if ipaddress.ip_address(ip) in ipaddress.ip_network(ignore_ip):\n ignore = True\n break\n else:\n if ipaddress.ip_address(ip) == ipaddress.ip_address(ignore_ip):\n ignore = True\n break\n return ignore\n\n\n def parse_network_behavior(self):\n network = self.data['behavior']['network']\n connections = defaultdict(lambda: defaultdict(set))\n for protocol, layer in protocols.items():\n if network.get(protocol):\n for packet in network[protocol]['packet']:\n try:\n if not (self.check_ignore_ipaddr(packet['srcip']) or self.check_ignore_ipaddr(packet['dstip'])):\n timestamp = datetime.strptime(self.parse_timestamp(packet['timestamp']), '%b %d, %Y %H:%M:%S.%f')\n connections[tuple(packet[field] for field in network_behavior_fields)][protocol].add(timestamp)\n except Exception as e:\n print(\"Error: %s\" % str(e))\n\n for connection, data in connections.items():\n attributes = self.prefetch_attributes_data(connection)\n if len(data.keys()) == len(set(protocols[protocol] for protocol in data.keys())):\n network_connection_object = MISPObject('network-connection')\n for object_relation, attribute in attributes.items():\n network_connection_object.add_attribute(object_relation, **attribute)\n network_connection_object.add_attribute('first-packet-seen',\n **{'type': 'datetime',\n 'value': min(tuple(min(timestamp) for 
timestamp in data.values())),\n 'to_ids': False})\n for protocol in data.keys():\n network_connection_object.add_attribute('layer{}-protocol'.format(protocols[protocol]),\n **{'type': 'text', 'value': protocol, 'to_ids': False})\n self.misp_event.add_object(**network_connection_object)\n self.references[self.analysisinfo_uuid].append(dict(referenced_uuid=network_connection_object.uuid,\n relationship_type='initiates'))\n else:\n for protocol, timestamps in data.items():\n network_connection_object = MISPObject('network-connection')\n for object_relation, attribute in attributes.items():\n network_connection_object.add_attribute(object_relation, **attribute)\n network_connection_object.add_attribute('first-packet-seen', **{'type': 'datetime', 'value': min(timestamps), 'to_ids': False})\n network_connection_object.add_attribute('layer{}-protocol'.format(protocols[protocol]), **{'type': 'text', 'value': protocol, 'to_ids': False})\n self.misp_event.add_object(**network_connection_object)\n self.references[self.analysisinfo_uuid].append(dict(referenced_uuid=network_connection_object.uuid,\n relationship_type='initiates'))\n\n def parse_screenshot(self):\n screenshotdata = self.data['behavior']['screenshotdata']\n if screenshotdata:\n screenshotdata = screenshotdata['interesting']['$']\n attribute = {'type': 'attachment', 'value': 'screenshot.jpg',\n 'data': screenshotdata, 'disable_correlation': True,\n 'to_ids': False}\n self.misp_event.add_attribute(**attribute)\n\n def parse_system_behavior(self):\n system = self.data['behavior']['system']\n if system.get('processes'):\n process_activities = {'fileactivities': self.parse_fileactivities,\n 'registryactivities': self.parse_registryactivities}\n for process in system['processes']['process']:\n no_correlation = False\n general = process['general']\n if general['name'] in disable_correlations:\n no_correlation = True\n\n process_object = MISPObject('process')\n for feature, relation in process_object_fields.items():\n 
process_object.add_attribute(relation, **{'type': 'text', 'value': general[feature], 'disable_correlation': no_correlation, 'to_ids': False})\n start_time = datetime.strptime('{} {}'.format(general['date'], general['time']), '%d/%m/%Y %H:%M:%S')\n process_object.add_attribute('start-time', **{'type': 'datetime', 'value': start_time, 'disable_correlation': no_correlation, 'to_ids': False})\n self.misp_event.add_object(**process_object)\n for field, to_call in process_activities.items():\n if process.get(field):\n to_call(process_object.uuid, process[field])\n self.references[self.analysisinfo_uuid].append(dict(referenced_uuid=process_object.uuid,\n relationship_type='calls'))\n self.process_references[(general['targetid'], general['path'])] = process_object.uuid\n\n def check_ignore_filenames(self, path):\n ignore = False\n for s in ignore_filenames_exact:\n if s.lower() == path.lower():\n ignore = True\n break\n\n if not ignore:\n for s in ignore_filenames_substr:\n if s.lower() in path.lower():\n ignore = True\n break\n\n return ignore\n\n\n def parse_fileactivities(self, process_uuid, fileactivities):\n for feature, files in fileactivities.items():\n # ignore unknown features\n if feature not in file_references_mapping:\n continue\n\n if files:\n for call in files['call']:\n if not self.check_ignore_filenames(call['path']):\n self.attributes['filename'][call['path']].add((process_uuid, file_references_mapping[feature]))\n\n\n def analysis_type(self):\n generalinfo = self.data['generalinfo']\n\n if generalinfo['target']['sample']:\n return \"file\"\n elif generalinfo['target']['url']:\n return \"url\"\n else:\n raise Exception(\"Unknown analysis type\")\n\n def parse_url_analysis(self):\n generalinfo = self.data[\"generalinfo\"]\n\n url_object = MISPObject(\"url\")\n self.analysisinfo_uuid = url_object.uuid\n\n url_object.add_attribute(\"url\", generalinfo[\"target\"][\"url\"], to_ids=False)\n self.misp_event.add_object(**url_object)\n\n def 
parse_fileinfo(self):\n fileinfo = self.data['fileinfo']\n\n file_object = MISPObject('file')\n self.analysisinfo_uuid = file_object.uuid\n \n for field in file_object_fields:\n file_object.add_attribute(field, **{'type': field, 'value': fileinfo[field], 'to_ids': False})\n for field, mapping in file_object_mapping.items():\n attribute_type, object_relation = mapping\n file_object.add_attribute(object_relation, **{'type': attribute_type, 'value': fileinfo[field], 'to_ids': False})\n arch = self.data['generalinfo']['arch']\n if arch in arch_type_mapping:\n to_call = arch_type_mapping[arch]\n getattr(self, to_call)(fileinfo, file_object)\n else:\n self.misp_event.add_object(**file_object)\n\n def parse_apk(self, fileinfo, file_object):\n apkinfo = fileinfo['apk']\n self.misp_event.add_object(**file_object)\n permission_lists = defaultdict(list)\n for permission in apkinfo['requiredpermissions']['permission']:\n permission = permission['@name'].split('.')\n permission_lists[' '.join(permission[:-1])].append(permission[-1])\n attribute_type = 'text'\n for comment, permissions in permission_lists.items():\n permission_object = MISPObject('android-permission')\n permission_object.add_attribute('comment', **dict(type=attribute_type, value=comment, to_ids=False))\n for permission in permissions:\n permission_object.add_attribute('permission', **dict(type=attribute_type, value=permission, to_ids=False))\n self.misp_event.add_object(**permission_object)\n self.references[file_object.uuid].append(dict(referenced_uuid=permission_object.uuid,\n relationship_type='grants'))\n\n def parse_elf(self, fileinfo, file_object):\n elfinfo = fileinfo['elf']\n self.misp_event.add_object(**file_object)\n attribute_type = 'text'\n relationship = 'includes'\n size = 'size-in-bytes'\n for fileinfo in elfinfo['file']:\n elf_object = MISPObject('elf')\n self.references[file_object.uuid].append(dict(referenced_uuid=elf_object.uuid,\n relationship_type=relationship))\n elf = 
fileinfo['main'][0]['header'][0]\n if elf.get('type'):\n # Haven't seen anything but EXEC yet in the files I tested\n attribute_value = \"EXECUTABLE\" if elf['type'] == \"EXEC (Executable file)\" else elf['type']\n elf_object.add_attribute('type', **dict(type=attribute_type, value=attribute_value, to_ids=False))\n for feature, relation in elf_object_mapping.items():\n if elf.get(feature):\n elf_object.add_attribute(relation, **dict(type=attribute_type, value=elf[feature], to_ids=False))\n sections_number = len(fileinfo['sections']['section'])\n elf_object.add_attribute('number-sections', **{'type': 'counter', 'value': sections_number, 'to_ids': False})\n self.misp_event.add_object(**elf_object)\n for section in fileinfo['sections']['section']:\n section_object = MISPObject('elf-section')\n for feature in ('name', 'type'):\n if section.get(feature):\n section_object.add_attribute(feature, **dict(type=attribute_type, value=section[feature], to_ids=False))\n if section.get('size'):\n section_object.add_attribute(size, **dict(type=size, value=int(section['size'], 16), to_ids=False))\n for flag in section['flagsdesc']:\n try:\n attribute_value = elf_section_flags_mapping[flag]\n section_object.add_attribute('flag', **dict(type=attribute_type, value=attribute_value, to_ids=False))\n except KeyError:\n print(f'Unknown elf section flag: {flag}')\n continue\n self.misp_event.add_object(**section_object)\n self.references[elf_object.uuid].append(dict(referenced_uuid=section_object.uuid,\n relationship_type=relationship))\n\n def parse_pe(self, fileinfo, file_object):\n if not self.import_pe:\n return\n try:\n peinfo = fileinfo['pe']\n except KeyError:\n self.misp_event.add_object(**file_object)\n return\n pe_object = MISPObject('pe')\n relationship = 'includes'\n file_object.add_reference(pe_object.uuid, relationship)\n self.misp_event.add_object(**file_object)\n for field, mapping in pe_object_fields.items():\n attribute_type, object_relation = mapping\n 
pe_object.add_attribute(object_relation, **{'type': attribute_type, 'value': peinfo[field], 'to_ids': False})\n pe_object.add_attribute('compilation-timestamp', **{'type': 'datetime', 'value': int(peinfo['timestamp'].split()[0], 16), 'to_ids': False})\n program_name = fileinfo['filename']\n if peinfo['versions']:\n for feature in peinfo['versions']['version']:\n name = feature['name']\n if name == 'InternalName':\n program_name = feature['value']\n if name in pe_object_mapping:\n pe_object.add_attribute(pe_object_mapping[name], **{'type': 'text', 'value': feature['value'], 'to_ids': False})\n sections_number = len(peinfo['sections']['section'])\n pe_object.add_attribute('number-sections', **{'type': 'counter', 'value': sections_number, 'to_ids': False})\n signatureinfo = peinfo['signature']\n if signatureinfo['signed']:\n signerinfo_object = MISPObject('authenticode-signerinfo')\n pe_object.add_reference(signerinfo_object.uuid, 'signed-by')\n self.misp_event.add_object(**pe_object)\n signerinfo_object.add_attribute('program-name', **{'type': 'text', 'value': program_name, 'to_ids': False})\n for feature, mapping in signerinfo_object_mapping.items():\n attribute_type, object_relation = mapping\n signerinfo_object.add_attribute(object_relation, **{'type': attribute_type, 'value': signatureinfo[feature], 'to_ids': False})\n self.misp_event.add_object(**signerinfo_object)\n else:\n self.misp_event.add_object(**pe_object)\n for section in peinfo['sections']['section']:\n section_object = self.parse_pe_section(section)\n self.references[pe_object.uuid].append(dict(referenced_uuid=section_object.uuid,\n relationship_type=relationship))\n self.misp_event.add_object(**section_object)\n\n def parse_pe_section(self, section):\n section_object = MISPObject('pe-section')\n for feature, mapping in pe_section_object_mapping.items():\n if section.get(feature):\n attribute_type, object_relation = mapping\n section_object.add_attribute(object_relation, **{'type': attribute_type, 
'value': section[feature], 'to_ids': False})\n return section_object\n\n def parse_network_interactions(self):\n domaininfo = self.data['domaininfo']\n if domaininfo:\n for domain in domaininfo['domain']:\n if domain['@ip'] != 'unknown':\n domain_object = MISPObject('domain-ip')\n for key, mapping in domain_object_mapping.items():\n attribute_type, object_relation = mapping\n domain_object.add_attribute(object_relation,\n **{'type': attribute_type, 'value': domain[key], 'to_ids': False})\n self.misp_event.add_object(**domain_object)\n reference = dict(referenced_uuid=domain_object.uuid, relationship_type='contacts')\n self.add_process_reference(domain['@targetid'], domain['@currentpath'], reference)\n else:\n attribute = MISPAttribute()\n attribute.from_dict(**{'type': 'domain', 'value': domain['@name'], 'to_ids': False})\n self.misp_event.add_attribute(**attribute)\n reference = dict(referenced_uuid=attribute.uuid, relationship_type='contacts')\n self.add_process_reference(domain['@targetid'], domain['@currentpath'], reference)\n\n ipinfo = self.data['ipinfo']\n if ipinfo:\n for ip in ipinfo['ip']:\n if not self.check_ignore_ipaddr(ip['@ip']):\n attribute = MISPAttribute()\n attribute.from_dict(**{'type': 'ip-dst', 'value': ip['@ip'], 'to_ids': False})\n self.misp_event.add_attribute(**attribute)\n reference = dict(referenced_uuid=attribute.uuid, relationship_type='contacts')\n self.add_process_reference(ip['@targetid'], ip['@currentpath'], reference)\n\n urlinfo = self.data['urlinfo']\n if urlinfo:\n for url in urlinfo['url']:\n if url['@name'] in ignore_url:\n continue\n target_id = int(url['@targetid'])\n current_path = url['@currentpath']\n attribute = MISPAttribute()\n attribute_dict = {'type': 'url', 'value': url['@name'], 'to_ids': False}\n if target_id != -1 and current_path != 'unknown':\n self.references[self.process_references[(target_id, current_path)]].append({\n 'referenced_uuid': attribute.uuid,\n 'relationship_type': 'contacts'\n })\n else:\n 
attribute_dict['comment'] = 'From Memory - Enriched via the joe_import module'\n attribute.from_dict(**attribute_dict)\n self.misp_event.add_attribute(**attribute)\n\n def parse_registryactivities(self, process_uuid, registryactivities):\n if not self.import_registry_activities:\n return\n\n if registryactivities['keyCreated']:\n for call in registryactivities['keyCreated']['call']:\n self.attributes['regkey'][call['path']].add((process_uuid, 'creates'))\n for feature, relationship in registry_references_mapping.items():\n if registryactivities[feature]:\n for call in registryactivities[feature]['call']:\n registry_key = MISPObject('registry-key')\n for field, mapping in regkey_object_mapping.items():\n attribute_type, object_relation = mapping\n registry_key.add_attribute(object_relation, **{'type': attribute_type, 'value': call[field], 'to_ids': False})\n registry_key.add_attribute('data-type', **{'type': 'text', 'value': 'REG_{}'.format(call['type'].upper()), 'to_ids': False})\n self.misp_event.add_object(**registry_key)\n self.references[process_uuid].append(dict(referenced_uuid=registry_key.uuid,\n relationship_type=relationship))\n\n def parse_threatname(self):\n sigdetections = self.data['signaturedetections']\n if sigdetections:\n threatname = sigdetections['strategy'][1]['threatname']\n threatname_malpedia = threatname\n if threatname and threatname != \"Unknown\":\n for k,v in threatname_mapping.items():\n for i in v:\n if threatname==i or threatname==i.lower():\n threatname_malpedia=k\n\n self.misp_event.add_tag('mwdb:family=\"{}\"'.format(threatname.lower()))\n self.misp_event.add_tag('misp-galaxy:malpedia=\"{}\"'.format(threatname_malpedia))\n\n \n def parse_malwareconfig(self):\n malwareconfigs = self.data['malwareconfigs']\n if malwareconfigs:\n for mw in malwareconfigs['config']:\n threat = mw[\"@threatname\"]\n config = json.loads(mw[\"$\"])\n self.create_attribute('other', str(config), attribute_tags)\n\n # parses domains and ipaddrs\n try:\n 
self.log.debug(\"Threat keys: %s\" % configs_keys_c2.keys())\n if threat.lower() in configs_keys_c2.keys():\n self.log.debug(\"Working config for threat %s\" % threat.lower())\n c2attr = list()\n c2conf = configs_keys_c2[threat.lower()]\n c2field = None\n #self.log.debug(\"config keys: %s\" % config.keys())\n #self.log.debug(\"c2conf: %s\" % c2conf)\n for f in config.keys():\n if c2conf['c2key'] in f:\n c2field = config[f]\n self.log.debug(\"C2 field is '%s' with values %s\" % (f, c2field))\n break\n\n if c2field:\n \n # ip, ipport, domain, domaincsv, domainport, url,\n if c2conf['c2type'] == \"ip\":\n if isinstance(c2field, list):\n for c2 in c2field:\n c2attr.append({'c2': c2, 'port': c2conf['defaultport'], 'type': 'ip'})\n else:\n c2attr.append({'c2': c2field, 'port': c2conf['defaultport'], 'type': 'ip'})\n\n elif c2conf['c2type'] == \"ipport\":\n if isinstance(c2field, list):\n for c2 in c2field:\n try:\n ip,port = c2.split(\":\")\n c2attr.append({'c2': ip, 'port': port, 'type': 'ip'})\n except:\n self.log.debug(\"Ignoring C2 in the wrong format %s: %s\" % (c2conf['c2type'], c2))\n else:\n ip,port = c2field.split(\":\")\n c2attr.append({'c2': ip, 'port': port, 'type': 'ip'})\n \n\n elif c2conf['c2type'] == \"domain\":\n if isinstance(c2field, list):\n for c2 in c2field:\n c2attr.append({'c2': c2, 'port': c2conf['defaultport'], 'type': 'domain'})\n else:\n c2attr.append({'c2': c2field, 'port': c2conf['defaultport'], 'type': 'domain'})\n\n elif c2conf['c2type'] == \"domaincsv\":\n domains = c2field.split(\";\")\n for d in domains:\n c2attr.append({'c2': d, 'port': c2conf['defaultport'], 'type': 'domain'})\n\n elif c2conf['c2type'] == \"domainport\":\n if isinstance(c2field, list):\n for c2 in c2field:\n domain,port = c2.split(\":\")\n c2attr.append({'c2': domain, 'port': port, 'type': 'domain'})\n else:\n domain,port = c2field.split(\":\")\n c2attr.append({'c2': domain, 'port': port, 'type': 'domain'})\n\n elif c2conf['c2type'] == \"url\":\n if 
isinstance(c2field, list):\n for c2 in c2field:\n c2attr.append(self.parse_c2_url(c2))\n\n else:\n c2attr.append(self.parse_c2_url(c2field))\n\n #self.log.debug(\"C2ATTR = %s\" % c2attr)\n\n for c2 in c2attr:\n if c2['type'] == \"ip\":\n t = \"ip-dst|port\"\n else:\n t = \"hostname|port\"\n\n self.create_attribute(t, \"%s:%s\" % (str(c2['c2']),str(c2['port'])), attribute_tags)\n\n else:\n self.log.debug(\"No C2 field found in %s config\" % threat)\n\n break\n\n except Exception as e:\n self.log.error(\"Couldn't parse C2 info from %s config: %s\" % (threat, str(e)))\n\n else:\n self.log.debug(\"No malware config to process\")\n\n\n def parse_c2_url(self, c2):\n url = urlparse(c2)\n netloc = url.netloc.split(\":\")\n if len(netloc) > 1:\n port = netloc[1]\n else:\n if url.scheme == 'https':\n port = 443\n else:\n port = 80\n\n if validators.ipv4(netloc[0]):\n return {'c2': netloc[0], 'port': port, 'type': 'ip'}\n else:\n return {'c2': netloc[0].lower(), 'port': port, 'type': 'domain'}\n\n\n def add_process_reference(self, target, currentpath, reference):\n try:\n self.references[self.process_references[(int(target), currentpath)]].append(reference)\n except KeyError:\n self.references[self.analysisinfo_uuid].append(reference)\n\n def create_attribute(self, attribute_type, attribute_value, attribute_tags=list()):\n attribute = MISPAttribute()\n attribute.from_dict(**{'type': attribute_type, 'value': attribute_value, 'to_ids': False})\n for tag in attribute_tags:\n attribute.add_tag(tag)\n self.misp_event.add_attribute(**attribute)\n return attribute.uuid\n\n def finalize_results(self):\n if self.references:\n self.build_references()\n event = json.loads(self.misp_event.to_json())\n self.results = {key: event[key] for key in ('Attribute', 'Object', 'Tag') if (key in event and event[key])}\n\n @staticmethod\n def parse_timestamp(timestamp):\n timestamp = timestamp.split(':')\n timestamp[-1] = str(round(float(timestamp[-1].split(' ')[0]), 6))\n return 
':'.join(timestamp)\n\n @staticmethod\n def prefetch_attributes_data(connection):\n attributes = {}\n for field, value in zip(network_behavior_fields, connection):\n attribute_type, object_relation = network_connection_object_mapping[field]\n attributes[object_relation] = {'type': attribute_type, 'value': value, 'to_ids': False}\n return attributes\n","repo_name":"vsantola/misp-modules","sub_path":"misp_modules/lib/joe_parser.py","file_name":"joe_parser.py","file_ext":"py","file_size_in_byte":33771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"35156259853","text":"import requests\nfrom raft import VoteRequest, serialize\nimport sys\n\na = int(sys.argv[1])\nb = int(sys.argv[2])\nc = int(sys.argv[3])\nd = int(sys.argv[4])\nport = int(sys.argv[5])\n\nvote_request = VoteRequest(a,b,c,d)\nprint(\"This is the vote_request:\")\nprint(vote_request)\n\nres = requests.post(\n f\"http://localhost:{port}/request-vote/1234\", json=serialize(vote_request)\n)\nif res.ok:\n print(res.json())","repo_name":"cathcharles108/consensus-message-queue","sub_path":"test/election_client.py","file_name":"election_client.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8164910031","text":"from pathfinder import find_paths\n\n# get all directories and sub-directories in current directory\npaths = find_paths(\".\", just_dirs=True)\n\n# get all files in the current directory and all sub-directories\npaths = find_paths(\".\", just_files=True)\n\n# get all jpg files using a regex\npaths = find_paths(\".\", regex=r\".*\\.jpg$\")\n\n# get all jpg files using posix wildcards\npaths = find_paths(\".\", fnmatch=\"*.jpg\")\n\n# get all jpg files and png files\nfrom pathfinder import FnmatchFilter\nfrom pathfinder import OrFilter\n\njpg_filter = FnmatchFilter(\"*.jpg\")\npng_filter = FnmatchFilter(\"*.png\")\ngif_filter = 
FnmatchFilter(\"*.gif\")\nimage_filter = OrFilter(jpg_filter, png_filter, gif_filter)\npaths = find_paths(\".\", filter=image_filter)\n\n# shortcut using bitwise or\npaths = find_paths(\".\", filter=jpg_filter | png_filter | gif_filter)\n\n# even shorter using ImageFilter to find all images\nfrom pathfinder import ImageFilter\n\npaths = find_paths(\".\", filter=ImageFilter())\n\n# and an even shorter way\npaths = ImageFilter().find_paths(\".\")\n","repo_name":"jkeyes/pathfinder","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"} +{"seq_id":"34289555161","text":"#1-3: Preprocess each Inbound File\r\ndef ftp(ETL_Directory): \r\n\r\n import pandas as pd\r\n import subprocess\r\n print('Executing ftp.py, 1-3') \r\n\r\n #1-3a: preprocess each file with its respective preprocessor\r\n print('Executing 1-3a')\r\n try:\r\n \r\n #1-3b: preprocessor file, replace \\\\ with \\\r\n step = '1-3b'\r\n ftp_script = str(ETL_Directory.loc[i, 'FTP_Filepath']).replace(\"'\",\"\").replace(\"\\\\\\\\\",\"\\\\\").strip()\r\n \r\n if ftp_script == 'None':\r\n print('No ftp script found')\r\n return\r\n \r\n #1-3d: preprocess string + inbound file\r\n step = '1-3d'\r\n ftp_script_execution = f'\"C:\\Program Files (x86)\\WinSCP\\WinSCP.com\" -script={ftp_script}'\r\n \r\n #1-3e: execute preprocessor\r\n step = '1-3e'\r\n print(\"Executing 1-3e: Running Shell command: \" + ftp_script_execution)\r\n subprocess.call(ftp_script_execution, shell=True)\r\n \r\n except:\r\n print('Error in ftp.py execution')\r\n raise Exception()","repo_name":"dhyoon1112/ihs_etl_automation","sub_path":"1/ftp.py","file_name":"ftp.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7733869778","text":"from metric_learn import LMNN\nimport numpy as np\nimport pandas as pd\nfrom 
sklearn.neighbors import KNeighborsClassifier\n\n\nclass IterativeMetricLearning:\n def __init__(self, target_name, n_neighbors=5, regularization=0.5, base_classifier=KNeighborsClassifier(),\n max_iter=1, label_tupel=(0, 1),\n top_positive_number=3, top_negative_number=3, matching_ratio=0.8):\n\n \"\"\"\n\n :param target_name:\n :param n_neighbors: number of the neighbors\n :param regularization: relative weight between pull and push terms in large margin nearest neighbor algorithm\n :param base_classifier:\n :param max_iter: maximal iteration for LMNN\n :param label_tupel: label of negative and positive in tuple\n :param top_positive_number: number of selected positive nearest neighbors\n :param top_negative_number: number of selected negative nearest neighbors\n :param matching_ratio:\n \"\"\"\n self.y_train = None\n self.X_train = None\n self.regularization = regularization\n self.n_neighbors = n_neighbors\n self.base_classifier = base_classifier\n self.max_iter = max_iter\n self.n_positive = top_positive_number\n self.n_negative = top_negative_number\n self.target_name = target_name\n self.matching_ratio = matching_ratio\n self.predict_proba_list = []\n self.transformed_X_train = []\n self.label_tupel = label_tupel\n\n # iterative metric learning\n def data_space_metric_learning(self, x, y, testInstance):\n\n \"\"\"\n data space metric learning by LMNN and selection of sub training set for the given testing sample\n :param x: features\n :param y: labels\n :param testInstance: testing sample\n :return: selected sub training set for the current sample\n \"\"\"\n\n # distance metric learning by LMNN\n lmnn = LMNN(regularization=self.regularization, k=self.n_neighbors, max_iter=1)\n\n self.transformed_X_train = pd.DataFrame(lmnn.fit_transform(x, y), columns=x.columns)\n\n y.columns = [self.target_name]\n X_y_train = pd.concat([self.transformed_X_train, y], axis=1).reset_index(drop=True)\n\n pos_p = pd.DataFrame(X_y_train.loc[X_y_train[self.target_name] == 
self.label_tupel[1]])\n neg_p = pd.DataFrame(X_y_train.loc[X_y_train[self.target_name] == self.label_tupel[0]])\n\n # current testing sample\n testInstance = np.array(testInstance).reshape(1, -1)\n\n # find the positive neighbors\n clf_positive = KNeighborsClassifier(n_neighbors=self.n_positive)\n clf_positive.fit(pos_p.iloc[:, :-1], pos_p.iloc[:, -1])\n index_pos = clf_positive.kneighbors(testInstance, return_distance=False)\n pos_neighbors = pos_p.iloc[index_pos[0], :]\n\n # find the negative neighbors\n clf_negative = KNeighborsClassifier(n_neighbors=self.n_negative)\n clf_negative.fit(neg_p.iloc[:, :-1], neg_p.iloc[:, -1])\n index_neg = clf_negative.kneighbors(testInstance, return_distance=False)\n neg_neighbors = neg_p.iloc[index_neg[0], :]\n\n # set up the sub training set for the current testing sample\n sub_training_set = pd.concat([pos_neighbors, neg_neighbors], axis=0)\n\n return sub_training_set\n\n def data_matching(self, previous_set, cur_set, matching_ratio):\n \"\"\"\n Compare the element of previous selected sub training set and that of the current selected sub training set\n :param previous_set: selected sub training set by the last iteration\n :param cur_set: selected sub training set by the current iteration\n :param matching_ratio: the given matching ratio\n :return:\n \"\"\"\n cnt = 0\n previous_index = previous_set.index\n current_index = cur_set.index\n\n for i in previous_index:\n if i in current_index:\n cnt += 1\n\n # If more than a certain percentage of samples are selected again, it can be considered that the current neighborhood is stable\n if cnt / cur_set.shape[0] >= matching_ratio:\n\n return True\n else:\n return False\n\n def fit(self, X_train, y_train):\n self.X_train = X_train\n self.y_train = y_train\n\n def predict(self, X_test):\n print('k=', self.n_neighbors)\n predict_res = []\n predict_proba = []\n i = 0\n for index in range(X_test.shape[0]):\n\n test_sample = X_test.iloc[index, :]\n\n trigger = False\n cnt = 0\n 
previous_set = []\n curr_set = []\n while True:\n\n if trigger:\n break\n\n if cnt == 0:\n previous_set = self.data_space_metric_learning(self.X_train, self.y_train, test_sample)\n cnt += 1\n continue\n\n curr_set = self.data_space_metric_learning(self.transformed_X_train, self.y_train, test_sample)\n\n trigger = self.data_matching(previous_set, curr_set, self.matching_ratio)\n previous_set = curr_set\n cnt += 1\n\n final_set = curr_set\n lmnn = LMNN(regularization=self.regularization, k=self.n_neighbors, max_iter=1) \\\n .fit(final_set.iloc[:, :-1], final_set.iloc[:, -1])\n\n self.base_classifier = self.base_classifier.set_params(**{'metric': lmnn.get_metric()})\n\n self.base_classifier.fit(final_set.iloc[:, :-1], final_set.iloc[:, -1])\n test_sample = np.array(test_sample).reshape(1, -1)\n predicted_label = self.base_classifier.predict(test_sample)[0]\n probability = self.base_classifier.predict_proba(test_sample)[0]\n predict_res.append(predicted_label)\n predict_proba.append(probability)\n i += 1\n\n self.predict_proba_list = predict_proba\n\n return np.array(predict_res)\n\n def predict_proba(self):\n return np.array(self.predict_proba_list)\n","repo_name":"carrieMrJ/ClassImbalanceClassification","sub_path":"imbalance_comparison/iterative_metric/iml_model.py","file_name":"iml_model.py","file_ext":"py","file_size_in_byte":6045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6057373015","text":"from application import db\nfrom application.models import * \nfrom datetime import date\n\ndb.drop_all()\ndb.create_all()\n\ncustomer1 = Customer(Forename = \"Sibel\", Surname = \"Hassan\", Email = \"S_hassan97@hotmail.co.uk\", Address = \"80 Road Edmonton\")\nbook1 = Book(ISBN = \"9780140430721\", Title= \"Pride & Prejudice\", Author = \"Jane Austin\", Price = 5.75)\norder1 = Orders(orderDate = date(2022,7,26), customerID = 1)\nbookOrder1 = BookOrder(Quantity = 1, orderID = 1, ISBN = 
\"9780140430721\")\n\ndb.session.add(customer1)\ndb.session.add(book1)\ndb.session.add(order1)\ndb.session.add(bookOrder1)\ndb.session.commit()\n\nprint(customer1)\nprint(book1)\nprint(order1)\nprint(bookOrder1)","repo_name":"Sibel97/ProjectBookStore","sub_path":"create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19805260995","text":"import sys\nsys.stdin = open('../input.txt', 'r')\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\nlefts, rights = [], []\nfor _ in range(n):\n start, destination = map(int, input().split())\n if start > destination:\n lefts.append(destination)\n rights.append(start)\nif lefts:\n ans = m + (max(rights) - min(lefts)) * 2\nelse:\n ans = m\nprint(ans)\n","repo_name":"onewns/TIL","sub_path":"algorithm/unsolved/BOJ_2836.py","file_name":"BOJ_2836.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29115410038","text":"import re\nimport requests\n\nimport json\nwith open('api.json', 'r') as f:\n data = json.load(f)\napi = data['caiyun']\n\ndef get_translate(content):\n if '|' in content or '|' in content:\n source, direction = re.split(r'[\\|\\|\\s]+', content)\n else:\n source = content\n# zhPattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n zhPattern = re.compile(r'^[\\u4E00-\\u9FFF]+$')\n match = zhPattern.search(source)\n if match:\n direction = 'zh2en'\n else:\n direction = 'auto2zh'\n \n url = \"http://api.interpreter.caiyunai.com/v1/translator\"\n token = api\n payload = {\n \"source\" : source, \n \"trans_type\" : direction,\n \"request_id\" : \"demo\",\n \"detect\": True,\n }\n \n headers = {\n 'content-type': \"application/json\",\n 'x-authorization': \"token \" + token,\n }\n \n r = requests.request(\"POST\", url, json=payload, headers=headers)\n return 
r.json()['target']","repo_name":"SukiYume/XiaoQing","sub_path":"caiyun.py","file_name":"caiyun.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"6645180990","text":"# Name: Joey Huang\n# Date: 1/22-23/2021\n# File: main.py\n# Description: Basic Python script using the calendar module that allows the \n# user to scroll back and forth displaying different months. Loops until \n# user enters STOP.\n\nimport calendar\n\ndef printCalendar(intYear, intMonth):\n print(calendar.month(intYear,intMonth)) \n print(\"< | >\\n\")\n print(\">>> Enter STOP to exit...\\n\")\n\nif __name__ == \"__main__\":\n print(\"=================================================================\")\n print(\">>> Welcome to your interactive calendar! Are you ready to start?\")\n\n # Not case sensitive\n reply = input(\">>> Enter YES or NO: \\n\")\n if reply.lower() == \"yes\" or reply.upper() == \"YES\":\n year = int(input(\"Enter year: \\n\"))\n month = int(input(\"Enter month: \\n\"))\n printCalendar(year, month)\n reply = input()\n\n # Loops until STOP/stop is entered\n while not(reply.lower() == \"stop\" or reply.upper() == \"STOP\"):\n if reply == \"<\":\n # Accounts for January and December respectively\n if month == 1: \n month = 12\n year -= 1\n else:\n month -= 1\n else:\n if month == 12:\n month = 1\n year += 1\n else:\n month += 1\n printCalendar(year, month)\n reply = input()\n\n elif reply.lower() == \"no\" or reply.upper() == \"NO\":\n print(\">>> Good bye!\")\n else:\n print(\">>> Invalid response.\")\n print(\"=================================================================\")\n\n\n\n\n\n\n #print(calendar.calendar(2021,2,1,6))\n 
#print(calendar.month(2021,2,1,1))","repo_name":"AoWangPhilly/productivity-bot","sub_path":"gordon/src/calendar/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16580335662","text":"import sys\ninput=sys.stdin.readline\nfrom collections import deque\nn, m=map(int, input().split())\n\nmap=[]\nfor i in range(n):\n a=input().rstrip()\n map.append([int(i) for i in a])\n\nroute=deque()\nroute.append(deque())\nroute.append(deque())\nroute[0].append([0,0])\ncnt=1\ntmp=[]\ntemp=0\nwhile(1):\n if len(route[0])==0:\n route.append(deque())\n route.popleft()\n cnt+=1\n pos=route[0].popleft()\n map[pos[0]][pos[1]]=-1\n if pos==[n-1, m-1]:\n break\n if pos[0]>0 and map[pos[0]-1][pos[1]]>0:\n map[pos[0]-1][pos[1]]=-1\n route[1].append([pos[0]-1,pos[1]])\n if pos[1]>0 and map[pos[0]][pos[1]-1]>0:\n map[pos[0]][pos[1]-1]=-1\n route[1].append([pos[0],pos[1]-1])\n if pos[0]0:\n map[pos[0]+1][pos[1]]=-1\n route[1].append([pos[0]+1,pos[1]])\n if pos[1]0:\n map[pos[0]][pos[1]+1]=-1\n route[1].append([pos[0],pos[1]+1])\n\nprint(cnt)\n","repo_name":"kjae0/leetcode-BOJ","sub_path":"Baekjoon PS/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70395114738","text":"\n# Returns a bool indicating whether the list is sorted (i.e. 
is non-decreasing,\n# it is okay to have adjacent elements which are equal).\n#\n# Arguments:\n# l (type: list of ints): list that may or may not be sorted.\n#\n# Example:\n# list_is_sorted([1,2,2,8,9]) should return True.\n# list_is_sorted([1,2,2,8,6]) should return False.\ndef list_is_sorted(l):\n # We will iterate through the elements of the list, and compare each element\n # with the next element (except for the last element of the list, which\n # does not have a next element).\n #\n # This variable tracks the index we are currently at. We initially set it to 0\n # because we want to start at the beginning of the list.\n index = 0\n\n # Once index reaches len(l)-1, there is no longer a \"next\" element to compare\n # to, so we can stop the while loop.\n while index < len(l) - 1:\n # If the element at index is strictly larger than the next element, then l\n # is not sorted. In this case, we can immediately return False.\n if l[index] > l[index + 1]:\n return False\n\n # Update index to one larger than its previous value.\n index = index + 1\n\n # We have iterated through the entire list, and every element is\n # less-than-or-equal-to the next element (expect for the last element, for\n # which there is no next element), so the list is sorted, and we can return\n # True.\n return True\n\n\n# This should print True\nprint(list_is_sorted([1,2,2,8,9]))\n\n# This should print False\nprint(list_is_sorted([1,2,2,8,6]))\n","repo_name":"ethantkoenig/python_practice_problems","sub_path":"list_is_sorted/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"24949318268","text":"from functools import reduce\n\ndef f(x,y):\n return x+y\n\na=reduce(f,[1,2,3,4,5])\nprint(a)\n\nprint('\\n----------------')\n\nff=lambda x,y:x+y\n\nb=reduce(lambda x,y:x+y,[1,2,3,4,5])\nprint(b)\n\n\ndef add_end(L=[]):\n L.append('END')\n return 
L\n\nprint(add_end([]))","repo_name":"cnishop/pycode","sub_path":"匿名函数.py","file_name":"匿名函数.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38730282477","text":"from devtools import debug\nfrom fire import Fire\n\nfrom om2seq.benchmarks import Benchmark\nfrom utils.dataset_tasks import BaseTask\n\n\nclass Timing(BaseTask):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.benchmark = Benchmark()\n self.benchmark.init_dataset()\n self.benchmark.init_om2seq()\n self.qry_len = 100000\n self.crops = self.benchmark.generate_crops(qry_len=self.qry_len, limit=1000)\n self.eval_om_seq = self.benchmark.eval_om2seq(crops=self.crops)\n\n def om2seq(self):\n for i in range(2):\n with debug.timer('inference'):\n inference = self.eval_om_seq.inference(queries=self.crops)\n\n with debug.timer('retrieval'):\n retrieved = self.eval_om_seq.retrieve(query_embeddings=inference)\n\nif __name__ == '__main__':\n Fire(Timing)\n","repo_name":"yevgenin/om2seq","sub_path":"src/om2seq/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37686766921","text":"\"\"\"module to create text file with one word per line\r\n\r\nRichard Sapp\r\n13 June, 2023\r\n\r\n\"\"\"\r\n\r\n# word library\r\nimport nltk\r\n\r\nfrom nltk.corpus import words\r\n\r\n# only file used here is the word_list\r\nfile_extension = \"word_list.txt\"\r\n\r\nenglish_words = words.words()\r\n\r\n# change this number to change word length\r\n# filtered_words = [word for word in english_words if len(word) <= 7] # names included\r\nfiltered_words = [word for word in english_words if len(word) <= 7 and word[0].islower()] # names excluded\r\n\r\n# writes text file\r\nwith open(file_extension, \"w\") as file:\r\n file.write('\\n'.join(filtered_words))\r\n\r\nprint(\"File 
'word_list.txt' has been created.\")\r\n\r\n\r\n","repo_name":"HighwayChile/Fifteens","sub_path":"generate_word_list.py","file_name":"generate_word_list.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"44873551152","text":"#pipeline_functions\nimport pandas as pd\nfrom rdkit import Chem\nfrom rdkit.Chem import Descriptors\n\ndef read_dataframe_columns(file_name,sep,columns=0):\n '''\n Read a data table from a CSV file and return a dataframe \n containing the columns specified as a parameter in list format\n '''\n data_raw = pd.read_csv(file_name,sep=sep)\n if columns==0:\n data_filtered = data_raw\n else: \n data_filtered = data_raw[columns]\n return (data_filtered)\n\n\ndef data_filtering(df,column,criteria):\n '''\n Receives a dataframe and a filtering criteria, \n returns the dataframe only with the parameters that match the criteria. '''\n columns = df.columns\n for i in range(len(columns)):\n is_criteria = df[column] == criteria\n df_filtered = df[is_criteria]\n return (df_filtered)\n\n\ndef data_intersection(data,selection_data,column_df,column_selection_data,filter_name,data_type):\n '''\n Receives two dataframes, one with the data to analyze and another to select, \n and the columns to use as filtering criteria returning the filtered df\n '''\n df = data[column_df].unique().tolist()\n df_filtering = selection_data[column_selection_data].unique().tolist()\n intersected_column = []\n not_intersected_column = []\n filter_name_column = []\n intersected_data_raw = []\n \n for i in range(len(df)):\n try:\n if str(df[i]) in df_filtering:\n intersected_column.append(df[i]) \n else: \n not_intersected_column.append(df[i]) \n filter_name_column.append(filter_name) \n except:\n not_intersected_column.append('N/A') \n filter_name_column.append('N/A')\n pass\n \n intersected_data_raw = pd.DataFrame(columns=[data_type])\n intersected_data_raw[data_type] = 
intersected_column\n \n not_intersected_data_raw = pd.DataFrame(columns=[data_type, 'Filter Name'])\n not_intersected_data_raw[data_type] = not_intersected_column\n not_intersected_data_raw['Filter Name'] = filter_name_column\n \n pre_intersected_data = pd.merge(left=data,right=intersected_data_raw,how=\"inner\",left_on=data_type, right_on=data_type)\n intersected_data = pd.merge(left=selection_data,right=pre_intersected_data,how=\"inner\",left_on=column_selection_data, right_on=data_type)\n \n return (intersected_data, not_intersected_data_raw)\n\ndef data_not_in_intersection(data,selection_data,column_df,column_selection_data,filter_name):\n '''\n The function is designed to filter a DataFrame (data) by excluding rows \n that have matching values in a specified column of the DataFrame \n with a selection DataFrame (selection_data). \n '''\n df_novel= pd.DataFrame()\n df_tested= pd.DataFrame()\n df_resultado = data.merge(selection_data, left_on=column_df, right_on=column_selection_data, how='left', indicator=True)\n df_resultado = df_resultado.drop(columns=['Unnamed: 0', 'smiles', 'mol', 'inchi', 'inchikey'])\n df_novel = df_resultado[df_resultado['_merge'] == 'left_only']\n df_tested = df_resultado[df_resultado['_merge'] == 'both']\n df_novel = df_novel.drop(columns=['_merge'])\n df_tested = df_tested.drop(columns=['_merge'])\n df_tested['Filter Name'] = filter_name\n return (df_novel, df_tested)\n\ndef resumen_contar_datos(df,column):\n '''\n Receives a dataframe and a filter criterion,\n returning a table with the count of each parameter\n '''\n columns = df.columns\n for i in range(len(columns)):\n n_datos = df[columns[i]].value_counts()\n print(n_datos)\n return (print(n_datos))\n \n \ndef smiles_to_inchikey(df,compound_column):\n '''\n Receives a dataframe and the column where the smiles of the compounds are located,\n returning the same df with a new column, with the compounds in inchikey format '''\n pd.options.mode.chained_assignment = None\n 
mol_tested_list= []\n for element in df[compound_column]:\n mol_tested = Chem.MolFromSmiles(element)\n mol_tested_list.append(mol_tested)\n df['mol'] = mol_tested_list\n inchikey_list = []\n for element in df['mol']:\n try:\n inchikey = Chem.MolToInchiKey(element)\n inchikey_list.append(inchikey)\n except:\n inchikey_list.append('N/A')\n pass\n df['inchiKey'] = inchikey_list\n return(df)\n\ndef drug_likness(df,compound_column):\n pd.options.mode.chained_assignment = None\n mol_tested_list= []\n for element in df[compound_column]:\n mol_tested = Chem.MolFromSmiles(element)\n mol_tested_list.append(mol_tested)\n df['mol'] = mol_tested_list\n NumHDonors_list = []\n NumHAcceptors_list = []\n MW_list = []\n LogP_list = []\n for element in df['mol']:\n try:\n NumHDonors = Descriptors.NumHDonors(element)\n NumHDonors_list.append(NumHDonors)\n except:\n NumHDonors_list.append('N/A')\n pass\n try:\n NumHAcceptors = Descriptors.NumHAcceptors(element)\n NumHAcceptors_list.append(NumHAcceptors)\n except:\n NumHAcceptors_list.append('N/A')\n pass\n try:\n MW = Descriptors.rdMolDescriptors.CalcExactMolWt(element)\n MW_list.append(MW)\n except:\n MW_list.append('N/A')\n pass\n try:\n LogP = Descriptors.rdMolDescriptors.CalcCrippenDescriptors(element)[0]\n LogP_list.append(LogP)\n except:\n LogP_list.append('N/A')\n pass\n df['NumHDonors'] = NumHDonors_list\n df['NumHAcceptors'] = NumHAcceptors_list\n df['MW'] = MW_list\n df['logP'] = LogP_list\n #Cuento cuantas caracterísicas cumple cada compuesto\n countLipinski = lambda row: int(row['NumHDonors'] < 6) + int(row['NumHAcceptors'] < 6) + int(row['MW'] < 500) + int(row['logP'] < 6)\n df['Lipinski'] = df.apply(countLipinski,axis=1)\n countRO3 = lambda row: int(row['NumHDonors']<= 3) + int(row['NumHAcceptors']<= 3) + int(row['MW']< 300) + int(row['logP']<= 3)\n df['RO3'] = df.apply(countRO3,axis=1)\n #Selecciono aquellos compuestos que cumplen con 3 o más\n gdi_aviable_novel_druglike_lipinski = df[df['Lipinski']>2]\n 
gdi_aviable_novel_druglike_RO3 = df[df['RO3']>2]\n gdi_aviable_novel_druglike_not_lipinski = df[df['Lipinski']<3]\n gdi_aviable_novel_druglike_not_RO3 = df[df['RO3']<3]\n #Filtrado según drug like\n df_druglike = pd.concat([gdi_aviable_novel_druglike_lipinski,gdi_aviable_novel_druglike_RO3])\n df_not_druglike = pd.concat([gdi_aviable_novel_druglike_not_lipinski,gdi_aviable_novel_druglike_not_RO3])\n df_not_druglike['Filter Name'] = [\"Not Druglike\"] * len(df_not_druglike)\n return(df_druglike, df_not_druglike)\n\n ","repo_name":"trypanosomatics/yeast_repo_pipeline","sub_path":"pipeline_functions.py","file_name":"pipeline_functions.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28476263416","text":"def omp_taskyield():\n import omp\n from time import sleep\n NUM_TASKS = 25\n count = 0\n start_id = [0 for _ in range(NUM_TASKS)]\n current_id = [0 for _ in range(NUM_TASKS)]\n\n if 'omp parallel':\n use_omp = omp.in_parallel()\n if 'omp single':\n for i in range(NUM_TASKS):\n myi = i\n if 'omp task firstprivate(myi) untied':\n sleep(0.01)\n start_id[myi] = omp.get_thread_num()\n\n 'omp taskyield'\n\n if start_id[myi] % 2 == 0:\n sleep(0.01)\n current_id[myi] = omp.get_thread_num()\n\n for i in range(NUM_TASKS):\n if current_id[i] == start_id[i]:\n count += 1\n\n return count < NUM_TASKS or not use_omp\n\n","repo_name":"serge-sans-paille/pythran","sub_path":"pythran/tests/openmp.legacy/omp_taskyield.py","file_name":"omp_taskyield.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":1930,"dataset":"github-code","pt":"57"} +{"seq_id":"36720857865","text":"import sys\nsys.stdin = open(\"input.txt\")\ninput = sys.stdin.readline\n\nn, k = map(int, input().split())\nnum = list(map(int, input().rstrip()))\nstack = [num[0]]\nfor i in range(1, n):\n while stack and num[i] > stack[-1] and k:\n stack.pop()\n k -= 1\n 
stack.append(num[i])\n\nwhile k:\n stack.pop()\n k -= 1\nfor x in stack:\n print(x, end=\"\")\n\n","repo_name":"LV1-Recsys-07/Algorithm","sub_path":"김철현/5주차/2812_크게만들기.py","file_name":"2812_크게만들기.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"36704682965","text":"class Image:\n all_images = []\n all_verticals = []\n all_horizontals = []\n\n def __init__(self, type, numTags, tags, id):\n self.type = type\n self.numTags = numTags\n self.tags = tags\n self.id = id\n Image.all_images.append(self)\n if self.type == \"H\":\n Image.all_horizontals.append(self)\n else:\n Image.all_verticals.append(self)\n\n def __repr__(self):\n return f\"{self.id}\"\n # return(f\"{self.id}, {self.type}, {self.numTags}, {self.tags}\")\n\n\nclass Slide:\n all_slides = []\n all_vertical_slides = []\n all_horizontal_slides = []\n\n def __init__(self, images):\n self.images = images\n self.tags = self.get_tags()\n Slide.all_slides.append(self)\n self.sort_type()\n\n def get_tags(self):\n all_tags = set()\n for i in self.images:\n for j in i.tags:\n all_tags.add(j)\n return all_tags\n\n def sort_type(self):\n for i in self.images:\n if i.type == \"H\":\n Slide.all_horizontal_slides.append(self)\n else:\n Slide.all_vertical_slides.append(self)\n\n def __repr__(self):\n return f\"images on this slide: {[i.id for i in self.images]}\"\n\n\nclass Album:\n all_albums = []\n\n def __init__(self, slides):\n self.slides = slides\n Album.all_albums.append(self)\n self.score = self.scoring_function()\n\n def scoring_function(self):\n # get length of album\n leader = 1\n follower = 0\n album_score = []\n while leader < len(self.slides):\n page1 = self.slides[follower]\n page2 = self.slides[leader]\n # print(page1.tags)\n # print(page2.tags)\n\n # check intersection\n inter_score = len(page1.tags.intersection(page2.tags))\n # print(f\"inter: {inter_score}\")\n # check page1 comp\n page1_comp = 
len(page1.tags.difference(page2.tags))\n # print(f\"page1_comp: {page1_comp}\")\n\n # check page2 comp\n page2_comp = len(page2.tags.difference(page1.tags))\n # print(f\"page2_comp: {page2_comp}\")\n\n album_score.append(min([inter_score, page1_comp, page2_comp]))\n\n leader += 1\n follower += 1\n score = sum(album_score)\n return score\n\n def __repr__(self):\n return str([i.images for i in self.slides])\n","repo_name":"EmmetGeoghegan/Google-hashcode-2020","sub_path":"Practice 2019/classes/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19457392751","text":"import pytest\nimport socket\nfrom beamngpy import BeamNGpy, Vehicle, Scenario, ScenarioObject, setup_logging\n\n\n@pytest.fixture()\ndef beamng():\n beamng = BeamNGpy('localhost', 64256)\n return beamng\n\n\ndef test_quats(beamng):\n with beamng as bng:\n setup_logging()\n\n scenario = Scenario('smallgrid', 'test_quat')\n\n blue_etk = Vehicle('ego_vehicle',\n model='etk800',\n color='Blue',\n licence=\"angle\")\n scenario.add_vehicle(blue_etk, pos=(0, 0, 0), rot=(0, 0, 0))\n\n blue_etk = Vehicle('ego_vehicle2',\n model='etk800',\n color='Green',\n license=\"quat\")\n rot_quat = (-0.00333699025, -0.00218820246, -0.689169466, 0.724589229)\n scenario.add_vehicle(blue_etk, pos=(5, 0, 0), rot_quat=rot_quat)\n\n rb = ScenarioObject(oid='roadblock',\n name='sawhorse',\n otype='BeamNGVehicle',\n pos=(-10, -5, 0),\n rot=(0, 0, 0),\n scale=(1, 1, 1),\n JBeam='sawhorse',\n datablock=\"default_vehicle\"\n )\n scenario.add_object(rb)\n\n cn = ScenarioObject(oid='cones',\n name='cones',\n otype='BeamNGVehicle',\n pos=(0, -5, 0),\n rot=None,\n rot_quat=(0, 0, 0, 1),\n scale=(1, 1, 1),\n JBeam='cones',\n datablock=\"default_vehicle\"\n )\n scenario.add_object(cn)\n\n scenario.make(beamng)\n\n bng.load_scenario(scenario)\n bng.start_scenario()\n\n white_etk = Vehicle('ego_vehicle3', 
model='etk800', color='White')\n bng.spawn_vehicle(white_etk, (-10, 0, 0), (0, 0, 0))\n\n pickup = Vehicle('ego_vehicle4', model='pickup')\n pos = (-15, 0, 0)\n bng.spawn_vehicle(pickup, pos, None, rot_quat=(0, 0, 0, 1))\n resp = bng.get_current_vehicles()\n assert len(resp) == 6\n\n pickup.connect(bng)\n\n pickup.poll_sensors()\n pos_before = pickup.state['pos']\n bng.teleport_vehicle(pickup.vid, pos, rot=(0, 45, 0))\n pickup.poll_sensors()\n pos_after = pickup.state['pos']\n assert(pos_before != pos_after)\n\n pickup.poll_sensors()\n pos_before = pickup.state['pos']\n rot_quat = (-0.00333699025, -0.00218820246, -0.689169466, 0.724589229)\n bng.teleport_vehicle(pickup.vid, pos, rot_quat=rot_quat)\n pickup.poll_sensors()\n pos_after = pickup.state['pos']\n assert(pos_before != pos_after)\n\n try:\n bng.teleport_scenario_object(rb, (-10, 5, 0), rot=(-45, 0, 0))\n assert True\n except socket.timeout:\n assert False\n\n try:\n rot_quat = (-0.003337, -0.0021882, -0.6891695, 0.7245892)\n bng.teleport_scenario_object(rb, (-10, 5, 0), rot_quat=rot_quat)\n assert True\n except socket.timeout:\n assert False\n\n\nif __name__ == '__main__':\n bng = BeamNGpy('localhost', 64256)\n test_quats(bng)\n","repo_name":"Michlinek1/BeamNGpy","sub_path":"tests/test_quats.py","file_name":"test_quats.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"73336643697","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 8 12:50:12 2021\n\n@author: guo.1648\n\"\"\"\n\n\nimport os\nimport pickle\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nsrcRootDir = '/eecf/cbcsl/data100b/Chenqi/imbalanced_data/resnet/results_fromScratch/'\n\n\"\"\"\n### for Fungi:\norig_prefix = 'iNaturalist_eachSubCls/cls_res18_orig_iNaturalist/Fungi/'\ngan_prefix = 'iNaturalist_eachSubCls/cls_res18_cGAN_iNaturalist/Fungi/opt2/step2/based on step1 
thresh10/thresh_10/'\n\npklFile = 'history.pkl'\nmodel_arch = 'iNaturalist_Fungi'\n\n# newly added for plotting diffusion model result curve:\nsrcDstDir_diffu = '/eecf/cbcsl/data100b/Chenqi/guided-diffusion/results/cls_res18_aug_diffu/'\n\ndiffu_prefix = 'Fungi_new/step1/thresh_10/'\n\"\"\"\n\"\"\"\n### for Birds:\norig_prefix = 'iNaturalist_eachSubCls/cls_res18_orig_iNaturalist/Birds/'\ngan_prefix = 'iNaturalist_eachSubCls/cls_res18_cGAN_iNaturalist/Birds/opt2/step1/thresh_10/'\n\npklFile = 'history.pkl'\n\nmodel_arch = 'iNaturalist_Birds'\n\n\n# newly added for plotting diffusion model result curve:\nsrcDstDir_diffu = '/eecf/cbcsl/data100b/Chenqi/guided-diffusion/results/cls_res18_aug_diffu/'\n\ndiffu_prefix = 'Birds/step1/thresh_10/'\n\"\"\"\n\"\"\"\n### for scene:\norig_prefix = 'scene/cls_res18_orig/'\ngan_prefix = 'scene/cls_res18_cGAN/opt2/step2/thresh_30/'\n\npklFile = 'history.pkl'\n\nmodel_arch = 'scene'\n\n\n# newly added for plotting diffusion model result curve:\nsrcDstDir_diffu = '/eecf/cbcsl/data100b/Chenqi/guided-diffusion/results/cls_res18_aug_diffu/'\n\ndiffu_prefix = 'scene/step1/thresh_10/'\n\"\"\"\n\n### for Amphibians:\norig_prefix = 'iNaturalist_eachSubCls/cls_res18_orig_iNaturalist/Amphibians/'\ngan_prefix = 'iNaturalist_eachSubCls/cls_res18_cGAN_iNaturalist/Amphibians/opt2/step1/thresh_10/'\n\npklFile = 'history.pkl'\n\nmodel_arch = 'iNaturalist_Amphibians'\n\n\n# newly added for plotting diffusion model result curve:\nsrcDstDir_diffu = '/eecf/cbcsl/data100b/Chenqi/guided-diffusion/results/cls_res18_aug_diffu/'\n\ndiffu_prefix = 'Amphibians/step1/thresh_10/'\n\n\n\n\nif __name__ == '__main__':\n \n orig_folder = srcRootDir + orig_prefix #+ folder_suff\n assert(os.path.exists(orig_folder))\n gan_folder = srcRootDir + gan_prefix #+ folder_suff\n assert(os.path.exists(gan_folder))\n diffu_folder = srcDstDir_diffu + diffu_prefix\n assert(os.path.exists(diffu_folder))\n \n epochs_orig = []\n train_acc1_list_orig = []\n valid_acc1_list_orig = 
[]\n \n epochs_gan = []\n train_acc1_list_gan = []\n valid_acc1_list_gan= []\n \n epochs_diffu = []\n train_acc1_list_diffu = []\n valid_acc1_list_diffu= []\n \n # (1) for original images:\n orig_pkl_fullName = orig_folder + pklFile\n assert(os.path.exists(orig_pkl_fullName))\n \n f_pkl = open(orig_pkl_fullName,'rb')\n history_orig = pickle.load(f_pkl)\n f_pkl.close()\n \n for dict_orig in history_orig:\n epochs_orig.append(dict_orig['epoch'])\n train_acc1_list_orig.append(dict_orig['acc1_train'].item())\n valid_acc1_list_orig.append(dict_orig['acc1_val'].item())\n \n # (2) for gan synthesized images:\n gan_pkl_fullName = gan_folder + pklFile\n assert(os.path.exists(gan_pkl_fullName))\n \n f_pkl = open(gan_pkl_fullName,'rb')\n history_gan = pickle.load(f_pkl)\n f_pkl.close()\n \n for dict_gan in history_gan:\n epochs_gan.append(dict_gan['epoch'])\n train_acc1_list_gan.append(dict_gan['acc1_train'].item())\n valid_acc1_list_gan.append(dict_gan['acc1_val'].item())\n \n # (3) for diffu synthesized images:\n diffu_pkl_fullName = diffu_folder + pklFile\n assert(os.path.exists(diffu_pkl_fullName))\n\n f_pkl = open(diffu_pkl_fullName,'rb')\n history_diffu = pickle.load(f_pkl)\n f_pkl.close()\n\n for dict_diffu in history_diffu:\n epochs_diffu.append(dict_diffu['epoch'])\n train_acc1_list_diffu.append(dict_diffu['acc1_train'].item())\n valid_acc1_list_diffu.append(dict_diffu['acc1_val'].item())\n \n \n assert(epochs_orig == epochs_gan)\n assert(epochs_orig == epochs_diffu)\n \n model_type = model_arch #+ folder_suff.split('/')[0] + '_' + folder_suff.split('/')[1]\n \n # plot curves of train_acc1_list_orig & train_acc1_list_gan:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(epochs_orig, train_acc1_list_orig)\n ax.plot(epochs_gan, train_acc1_list_gan)\n ax.plot(epochs_diffu, train_acc1_list_diffu)\n ax.legend(['train_acc1_orig_res18', 'train_acc1_gan_res18', 'train_acc1_diffusion_res18'])\n ax.set_ylim([0,max(max(train_acc1_list_orig), 
max(train_acc1_list_gan))+10])\n title_str = model_type + '_train_acc1'\n plt.title(title_str)\n fig.savefig(diffu_folder + title_str + '.png')\n # plot curves of val_acc1_list_orig & val_acc1_list_gan:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(epochs_orig, valid_acc1_list_orig)\n ax.plot(epochs_gan, valid_acc1_list_gan)\n ax.plot(epochs_diffu, valid_acc1_list_diffu)\n ax.legend(['valid_acc1_orig_res18', 'valid_acc1_gan_res18', 'valid_acc1_diffusion_res18'])\n ax.set_ylim([0,max(max(valid_acc1_list_orig), max(valid_acc1_list_gan))+10])\n title_str = model_type + '_valid_acc1'\n plt.title(title_str)\n fig.savefig(diffu_folder + title_str + '.png')\n \n print('max(valid_acc1_list_orig)='+str(max(valid_acc1_list_orig)))\n print('max(valid_acc1_list_gan)='+str(max(valid_acc1_list_gan)))\n print('at Epoch: ' + str(np.argmax(valid_acc1_list_gan)))\n print('max(valid_acc1_list_diffu)='+str(max(valid_acc1_list_diffu)))\n print('at Epoch: ' + str(np.argmax(valid_acc1_list_diffu)))\n \n print('np.mean(valid_acc1_list_orig[-5:]) = ' + str(np.mean(valid_acc1_list_orig[-5:])))\n print('np.mean(valid_acc1_list_gan[-5:]) = ' + str(np.mean(valid_acc1_list_gan[-5:])))\n print('np.mean(valid_acc1_list_diffu[-5:]) = ' + str(np.mean(valid_acc1_list_diffu[-5:])))\n\n\n\n\n\n","repo_name":"chenqiguo/SSIM-DeepGenModelsImbaDataAug","sub_path":"guided-diffusion/scripts/my_scripts/plt_learnCurve_fromHistoryPkl_res18.py","file_name":"plt_learnCurve_fromHistoryPkl_res18.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"30930912618","text":"class Solution:\n def uniqueOccurrences(self, arr: List[int]) -> bool:\n found = {}\n for x in range(0,len(arr)):\n if arr[x] not in found:\n found[arr[x]] = 1\n else:\n found[arr[x]] = found[arr[x]]+1\n dummy = list(found.values())\n dummy2 = list(set(found.values()))\n if len(dummy) == len(dummy2):\n return True\n return 
False\n","repo_name":"minnce/leetcode","sub_path":"python/uniqueOccurances.py","file_name":"uniqueOccurances.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37827311356","text":"import os\nimport yaml\nimport math\nimport shutil\nimport socket\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom glob import glob\nfrom easydict import EasyDict\n\nimport torch\nfrom torch import nn\n\nfrom test_managers.interactive_sampler import InteractiveSampler\nfrom utils import *\n\n\ndef inference(g_ema, device, config, args):\n\n assert (not hasattr(config.train_params, \"imp_feat_unfold\")) or \\\n (hasattr(config.train_params, \"imp_feat_unfold\") and config.train_params.imp_feat_unfold == False), \\\n \"Not carefully investigated!\"\n \n \"\"\"\n Setup env\n \"\"\"\n exp_root = os.path.join(config.var.log_dir, config.var.exp_name)\n if args.debug:\n save_root = os.path.join(\n exp_root, \n \"test\", \n \"debug-{}\".format(config.task.config_name))\n else:\n save_root = os.path.join(\n exp_root, \n \"test\", \n \"{}\".format(config.task.config_name))\n if not os.path.exists(save_root): os.makedirs(save_root)\n shutil.copy2(\n config.task.config_path, \n os.path.join(save_root, os.path.basename(config.task.config_path)))\n\n \"\"\"\n Start inference\n \"\"\"\n if config.task.num_gen == -1: # all, read number of images from dataset\n assert hasattr(config.task, \"dataset_size\"), \\\n \"Generate all should only be used for tasks with a real image dataset.\"\n config.task.num_gen = config.task.dataset_size\n iter_ = math.ceil(config.task.num_gen / config.task.batch_size)\n pbar = tqdm(\n range(iter_), \n initial=0, \n total=iter_, \n dynamic_ncols=True, \n smoothing=0.01)\n\n \"\"\"\n Calculate shape and coordinates for patches\n \"\"\"\n task_manager = import_func(config.task.task_manager)(g_ema, device, save_root, config)\n 
task_manager.task_specific_init()\n\n if args.inter_ckpt:\n if os.path.isfile(args.inter_ckpt):\n print(\" [!] A single inter ckpt is loaded for all samples!\")\n testing_vars_load_from = args.inter_ckpt\n else:\n testing_vars_load_from = sorted(glob(os.path.join(args.inter_ckpt, \"*.pkl\")))\n \n print(\" [*] Setup complete, start testing!\")\n for iter_ in pbar:\n\n if config.task.interactive or args.interactive:\n testing_vars = task_manager.create_vars(\n inv_records=args.inv_records, inv_placements=args.inv_placements)\n if args.inter_ckpt:\n if isinstance(testing_vars_load_from, list):\n assert config.task.batch_size == 1, \"Does not consider batch_size > 1 case!\"\n if task_manager.cur_global_id < len(testing_vars_load_from):\n testing_vars.load(testing_vars_load_from[task_manager.cur_global_id])\n else:\n print(\" [!] Run out of previous ckpt! Start from random!\")\n else:\n testing_vars.load(testing_vars_load_from)\n InteractiveSampler(task_manager, testing_vars, config)\n else:\n if args.speed_benchmark:\n assert (not hasattr(args, \"inv_record\")) or args.inv_record is None, \"No fancy stuffs in benchmark.\"\n if iter_ < 10: # Do not use the stats from first ten, still unstable\n task_manager.run_next(\n save=False, write_gpu_time=False)\n else:\n task_manager.run_next(\n save=False, write_gpu_time=True)\n elif args.calc_flops:\n task_manager.run_next(save=False, write_gpu_time=False, calc_flops=True)\n if iter_ == 1: exit()\n else:\n task_manager.run_next(\n save=True, write_gpu_time=False,\n inv_records=args.inv_records, inv_placements=args.inv_placements)\n if args.debug:\n task_manager.exit()\n exit()\n\n task_manager.exit()\n\n if args.speed_benchmark:\n if not hasattr(config.task, \"parallel_batch_size\"):\n config.task.parallel_batch_size = -1\n exec_mean, exec_std = task_manager.get_exec_time_stats()\n print(\" [*] Benchmark results over {} samples: {:.4f} +- {:.4f} (sec/image)\".format(\n (config.task.num_gen - 1), exec_mean, exec_std))\n 
benchmark_record_path = \"./logs-quant/benchmark_results/benchmark-{}.txt\".format(socket.gethostname())\n if not os.path.exists(os.path.dirname(benchmark_record_path)):\n os.makedirs(os.path.dirname(benchmark_record_path))\n with open(benchmark_record_path, \"a\") as f:\n f.write(\"[-] EXP: Res {}x{} ; Parabatch {} ; {} GPUs\\n\".format(\n config.task.height, config.task.width, config.task.parallel_batch_size, config.var.n_gpu))\n f.write(\"{:.6f} +- {:.6f}\\n\".format(exec_mean, exec_std))\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n\n try:\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-config\", type=str)\n parser.add_argument(\"--test-config\", type=str)\n\n parser.add_argument(\"--exp-suffix\", type=str, default=None)\n parser.add_argument(\"--ckpt\", type=str, default=None)\n parser.add_argument(\"--seed\", type=int, default=None)\n parser.add_argument(\"--interactive\", action=\"store_true\")\n parser.add_argument(\"--override-save-idx\", type=int, default=None)\n\n parser.add_argument(\"--speed-benchmark\", action=\"store_true\")\n parser.add_argument(\"--calc-flops\", action=\"store_true\")\n\n # Flag for inversion distributed testing\n parser.add_argument(\"--inv-start-idx\", type=int, default=None)\n parser.add_argument(\"--try-restrict-memory\", type=float, default=1.)\n\n # Load from inversion\n parser.add_argument(\"--inv-records\", type=str, default=None)\n parser.add_argument(\"--inv-placements\", type=str, default=None)\n\n # Interactive recover from ckpt\n parser.add_argument(\"--inter-ckpt\", type=str, default=None)\n\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--verbose\", action=\"store_true\")\n parser.add_argument(\"--archive-mode\", action=\"store_true\")\n parser.add_argument(\"--clear-fid-cache\", action=\"store_true\")\n args = parser.parse_args()\n\n if hasattr(torch.cuda.memory, \"set_per_process_memory_fraction\"):\n 
torch.cuda.memory.set_per_process_memory_fraction(args.try_restrict_memory)\n print(\" [*] Set memory limit to {}%!\".format(args.try_restrict_memory*100))\n\n \"\"\"\n Parse Inv args\n \"\"\"\n def parse_tuple(v):\n return tuple([float(vv) for vv in v.split(\",\")])\n if args.inv_records is not None:\n args.inv_records = [\n param for param in args.inv_records.split(\":\")]\n if args.inv_placements is None:\n args.inv_placements = (0.5, 0.5)\n else:\n args.inv_placements = [\n parse_tuple(param) for param in args.inv_placements.split(\":\")]\n\n \"\"\"\n Normal init\n \"\"\"\n if args.verbose:\n def annoy_print(x):\n torch.cuda.synchronize()\n print(x, end=\"\")\n else:\n annoy_print = dummy_func\n \n with open(args.model_config, \"r\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n config = EasyDict(config)\n config.var = EasyDict()\n config.var.exp_name = os.path.basename(args.model_config).split(\".yaml\")[0]\n print(\" [*] Config {} loaded!\".format(args.model_config))\n\n with open(args.test_config, \"r\") as f:\n config.task = EasyDict(yaml.load(f, Loader=yaml.FullLoader))\n config.task.config_name = os.path.basename(args.test_config).split(\".yaml\")[0]\n config.task.config_path = args.test_config\n config.task.exp_suffix = args.exp_suffix\n\n if args.inv_start_idx is not None:\n config.task.init_index = args.inv_start_idx\n assert config.task.batch_size == 1\n assert config.task.num_gen == 1\n elif args.override_save_idx is not None:\n config.task.init_index = args.override_save_idx\n assert config.task.batch_size == 1\n assert config.task.num_gen == 1\n else:\n config.task.init_index = 0\n\n if hasattr(config.task, \"override_dataset_name\"):\n print(\" [!] Override dataset name to {} with specification in test-config!\".format(\n config.task.override_dataset_name))\n config.data_params.dataset = config.task.override_dataset_name\n if hasattr(config.task, \"override_dataset_data_size\"):\n print(\" [!] 
Override dataset raw resolution to {} with specification in test-config!\".format(\n config.task.override_dataset_data_size))\n config.train_params.data_size = config.task.override_dataset_data_size\n if hasattr(config.task, \"override_dataset_full_size\"):\n print(\" [!] Override dataset full-image resolution to {} with specification in test-config!\".format(\n config.task.override_dataset_full_size))\n config.train_params.full_size = config.task.override_dataset_full_size\n\n if args.seed is not None:\n print(\" [!] Forcingly use seed from cmdline!\")\n cur_seed = args.seed\n elif config.task.seed is not None:\n print(\" [!] Use default seed in task-config!!\")\n cur_seed = config.task.seed\n else:\n print(\" [!] Seed not specified, randomly assign one now!\")\n cur_seed = np.random.randint(0, 9487)\n print(\" [!] Current seed: {}\".format(cur_seed))\n manually_seed(cur_seed)\n\n \"\"\"\n Batch size calibration\n \"\"\"\n if config.task.num_gen % config.task.batch_size != 0:\n bs = config.task.batch_size\n config.task.num_gen = math.ceil(config.task.num_gen / bs) * bs\n print(\" [!] 
Force number of generated images to a multiple of batch size => {}\".format(config.task.num_gen))\n config.train_params.batch_size = config.task.batch_size\n\n \"\"\"\n Trait generation has specific num_gen\n \"\"\"\n if hasattr(config.task, \"is_trait_figure\") and config.task.is_trait_figure:\n if hasattr(config.task, \"n_trait_x\"):\n config.task.num_gen = config.task.n_trait_x * config.task.n_trait_y\n else:\n config.task.num_gen = 1\n\n \"\"\"\n Archive mode\n \"\"\"\n if args.archive_mode:\n config.var.log_dir = \"../../\" # We are running in ./logs//codes/\n else:\n config.var.log_dir = \"./logs/\"\n\n \"\"\"\n Error file writing handling\n Remove previous error file (will make confusion on log synchronizing)\n \"\"\"\n error_f = os.path.join(config.var.log_dir, config.var.exp_name, \"error-log.txt\")\n if os.path.exists(error_f):\n os.remove(error_f)\n\n if \"CUDA_VISIBLE_DEVICES\" in os.environ:\n n_gpu = len(os.environ[\"CUDA_VISIBLE_DEVICES\"].split(\",\"))\n config.var.dataparallel = n_gpu > 1\n config.var.n_gpu = n_gpu\n if n_gpu > 1:\n torch.backends.cudnn.benchmark = True\n else:\n torch.backends.cudnn.benchmark = False\n else:\n raise ValueError(\" [!] Please specify CUDA_VISIBLE_DEVICES!\")\n\n # [NOTE] In debug mode:\n # 1. Will not write any logs\n # 2. Exit after first full iteration\n # 3. 
Force eval FID with one batch of fake samples; will not write FID cache if real stats are not exist\n if args.debug:\n print(\" [Warning] Debug mode; Do not use this unless you know what you are doing!\")\n bs = 1\n config.task.batch_size = bs * n_gpu\n config.log_params.n_save_sample = bs * n_gpu\n \n \"\"\"\n Build G\n \"\"\"\n g_ema = import_func(config.train_params.g_arch)(config=config)\n\n \"\"\"\n Multi-GPU\n \"\"\"\n if config.var.dataparallel:\n device = \"cpu\" # torch will auto do the GPU partitioning in backend\n g_ema = nn.DataParallel(g_ema).cuda()\n else:\n device = \"cuda\"\n g_ema = g_ema.to(device)\n g_ema.eval()\n\n\n \"\"\"\n Load checkpoint\n \"\"\"\n if args.ckpt is None:\n ckpt_dir = os.path.join(config.var.log_dir, config.var.exp_name, \"ckpt\")\n best_ckpt = os.path.join(ckpt_dir, \"best_fid.pth.tar\")\n\n assert os.path.exists(best_ckpt), \"Cannot find checkpoint at {}!\".format(best_ckpt)\n print(\" [*] Found ckpt, load model from:\", best_ckpt)\n ckpt = torch.load(best_ckpt, map_location=lambda storage, loc: storage)\n else:\n ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)\n safe_load_state_dict(g_ema, ckpt[\"g_ema\"]) #, strict=False)\n print(\" [*] Loaded ckpt at {} iter with FID {:.4f}\".format(ckpt[\"iter\"], ckpt[\"best_fid\"]))\n\n with torch.no_grad():\n inference(g_ema, device, config, args)\n\n except Exception as e:\n if e is not KeyboardInterrupt:\n error_f = os.path.join(config.var.log_dir, config.var.exp_name, \"test-error-log.txt\")\n with FileLock(error_f, timeout=10, delay=0.1) as lock:\n with open(error_f, \"w+\") as f:\n f.write(str(e) + \"\\n\")\n f.write(\" *** stack trace *** \\n\")\n f.write(traceback.format_exc())\n raise e\n","repo_name":"hubert0527/infinityGAN","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13729,"program_lang":"python","lang":"en","doc_type":"code","stars":313,"dataset":"github-code","pt":"57"} +{"seq_id":"11538750978","text":"from utils 
import *\nfrom nlp_utils import *\n\nimport ipywidgets as widgets\nfrom IPython.display import display, HTML, Image\nfrom PIL import Image as PILImage, ImageOps\nfrom io import BytesIO\n\ndigi_base_url = \"https://digi.kansalliskirjasto.fi/search\"\nleft_image_path = \"https://www.topuniversities.com/sites/default/files/profiles/logos/tampere-university_5bbf14847d023f5bc849ec9a_large.jpg\"\nright_image_path = \"https://digi.kansalliskirjasto.fi/images/logos/logo_fi_darkblue.png\"\n\nTKs=list()\nflinks=list()\n\nlmMethod: str=\"stanza\"\nnSPMs: int=58\nspm_files_dir=f\"/scratch/project_2004072/Nationalbiblioteket/dataframes_x{nSPMs}/\"\nfprefix=f\"concatinated_{nSPMs}_SPMs\"\n\nwith HiddenPrints():\n\tconcat_spm_U_x_T=load_pickle(fpath=glob.glob( spm_files_dir+'/'+f'{fprefix}'+'*_USERs_TOKENs_spm_*_nUSRs_x_*_nTOKs.gz')[0])\n\tconcat_spm_usrNames=load_pickle(fpath=glob.glob( spm_files_dir+'/'+f'{fprefix}'+'*_USERs_TOKENs_spm_user_ip_names_*_nUSRs.gz')[0])\n\tconcat_spm_tokNames=load_pickle(fpath=glob.glob( spm_files_dir+'/'+f'{fprefix}'+'*_USERs_TOKENs_spm_token_names_*_nTOKs.gz')[0])\n\tidf_vec=load_pickle(fpath=glob.glob( spm_files_dir+'/'+f'{fprefix}'+'*_idf_vec_1_x_*_nTOKs.gz')[0])\n\tusrNorms=load_pickle(fpath=glob.glob( spm_files_dir+'/'+f'{fprefix}'+'*_users_norm_1_x_*_nUSRs.gz')[0])\n\nleft_image = PILImage.open(BytesIO(requests.get(left_image_path).content))\nright_image = PILImage.open(BytesIO(requests.get(right_image_path).content))\nleft_image_widget = widgets.Image(value=requests.get(left_image_path).content, format='png', width=300, height=300)\nright_image_widget = widgets.Image(value=requests.get(right_image_path).content, format='png', width=300, height=300)\nwelcome_lbl = widgets.HTML(value=\"

Welcome to User-based Recommendation System!
What are you looking after?

\")\n\n# Modified entry widget\nentry = widgets.Text(placeholder=\"Enter your query keywords here...\", \n\t\t\t\t\t\t\t\t\t\t layout=widgets.Layout(width='800px', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t height='50px',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t font_size='30px', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t padding='5px',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t font_weight='bold',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t font_family='Ubuntu',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t)\n\n# Added vertical padding\nvbox_layout = widgets.Layout(align_items='center', padding='15px')\n\nbutton_style = {'button_color': 'darkgray', 'font_weight': 'bold', 'font_size': '16px'}\nsearch_btn = widgets.Button(description=\"Search NLF\", layout=widgets.Layout(width='150px'), style=button_style)\nclean_search_btn = widgets.Button(description=\"Clear\", layout=widgets.Layout(width='150px'), style=button_style)\nrec_btn = widgets.Button(description=\"Recommend Me\", layout=widgets.Layout(width='150px'), style=button_style)\nclean_recsys_btn = widgets.Button(description=\"Clear\", layout=widgets.Layout(width='150px'), style=button_style)\nexit_btn = widgets.Button(description=\"Exit\", layout=widgets.Layout(width='100px'), style=button_style)\n\ncountdown_lbl = widgets.HTML()\nrecys_lbl = widgets.HTML()\nnlf_link_lable = widgets.HTML()\n\n# Modified slider to have a minimum value of 3 and a maximum value of 15\nslider_style={'description_width': 'initial'}\nslider_value = widgets.IntSlider(value=5, min=3, max=20, description='Recsys Count', style=slider_style)\nslider_value.layout.visibility = 'hidden' # Initially hidden\n\nprogress_bar_style = {'description_width': 'initial', 'bar_color': 'blue', 'background_color': 'darkgray'}\nprogress_bar_description_style = {'description_width': 'initial', 'font-size': '25px', 'fort_family': 'Futura'}\nprogress_bar = widgets.IntProgress(value=0, min=0, max=350, description='Please wait...', 
style=progress_bar_style)\nprogress_bar.description_style = progress_bar_description_style\nprogress_bar.layout.visibility = 'hidden' # Initially hidden\n\ndef run_recSys(query_phrase: str=\"This is a sample raw query phrase!\", ):\n\tquery_phrase_tk = get_lemmatized_sqp(qu_list=[query_phrase], lm=lmMethod)\n\tquery_vector=get_query_vec(\tmat=concat_spm_U_x_T,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmat_row=concat_spm_usrNames, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmat_col=concat_spm_tokNames, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttokenized_qu_phrases=query_phrase_tk,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\tccs=get_optimized_cs(\tspMtx=concat_spm_U_x_T,\n\t\t\t\t\t\t\t\t\t\t\t\tquery_vec=query_vector, \n\t\t\t\t\t\t\t\t\t\t\t\tidf_vec=idf_vec,\n\t\t\t\t\t\t\t\t\t\t\t\tspMtx_norm=usrNorms, # must be adjusted, accordingly!\n\t\t\t\t\t\t\t\t\t\t\t)\n\tavgRecSys=get_avg_rec(spMtx=concat_spm_U_x_T,\n\t\t\t\t\t\t\t\t\t\t\t\tcosine_sim=ccs**5,\n\t\t\t\t\t\t\t\t\t\t\t\tidf_vec=idf_vec,\n\t\t\t\t\t\t\t\t\t\t\t\tspMtx_norm=usrNorms,\n\t\t\t\t\t\t\t\t\t\t\t)\n\ttopKtokens=get_topK_tokens(\tmat=concat_spm_U_x_T, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmat_rows=concat_spm_usrNames,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmat_cols=concat_spm_tokNames,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tavgrec=avgRecSys,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tqu=query_phrase_tk,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\treturn topKtokens\n\ndef close_window(count=8):\n\tif count > 0:\n\t\tcountdown_lbl.value = f\"Thanks for using our service, Have a Good Day!

closing in {count} sec...\"\n\t\ttime.sleep(1)\n\t\tclose_window(count-1)\n\telse:\n\t\tdisplay(HTML(\"Bye\"))\n\ndef get_nlf_link(change):\n\tquery = entry.value\n\tif query and query != \"Enter your query keywords here...\":\n\t\tencoded_query = urllib.parse.quote(query)\n\t\tgen_link=f\"{digi_base_url}?query={encoded_query}\"\n\t\tnlf_link_lable.value=f\"Click here to open National Library Results\"\n\telse:\n\t\tnlf_link_lable.value = \"

Oops! Enter a valid search query to proceed!

\"\n\ndef on_entry_click(widget, event, data):\n\tif widget.value == \"Enter your query keywords here...\":\n\t\twidget.value = \"\"\n\t\twidget.style = {'description_width': 'initial', 'color': 'black'}\n\ndef clean_search_entry(change):\n\tnlf_link_lable.value = \"\"\n\tentry.value = \"\"\n\tentry.placeholder = \"Enter your query keywords here...\"\n\ndef update_recys_lbl(_):\n\tquery = entry.value\n\tif query and query != \"Enter your query keywords here...\":\n\t\trecys_lbl.value = generate_recys_html(query, TKs, flinks, slider_value.value)\n\telse:\n\t\trecys_lbl.value = \"

Enter a valid search query first!

\"\n\ndef generate_recys_html(query, TKs, flinks, slider_value):\n\trecys_lines = \"\"\n\tfor i in np.arange(slider_value):\n\t\trecys_lines += f\"{query} + {TKs[i]}
\"\n\treturn f\"

\" \\\n\t\t\t\t f\"Since you searched
\" \\\n\t\t\t\t f\"{query}
\" \\\n\t\t\t\t f\"you might be also interested in:
\" \\\n\t\t\t\t f\"{recys_lines}\" \\\n\t\t\t\t f\"

\"\n\ndef clean_recsys_entry(change):\n\tentry.value = \"\"\n\tentry.placeholder = \"Enter your query keywords here...\"\n\trecys_lbl.value = \"\"\n\tslider_value.layout.visibility = 'hidden' # Hide slider\n\ndef rec_btn_click(change):\n\tquery = entry.value\n\tif query and query != \"Enter your query keywords here...\":\n\t\tprogress_bar.layout.visibility = 'visible' # Show progress bar\n\t\tglobal TKs, flinks\n\t\twith HiddenPrints():\n\t\t\tTKs=run_recSys(query_phrase=query)\n\t\tflinks=[f\"{digi_base_url}?query={urllib.parse.quote(f'{query} {tk}')}\" for tk in TKs]\n\t\tprogress_bar.layout.visibility = 'hidden' # Hide progress bar\n\t\tslider_value.layout.visibility = 'visible' # Show slider\n\telse:\n\t\trecys_lbl.value = \"

Enter a valid search query first!

\"\n\tslider_value.value = 5 # Reset slider to its initial value\n\tupdate_recys_lbl(None)\n\ndef run_gui():\n\t# load files and spm:\n\n\tGUI=widgets.VBox(\n\t\t[widgets.HBox([left_image_widget, widgets.Label(value=' '), right_image_widget], layout=vbox_layout),\n\t\t welcome_lbl,\n\t\t entry,\n\t\t widgets.HBox([search_btn, widgets.Label(value=' '), clean_search_btn], layout=vbox_layout),\n\t\t nlf_link_lable,\n\t\t widgets.HBox([rec_btn, widgets.Label(value=' '), clean_recsys_btn], layout=vbox_layout),\n\t\t slider_value, # Added slider\n\t\t recys_lbl,\n\t\t progress_bar, # Added progress bar\n\t\t widgets.HBox([exit_btn], layout=vbox_layout),\n\t\t countdown_lbl],\n\t\tlayout=vbox_layout\n\t)\n\tdisplay(GUI)\n\nsearch_btn.on_click(get_nlf_link)\nclean_search_btn.on_click(clean_search_entry)\nslider_value.observe(update_recys_lbl, names='value') # real-time behavior\nrec_btn.on_click(rec_btn_click)\nclean_recsys_btn.on_click(clean_recsys_entry)","repo_name":"mrgransky/DARIAH-FI","sub_path":"gui_backend.py","file_name":"gui_backend.py","file_ext":"py","file_size_in_byte":8366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73553017457","text":"import json\nimport requests\nfrom flask import url_for\n\n\nwith open('feynman/workshops/utils/loremipsum.md') as f:\n guide = f.read()\n\n\ndef populate_workshops(ammount=None):\n url = url_for('workshops.workshop_list')\n data_file = open('feynman/workshops/utils/data.json')\n workshops = json.load(data_file)\n for workshop in workshops[:ammount]:\n workshop['guide'] = guide\n requests.post(url, json=workshop)\n return requests.get(url).json()['data']\n","repo_name":"didactar/feynman","sub_path":"feynman/workshops/utils/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70463383217","text":"import fileinput\nimport numpy 
as np\n\n\ndef sig_one(a):\n sig = ''\n for r in a.T:\n ones = list(r).count('1')\n zeroes = list(r).count('0')\n if ones >= zeroes:\n sig += '1'\n else:\n sig += '0'\n\n return sig\n\n\ndef sig_zero(a):\n sig = ''\n for r in a.T:\n ones = list(r).count('1')\n zeroes = list(r).count('0')\n if ones >= zeroes:\n sig += '0'\n else:\n sig += '1'\n\n return sig\n\n\ndef part1(a):\n gamma = sig_one(a)\n epsilon = sig_zero(a)\n\n gamma = int(gamma, base=2)\n epsilon = int(epsilon, base=2)\n\n return gamma * epsilon\n\n\ndef part2(a):\n o2_rating = np.copy(a)\n for i in range(len(sig_one(o2_rating))):\n if len(o2_rating) == 1:\n break\n\n sig = sig_one(o2_rating)\n\n new_rating = []\n for r in o2_rating:\n if r[i] == sig[i]:\n new_rating.append(list(r))\n\n o2_rating = np.array(new_rating)\n\n co2_rating = np.copy(a)\n for i in range(len(sig_zero(co2_rating))):\n if len(co2_rating) == 1:\n break\n\n sig = sig_zero(co2_rating)\n\n new_rating = []\n for r in co2_rating:\n if r[i] == sig[i]:\n new_rating.append(list(r))\n\n co2_rating = np.array(new_rating)\n\n o2_rating = ''.join(o2_rating[0])\n co2_rating = ''.join(co2_rating[0])\n\n o2_rating = int(o2_rating, base=2)\n co2_rating = int(co2_rating, base=2)\n\n return o2_rating * co2_rating\n\n\nif __name__ == '__main__':\n lines = []\n for line in fileinput.input():\n lines.append(list(line.strip()))\n\n a = np.array(lines)\n\n print(part1(a))\n print(part2(a))\n","repo_name":"theandrew168/advent-of-code","sub_path":"2021/day03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"23331969222","text":"#1. Make a program that selects a word from a list of words\n# 2. From each category, select a random word\n# 3. Accept user input that asks the user to type in a category\n# randomly_selected_word = \"orange\"\n# 4. _ _ _ _ _ _\n# 5. 
if you make a correct choice = \"o\" => o _ _ _ _ _\n# randomly_selected_word = \"watermelon\"\n# 6. _ _ _ _ _ _ _ _ _ _ _\n# 7. if you make a correct choice = \"e\" => _ _ _ e _ _ e _ _ _\n# 8. 7 attempts\n# 9. if you exhausted all 7 choices, show the correct word\n\n#setting up varialbes\nimport random\nguess_words = [\"pineapple\",\"pear\",\"orange\",\"banana\",\"mango\",\"apple\",\"grape\",\"watermelon\", \"peach\", \"dragonfruit\"]\n\nanswer = random.choice(guess_words)\nincorrect_guess_total = 0\nguessed_letters = []\nincorrect_letters = []\n\n#Running the game\nprint(\"Welcome to hangman!\")\n\ngame_over = False\nwhile game_over == False:\n guess = input(\"What is your next Guess?\")\n guessed_letters.append(guess)\n\n done = False\n while not done:\n for letter in answer:\n if letter in guessed_letters:\n print(letter, end=\" \")\n else:\n print(\"_\", end=\" \")\n if guess not in answer:\n incorrect_guess_total += 1\n incorrect_letters.append(guess)\n done = True\n\n\n print(\"totally incorrect guesses:\" + str(incorrect_guess_total))\n print(\"incorrect letters:\" + str(incorrect_letters))\n if incorrect_guess_total == 7:\n print(\"Game over!\")\n game_over == True\n\n\n","repo_name":"Brandon594/hangman_game_codingtemple","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33682720688","text":"#!/usr/bin/python3\n\n\"\"\"\nDefines a class\n\"\"\"\n\n\nclass Student:\n \"\"\"Represents a student\"\"\"\n\n def __init__(self, first_name, last_name, age):\n \"\"\"Initialize a new Student instance\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"Retrieve a dictionary representation of the Student instance\"\"\"\n if attrs is None:\n return self.__dict__\n json_dict = {}\n for attr in attrs:\n if attr in self.__dict__:\n json_dict[attr] = 
self.__dict__[attr]\n return json_dict\n","repo_name":"abdallahfarag72/alx-higher_level_programming","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8996754844","text":"import numpy as np\nimport pandas as pd\n\n\ndef _setattr(self, column_name, column):\n\n if column_name in \"Chromosome Strand\".split():\n raise Exception(\"The columns Chromosome and Strand can not be reset.\")\n\n isiterable = isinstance(column, list) or isinstance(\n column, pd.Series) or isinstance(column, np.ndarray)\n isdict = isinstance(column, dict)\n\n if isiterable:\n if not len(self) == len(column):\n raise Exception(\"DataFrame and column must be same length.\")\n\n already_exists = column_name in self.values()[0]\n\n if already_exists:\n pos = list(self.values()[0].columns).index(column_name)\n else:\n pos = self.values()[0].shape[1]\n\n start_length, end_length = 0, 0\n\n dfs = {}\n for k, df in self.items():\n\n end_length += len(df)\n\n if already_exists:\n df = df.drop(column_name, axis=1)\n\n if isiterable:\n df.insert(pos, column_name, column[start_length:end_length])\n elif isdict:\n df.insert(pos, column_name, column[k])\n else:\n df.insert(pos, column_name, column)\n\n start_length = end_length\n\n dfs[k] = df\n\n self.__dict__[\"dfs\"] = dfs\n\n\ndef _getattr(self, name):\n if name in self.values()[0]:\n return pd.concat([df[name] for df in self.values()])\n else:\n raise Exception(\"PyRanges object has no attribute\", name)\n","repo_name":"xtmgah/pyranges","sub_path":"pyranges/methods/attr.py","file_name":"attr.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"6823342378","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n 判断线程是否已经启动.\n 
线程的核心特征是能够以非确定性的方式(即,何时开始,何时被打断,何时恢复执行完全由操作系统调度管理,用户无法确定)独立\n 执行,如果程序中有其他线程需要判断某个线程是否已经到达执行过程中的某个点(线程同步问题),可以使用threading库中的Event对象\n\"\"\"\nfrom threading import Thread, Event\nimport threading\nimport time\n\ndef countdown(n, started_evt):\n print('countdown starting')\n started_evt.set()\n while n > 0:\n print('T-minus', n)\n n -= 1\n time.sleep(2)\n\nstarted_evt = Event()\n\nprint('Launching countdown')\n#主线程等待\nt = Thread(target=countdown, args=(10, started_evt))\nt.start()\nstarted_evt.wait()\nprint('countdown is running')\n\n\n\"\"\"\nEvent对象最好只用于一次性事件。\n如果线程打算一遍又一遍重复通知某个事件,最好使用Condition对象来处理。\n\"\"\"\n\n","repo_name":"ttlttl/PythonCookBook-study","sub_path":"12/12.2.py","file_name":"12.2.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33204355364","text":"class Solution:\n def addBinary(self, a: str, b: str) -> str:\n carry = 0\n m = len(a)\n n = len(b)\n res = ''\n\n for i in range(max(m,n)):\n a_digit = 0 if i >= m or a[-i-1] == '0' else 1\n b_digit = 0 if i >= n or b[-i-1] == '0' else 1\n total = a_digit + b_digit + carry\n\n carry = 0 if total < 2 else 1\n res = ('0' if total % 2 == 0 else '1') + res\n\n return '1' + res if carry else res\n","repo_name":"alvinkgao/leetcode_solutions","sub_path":"Python3/67.Add_Binary.py","file_name":"67.Add_Binary.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7966210462","text":"class Solution:\n def maxArea(self, height: List[int]) -> int:\n\n left = 0\n\n right = len(height) - 1\n\n current_max = 0\n\n while left != right:\n\n #get lowest height of the two pointers\n if height[left] > height[right]:\n lowest_height = height[right]\n container = (right - left) * lowest_height\n right -= 1\n else:\n lowest_height = height[left]\n container = (right - left) * lowest_height\n left += 1\n\n if current_max < 
container:\n current_max = container\n\n return current_max\n","repo_name":"Harrisfactory/LeetCode-Python","sub_path":"container-with-most-water.py","file_name":"container-with-most-water.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39221151064","text":"import pygtk\nimport gtk\n\nfrom IsCoder.Constants import *\n\nimport locale\nimport gettext\nlocale.setlocale(locale.LC_ALL, \"\")\ngettext.bindtextdomain(\"iscoder\", DataDir + \"/locale\")\ngettext.textdomain(\"iscoder\")\n_ = gettext.gettext\n\nclass Plugin(gtk.ScrolledWindow):\n \"\"\"Plugin class, all plugins should be inherited from this.\"\"\"\n\n def __init__(self, pname = \"\", cname = \"\", widgets = ()):\n gtk.ScrolledWindow.__init__(self)\n\n self.pname = pname\n self.cname = cname\n self.widgets = widgets\n\n self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)\n\n viewport = gtk.Viewport()\n viewport.set_shadow_type(gtk.SHADOW_NONE)\n self.add(viewport)\n\n vbox = gtk.VBox()\n vbox.set_border_width(5)\n viewport.add(vbox)\n\n for widget in widgets:\n vbox.pack_start(widget, False, False)\n","repo_name":"iven/IsCoder","sub_path":"IsCoder/Plugin.py","file_name":"Plugin.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41208490712","text":"class BookDB:\n books = [{\"name\": \"test_book\", \"author\": \"test_author\"}]\n\n def get_all(self):\n return self.books\n\n def retrieve_by_name(self, name):\n for book in self.books:\n if book[\"name\"] == name:\n return book\n return None\n\n def add(self, name, author):\n book_already_exists = False\n for book in self.books:\n if book[\"name\"] == name:\n book_already_exists = True\n if book_already_exists is False:\n new_book = {\n \"name\": name,\n \"author\": author\n }\n self.books.append(new_book)\n return new_book\n else:\n return False\n\n 
def delete_by_name(self, name):\n self.books = [book for book in self.books if book[\"name\"] != name]\n","repo_name":"doLphin3/python-Batwing-advanced","sub_path":"flask_rest/db/books_db.py","file_name":"books_db.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"3914067027","text":"class NumArray(object):\n\n def __init__(self, nums):\n \"\"\"\n :type nums: List[int]\n \"\"\"\n self.numArray = []\n for num in nums:\n self.numArray.append(num)\n\n def sumRange(self, left, right):\n \"\"\"\n :type left: int\n :type right: int\n :rtype: int\n \"\"\"\n \n # parameter\n Sum = 0\n\n # start computing\n for i in range(left, right+1):\n Sum += self.numArray[i]\n \n return Sum\n\n\n# Your NumArray object will be instantiated and called as such:\n# obj = NumArray(nums)\n# param_1 = obj.sumRange(left,right)\n","repo_name":"Koyama-Tsubasa/LeetCode","sub_path":"Problems/No.303_Range_Sum_Query/range_sum_query.py","file_name":"range_sum_query.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"18725204475","text":"import json, random, sys\n\njsonparse = json.loads(sys.argv[1])\n\ntransacoes = {}\ntotalempresas = {}\n\ntotaldepositado = 0\nndepositos = 0\n\ntotalentretenimento = 0\nnentretenimento = 0\n\ntotalinvestimento = 0\nninvestimento = 0\n\ntotaldespesas = 0\nndespesas = 0\n\nfor transacao in jsonparse['transacoes']:\n transacoes[transacao['descricao']] = {\"valor\" : transacao['valor'], \"categoria\" : transacao['categoria'], \"tipo\" : transacao['tipo']}\n \n if transacao['tipo'] != 'deposito':\n if transacao['descricao'] in totalempresas:\n if transacao['valor'] < 0:\n totalempresas[transacao['descricao']][\"valor\"] += transacao['valor']*-1\n totalempresas[transacao['descricao']][\"pagamentos\"] += 1\n else:\n totalempresas[transacao['descricao']] = {\"valor\" : 
(transacao['valor']*-1), \"pagamentos\" : 1}\n\n if transacao['tipo'] == \"deposito\":\n ndepositos += 1\n totaldepositado += transacao['valor']\n else:\n if transacao['categoria'] == \"entretenimento\":\n nentretenimento += 1\n totalentretenimento -= transacao['valor']\n\n elif transacao['categoria'] == \"investimento\":\n ninvestimento += 1\n totalinvestimento -= transacao['valor']\n\n elif transacao['categoria'] == \"despesas\":\n ndespesas += 1\n totaldespesas -= transacao['valor']\n\ngrandtotal = totalentretenimento + totalinvestimento + totaldespesas\nntotal = nentretenimento + ninvestimento + ndespesas\n\ndepositado = ''\ndivisao = ''\ndica = ''\n\nif totaldepositado == 0:\n depositado = 'Você não depositou nada neste período!'\nelse:\n depositado = 'Você depositou '+str(ndepositos)+' vezes neste período! tendo depositado um total de '+str(totaldepositado)+' com média de R$'+str(totaldepositado/ndepositos)+' por depósito!'\n\nif grandtotal == 0:\n divisao = 'Você não gastou nada neste período!'\nelse:\n divisao = 'Você gastou um total de R$'+str(grandtotal)+' neste período! 
Com um total de '+str(ntotal)+' transações, '\n divisao += 'sendo:\\n'+str(nentretenimento)+' em entretenimento, compondo '+str(round(totalentretenimento/grandtotal*100))+'% do total gasto, com um total de R$'\n if nentretenimento != 0:\n divisao += str(totalentretenimento)+'\\n'\n else:\n divisao += \"0\\n\"\n divisao += str(ninvestimento)+' em investimentos, compondo '+str(round(totalinvestimento/grandtotal*100))+'% do total gasto, total de R$'\n if ninvestimento != 0:\n divisao += str(totalinvestimento)+'\\n'\n else:\n divisao += \"0\\n\"\n divisao += str(ndespesas)+' em despesas, compondo '+str(round(totaldespesas/grandtotal*100))+'% do total gasto, total de R$'\n if ndespesas != 0:\n divisao += str(totaldespesas)+'\\n\\n'\n else:\n divisao += \"0\\n\"\n divisao += 'Suas transações foram centradas nas seguintes empresas/despesas:\\n'\n for name in totalempresas:\n divisao += name+' - '+str(totalempresas[name][\"pagamentos\"])+' Pagamento(s) total de R$'+str(totalempresas[name][\"valor\"])+'\\n'\n\nflags = {}\n\nif grandtotal > totaldepositado:\n flags[\"1\"] = {\"status\" : True, \"comment\" : \"none\"}\n\nif totalentretenimento > (totalinvestimento + totaldespesas):\n flags[\"2\"] = {\"status\" : True, \"comment\" : \"none\"}\n\nfor ts in transacoes:\n if transacoes[ts][\"categoria\"] == \"entretenimento\" and transacoes[ts][\"valor\"] > 5000:\n flags[\"3\"] = {\"status\" : True, \"comment\" : transacoes[ts][\"valor\"]}\n break\n if transacoes[ts][\"categoria\"] == \"investimento\" and transacoes[ts][\"valor\"] > 50000:\n flags[\"4\"] = {\"status\" : True, \"comment\" : ts}\n break\n\nfor name in totalempresas:\n if totalempresas[name][\"valor\"] > 5000:\n flags[\"5\"] = {\"status\" : True, \"comment\" : name}\n break\n\nchoice = random.randint(1, 5)\nif choice == 1:\n if \"1\" in flags:\n dica1 = 'Uma dica para lhe guiar melhor seria: Você gastou mais do que depositou! 
Evite este tipo de prática ou poderá acabar endividado/com saldo negativo!'\n dica = dica1\n choose = False\nelif choice == 2:\n if \"2\" in flags:\n dica2 = 'Uma dica para lhe guiar melhor seria: Você gastou mais com entretenimento do que com investimentos e despesas! Maus hábitos levam a más situações!'\n dica = dica2\n choose = False\nelif choice == 3:\n if \"3\" in flags:\n dica3 = 'Uma dica para lhe guiar melhor seria: Você gastou demais com entretenimento! com um total de R$'+str(flags[\"3\"][\"comment\"])+' gastos! Tente gastar menos no próximo período!'\n dica = dica3\n choose = False\nelif choice == 4:\n if \"4\" in flags:\n dica4 = 'Uma dica para lhe guiar melhor seria: Você investiu demais no '+flags[\"4\"][\"comment\"]+', tente distribuir mais seus investimentos, para evitar perder muito de uma vez caso falhe!'\n dica = dica4\n choose = False\nelif choice == 5:\n if \"5\" in flags:\n dica5 = 'Uma dica para lhe guiar melhor seria: Você gastou demais com '+flags[\"5\"][\"comment\"]+'! tente diminuir seus gastos neste estabelecimento!'\n dica = dica5\n choose = False\n\nif dica == '':\n dica = 'Você parece estar indo bem! 
caso continuar assim não precisará de dicas!'\n\nresultado = 'Visão geral:\\n\\n'+depositado+'\\n'+divisao+'\\n\\n'+dica\n\n\nprint('Depositado: {}\\n'.format(depositado))\nprint('Visão geral: {}\\n'.format(divisao))\nprint('Dica: {}\\n'.format(dica))","repo_name":"marcelosasamoto/NSFF-Back-end","sub_path":"src/PyScripts/SistemaEspecialista.py","file_name":"SistemaEspecialista.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32207172365","text":"from enum import IntEnum\r\nfrom typing import List\r\n\r\n\r\nclass GatesIdentfications(IntEnum):\r\n hadamard = 1\r\n cnot = 2\r\n ccnot = 3\r\n paulix = 4\r\n pauliy = 5\r\n pauliz = 5\r\n reset = 6\r\n invert_all_zero = 7\r\n invert_some_one = 8\r\n invert_all_one = 9\r\n diffusion = 10\r\n\r\n\r\nclass Gate:\r\n def __init__(self, gatter: GatesIdentfications, qubits: List[int]):\r\n self.qubits = qubits\r\n self.gatter = gatter\r\n\r\n def __repr__(self):\r\n return self.gatter.__repr__() + \" \" + self.qubits.__repr__()\r\n","repo_name":"marian-lingsch/qc-simulators","sub_path":"src/gate.py","file_name":"gate.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"69932948017","text":"# encoding: utf-8\n\"\"\"\n--------------------------------------\n@describe 数据整理\n@version: 1.0\n@project: yuqing_system\n@file: 词频统计_LDA主题模型.py\n@author: yuanlang \n@time: 2019-08-07 10:00\n---------------------------------------\n\"\"\"\nimport os\nimport jieba\nimport pymysql\nimport pandas as pd\nimport gensim\nimport numpy\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud#词云包\nfrom gensim import corpora, models, similarities\n# 编码问题\nplt.rcParams['figure.figsize'] = (5.0, 5.0)\nplt.rcParams['font.sans-serif'] = ['simhei']\nplt.rcParams['axes.unicode_minus'] = False\n\nprint(os.path.dirname(__file__))\n# 
导入停用词\nstopwords=pd.read_csv(f\"{os.path.dirname(__file__)}/stopwords.txt\",index_col=False,quoting=3,sep=\"\\t\",names=['stopword'], encoding='utf-8')\nstopwords=stopwords['stopword'].values\n\n# 读取新闻内容\ndf = pd.read_csv(f\"{os.path.dirname(__file__)}/地陷事件.csv\", encoding='utf-8',sep = '&@@&')\n# df = pd.read_csv(f\"{os.path.dirname(__file__)}/出租车罢工.csv\", encoding='utf-8',sep = '&@@&')\n# df = pd.read_csv(f\"{os.path.dirname(__file__)}/好一新大火.csv\", encoding='utf-8',sep = '&@@&')\n\nx=0\nlines=[((++x),item) for item in df.content.values.tolist()]\n\n# 原始数据\n# conn = pymysql.connect(host=\"127.0.0.1\", port=3306, user=\"root\", passwd=\"lang1994\", db=\"yuqing_db\", charset=\"utf8\")\n# cursor = conn.cursor()\n# cursor.execute(\"select * from context\")\n# lines=cursor.fetchall()\n\ndef db_to_csv(lines):\n \"\"\"保存到本地\"\"\"\n with open(\"好一新大火.csv\",\"w\",encoding=\"utf-8\") as f:\n f.writelines(\"url&@@&content\\n\")\n for line in lines:\n text = line[1].replace(\"\\n\", \"\").replace(\" \", \"\").replace(\"\\t\", \"\")\n print(text)\n f.writelines(\"\\\"\"+line[0]+\"\\\"\"+\"&@@&\"+\"\\\"\"+text+\"\\\"\\n\")\n\n# db_to_csv(lines)\n\ndef word_count(lines,stopwords):\n # 词频统计\n segment = []\n for line in lines:\n try:\n text = line[1].replace(\"\\n\", \"\").replace(\" \", \"\").replace(\"\\t\", \"\")\n segs = jieba.__lcut(text)\n for seg in segs:\n if len(seg) > 1 and seg != '\\r\\n' and seg not in stopwords:\n segment.append(seg)\n # print(segment)\n except Exception as e:\n print(e)\n\n words_df = pd.DataFrame({'segment': segment})\n words_stat = words_df.groupby(by=['segment'])['segment'].agg([\"size\"])\n words_stat = words_stat[1300:]\n words_stat = words_stat.reset_index().sort_values(by=[\"size\"], ascending=False)\n print(words_stat[:1500])\n wordcloud = WordCloud(font_path=\"simhei.ttf\", background_color=\"white\", max_font_size=80)\n word_frequence = {x[0]: x[1] for x in words_stat.head(1500).values}\n wordcloud = wordcloud.fit_words(word_frequence)\n 
plt.imshow(wordcloud)\n plt.show()\n\n# word_count(lines,stopwords)\n\ndef lda(lines,stopwords):\n \"\"\"lda主题\"\"\"\n sentences = []\n for line in lines:\n try:\n text = line[1].replace(\"\\n\", \"\").replace(\" \", \"\").replace(\"\\t\", \"\")\n segs = jieba.__lcut(text)\n segs = filter(lambda x: len(x) > 1, segs)\n segs = [seg for seg in list(segs) if seg not in stopwords]\n sentences.append(segs)\n except Exception as e:\n print(e)\n\n # 词袋模型\n dictionary = corpora.Dictionary(sentences)\n corpus = [dictionary.doc2bow(_sentence) for _sentence in sentences]\n lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)\n\n # 主题模型打印\n print(lda.print_topics())\n wors={}\n for topic in lda.print_topics():\n words=topic[1].split(\"+\")\n for word in words:\n ss=[ii.replace(\" \",\"\").replace(\"\\\"\",\"\") for ii in word.split(\"*\")]\n print(wors.get(ss[1],0),ss[0],wors.get(ss[1],0)+float(ss[0]))\n wors[ss[1]]=wors.get(ss[1],0)+float(ss[0])\n # print(ss)\n wors={x:float('%.3f'%y) for x,y in wors.items()}\n\n # 合并词\n data_dic = {'count': wors}\n data_df = pd.DataFrame(data_dic)\n data_df = data_df.reset_index().sort_values(by=[\"count\"], ascending=False)\n print(data_df[:10][\"index\"])\n print(data_df[:10].index)\n print(data_df[:10][\"count\"])\n\n number = numpy.array(data_df[:10][\"count\"].values*1000)\n work_type = data_df[:10][\"index\"].values\n\n\n labels = tuple(work_type)\n fracs = number\n\n print(labels)\n plt.pie(x=fracs, labels=labels, autopct='%.0f%%') # autopct显示百分比\n plt.show()\n\n\nlda(lines,stopwords)","repo_name":"langgithub/yuqing_system","sub_path":"clean/词频统计_LDA主题模型.py","file_name":"词频统计_LDA主题模型.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"57"} +{"seq_id":"70593419380","text":"# https://docs.google.com/spreadsheets/d/17ZXagPqNiEMrDuTvUTRTe0eJrG_8x_dzuRZXousIRGQ/edit?usp=sharing\nfrom bs4 import BeautifulSoup\nfrom 
selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\nimport pandas as pd\n\nmaster_list = []\nsearch_term = \"python web developer\"\nlocation = \"California\"\ncount = 0\nbase_url = \"https://www.indeed.com/viewjob?\"\n\n\nopts = Options()\nopts.add_argument(\n \"user-agent=[Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36]\")\n# opts.add_argument(\"--headless\")\nopts.add_argument(\n \"executable_path=[C:\\\\Users\\\\ayanU\\\\OneDrive\\\\Desktop\\\\Labs\\\\python\\\\bs4\\\\driver\\\\chromedriver.exe]\")\n\n\ndef make_page_request(url, opts):\n driver = webdriver.Chrome(options=opts)\n driver.get(url)\n pageSource = driver.page_source\n time.sleep(5)\n driver.quit()\n return pageSource\n\n\ndef parse(html, bs):\n soup = BeautifulSoup(html, 'html.parser')\n page_title = soup.title.text\n result_contents = soup.find_all('td', class_=\"resultContent\")\n\n for rc in result_contents:\n title = rc.find('h2', {'class': 'jobTitle'}).text\n company_location = rc.find('div', {'class': 'company_location'}).text\n company_name = rc.find('span', {'class': 'companyName'}).text\n try:\n salary = rc.find('div', {'class': 'salaryOnly'}).text\n except Exception as e:\n salary = \"NA\"\n url_link = base_url + \\\n rc.find('a', {'class': 'jcs-JobTitle'}).get('href').split('?')[-1]\n\n master_list.append({'title': title, 'company_name': company_name,\n 'company_location': company_location, 'salary': salary, 'link': url_link})\n\n\nfor i in range(1, 6):\n url = f\"https://www.indeed.com/jobs?q={search_term}&l={location}&start={count}\"\n source = make_page_request(url, opts)\n parse(source, BeautifulSoup)\n count += 10\n print(f'Scraped page {i}')\n\ndf = pd.DataFrame(master_list)\ndf.to_csv(f'{search_term}.csv', index=False)\n\nprint(f\"Total Scraped {len(master_list)} 
items\")\n","repo_name":"AyanUpadhaya/indeed-scraper","sub_path":"iscraper.py","file_name":"iscraper.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37937929604","text":"import pygame\n\nfrom planetjump.utils import load_image, terminate\nfrom planetjump.start_screen import StartScreen\n\n\nclass PauseScreen:\n def __init__(self, game):\n self.game = game\n self.surface = game.surface\n self.background = load_image(\"start_background.png\")\n self.pause = load_image(\"pause.png\")\n\n restart = load_image(\"restart.png\")\n self.restart_images = [(restart, (118, 300)),\n (pygame.transform.scale(restart, (170, 80)), (115, 298))]\n self.restart = self.restart_images[0]\n\n menu = load_image(\"menu.png\")\n self.menu_images = [(menu, (118, 400)),\n (pygame.transform.scale(menu, (170, 80)), (115, 398))]\n self.menu = self.menu_images[0]\n\n continue_button = load_image(\"continue.png\")\n self.continue_images = [(continue_button, (118, 200)),\n (pygame.transform.scale(continue_button, (170, 80)), (115, 198))]\n self.continue_button = self.continue_images[0]\n\n self.run()\n\n def run(self):\n while self.game.pause:\n self.events()\n self.update()\n\n def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n\n def update(self):\n self.mouse_handler()\n self.surface.blit(self.background, (0, 0))\n self.surface.blit(self.pause, (0, 50))\n self.surface.blit(self.restart[0], self.restart[1])\n self.surface.blit(self.menu[0], self.menu[1])\n self.surface.blit(self.continue_button[0], self.continue_button[1])\n pygame.display.flip()\n\n def mouse_handler(self):\n pos = pygame.mouse.get_pos()\n mouse_pressed = pygame.mouse.get_pressed()[0]\n\n if pygame.Rect(*self.continue_button[1],\n *self.continue_button[0].get_rect().size).collidepoint(*pos):\n self.continue_button = self.continue_images[1]\n if mouse_pressed:\n 
self.game.pause = False\n else:\n self.continue_button = self.continue_images[0]\n\n if pygame.Rect(*self.restart[1], *self.restart[0].get_rect().size).collidepoint(*pos):\n self.restart = self.restart_images[1]\n if mouse_pressed:\n self.game.__init__(self.surface)\n else:\n self.restart = self.restart_images[0]\n\n if pygame.Rect(*self.menu[1], *self.menu[0].get_rect().size).collidepoint(*pos):\n self.menu = self.menu_images[1]\n if mouse_pressed:\n start = StartScreen(self.surface)\n start.run()\n self.game.__init__(self.surface)\n else:\n self.menu = self.menu_images[0]\n","repo_name":"Protocs/pygame-for-kids","sub_path":"planetjump/pause_screen.py","file_name":"pause_screen.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19540876895","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 12 20:40:16 2018\n\n@author: X201\n\"\"\"\n\nlist=[30,98,12,191,66,47,82,54]\nfor i in range (0,len(list)-1):\n min=i\n for l in range(i+1,len(list)):\n if list[l]{0}=\"\"...>
'\n u'{0} edge of the clipping rectangle. (float (µm), {1}INF by default)'.format(b, sign))\n sign = '+' if sign == '-' else '-'\n super().construct_form()\n\n def fill_form(self):\n super().fill_form()\n for b in self.node.bound_names():\n getattr(self, b).setText(none_to_empty(getattr(self.node, b)))\n\n\nclass GNFlipMirrorController(GNObjectController):\n\n def _save_axis_undoable(self):\n self._set_node_by_setter_undoable(lambda n, v: n.set_axis(v),\n empty_to_none(\n self.axis.currentText()), self.node.axis, 'change axis property')\n #self.node.set_axis(empty_to_none(self.axis.currentText()))\n\n def fill_form(self):\n super().fill_form()\n with BlockQtSignals(self.axis):\n self.axis.setEditText(none_to_empty(self.node.axis_str()))\n\n\nclass GNFlipController(GNFlipMirrorController):\n\n def construct_form(self):\n self.construct_group('Flip Settings')\n self.axis = self.construct_combo_box('Flipped axis', items=self.node.get_axes_conf_dim(), change_cb=self._save_axis_undoable)\n self.axis.setToolTip('<flip axis=\"\" ...>
'\n 'Name of the inverted axis (i.e. perpendicular to the reflection plane). (required)')\n super().construct_form()\n\n\nclass GNMirrorController(GNFlipMirrorController):\n\n def construct_form(self):\n self.construct_group('Mirror Settings')\n self.axis = self.construct_combo_box('Mirrored axis', items=self.node.get_axes_conf_dim(), change_cb=self._save_axis_undoable)\n self.axis.setToolTip('<mirror axis=\"\" ...>
'\n 'Name of the mirrored axis (i.e. perpendicular to the reflection plane). (required)')\n super().construct_form()\n\n\nclass GNExtrusionController(GNObjectController):\n\n def construct_form(self):\n self.construct_group('Extrusion Settings')\n self.length = self.construct_line_edit('Length:', unit=u'µm', node_property_name='length', display_property_name='extrusion length')\n self.length.setToolTip(u'<extrusion length=\"\" ...>
'\n u'Length of the extrusion. (float (µm), required)')\n super().construct_form()\n\n def fill_form(self):\n super().fill_form()\n self.length.setText(none_to_empty(self.node.length))\n\n\nclass GNRevolutionController(GNObjectController):\n\n def construct_form(self):\n self.construct_group('Revolution Settings')\n self.auto_clip = self.construct_combo_box('Auto clip:', items=['', 'yes', 'no'],\n node_property_name='auto_clip', display_property_name='auto clip')\n self.auto_clip.setToolTip(u'<revolution auto-clip=\"\" ...>
'\n u'The value of this attribute can be either \\'yes\\' of \\'no\\'.'\n u' It specifies whether the item will be implicitly clipped to non-negative '\n u'transverse coordinates. Defaults to \\'no\\'.')\n super().construct_form()\n self.rev_step_num = self.construct_line_edit('Maximum steps number:', node_property_name='rev_step_num',\n display_property_name='maximum steps number')\n self.rev_step_num.setToolTip(u'<revolution rev-steps-num=\"\" rev-steps-dist=\"\" ...>
'\n u'Maximum number of the mesh steps in horizontal directions the revolution is '\n u'divided into. (integer)')\n self.rev_step_num.setPlaceholderText('10')\n self.rev_step_dist = self.construct_line_edit('Minimum step size:', node_property_name='rev_step_dist',\n display_property_name='minimum step size', unit=u'µm')\n self.rev_step_dist.setToolTip(u'<revolution rev-steps-num=\"\" rev-steps-dist=\"\" ...>
'\n u'Minimum step size in horizontal directions.')\n self.rev_step_dist.setPlaceholderText('0.005')\n\n def fill_form(self):\n super().fill_form()\n with BlockQtSignals(self.auto_clip, self.rev_step_num, self.rev_step_dist):\n self.auto_clip.setEditText(none_to_empty(self.node.auto_clip))\n self.rev_step_num.setText(none_to_empty(self.node.rev_step_num))\n self.rev_step_dist.setText(none_to_empty(self.node.rev_step_dist))\n\n\nclass GNTranslationController(GNObjectController):\n\n def construct_form(self):\n self.construct_group('Translation Settings')\n def setter(n, v): n.vector = v\n weakself = weakref.proxy(self)\n self.vector = self.construct_point_controllers(row_name='Vector', change_cb=lambda point:\n weakself._set_node_by_setter_undoable(setter, list(point),\n weakself.node.vector, 'change translation vector'))\n super().construct_form()\n\n def fill_form(self):\n super().fill_form()\n for i in range(0, self.node.dim):\n self.vector[i].setText(none_to_empty(self.node.vector[i]))\n\n\nclass GNArrangeController(GNObjectController):\n\n def construct_form(self):\n self.construct_group('Arrange Settings')\n def setter(n, v): n.step = v\n weakself = weakref.proxy(self)\n self.step = self.construct_point_controllers(row_name='Step', change_cb=lambda point:\n weakself._set_node_by_setter_undoable(setter, list(point), weakself.node.step, 'change step in arrange'))\n self.count = self.construct_line_edit('Count:', node_property_name='count')\n self.count.setToolTip(u'<arrange count=\"\" ...>
'\n u'Number of item repetitions.')\n super().construct_form()\n\n def fill_form(self):\n super().fill_form()\n for i in range(0, self.node.dim):\n self.step[i].setText(none_to_empty(self.node.step[i]))\n self.count.setText(none_to_empty(self.node.count))\n","repo_name":"PhotonicsTUL/PLaSK","sub_path":"gui/controller/geometry/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"15647892821","text":"import pygame \nimport random \nimport time \npygame.init()\n\nWIDTH, HEIGHT = 650,650\n\nWIN = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption('Confetti !!!')\n\nFPS = 60 \n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nRED = (255,0,0)\nGREEN = (0,255,0)\nBLUE = (0,0,255)\nRANDOM1 = (88,0,44)\nRANDOM2 = (99,44,76)\nRANDOM3 = (56,27,84)\nRANDOM4 = (0,58,99)\nRANDOM5 = (254,96,177)\nRANDOM6 = (200,6,99)\nRANDOM7 = (50,74,9)\nRANDOM8 = (80,60,89)\nRANDOM9 = (0,50,60)\nRANDOM10 = (50,0,60)\nCOLORS = [RED, GREEN, BLUE, WHITE,RANDOM1,RANDOM2,RANDOM3,RANDOM4,RANDOM5,RANDOM6,RANDOM7,RANDOM8,RANDOM9,RANDOM10 ]\n\n\n\n\n\nclass Square: \n def __init__ (self, x, y, width, height, y_vel, color, x_vel ): \n self.x = self.original_x = x\n self.y = y \n self.width = width \n self.height = height \n self.y_vel = y_vel\n self.color = color\n self.x_vel = x_vel \n\n def draw(self, win): \n pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height))\n\n\n def move(self):\n self.y = self.y + self.y_vel \n self.x += self.x_vel \n\n\nclass Circle: \n def __init__ (self, x, y, radius, y_vel, color, x_vel): \n self.x = self.original_x = x \n self.y = y \n self.radius = radius \n self.color = color\n self.y_vel = y_vel \n self.x_vel = x_vel \n\n def draw(self, win): \n pygame.draw.circle(win, self.color, (self.x,self.y), self.radius)\n\n def move(self): \n self.y += self.y_vel \n self.x += self.x_vel \n\n\n\nclass Triangle: 
\n def __init__ (self, x,y, y_vel, color, x_vel): \n self.x = self.original_x = x \n self.y = y \n self.color = color \n self.y_vel = y_vel \n self.x_vel = x_vel \n \n def draw(self, win):\n pygame.draw.polygon(win, self.color, ((self.x,self.y), (self.x-8,self.y+8), (self.x+8, self.y+8))) \n\n def move(self): \n self.y += self.y_vel \n self.x += self.x_vel \n\n\n\ndef draw(win, squares, circles, triangles):\n win.fill(BLACK)\n \n \n # putting in my square \n for square in squares:\n if square.y < HEIGHT: \n square.draw(win)\n else: \n square.y = 0\n\n \n for circle in circles: \n if circle.y < HEIGHT: \n circle.draw(win)\n else:\n circle.y = 0 \n\n for triangle in triangles: \n if triangle.y < HEIGHT: \n triangle.draw(win)\n else: \n triangle.y = 0 \n\n\n\n pygame.display.update()\n\n\n\ndef main(): \n run = True \n clock = pygame.time.Clock()\n squares = []\n for idx in range(15): \n squares.append(Square(random.randint(0,650) ,0,10,10,random.randint(2,10), random.choice(COLORS), random.randint(-2,2)))\n\n circles = []\n for idx in range(15): \n circles.append(Circle(random.randint(0,650),0, 5, random.randint(2,10), random.choice(COLORS), random.randint(-2,2)))\n\n\n triangles = []\n for idx in range(15): \n triangles.append(Triangle(random.randint(0,650), 0, random.randint(2,10), random.choice(COLORS),random.randint(-2,2)))\n\n\n while run: \n clock.tick(FPS)\n now = time.time()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n run = False \n \n \n for square in squares: \n square.move()\n\n for circle in circles: \n circle.move()\n\n for triangle in triangles: \n triangle.move()\n\n \n draw(WIN, squares, circles, triangles )\n \n quit()\n\n\nif __name__ == '__main__': \n main()","repo_name":"PurchaseColin/Python-Apps-","sub_path":"app-10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} 
+{"seq_id":"31196512401","text":"lst = [4, 5, 1, 1, 2, 7, 8, 9, 2, 1, 3, 2, 3, 2, 6, 8, 7, 9, 10]\r\n\r\ndict1 ={value:lst.count(value) for value in lst}\r\nres = {}\r\nfor i, j in dict1.items():\r\n if i not in res:\r\n res.setdefault(i, j)\r\nprint(res)\r\n\r\nprint(\"-\" * 40)\r\n\r\nsample_list = [4, 5, 1, 1, 2, 7, 8, 9, 2, 1, 3, 2, 3, 2, 6, 8, 7, 9, 10]\r\nN = int(input(\"Enter number: \"))\r\ncount = sample_list.count(N)\r\nprint(count)\r\n","repo_name":"Vaishuingle/Code-test","sub_path":"Day 03- Assignments/Day 03/day03-13.py","file_name":"day03-13.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"23648372528","text":"import pymongo\r\nimport json\r\nimport os\r\n\r\nclient = pymongo.MongoClient('localhost', 27017)\r\ndb = client.log_db\r\nweek1 = db.log_col1\r\nweek2 = db.log_col2\r\nweek3 = db.log_col3\r\nweek4 = db.log_col4\r\n\r\n\r\nfor file in os.listdir('./'):\r\n if file.endswith(\".json\"):\r\n name = str(file)\r\n date = int(name[8:10])\r\n if 0Page not found')\n\ndef common_search(request):\n if request.method == 'POST':\n sequence = request.POST.get('sequence')\n technolgies = request.POST.getlist('checks')\n \n 'Check if only 2 technologies have been selected'\n if len(technolgies) < 2 or len(technolgies) > 2:\n return HttpResponseNotFound('Go back and select only two technologies please')\n if len(sequence) == 0:\n return HttpResponseNotFound('Go back and please input a sequence')\n \n tech1 = get_object_or_404(Technology,pk=technolgies[0])\n tech2 = get_object_or_404(Technology, pk=technolgies[1])\n \n 'Check if user upload snp-file'\n form = DocumentForm(request.POST, request.FILES) \n if form.is_valid():\n tech1.snp_file = snp_file = request.FILES['docfile']\n tech2.snp_file = snp_file = request.FILES['docfile']\n tech1.save()\n tech2.save()\n 'If no upload nothing happens'\n \n save_folder1 = gc.draw_common(tech1, tech2, sequence)\n 
save_folder2 = gd.draw_differ(tech1,tech2,sequence)\n tech1.commonGraph = save_folder1\n tech1.differGraph = save_folder2\n \n tech2.commonGraph = save_folder1\n tech2.differGraph = save_folder2\n \n tech1.compared_with = tech2.technology\n tech2.compared_with = tech1.technology\n \n tech1.save() \n tech2.save()\n \n return render(request, 'pages/common_motif.html', {'technology': tech1})\n \n return HttpResponseNotFound('

Page not found

') \n \n#===============================================================================\n# Helper methods retrieve files and create immage\n#===============================================================================\n","repo_name":"CostaLab/practical_SS2015","sub_path":"Webinterface/database/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"2818750048","text":"import json\r\nimport paho.mqtt.client as mqtt\r\nimport sqlite3\r\nimport os\r\nimport datetime\r\nimport configparser\r\n\r\nDB_PATH = \"./rasp.sqlite3\"\r\n\r\n\r\ndef on_connect(client, userdata, flag, rc):\r\n\tprint('mqtt status {0}'.format(rc))\r\n\tclient.subscribe(\"labmen/pi/info\")\r\n\r\n\r\ndef on_message(client, userdata, msg):\r\n\tprint(\"Received message! -> {0}\".format(datetime.datetime.now()))\r\n\ttry:\r\n\t\tdata = json.loads(msg.payload.decode(\"utf-8\"))\r\n\t\tdata_time = \"{0}-{1}-{2} {3}:{4}:{5}\".format(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tdata[\"time\"][\"year\"], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tdata[\"time\"][\"month\"],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tdata[\"time\"][\"day\"], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tdata[\"time\"][\"hour\"], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tdata[\"time\"][\"minute\"], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tdata[\"time\"][\"second\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\r\n\t\tif not os.path.isfile(DB_PATH):\r\n\t\t\tprint(\"Error DB not found!\")\r\n\t\t\texit(1)\r\n\t\tq = \"insert into rasp values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\r\n\t\td = (\r\n\t\t\tdata_time, \r\n\t\t\tdata[\"cpu\"][\"temp (\\'C)\"], \r\n\t\t\tdata[\"cpu\"][\"clock (Hz)\"],\r\n\t\t\tdata[\"cpu\"][\"use rate (%)\"], \r\n\t\t\tdata[\"memory\"][\"total (MB)\"], \r\n\t\t\tdata[\"memory\"][\"used (MB)\"], \r\n\t\t\tdata[\"memory\"][\"free (MB)\"], \r\n\t\t\tdata[\"system\"][\"power status\"][\"raw_value\"], \r\n\t\t\tdata[\"system\"][\"power 
status\"][\"status\"],\r\n\t\t\tdata[\"system\"][\"process\"][\"process_num\"]\r\n\t\t\t)\r\n\t\tconn = sqlite3.connect(DB_PATH)\r\n\t\tc = conn.cursor()\r\n\t\tc.execute(q, d)\r\n\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\t\tprint(\"Insert complete! -> {0}\".format(datetime.datetime.now()))\r\n\texcept:\r\n\t\timport traceback\r\n\t\ttraceback.print_exc()\r\n\r\n\r\ndef on_disconnect(client, userdata, flag, rc):\r\n\tif rc != 0:\r\n\t\tprint(\"Unexpected disconnection.\")\r\n\r\n\r\ndef main():\r\n\tsetting_fpath = \"./setting.ini\"\r\n\tconfig = configparser.ConfigParser()\r\n\tif not os.path.isfile(setting_fpath):\r\n\t\tprint(\"please type broker ip address -> \", end=\"\")\r\n\t\tip = input()\r\n\t\tprint(\"please type broker port -> \", end=\"\")\r\n\t\tport = input()\r\n\t\tconfig[\"Broker\"] = {\r\n\t\t\t\"ip\": ip,\r\n\t\t\t\"port\": port\r\n\t\t}\r\n\t\twith open(setting_fpath, 'w') as configfile:\r\n\t\t\tconfig.write(configfile)\r\n\t\tprint(\"Finish to save setting file\")\r\n\t\tprint(\"exit...\")\r\n\t\texit()\r\n\tif not os.path.isfile(DB_PATH):\r\n\t\tq = 'create table rasp(\\\r\n\t\t\ttime text, \\\r\n\t\t\tcpu_temp float, \\\r\n\t\t\tcpu_freq integer, \\\r\n\t\t\tcpu_usage float, \\\r\n\t\t\tmem_total integer, \\\r\n\t\t\tmem_used integer, \\\r\n\t\t\tmem_free integer, \\\r\n\t\t\tthrottled_raw text, \\\r\n\t\t\tthrottled_stat text, \\\r\n\t\t\tprocess_num integer \\\r\n\t\t\t);'\r\n\r\n\t\tconn = sqlite3.connect(DB_PATH)\r\n\t\tc = conn.cursor()\r\n\t\tc.execute(q)\r\n\t\tconn.commit()\r\n\t\tconn.close()\r\n\r\n\tconfig.read(setting_fpath)\r\n\tip = config[\"Broker\"][\"ip\"]\r\n\tport = int(config[\"Broker\"][\"port\"])\r\n\tprint(\"Broker: {0}:{1}\".format(ip, port))\r\n\r\n\tclient = mqtt.Client()\r\n\tclient.on_connect = on_connect\r\n\tclient.on_disconnect = on_disconnect\r\n\tclient.on_message = on_message\r\n\r\n\tclient.connect(ip, port, keepalive=60)\r\n\r\n\ttry:\r\n\t\tclient.loop_forever()\r\n\texcept 
KeyboardInterrupt:\r\n\t\tpass\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","repo_name":"takkaO/ESP32-Pi_SIM_Project","sub_path":"db/mqtt_sub-raspi_info/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28179295254","text":"from arm import Arm\nfrom endeffector import EndEffector\nfrom armvisualizer import ArmVisualizer\nfrom wordfinder import wordFinder\nfrom linalg_utils import LinalgUtils as LU\n\nimport numpy as np\nimport modern_robotics as mr\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\n\ndef main():\n const_r = 0\n const_r_mat = np.array([[1, 0, 0, 0],\n [0, np.cos(const_r), -np.sin(const_r), 0],\n [0, np.sin(const_r), np.cos(const_r), 0],\n [0, 0, 0, 1]])\n arm = Arm(\"6dof.urdf\", const_r_mat)\n armvis = ArmVisualizer(arm)\n # armvis.startMeshcat()\n \n seconds = 2\n # joint_traj = [[t, t, t, t, t, t, t, d, d] for t, d in zip(np.linspace(0, 2*np.pi, seconds*60), np.linspace(0, 0.05, seconds*60))]\n # armvis.animateArm(joint_traj)\n \n start_thetas = [0, 0, 0, 0, 0, 0, 0, 0]\n \n goal_config = np.array([.2, .2, .2, 1, 0, 0, 0]) # x y z qw qx qy qz\n joint_traj = arm.IK(goal_config, start_thetas=start_thetas)\n \n start_p = arm.getArmTipTransform(start_thetas)[:3,-1]\n endpositions = [start_p]\n for thetas in joint_traj:\n pos = arm.getArmTipTransform(thetas)[:3,-1]\n endpositions.append(pos.copy())\n endpositions = np.array(endpositions)\n \n # print(start_thetas)\n \n # end_thetas = [1, 1, 1, 1, 1, 1, .020, .020]\n \n # goal_config = np.array([.3, -.2, .8, -0.11698, 0.07755, 0.82524, 0.54706]) # x y z qw qx qy qz\n # goal_thetas = arm.IK(goal_config)\n \n # # TODO: end_thetas = arm.getJointAngles(endeffector_pose_end)\n # joint_traj = mr.JointTrajectory(start_thetas, end_thetas, seconds, seconds*60, method=3)\n # armvis.animateArm(joint_traj)\n \n \n \n \n # player_letters = 
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'H', 'I', 'J', 'J', 'K', 'L', 'M', 'N', 'O', 'A', 'C', 'E', 'K']\n # finder = wordFinder(player_letters)\n\n # finder.findBestMove()\n # finder.findBestMove()\n \n \n # Plot solution\n fig = plt.figure(figsize=(5, 6))\n ax = fig.add_subplot(projection='3d')\n ax.scatter(goal_config[0], goal_config[1], goal_config[2], color='blue')\n ax.scatter(start_p[0], start_p[1], start_p[2], color='orange')\n ax.plot3D(endpositions[:,0], endpositions[:,1], endpositions[:,2], 'chartreuse')\n\n\n # Compute goal coordinate frame axes\n axis_length = .01\n g_goal = LU.xyzQuatToTransform(goal_config)\n goal_p = g_goal[:3,-1]\n goal_axes = np.array([(g_goal @ np.append(np.eye(3)[:,i]*axis_length, 1))[:-1] for i in range(3)])\n\n # Draw coordinate frames\n ax.plot3D((goal_p[0], goal_axes[0,0]), (goal_p[1], goal_axes[0,1]), (goal_p[2], goal_axes[0,2]), color='red')\n ax.plot3D((goal_p[0], goal_axes[1,0]), (goal_p[1], goal_axes[1,1]), (goal_p[2], goal_axes[1,2]), color='green')\n ax.plot3D((goal_p[0], goal_axes[2,0]), (goal_p[1], goal_axes[2,1]), (goal_p[2], goal_axes[2,2]), color='blue')\n\n # Compute start coordinate frame axes\n g_start = arm.getArmTipTransform(joint_traj[0])\n start_s = g_start[:3,-1]\n start_axes = np.array([(g_start @ np.append(np.eye(3)[:,i]*axis_length, 1))[:-1] for i in range(3)])\n\n # Draw coordinate frames\n ax.plot3D((start_p[0], start_axes[0,0]), (start_p[1], start_axes[0,1]), (start_p[2], start_axes[0,2]), color='red')\n ax.plot3D((start_p[0], start_axes[1,0]), (start_p[1], start_axes[1,1]), (start_p[2], start_axes[1,2]), color='green')\n ax.plot3D((start_p[0], start_axes[2,0]), (start_p[1], start_axes[2,1]), (start_p[2], start_axes[2,2]), color='blue')\n\n # set axis limits\n # maxZ = 2.25\n # minZ = maxZ - 0.02\n # maxX = 0.55\n # minX = maxX - 0.12\n # maxY = 1.12\n # minY = maxY - 0.12\n\n # ax.scatter([0, 0], [0, 0], [maxZ, minZ], s=0)\n # ax.axis([minX, maxX, minY, maxY])\n\n ax.set_xlabel(\"x\")\n 
ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n\n ax.legend([\"start pose\", \"goal pose 2\"])\n plt.show()\n\n\nif __name__ == '__main__':\n main()","repo_name":"sschoedel/KDCproject","sub_path":"main_iterative_jacobian_ik.py","file_name":"main_iterative_jacobian_ik.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22186688097","text":"import datetime\nimport pandas as pd\nimport numpy as np\nimport os\n\nimport rent_per_owner as rent\n\n\n# ----------------------------------------------------------------------------\n# functions\n\n\ndef load_csv_as_df(csv_file):\n \"\"\"\n returns dataframe for csv_file\n \"\"\"\n\n skipped_rows = list(range(9))\n return pd.read_csv(\n csv_file,\n delimiter=';', \n skiprows=skipped_rows, \n parse_dates=True, \n header=0, \n names=header).drop(labels='Keine Ahnung', axis=1\n )\n\n\ndef load_data():\n \"\"\"\n merges all dataframes from avaliable csv files\n \"\"\"\n df = pd.DataFrame(columns=header).drop(labels='Keine Ahnung', axis=1)\n\n csv_files = []\n substrings = [\"Umsatzauskunft_KtoNr0910387433\", \".csv\"]\n counter = 0\n for file in os.listdir():\n if substrings[0] in file and substrings[1] in file:\n df_aux = load_csv_as_df(file)\n if df_aux is not None:\n csv_files.append(file)\n df = pd.concat([df, df_aux])\n counter += 1\n\n print(f'Include {counter} files into data:')\n for csv in csv_files:\n print(f' Included {csv}')\n print()\n df.drop_duplicates(subset=header.remove(\"Keine Ahnung\"), inplace=True)\n\n return df\n\n\ndef filter_rent_time_for_month(df, month, year):\n \"\"\"\n returns lines, where rent for specific month and year is payed typically\n \"\"\"\n\n days_before = 7\n days_after = 20\n\n date_string = f'1.{month}.{year}'\n\n date_beg = pd.to_datetime(date_string, format='%d.%m.%Y')\n date_beg -= datetime.timedelta(days=days_before)\n date_end = pd.to_datetime(date_string, format='%d.%m.%Y')\n 
date_end += datetime.timedelta(days=days_after)\n\n return (df.date_booking >= date_beg) & (df.date_booking <= date_end)\n\n\ndef filter_quarter_of_year(df, quarter, year):\n \"\"\"\n returns lines of specific quarter of a year\n \"\"\"\n\n month_beg = (quarter-1) * 3 + 1\n\n date_string_beg = f'1.{month_beg}.{year}'\n date_string_end = f'1.{(month_beg + 3)%12}.{year + (0 if quarter != 4 else 1)}'\n\n date_beg = pd.to_datetime(date_string_beg, format='%d.%m.%Y')\n date_end = pd.to_datetime(date_string_end, format='%d.%m.%Y')\n date_end -= datetime.timedelta(days=1)\n\n return (df.date_booking >= date_beg) & (df.date_booking <= date_end)\n\n\ndef rent_payment_by_inhabitant(inhabitants, month, year):\n \"\"\"\n returns dict: rent payed by each inhabitants in specific month and year\n \"\"\"\n \n global volume\n\n # mask for lines representing rent payments\n #rent = volume.detail.str.lower().str.find('miet') >= 0\n right_amount = (volume.amount > 300) & (volume.amount < 900)\n\n rent_payment = {}\n for inhabitant in inhabitants:\n is_inhabitant = volume.orderer.str.lower().str.find(inhabitant.lower()) >=0\n rent_payment[inhabitant] = volume[\n is_inhabitant &\n filter_rent_time_for_month(volume, month, year) & \n right_amount\n ].amount.sum()\n return rent_payment\n \n\ndef persistent_outgoing_per_quarter(receiver, col_dict, quarter, year):\n \"\"\"\n returns sum of persistent outgoings of one type (search string) for a quarter and year\n \"\"\"\n\n global volume\n\n filter_quarter = filter_quarter_of_year(volume, quarter, year)\n if receiver == \"Prager\":\n filter_quarter = filter_rent_time_for_month(volume, (quarter-1)*3+1 , year) | \\\n filter_rent_time_for_month(volume, (quarter-1)*3+2 , year) | \\\n filter_rent_time_for_month(volume, (quarter-1)*3+3 , year)\n\n for col in col_dict:\n filter_quarter = filter_quarter & (volume[col].str.lower().str.find(col_dict[col].lower()) >= 0)\n return volume[filter_quarter].amount.sum()\n\n\ndef 
print_rent_payments(dict_payment_by_inhabitant):\n print(f'Mieten für {month:02}.{year}')\n sum = 0\n for inh in dict_payment_by_inhabitant:\n if dict_rpbi[inh] != 0: print(f' {inh}: {dict_rpbi[inh]:#.2f} EUR')\n sum += dict_rpbi[inh]\n print(f' Summe: {sum:#.2f} EUR')\n print()\n\n\ndef print_persistent_outgoing_of_quarter(filter_dict, quarter, year):\n print(f'Ständige Ausgänge in Quartal {quarter} von {year}:')\n sum = 0\n for receiver in filter_dict:\n amount = persistent_outgoing_per_quarter(receiver, filter_dict[receiver], quarter, year)\n sum += amount\n print(f' {receiver}: {amount:.2f} EUR')\n print(f' Summe: {sum:.2f} EUR')\n print()\n\n\ndef clean_data():\n volume.amount = volume.amount.str.extract(r'([-,0-9]*)')\n volume.amount = volume.amount.str.replace('.', '', regex=False).str.replace(',', '.', regex=False).astype(float)\n\n volume.balance = volume.balance.str.extract(r'([-,.0-9]*)')\n volume.balance = volume.balance.str.replace('.', '', regex=False).str.replace(',', '.', regex=False).astype(float)\n\n volume.date_booking = pd.to_datetime(volume.date_booking, format='%d.%m.%Y')\n volume.date_value = pd.to_datetime(volume.date_value, format='%d.%m.%Y')\n\n\n# ----------------------------------------------------------------------------\n# global variables\n\nheader = [\n \"date_booking\",\n \"date_value\",\n \"volume_kind\", \n \"detail\", \n \"orderer\", \n \"receiver\", \n \"amount\", \n \"balance\", \n \"Keine Ahnung\"\n]\n\ninhabitants = [\n 'Thomas Vogg', \n 'Carl-Maria Stracke', \n 'Felicia Wiehler', \n 'Engels Teresa', \n 'Florian Duffe', \n 'Mara Pollak', \n 'Sahra Al-Yassin',\n 'Natascha Reichert'\n]\n\npersistent_outgoings_dict = {\n \"Stadtwerke\":{\n \"receiver\":\"swm\"}, \n \"M-Net\":{\n \"receiver\":\"m-net\"},\n \"Rundfunkgebühr\":{\n \"receiver\":\"rundfunk\"},\n \"Postbank\":{\n \"volume_kind\":\"zinsen\"},\n \"Prager\":{\n \"receiver\":\"liegenschaftsverwaltung\", \"detail\":\"miete\"}\n}\n\n\n\n# 
----------------------------------------------------------------------------\n# load and clean data\n\nvolume = load_data()\nclean_data()\nvolume.to_csv('saved.csv')\n\n\n# ----------------------------------------------------------------------------\n# main\n\n# example for rents\nyear = 2021\nfor month in range(1,8):\n dict_rpbi = rent_payment_by_inhabitant(inhabitants, month, year)\n print_rent_payments(dict_rpbi)\n\n\n# ständige ausaben quartal\n# datum | betrag | name | Verwendungszweck\nprint_persistent_outgoing_of_quarter(persistent_outgoings_dict, 1, 2021)\nprint_persistent_outgoing_of_quarter(persistent_outgoings_dict, 2, 2021)\nprint_persistent_outgoing_of_quarter(persistent_outgoings_dict, 3, 2021)\nprint_persistent_outgoing_of_quarter(persistent_outgoings_dict, 4, 2021)\n\n# ausserordentliche ausaben\n# datum | betrag | name | Verwendungszweck\n\n\n# eingänge\n# ständig\n# ausserordentlich\n\n\n","repo_name":"tomobones/python_automation","sub_path":"account_monthly/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29342236049","text":"# (C) Daniel Strano and the Qrack contributors 2017-2023. 
All rights reserved.\n#\n# Use of this source code is governed by an MIT-style license that can be\n# found in the LICENSE file or at https://opensource.org/licenses/MIT.\n\nimport copy\nimport ctypes\nimport math\nimport re\nfrom .qrack_system import Qrack\nfrom .pauli import Pauli\n\n_IS_QISKIT_AVAILABLE = True\ntry:\n from qiskit.circuit import QuantumRegister, Qubit\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from qiskit.compiler import transpile\n from qiskit.qobj.qasm_qobj import QasmQobjExperiment\n from qiskit.quantum_info.operators.symplectic.clifford import Clifford\n from .util import convert_qiskit_circuit_to_qasm_experiment\nexcept ImportError:\n _IS_QISKIT_AVAILABLE = False\n\n_IS_NUMPY_AVAILABLE = True\ntry:\n import numpy as np\nexcept:\n _IS_NUMPY_AVAILABLE = False\n\n\nclass QrackSimulator:\n \"\"\"Interface for all the QRack functionality.\n\n Attributes:\n qubitCount(int): Number of qubits that are to be simulated.\n sid(int): Corresponding simulator id.\n \"\"\"\n\n def _get_error(self):\n return Qrack.qrack_lib.get_error(self.sid)\n\n def _throw_if_error(self):\n if self._get_error() != 0:\n raise RuntimeError(\"QrackSimulator C++ library raised exception.\")\n\n def __init__(\n self,\n qubitCount=-1,\n cloneSid=-1,\n isTensorNetwork=True,\n isSchmidtDecomposeMulti=True,\n isSchmidtDecompose=True,\n isStabilizerHybrid=True,\n isBinaryDecisionTree=False,\n isPaged=True,\n isCpuGpuHybrid=True,\n isOpenCL=True,\n isHostPointer=False,\n pyzxCircuit=None,\n qiskitCircuit=None,\n ):\n self.sid = None\n\n if pyzxCircuit is not None:\n qubitCount = pyzxCircuit.qubits\n elif qiskitCircuit is not None and qubitCount < 0:\n raise RuntimeError(\n \"Must specify qubitCount with qiskitCircuit parameter in QrackSimulator constructor!\"\n )\n\n if qubitCount > -1 and cloneSid > -1:\n raise RuntimeError(\n \"Cannot clone a QrackSimulator and specify its qubit length at the same time, in QrackSimulator constructor!\"\n )\n\n if 
isBinaryDecisionTree and isStabilizerHybrid:\n raise RuntimeError(\n \"isBinaryDecisionTree and isStabilizerHybrid are currently incompatible constructor options to QrackSimulator! (Please set one or both options to False.)\"\n )\n\n self.is_tensor_network = isTensorNetwork\n\n if cloneSid > -1:\n self.sid = Qrack.qrack_lib.init_clone(cloneSid)\n else:\n if qubitCount < 0:\n qubitCount = 0\n\n if (\n isTensorNetwork\n and isSchmidtDecompose\n and isStabilizerHybrid\n and not isBinaryDecisionTree\n and isPaged\n and isCpuGpuHybrid\n and isOpenCL\n ):\n if isSchmidtDecomposeMulti:\n self.sid = Qrack.qrack_lib.init_count(qubitCount, isHostPointer)\n else:\n self.sid = Qrack.qrack_lib.init_count_pager(\n qubitCount, isHostPointer\n )\n else:\n self.sid = Qrack.qrack_lib.init_count_type(\n qubitCount,\n isTensorNetwork,\n isSchmidtDecomposeMulti,\n isSchmidtDecompose,\n isStabilizerHybrid,\n isBinaryDecisionTree,\n isPaged,\n False,\n isCpuGpuHybrid,\n isOpenCL,\n isHostPointer,\n )\n\n self._throw_if_error()\n\n if pyzxCircuit is not None:\n self.run_pyzx_gates(pyzxCircuit.gates)\n elif qiskitCircuit is not None:\n self.run_qiskit_circuit(qiskitCircuit)\n\n def __del__(self):\n if self.sid is not None:\n Qrack.qrack_lib.destroy(self.sid)\n self.sid = None\n\n def _int_byref(self, a):\n return (ctypes.c_int * len(a))(*a)\n\n def _ulonglong_byref(self, a):\n return (ctypes.c_ulonglong * len(a))(*a)\n\n def _double_byref(self, a):\n return (ctypes.c_double * len(a))(*a)\n\n def _complex_byref(self, a):\n t = [(c.real, c.imag) for c in a]\n return self._double_byref([float(item) for sublist in t for item in sublist])\n\n def _real1_byref(self, a):\n # This needs to be c_double, if PyQrack is built with fp64.\n if Qrack.fppow < 6:\n return (ctypes.c_float * len(a))(*a)\n return (ctypes.c_double * len(a))(*a)\n\n def _bool_byref(self, a):\n return (ctypes.c_bool * len(a))(*a)\n\n def _qrack_complex_byref(self, a):\n t = [(c.real, c.imag) for c in a]\n return 
self._real1_byref([float(item) for sublist in t for item in sublist])\n\n def _to_ubyte(self, nv, v):\n c = math.floor((nv - 1) / 8) + 1\n b = (ctypes.c_ubyte * (c * (1 << nv)))()\n n = 0\n for u in v:\n for _ in range(c):\n b[n] = u & 0xFF\n u >>= 8\n n += 1\n\n return b\n\n def _to_ulonglong(self, m, v):\n b = (ctypes.c_ulonglong * (m * len(v)))()\n n = 0\n for u in v:\n for _ in range(m):\n b[n] = u & 0xFFFFFFFFFFFFFFFF\n u >>= 64\n n += 1\n\n return b\n\n # See https://stackoverflow.com/questions/5389507/iterating-over-every-two-elements-in-a-list#answer-30426000\n def _pairwise(self, it):\n it = iter(it)\n while True:\n try:\n yield next(it), next(it)\n except StopIteration:\n # no more elements in the iterator\n return\n\n # non-quantum\n def seed(self, s):\n Qrack.qrack_lib.seed(self.sid, s)\n self._throw_if_error()\n\n def set_concurrency(self, p):\n Qrack.qrack_lib.set_concurrency(self.sid, p)\n self._throw_if_error()\n\n # standard gates\n\n ## single-qubits gates\n def x(self, q):\n \"\"\"Applies X gate.\n\n Applies the Pauli “X” operator to the qubit at position “q.”\n The Pauli “X” operator is equivalent to a logical “NOT.”\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.X(self.sid, q)\n self._throw_if_error()\n\n def y(self, q):\n \"\"\"Applies Y gate.\n\n Applies the Pauli “Y” operator to the qubit at “q.”\n The Pauli “Y” operator is equivalent to a logical “NOT\" with\n permutation phase.\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.Y(self.sid, q)\n self._throw_if_error()\n\n def z(self, q):\n \"\"\"Applies Z gate.\n\n Applies the Pauli “Z” operator to the qubit at “q.”\n The Pauli “Z” operator flips the phase of `|1>`\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an 
exception.\n \"\"\"\n Qrack.qrack_lib.Z(self.sid, q)\n self._throw_if_error()\n\n def h(self, q):\n \"\"\"Applies H gate.\n\n Applies the Hadarmard operator to the qubit at “q.”\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.H(self.sid, q)\n self._throw_if_error()\n\n def s(self, q):\n \"\"\"Applies S gate.\n\n Applies the 1/4 phase rotation to the qubit at “q.”\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.S(self.sid, q)\n self._throw_if_error()\n\n def t(self, q):\n \"\"\"Applies T gate.\n\n Applies the 1/8 phase rotation to the qubit at “q.”\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.T(self.sid, q)\n self._throw_if_error()\n\n def adjs(self, q):\n \"\"\"Adjoint of S gate\n\n Applies the gate equivalent to the inverse of S gate.\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.AdjS(self.sid, q)\n self._throw_if_error()\n\n def adjt(self, q):\n \"\"\"Adjoint of T gate\n\n Applies the gate equivalent to the inverse of T gate.\n\n Args:\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.AdjT(self.sid, q)\n self._throw_if_error()\n\n def u(self, q, th, ph, la):\n \"\"\"General unitary gate.\n\n Applies a gate guaranteed to be unitary.\n Spans all possible single bit unitary gates.\n\n `U(theta, phi, lambda) = RZ(phi + pi/2)RX(theta)RZ(lambda - pi/2)`\n\n Args:\n q: the qubit number on which the gate is applied to.\n th: theta\n ph: phi\n la: lambda\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.U(\n 
self.sid, q, ctypes.c_double(th), ctypes.c_double(ph), ctypes.c_double(la)\n )\n self._throw_if_error()\n\n def mtrx(self, m, q):\n \"\"\"Operation from matrix.\n\n Applies arbitrary operation defined by the given matrix.\n\n Args:\n m: row-major complex list representing the operator.\n q: the qubit number on which the gate is applied to.\n\n Raises:\n ValueError: 2x2 matrix 'm' in QrackSimulator.mtrx() must contain at least 4 elements.\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(m) < 4:\n raise ValueError(\"2x2 matrix 'm' in QrackSimulator.mtrx() must contain at least 4 elements.\")\n Qrack.qrack_lib.Mtrx(self.sid, self._complex_byref(m), q)\n self._throw_if_error()\n\n def r(self, b, ph, q):\n \"\"\"Rotation gate.\n\n Rotate the qubit along the given pauli basis by the given angle.\n\n\n Args:\n b: Pauli basis\n ph: rotation angle\n q: the qubit number on which the gate is applied to\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.R(self.sid, ctypes.c_ulonglong(b), ctypes.c_double(ph), q)\n self._throw_if_error()\n\n def exp(self, b, ph, q):\n \"\"\"Arbitrary exponentiation\n\n `exp(b, theta) = e^{i*theta*[b_0 . 
b_1 ...]}`\n where `.` is the tensor product.\n\n\n Args:\n b: Pauli basis\n ph: coefficient of exponentiation\n q: the qubit number on which the gate is applied to\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(b) != len(q):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n Qrack.qrack_lib.Exp(\n self.sid,\n len(b),\n self._ulonglong_byref(b),\n ctypes.c_double(ph),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n ## multi-qubit gates\n def mcx(self, c, q):\n \"\"\"Multi-controlled X gate\n\n If all controlled qubits are `|1>` then the target qubit is flipped.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCX(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mcy(self, c, q):\n \"\"\"Multi-controlled Y gate\n\n If all controlled qubits are `|1>` then the Pauli \"Y\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCY(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mcz(self, c, q):\n \"\"\"Multi-controlled Z gate\n\n If all controlled qubits are `|1>` then the Pauli \"Z\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCZ(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mch(self, c, q):\n \"\"\"Multi-controlled H gate\n\n If all controlled qubits are `|1>` then the Hadarmard gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCH(self.sid, len(c), self._ulonglong_byref(c), q)\n 
self._throw_if_error()\n\n def mcs(self, c, q):\n \"\"\"Multi-controlled S gate\n\n If all controlled qubits are `|1>` then the \"S\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCS(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mct(self, c, q):\n \"\"\"Multi-controlled T gate\n\n If all controlled qubits are `|1>` then the \"T\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCT(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mcadjs(self, c, q):\n \"\"\"Multi-controlled adjs gate\n\n If all controlled qubits are `|1>` then the adjs gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCAdjS(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mcadjt(self, c, q):\n \"\"\"Multi-controlled adjt gate\n\n If all controlled qubits are `|1>` then the adjt gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCAdjT(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mcu(self, c, q, th, ph, la):\n \"\"\"Multi-controlled arbitraty unitary\n\n If all controlled qubits are `|1>` then the unitary gate described by\n parameters is applied to the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n th: theta\n ph: phi\n la: lambda\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCU(\n self.sid,\n len(c),\n self._ulonglong_byref(c),\n q,\n 
ctypes.c_double(th),\n ctypes.c_double(ph),\n ctypes.c_double(la),\n )\n self._throw_if_error()\n\n def mcmtrx(self, c, m, q):\n \"\"\"Multi-controlled arbitrary operator\n\n If all controlled qubits are `|1>` then the arbitrary operation by\n parameters is applied to the target qubit.\n\n Args:\n c: list of controlled qubits\n m: row-major complex list representing the operator.\n q: target qubit\n\n Raises:\n ValueError: 2x2 matrix 'm' in QrackSimulator.mcmtrx() must contain at least 4 elements.\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(m) < 4:\n raise ValueError(\"2x2 matrix 'm' in QrackSimulator.mcmtrx() must contain at least 4 elements.\")\n Qrack.qrack_lib.MCMtrx(\n self.sid, len(c), self._ulonglong_byref(c), self._complex_byref(m), q\n )\n self._throw_if_error()\n\n def macx(self, c, q):\n \"\"\"Anti multi-controlled X gate\n\n If all controlled qubits are `|0>` then the target qubit is flipped.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACX(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def macy(self, c, q):\n \"\"\"Anti multi-controlled Y gate\n\n If all controlled qubits are `|0>` then the Pauli \"Y\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACY(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def macz(self, c, q):\n \"\"\"Anti multi-controlled Z gate\n\n If all controlled qubits are `|0>` then the Pauli \"Z\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACZ(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mach(self, c, q):\n \"\"\"Anti 
multi-controlled H gate\n\n If all controlled qubits are `|0>` then the Hadarmard gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACH(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def macs(self, c, q):\n \"\"\"Anti multi-controlled S gate\n\n If all controlled qubits are `|0>` then the \"S\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACS(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def mact(self, c, q):\n \"\"\"Anti multi-controlled T gate\n\n If all controlled qubits are `|0>` then the \"T\" gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACT(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def macadjs(self, c, q):\n \"\"\"Anti multi-controlled adjs gate\n\n If all controlled qubits are `|0>` then the adjs gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACAdjS(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def macadjt(self, c, q):\n \"\"\"Anti multi-controlled adjt gate\n\n If all controlled qubits are `|0>` then the adjt gate is applied to\n the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACAdjT(self.sid, len(c), self._ulonglong_byref(c), q)\n self._throw_if_error()\n\n def macu(self, c, q, th, ph, la):\n \"\"\"Anti multi-controlled arbitraty unitary\n\n If 
all controlled qubits are `|0>` then the unitary gate described by\n parameters is applied to the target qubit.\n\n Args:\n c: list of controlled qubits.\n q: target qubit.\n th: theta\n ph: phi\n la: lambda\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MACU(\n self.sid,\n len(c),\n self._ulonglong_byref(c),\n q,\n ctypes.c_double(th),\n ctypes.c_double(ph),\n ctypes.c_double(la),\n )\n self._throw_if_error()\n\n def macmtrx(self, c, m, q):\n \"\"\"Anti multi-controlled arbitraty operator\n\n If all controlled qubits are `|0>` then the arbitrary operation by\n parameters is applied to the target qubit.\n\n Args:\n c: list of controlled qubits.\n m: row-major complex matrix which defines the operator.\n q: target qubit.\n\n Raises:\n ValueError: 2x2 matrix 'm' in QrackSimulator.macmtrx() must contain at least 4 elements.\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(m) < 4:\n raise ValueError(\"2x2 matrix 'm' in QrackSimulator.macmtrx() must contain at least 4 elements.\")\n Qrack.qrack_lib.MACMtrx(\n self.sid, len(c), self._ulonglong_byref(c), self._complex_byref(m), q\n )\n self._throw_if_error()\n\n def ucmtrx(self, c, m, q, p):\n \"\"\"Multi-controlled arbitrary operator with arbitrary controls\n\n If all control qubits match 'p' permutation by bit order, then the arbitrary\n operation by parameters is applied to the target qubit.\n\n Args:\n c: list of control qubits\n m: row-major complex list representing the operator.\n q: target qubit\n p: permutation of list of control qubits\n\n Raises:\n ValueError: 2x2 matrix 'm' in QrackSimulator.ucmtrx() must contain at least 4 elements.\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(m) < 4:\n raise ValueError(\"2x2 matrix 'm' in QrackSimulator.ucmtrx() must contain at least 4 elements.\")\n Qrack.qrack_lib.UCMtrx(\n self.sid, len(c), self._ulonglong_byref(c), self._complex_byref(m), q, p\n )\n self._throw_if_error()\n\n def 
multiplex1_mtrx(self, c, q, m):\n \"\"\"Multiplex gate\n\n A multiplex gate with a single target and an arbitrary number of\n controls.\n\n Args:\n c: list of controlled qubits.\n m: row-major complex matrix which defines the operator.\n q: target qubit.\n\n Raises:\n ValueError: Multiplex matrix 'm' in QrackSimulator.multiplex1_mtrx() must contain at least 4 elements.\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(m) < ((1 << len(c)) * 4):\n raise ValueError(\"Multiplex matrix 'm' in QrackSimulator.multiplex1_mtrx() must contain at least (4 * 2 ** len(c)) elements.\")\n Qrack.qrack_lib.Multiplex1Mtrx(\n self.sid, len(c), self._ulonglong_byref(c), q, self._complex_byref(m)\n )\n self._throw_if_error()\n\n def mx(self, q):\n \"\"\"Multi X-gate\n\n Applies the Pauli “X” operator on all qubits.\n\n Args:\n q: list of qubits to apply X on.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MX(self.sid, len(q), self._ulonglong_byref(q))\n self._throw_if_error()\n\n def my(self, q):\n \"\"\"Multi Y-gate\n\n Applies the Pauli “Y” operator on all qubits.\n\n Args:\n q: list of qubits to apply Y on.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MY(self.sid, len(q), self._ulonglong_byref(q))\n self._throw_if_error()\n\n def mz(self, q):\n \"\"\"Multi Z-gate\n\n Applies the Pauli “Z” operator on all qubits.\n\n Args:\n q: list of qubits to apply Z on.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MZ(self.sid, len(q), self._ulonglong_byref(q))\n self._throw_if_error()\n\n def mcr(self, b, ph, c, q):\n \"\"\"Multi-controlled arbitrary rotation.\n\n If all controlled qubits are `|1>` then the arbitrary rotation by\n parameters is applied to the target qubit.\n\n Args:\n b: Pauli basis\n ph: coefficient of exponentiation.\n c: list of controlled qubits.\n q: the qubit number on which the gate is applied to.\n\n Raises:\n 
RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.MCR(\n self.sid,\n ctypes.c_ulonglong(b),\n ctypes.c_double(ph),\n len(c),\n self._ulonglong_byref(c),\n q,\n )\n self._throw_if_error()\n\n def mcexp(self, b, ph, cs, q):\n \"\"\"Multi-controlled arbitrary exponentiation\n\n If all controlled qubits are `|1>` then the target qubit is\n exponentiated an pauli basis basis with coefficient.\n\n Args:\n b: Pauli basis\n ph: coefficient of exponentiation.\n q: the qubit number on which the gate is applied to.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(b) != len(q):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n Qrack.qrack_lib.MCExp(\n self.sid,\n len(b),\n self._ulonglong_byref(b),\n ctypes.c_double(ph),\n len(cs),\n self._ulonglong_byref(cs),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def swap(self, qi1, qi2):\n \"\"\"Swap Gate\n\n Swaps the qubits at two given positions.\n\n Args:\n qi1: First position of qubit.\n qi2: Second position of qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.SWAP(self.sid, qi1, qi2)\n self._throw_if_error()\n\n def iswap(self, qi1, qi2):\n \"\"\"Swap Gate with phase.\n\n Swaps the qubits at two given positions.\n If the bits are different then there is additional phase of `i`.\n\n Args:\n qi1: First position of qubit.\n qi2: Second position of qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.ISWAP(self.sid, qi1, qi2)\n self._throw_if_error()\n\n def adjiswap(self, qi1, qi2):\n \"\"\"Swap Gate with phase.\n\n Swaps the qubits at two given positions.\n If the bits are different then there is additional phase of `-i`.\n\n Args:\n qi1: First position of qubit.\n qi2: Second position of qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.AdjISWAP(self.sid, qi1, qi2)\n self._throw_if_error()\n\n 
def fsim(self, th, ph, qi1, qi2):\n \"\"\"Fsim gate.\n\n The 2-qubit “fSim” gate\n Useful in the simulation of particles with fermionic statistics\n\n Args:\n qi1: First position of qubit.\n qi2: Second position of qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.FSim(\n self.sid, ctypes.c_double(th), ctypes.c_double(ph), qi1, qi2\n )\n self._throw_if_error()\n\n def cswap(self, c, qi1, qi2):\n \"\"\"Controlled-swap Gate\n\n Swaps the qubits at two given positions if the control qubits are `|1>`\n\n Args:\n c: list of controlled qubits.\n qi1: First position of qubit.\n qi2: Second position of qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CSWAP(self.sid, len(c), self._ulonglong_byref(c), qi1, qi2)\n self._throw_if_error()\n\n def acswap(self, c, qi1, qi2):\n \"\"\"Anti controlled-swap Gate\n\n Swaps the qubits at two given positions if the control qubits are `|0>`\n\n Args:\n c: list of controlled qubits.\n qi1: First position of qubit.\n qi2: Second position of qubit.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.ACSWAP(self.sid, len(c), self._ulonglong_byref(c), qi1, qi2)\n self._throw_if_error()\n\n # standard operations\n def m(self, q):\n \"\"\"Measurement gate\n\n Measures the qubit at \"q\" and returns Boolean value.\n This operator is not unitary & is probabilistic in nature.\n\n Args:\n q: qubit to measure\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Measurement result.\n \"\"\"\n result = Qrack.qrack_lib.M(self.sid, q)\n self._throw_if_error()\n return result\n\n def force_m(self, q, r):\n \"\"\"Force-Measurement gate\n\n Acts as if the measurement is applied and the result obtained is `r`\n\n Args:\n q: qubit to measure\n r: the required result\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Measurement result.\n \"\"\"\n result = 
Qrack.qrack_lib.ForceM(self.sid, q, r)\n self._throw_if_error()\n return result\n\n def m_all(self):\n \"\"\"Measure-all gate\n\n Measures measures all qubits.\n This operator is not unitary & is probabilistic in nature.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Measurement result of all qubits.\n \"\"\"\n result = Qrack.qrack_lib.MAll(self.sid)\n self._throw_if_error()\n return result\n\n def measure_pauli(self, b, q):\n \"\"\"Pauli Measurement gate\n\n Measures the qubit at \"q\" with the given pauli basis.\n This operator is not unitary & is probabilistic in nature.\n\n Args:\n b: Pauli basis\n q: qubit to measure\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Measurement result.\n \"\"\"\n if len(b) != len(q):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n result = Qrack.qrack_lib.Measure(\n self.sid, len(b), self._int_byref(b), self._ulonglong_byref(q)\n )\n self._throw_if_error()\n return result\n\n def measure_shots(self, q, s):\n \"\"\"Multi-shot measurement operator\n\n Measures the qubit at \"q\" with the given pauli basis.\n This operator is not unitary & is probabilistic in nature.\n\n Args:\n q: list of qubits to measure\n s: number of shots\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n list of measurement result.\n \"\"\"\n m = self._ulonglong_byref([0] * s)\n Qrack.qrack_lib.MeasureShots(self.sid, len(q), self._ulonglong_byref(q), s, m)\n self._throw_if_error()\n return [m[i] for i in range(s)]\n\n def reset_all(self):\n \"\"\"Reset gate\n\n Resets all qubits to `|0>`\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.ResetAll(self.sid)\n self._throw_if_error()\n\n # arithmetic-logic-unit (ALU)\n def _split_longs(self, a):\n \"\"\"Split operation\n\n Splits the given integer into 64 bit numbers.\n\n\n Args:\n a: number to split\n\n Raises:\n RuntimeError: QrackSimulator raised an 
exception.\n\n Returns:\n list of split numbers.\n \"\"\"\n aParts = []\n if a == 0:\n aParts.append(0)\n while a > 0:\n aParts.append(a & 0xFFFFFFFFFFFFFFFF)\n a = a >> 64\n return aParts\n\n def _split_longs_2(self, a, m):\n \"\"\"Split simultanoues operation\n\n Splits 2 integers into same number of 64 bit numbers.\n\n Args:\n a: first number to split\n m: second number to split\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n pair of lists of split numbers.\n \"\"\"\n aParts = []\n mParts = []\n if a == 0 and m == 0:\n aParts.append(0)\n mParts.append(0)\n while a > 0 or m > 0:\n aParts.append(a & 0xFFFFFFFFFFFFFFFF)\n a = a >> 64\n mParts.append(m & 0xFFFFFFFFFFFFFFFF)\n m = m >> 64\n return aParts, mParts\n\n def add(self, a, q):\n \"\"\"Add integer to qubit\n\n Adds the given integer to the given set of qubits.\n\n Args:\n a: first number to split\n q: list of qubits to add the number\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n aParts = self._split_longs(a)\n Qrack.qrack_lib.ADD(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def sub(self, a, q):\n \"\"\"Subtract integer to qubit\n\n Subtracts the given integer to the given set of qubits.\n\n Args:\n a: first number to split\n q: list of qubits to subtract the number\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n aParts = self._split_longs(a)\n Qrack.qrack_lib.SUB(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def adds(self, a, s, q):\n \"\"\"Signed Addition integer to qubit\n\n Signed Addition of the given integer to the given set of qubits,\n if there is an overflow the resultant will become negative.\n\n Args:\n a: number to add\n s: qubit to store overflow\n q: list of qubits to add the number\n\n Raises:\n RuntimeError: QrackSimulator raised an 
exception.\n \"\"\"\n aParts = self._split_longs(a)\n Qrack.qrack_lib.ADDS(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n s,\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def subs(self, a, s, q):\n \"\"\"Subtract integer to qubit\n\n Subtracts the given integer to the given set of qubits,\n if there is an overflow the resultant will become negative.\n\n Args:\n a: number to subtract\n s: qubit to store overflow\n q: list of qubits to subtract the number\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n aParts = self._split_longs(a)\n Qrack.qrack_lib.SUBS(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n s,\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def mul(self, a, q, o):\n \"\"\"Multiplies integer to qubit\n\n Multiplies the given integer to the given set of qubits.\n Carry register is required for maintaining the unitary nature of\n operation and must be as long as the input qubit register.\n\n Args:\n a: number to multiply\n q: list of qubits to multiply the number\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot mul()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot mul()! 
(Turn off just this option, in the constructor.)\")\n\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts = self._split_longs(a)\n Qrack.qrack_lib.MUL(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def div(self, a, q, o):\n \"\"\"Divides qubit by integer\n\n 'Divides' the given qubits by the integer.\n (This is rather the adjoint of mul().)\n Carry register is required for maintaining the unitary nature of\n operation.\n\n Args:\n a: integer to divide by\n q: qubits to divide\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot div()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot div()! (Turn off just this option, in the constructor.)\")\n\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts = self._split_longs(a)\n Qrack.qrack_lib.DIV(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def muln(self, a, m, q, o):\n \"\"\"Modulo Multiplication\n\n Modulo Multiplication of the given integer to the given set of qubits\n Out-of-place register is required to store the resultant.\n\n Args:\n a: number to multiply\n m: modulo number\n q: list of qubits to multiply the number\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts, mParts = self._split_longs_2(a, m)\n Qrack.qrack_lib.MULN(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n self._ulonglong_byref(mParts),\n len(q),\n 
self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def divn(self, a, m, q, o):\n \"\"\"Modulo Division\n\n 'Modulo Division' of the given set of qubits by the given integer\n (This is rather the adjoint of muln().)\n Out-of-place register is required to retrieve the resultant.\n\n Args:\n a: integer by which qubit will be divided\n m: modulo integer\n q: qubits to divide\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts, mParts = self._split_longs_2(a, m)\n Qrack.qrack_lib.DIVN(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n self._ulonglong_byref(mParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def pown(self, a, m, q, o):\n \"\"\"Modulo Power\n\n Raises the qubit to the power `a` to which `mod m` is applied to.\n Out-of-place register is required to store the resultant.\n\n Args:\n a: number in power\n m: modulo number\n q: list of qubits to exponentiate\n o: out-of-place register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot pown()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot pown()! 
(Turn off just this option, in the constructor.)\")\n\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts, mParts = self._split_longs_2(a, m)\n Qrack.qrack_lib.POWN(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n self._ulonglong_byref(mParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def mcadd(self, a, c, q):\n \"\"\"Controlled-add\n\n Adds the given integer to the given set of qubits if all controlled\n qubits are `|1>`.\n\n Args:\n a: number to add.\n c: list of controlled qubits.\n q: list of qubits to add the number\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n aParts = self._split_longs(a)\n Qrack.qrack_lib.MCADD(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def mcsub(self, a, c, q):\n \"\"\"Controlled-subtract\n\n Subtracts the given integer to the given set of qubits if all controlled\n qubits are `|1>`.\n\n Args:\n a: number to subtract.\n c: list of controlled qubits.\n q: list of qubits to add the number\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n aParts = self._split_longs(a)\n Qrack.qrack_lib.MCSUB(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def mcmul(self, a, c, q, o):\n \"\"\"Controlled-multiply\n\n Multiplies the given integer to the given set of qubits if all controlled\n qubits are `|1>`.\n Carry register is required for maintaining the unitary nature of\n operation.\n\n Args:\n a: number to multiply\n c: list of controlled qubits.\n q: list of qubits to add the number\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot 
mcmul()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot mcmul()! (Turn off just this option, in the constructor.)\")\n\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts = self._split_longs(a)\n Qrack.qrack_lib.MCMUL(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def mcdiv(self, a, c, q, o):\n \"\"\"Controlled-divide.\n\n 'Divides' the given qubits by the integer if all controlled\n qubits are `|1>`.\n (This is rather the adjoint of mcmul().)\n Carry register is required for maintaining the unitary nature of\n operation.\n\n Args:\n a: number to divide by\n c: list of controlled qubits.\n q: qubits to divide\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot mcdiv()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot mcdiv()! 
(Turn off just this option, in the constructor.)\")\n\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts = self._split_longs(a)\n Qrack.qrack_lib.MCDIV(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n len(q),\n self._ulonglong_byref(q),\n )\n self._throw_if_error()\n\n def mcmuln(self, a, c, m, q, o):\n \"\"\"Controlled-modulo multiplication\n\n Modulo multiplication of the given integer to the given set of qubits\n if all controlled qubits are `|1>`.\n Out-of-place register is required to store the resultant.\n\n Args:\n a: number to multiply\n c: list of controlled qubits.\n m: modulo number\n q: list of qubits to add the number\n o: out-of-place output register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts, mParts = self._split_longs_2(a, m)\n Qrack.qrack_lib.MCMULN(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n self._ulonglong_byref(mParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def mcdivn(self, a, c, m, q, o):\n \"\"\"Controlled-divide.\n\n Modulo division of the given qubits by the given number if all\n controlled qubits are `|1>`.\n (This is rather the adjoint of mcmuln().)\n Out-of-place register is required to retrieve the resultant.\n\n Args:\n a: number to divide by\n c: list of controlled qubits.\n m: modulo number\n q: qubits to divide\n o: carry register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts, mParts = self._split_longs_2(a, m)\n Qrack.qrack_lib.MCDIVN(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n 
self._ulonglong_byref(mParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def mcpown(self, a, c, m, q, o):\n \"\"\"Controlled-modulo Power\n\n Raises the qubit to the power `a` to which `mod m` is applied to if\n all the controlled qubits are set to `|1>`.\n Out-of-place register is required to store the resultant.\n\n Args:\n a: number in power\n c: control qubits\n m: modulo number\n q: list of qubits to exponentiate\n o: out-of-place register\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot mcpown()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot mcpown()! (Turn off just this option, in the constructor.)\")\n\n if len(q) != len(o):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n aParts, mParts = self._split_longs_2(a, m)\n Qrack.qrack_lib.MCPOWN(\n self.sid,\n len(aParts),\n self._ulonglong_byref(aParts),\n len(c),\n self._ulonglong_byref(c),\n self._ulonglong_byref(mParts),\n len(q),\n self._ulonglong_byref(q),\n self._ulonglong_byref(o),\n )\n self._throw_if_error()\n\n def lda(self, qi, qv, t):\n \"\"\"Load Accumalator\n\n Quantum counterpart for LDA from MOS-6502 assembly. `t` must be of\n the length `2 ** len(qi)`. It loads each list entry index of t into\n the qi register and each list entry value into the qv register.\n\n Args:\n qi: qubit register for index\n qv: qubit register for value\n t: list of values\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot lda()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot lda()! 
(Turn off just this option, in the constructor.)\")\n\n Qrack.qrack_lib.LDA(\n self.sid,\n len(qi),\n self._ulonglong_byref(qi),\n len(qv),\n self._ulonglong_byref(qv),\n self._to_ubyte(len(qv), t),\n )\n self._throw_if_error()\n\n def adc(self, s, qi, qv, t):\n \"\"\"Add with Carry\n\n Quantum counterpart for ADC from MOS-6502 assembly. `t` must be of\n the length `2 ** len(qi)`.\n\n Args:\n qi: qubit register for index\n qv: qubit register for value\n t: list of values\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot adc()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot adc()! (Turn off just this option, in the constructor.)\")\n\n Qrack.qrack_lib.ADC(\n self.sid,\n s,\n len(qi),\n self._ulonglong_byref(qi),\n len(qv),\n self._ulonglong_byref(qv),\n self._to_ubyte(len(qv), t),\n )\n self._throw_if_error()\n\n def sbc(self, s, qi, qv, t):\n \"\"\"Subtract with Carry\n\n Quantum counterpart for SBC from MOS-6502 assembly. `t` must be of\n the length `2 ** len(qi)`\n\n Args:\n qi: qubit register for index\n qv: qubit register for value\n t: list of values\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot sbc()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot sbc()! 
(Turn off just this option, in the constructor.)\")\n\n Qrack.qrack_lib.SBC(\n self.sid,\n s,\n len(qi),\n self._ulonglong_byref(qi),\n len(qv),\n self._ulonglong_byref(qv),\n self._to_ubyte(len(qv), t),\n )\n self._throw_if_error()\n\n def hash(self, q, t):\n \"\"\"Hash function\n\n Replicates the behaviour of LDA without the index register.\n For the operation to be unitary, the entries present in `t` must be\n unique, and the length of `t` must be `2 ** len(qi)`.\n\n\n Args:\n q: qubit register for value\n t: list of values\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot hash()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot hash()! (Turn off just this option, in the constructor.)\")\n\n Qrack.qrack_lib.Hash(\n self.sid, len(q), self._ulonglong_byref(q), self._to_ubyte(len(q), t)\n )\n self._throw_if_error()\n\n # boolean logic gates\n def qand(self, qi1, qi2, qo):\n \"\"\"Logical AND\n\n Logical AND of 2 qubits whose result is stored in the target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.AND(self.sid, qi1, qi2, qo)\n self._throw_if_error()\n\n def qor(self, qi1, qi2, qo):\n \"\"\"Logical OR\n\n Logical OR of 2 qubits whose result is stored in the target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.OR(self.sid, qi1, qi2, qo)\n self._throw_if_error()\n\n def qxor(self, qi1, qi2, qo):\n \"\"\"Logical XOR\n\n Logical exlusive-OR of 2 qubits whose result is stored in the target\n qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n 
Qrack.qrack_lib.XOR(self.sid, qi1, qi2, qo)\n self._throw_if_error()\n\n def qnand(self, qi1, qi2, qo):\n \"\"\"Logical NAND\n\n Logical NAND of 2 qubits whose result is stored in the target\n qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.NAND(self.sid, qi1, qi2, qo)\n self._throw_if_error()\n\n def qnor(self, qi1, qi2, qo):\n \"\"\"Logical NOR\n\n Logical NOR of 2 qubits whose result is stored in the target\n qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.NOR(self.sid, qi1, qi2, qo)\n self._throw_if_error()\n\n def qxnor(self, qi1, qi2, qo):\n \"\"\"Logical XOR\n\n Logical exlusive-NOR of 2 qubits whose result is stored in the target\n qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.XNOR(self.sid, qi1, qi2, qo)\n self._throw_if_error()\n\n def cland(self, ci, qi, qo):\n \"\"\"Classical AND\n\n Logical AND with one qubit and one classical bit whose result is\n stored in target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CLAND(self.sid, ci, qi, qo)\n self._throw_if_error()\n\n def clor(self, ci, qi, qo):\n \"\"\"Classical OR\n\n Logical OR with one qubit and one classical bit whose result is\n stored in target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CLOR(self.sid, ci, qi, qo)\n self._throw_if_error()\n\n def clxor(self, ci, qi, qo):\n \"\"\"Classical XOR\n\n Logical exlusive-OR with one qubit and one classical bit whose result is\n stored in target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n 
Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CLXOR(self.sid, ci, qi, qo)\n self._throw_if_error()\n\n def clnand(self, ci, qi, qo):\n \"\"\"Classical NAND\n\n Logical NAND with one qubit and one classical bit whose result is\n stored in target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CLNAND(self.sid, ci, qi, qo)\n self._throw_if_error()\n\n def clnor(self, ci, qi, qo):\n \"\"\"Classical NOR\n\n Logical NOR with one qubit and one classical bit whose result is\n stored in target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CLNOR(self.sid, ci, qi, qo)\n self._throw_if_error()\n\n def clxnor(self, ci, qi, qo):\n \"\"\"Classical XNOR\n\n Logical exlusive-NOR with one qubit and one classical bit whose result is\n stored in target qubit.\n\n Args:\n qi1: qubit 1\n qi2: qubit 2\n qo: target qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.CLXNOR(self.sid, ci, qi, qo)\n self._throw_if_error()\n\n # Particular Quantum Circuits\n\n ## fourier transform\n def qft(self, qs):\n \"\"\"Quantum Fourier Transform\n\n Applies Quantum Fourier Transform on the list of qubits provided.\n\n Args:\n qs: list of qubits\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.QFT(self.sid, len(qs), self._ulonglong_byref(qs))\n self._throw_if_error()\n\n def iqft(self, qs):\n \"\"\"Inverse-quantum Fourier Transform\n\n Applies Inverse-quantum Fourier Transform on the list of qubits\n provided.\n\n Args:\n qs: list of qubits\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.IQFT(self.sid, len(qs), self._ulonglong_byref(qs))\n self._throw_if_error()\n\n # pseudo-quantum\n\n ## allocate and release\n def 
allocate_qubit(self, qid):\n \"\"\"Allocate Qubit\n\n Allocate 1 new qubit with the given qubit ID.\n\n Args:\n qid: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.allocateQubit(self.sid, qid)\n self._throw_if_error()\n\n def release(self, q):\n \"\"\"Release Qubit\n\n Release qubit given by the given qubit ID.\n\n Args:\n q: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n If the qubit was in `|0>` state with small tolerance.\n \"\"\"\n result = Qrack.qrack_lib.release(self.sid, q)\n self._throw_if_error()\n return result\n\n def num_qubits(self):\n \"\"\"Get Qubit count\n\n Returns the qubit count of the simulator.\n\n Args:\n q: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Qubit count of the simulator\n \"\"\"\n result = Qrack.qrack_lib.num_qubits(self.sid)\n self._throw_if_error()\n return result\n\n ## schmidt decomposition\n def compose(self, other, q):\n \"\"\"Compose qubits\n\n Compose quantum description of given qubit with the current system.\n\n Args:\n q: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot compose()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot compose()! (Turn off just this option, in the constructor.)\")\n\n Qrack.qrack_lib.Compose(self.sid, other.sid, self._ulonglong_byref(q))\n self._throw_if_error()\n\n def decompose(self, q):\n \"\"\"Decompose system\n\n Decompose the given qubit out of the system.\n Warning: The qubit subsystem state must be separable, or the behavior \n of this method is undefined.\n\n Args:\n q: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot decompose()! 
(Turn off just this option, in the constructor.)\n\n Returns:\n State of the systems.\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot decompose()! (Turn off just this option, in the constructor.)\")\n\n other = QrackSimulator()\n Qrack.qrack_lib.destroy(other.sid)\n l = len(q)\n other.sid = Qrack.qrack_lib.Decompose(self.sid, l, self._ulonglong_byref(q))\n self._throw_if_error()\n return other\n\n def dispose(self, q):\n \"\"\"Dispose qubits\n\n Minimally decompose a set of contiguous bits from the separably\n composed unit, and discard the separable bits.\n Warning: The qubit subsystem state must be separable, or the behavior \n of this method is undefined.\n\n Args:\n q: qubit\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n RuntimeError: QrackSimulator with isTensorNetwork=True option cannot dispose()! (Turn off just this option, in the constructor.)\n\n Returns:\n State of the systems.\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot dispose()! 
(Turn off just this option, in the constructor.)\")\n\n l = len(q)\n Qrack.qrack_lib.Dispose(self.sid, l, self._ulonglong_byref(q))\n self._throw_if_error()\n\n ## miscellaneous\n def dump_ids(self):\n \"\"\"Dump all IDs\n\n Dump all IDs from the selected simulator ID into the callback.\n\n Returns:\n List of ids\n \"\"\"\n global ids_list\n global ids_list_index\n ids_list = [0] * self.num_qubits()\n ids_list_index = 0\n Qrack.qrack_lib.DumpIds(self.sid, self.dump_ids_callback)\n return ids_list\n\n @ctypes.CFUNCTYPE(None, ctypes.c_ulonglong)\n def dump_ids_callback(i):\n \"\"\"C callback function\"\"\"\n global ids_list\n global ids_list_index\n ids_list[ids_list_index] = i\n ids_list_index = ids_list_index + 1\n\n def dump(self):\n \"\"\"Dump state vector\n\n Dump state vector from the selected simulator ID into the callback.\n\n Returns:\n State vector list\n \"\"\"\n global state_vec_list\n global state_vec_list_index\n global state_vec_probability\n state_vec_list = [complex(0, 0)] * (1 << self.num_qubits())\n state_vec_list_index = 0\n state_vec_probability = 0\n Qrack.qrack_lib.Dump(self.sid, self.dump_callback)\n return state_vec_list\n\n @ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_double, ctypes.c_double)\n def dump_callback(r, i):\n \"\"\"C callback function\"\"\"\n global state_vec_list\n global state_vec_list_index\n global state_vec_probability\n state_vec_list[state_vec_list_index] = complex(r, i)\n state_vec_list_index = state_vec_list_index + 1\n state_vec_probability = state_vec_probability + (r * r) + (i * i)\n if (1.0 - state_vec_probability) <= (7.0 / 3 - 4.0 / 3 - 1):\n return False\n return True\n\n def in_ket(self, ket):\n \"\"\"Set state vector\n\n Set state vector for the selected simulator ID. 
\n Warning: State vector is not always the internal representation leading \n to sub-optimal performance of the method.\n\n Args:\n ket: the state vector to which simulator will be set\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.InKet(self.sid, self._qrack_complex_byref(ket))\n self._throw_if_error()\n\n def out_ket(self):\n \"\"\"Set state vector\n\n Returns the raw state vector of the simulator.\n Warning: State vector is not always the internal representation leading \n to sub-optimal performance of the method.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n list representing the state vector.\n \"\"\"\n amp_count = 1 << self.num_qubits()\n ket = self._qrack_complex_byref([complex(0, 0)] * amp_count)\n Qrack.qrack_lib.OutKet(self.sid, ket)\n self._throw_if_error()\n return [complex(r, i) for r, i in self._pairwise(ket)]\n\n def prob(self, q):\n \"\"\"Probability of `|1>`\n\n Get the probability that a qubit is in the `|1>` state.\n\n Args:\n q: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n probability of qubit being in `|1>`\n \"\"\"\n result = Qrack.qrack_lib.Prob(self.sid, q)\n self._throw_if_error()\n return result\n\n def prob_rdm(self, q):\n \"\"\"Probability of `|1>`, (tracing out the reduced\n density matrix without stabilizer ancillary qubits)\n\n Get the probability that a qubit is in the `|1>` state.\n\n Args:\n q: qubit id\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n probability of qubit being in `|1>`\n \"\"\"\n result = Qrack.qrack_lib.ProbRdm(self.sid, q)\n self._throw_if_error()\n return result\n\n def prob_perm(self, q, c):\n \"\"\"Probability of permutation\n\n Get the probability that the qubit IDs in \"q\" have the truth values\n in \"c\", directly corresponding by list index.\n\n Args:\n q: list of qubit ids\n c: list of qubit truth values bools\n\n Raises:\n RuntimeError: QrackSimulator 
raised an exception.\n\n Returns:\n probability that each qubit in \"q[i]\" has corresponding truth\n value in \"c[i]\", at once\n \"\"\"\n\n if len(q) != len(c):\n raise RuntimeError(\"prob_perm argument lengths do not match.\")\n result = Qrack.qrack_lib.PermutationProb(self.sid, len(q), self._ulonglong_byref(q), self._bool_byref(c));\n self._throw_if_error()\n return result\n\n def prob_perm_rdm(self, q, c, r = True):\n \"\"\"Probability of permutation, (tracing out the reduced\n density matrix without stabilizer ancillary qubits)\n\n Get the probability that the qubit IDs in \"q\" have the truth\n values in \"c\", directly corresponding by list index.\n\n Args:\n q: list of qubit ids\n c: list of qubit truth values bools\n r: round Rz gates down from T^(1/2)\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n probability that each qubit in \"q[i]\" has corresponding truth\n value in \"c[i]\", at once\n \"\"\"\n\n if len(q) != len(c):\n raise RuntimeError(\"prob_perm argument lengths do not match.\")\n result = Qrack.qrack_lib.PermutationProbRdm(self.sid, len(q), self._ulonglong_byref(q), self._bool_byref(c), r);\n self._throw_if_error()\n return result\n\n def permutation_expectation(self, q):\n \"\"\"Permutation expectation value\n\n Get the permutation expectation value, based upon the order of\n input qubits.\n\n Args:\n q: qubits, from low to high\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n result = Qrack.qrack_lib.PermutationExpectation(\n self.sid, len(q), self._ulonglong_byref(q)\n )\n self._throw_if_error()\n return result\n\n def permutation_expectation_rdm(self, q, r = True):\n \"\"\"Permutation expectation value, (tracing out the reduced\n density matrix without stabilizer ancillary qubits)\n\n Get the permutation expectation value, based upon the order of\n input qubits.\n\n Args:\n q: qubits, from low to high\n r: round Rz gates down from T^(1/2)\n\n 
Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n result = Qrack.qrack_lib.PermutationExpectationRdm(\n self.sid, len(q), self._ulonglong_byref(q), r\n )\n self._throw_if_error()\n return result\n\n def factorized_expectation(self, q, c):\n \"\"\"Factorized expectation value\n\n Get the factorized expectation value, where each entry\n in \"c\" is an expectation value for corresponding \"q\"\n being false, then true, repeated for each in \"q\".\n\n Args:\n q: qubits, from low to high\n c: qubit falsey/truthy values, from low to high\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n m = max([(x.bit_length() + 63) // 64 for x in c])\n result = Qrack.qrack_lib.FactorizedExpectation(\n self.sid, len(q), self._ulonglong_byref(q), m, self._to_ulonglong(m, c)\n )\n self._throw_if_error()\n return result\n\n def factorized_expectation_rdm(self, q, c, r = True):\n \"\"\"Factorized expectation value, (tracing out the reduced\n density matrix without stabilizer ancillary qubits)\n\n Get the factorized expectation value, where each entry\n in \"c\" is an expectation value for corresponding \"q\"\n being false, then true, repeated for each in \"q\".\n\n Args:\n q: qubits, from low to high\n c: qubit falsey/truthy values, from low to high\n r: round Rz gates down from T^(1/2)\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n m = max([(x.bit_length() + 63) // 64 for x in c])\n result = Qrack.qrack_lib.FactorizedExpectationRdm(\n self.sid, len(q), self._ulonglong_byref(q), m, self._to_ulonglong(m, c), r\n )\n self._throw_if_error()\n return result\n\n def factorized_expectation_fp(self, q, c):\n \"\"\"Factorized expectation value (floating-point)\n\n Get the factorized expectation value, where each entry\n in \"c\" is an expectation value for corresponding \"q\"\n being false, then true, repeated for each in 
\"q\".\n\n Args:\n q: qubits, from low to high\n c: qubit falsey/truthy values, from low to high\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n result = Qrack.qrack_lib.FactorizedExpectationFp(\n self.sid, len(q), self._ulonglong_byref(q), self._real1_byref(c)\n )\n self._throw_if_error()\n return result\n\n def factorized_expectation_fp_rdm(self, q, c, r = True):\n \"\"\"Factorized expectation value, (tracing out the reduced\n density matrix without stabilizer ancillary qubits)\n\n Get the factorized expectation value, where each entry\n in \"c\" is an expectation value for corresponding \"q\"\n being false, then true, repeated for each in \"q\".\n\n Args:\n q: qubits, from low to high\n c: qubit falsey/truthy values, from low to high\n r: round Rz gates down from T^(1/2)\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n result = Qrack.qrack_lib.FactorizedExpectationFpRdm(\n self.sid, len(q), self._ulonglong_byref(q), self._real1_byref(c), r\n )\n self._throw_if_error()\n return result\n\n def joint_ensemble_probability(self, b, q):\n \"\"\"Ensemble probability\n\n Find the joint probability for all specified qubits under the\n respective Pauli basis transformations.\n\n Args:\n b: pauli basis\n q: specified qubits\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Expectation value\n \"\"\"\n if len(b) != len(q):\n raise RuntimeError(\"Lengths of list parameters are mismatched.\")\n result = Qrack.qrack_lib.JointEnsembleProbability(\n self.sid, len(b), self._ulonglong_byref(b), q\n )\n self._throw_if_error()\n return result\n\n def phase_parity(self, la, q):\n \"\"\"Phase to odd parity\n\n Applies `e^(i*la)` phase factor to all combinations of bits with\n odd parity, based upon permutations of qubits.\n\n Args:\n la: phase\n q: specified qubits\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n 
RuntimeError: QrackSimulator with isTensorNetwork=True option cannot phase_parity()! (Turn off just this option, in the constructor.)\n \"\"\"\n if self.is_tensor_network:\n raise RuntimeError(\"QrackSimulator with isTensorNetwork=True option cannot phase_parity()! (Turn off just this option, in the constructor.)\")\n\n Qrack.qrack_lib.PhaseParity(\n self.sid, ctypes.c_double(la), len(q), self._ulonglong_byref(q)\n )\n self._throw_if_error()\n\n def try_separate_1qb(self, qi1):\n \"\"\"Manual seperation\n\n Exposes manual control for schmidt decomposition which attempts to\n decompose the qubit with possible performance improvement\n\n Args:\n qi1: qubit to be decomposed\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n State of the qubit.\n \"\"\"\n result = Qrack.qrack_lib.TrySeparate1Qb(self.sid, qi1)\n self._throw_if_error()\n return result\n\n def try_separate_2qb(self, qi1, qi2):\n \"\"\"Manual two-qubits seperation\n\n two-qubits counterpart of `try_separate_1qb`.\n\n Args:\n qi1: first qubit to be decomposed\n qi2: second qubit to be decomposed\n\n Raises:\n Runtimeerror: QrackSimulator raised an exception.\n\n Returns:\n State of both the qubits.\n \"\"\"\n result = Qrack.qrack_lib.TrySeparate2Qb(self.sid, qi1, qi2)\n self._throw_if_error()\n return result\n\n def try_separate_tolerance(self, qs, t):\n \"\"\"Manual multi-qubits seperation\n\n Multi-qubits counterpart of `try_separate_1qb`.\n\n Args:\n qs: list of qubits to be decomposed\n t: allowed tolerance\n\n Raises:\n Runtimeerror: QrackSimulator raised an exception.\n\n Returns:\n State of all the qubits.\n \"\"\"\n result = Qrack.qrack_lib.TrySeparateTol(\n self.sid, len(qs), self._ulonglong_byref(qs), t\n )\n self._throw_if_error()\n return result\n\n def get_unitary_fidelity(self):\n \"\"\"Get fidelity estimate\n\n When using \"Schmidt decomposition rounding parameter\" (\"SDRP\")\n approximate simulation, QrackSimulator() can make an excellent\n estimate of its 
overall fidelity at any time, tested against a\n nearest-neighbor variant of quantum volume circuits.\n\n Resetting the fidelity calculation to 1.0 happens automatically\n when calling `mall` are can be done manually with\n `reset_unitary_fidelity()`.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n\n Returns:\n Fidelity estimate\n \"\"\"\n result = Qrack.qrack_lib.GetUnitaryFidelity(self.sid)\n self._throw_if_error()\n return result\n\n def reset_unitary_fidelity(self):\n \"\"\"Reset fidelity estimate\n\n When using \"Schmidt decomposition rounding parameter\" (\"SDRP\")\n approximate simulation, QrackSimulator() can make an excellent\n estimate of its overall fidelity at any time, tested against a\n nearest-neighbor variant of quantum volume circuits.\n\n Resetting the fidelity calculation to 1.0 happens automatically\n when calling `m_all` or can be done manually with\n `reset_unitary_fidelity()`.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.ResetUnitaryFidelity(self.sid)\n self._throw_if_error()\n\n def set_sdrp(self, sdrp):\n \"\"\"Set \"Schmidt decomposition rounding parameter\"\n\n When using \"Schmidt decomposition rounding parameter\" (\"SDRP\")\n approximate simulation, QrackSimulator() can make an excellent\n estimate of its overall fidelity at any time, tested against a\n nearest-neighbor variant of quantum volume circuits.\n\n Resetting the fidelity calculation to 1.0 happens automatically\n when calling `m_all` or can be done manually with\n `reset_unitary_fidelity()`.\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.SetSdrp(self.sid, sdrp)\n self._throw_if_error()\n\n def set_reactive_separate(self, irs):\n \"\"\"Set reactive separation option\n\n If reactive separation is available, then this method turns it off/on.\n Note that reactive separation is on by default.\n\n Args:\n irs: is aggresively separable\n\n Raises:\n RuntimeError: 
QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.SetReactiveSeparate(self.sid, irs)\n self._throw_if_error()\n\n def set_t_injection(self, iti):\n \"\"\"Set t-injection option\n\n If t-injection is available, then this method turns it off/on.\n Note that t-injection is on by default.\n\n Args:\n iti: use \"reverse t-injection gadget\"\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n Qrack.qrack_lib.SetTInjection(self.sid, iti)\n self._throw_if_error()\n\n def out_to_file(self, filename):\n \"\"\"Output state to file (stabilizer only!)\n\n Outputs the hybrid stabilizer state to file.\n\n Args:\n filename: Name of file\n \"\"\"\n Qrack.qrack_lib.qstabilizer_out_to_file(self.sid, filename.encode('utf-8'))\n self._throw_if_error()\n\n def in_from_file(filename, is_binary_decision_tree = False, is_paged = True, is_cpu_gpu_hybrid = True, is_opencl = True, is_host_pointer = False):\n \"\"\"Input state from file (stabilizer only!)\n\n Reads in a hybrid stabilizer state from file.\n\n Args:\n filename: Name of file\n \"\"\"\n qb_count = 1\n with open(filename) as f:\n qb_count = int(f.readline())\n out = QrackSimulator(\n qubitCount=qb_count,\n isSchmidtDecomposeMulti=False,\n isSchmidtDecompose=False,\n isStabilizerHybrid=True,\n isBinaryDecisionTree=is_binary_decision_tree,\n isPaged=is_paged,\n isCpuGpuHybrid=is_cpu_gpu_hybrid,\n isOpenCL=is_opencl,\n isHostPointer=is_host_pointer\n )\n Qrack.qrack_lib.qstabilizer_in_from_file(out.sid, filename.encode('utf-8'))\n out._throw_if_error()\n\n return out\n\n def file_to_qiskit_circuit(filename, is_hardware_encoded=False):\n \"\"\"Convert an output state file to a Qiskit circuit\n\n Reads in an (optimized) circuit from a file named\n according to the \"filename\" parameter and outputs\n a Qiskit circuit.\n\n Args:\n filename: Name of file\n\n Raises:\n RuntimeErorr: Before trying to file_to_qiskit_circuit() with\n QrackCircuit, you must install Qiskit, numpy, and math!\n \"\"\"\n if not 
(_IS_QISKIT_AVAILABLE and _IS_NUMPY_AVAILABLE):\n raise RuntimeError(\n \"Before trying to file_to_qiskit_circuit() with QrackCircuit, you must install Qiskit, numpy, and math!\"\n )\n\n lines = []\n with open(filename, 'r') as file:\n lines = file.readlines()\n\n logical_qubits = int(lines[0])\n stabilizer_qubits = int(lines[1])\n\n stabilizer_count = int(lines[2])\n\n reg = QuantumRegister(stabilizer_qubits, name=\"q\")\n circ_qubits = [Qubit(reg, i) for i in range(stabilizer_qubits)]\n clifford_circ = QuantumCircuit(reg)\n line_number = 3\n for i in range(stabilizer_count):\n shard_map_size = int(lines[line_number])\n line_number += 1\n\n shard_map = {}\n for j in range(shard_map_size):\n line = lines[line_number].split()\n line_number += 1\n shard_map[int(line[0])] = int(line[1])\n\n sub_reg = []\n for index, _ in sorted(shard_map.items(), key=lambda x: x[1]):\n sub_reg.append(circ_qubits[index])\n\n line_number += 1\n tableau = []\n row_count = shard_map_size << 1\n for line in lines[line_number:(line_number + row_count)]:\n bits = line.split()\n if len(bits) != (row_count + 1):\n raise QrackException(\"Invalid Qrack hybrid stabilizer file!\")\n row = []\n for b in range(row_count):\n row.append(bool(int(bits[b])))\n row.append(bool((int(bits[-1]) >> 1) & 1))\n tableau.append(row)\n line_number += (shard_map_size << 1)\n tableau = np.array(tableau, bool)\n\n clifford = Clifford(tableau, validate=False, copy=False)\n circ = clifford.to_circuit()\n\n for instr in circ.data:\n qubits = instr.qubits\n n_qubits = []\n for qubit in qubits:\n n_qubits.append(sub_reg[circ.find_bit(qubit)[0]])\n instr.qubits = tuple(n_qubits)\n clifford_circ.data.append(instr)\n del circ\n\n non_clifford_gates = []\n g = 0\n for line in lines[line_number:]:\n i = 0\n tokens = line.split()\n op = np.zeros((2,2), dtype=complex)\n row = []\n for _ in range(2):\n amp = tokens[i].replace(\"(\",\"\").replace(\")\",\"\").split(',')\n row.append(float(amp[0]) + float(amp[1])*1j)\n i = i + 1\n 
l = math.sqrt(np.real(row[0] * np.conj(row[0]) + row[1] * np.conj(row[1])))\n op[0][0] = row[0] / l\n op[0][1] = row[1] / l\n\n if np.abs(op[0][0] - row[0]) > 1e-5:\n print(\"Warning: gate \", str(g), \" might not be unitary!\")\n if np.abs(op[0][1] - row[1]) > 1e-5:\n print(\"Warning: gate \", str(g), \" might not be unitary!\")\n\n row = []\n for _ in range(2):\n amp = tokens[i].replace(\"(\",\"\").replace(\")\",\"\").split(',')\n row.append(float(amp[0]) + float(amp[1])*1j)\n i = i + 1\n l = math.sqrt(np.real(row[0] * np.conj(row[0]) + row[1] * np.conj(row[1])))\n op[1][0] = row[0] / l\n op[1][1] = row[1] / l\n\n ph = np.real(np.log(np.linalg.det(op)) / 1j)\n\n op[1][0] = -np.exp(1j * ph) * np.conj(op[0][1])\n op[1][1] = np.exp(1j * ph) * np.conj(op[0][0])\n\n if np.abs(op[1][0] - row[0]) > 1e-5:\n print(\"Warning: gate \", str(g), \" might not be unitary!\")\n if np.abs(op[1][1] - row[1]) > 1e-5:\n print(\"Warning: gate \", str(g), \" might not be unitary!\")\n\n non_clifford_gates.append(op)\n g = g + 1\n\n basis_gates = [\"h\", \"x\", \"y\", \"z\", \"sx\", \"sy\", \"s\", \"sdg\", \"cx\", \"cy\", \"cz\", \"swap\", \"iswap\", \"iswap_dg\"]\n try:\n circ = transpile(clifford_circ, basis_gates=basis_gates, optimization_level=3)\n except:\n circ = clifford_circ\n\n for i in range(len(non_clifford_gates)):\n circ.unitary(non_clifford_gates[i], [i])\n\n if is_hardware_encoded:\n for i in range(logical_qubits, stabilizer_qubits, 2):\n circ.h(i + 1)\n circ.cz(i, i + 1)\n circ.h(i + 1)\n\n return circ\n\n def file_to_optimized_qiskit_circuit(filename):\n \"\"\"Convert an output state file to a Qiskit circuit\n\n Reads in a circuit from a file named according to the \"filename\"\n parameter and outputs a 'hyper-optimized' Qiskit circuit that\n favors maximum reduction in gate count and depth at the potential\n expense of additional non-Clifford gates. 
(Ancilla qubits are\n left included in the output, though they probably have no gates.)\n\n Args:\n filename: Name of file\n\n Raises:\n RuntimeErorr: Before trying to file_to_qiskit_circuit() with\n QrackCircuit, you must install Qiskit, numpy, and math!\n \"\"\"\n circ = QrackSimulator.file_to_qiskit_circuit(filename)\n \n width = 0\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n width = int(file.readline())\n\n sqrt1_2 = 1 / math.sqrt(2)\n ident = np.eye(2, dtype=np.complex128)\n passable_gates = [\"unitary\", \"h\", \"x\", \"y\", \"z\", \"s\", \"sdg\"]\n\n passed_swaps = []\n for i in range(0, circ.width()):\n # We might trace out swap, but we want to maintain the iteration order of qubit channels.\n non_clifford = np.array([[1, 0], [0, 1]], np.complex128)\n j = 0\n while j < len(circ.data):\n op = circ.data[j].operation\n qubits = circ.data[j].qubits\n q1 = circ.find_bit(qubits[0])[0]\n if (len(qubits) < 2) and (q1 == i):\n if op.name == \"unitary\":\n non_clifford = np.matmul(op.params[0], non_clifford)\n elif op.name == \"h\":\n non_clifford = np.matmul(np.array([[sqrt1_2, sqrt1_2], [sqrt1_2, -sqrt1_2]], np.complex128), non_clifford)\n elif op.name == \"x\":\n non_clifford = np.matmul(np.array([[0, 1], [1, 0]], np.complex128), non_clifford)\n elif op.name == \"y\":\n non_clifford = np.matmul(np.array([[0, -1j], [1j, 0]], np.complex128), non_clifford)\n elif op.name == \"z\":\n non_clifford = np.matmul(np.array([[1, 0], [0, -1]], np.complex128), non_clifford)\n elif op.name == \"s\":\n non_clifford = np.matmul(np.array([[1, 0], [0, 1j]], np.complex128), non_clifford)\n elif op.name == \"sdg\":\n non_clifford = np.matmul(np.array([[1, 0], [0, -1j]], np.complex128), non_clifford)\n else:\n print(\"Warning: Something went wrong! 
(Dropped a single-qubit gate.\")\n\n del circ.data[j]\n continue\n\n if len(qubits) < 2:\n j += 1\n continue\n\n q2 = circ.find_bit(qubits[1])[0]\n\n if op.name == \"swap\":\n if i == q1:\n i = q2\n elif i == q2:\n i = q1\n\n if (i == q1) or (i == q2):\n if circ.data[j] in passed_swaps:\n del circ.data[j]\n continue\n\n passed_swaps.append(circ.data[j])\n\n j += 1\n continue \n\n if (q1 == i) and (op.name == \"cx\" or op.name == \"cy\" or op.name == \"cz\"):\n if (np.isclose(np.abs(non_clifford[0][0]), 1) and np.isclose(np.abs(non_clifford[1][1]), 1) and\n np.isclose(np.abs(non_clifford[0][1]), 0) and np.isclose(np.abs(non_clifford[1][0]), 0)):\n # If we're not buffering anything but phase, the blocking gate has no effect, and we're safe to continue.\n del circ.data[j]\n continue\n\n if (np.isclose(np.abs(non_clifford[0][0]), 0) and np.isclose(np.abs(non_clifford[1][1]), 0) and\n np.isclose(np.abs(non_clifford[0][1]), 1) and np.isclose(np.abs(non_clifford[1][0]), 1)):\n c = QuantumCircuit(1)\n if op.name == \"cx\":\n c.x(0)\n elif op.name == \"cy\":\n c.y(0)\n else:\n c.z(0)\n instr = c.data[0]\n instr.qubits = (qubits[1],)\n circ.data[j] = copy.deepcopy(instr)\n\n j += 1\n continue\n\n if (q1 == i) or (q2 == i) or (op.name != \"cx\"):\n if np.allclose(non_clifford, ident):\n # No buffer content to write to circuit definition\n non_clifford = ident\n break\n\n # We're blocked, so we insert our buffer at this place in the circuit definition.\n c = QuantumCircuit(1)\n c.unitary(non_clifford, 0)\n instr = c.data[0]\n instr.qubits = (qubits[0],)\n circ.data.insert(j, copy.deepcopy(instr))\n non_clifford = ident\n break\n\n j += 1\n\n if (j == len(circ.data)) and (i < width) and not np.allclose(non_clifford, ident):\n # We're at the end of the wire, so add the buffer gate.\n circ.unitary(non_clifford, i)\n\n passed_swaps = []\n for i in range(width, circ.width()):\n # We might trace out swap, but we want to maintain the iteration order of qubit channels.\n non_clifford 
= np.array([[1, 0], [0, 1]], np.complex128)\n j = len(circ.data) - 1\n while j >= 0:\n op = circ.data[j].operation\n qubits = circ.data[j].qubits\n q1 = circ.find_bit(qubits[0])[0]\n if (len(qubits) < 2) and (q1 == i):\n if op.name == \"unitary\":\n non_clifford = np.matmul(non_clifford, op.params[0])\n elif op.name == \"h\":\n non_clifford = np.matmul(non_clifford, np.array([[sqrt1_2, sqrt1_2], [sqrt1_2, -sqrt1_2]], np.complex128))\n elif op.name == \"x\":\n non_clifford = np.matmul(non_clifford, np.array([[0, 1], [1, 0]], np.complex128))\n elif op.name == \"y\":\n non_clifford = np.matmul(non_clifford, np.array([[0, -1j], [1j, 0]], np.complex128))\n elif op.name == \"z\":\n non_clifford = np.matmul(non_clifford, np.array([[1, 0], [0, -1]], np.complex128))\n elif op.name == \"s\":\n non_clifford = np.matmul(non_clifford, np.array([[1, 0], [0, 1j]], np.complex128))\n elif op.name == \"sdg\":\n non_clifford = np.matmul(non_clifford, np.array([[1, 0], [0, -1j]], np.complex128))\n else:\n print(\"Warning: Something went wrong! 
(Dropped a single-qubit gate.\")\n\n del circ.data[j]\n j -= 1\n continue\n\n if len(qubits) < 2:\n j -= 1\n continue\n\n q2 = circ.find_bit(qubits[1])[0]\n\n if op.name == \"swap\":\n if i == q1:\n i = q2\n elif i == q2:\n i = q1\n\n if ((i == q1) or (i == q2)) and (q1 >= width) and (q2 >= width):\n if circ.data[j] in passed_swaps:\n del circ.data[j]\n else:\n passed_swaps.append(circ.data[j])\n\n j -= 1\n continue\n\n if (q1 == i) and (op.name == \"cx\" or op.name == \"cy\" or op.name == \"cz\"):\n # Either way, we're cutting this gate.\n orig_instr = circ.data[j]\n del circ.data[j]\n\n if (np.isclose(np.abs(non_clifford[0][0]), 1) and np.isclose(np.abs(non_clifford[1][1]), 1) and\n np.isclose(np.abs(non_clifford[0][1]), 0) and np.isclose(np.abs(non_clifford[1][0]), 0)):\n # If we're not buffering anything but phase, the blocking gate has no effect, and we're safe to continue.\n j -= 1\n continue\n\n h = QuantumCircuit(1)\n h.h(0)\n instr = h.data[0]\n\n # We're replacing CNOT with CNOT in the opposite direction plus four H gates\n instr.qubits = (qubits[0],)\n circ.data.insert(j, copy.deepcopy(instr))\n instr.qubits = (qubits[1],)\n circ.data.insert(j, copy.deepcopy(instr))\n orig_instr.qubits = (qubits[1], qubits[0])\n circ.data.insert(j, copy.deepcopy(orig_instr))\n instr.qubits = (qubits[0],)\n circ.data.insert(j, copy.deepcopy(instr))\n instr.qubits = (qubits[1],)\n circ.data.insert(j, copy.deepcopy(instr))\n j += 4\n\n continue\n\n if (q1 == i) or (op.name != \"cx\"):\n if np.allclose(non_clifford, ident):\n # No buffer content to write to circuit definition\n break\n\n # We're blocked, so we insert our buffer at this place in the circuit definition.\n c = QuantumCircuit(1)\n c.unitary(non_clifford, 0)\n instr = c.data[0]\n instr.qubits = (qubits[0],)\n circ.data.insert(j + 1, copy.deepcopy(instr))\n break\n\n if q2 == i:\n to_inject = np.matmul(non_clifford, np.array([[sqrt1_2, sqrt1_2], [sqrt1_2, -sqrt1_2]]))\n\n if np.allclose(to_inject, ident):\n # No 
buffer content to write to circuit definition\n del circ.data[j]\n j -= 1\n continue\n\n c = QuantumCircuit(1)\n c.unitary(to_inject, 0)\n instr = c.data[0]\n instr.qubits = (qubits[0],)\n circ.data[j] = copy.deepcopy(instr)\n\n j -= 1\n\n basis_gates=[\"u\", \"x\", \"cx\", \"cy\", \"cz\", \"swap\", \"iswap\", \"iswap_dg\"]\n circ = transpile(circ, basis_gates=basis_gates, optimization_level=3)\n\n #Eliminate unused ancillae\n qasm = circ.qasm()\n qasm = qasm.replace(\"qreg q[\" + str(circ.width()) + \"];\", \"qreg q[\" + str(width) + \"];\")\n highest_index = max([int(x) for x in re.findall(r\"\\[(.*?)\\]\", qasm) if x.isdigit()])\n if highest_index != width:\n qasm = qasm.replace(\"qreg q[\" + str(width) + \"];\", \"qreg q[\" + str(highest_index) + \"];\")\n\n orig_circ = circ\n try:\n circ = QuantumCircuit.from_qasm_str(qasm)\n except:\n circ = orig_circ\n\n return circ\n\n def _apply_pyzx_op(self, gate):\n if gate.name == \"XPhase\":\n self.r(Pauli.PauliX, math.pi * gate.phase, gate.target)\n elif gate.name == \"ZPhase\":\n self.r(Pauli.PauliZ, math.pi * gate.phase, gate.target)\n elif gate.name == \"Z\":\n self.z(gate.target)\n elif gate.name == \"S\":\n self.s(gate.target)\n elif gate.name == \"T\":\n self.t(gate.target)\n elif gate.name == \"NOT\":\n self.x(gate.target)\n elif gate.name == \"HAD\":\n self.h(gate.target)\n elif gate.name == \"CNOT\":\n self.mcx([gate.control], gate.target)\n elif gate.name == \"CZ\":\n self.mcz([gate.control], gate.target)\n elif gate.name == \"CX\":\n self.h(gate.control)\n self.mcx([gate.control], gate.target)\n self.h(gate.control)\n elif gate.name == \"SWAP\":\n self.swap(gate.control, gate.target)\n elif gate.name == \"CRZ\":\n self.mcr(Pauli.PauliZ, math.pi * gate.phase, [gate.control], gate.target)\n elif gate.name == \"CHAD\":\n self.mch([gate.control], gate.target)\n elif gate.name == \"ParityPhase\":\n self.phase_parity(math.pi * gate.phase, gate.targets)\n elif gate.name == \"FSim\":\n self.fsim(gate.theta, 
gate.phi, gate.control, gate.target)\n elif gate.name == \"CCZ\":\n self.mcz([gate.ctrl1, gate.ctrl2], gate.target)\n elif gate.name == \"Tof\":\n self.mcx([gate.ctrl1, gate.ctrl2], gate.target)\n self._throw_if_error()\n\n def run_pyzx_gates(self, gates):\n \"\"\"PYZX Gates\n\n Converts PYZX gates to `QRackSimulator` and immediately executes them.\n\n Args:\n gates: list of PYZX gates\n\n Raises:\n RuntimeError: QrackSimulator raised an exception.\n \"\"\"\n for gate in gates:\n _apply_pyzx_op(gate)\n\n def _apply_op(self, operation):\n name = operation.name\n\n if (name == 'id') or (name == 'barrier'):\n # Skip measurement logic\n return\n\n conditional = getattr(operation, 'conditional', None)\n if isinstance(conditional, int):\n conditional_bit_set = (self._classical_register >> conditional) & 1\n if not conditional_bit_set:\n return\n elif conditional is not None:\n mask = int(conditional.mask, 16)\n if mask > 0:\n value = self._classical_memory & mask\n while (mask & 0x1) == 0:\n mask >>= 1\n value >>= 1\n if value != int(conditional.val, 16):\n return\n\n if (name == 'u1') or (name == 'p'):\n self._sim.u(operation.qubits[0], 0, 0, float(operation.params[0]))\n elif name == 'u2':\n self._sim.u(\n operation.qubits[0],\n math.pi / 2,\n float(operation.params[0]),\n float(operation.params[1]),\n )\n elif (name == 'u3') or (name == 'u'):\n self._sim.u(\n operation.qubits[0],\n float(operation.params[0]),\n float(operation.params[1]),\n float(operation.params[2]),\n )\n elif (name == 'unitary') and (len(operation.qubits) == 1):\n self._sim.mtrx(operation.params[0].flatten(), operation.qubits[0])\n elif name == 'r':\n self._sim.u(\n operation.qubits[0],\n float(operation.params[0]),\n float(operation.params[1]) - math.pi / 2,\n (-1 * float(operation.params[1])) + math.pi / 2,\n )\n elif name == 'rx':\n self._sim.r(Pauli.PauliX, float(operation.params[0]), operation.qubits[0])\n elif name == 'ry':\n self._sim.r(Pauli.PauliY, float(operation.params[0]), 
operation.qubits[0])\n elif name == 'rz':\n self._sim.r(Pauli.PauliZ, float(operation.params[0]), operation.qubits[0])\n elif name == 'h':\n self._sim.h(operation.qubits[0])\n elif name == 'x':\n self._sim.x(operation.qubits[0])\n elif name == 'y':\n self._sim.y(operation.qubits[0])\n elif name == 'z':\n self._sim.z(operation.qubits[0])\n elif name == 's':\n self._sim.s(operation.qubits[0])\n elif name == 'sdg':\n self._sim.adjs(operation.qubits[0])\n elif name == 'sx':\n self._sim.mtrx(\n [(1 + 1j) / 2, (1 - 1j) / 2, (1 - 1j) / 2, (1 + 1j) / 2],\n operation.qubits[0],\n )\n elif name == 'sxdg':\n self._sim.mtrx(\n [(1 - 1j) / 2, (1 + 1j) / 2, (1 + 1j) / 2, (1 - 1j) / 2],\n operation.qubits[0],\n )\n elif name == 't':\n self._sim.t(operation.qubits[0])\n elif name == 'tdg':\n self._sim.adjt(operation.qubits[0])\n elif name == 'cu1':\n self._sim.mcu(\n operation.qubits[0:1], operation.qubits[1], 0, 0, float(operation.params[0])\n )\n elif name == 'cu2':\n self._sim.mcu(\n operation.qubits[0:1],\n operation.qubits[1],\n math.pi / 2,\n float(operation.params[0]),\n float(operation.params[1]),\n )\n elif (name == 'cu3') or (name == 'cu'):\n self._sim.mcu(\n operation.qubits[0:1],\n operation.qubits[1],\n float(operation.params[0]),\n float(operation.params[1]),\n float(operation.params[2]),\n )\n elif name == 'cx':\n self._sim.mcx(operation.qubits[0:1], operation.qubits[1])\n elif name == 'cy':\n self._sim.mcy(operation.qubits[0:1], operation.qubits[1])\n elif name == 'cz':\n self._sim.mcz(operation.qubits[0:1], operation.qubits[1])\n elif name == 'ch':\n self._sim.mch(operation.qubits[0:1], operation.qubits[1])\n elif name == 'cp':\n self._sim.mcmtrx(\n operation.qubits[0:1],\n [\n 1,\n 0,\n 0,\n math.cos(float(operation.params[0])) + 1j * math.sin(float(operation.params[0])),\n ],\n operation.qubits[1],\n )\n elif name == 'csx':\n self._sim.mcmtrx(\n operation.qubits[0:1],\n [(1 + 1j) / 2, (1 - 1j) / 2, (1 - 1j) / 2, (1 + 1j) / 2],\n operation.qubits[1],\n )\n elif 
name == 'csxdg':\n self._sim.mcmtrx(\n operation.qubits[0:1],\n [(1 - 1j) / 2, (1 + 1j) / 2, (1 + 1j) / 2, (1 - 1j) / 2],\n operation.qubits[1],\n )\n elif name == 'dcx':\n self._sim.mcx(operation.qubits[0:1], operation.qubits[1])\n self._sim.mcx(operation.qubits[1:2], operation.qubits[0])\n elif name == 'ccx':\n self._sim.mcx(operation.qubits[0:2], operation.qubits[2])\n elif name == 'ccy':\n self._sim.mcy(operation.qubits[0:2], operation.qubits[2])\n elif name == 'ccz':\n self._sim.mcz(operation.qubits[0:2], operation.qubits[2])\n elif name == 'mcx':\n self._sim.mcx(operation.qubits[0:-1], operation.qubits[-1])\n elif name == 'mcy':\n self._sim.mcy(operation.qubits[0:-1], operation.qubits[-1])\n elif name == 'mcz':\n self._sim.mcz(operation.qubits[0:-1], operation.qubits[-1])\n elif name == 'swap':\n self._sim.swap(operation.qubits[0], operation.qubits[1])\n elif name == 'iswap':\n self._sim.iswap(operation.qubits[0], operation.qubits[1])\n elif name == 'iswap_dg':\n self._sim.adjiswap(operation.qubits[0], operation.qubits[1])\n elif name == 'cswap':\n self._sim.cswap(\n operation.qubits[0:1], operation.qubits[1], operation.qubits[2]\n )\n elif name == 'mcswap':\n self._sim.cswap(\n operation.qubits[:-2], operation.qubits[-2], operation.qubits[-1]\n )\n elif name == 'reset':\n qubits = operation.qubits\n for qubit in qubits:\n if self._sim.m(qubit):\n self._sim.x(qubit)\n elif name == 'measure':\n qubits = operation.qubits\n clbits = operation.memory\n cregbits = (\n operation.register\n if hasattr(operation, 'register')\n else len(operation.qubits) * [-1]\n )\n\n self._sample_qubits += qubits\n self._sample_clbits += clbits\n self._sample_cregbits += cregbits\n\n if not self._sample_measure:\n for index in range(len(qubits)):\n qubit_outcome = self._sim.m(qubits[index])\n\n clbit = clbits[index]\n clmask = 1 << clbit\n self._classical_memory = (self._classical_memory & (~clmask)) | (\n qubit_outcome << clbit\n )\n\n cregbit = cregbits[index]\n if cregbit < 0:\n 
cregbit = clbit\n\n regbit = 1 << cregbit\n self._classical_register = (\n self._classical_register & (~regbit)\n ) | (qubit_outcome << cregbit)\n\n elif name == 'bfunc':\n mask = int(operation.mask, 16)\n relation = operation.relation\n val = int(operation.val, 16)\n\n cregbit = operation.register\n cmembit = operation.memory if hasattr(operation, 'memory') else None\n\n compared = (self._classical_register & mask) - val\n\n if relation == '==':\n outcome = compared == 0\n elif relation == '!=':\n outcome = compared != 0\n elif relation == '<':\n outcome = compared < 0\n elif relation == '<=':\n outcome = compared <= 0\n elif relation == '>':\n outcome = compared > 0\n elif relation == '>=':\n outcome = compared >= 0\n else:\n raise QrackError('Invalid boolean function relation.')\n\n # Store outcome in register and optionally memory slot\n regbit = 1 << cregbit\n self._classical_register = (self._classical_register & (~regbit)) | (\n int(outcome) << cregbit\n )\n if cmembit is not None:\n membit = 1 << cmembit\n self._classical_memory = (self._classical_memory & (~membit)) | (\n int(outcome) << cmembit\n )\n else:\n err_msg = 'QrackSimulator encountered unrecognized operation \"{0}\"'\n raise RuntimeError(err_msg.format(operation))\n\n def _add_sample_measure(self, sample_qubits, sample_clbits, num_samples):\n \"\"\"Generate data samples from current statevector.\n\n Taken almost straight from the terra source code.\n\n Args:\n measure_params (list): List of (qubit, clbit) values for\n measure instructions to sample.\n num_samples (int): The number of data samples to generate.\n\n Returns:\n list: A list of data values in hex format.\n \"\"\"\n # Get unique qubits that are actually measured\n measure_qubit = [qubit for qubit in sample_qubits]\n measure_clbit = [clbit for clbit in sample_clbits]\n\n # Sample and convert to bit-strings\n data = []\n if num_samples == 1:\n sample = self._sim.m_all()\n result = 0\n for index in range(len(measure_qubit)):\n qubit = 
measure_qubit[index]\n qubit_outcome = (sample >> qubit) & 1\n result |= qubit_outcome << index\n measure_results = [result]\n else:\n measure_results = self._sim.measure_shots(measure_qubit, num_samples)\n\n for sample in measure_results:\n for index in range(len(measure_qubit)):\n qubit_outcome = (sample >> index) & 1\n clbit = measure_clbit[index]\n clmask = 1 << clbit\n self._classical_memory = (self._classical_memory & (~clmask)) | (\n qubit_outcome << clbit\n )\n\n data.append(hex(int(bin(self._classical_memory)[2:], 2)))\n\n return data\n\n def run_qiskit_circuit(self, experiment, shots=1):\n if not _IS_QISKIT_AVAILABLE:\n raise RuntimeError(\n \"Before trying to run_qiskit_circuit() with QrackSimulator, you must install Qiskit!\"\n )\n\n if isinstance(experiment, QuantumCircuit):\n experiment = convert_qiskit_circuit_to_qasm_experiment(experiment)\n\n instructions = []\n if isinstance(experiment, QasmQobjExperiment):\n instructions = experiment.instructions\n else:\n raise RuntimeError('Unrecognized \"run_input\" argument specified for run().')\n\n self._shots = shots\n self._sample_qubits = []\n self._sample_clbits = []\n self._sample_cregbits = []\n self._sample_measure = True\n _data = []\n shotsPerLoop = self._shots\n shotLoopMax = 1\n\n is_initializing = True\n boundary_start = -1\n\n for opcount in range(len(instructions)):\n operation = instructions[opcount]\n\n if operation.name == 'id' or operation.name == 'barrier':\n continue\n\n if is_initializing and (\n (operation.name == 'measure') or (operation.name == 'reset')\n ):\n continue\n\n is_initializing = False\n\n if (operation.name == 'measure') or (operation.name == 'reset'):\n if boundary_start == -1:\n boundary_start = opcount\n\n if (boundary_start != -1) and (operation.name != 'measure'):\n shotsPerLoop = 1\n shotLoopMax = self._shots\n self._sample_measure = False\n break\n\n preamble_memory = 0\n preamble_register = 0\n preamble_sim = None\n\n if self._sample_measure or boundary_start <= 
0:\n boundary_start = 0\n self._sample_measure = True\n shotsPerLoop = self._shots\n shotLoopMax = 1\n else:\n boundary_start -= 1\n if boundary_start > 0:\n self._sim = self\n self._classical_memory = 0\n self._classical_register = 0\n\n for operation in instructions[:boundary_start]:\n self._apply_op(operation)\n\n preamble_memory = self._classical_memory\n preamble_register = self._classical_register\n preamble_sim = self._sim\n\n for shot in range(shotLoopMax):\n if preamble_sim is None:\n self._sim = self\n self._classical_memory = 0\n self._classical_register = 0\n else:\n self._sim = QrackSimulator(cloneSid=preamble_sim.sid)\n self._classical_memory = preamble_memory\n self._classical_register = preamble_register\n\n for operation in instructions[boundary_start:]:\n self._apply_op(operation)\n\n if not self._sample_measure and (len(self._sample_qubits) > 0):\n _data += [hex(int(bin(self._classical_memory)[2:], 2))]\n self._sample_qubits = []\n self._sample_clbits = []\n self._sample_cregbits = []\n\n if self._sample_measure and (len(self._sample_qubits) > 0):\n _data = self._add_sample_measure(\n self._sample_qubits, self._sample_clbits, self._shots\n )\n\n del self._sim\n del self._shots\n del self._sample_qubits\n del self._sample_clbits\n del self._sample_cregbits\n del self._sample_measure\n\n return _data\n","repo_name":"unitaryfund/pyqrack","sub_path":"pyqrack/qrack_simulator.py","file_name":"qrack_simulator.py","file_ext":"py","file_size_in_byte":113189,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"51"} +{"seq_id":"32569509827","text":"from django import forms\nfrom .models import Mobile\nfrom django.core import validators\n\n\nmobiles_color = [\n ('white', 'سفید'),\n ('black', 'مشکی'),\n ('gold', 'طلایی'),\n ('silver', 'نقره ای'),\n ('blue', 'آبی'),\n]\n\nclass MobileForm(forms.Form):\n\n brand_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'نام برند را وارد کنید'}),\n label='نام 
برند')\n\n brand_nationality = forms.CharField(widget=forms.TextInput(attrs={'placeholder': ' ملیت برند را وارد کنید'}),\n label='ملیت برند')\n model = forms.CharField(widget=forms.TextInput(attrs={'placeholder': ' مدل گوشی را وارد کنید'}),\n label='مدل گوشی')\n price = forms.IntegerField(widget=forms.NumberInput(attrs={'placeholder': ' قیمت را وارد کنید','min':'0','oninput':'validity.valid||(value=\"-\");'}),\n label='قیمت')\n color = forms.ChoiceField(widget=forms.Select(attrs={'placeholder': ' رنگ را وارد کنید'}),\n label=' رنگ', choices=mobiles_color)\n resulation = forms.FloatField(widget=forms.NumberInput(attrs={'placeholder': 'سایز صفحه را وارد کنید','min':'0','oninput':'validity.valid||(value=\"-\");'}),\n label='سایز صفحه نمایش') \n is_available = forms.BooleanField(required=False, \n label='وضعیت موجودی')\n manufacturer = forms.CharField(widget=forms.TextInput(attrs={'placeholder': ' کشور سازنده وارد کنید'}),\n label='کشور سازنده') \n\n def clean_model(self):\n model = self.cleaned_data.get('model')\n is_exist_model = Mobile.objects.filter(model = model).exists()\n if is_exist_model:\n raise forms.ValidationError('مدل مورد نظر موجود می باشد.')\n return model\n\n","repo_name":"sepehrsh79/MobileStore","sub_path":"mobiles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"7001250822","text":"\"\"\"\nLeetCode : 1078. 
Occurrences After Bigram\nproblem : https://leetcode.com/problems/occurrences-after-bigram/\n\"\"\"\n\nclass Solution:\n def findOcurrences(self, text: str, first: str, second: str) -> List[str]:\n text_list = text.split()\n last_index = len(text_list) - 1\n result = []\n for idx, txt in enumerate(text_list):\n if txt == second:\n if 0 <= idx - 1 and idx + 1 <= last_index:\n if text_list[idx - 1] == first:\n result.append(text_list[idx + 1])\n return result\n","repo_name":"2kindsofcs/daily-algo-challenge","sub_path":"daim/190623_1078.py","file_name":"190623_1078.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21579399205","text":"import os\nimport sys\nsys.path.append('../../../')\n\nfrom dependencies import *\nfrom settings import *\nfrom reproducibility import *\nfrom models.TGS_salt.Unet34_scSE_hyper import Unet_scSE_hyper as Net\nimport pickle\nfrom tqdm import tqdm\n\ntrain_path = os.path.join('resnet34')\ntrain_file_list = glob.glob(os.path.join(train_path, '*.p'))\ntrain_file_list = [f.split('/')[-1].split('.')[0] for f in train_file_list]\n\ntrain_path1 = os.path.join('ocnet')\ntrain_file_list1 = glob.glob(os.path.join(train_path1, '*.p'))\ntrain_file_list1 = [f.split('/')[-1].split('.p')[0] for f in train_file_list1]\n\npreds=None\nfor file1 in tqdm(train_file_list):\n sigmoids=pickle.load(open('resnet34/'+file1+\".p\",\"rb\"))\n if preds is None: preds=sigmoids\n else: preds=preds+sigmoids\n\npreds=preds/(len(train_file_list))\nprint(preds.mean())\npickle.dump(preds,open(\"resnet34_all_sigmoids.p\",\"wb\"))\n\n\n\npreds2=None\n\npreds2=None\nfor file1 in tqdm(train_file_list1):\n sigmoids=pickle.load(open('ocnet/'+file1+\".p\",\"rb\"))\n if preds2 is None: preds2=sigmoids\n else: 
preds2=preds2+sigmoids\n#print(preds2.mean())\npreds2=preds2/(len(train_file_list1))\nprint(preds2.mean())\npickle.dump(preds2,open(\"ocnet_all_sigmoids.p\",\"wb\"))\n\n\npreds = (preds+preds2)/2\nprint(preds.mean())\npickle.dump(preds,open(\"all_sigmoids.p\",\"wb\"))\n","repo_name":"liaopeiyuan/kaggle-solutions","sub_path":"projects/TGS_salt/binary_classifier/combine_solution.py","file_name":"combine_solution.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":281,"dataset":"github-code","pt":"51"} +{"seq_id":"5787355960","text":"from math import sqrt\nfrom typing import Any\nfrom typing import Sequence\n\nfrom hw.alexander_sidorov.helpers import CITIES\n\n\ndef task_01_boundary(seq: Sequence) -> tuple:\n return seq[0], seq[-1]\n\n\ndef task_02_expand(seq: Sequence) -> Any:\n return seq[0] * seq[1:]\n\n\ndef task_03_hdist(seq1: Sequence, seq2: Sequence) -> int:\n dist = abs(len(seq2) - len(seq1))\n zipped = zip(seq1, seq2)\n for first_pair in zipped:\n if first_pair[0] != first_pair[1]:\n dist += 1\n return dist\n\n\ndef calc_dist(x_coord: float, y_coord: float, mul: int) -> float:\n return (x_coord - y_coord) * mul\n\n\ndef get_dist(city1: tuple, city2: tuple) -> float:\n dist_0 = calc_dist(city1[0], city2[0], 111)\n dist_1 = calc_dist(city1[1], city2[1], 65)\n return sqrt((dist_0**2) + (dist_1**2))\n\n\ndef task_04_cities(start_city: str) -> dict:\n res_dist = {}\n for dest_city in CITIES:\n dist = get_dist(CITIES[start_city], CITIES[dest_city])\n res_dist[dest_city] = dist\n return res_dist\n\n\ndef task_05_route(route: list | tuple) -> float:\n dist = 0.0\n real_cities = [city for city in route if city in CITIES]\n for count in range(len(real_cities) - 1):\n dist += get_dist(\n CITIES[real_cities[count]], CITIES[real_cities[count + 1]]\n )\n return 
dist\n","repo_name":"alexander-sidorov/m-pt1-58-22","sub_path":"hw/eugene_ladyko/lesson06/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"71930333599","text":"import torch\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom models import *\nfrom config import *\nfrom cka import cka\nfrom ptflops import get_model_complexity_info\nimport copy\nimport numpy as np\nimport pickle as pkl\nimport os\nimport argparse\nfrom adabelief_pytorch import AdaBelief\n\n######### Parser #########\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-m\", \"--model\", help=\"model architecture\", default='vanilla_cnn', choices=['vanilla_cnn', 'resnet-18'])\nparser.add_argument(\"--seed\", help=\"setup the random seed\", default='0')\nparser.add_argument(\"--use_pretrained\", help=\"use pretrained CIFAR-10 model\", default='True')\nparser.add_argument(\"--dataset\", help=\"dataset used\", default='cifar', choices=['cifar', 'flowers', 'cal256'])\nparser.add_argument(\"--split_pattern\", help=\"pattern to divide tasks with\", default='coarse_labels', choices=['sequential', 'random', 'coarse_labels'])\nparser.add_argument(\"--batch_size\", help=\"batch size for dataloaders\", default='128')\nparser.add_argument(\"--optimizer\", help=\"optimizer for training\", default='SGD', choices=['SGD', 'AdaBelief'])\nparser.add_argument(\"--use_AGC\", help=\"use AGC?\", default='False', choices=['True', 'False'])\nparser.add_argument(\"--train_type\", help=\"train nets with which method\", choices=['agem', 'ogd', 'er_reservoir'])\nparser.add_argument(\"--grid_search\", help=\"grid search for hyperparameters\", default='False', choices=['True', 'False'])\nparser.add_argument(\"--lr_config\", help=\"learning 
rate\", default='use_grid')\nparser.add_argument(\"--buffer_size\", help=\"buffer size\", default='500')\nparser.add_argument(\"--print_cka\", help=\"print task-wise cka similarities\", default='False', choices=['True', 'False'])\nparser.add_argument(\"--save_results\", help=\"save CL specific stats for later analysis\", default='False', choices=['True', 'False'])\nargs = parser.parse_args()\n\n######### Setup #########\ntorch.manual_seed(int(args.seed))\ncudnn.deterministic = True\ncudnn.benchmark = False\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nif(device == 'cuda'):\n\t\tprint(\"Backend:\", device)\nelse:\n\traise Exception(\"Please use a cuda-enabled GPU.\")\n\nif not os.path.isdir('cl_models'):\n\tos.mkdir('cl_models')\nif not os.path.isdir('results'):\n\tos.mkdir('results')\nif not os.path.isdir('grid_search'):\n\tos.mkdir('grid_search')\ncl_root = 'cl_models/'\n\n######### Setup the framework from argparser variables #########\nmodel_name = args.model # model architecture\nuse_pretrained = (args.use_pretrained=='True')\nif(model_name=='vanilla_cnn'):\n\tpretrained_path = 'pretrained/vanilla_cnn_temp_1.0_seed_0_plain.pth'\nelse:\n\tpretrained_path = 'pretrained/resnet-18_temp_1.0_seed_0.pth'\ndataset = args.dataset + '_splits'\nsplit_pattern = args.split_pattern\nnum_tasks = len(os.listdir('./../datasets/'+dataset+'/'+split_pattern))\ntotal_classes = 100 if (args.dataset == 'cifar') else 102 if (args.dataset == 'flowers') else 256\nnum_classes = total_classes // num_tasks\nbatch_size = int(args.batch_size)\nopt_type = args.optimizer\nuse_AGC = (args.use_AGC == 'True')\nif(split_pattern == 'coarse_labels'):\n\tnum_tasks, num_classes = 20, 5\ntrain_type = args.train_type\ngrid_search = (args.grid_search == 'True')\nwd = wd_cl # weight decay\nbuffer_size = int(args.buffer_size) # buffer size\nprint_cka = (args.print_cka == 'True')\nsave_results = (args.save_results == 'True')\n\n# Hyperparameters\ncl_epochs = cl_epochs_config\nwd = 
wd_cl\n\nif(grid_search):\n\tstart_task, end_task = 0, 3\n\tlr_options = [0.1, 0.03, 0.01, 0.003, 0.001]\n\treg_options = [0]\n\tprint_cka = False\n\tsave_results = True\nelif(args.lr_config=='use_grid'):\n\twith open(\"./hyperparams/\" + dataset + '/' + train_type + \"_None\" + \".params\", 'rb') as f:\n\t\tcoeffs = pkl.load(f)['Test']\n\tstart_task, end_task = 3, num_tasks\n\tlr_options = [coeffs['LR']]\n\treg_options = [0]\nelse:\n\tstart_task, end_task = 3, num_tasks\n\tlr_options = [cl_sched_config] if (args.lr_config=='use_config') else [float(args.lr_config)]\n\treg_options = [0]\nnum_tasks = end_task - start_task\n\nhyperparams = []\nfor l in lr_options:\n\tfor r in reg_options:\n\t\thyperparams.append((l, r))\n\nprint(\"\\n------------------ Setup For Training ------------------\\n\")\nprint(\"Model architecture:\", model_name)\nprint(\"Training type:\", train_type)\nprint(\"Buffer size:\", buffer_size)\nprint(\"Data split pattern:\", split_pattern)\nprint(\"Batch size:\", batch_size)\nprint(\"Optimizer:\", opt_type)\nprint(\"Weight decay for CL:\", wd)\nprint(\"Number of tasks:\", num_tasks)\nprint(\"Number of classes per task:\", num_classes)\nprint(\"Pretrained model:\", use_pretrained)\nprint(\"Save stats:\", save_results)\n\n######### Loss #########\ncriterion = nn.CrossEntropyLoss()\n\n######### Dataloaders #########\ndef get_dataloader(task_id, split_pattern):\n\ttransform_train = transforms.Compose(\n\t\t[transforms.Resize((32,32)),\n\t\t # transforms.RandomHorizontalFlip(),\n\t\t transforms.ToTensor(),\n\t\t transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n\t\t ])\n\ttransform_test = transforms.Compose(\n\t\t[transforms.Resize((32,32)),\n\t\t transforms.ToTensor(),\n\t\t transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n\t\t ])\n\n\ttrainset = datasets.ImageFolder(root='./../datasets/'+dataset+'/'+split_pattern+'/task_'+str(task_id)+'/train/', transform=transform_train)\n\ttrainloader = torch.utils.data.DataLoader(trainset, 
batch_size=batch_size, shuffle=True, num_workers=4)\n\ttestset = datasets.ImageFolder(root='./../datasets/'+dataset+'/'+split_pattern+'/task_'+str(task_id)+'/test/', transform=transform_test)\n\ttestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)\n\treturn trainloader, testloader\n\n######### Basic functions #########\ndef clone_model_params(model):\n\tparams = []\n\tfor param in model.parameters():\n\t\tif not param.requires_grad:\n\t\t\tcontinue\n\t\tparams.append(param.detach().clone())\n\treturn params\n\ndef clone_model_grads(model):\n\tgrads = []\n\tfor param in model.parameters():\n\t\tif not param.requires_grad:\n\t\t\tcontinue\n\t\tgrads.append(0. if param.grad is None else param.grad.detach().clone() + 0.)\n\treturn grads\n\n######### Initialization / training functions #########\n# Create model\ndef create_model(name, num_classes=5, num_tasks=20, is_pretrained=False):\n\tif(name == 'vanilla_cnn' and is_pretrained):\n\t\tnet = torch.nn.DataParallel(Vanilla_cnn(num_classes=num_classes))\n\telif(name == 'vanilla_cnn'):\n\t\tnet = torch.nn.DataParallel(Vanilla_cnn_multiclassifier(num_classes=num_classes, num_tasks=num_tasks))\n\telif(name == 'resnet-18' and is_pretrained):\n\t\tnet = torch.nn.DataParallel(ResNet18(num_classes=num_classes))\n\telif(name == 'resnet-18'):\n\t\tnet = torch.nn.DataParallel(ResNet18_multiclassifier(num_classes=num_classes, num_tasks=num_tasks))\n\treturn net\n\n# Training\ndef buffer_update(mem_buffer, label_buffer, tid_buffer, dataloader, buffer_size, task_id):\n\tsamples_per_task = buffer_size // (task_id - start_task + 1)\n\tref_indices = torch.randperm(buffer_size)[0:samples_per_task]\n\tupdate_rounds = (samples_per_task // batch_size) + 1\n\n\tdataloader_iter = iter(dataloader)\n\tupdates_finished = 0\n\twhile (updates_finished < samples_per_task):\n\t\tX, Y = next(dataloader_iter)\n\t\tfor i in range(batch_size):\n\t\t\tupdates_finished += 1\n\t\t\tif(updates_finished == 
samples_per_task):\n\t\t\t\tbreak\n\t\t\tmem_buffer[ref_indices[updates_finished]] = X[i]\n\t\t\tlabel_buffer[ref_indices[updates_finished]] = Y[i]\n\t\t\ttid_buffer[ref_indices[updates_finished]] = task_id\n\treturn mem_buffer, label_buffer\n\n# AGC\ndef AGC(net, optimizer):\n\teta = optimizer.param_groups[0]['lr']\n\tlambd = optimizer.param_groups[0]['weight_decay']\n\tbeta = optimizer.param_groups[0]['momentum']\n\tthreshold = np.sqrt(2 * lambd / (eta * (1 + beta)))\n\n\tfor mod in net.modules():\n\t\tif(isinstance(mod, nn.Conv2d)):\n\t\t\tg_norms = torch.norm(mod.weight.grad.data.reshape(mod.weight.shape[0], -1), dim=1)\n\t\t\tp_norms = torch.norm(mod.weight.data.reshape(mod.weight.shape[0], -1), dim=1)\n\t\t\tratios = torch.div(g_norms, p_norms + 1e-8) \n\t\t\tmultiplier = (ratios < threshold) * 1\n\t\t\tmultiplier = multiplier + (1 - multiplier) * threshold * (torch.div(p_norms, g_norms + 1e-8))\n\t\t\tmod.weight.grad.data = mod.weight.grad.data * multiplier.view(-1,1,1,1)\t\t\t\t\t\t\t\t\n\treturn net\n\ndef agem_train(net_curr, mem_buffer, label_buffer, tid_buffer, dataloader, epoch, task_id=0, buffer_size=200, batch_size=10):\n\tnet_curr.train()\n\ttrain_loss = 0\n\tcorrect = 0\n\ttotal = 0\n\tfor batch_idx, (inputs, targets) in enumerate(dataloader):\n\t\tif(task_id > start_task):\n\t\t\toptimizer.zero_grad()\n\t\t\tref_indices = torch.randperm(buffer_size - batch_size)\n\t\t\tref_samples_X, ref_samples_Y, ref_samples_tid = (mem_buffer[ref_indices[0]: ref_indices[0]+batch_size]).to(device), (label_buffer[ref_indices[0]: ref_indices[0]+batch_size]).to(device), (tid_buffer[ref_indices[0]: ref_indices[0]+batch_size]).to(device)\n\t\t\toutputs = net_curr(ref_samples_X, ref_samples_tid, num_classes=num_classes)\n\t\t\tloss = criterion(outputs, ref_samples_Y)\n\t\t\tloss.backward()\n\t\t\tref_grads = clone_model_grads(net_curr)\n\n\t\toptimizer.zero_grad()\n\t\tinputs, targets = inputs.to(device), targets.to(device)\n\t\toutputs = net_curr(inputs, (task_id * 
torch.ones(inputs.shape[0], dtype=torch.long)).to(device), num_classes=num_classes)\n\t\tloss = criterion(outputs, targets)\n\t\tloss.backward()\n\t\tif(use_AGC):\n\t\t\tnet_curr = AGC(net=net_curr, optimizer=optimizer)\n\t\tstep_grads = clone_model_grads(net_curr)\n\n\t\tif(task_id > start_task):\n\t\t\tinner_prod = 0\n\t\t\tref_norm = 0\n\t\t\tfor g_step, g_ref in zip(step_grads, ref_grads):\n\t\t\t\tinner_prod += (g_step * g_ref).sum()\n\t\t\t\tref_norm += g_ref.norm().pow(2)\n\n\t\t\tif(inner_prod < 0):\n\t\t\t\tlind = 0\n\t\t\t\tfor mod in net_curr.modules():\n\t\t\t\t\tif isinstance(mod, nn.Conv2d):\n\t\t\t\t\t\tmod.weight.grad -= (inner_prod / ref_norm) * ref_grads[lind]\n\t\t\t\t\t\tlind += 1\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tmod.bias.grad -= (inner_prod / ref_norm) * ref_grads[lind]\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\telif isinstance(mod, nn.BatchNorm2d):\n\t\t\t\t\t\tmod.weight.grad -= (inner_prod / ref_norm) * ref_grads[lind]\n\t\t\t\t\t\tlind += 1\n\t\t\t\t\t\tmod.bias.grad -= (inner_prod / ref_norm) * ref_grads[lind]\n\t\t\t\t\t\tlind += 1\n\n\t\toptimizer.step()\n\n\t\ttrain_loss += loss.item()\n\t\t_, predicted = outputs.max(1)\n\t\ttotal += targets.size(0)\n\t\tcorrect += predicted.eq(targets).sum().item()\n\t\tprogress_bar(batch_idx, len(dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n\t\t\t% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\ndef er_reservoir_train(net_curr, mem_buffer, dataloader, epoch, task_id=0, buffer_size=200, batch_size=10):\n\tnet_curr.train()\n\ttrain_loss = 0\n\tcorrect = 0\n\ttotal = 0\n\tfor batch_idx, (inputs, targets) in enumerate(dataloader):\n\t\tif(task_id > start_task):\n\t\t\toptimizer.zero_grad()\n\t\t\tref_indices = torch.randperm(buffer_size - batch_size)\n\t\t\tref_samples_X, ref_samples_Y, ref_samples_tid = (mem_buffer[ref_indices[0]: ref_indices[0]+batch_size]).to(device), (label_buffer[ref_indices[0]: ref_indices[0]+batch_size]).to(device), (tid_buffer[ref_indices[0]: 
ref_indices[0]+batch_size]).to(device)\n\t\t\toutputs = net_curr(ref_samples_X, ref_samples_tid, num_classes=num_classes)\n\t\t\tloss = criterion(outputs, ref_samples_Y)\n\t\t\tloss.backward()\n\t\t\tif(use_AGC):\n\t\t\t\tnet_curr = AGC(net=net_curr, optimizer=optimizer)\n\t\t\toptimizer.step()\n\n\t\toptimizer.zero_grad()\n\t\tinputs, targets = inputs.to(device), targets.to(device)\n\t\toutputs = net_curr(inputs, (task_id * torch.ones(inputs.shape[0], dtype=torch.long)).to(device), num_classes=num_classes)\n\t\tloss = criterion(outputs, targets)\n\t\tloss.backward()\n\t\tif(use_AGC):\n\t\t\tnet_curr = AGC(net=net_curr, optimizer=optimizer)\n\t\toptimizer.step()\n\n\t\ttrain_loss += loss.item()\n\t\t_, predicted = outputs.max(1)\n\t\ttotal += targets.size(0)\n\t\tcorrect += predicted.eq(targets).sum().item()\n\t\tprogress_bar(batch_idx, len(dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n\t\t\t% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n# eval\ndef eval(net, testloader, task_id=0, T=1.0, save=False):\n\tnet.eval()\n\ttest_loss = 0\n\tcorrect = 0\n\ttotal = 0\n\twith torch.no_grad():\n\t\tfor batch_idx, (inputs, targets) in enumerate(testloader):\n\t\t\tinputs, targets = inputs.to(device), targets.to(device)\n\t\t\toutputs = net(inputs, (task_id * torch.ones(inputs.shape[0], dtype=torch.long)).to(device), num_classes=num_classes)\n\t\t\tloss = criterion(outputs, targets)\n\t\t\ttest_loss += loss.item()\n\t\t\t_, predicted = outputs.max(1)\n\t\t\ttotal += targets.size(0)\n\t\t\tcorrect += predicted.eq(targets).sum().item()\n\t\t\tif(not save):\n\t\t\t\tprogress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n\t\t\t\t% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n\t# Save best checkpoint\n\tif(save):\n\t\tacc = 100.*correct/total\n\t\tprint('\\nSaving...', end=\"\")\n\t\tstate = {'net': net.state_dict()}\n\t\ttorch.save(state, 
cl_root+'{mod_name}_{data_name}_train_{ttype}_lr_{lr}_tasks_{ntasks}_taskid_{tid}_seed_{sid}.pth'.format(mod_name=model_name, data_name=dataset, ttype=train_type, lr=lr_method, ntasks=num_tasks, tid=task_id, sid=int(args.seed)))\n\t\treturn acc\n\n# Calculate accuracy on a given dataloader\ndef cal_acc(net, use_loader, task_id=0):\n\tnet.eval()\n\ttest_loss = 0\n\tcorrect = 0\n\ttotal = 0\n\twith torch.no_grad():\n\t\tfor batch_idx, (inputs, targets) in enumerate(use_loader):\n\t\t\tinputs, targets = inputs.to(device), targets.to(device)\n\t\t\toutputs = net(inputs, (task_id * torch.ones(inputs.shape[0], dtype=torch.long)).to(device), num_classes=num_classes)\n\t\t\tloss = criterion(outputs, targets)\n\t\t\ttest_loss += loss.item()\n\t\t\t_, predicted = outputs.max(1)\n\t\t\ttotal += targets.size(0)\n\t\t\tcorrect += predicted.eq(targets).sum().item()\n\treturn 100.*(correct / total)\n\n######### Rewinding functions #########\ndef rewind_conv(net, net_base):\n\tfor mod, mod_base in zip(net.modules(), net_base.modules()):\n\t\tif(isinstance(mod, nn.Conv2d)):\n\t\t\tmod.weight.data = mod_base.weight.data.detach().clone()\n\t\t\ttry:\n\t\t\t\tmod.bias.data = mod_base.bias.data.detach().clone()\n\t\t\texcept:\n\t\t\t\tpass\n\t\telif(isinstance(mod, nn.BatchNorm2d)):\n\t\t\tmod.weight.data = mod_base.weight.data.detach().clone()\n\t\t\tmod.bias.data = mod_base.bias.data.detach().clone()\n\t\t\tmod.running_mean.data = mod_base.running_mean.data.clone()\n\t\t\tmod.running_var.data = mod_base.running_var.data.clone()\n\treturn net\n\ndef net_rewinding(net_features, net_classifier):\n\tnet_rewind = create_model(name=model_name, num_classes=num_classes, num_tasks=num_tasks+start_task)\n\tfor (mod_rewind, mod_features) in zip(net_rewind.modules(), net_features.modules()):\n\t\tif(isinstance(mod_rewind, nn.Conv2d)):\n\t\t\tmod_rewind.weight.data = mod_features.weight.data.clone()\n\t\t\ttry:\n\t\t\t\tmod.bias.data = 
mod_base.bias.data.detach().clone()\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\telif(isinstance(mod_rewind, nn.BatchNorm2d)):\n\t\t\tmod_rewind.weight.data = mod_features.weight.data.clone()\n\t\t\tmod_rewind.bias.data = mod_features.bias.data.clone()\n\t\t\tmod_rewind.running_mean.data = mod_features.running_mean.data.clone()\n\t\t\tmod_rewind.running_var.data = mod_features.running_var.data.clone()\n\n\tfor (mod_rewind, mod_classifier) in zip(net_rewind.modules(), net_classifier.modules()):\n\t\tif(isinstance(mod_rewind, nn.Linear)):\n\t\t\tmod_rewind.weight.data = mod_classifier.weight.data.clone()\n\t\t\tmod_rewind.bias.data = mod_classifier.bias.data.clone()\n\treturn net_rewind\n\ndef layerwise_cka(net, net_base, task_id=1):\n\tif(print_cka):\n\t\tprint(\"\\n------------------ CKA similarity between Task 0 and Task {:d} ------------------\".format(task_id))\n\torig_loader, _ = get_dataloader(0, split_pattern=split_pattern)\n\tfor n, (X1, _) in enumerate(orig_loader):\n\t\tif(n==0):\n\t\t\tX = X1.clone()\n\t\telse:\n\t\t\ttorch.cat((X, X1), dim=0)\n\t\tif(n * batch_size > 500):\n\t\t\tbreak\n\twith torch.no_grad():\n\t\tlind = 0\n\t\tfor mod, mod_base in zip(net.module.features, net_base.module.features):\n\t\t\tlind += 1\n\t\t\tif(isinstance(mod, nn.Conv2d)):\n\t\t\t\t# Current model's gram matrix w.r.t ReLU features\n\t\t\t\tf_curr = (net.module.features[0:lind+1](X.to(device))).reshape(batch_size, -1)\n\t\t\t\tgram_curr = torch.matmul(f_curr, f_curr.t()).cpu().numpy()\n\t\t\t\t# Original model's gram matrix w.r.t ReLU features\n\t\t\t\tf_orig = (net_base.module.features[0:lind+1](X.to(device))).reshape(batch_size, -1)\n\t\t\t\tgram_orig = torch.matmul(f_orig, f_orig.t()).cpu().numpy()\n\t\t\t\t# CKA\n\t\t\t\tcka_val = cka(gram_curr, gram_orig, debiased=True)\n\t\t\t\tstat[task_id]['cka'].append(cka_val)\n\t\t\t\tif(print_cka):\n\t\t\t\t\tprint(\"Layer {:d}: {:.3f}\".format(lind-1, cka_val))\n\ndef update_results(net_curr, upper_id, split_pattern, task_final=False, 
lr_method=0):\n\tav_tracc, av_teacc = 0, 0\n\tprint(\"\\n\")\n\tfor tid in range(start_task, upper_id+1):\n\t\t### net_classifier ###\n\t\tnet_classifier = create_model(name=model_name, num_classes=num_classes, num_tasks=num_tasks+start_task)\n\t\tnet_path = cl_root+'{mod_name}_{data_name}_train_{ttype}_lr_{lr}_tasks_{ntasks}_taskid_{tid}_seed_{sid}.pth'.format(mod_name=model_name, data_name=dataset, ttype=train_type, lr=lr_method, ntasks=num_tasks, tid=tid, sid=int(args.seed))\n\t\tnet_dict = torch.load(net_path)\n\t\tnet_classifier.load_state_dict(net_dict['net'])\n\t\n\t\t### Dataloaders ###\n\t\ttrloader, teloader = get_dataloader(tid, split_pattern=split_pattern)\n\n\t\t### rewind net ###\n\t\tnet_r = net_rewinding(net_features=net_curr, net_classifier=net_classifier)\n\n\t\ttracc = cal_acc(net_r.eval(), trloader, task_id=tid)\n\t\tteacc = cal_acc(net_r.eval(), teloader, task_id=tid)\n\n\t\tif(teacc > stat[tid]['max_acc']):\n\t\t\tstat[tid]['max_acc'] = teacc\n\t\tif(task_final):\n\t\t\tstat[tid]['final_acc'] = teacc\n\n\t\tav_tracc += tracc\n\t\tav_teacc += teacc\n\n\t\tprint(\"Task \"+str(tid)+\" results:\", end=\" \")\n\t\tprint(\"Train: {:.2f}\".format(tracc), end=\"; \")\n\t\tprint(\" Test: {:.2f}\".format(teacc))\t\t\n\t\t\n\tprint(\"\\nAverage Train Accuracy: {:.2f}\".format(av_tracc / (upper_id+1 - start_task)))\n\tprint(\"Average Test Accuracy: {:.2f}\".format(av_teacc / (upper_id+1 - start_task)))\n\n\treturn av_tracc / (upper_id+1 - start_task), av_teacc / (upper_id+1 - start_task)\n\nfor (lr_method, reg_constant) in hyperparams: \n\n\t### Initialize model ###\n\tnet_curr = create_model(name=model_name, num_classes=num_classes, num_tasks=num_tasks+start_task)\n\tstat = {task_id:{'orig_acc': 0, 'final_acc': 0, 'max_acc': 0, 'cka': []} for task_id in range(start_task, end_task)}\n\n\t### Use pretrained model ###\n\tif(use_pretrained):\n\t\tprint(\"\\n------------------ Loading pretrained model ------------------\\n\")\n\t\tnet_pretrained = 
create_model(name=model_name, num_classes=10, is_pretrained=True)\n\t\tnet_dict = torch.load(pretrained_path)\n\t\tnet_pretrained.load_state_dict(net_dict['net'])\n\t\tnet_curr = rewind_conv(net_curr, net_pretrained)\n\t\tdel net_dict, net_pretrained\n\n\t######### CL process begins here #########\n\tfor task_id in range(start_task, end_task):\n\t\tprint(\"\\n------------------ Task ID: {tid} ------------------\\n\".format(tid=task_id))\n\n\t\t### Dataloaders ###\n\t\tcurr_trainloader, curr_testloader = get_dataloader(task_id, split_pattern=split_pattern)\n\t\tif(task_id == start_task):\n\t\t\tdataloader_iter = iter(curr_trainloader)\n\t\t\tX, Y = next(dataloader_iter)\n\t\t\tmem_buffer, label_buffer, tid_buffer = torch.zeros(buffer_size, X.shape[1], X.shape[2], X.shape[3]), torch.zeros(buffer_size, dtype=torch.long), torch.zeros(buffer_size, dtype=torch.long)\n\t\t\tdel dataloader_iter\n\n\t\t### Optimizer ###\n\t\tif(task_id == start_task):\n\t\t\tif(opt_type=='SGD'):\n\t\t\t\toptimizer = optim.SGD(net_curr.parameters(), lr=0, momentum=0.9, weight_decay=1e-4)\n\t\t\telif(opt_type=='AdaBelief'):\n\t\t\t\toptimizer = AdaBelief(net_curr.parameters(), lr=1e-3, eps=1e-8, betas=(0.9,0.999), weight_decay=wd_cl, weight_decouple = True, rectify = False)\n\t\telse:\n\t\t\tif(opt_type=='SGD'):\n\t\t\t\toptimizer = optim.SGD(net_curr.parameters(), lr=0, momentum=0.9, weight_decay=wd_cl)\n\t\t\telif(opt_type=='AdaBelief'):\n\t\t\t\toptimizer = AdaBelief(net_curr.parameters(), lr=1e-3, eps=1e-8, betas=(0.9,0.999), weight_decay=wd_cl, weight_decouple = True, rectify = False)\n\n\t\t### Train ###\n\t\tepoch = 0\n\t\toptimizer.param_groups[0]['lr'] = lr_method\n\n\t\tprint(\"\\n--Training at {lr} learning rate for {n} epochs\".format(lr=lr_method, n=cl_epochs))\n\n\t\tfor n in range(cl_epochs):\n\t\t\tprint('\\nEpoch: {}'.format(epoch))\n\n\t\t\t# Train\n\t\t\tif(train_type == 'agem'):\n\t\t\t\tagem_train(net_curr=net_curr, mem_buffer=mem_buffer, label_buffer=label_buffer, 
tid_buffer=tid_buffer, dataloader=curr_trainloader, epoch=epoch, task_id=task_id, buffer_size=buffer_size, batch_size=batch_size)\n\n\t\t\telif(train_type == 'ogd'):\n\t\t\t\togd_train(net_curr=net_curr, mem_buffer=mem_buffer, dataloader=curr_trainloader, epoch=epoch, task_id=task_id, buffer_size=buffer_size, batch_size=batch_size)\n\n\t\t\telif(train_type == 'er_reservoir'):\n\t\t\t\ter_reservoir_train(net_curr=net_curr, mem_buffer=mem_buffer, dataloader=curr_trainloader, epoch=epoch, task_id=task_id, buffer_size=buffer_size, batch_size=batch_size)\n\n\t\t\t# # Eval\n\t\t\t# eval(net_curr, testloader=curr_testloader, task_id=task_id, save=False)\n\n\t\t\tepoch += 1\n\t\ttask_acc = eval(net_curr, testloader=curr_testloader, task_id=task_id, save=True) # save current task's model\n\n\t\t### Update buffer ###\n\t\tprint(\" Updating buffer... \", end=\"\")\n\t\tmem_buffer, label_buffer = buffer_update(mem_buffer=mem_buffer, label_buffer=label_buffer, tid_buffer=tid_buffer, dataloader=curr_trainloader, buffer_size=buffer_size, task_id=task_id)\n\t\tprint(\"Done.\")\n\n\t\t### Update accuracy numbers ###\n\t\tstat[task_id]['orig_acc'] = task_acc\n\t\tif(save_results and (not grid_search)):\n\t\t\tprint(\"\\n------------------ Progress Check ------------------\")\n\t\t\tav_train_acc, av_test_acc = update_results(net_curr=net_curr, upper_id=task_id, split_pattern=split_pattern, task_final=(task_id==end_task-1), lr_method=lr_method)\n\n\t\tif(task_id > start_task and (not grid_search)):\n\t\t\tpass #layerwise_cka(net_curr, net_orig, task_id=task_id)\n\t\telse:\n\t\t\tnet_orig = copy.deepcopy(net_curr)\n\n\t######### Print and save final stats #########\n\tprint(\"\\n------------------ Final Stats ------------------\")\n\tif(not save_results or grid_search):\n\t\tav_train_acc, av_test_acc = update_results(net_curr=net_curr, upper_id=task_id, split_pattern=split_pattern, task_final=(task_id==end_task-1), lr_method=lr_method)\n\n\tav_forgetting = 0\n\tfor task_id in 
range(start_task, end_task):\n\t\tprint(\"Task \"+str(task_id)+\":\")\n\t\tprint(\"\\tOriginal Accuracy: {:.2f}\".format(stat[task_id]['orig_acc']))\n\t\tprint(\"\\tMaximum Accuracy: {:.2f}\".format(stat[task_id]['max_acc']))\n\t\tprint(\"\\tFinal Accuracy: {:.2f}\".format(stat[task_id]['final_acc']))\n\t\tif(save_results):\n\t\t\tprint(\"\\tForgetting: {:.2f}\".format(stat[task_id]['max_acc'] - stat[task_id]['final_acc']))\n\t\t\tav_forgetting += stat[task_id]['max_acc'] - stat[task_id]['final_acc']\n\t\tprint(\"\\tCKA similarity:\")\n\t\tfor lind, sim in enumerate(stat[task_id]['cka']):\n\t\t\tprint(\"\\t\\tConv {:d}: {:.3f}\".format(lind, sim))\n\n\tprint(\"\\n------------------ Average Results ------------------\\n\")\n\tprint(\"Train Accuracy: {:.2f}\".format(av_train_acc))\n\tstat['Av_train'] = av_train_acc\n\tprint(\"Test Accuracy: {:.2f}\".format(av_test_acc))\n\tstat['Av_test'] = av_test_acc\n\tif(save_results):\n\t\tprint(\"Forgetting: {:.2f} \\n\".format(av_forgetting / num_tasks))\n\t\tstat['Av_forgetting'] = av_forgetting / num_tasks\n\tstat['lr'] = lr_method\n\tstat['reg'] = reg_constant\n\n\tif(save_results):\n\t\tresults_loc = './results/' + dataset + '/'\n\t\tresults_loc += (model_name + '_')\n\t\tresults_loc += train_type + '_None_'\n\t\tresults_loc += 'num_tasks_' + str(num_tasks) + '_'\n\t\tresults_loc += 'LR_' + str(lr_method) + '_'\n\t\tresults_loc += 'Reg_' + str(reg_constant) + '_'\n\t\tresults_loc += 'seed_' + args.seed\n\t\tresults_loc += '.pkl'\n\n\t\twith open(results_loc, 'wb') as f:\n\t\t\tpkl.dump(stat, f)\n\n\tif(grid_search):\n\t\tgrid_search_loc = './grid_search/' + dataset + '/'\n\t\tgrid_search_loc += (model_name + '_')\n\t\tgrid_search_loc += train_type + '_None_'\n\t\tgrid_search_loc += 'num_tasks_' + str(num_tasks) + '_'\n\t\tgrid_search_loc += 'LR_' + str(lr_method) + '_'\n\t\tgrid_search_loc += 'Reg_' + str(reg_constant) + '_'\n\t\tgrid_search_loc += 'seed_' + args.seed\n\t\tgrid_search_loc += '.pkl'\n\n\t\twith 
open(grid_search_loc, 'wb') as f:\n\t\t\tpkl.dump(stat, f)","repo_name":"EkdeepSLubana/QRforgetting","sub_path":"opt_based.py","file_name":"opt_based.py","file_ext":"py","file_size_in_byte":23882,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"51"} +{"seq_id":"37493885697","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow import keras\nimport numpy as np\nfrom PIL import Image\n\nprint(f\"np version{np.__version__}\")\nbatch_size = 128\n\n\n\ndef load_data():\n (x, y), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()\n x = x.astype(float).reshape(-1, 784) / 255.\n x_test = x_test.astype(float).reshape(-1, 784) / 255.\n\n db_train = tf.data.Dataset.from_tensor_slices((x, x)).shuffle(batch_size*5).batch(batch_size)\n db_test = tf.data.Dataset.from_tensor_slices((x_test, x_test)).batch(batch_size)\n\n return db_train, db_test\n\nclass AE(keras.Model):\n def __init__(self, input_dim, h_dim):\n super(AE, self).__init__()\n self.encoder = keras.Sequential([\n layers.Dense(256, 'relu'),\n layers.Dense(128, 'relu'),\n layers.Dense(h_dim)\n ])\n self.decoder = keras.Sequential([\n layers.Dense(128, 'relu'),\n layers.Dense(256, 'relu'),\n layers.Dense(input_dim, 'sigmoid')\n ])\n\n def call(self, inputs, training=None):\n x = self.encoder(inputs)\n x = self.decoder(x)\n return x\n\ndef save_images(r, g, name):\n # 创建280x280大小图片阵列\n new_im = Image.new('L', (280, 280)) \n index = 0\n for i in range(0, 140, 28): # 10 行图片阵列\n for j in range(0, 280, 28): # 10 列图片阵列\n im = r[index]\n im = Image.fromarray(im, mode='L') \n new_im.paste(im, (i, j)) # 写入对应位置\n index += 1 # 保存图片阵列\n index = 0\n for i in range(140, 280, 28): # 10 行图片阵列\n for j in range(0, 280, 28): # 10 列图片阵列\n im = g[index]\n im = Image.fromarray(im, mode='L') \n new_im.paste(im, (i, j)) # 写入对应位置\n index += 1 # 保存图片阵列\n new_im.save(name)\n\nif __name__ == \"__main__\":\n epoches = 20\n db_train, db_test = load_data()\n model = 
AE(784, 64)\n model.compile(optimizer=keras.optimizers.Adam(0.001),\n loss=keras.losses.BinaryCrossentropy(False),\n )\n model.fit(db_train, epochs=epoches, validation_data=db_test)\n\n x = next(iter(db_test))[0]\n # 重建图片,从测试集采样一批图片\n x_hat = model(x[:50]) # 打平并送入自编码器 \n \n # 恢复为 28x28,[b, 784] => [b, 28, 28]\n x = tf.cast(tf.reshape(x, (-1, 28, 28)), dtype=tf.float32)\n x_hat = tf.reshape(x_hat, (-1, 28, 28))\n # 输入的前50张+重建的前50张图片合并,[b, 28, 28] => [2b, 28, 28] \n #x_concat = tf.concat([x, x_hat], axis=0)\n x_concat = x_hat\n x_concat = x_concat.numpy() * 255. # 恢复为0~255范围\n x_concat = x_concat.astype(np.uint8) # 转换为整型\n\n x = x.numpy() * 255. # 恢复为0~255范围\n x = x.astype(np.uint8) # 转换为整型\n\n save_images(x, x_concat, '/Users/oswin/ae_images/epoch_%d.png' % epoches) # 保存图片","repo_name":"xdtcssdi/notes","sub_path":"tf练习/AE.py","file_name":"AE.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43223276395","text":"#!/usr/bin/env python3\n\n\"\"\" Merge featureCount outputs to count matrix. \"\"\"\n\nimport re\nimport sys\nimport logging\nimport argparse\nimport pandas as pd\nfrom typing import List\nfrom pathlib import Path\n\n\ndef main(infiles: List, samples: List) -> None:\n\n if len(samples) != len(infiles):\n # Raise warning if sample names are provided by not the correct number\n if samples:\n logging.error('Length of sample names must match length of infiles')\n # By default set sample name to filename (without path)\n samples = [str(Path(file)) for file in infiles]\n\n countMatrix = []\n for sample, file in zip(samples, infiles):\n countMatrix.append(pd.read_csv(\n file, names=['geneID', sample], header=0,\n index_col=0, usecols=[0,6], sep='\\t', comment='#'))\n\n pd.concat(countMatrix, axis=1).to_csv(sys.stdout)\n\n\ndef parse_arguments():\n \"\"\" Parse command line arguments. 
\"\"\"\n\n epilog='Stephen Richer, University of Bath, Bath, UK (sr467@bath.ac.uk)'\n parser = argparse.ArgumentParser(epilog=epilog, description=__doc__)\n parser.add_argument(\n 'infiles', nargs='+',\n help='featureCounts results to merge')\n parser.add_argument(\n '--samples', nargs='+', default=[],\n help='Sample names for counts matrix - must be one for each infile')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n sys.exit(main(**vars(args)))\n","repo_name":"StephenRicher/RNAFlow","sub_path":"workflow/scripts/mergeCounts.py","file_name":"mergeCounts.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"26557378662","text":"from sqlite3 import Error\n\nfrom db import database_connector\n\n\ndef create_table(conn, create_table_sql):\n \"\"\" create a table from the create_table_sql statement\n :param conn: Connection object\n :param create_table_sql: a CREATE TABLE statement\n :return:\n \"\"\"\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except Error as e:\n print(e)\n\n\ndef main():\n sql_create_pokemon_table = \"\"\" CREATE TABLE IF NOT EXISTS pokemon (\n id integer PRIMARY KEY,\n name text NOT NULL,\n type text NOT NULL,\n height integer NOT NULL,\n description text NOT NULL\n ); \"\"\"\n\n sql_create_sprites_table = \"\"\" CREATE TABLE IF NOT EXISTS pokemon_sprites (\n id integer PRIMARY KEY,\n pokemon_id integer NOT NULL,\n sprite blob NOT NULL,\n FOREIGN KEY (pokemon_id) REFERENCES pokemon (id)\n ); \"\"\"\n\n # create a database connection\n conn = database_connector.create_pokedb_connection()\n\n # create tables\n if conn is not None:\n # create pokemon table\n create_table(conn, sql_create_pokemon_table)\n\n # create sprites table\n create_table(conn, sql_create_sprites_table)\n else:\n print(\"Error! 
Cannot create the database connection.\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jacksterooney/PokemonGenerator","sub_path":"db/table_creator.py","file_name":"table_creator.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12344817383","text":"# Question 1\n#Using the given list, print out a filtered version of the list with only the numbers that are less than ten.\nalist = [1,11,14,5,8,9]\n\nalist = [1, 11, 14, 5, 8, 9]\nfiltered_list = [num for num in alist if num < 10]\nprint(filtered_list)\n\n# Question 2\n#Merge and sort the two lists below\n#Hint: You can use the .sort() method\nl_1 = [1,2,3,4,5,6]\nl_2 = [3,4,5,6,7,8,10]\n\nl_1 = [1, 2, 3, 4, 5, 6]\nl_2 = [3, 4, 5, 6, 7, 8, 10]\n\nmerged_list = l_1 + l_2\nmerged_list.sort()\n\nprint(merged_list)\n\n# Question 3\n# Square every number from 1 to 15\n\nsquared_numbers = [num**2 for num in range(1, 16)]\nprint(squared_numbers)\n\n# Question 4\n#Using List Comprehension and the given list, print out a filtered list with only the names that start with the letter 'a'. 
The names in the outputted list should be title cased and have no whitespace.\nnames_list = [' amy', 'Briant', 'Ryan ', ' Alex', 'steve', ' ']\n#expected output = ['Amy', 'Alex']\n\nnames_list = [' amy', 'Briant', 'Ryan ', ' Alex', 'steve', ' ']\n\nfiltered_list = [name.strip().title() for name in names_list if name.strip().lower().startswith('a')]\nprint(filtered_list)\n\n# Question 5\n# Print all Prime numbers from 1 to 100\n\ndef is_prime(num):\n if num < 2:\n return False\n for i in range(2, int(num ** 0.5) + 1):\n if num % i == 0:\n return False\n return True\n\nprime_numbers = [num for num in range(1, 101) if is_prime(num)]\nprint(prime_numbers)","repo_name":"derrmagiya/w2d2questions","sub_path":"Homework_9/W2D2.py","file_name":"W2D2.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39560362572","text":"class Solution:\n def three_sum(self, nums):\n nums.sort()\n result = []\n\n for i in range(len(nums) - 2):\n if nums[i] > 0:\n break\n\n if i == 0 or nums[i - 1] != nums[i]:\n self.check_and_append(nums, i, result)\n\n def check_and_append(self, nums, pos, result):\n low = pos\n high = len(nums) - 1\n\n while low < high:\n sum = nums[pos] + nums[low] + nums[high]\n\n if sum == 0:\n result.append([nums[pos], nums[low], nums[hight]])\n low += 1\n high -= 1\n\n while low < high and nums[low] == nums[low - 1]:\n low += 1\n elif sum > 0:\n high -= 1\n else:\n low -= 1\n\n return result\n","repo_name":"skp96/DS-Algos","sub_path":"Leetcode/Array and Strings/three_sum.py","file_name":"three_sum.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21936187232","text":"import pygame,sys,numpy as np\npygame.init()\n\nboard_row = 3\nboard_col = 3\n\nscreen = pygame.display.set_mode((600,600))\npygame.display.set_caption(\"tictac_demo\")\nscreen.fill(\"black\")\n\ndef 
draw_lines():\n #vertical lines\n pygame.draw.line(screen,\"white\",(200,0),(200,600),15)\n pygame.draw.line(screen,\"white\",(400,0),(400,600),15)\n #horizontal lines\n pygame.draw.line(screen,\"white\",(0,200),(600,200),15)\n pygame.draw.line(screen,\"white\",(0,400),(600,400),15)\n\n#console board\nboard = np.zeros((board_row,board_col))\nprint(board,\"initial board\\n\")\n\ndef mark_sqr(row,col,player):\n board[row][col] = player\n\nmark_sqr(0,0,1)\nmark_sqr(1,1,1)\nmark_sqr(2,2,1)\nprint(board,\"new board\")\n\n\ndraw_lines()\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n pygame.display.update()\n\n\n\n\n\n '''\n SUMMARY OF THIS CODE:\n 1.Import numpy module.\n 2.Create console board.\n 3.Test a console board through a mark_sqr function.\n '''","repo_name":"smartmohanx/Tictactoe_game","sub_path":"tic_part2.py","file_name":"tic_part2.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17490026108","text":"import sublime\nimport sublime_plugin\nimport re\n\n\ndef read_next_line(view, point):\n if (point >= view.size()):\n return\n\n next_line = view.line(point)\n return view.substr(next_line)\n\n\ndef write(view, strng):\n view.run_command(\n 'insert_snippet', {\n 'contents': strng\n }\n )\n\n\ndef counter():\n count = 0\n while True:\n count += 1\n yield(count)\n\n\nclass PrefillParamsCommand(sublime_plugin.TextCommand):\n\n def run(self, edit):\n v = self.view\n settings = sublime.load_settings(\"jsdocs.sublime-settings\")\n point = v.sel()[0].end()\n indentSpaces = max(0, settings.get(\"indentation_spaces\", 1))\n alignTags = settings.get(\"align_tags\", True)\n extraTags = settings.get('extra_tags', [])\n prefix = \"\\n*\" + (\" \" * indentSpaces)\n tabIndex = counter()\n out = []\n\n # read the next line\n line = read_next_line(v, point + 1)\n\n # write the first linebreak and star. 
this sets the indentation for the following snippets\n write(v, \"\\n *\" + (\" \" * indentSpaces))\n\n # if there is a line following this\n if (line):\n # match against a javascript function declaration. TODO: extend for other languages\n res = re.search(\n # fnName = function, fnName : function\n '(?:(?P[a-zA-Z_$][a-zA-Z_$0-9]*)\\s*[:=]\\s*)?'\n + 'function'\n # function fnName\n + '(?:\\s+(?P[a-zA-Z_$][a-zA-Z_$0-9]*))?'\n # (arg1, arg2)\n + '\\s*\\((?P.*)\\)',\n line\n )\n if (res):\n # grab the name out of \"name1 = function name2(foo)\" preferring name1\n name = res.group('name1') or res.group('name2')\n args = res.group('args')\n\n out.append(\"${%d:%s description}\" % (tabIndex.next(), name))\n\n def replaceUserTabs(m):\n return \"%s%d%s\" % (m.group(1), tabIndex.next(), m.group(2))\n\n for index, extra in enumerate(extraTags):\n extraTags[index] = re.sub(\"(\\$\\{)\\d*(:[^}]+})\", replaceUserTabs, extra)\n out.extend(extraTags)\n\n # if there are arguments, add a @param for each\n if (args):\n # remove comments inside the argument list.\n args = re.sub(\"/\\*.*?\\*/\", '', args)\n for arg in re.split('\\s*,\\s*', args):\n out.append(\"@param {${%d:type}} %s ${%d:description}\" % (tabIndex.next(), arg, tabIndex.next()))\n\n # unless the function starts with 'set' or 'add', add a @return tag\n if not re.match('[$_]?(?:set|add)[A-Z_]', name):\n out.append(\"@return {${%d:type}}\" % (tabIndex.next()))\n\n if alignTags:\n maxWidth = 0\n regex = re.compile(\"(@\\S+)\")\n for line in out:\n res = regex.match(line)\n if res:\n maxWidth = max(maxWidth, res.end())\n\n for index, line in enumerate(out):\n res = regex.match(line)\n if res:\n out[index] = line[:res.end()] \\\n + (\" \" * (1 + maxWidth - res.end())) \\\n + line[res.end():].strip(' \\t')\n\n write(v, prefix.join(out) + \"\\n*/\")\n\n # if there was no line, or no match, then just close the comment and carry on\n if not line or not res:\n write(v, 
\"$0\\n*/\")\n","repo_name":"rfloriano/sublime-text-2","sub_path":"Packages/JSDocs/prefill_params.py","file_name":"prefill_params.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"70461268000","text":"from heartrate_monitor import HeartRateMonitor\r\nimport RPi.GPIO as GPIO\r\nimport importlib \r\nimport time\r\nimport sys\r\nimport argparse\r\n\r\n#these are calling relevant library to specific variables\r\n#GPIO NUM\r\nmakerobo_Buzz= 11\r\n\r\n#LOAD OTHER FUCTIONS\r\nHrcalc=importlib.import_module('hrcalc')\r\nMax30102=importlib.import_module('max30102')\r\n#INIT FUCTIONS\r\n\r\n# now initialize all libarary for starting up\r\n#set up fundamental variables to judge crtical situations\r\n\r\nparser = argparse.ArgumentParser(description=\"Read and print data from MAX30102\")\r\nparser.add_argument(\"-r\", \"--raw\", action=\"store_true\",\r\n help=\"print raw data instead of calculation result\")\r\nparser.add_argument(\"-t\", \"--time\", type=int, default=30,\r\n help=\"duration in seconds to read from sensor, default 30\")\r\nargs = parser.parse_args()\r\n\r\nprint('sensor starting...')\r\nhrm = HeartRateMonitor(print_raw=args.raw, print_result=(not args.raw))\r\nhrm.start_sensor()\r\ntry:\r\n time.sleep(args.time)\r\nexcept KeyboardInterrupt:\r\n print('keyboard interrupt detected, exiting...')\r\n\r\nhrm.stop_sensor()\r\nprint('sensor stoped!')\r\n\r\ndef makerobo_destroy():\r\n heartrate_monitor.stop_sensor()\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n makerobo_setup()\r\n makerobo_loop()\r\n except KeyboardInterrupt:\r\n makerobo_destory()\r\n","repo_name":"BillyHePro/life-monitor","sub_path":"wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34686972965","text":"\nimport time\n\nimport PySide6.QtCore\nimport 
numpy as np\nimport pandas as pd\nimport pyqtgraph as pg\nfrom PySide6.QtCore import Qt\nfrom PySide6.QtGui import QTextDocument\nfrom PySide6.QtWidgets import (\n QLabel,\n QComboBox,\n QCheckBox,\n QPushButton,\n QVBoxLayout,\n QHBoxLayout,\n QWidget,\n QLineEdit,\n QGridLayout,\n QButtonGroup,\n QRadioButton,\n QScrollArea,\n QPlainTextDocumentLayout,\n QTableWidget,\n QTableWidgetItem\n)\nfrom funcs import *\nfrom state_detection_widget import StateDetectionWidget\nfrom unknown_pleasures_widget import UnknownPleasuresWidget\n\n\n# Enables user to capture and compare the bandpower of two recording sessions\nclass ABBandpowerWidget(QWidget):\n\n def __init__(self, parent):\n\n # ** CLASS VARIABLE INITIALIZATION ** #\n\n super().__init__()\n self.parent = parent\n\n # Data gathering variables\n self.record_time = 0\n self.a_record_time = 0\n self.b_record_time = 0\n self.c_record_time = 0\n self.d_record_time = 0\n self.a_data = None\n self.b_data = None\n self.c_data = None\n self.d_data = None\n\n # Plotting info\n self.plot_state = [False, False, False, False] # a, b, c, d\n\n # Bandpower processing\n self.bp_processing = parent.bp_processing\n\n # Processed data variables\n self.a_processed_bp = None\n self.b_processed_bp = None\n self.c_processed_bp = None\n self.d_processed_bp = None\n self.a_frequencies = [0, 0]\n self.b_frequencies = [0, 0]\n self.c_frequencies = [0, 0]\n self.d_frequencies = [0, 0]\n\n self.a_standard_band_values = [0, 0, 0, 0, 0]\n self.b_standard_band_values = [0, 0, 0, 0, 0]\n self.c_standard_band_values = [0, 0, 0, 0, 0]\n self.d_standard_band_values = [0, 0, 0, 0, 0]\n\n # Timers\n self.a_timer = PySide6.QtCore.QTimer()\n self.a_timer.setSingleShot(True)\n self.a_timer.timeout.connect(self.record_a)\n self.b_timer = PySide6.QtCore.QTimer()\n self.b_timer.setSingleShot(True)\n self.b_timer.timeout.connect(self.record_b)\n self.c_timer = PySide6.QtCore.QTimer()\n self.c_timer.setSingleShot(True)\n 
self.c_timer.timeout.connect(self.record_c)\n self.d_timer = PySide6.QtCore.QTimer()\n self.d_timer.setSingleShot(True)\n self.d_timer.timeout.connect(self.record_d)\n\n # ** WIDGET CONSTRUCTION ** #\n\n # ############# #\n # Input Section #\n # ############# #\n\n # Record time #\n\n record_time_label = make_label(\"Set Record Time (s): \")\n\n record_time_input = QLineEdit()\n record_time_input.setPlaceholderText(\"0\")\n record_time_input.setMaxLength(2)\n record_time_input.textChanged.connect(self.set_record_time)\n record_time_input.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; max-width: 50px; padding: 5px;\"\"\")\n\n record_time_l = QHBoxLayout()\n record_time_l.addWidget(record_time_label)\n record_time_l.addWidget(record_time_input)\n\n # State A #\n\n a_record_time_label = make_label(\"State A Record Time (s): \")\n\n a_record_time_input = QLineEdit()\n a_record_time_input.setPlaceholderText(\"0\")\n a_record_time_input.setMaxLength(2)\n a_record_time_input.textChanged.connect(self.set_a_record_time)\n a_record_time_input.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; max-width: 50px; padding: 5px;\"\"\")\n\n self.a_record_button = make_button(\"Record State A\")\n self.a_record_button.pressed.connect(self.pressed_record_a)\n # self.a_record_button.released.connect(self.record_a)\n\n state_a = QHBoxLayout()\n # state_a.addWidget(a_record_time_label)\n # state_a.addWidget(a_record_time_input)\n state_a.addWidget(self.a_record_button)\n\n # State B #\n\n b_record_time_label = make_label(\"State B Record Time (s): \")\n\n b_record_time_input = QLineEdit()\n b_record_time_input.setPlaceholderText(\"0\")\n b_record_time_input.setMaxLength(2)\n b_record_time_input.textChanged.connect(self.set_b_record_time)\n b_record_time_input.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; 
max-width: 50px; padding: 5px;\"\"\")\n\n self.b_record_button = make_button(\"Record State B\")\n self.b_record_button.pressed.connect(self.pressed_record_b)\n # self.b_record_button.released.connect(self.record_b)\n\n state_b = QHBoxLayout()\n # state_b.addWidget(b_record_time_label)\n # state_b.addWidget(b_record_time_input)\n state_b.addWidget(self.b_record_button)\n\n # State C #\n\n c_record_time_label = make_label(\"State C Record Time (s): \")\n\n c_record_time_input = QLineEdit()\n c_record_time_input.setPlaceholderText(\"0\")\n c_record_time_input.setMaxLength(2)\n c_record_time_input.textChanged.connect(self.set_c_record_time)\n c_record_time_input.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; max-width: 50px; padding: 5px;\"\"\")\n\n self.c_record_button = make_button(\"Record State C\")\n self.c_record_button.pressed.connect(self.pressed_record_c)\n # self.c_record_button.released.connect(self.record_c)\n\n state_c = QHBoxLayout()\n # state_c.addWidget(c_record_time_label)\n # state_c.addWidget(c_record_time_input)\n state_c.addWidget(self.c_record_button)\n\n # State D #\n\n d_record_time_label = make_label(\"State D Record Time (s): \")\n\n d_record_time_input = QLineEdit()\n d_record_time_input.setPlaceholderText(\"0\")\n d_record_time_input.setMaxLength(2)\n d_record_time_input.textChanged.connect(self.set_d_record_time)\n d_record_time_input.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; max-width: 50px; padding: 5px;\"\"\")\n\n self.d_record_button = make_button(\"Record State D\")\n self.d_record_button.pressed.connect(self.pressed_record_d)\n # self.d_record_button.released.connect(self.record_d)\n\n state_d = QHBoxLayout()\n # state_d.addWidget(d_record_time_label)\n # state_d.addWidget(d_record_time_input)\n state_d.addWidget(self.d_record_button)\n\n # Clear states button #\n self.clear_states_button = make_button(\"Clear 
States\")\n self.clear_states_button.pressed.connect(self.clear_states_button_pressed)\n self.clear_states_button.released.connect(self.clear_states_button_released)\n\n # Bandpower Selection #\n\n bp_label = make_label(\"Bandpower processing method: \")\n\n bp_method_list = QComboBox()\n bp_method_list.addItems(self.bp_processing.bp_methods)\n bp_method_list.setStyleSheet(\"\"\"background-color: gray; color: #fff; min-width: 50px; max-width: 150px;\"\"\")\n bp_method_list.setCurrentIndex(0)\n bp_method_list.currentIndexChanged.connect(self.set_bp_method)\n\n bandpower_selection = QHBoxLayout()\n bandpower_selection.addWidget(bp_label)\n bandpower_selection.addWidget(bp_method_list)\n\n # Relative Plot Selection #\n\n relative_plot_checkbox = QCheckBox(\"Relative plot\")\n relative_plot_checkbox.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n relative_plot_checkbox.stateChanged.connect(self.set_relative_plot)\n\n # 60 Hz Filter Selection #\n\n sixty_hz_filter_checkbox = QCheckBox(\"Filter 60 Hz\")\n sixty_hz_filter_checkbox.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n sixty_hz_filter_checkbox.stateChanged.connect(self.set_sixty_hz_filter)\n\n # 0-5 Hz Filter Selection #\n\n zero_to_five_hz_filter_checkbox = QCheckBox(\"Filter 0-5 Hz\")\n zero_to_five_hz_filter_checkbox.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n zero_to_five_hz_filter_checkbox.stateChanged.connect(self.set_zero_to_five_hz_filter)\n\n # Frequency Range Selection #\n\n # Build all frequencies option\n all_frequencies_button = QRadioButton(\"Include all frequencies\")\n all_frequencies_button.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n all_frequencies_button.setChecked(True)\n\n # Build custom frequencies option\n custom_frequencies_button = QRadioButton(\"Custom frequency range\")\n custom_frequencies_button.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n\n custom_frequencies_low = QLineEdit()\n 
custom_frequencies_low.setPlaceholderText(\"0\")\n custom_frequencies_low.setMaxLength(3)\n custom_frequencies_low.textChanged.connect(self.set_low_frequency)\n custom_frequencies_low.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; max-width: 50px; padding: 5px;\"\"\")\n custom_frequencies_high = QLineEdit()\n custom_frequencies_high.setPlaceholderText(\"50\")\n custom_frequencies_high.setMaxLength(3)\n custom_frequencies_high.textChanged.connect(self.set_high_frequency)\n custom_frequencies_high.setStyleSheet(\"\"\"background-color: #fff; color: #000;font: 15px; min-width: 50px;\n margin-bottom: 0px; max-width: 50px; padding: 5px;\"\"\")\n\n custom_frequencies_option = QHBoxLayout()\n custom_frequencies_option.addWidget(custom_frequencies_button)\n custom_frequencies_option.addWidget(custom_frequencies_low)\n custom_frequencies_option.addWidget(custom_frequencies_high)\n\n self.frequency_selection = QButtonGroup()\n self.frequency_selection.addButton(all_frequencies_button)\n self.frequency_selection.addButton(custom_frequencies_button)\n self.frequency_selection.setId(all_frequencies_button, 1)\n self.frequency_selection.setId(custom_frequencies_button, 2)\n self.frequency_selection.idPressed.connect(self.toggle_custom_frequency)\n\n custom_frequencies_label = make_label(\"Set custom frequency range\")\n\n # Build frequency range selection\n frequency_range_selection = QVBoxLayout()\n frequency_range_selection.addWidget(all_frequencies_button)\n frequency_range_selection.addLayout(custom_frequencies_option)\n\n # Included Electrode Selection #\n\n # Build all electrodes option\n all_electrodes_button = QRadioButton(\"Include all electrodes\")\n all_electrodes_button.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n all_electrodes_button.setChecked(True)\n\n # Build custom electrodes option\n custom_electrodes_button = QRadioButton(\"Include custom electrodes\")\n 
custom_electrodes_button.setStyleSheet(\"\"\"color: #fff; font-size: 15px;\"\"\")\n\n electrode_1_button = QCheckBox(\"1\")\n electrode_2_button = QCheckBox(\"2\")\n electrode_3_button = QCheckBox(\"3\")\n electrode_4_button = QCheckBox(\"4\")\n electrode_5_button = QCheckBox(\"5\")\n electrode_6_button = QCheckBox(\"6\")\n electrode_7_button = QCheckBox(\"7\")\n electrode_8_button = QCheckBox(\"8\")\n\n self.selected_electrodes_group = QButtonGroup()\n self.selected_electrodes_group.addButton(electrode_1_button)\n self.selected_electrodes_group.addButton(electrode_2_button)\n self.selected_electrodes_group.addButton(electrode_3_button)\n self.selected_electrodes_group.addButton(electrode_4_button)\n self.selected_electrodes_group.addButton(electrode_5_button)\n self.selected_electrodes_group.addButton(electrode_6_button)\n self.selected_electrodes_group.addButton(electrode_7_button)\n self.selected_electrodes_group.addButton(electrode_8_button)\n self.selected_electrodes_group.setId(electrode_1_button, 1)\n self.selected_electrodes_group.setId(electrode_2_button, 2)\n self.selected_electrodes_group.setId(electrode_3_button, 3)\n self.selected_electrodes_group.setId(electrode_4_button, 4)\n self.selected_electrodes_group.setId(electrode_5_button, 5)\n self.selected_electrodes_group.setId(electrode_6_button, 6)\n self.selected_electrodes_group.setId(electrode_7_button, 7)\n self.selected_electrodes_group.setId(electrode_8_button, 8)\n self.selected_electrodes_group.idPressed.connect(self.select_custom_electrode)\n self.selected_electrodes_group.setExclusive(False)\n\n electrode_1_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n electrode_2_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n electrode_3_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n electrode_4_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n electrode_5_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n 
electrode_6_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n electrode_7_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n electrode_8_button.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n\n custom_electrode_selection = QHBoxLayout()\n custom_electrode_selection.addWidget(electrode_1_button)\n custom_electrode_selection.addWidget(electrode_2_button)\n custom_electrode_selection.addWidget(electrode_3_button)\n custom_electrode_selection.addWidget(electrode_4_button)\n custom_electrode_selection.addWidget(electrode_5_button)\n custom_electrode_selection.addWidget(electrode_6_button)\n custom_electrode_selection.addWidget(electrode_7_button)\n custom_electrode_selection.addWidget(electrode_8_button)\n\n custom_electrodes_option = QVBoxLayout()\n custom_electrodes_option.addWidget(custom_electrodes_button)\n custom_electrodes_option.addLayout(custom_electrode_selection)\n\n # Build electrode selection\n\n self.all_electrodes_toggle = QButtonGroup()\n self.all_electrodes_toggle.addButton(all_electrodes_button)\n self.all_electrodes_toggle.addButton(custom_electrodes_button)\n self.all_electrodes_toggle.setId(all_electrodes_button, 1)\n self.all_electrodes_toggle.setId(custom_electrodes_button, 2)\n self.all_electrodes_toggle.idPressed.connect(self.toggle_all_electrodes)\n\n electrode_selection = QVBoxLayout()\n electrode_selection.addWidget(all_electrodes_button)\n electrode_selection.addLayout(custom_electrodes_option)\n\n # Input Section Layout #\n\n # Titles\n data_recording_title = make_label(\"Record/Load Data\")\n data_processing_title = make_label(\"Data Processing Options\")\n\n states = QVBoxLayout()\n states.addLayout(record_time_l)\n states.addLayout(state_a)\n states.addLayout(state_b)\n states.addLayout(state_c)\n states.addLayout(state_d)\n states.addWidget(self.clear_states_button)\n states_frame = QFrame()\n states_frame.setLayout(states)\n states_scrollable = QScrollArea()\n 
states_scrollable.setWidget(states_frame)\n states_scrollable_layout = QHBoxLayout()\n states_scrollable_layout.addWidget(states_scrollable)\n states_box = QFrame()\n states_box.setLayout(states_scrollable_layout)\n states_box.setStyleSheet(\"\"\"\n background-color: #8d949c;\n border-radius: 10px;\n border-color: pink;\n color: black;\n font: 14px;\n height: 1em;\n max-width: 30em;\n \"\"\")\n\n processing_options = QVBoxLayout()\n processing_options.addLayout(bandpower_selection)\n processing_options.addWidget(relative_plot_checkbox)\n processing_options.addWidget(sixty_hz_filter_checkbox)\n processing_options.addWidget(zero_to_five_hz_filter_checkbox)\n processing_options.addLayout(frequency_range_selection)\n processing_options.addLayout(electrode_selection)\n\n processing_box = QFrame()\n processing_box.setLayout(processing_options)\n processing_box.setStyleSheet(\"\"\"\n background-color: #8d949c;\n border-radius: 10px;\n border-color: pink;\n color: black;\n font: 14px;\n height: 1em;\n max-width: 30em;\n \"\"\")\n\n input_layout = QVBoxLayout()\n input_layout.addWidget(data_recording_title)\n input_layout.addWidget(states_box)\n input_layout.addWidget(data_processing_title)\n input_layout.addWidget(processing_box)\n\n # ############## #\n # Output Section #\n # ############## #\n\n # State A/B Graph #\n\n self.graph = Graph(self, self.parent)\n\n # Rescale graph button #\n\n rescale_graph_button = make_button(\"Rescale Graph\")\n rescale_graph_button.pressed.connect(self.rescale_graph)\n\n self.graph_w = QVBoxLayout()\n self.graph_w.addWidget(self.graph)\n self.graph_w.addWidget(rescale_graph_button)\n\n # State A Information #\n\n self.a_title_w = make_label(\"State A\")\n # self.a_title_w.setStyleSheet(\"\"\"color: #fff;font-size: 15px;\"\"\")\n\n self.a_frequency_spacing_w = make_label(\"Frequency Bin Spacing (Hz): N/A\")\n self.a_delta_w = make_label(\"Delta Relative Power: N/A\")\n self.a_theta_w = make_label(\"Theta Relative Power: N/A\")\n 
self.a_alpha_w = make_label(\"Alpha Relative Power: N/A\")\n self.a_beta_w = make_label(\"Beta Relative Power: N/A\")\n self.a_gamma_w = make_label(\"Gamma Relative Power: N/A\")\n\n a_output_information = QVBoxLayout()\n a_output_information.addWidget(self.a_title_w)\n a_output_information.addWidget(self.a_frequency_spacing_w)\n a_output_information.addWidget(self.a_delta_w)\n a_output_information.addWidget(self.a_theta_w)\n a_output_information.addWidget(self.a_alpha_w)\n a_output_information.addWidget(self.a_beta_w)\n a_output_information.addWidget(self.a_gamma_w)\n\n # State B Information #\n\n self.b_title_w = make_label(\"State B\")\n\n self.b_frequency_spacing_w = make_label(\"Frequency Bin Spacing (Hz): N/A\")\n\n self.b_delta_w = make_label(\"Delta Relative Power: N/A\")\n self.b_theta_w = make_label(\"Theta Relative Power: N/A\")\n self.b_alpha_w = make_label(\"Alpha Relative Power: N/A\")\n self.b_beta_w = make_label(\"Beta Relative Power: N/A\")\n self.b_gamma_w = make_label(\"Gamma Relative Power: N/A\")\n\n b_output_information = QVBoxLayout()\n b_output_information.addWidget(self.b_title_w)\n b_output_information.addWidget(self.b_frequency_spacing_w)\n b_output_information.addWidget(self.b_delta_w)\n b_output_information.addWidget(self.b_theta_w)\n b_output_information.addWidget(self.b_alpha_w)\n b_output_information.addWidget(self.b_beta_w)\n b_output_information.addWidget(self.b_gamma_w)\n\n # Output Section Layout #\n\n # Bandpower Graph\n\n self.state_graph_frame = QFrame()\n self.state_graph_frame.setLayout(self.graph_w)\n\n # Live Bandpowers\n\n self.unknown_pleasures = UnknownPleasuresWidget(self.parent)\n self.state_detection = StateDetectionWidget(self.parent)\n self.bandpower_stats = make_label(\"Bandpower stats\")\n self.bandpower_stats = QTableWidget(4, 5)\n self.bandpower_stats.setStyleSheet(\"\"\"\n background-color: #103650;\n border-width: 5px;\n border-color: black;\n color: #e3ebec;\n font: 14px;\n \"\"\")\n delta_header = 
QTableWidgetItem()\n delta_header.setData(Qt.DisplayRole, \"Delta (0-4)\")\n self.bandpower_stats.setHorizontalHeaderItem(0, delta_header)\n theta_header = QTableWidgetItem()\n theta_header.setData(Qt.DisplayRole, \"Theta (4-7)\")\n self.bandpower_stats.setHorizontalHeaderItem(1, theta_header)\n alpha_header = QTableWidgetItem()\n alpha_header.setData(Qt.DisplayRole, \"Alpha (7-12)\")\n self.bandpower_stats.setHorizontalHeaderItem(2, alpha_header)\n beta_header = QTableWidgetItem()\n beta_header.setData(Qt.DisplayRole, \"Beta (12-30)\")\n self.bandpower_stats.setHorizontalHeaderItem(3, beta_header)\n gamma_header = QTableWidgetItem()\n gamma_header.setData(Qt.DisplayRole, \"Gamma (30-50)\")\n self.bandpower_stats.setHorizontalHeaderItem(4, gamma_header)\n a_header = QTableWidgetItem()\n a_header.setData(Qt.DisplayRole, \"State A\")\n self.bandpower_stats.setVerticalHeaderItem(0, a_header)\n b_header = QTableWidgetItem()\n b_header.setData(Qt.DisplayRole, \"State B\")\n self.bandpower_stats.setVerticalHeaderItem(1, b_header)\n c_header = QTableWidgetItem()\n c_header.setData(Qt.DisplayRole, \"State C\")\n self.bandpower_stats.setVerticalHeaderItem(2, c_header)\n d_header = QTableWidgetItem()\n d_header.setData(Qt.DisplayRole, \"State D\")\n self.bandpower_stats.setVerticalHeaderItem(3, d_header)\n\n for i in range(4):\n for j in range(5):\n table_item = QTableWidgetItem()\n table_item.setData(Qt.DisplayRole, \"0\")\n self.bandpower_stats.setItem(i, j, table_item)\n\n self.output_bins_button = make_button(\"Output frequency bins\")\n self.output_bins_button.pressed.connect(self.output_bins_button_pressed)\n self.output_bins_button.released.connect(self.output_bins_button_released)\n\n self.stats_layout = QVBoxLayout()\n self.stats_layout.addWidget(self.bandpower_stats)\n self.stats_layout.addWidget(self.output_bins_button)\n self.stats_frame = QFrame()\n self.stats_frame.setLayout(self.stats_layout)\n\n self.live_timeseries = make_label(\"Live timeseries\")\n\n\n 
self.output_stack = QStackedLayout()\n self.output_stack.addWidget(self.state_graph_frame)\n self.output_stack.addWidget(self.stats_frame)\n self.output_stack.addWidget(self.state_detection)\n self.output_stack.addWidget(self.unknown_pleasures)\n\n # ############ #\n # Macro Layout #\n # ############ #\n\n self.output_options = ['Bandpower Graph', 'Bandpower Statistics', 'Live State Detection', 'Unknown Pleasures']\n self.selected_output_option = self.output_options[0]\n\n output_options_list = QComboBox()\n output_options_list.addItems(self.output_options)\n output_options_list.setStyleSheet(\"\"\"background-color: gray; color: #fff; font: 20px;\n height: 1em;\"\"\")\n output_options_list.setCurrentIndex(0)\n output_options_list.currentIndexChanged.connect(self.set_output_option)\n\n title = QLabel(\"Record and Compare Bandpowers\")\n title.setStyleSheet(\"\"\"\n background-color: light gray;\n border-width: 5px;\n border-color: black;\n color: #e3ebec;\n font: 30px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n\n \"\"\")\n title.setAlignment(Qt.AlignHCenter)\n self.home_button = make_button(\"Home\")\n self.home_button.pressed.connect(self.home_button_pressed)\n self.home_button.released.connect(self.home_button_released)\n\n # Layout\n\n layout = QGridLayout()\n layout.addWidget(self.home_button, 0, 0)\n layout.addWidget(output_options_list, 0, 1)\n layout.addLayout(input_layout, 1, 0)\n layout.addLayout(self.output_stack, 1, 1)\n\n layout.setColumnStretch(0, 1)\n layout.setColumnStretch(1, 2)\n\n self.setLayout(layout)\n\n def pressed_record_a(self):\n self.a_record_button.setText(\"Recording...\")\n self.a_timer.start(self.record_time * 1000)\n\n def record_a(self):\n num_data_points = self.parent.sampling_rate * self.record_time\n print('recording state a')\n self.a_record_button.setText(\"Record State A\")\n self.a_data = self.parent.board_shim.get_current_board_data(num_samples=num_data_points)[1:9]\n self.plot_state[0] = True\n self.process_bp()\n 
pd.DataFrame(np.transpose(self.a_data)).to_csv('state_raw_data/a_data.csv')\n\n def pressed_record_b(self):\n self.b_record_button.setText(\"Recording...\")\n self.b_timer.start(self.record_time * 1000)\n\n def record_b(self):\n num_data_points = self.parent.sampling_rate * self.record_time\n print('recording state b')\n self.b_record_button.setText(\"Record State B\")\n self.b_data = self.parent.board_shim.get_current_board_data(num_samples=num_data_points)[1:9]\n self.plot_state[1] = True\n self.process_bp()\n pd.DataFrame(np.transpose(self.b_data)).to_csv('state_raw_data/b_data.csv')\n\n def pressed_record_c(self):\n self.c_record_button.setText(\"Recording...\")\n self.c_timer.start(self.record_time * 1000)\n\n def record_c(self):\n num_data_points = self.parent.sampling_rate * self.record_time\n print('recording state c')\n self.c_record_button.setText(\"Record State C\")\n self.c_data = self.parent.board_shim.get_current_board_data(num_samples=num_data_points)[1:9]\n self.plot_state[2] = True\n self.process_bp()\n pd.DataFrame(np.transpose(self.c_data)).to_csv('state_raw_data/c_data.csv')\n\n def pressed_record_d(self):\n self.d_record_button.setText(\"Recording...\")\n self.d_timer.start(self.record_time * 1000)\n\n def record_d(self):\n num_data_points = self.parent.sampling_rate * self.record_time\n print('recording state d')\n self.d_record_button.setText(\"Record State D\")\n self.d_data = self.parent.board_shim.get_current_board_data(num_samples=num_data_points)[1:9]\n self.plot_state[3] = True\n self.process_bp()\n pd.DataFrame(np.transpose(self.d_data)).to_csv('state_raw_data/d_data.csv')\n\n def set_record_time(self, seconds):\n self.record_time = int(seconds)\n print(self.record_time)\n\n def set_a_record_time(self, seconds):\n self.a_record_time = int(seconds)\n print(self.a_record_time)\n\n def set_b_record_time(self, seconds):\n self.b_record_time = int(seconds)\n print(self.b_record_time)\n\n def set_c_record_time(self, seconds):\n 
self.c_record_time = int(seconds)\n print(self.c_record_time)\n\n def set_d_record_time(self, seconds):\n self.d_record_time = int(seconds)\n print(self.d_record_time)\n\n def set_bp_method(self, i):\n self.bp_processing.selected_bp_method = self.bp_processing.bp_methods[i]\n print(self.bp_processing.selected_bp_method)\n self.process_bp()\n\n def set_relative_plot(self, i):\n if i == 0:\n self.bp_processing.relative = False\n else:\n self.bp_processing.relative = True\n print(self.bp_processing.relative)\n self.process_bp()\n self.rescale_graph()\n\n def set_sixty_hz_filter(self, i):\n if i == 0:\n self.bp_processing.filter_sixty_hz = False\n else:\n self.bp_processing.filter_sixty_hz = True\n print(self.bp_processing.filter_sixty_hz)\n self.process_bp()\n\n def set_zero_to_five_hz_filter(self, i):\n if i == 0:\n self.bp_processing.filter_zero_to_five_hz = False\n else:\n self.bp_processing.filter_zero_to_five_hz = True\n print(self.bp_processing.filter_zero_to_five_hz)\n self.process_bp()\n\n def toggle_custom_frequency(self, i):\n if int(i) == 1:\n self.bp_processing.include_all_frequencies = True\n else:\n self.bp_processing.include_all_frequencies = False\n print(self.bp_processing.include_all_frequencies)\n self.process_bp()\n self.rescale_graph()\n\n def set_low_frequency(self, low):\n if low == \"\":\n self.bp_processing.custom_low_frequency = 0\n else:\n self.bp_processing.custom_low_frequency = int(low)\n print(self.bp_processing.custom_low_frequency)\n self.process_bp()\n if not self.bp_processing.include_all_frequencies:\n self.rescale_graph()\n\n def set_high_frequency(self, high):\n if high == \"\":\n self.bp_processing.custom_high_frequency = 50\n else:\n self.bp_processing.custom_high_frequency = int(high)\n print(self.bp_processing.custom_high_frequency)\n self.process_bp()\n if not self.bp_processing.include_all_frequencies:\n self.rescale_graph()\n\n def toggle_all_electrodes(self, i):\n if i == 1:\n self.bp_processing.include_all_electrodes = 
True\n else:\n self.bp_processing.include_all_electrodes = False\n print(self.bp_processing.include_all_electrodes)\n self.process_bp()\n\n def select_custom_electrode(self, electrode_num):\n if electrode_num in self.bp_processing.custom_electrode_selection:\n self.bp_processing.custom_electrode_selection.remove(electrode_num)\n else:\n self.bp_processing.custom_electrode_selection.append(electrode_num)\n self.bp_processing.custom_electrode_selection.sort()\n print(self.bp_processing.custom_electrode_selection)\n self.process_bp()\n\n def rescale_graph(self):\n self.graph.p.autoRange()\n\n # Processes the bandpower for states a and b given the current settings, updates gui\n def process_bp(self):\n\n if self.plot_state[0] is True:\n self.a_frequencies, self.a_processed_bp, self.a_standard_band_values = \\\n self.bp_processing.process_state(self.a_data)\n if self.plot_state[1] is True:\n self.b_frequencies, self.b_processed_bp, self.b_standard_band_values = \\\n self.bp_processing.process_state(self.b_data)\n if self.plot_state[2] is True:\n self.c_frequencies, self.c_processed_bp, self.c_standard_band_values = \\\n self.bp_processing.process_state(self.c_data)\n if self.plot_state[3] is True:\n self.d_frequencies, self.d_processed_bp, self.d_standard_band_values = \\\n self.bp_processing.process_state(self.d_data)\n\n self.graph.clear_plots()\n\n if self.plot_state[0] is True:\n self.graph.plot_a(self.a_frequencies, self.a_processed_bp)\n if self.plot_state[1] is True:\n self.graph.plot_b(self.b_frequencies, self.b_processed_bp)\n if self.plot_state[2] is True:\n self.graph.plot_c(self.c_frequencies, self.c_processed_bp)\n if self.plot_state[3] is True:\n self.graph.plot_d(self.d_frequencies, self.d_processed_bp)\n\n # Update output\n self.update_output_information()\n\n def update_output_information(self):\n\n if self.plot_state[0] is True:\n for j in range(5):\n print('adding')\n table_item = QTableWidgetItem()\n table_item.setData(Qt.DisplayRole, 
str(round(self.a_standard_band_values[j], 3)))\n self.bandpower_stats.setItem(0, j, table_item)\n if self.plot_state[1] is True:\n for j in range(5):\n table_item = QTableWidgetItem()\n table_item.setData(Qt.DisplayRole, str(round(self.b_standard_band_values[j], 3)))\n self.bandpower_stats.setItem(1, j, table_item)\n if self.plot_state[2] is True:\n for j in range(5):\n table_item = QTableWidgetItem()\n table_item.setData(Qt.DisplayRole, str(round(self.c_standard_band_values[j], 3)))\n self.bandpower_stats.setItem(2, j, table_item)\n if self.plot_state[3] is True:\n for j in range(5):\n table_item = QTableWidgetItem()\n table_item.setData(Qt.DisplayRole, str(round(self.d_standard_band_values[j], 3)))\n self.bandpower_stats.setItem(3, j, table_item)\n\n def home_button_pressed(self):\n self.home_button.setStyleSheet(\"\"\"\n background-color: #284351;\n border-style: outset;\n border-width: 2px;\n border-radius: 10px;\n border-color: #e3ebec;\n color: white;\n font: 14px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n margin: 1em 0;\n \"\"\")\n\n def home_button_released(self):\n self.home_button.setStyleSheet(\"\"\"\n background-color: #73787c;\n border-style: outset;\n border-width: 2px;\n border-radius: 10px;\n border-color: #e3ebec;\n color: white;\n font: 14px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n margin: 1em 0;\n \"\"\")\n self.parent.change_page_index(0)\n\n def set_output_option(self, i):\n self.output_stack.setCurrentIndex(i)\n\n def clear_states_button_pressed(self):\n self.home_button.setStyleSheet(\"\"\"\n background-color: #284351;\n border-style: outset;\n border-width: 2px;\n border-radius: 10px;\n border-color: #e3ebec;\n color: white;\n font: 14px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n margin: 1em 0;\n \"\"\")\n\n def clear_states_button_released(self):\n self.home_button.setStyleSheet(\"\"\"\n background-color: #73787c;\n border-style: outset;\n border-width: 2px;\n border-radius: 10px;\n border-color: #e3ebec;\n 
color: white;\n font: 14px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n margin: 1em 0;\n \"\"\")\n # Plotting info\n self.plot_state = [False, False, False, False] # a, b, c, d\n\n # Processed data variables\n self.a_processed_bp = None\n self.b_processed_bp = None\n self.c_processed_bp = None\n self.d_processed_bp = None\n self.a_frequencies = [0, 0]\n self.b_frequencies = [0, 0]\n self.c_frequencies = [0, 0]\n self.d_frequencies = [0, 0]\n\n self.a_standard_band_values = [0, 0, 0, 0, 0]\n self.b_standard_band_values = [0, 0, 0, 0, 0]\n self.c_standard_band_values = [0, 0, 0, 0, 0]\n self.d_standard_band_values = [0, 0, 0, 0, 0]\n\n for i in range(4):\n for j in range(5):\n table_item = QTableWidgetItem()\n table_item.setData(Qt.DisplayRole, \"0\")\n self.bandpower_stats.setItem(i, j, table_item)\n\n self.graph.clear_plots()\n\n def output_bins_button_pressed(self):\n self.output_bins_button.setStyleSheet(\"\"\"\n background-color: #284351;\n border-style: outset;\n border-width: 2px;\n border-radius: 10px;\n border-color: #e3ebec;\n color: white;\n font: 14px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n margin: 1em 0;\n \"\"\")\n\n def output_bins_button_released(self):\n self.output_bins_button.setStyleSheet(\"\"\"\n background-color: #73787c;\n border-style: outset;\n border-width: 2px;\n border-radius: 10px;\n border-color: #e3ebec;\n color: white;\n font: 14px;\n height: 1em;\n max-width: 30em;\n padding: 6px;\n margin: 1em 0;\n \"\"\")\n\n # Build csv\n\n index = []\n columns = []\n if self.plot_state[0] is True:\n columns = self.a_frequencies\n elif self.plot_state[1] is True:\n columns = self.b_frequencies\n elif self.plot_state[2] is True:\n columns = self.c_frequencies\n elif self.plot_state[3] is True:\n columns = self.d_frequencies\n\n data = []\n if self.plot_state[0] is True:\n data.append(self.a_processed_bp)\n index.append(\"State A\")\n if self.plot_state[1] is True:\n data.append(self.b_processed_bp)\n index.append(\"State B\")\n 
if self.plot_state[2] is True:\n data.append(self.c_processed_bp)\n index.append(\"State C\")\n if self.plot_state[3] is True:\n data.append(self.d_processed_bp)\n index.append(\"State D\")\n\n pd.DataFrame(data=data, index=index, columns=columns).to_csv(\"Frequency bins.csv\")\n\n\n\n\n\nclass Graph(pg.GraphicsLayoutWidget):\n\n def __init__(self, plotSelf, parentSelf):\n self.plotSelf = plotSelf\n self.parentSelf = parentSelf\n super().__init__()\n self.p = self.addPlot(row=0, col=0)\n self.legend = self.p.addLegend()\n\n def plot_a(self, x, y):\n plot_item = self.p.plot(x, y, pen=pg.mkPen(color=(45, 201, 55)))\n self.legend.addItem(plot_item, \"State A\")\n\n def plot_b(self, x, y):\n plot_item = self.p.plot(x, y, pen=pg.mkPen(color=(231, 180, 22)))\n self.legend.addItem(plot_item, \"State B\")\n\n def plot_c(self, x, y):\n plot_item = self.p.plot(x, y, pen=pg.mkPen(color=(219, 123, 43)))\n self.legend.addItem(plot_item, \"State C\")\n\n def plot_d(self, x, y):\n plot_item = self.p.plot(x, y, pen=pg.mkPen(color=(204, 50, 50)))\n self.legend.addItem(plot_item, \"State D\")\n\n def clear_plots(self):\n self.p.clear()\n","repo_name":"jamesdollard/brainboi3000","sub_path":"NEW_GUI/ab_bandpower_widget.py","file_name":"ab_bandpower_widget.py","file_ext":"py","file_size_in_byte":38004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11594156861","text":"from mmdet.apis import init_detector, inference_detector\n\n# Specify the path to model config and checkpoint file\nrandom_config = 'configs/vis/random.py'\npretrain_config = 'configs/vis/pretrain.py'\nfinetune_config = 'configs/vis/finetune.py'\n\nsup_pretrain = 'pretrain/finetune_mask-rcnn_1x_coco_lr3e-2_wd5e-5/epoch_12.pth'\nour_pretrain = 'pretrain/selfsup_mask-rcnn_mstrain-soft-teacher_sampler-4096_temp0.5/final_model.pth'\n\n# build the model from a config file and a checkpoint file\nrandom_model = init_detector(pretrain_config, None, 
device='cuda:0')\nsup_model = init_detector(finetune_config, sup_pretrain, device='cuda:0')\nour_model = init_detector(pretrain_config, our_pretrain, device='cuda:0')\n\n# test a single image and show the results\nimg = '000000339129.jpg' # or img = mmcv.imread(img), which will only load it once\n\nresult = inference_detector(random_model, img)\nrandom_model.CLASSES = ['object']\nrandom_model.show_result(img, result, out_file='random.jpg')\n\nresult = inference_detector(sup_model, img)\nsup_model.show_result(img, result, out_file='sup.jpg')\n\nresult = inference_detector(our_model, img)\nour_model.CLASSES = ['object']\nour_model.show_result(img, result, out_file='result.jpg')","repo_name":"mitming/AlignDet","sub_path":"tools/analysis_tools/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23901458191","text":"from itertools import count\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom restApiProject.settings import SECRET_KEY\nfrom .serializers import BoardSerializer\nfrom .models import Board\nfrom users.models import User\nfrom django.db.models import Count\nimport jwt\n\n# Create your views here.\n\ndef validate_token(request) :\n \"\"\"\n To validate jwt token in cookies\n \"\"\"\n token = request.COOKIES.get('jwt')\n payload = None\n \n if not token :\n return False\n \n try :\n payload = jwt.decode(token, SECRET_KEY, algorithms='HS256')\n except :\n payload = False\n \n return payload\n\nclass CreateView(APIView) :\n def post(self, request) :\n payload = validate_token(request)\n \n if payload :\n user = User.objects.filter(id=payload['id']).first()\n else :\n return Response({\"message\": \"The token is not valid\"})\n \n if user :\n request.data.update({'uid': user.id})\n serializer = 
BoardSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n \n return Response(serializer.data)\n\nclass GetView(APIView) :\n def get(self, request) :\n payload = validate_token(request)\n \n if payload :\n user = User.objects.filter(id=payload['id']).first()\n else :\n return Response({\"message\": \"The token is not valid\"})\n \n if user :\n board_list = list(Board.objects.values())\n \n return Response({'data': board_list})\n\nclass GetSortView(APIView) :\n def get(self, request) :\n payload = validate_token(request)\n \n if payload :\n user = User.objects.filter(id=payload['id']).first()\n else :\n return Response({\"message\": \"The token is not valid\"})\n \n if user :\n board_list = list(Board.objects.order_by('name').values())\n \n return Response({'data': board_list})\n\nclass GetGroupByView(APIView) :\n def get(self, request) :\n payload = validate_token(request)\n \n if payload :\n user = User.objects.filter(id=payload['id']).first()\n else :\n return Response({\"message\": \"The token is not valid\"})\n \n if user :\n board_list = (Board.objects\n .values('name')\n .annotate(dcount=Count('name'))\n .order_by()\n )\n \n return Response({'data': board_list})\n\nclass UpdateView(APIView) :\n def patch(self, request) :\n board_id = request.data['id']\n payload = validate_token(request)\n if payload :\n user = User.objects.filter(id=payload['id']).first()\n else :\n return Response({\"message\": \"The token is not valid\"})\n \n if user :\n board = Board.objects.filter(id=board_id).first()\n if user.is_superuser or user.is_staff or board.uid.id == user.id :\n serializer = BoardSerializer(instance=board, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n message = 'Data Updated'\n else :\n message = 'Not allowed'\n \n return Response({\n 'message': message\n })\n\nclass DeleteView(APIView):\n \n def delete(self, request) :\n board_id = request.data['id']\n payload = 
validate_token(request)\n if payload :\n user = User.objects.filter(id=payload['id']).first()\n else :\n return Response({\"message\": \"The token is not valid\"})\n \n if user :\n board = Board.objects.filter(id=board_id).first()\n if user.is_superuser or user.is_staff or board.uid.id == user.id :\n board.delete()\n message = 'Data deleted'\n else :\n message = 'Not allowed'\n \n return Response({\n 'message': message\n })\n\n \n \n","repo_name":"chaanto/Django-restApiProject","sub_path":"boards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18193926606","text":"import numpy as np\r\nimport sklearn.preprocessing as prep\r\nimport tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn import manifold\r\nimport json\r\n\r\nn_1 = 40\r\nn_2 = 20\r\nn_3 = 10\r\nstartplace = 45\r\ndataset = 'tokyo'\r\ndataset2 = 'tokyo'\r\n\r\ntraining_epochs = 300\r\nnew_epochs = 0\r\n\r\ndef score(arr1, arr2):\r\n scalar = prep.MinMaxScaler()\r\n dis1 = arr1.reshape(-1, 1)\r\n dis2 = arr2.reshape(-1, 1)\r\n dis1 = scalar.fit_transform(dis1).reshape(-1)\r\n dis2 = scalar.fit_transform(dis2).reshape(-1)\r\n loss = np.sum((dis1-dis2) ** 2, 0)\r\n return dis1, dis2, loss\r\n\r\ndef score2(arr1, arr2):\r\n scalar = prep.StandardScaler()\r\n dis1 = arr1.reshape(-1, 1)\r\n dis2 = arr2.reshape(-1, 1)\r\n dis1 = scalar.fit_transform(dis1).reshape(-1)\r\n dis2 = scalar.fit_transform(dis2).reshape(-1)\r\n loss = np.sum((dis1 - dis2) ** 2, 0)\r\n return dis1, dis2, loss\r\n\r\n\r\ndef xavier_init(fan_in, fan_out, constant = 1):\r\n low = -constant * np.sqrt(6.0 / (fan_in + fan_out))\r\n high = constant * np.sqrt(6.0 / (fan_in + fan_out))\r\n return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32, seed=1)\r\n\r\nclass 
AdditiveGaussianNoiseAutoencoder(object):\r\n def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer(), scale = 0.01):\r\n #n_input:输入变量数\r\n #n_hidden:隐藏层节点数\r\n #transfer_function:隐藏层激活函数\r\n #optimizer:优化器Adam\r\n #scale:高斯噪声系数\r\n\r\n self.n_input = n_input\r\n self.n_hidden = n_hidden\r\n self.n_1 = n_1\r\n self.n_2 = n_2\r\n self.n_3 = n_3\r\n self.scale = tf.placeholder(tf.float32)\r\n self.training_scale = scale\r\n\r\n # network structure\r\n # distribution on the hidden layer relies on the activation function\r\n self.x = tf.placeholder(tf.float32, [None, self.n_input], name='X')\r\n network_weights = self._initialize_weights()\r\n self.weights = network_weights\r\n\r\n # encode\r\n # self.att1 = self.x * self.weights['attention1']\r\n # self.tmp = tf.nn.softmax(self.x)\r\n self.layer1 = tf.nn.sigmoid(tf.add(tf.matmul(self.x , self.weights['w1']), self.weights['b1']))\r\n self.layer2 = tf.nn.softplus(tf.add(tf.matmul(self.layer1, self.weights['w2']), self.weights['b2']))\r\n self.layer3 = tf.nn.softplus(tf.add(tf.matmul(self.layer2, self.weights['w3']), self.weights['b3']))\r\n\r\n self.hidden = tf.nn.softplus(tf.add(tf.matmul(self.layer3, self.weights['w4']), self.weights['b4']))\r\n\r\n # decode\r\n self.layer5 = tf.nn.softplus(tf.add(tf.matmul(self.hidden, self.weights['w5']), self.weights['b5']))\r\n self.layer6 = tf.nn.softplus(tf.add(tf.matmul(self.layer5, self.weights['w6']), self.weights['b6']))\r\n self.layer7 = tf.nn.sigmoid(tf.add(tf.matmul(self.layer6, self.weights['w7']), self.weights['b7']))\r\n\r\n self.reconstruction = tf.nn.tanh(tf.add(tf.matmul(self.layer7, self.weights['w8']), self.weights['b8']))\r\n # self.reconstruction = (self.att2 * (1.0/self.weights['attention2']))\r\n\r\n # self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))\r\n self.cost = tf.reduce_sum(-tf.reduce_sum(tf.nn.softmax(self.x) * tf.log(tf.nn.softmax(self.reconstruction)), reduction_indices=[1]))\r\n self.optimizer 
= optimizer.minimize(self.cost)\r\n\r\n init = tf.global_variables_initializer()\r\n self.sess = tf.Session()\r\n\r\n self.sess.run(init)\r\n # self.writer = tf.summary.FileWriter('./graphs', self.sess.graph)\r\n\r\n def _initialize_weights(self):\r\n all_weights = {}\r\n # all_weights['attention1'] = (tf.Variable(tf.ones([self.n_input], dtype=tf.float32), name='attention1'))\r\n # all_weights['attention2'] = (tf.Variable(tf.ones([self.n_input], dtype=tf.float32)))\r\n\r\n all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_1), name='w1')\r\n all_weights['b1'] = tf.Variable(tf.zeros([self.n_1], dtype=tf.float32), name='b1')\r\n all_weights['w2'] = tf.Variable(xavier_init(self.n_1, self.n_2), name='w2')\r\n all_weights['b2'] = tf.Variable(tf.zeros([self.n_2], dtype=tf.float32), name='b2')\r\n all_weights['w3'] = tf.Variable(xavier_init(self.n_2, self.n_3), name='w3')\r\n all_weights['b3'] = tf.Variable(tf.zeros([self.n_3], dtype=tf.float32), name='b3')\r\n all_weights['w4'] = tf.Variable(xavier_init(self.n_3, self.n_hidden), name='w4')\r\n all_weights['b4'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32), name='b4')\r\n\r\n all_weights['w5'] = tf.Variable(xavier_init(self.n_hidden, self.n_3), name='w5')\r\n all_weights['b5'] = tf.Variable(tf.zeros([self.n_3], dtype=tf.float32), name='b5')\r\n all_weights['w6'] = tf.Variable(xavier_init(self.n_3, self.n_2), name='w6')\r\n all_weights['b6'] = tf.Variable(tf.zeros([self.n_2], dtype=tf.float32), name='b6')\r\n all_weights['w7'] = tf.Variable(xavier_init(self.n_2, self.n_1), name='w7')\r\n all_weights['b7'] = tf.Variable(tf.zeros([self.n_1], dtype=tf.float32), name='b7')\r\n all_weights['w8'] = tf.Variable(xavier_init(self.n_1, self.n_input), name='w8')\r\n all_weights['b8'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32), name='b8')\r\n return all_weights\r\n\r\n def partial_fit(self, X):\r\n #get the value of cost and optimize it\r\n cost, opt = self.sess.run((self.cost, 
self.optimizer),\r\n feed_dict = {self.x : X, self.scale : self.training_scale})\r\n return cost\r\n\r\n def calc_total_cost(self, X):\r\n #get the value of cost\r\n return self.sess.run(self.cost, feed_dict={self.x: X, self.scale : self.training_scale})\r\n\r\n def transform(self, X):\r\n #get the value of hidden layer\r\n return self.sess.run(self.hidden, feed_dict={self.x:X, self.scale:self.training_scale})\r\n\r\n def generate(self, hidden = None):\r\n if hidden is None:\r\n hidden = np.random.normal(size = self.weights['b1'])\r\n return self.sess.run(self.reconstruction, feed_dict={self.hidden : hidden})\r\n\r\n def reconstruct(self, X):\r\n return self.sess.run(self.reconstruction, feed_dict={self.x : X, self.scale : self.training_scale})\r\n\r\n def getWeights(self):\r\n return self.sess.run(self.weights['w1'])\r\n\r\n def getBiases(self):\r\n return self.sess.run(self.weights['b1'])\r\n\r\n # def getAttent1(self):\r\n # return self.sess.run(self.weights['attention1'])\r\n\r\n # def getAttent2(self):\r\n # return self.sess.run(self.weights['attention2'])\r\n\r\ndef standard_scale(X_train, X_test):\r\n #标准化数据,均值为0,标准差为1\r\n preprocessor = prep.StandardScaler().fit(X_train)\r\n X_train = preprocessor.transform(X_train)\r\n preprocessor = prep.StandardScaler().fit(X_test)\r\n X_test = preprocessor.transform(X_test)\r\n return X_train, X_test\r\n\r\ndef get_random_block_from_data(data, batch_size):\r\n #get a batch size of batch_size randomly\r\n start_index = np.random.randint(0, len(data) - batch_size)\r\n return data[start_index : (start_index + batch_size)]\r\n\r\ndef main(_):\r\n # mnist = input_data.read_data_sets('MNIST_data', one_hot=False)\r\n # print (mnist.test.labels)\r\n # X_train = mnist.train.images\r\n # X_test = mnist.test.images\r\n # labels = mnist.test.labels\r\n\r\n X_train = np.loadtxt(\"data/\"+dataset+\"_X.txt\");\r\n X_test = np.loadtxt(\"data/\"+dataset2+\"_X.txt\")\r\n try:\r\n labels = 
np.loadtxt(\"data/\"+dataset+\"_labels.txt\");\r\n except IOError:\r\n labels = np.zeros(X_test.shape[0])\r\n labels[startplace] = 1\r\n\r\n try:\r\n names = open(\"data/\"+dataset+\"_names.txt\", 'r', encoding='utf8').read();\r\n names = names.split()\r\n except IOError:\r\n names = np.array(range(X_test.shape[0])).astype(dtype=str)\r\n\r\n\r\n n, m = X_train.shape\r\n\r\n\r\n # Scale\r\n X_train, X_test = standard_scale(X_train, X_test)\r\n\r\n print('calculating PCA...')\r\n pca = PCA(n_components=2)\r\n Y_pca = pca.fit_transform(X_test)\r\n\r\n print('calculating MDS...')\r\n mds = manifold.MDS(n_components=2, max_iter=100, n_init=1)\r\n Y_mds = mds.fit_transform(X_test)\r\n\r\n print('calculating ISOMAP...')\r\n isomap = manifold.Isomap(n_neighbors=5, n_components=2)\r\n Y_iso = isomap.fit_transform(X_test)\r\n\r\n print('calculating TSNE...')\r\n tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)\r\n Y_tsne = tsne.fit_transform(X_test)\r\n\r\n #===============================================================Our Method==================\r\n\r\n\r\n n_samples = X_train.shape[0]\r\n batch_size = n_samples\r\n display_step = 1\r\n\r\n autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=m,\r\n n_hidden=2,\r\n optimizer=tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.5, beta2=0.5),\r\n scale=0.005)\r\n print('calculating AE...')\r\n total_batch = int(n_samples // batch_size)\r\n for epoch in range(training_epochs):\r\n avg_cost = 0.\r\n for i in range(total_batch+1):\r\n # batch_xs = get_random_block_from_data(X_train, batch_size)\r\n if i+batch_size < n_samples:\r\n batch_xs = X_train[i : i+batch_size]\r\n else:\r\n batch_xs = X_train[i: ]\r\n cost = autoencoder.partial_fit(batch_xs)\r\n avg_cost += cost / n_samples\r\n if epoch % display_step == 0:\r\n print('Epoch:', '%04d' % (epoch + 1), \"cost=\", '{:.9f}'.format(avg_cost))\r\n\r\n for epoch in range(new_epochs):\r\n avg_cost = 0.\r\n for i in range(total_batch + 1):\r\n # batch_xs = 
get_random_block_from_data(X_train, batch_size)\r\n if i + batch_size < n_samples:\r\n batch_xs = X_test[i: i + batch_size]\r\n else:\r\n batch_xs = X_test[i:]\r\n cost = autoencoder.partial_fit(batch_xs)\r\n avg_cost += cost / n_samples\r\n\r\n if epoch % display_step == 0:\r\n print('Epoch:', '%04d'%(epoch+1), \"cost=\", '{:.9f}'.format(avg_cost))\r\n # print('Total cost: ' + str(autoencoder.calc_total_cost(X_test)))\r\n Y = autoencoder.transform(X_test)\r\n\r\n dif1 = (X_test-X_test[startplace])**2\r\n dif2 = (Y - Y[startplace]) ** 2\r\n dif3 = (Y_pca - Y_pca[startplace]) ** 2\r\n dif4 = (Y_mds - Y_mds[startplace]) ** 2\r\n dif5 = (Y_iso - Y_iso[startplace]) ** 2\r\n dif6 = (Y_tsne - Y_tsne[startplace]) ** 2\r\n\r\n dis1 = np.sqrt(np.sum(dif1, 1))\r\n dis2 = np.sqrt(np.sum(dif2, 1))\r\n dis3 = np.sqrt(np.sum(dif3, 1))\r\n dis4 = np.sqrt(np.sum(dif4, 1))\r\n dis5 = np.sqrt(np.sum(dif5, 1))\r\n dis6 = np.sqrt(np.sum(dif6, 1))\r\n\r\n # rank1 = np.argsort(dis1)\r\n # rank1 = {rank: idx for idx, rank in enumerate(rank1)}\r\n # rank2 = np.argsort(dis2)\r\n # rank2 = {rank: idx for idx, rank in enumerate(rank2)}\r\n # print(rank1)\r\n # print(rank2)\r\n\r\n # _, _, loss = score2(dis1, dis2)\r\n _, dis3, loss = score(dis1, dis3)\r\n print(\"The loss of PCA is: \", loss)\r\n _, dis4, loss = score(dis1, dis4)\r\n print(\"The loss of MDS is: \", loss)\r\n _, dis5, loss = score(dis1, dis5)\r\n print(\"The loss of ISOMAP is: \", loss)\r\n _, dis6, loss = score(dis1, dis6)\r\n print(\"The loss of TSNE is: \", loss)\r\n dis1, dis2, loss = score(dis1, dis2)\r\n print(\"The loss of AE is: \", loss)\r\n\r\n with open('data/tokyo_gps.txt') as f:\r\n X = f.read()\r\n X = X.split('\\n')\r\n f.close()\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n X[i] = X[i].split(' ')\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (1 - dis1[i])\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = 
open(\"data1.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (1 - dis2[i])\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data2.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (1 - dis3[i])\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data3.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (np.fabs(dis1[i]-dis2[i]))\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data4.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (np.fabs(dis1[i]-dis3[i]))\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data5.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (1 - dis4[i])\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data6.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (1 - dis5[i])\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = 
open(\"data7.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (np.fabs(dis1[i] - dis4[i]))\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data8.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n mapDic = []\r\n for i, name in enumerate(names):\r\n tmpdic = {}\r\n tmpdic[\"lng\"] = float(X[i][0])\r\n tmpdic[\"lat\"] = float(X[i][1])\r\n tmpdic[\"count\"] = 100 * (np.fabs(dis1[i] - dis5[i]))\r\n mapDic.append(tmpdic)\r\n jsonStr = json.dumps(mapDic)\r\n f = open(\"data9.json\", \"w\")\r\n print(jsonStr, file=f)\r\n f.close()\r\n\r\n # fig = plt.figure(figsize=(15, 8))\r\n # plt.suptitle(dataset)\r\n # ax = fig.add_subplot(1, 2, 1)\r\n # plt.title('MLP')\r\n # plt.scatter(Y[:,0], Y[:,1], c=labels, cmap=plt.cm.Spectral, marker='.')\r\n # ax = fig.add_subplot(1, 2, 2)\r\n # plt.title('pca')\r\n # plt.scatter(Y_pca[:,0], Y_pca[:,1], c=dis2, cmap=plt.cm.Spectral)\r\n\r\n fig, ax = plt.subplots()\r\n sc = plt.scatter(Y[:,0], Y[:,1], marker='o', c=dis4, cmap=plt.cm.Spectral)\r\n plt.xlim(xmin=0, xmax=8) # adjust the max leaving min unchanged\r\n annot = ax.annotate(\"\", xy=(0, 0), xytext=(20, 20), textcoords=\"offset points\",\r\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\r\n arrowprops=dict(arrowstyle=\"->\"))\r\n annot.set_visible(False)\r\n\r\n def update_annot(ind):\r\n\r\n pos = sc.get_offsets()[ind[\"ind\"][0]]\r\n annot.xy = pos\r\n text = names[ind[\"ind\"][0]]\r\n annot.set_text(text)\r\n # annot.get_bbox_patch().set_facecolor(cmap(norm(c[ind[\"ind\"][0]])))\r\n # annot.get_bbox_patch().set_alpha(0.4)\r\n\r\n def hover(event):\r\n vis = annot.get_visible()\r\n if event.inaxes == ax:\r\n cont, ind = sc.contains(event)\r\n if cont:\r\n update_annot(ind)\r\n annot.set_visible(True)\r\n fig.canvas.draw_idle()\r\n else:\r\n if vis:\r\n 
annot.set_visible(False)\r\n fig.canvas.draw_idle()\r\n\r\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\r\n\r\n # plt.figure()\r\n # tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)\r\n # Y3 = tsne.fit_transform(X_test)\r\n # plt.scatter(Y3[:, 0], Y3[:, 1], s=(10 - labels) ** 2, c=labels, cmap=plt.cm.Spectral)\r\n\r\n\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.app.run(main = main)\r\n","repo_name":"Ph0en1xGSeek/evacuation_route","sub_path":"MLP_tokyo.py","file_name":"MLP_tokyo.py","file_ext":"py","file_size_in_byte":16751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72230162077","text":"# PASS\r\nimport torch\r\nfrom torch import nn\r\n\r\n\r\nimport numpy as np\r\nimport time\r\nimport math\r\nimport PIL\r\nimport PIL.Image as Image\r\n\r\n\r\nimport torchvision as tv\r\nimport torch.nn as nn\r\nimport torchvision.transforms as trans\r\nimport torchvision.datasets as dsets\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nfrom torchvision.transforms import ToPILImage as tensor2PIL\r\n\r\ndef weights_init_kaiming(m):\r\n classname = m.__class__.__name__\r\n if classname.find('Linear') != -1:\r\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\r\n nn.init.constant_(m.bias, 0.0)\r\n elif classname.find('Conv') != -1:\r\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0.0)\r\n elif classname.find('BatchNorm') != -1:\r\n if m.affine:\r\n nn.init.constant_(m.weight, 1.0)\r\n nn.init.constant_(m.bias, 0.0)\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\r\n padding=1, 
bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n# Use ResNet SAME padding for conv_same\r\nclass OrientationPredictor(nn.Module):\r\n def __init__(self, isDefaultImageSize = True, OP_train_dropout = False, dropout_rate = 0.6):\r\n # TODO: 'isDefaultImageSize' Not USE in new Res Structure!!!!!!\r\n super(OrientationPredictor, self).__init__()\r\n self.OP_train_dropout = OP_train_dropout\r\n\r\n # if isDefaultImageSize: # 56 * 56 * 256\r\n # self.conv1 = nn.Conv2d(256, 128, kernel_size=5, stride=3, padding=0, bias=False) # VALID\r\n # self.bn1 = nn.BatchNorm2d(128)\r\n # self.conv2 = nn.Conv2d(128, 256, kernel_size=5, stride=3, padding=2, bias=False) # SAME\r\n # self.bn2 = nn.BatchNorm2d(256)\r\n # self.conv3 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False) # SAME\r\n # self.bn3 = nn.BatchNorm2d(512)\r\n # self.conv4 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=0, bias=False) # VALID\r\n # self.bn4 = nn.BatchNorm2d(1024)\r\n # else: # 64 * 32 * 256\r\n # self.conv1 = nn.Conv2d(256, 128, kernel_size=5, stride=3, padding=2, bias=False) # SAME\r\n # self.bn1 = nn.BatchNorm2d(128)\r\n # self.conv2 = nn.Conv2d(128, 256, kernel_size=5, stride=3, padding=2, bias=False) # SAME\r\n # self.bn2 = nn.BatchNorm2d(256)\r\n # self.conv3 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False) # SAME\r\n # self.bn3 = 
nn.BatchNorm2d(512)\r\n # self.conv4 = nn.Conv2d(512, 1024, kernel_size=(4,2), stride=1, padding=0, bias=False) # VALID\r\n # # H - kernel[0] W - kernel[1]\r\n # self.bn4 = nn.BatchNorm2d(1024)\r\n\r\n self.net1 = self._make_layer(Bottleneck, 128, 4, 256, stride=2) # (B, 512, 28, 28)\r\n\r\n if self.OP_train_dropout:\r\n self.dropout = nn.Dropout(p=dropout_rate,inplace=False) # True will wrong 'variable require grad changed'!!\r\n\r\n # self.conv5 = nn.Conv2d(1024,3,kernel_size=1,bias=True) # No need BN layer\r\n self.net2 = nn.Conv2d(512, 3, kernel_size=1, bias=True)\r\n\r\n self.gap = nn.AdaptiveAvgPool2d((1,1))\r\n\r\n self.relu = nn.ReLU(inplace=True)\r\n self.softmax = nn.Softmax(dim=1) # for full model use, to weight Orientation Branch\r\n self.logits = 0 # for training, by using nn.CrossEntropy\r\n\r\n # May use Sequential or List cover all Conv !!!!!!!\r\n self.kaiming_init()\r\n\r\n def forward(self, x):\r\n # out = self.conv1(x)\r\n # out = self.bn1(out)\r\n # out = self.relu(out)\r\n # # print('after conv1: {}'.format(out.shape)) # ([B, 128, 18, 18]) ([B, 128, 22, 11])\r\n #\r\n # out = self.conv2(out)\r\n # out = self.bn2(out)\r\n # out = self.relu(out)\r\n # # print('after conv2: {}'.format(out.shape)) # ([B, 256, 6, 6]) ([B, 256, 8, 4])\r\n #\r\n # out = self.conv3(out)\r\n # out = self.bn3(out)\r\n # out = self.relu(out)\r\n # # print('after conv3: {}'.format(out.shape)) # ([B, 512, 3, 3]) ([B, 512, 4, 2])\r\n #\r\n # out = self.conv4(out)\r\n # out = self.bn4(out)\r\n # out = self.relu(out)\r\n # # print('after conv4: {}'.format(out.shape)) # ([B, 1024, 1, 1]) ([B, 1024, 1, 1])\r\n\r\n out = self.net1(x)\r\n # print('after net1_layer2: {}'.format(out.shape)) # torch.Size([B, 512, 28, 28]) ([2, 512, 32, 16])\r\n\r\n out = self.gap(out)\r\n # print('after GAP: {}'.format(out.shape)) # torch.Size([B, 512, 1, 1])\r\n\r\n if self.OP_train_dropout:\r\n out = self.dropout(out)\r\n # print('apply dropout')\r\n\r\n # out = self.conv5(out)\r\n # # 
print('after conv5: {}'.format(out.shape)) # ([B, 3, 1, 1])\r\n\r\n out = self.net2(out)\r\n # print('after net2: {}'.format(out.shape)) # torch.Size([B, 3, 1, 1])\r\n\r\n out = out.reshape((out.shape[0],-1)) # flatten to 2D\r\n self.logits = out\r\n # print('logits shape: {}'.format(self.logits.shape)) # ([B, 3])\r\n out = self.softmax(out)\r\n return out\r\n\r\n def _make_layer(self, block, planes, blocks, inplanes, stride=1):\r\n downsample = None\r\n if stride != 1 or inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(inplanes, planes, stride, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def kaiming_init(self,if_linear_init = False):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0.0)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n if m.affine:\r\n nn.init.constant_(m.weight, 1.0)\r\n nn.init.constant_(m.bias, 0.0)\r\n elif isinstance(m, nn.BatchNorm1d):\r\n if m.affine:\r\n nn.init.constant_(m.weight, 1.0)\r\n nn.init.constant_(m.bias, 0.0)\r\n elif isinstance(m,nn.Linear) and if_linear_init:\r\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\r\n nn.init.constant_(m.bias, 0.0)\r\n\r\n\r\nif __name__ == '__main__':\r\n synthesize_input_1 = torch.randn((2,256,56,56))\r\n synthesize_input_2 = torch.randn((2,256,64,32))\r\n my_model_1 = OrientationPredictor(isDefaultImageSize=True,OP_train_dropout=True)\r\n my_model_2 = OrientationPredictor(isDefaultImageSize=False,OP_train_dropout=False)\r\n print(20 * '-' + '>' + ' model 1 ' + '<' + 20 * '-')\r\n print(my_model_1)\r\n print(20 * '-' + 
'>' + ' model 2 ' + '<' + 20 * '-')\r\n print(my_model_2)\r\n print(20 * '-' + '>' + ' forward model 1 ' + '<' + 20 * '-')\r\n recv = my_model_1(synthesize_input_1)\r\n print('softmax res: {}'.format(recv))\r\n print(20 * '-' + '>' + ' forward model 2 ' + '<' + 20 * '-')\r\n recv = my_model_2(synthesize_input_2)\r\n print('softmax res: {}'.format(recv))\r\n print('logits res: {}'.format(my_model_2.logits))\r\n print(20 * '-' + '>' + ' model 2 parameters ' + '<' + 20 * '-')\r\n for idx,(name, param) in enumerate(my_model_2.named_parameters()):\r\n print('{} {}: {}'.format(idx,name,param.shape))\r\n\r\n print(20 * '-' + '>' + ' grad test ' + '<' + 20 * '-')\r\n # This prove OP grad is related to main_stream forward value, bigger value bigger grad\r\n synthesize_main_stream_1 = torch.rand((2,2048,16,8)) / 1e9 # (2,2048,8,4)\r\n synthesize_main_stream_2 = torch.zeros((2,2048,16,8)) # (2,2048,8,4)\r\n synthesize_main_stream_3 = torch.zeros((2,2048,16,8)) # (2,2048,8,4)\r\n weighted_1 = recv[:,0].reshape((-1,1,1,1)) * synthesize_main_stream_1\r\n weighted_2 = recv[:,1].reshape((-1,1,1,1)) * synthesize_main_stream_2\r\n weighted_3 = recv[:,2].reshape((-1,1,1,1)) * synthesize_main_stream_3\r\n fused = weighted_1 + weighted_2 + weighted_3\r\n loss = fused.mean()\r\n loss.backward()\r\n # weighted_1.mean().backward()\r\n # print('my_model_2.conv5.bias.grad: {}'.format(my_model_2.conv5.bias.grad))\r\n # print('my_model_2.conv1.weight.grad: {}'.format(my_model_2.conv1.weight.grad))\r\n print('my_model_2.net1[0].conv1.weight.grad: {}'.format(my_model_2.net1[0].conv1.weight.grad))\r\n print('my_model_2.net2.weight.grad: 
{}'.format(my_model_2.net2.weight.grad))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"XuJiaMing1997/Graduate_Stage_File_Record","sub_path":"OrientationPredictor.py","file_name":"OrientationPredictor.py","file_ext":"py","file_size_in_byte":9846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32203613338","text":"import glob\nimport os\nimport argparse\nfrom functools import partial\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pickle\nimport matlab\nimport matlab.engine\n\nimport tensorflow as tf\nfrom tensorflow.keras import losses, metrics, optimizers, layers, Sequential\nfrom tensorflow.keras.layers.experimental import preprocessing\n\nimport gen_model\nfrom FEM.vox2mesh18 import vox2mesh18\nfrom FEM.FEM_truss import FEM_truss\nfrom FEM.vGextC2extC import vGextC2extC\nfrom FEM.vGextF2extF import vGextF2extF\n\ndef get_data(filename):\n\twith open(filename, \"rb\") as fp:\n\t\tout = pickle.load(fp)\n\treturn out\n\ndef augment_data(dataset):\n\n\treturn new_data\n\ndef get_optimizer():\n\t# Just constant learning rate schedule\n\toptimizer = optimizers.Adam(learning_rate=1e-4)\n\treturn optimizer\n\ndef custom_loss_function(true_struct, new_struct, struct):\n\t# Apply direct stiffness method as loss function\n\t#(E, N,_) = eng.vox2mesh18(x);\n\t#(sE, dN) = eng.FEM_truss(N,E, extF,extC)\n\t#loss = max(abs(dN))\n\t\"\"\"\n\tx = tf.reduce_sum(struct)\n\ty = tf.reduce_sum(new_struct)\n\t#print(x)\n\t#print(y)\n\t#loss = tf.losses.mean_squared_error(tf.reduce_sum(new_struct), tf.reduce_sum(struct))\n\tloss = tf.abs(tf.abs(x)-tf.abs(y))\n\tprint(loss)\n\t\"\"\"\n\ttrue_struct2 = tf.math.subtract(struct, true_struct)\n\tnew_struct2 = tf.math.subtract(struct, new_struct)\n\t#diff = tf.abs(tf.math.subtract(new_struct2, true_struct2))\n\t#loss = tf.reduce_sum(diff)\n\tloss = tf.losses.mean_squared_error(true_struct2, 
new_struct2)\n\treturn loss\n\n\n\ndef convert_to_matlabint8(inarr):\n\treturn matlab.int8(np.int8(np.ceil(inarr)).tolist())\n\ndef runstuff(train_dir, test_number, threshold, model_type=2):\n\t# Construct model and measurements\n\tcollist = matlab.double([0, 0.68, 0.7647])\n\tbatch_size = 1\n\n\ttrainAug = Sequential([\n\tlayers.RandomFlip(mode=\"horizontal_and_vertical\"),\n\tlayers.RandomRotation(0.25)\n\t])\n\tos.makedirs(os.path.dirname(\"test_results/\" + \"model\" + str(model_type) + \"_\" + test_number + \"_\" + threshold + \"/\"), exist_ok=True)\n\n\tnames = matlab.engine.find_matlab()\n\t#print(names)\n\tif not names:\n\t\teng = matlab.engine.start_matlab()\n\telse:\n\t\teng = matlab.engine.connect_matlab(names[0])\n\n\tdef get_struct(arm_radiusx,arm_radiusy,wrist_radius,height):\n\n\t\tstructog, _, vGextC, vGextF, vGstayOff = eng.get_struct1(14,14,12,100, nargout=5)\n\n\t\tstruct = np.array(structog)\n\t\tstructC = np.array(vGextC)\n\t\tstructF = np.array(vGextF)\n\t\tstructOff = np.array(vGstayOff)\n\n\t\t(xstruct,ystruct,zstruct) = struct.shape\n\t\t(xC,yC,zC) = structC.shape\n\t\t(xF,yF,zF) = structF.shape\n\t\t(xoff,yoff,zoff) = structOff.shape\n\n\t\tstruct = tf.convert_to_tensor(struct, dtype=tf.float32)\n\t\tstructCten = tf.convert_to_tensor(structC, dtype=tf.float32)\n\t\tstructFten = tf.convert_to_tensor(structF, dtype=tf.float32)\n\t\tstructOfften = tf.convert_to_tensor(structOff, dtype=tf.float32)\n\n\t\tnew_dataset = tf.data.Dataset.from_tensors((struct,structCten,structFten,structOfften))\n\t\tnew_dataset = new_dataset.batch(batch_size)\n\t\teng.plotVg_safe(structog, 'edgeOff', nargout=0)\n\n\t\treturn new_dataset, structog, vGextC, vGextF, vGstayOff\n\t#structog, vGextC, vGextF, vGstayOff = eng.get_struct2(nargout=4)\n\tnew_dataset, structog, vGextC, vGextF, vGstayOff = get_struct(14,14,12,100)\n\n\n\n\t\"\"\"\n\t#model = gen_model.ConvModel3D((xstruct,ystruct,zstruct), (xC,yC,zC), (xF,yF,zF), (xoff,yoff,zoff))\n\tmodel = 
gen_model.ConvStructModel3D((xstruct,ystruct,zstruct,4))\n\toptimizer = get_optimizer()\n\tmse = losses.MeanSquaredError()\n\ttrain_loss = metrics.Mean()\n\tval_loss = metrics.Mean()\n\tmodel.summary()\n\t#tf.keras.utils.plot_model(model, \"3Dconv_model.png\", show_shapes=True\n\n\tos.makedirs(os.path.dirname(\"test_results/\" + test_number + \"/\"), exist_ok=True)\n\tcheckpoint_path = \"training_reinforce_all2/\" + test_number + \"/cp-{epoch:04d}.ckpt\"\n\tcheckpoint_dir = os.path.dirname(checkpoint_path)\n\tcp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\ttry:\n\t\tlatest = tf.train.latest_checkpoint(checkpoint_dir)\n\t\tmodel.load_weights(latest)\n\texcept AttributeError:\n\t\tprint(\"No trained weights to test on\")\n\t\"\"\"\n\n\tif model_type == 0:\n\t\tmodel = tf.keras.models.load_model('models_0/' + test_number + '/Genmodel')\n\telif model_type == 1:\n\t\tmodel = tf.keras.models.load_model('models_1/' + test_number + '/Genmodel')\n\telif model_type == 2:\n\t\tmodel = tf.keras.models.load_model('models_2/' + test_number + '/Genmodel')\n\telse:\n\t\tmodel = tf.keras.models.load_model('models/' + test_number + '/Genmodel')\n\n\tmodel.summary()\n\n\tdef bend_function(struct, vGextC, vGextF):\n\t\tsE, dN = eng.Struct_bend(convert_to_matlabint8(struct[0]), convert_to_matlabint8(vGextC[0]), convert_to_matlabint8(vGextF[0]), nargout=2)\n\t\treturn np.max(np.abs(np.array(dN))), np.sum(np.abs(np.array(dN)))\n\n\n\n\n\tprint(\"Summaries are written to '%s'.\" % train_dir)\n\ttrain_writer = tf.summary.create_file_writer(\n\t\tos.path.join(train_dir, \"test\"), flush_millis=3000)\n\n\tsummary_interval_step = 50\n\tsummary_interval = 1\n\ttest_results = []\n\tcompare_results = []\n\ttol = np.linspace(0.0, 1.0, 101)\n\n\tfor struct, structC, structF, structOff in new_dataset.take(1):\n\t\togtruct = struct\n\t\tvG = convert_to_matlabint8(struct)\n\t\tlossog = bend_function(struct, structC, 
structF)\n\t\tprint('Original max bending: %f | sum bending: %f' % (lossog[0], lossog[1]))\n\t\tfor epoch in range(200):\n\t\t\tprint(\"Epoch: \", epoch)\n\n\t\t\tif model_type == 0:\n\t\t\t\tnew_struct = model(struct, training=False)\n\t\t\telif model_type == 1:\n\t\t\t\tnew_struct = model([struct, structC, structF, structOff], training=False)\n\t\t\telse:\n\t\t\t\tinpus = tf.stack([struct, structC, structF, structOff], axis=4)\n\t\t\t\tnew_struct = model(inpus, training=False)\n\n\t\t\t# Trains model on structures with a truth structure created from\n\t\t\t# The direct stiffness method and shifted vox\n\t\t\t\"\"\"\n\t\t\tout_true = ogtruct.numpy()\n\t\t\tcheck_diff = 1e23\n\t\t\tfor i in tol:\n\t\t\t\tout = new_struct.numpy()\n\t\t\t\tout[out <= i] = 0\n\t\t\t\tout[out > i] = 1\n\n\t\t\t\tcheck_tol = np.abs(np.sum(out_true) - np.sum(out))\n\t\t\t\tif check_tol < check_diff:\n\t\t\t\t\tcheck_diff = check_tol\n\t\t\t\t\tcurrent_tol = i\n\n\t\t\t#avg_tol_val += current_tol\n\t\t\tout = new_struct.numpy()\n\t\t\tout[out <= current_tol] = 0\n\t\t\tout[out > current_tol] = 1\n\t\t\t\"\"\"\n\t\t\tout = new_struct.numpy()\n\n\t\t\tout[out <= float(threshold)/100] = 0\n\t\t\tout[out > float(threshold)/100] = 1\n\t\t\tloss = bend_function(out, structC, structF)\n\t\t\ttest_results.append([epoch, loss[0]/lossog[0], loss[1]/lossog[1], np.sum(out)])\n\n\t\t\ttry:\n\t\t\t\tstructog, sE, dN = eng.reinforce_struct_test(structog, vGextC, vGextF, vGstayOff, 100, nargout=3)\n\t\t\t\tcompare_results.append([epoch, np.max(np.abs(np.array(dN)))/lossog[0], np.sum(np.abs(np.array(dN)))/lossog[1], np.sum(np.array(structog))])\n\t\t\texcept:\n\t\t\t\tsE = np.nan\n\t\t\t\tdN = np.nan\n\n\n\t\t\tprint(\"3Dconv Model: max bending change: %f | sum bending change: %f | Amount: %d\" % (loss[0]/lossog[0], loss[1]/lossog[1], np.sum(out)))\n\t\t\tprint(\"Generative : max bending change: %f | sum bending change: %f | Amount: %d\" % (np.max(np.abs(np.array(dN)))/lossog[0], 
np.sum(np.abs(np.array(dN)))/lossog[1], np.sum(np.array(structog))))\n\n\t\t\teng.clf(nargout=0)\n\t\t\teng.plotVg_safe(matlab.int8(np.int8(np.ceil(out[0])).tolist()), 'edgeOff', 'col',collist, nargout=0)\n\t\t\teng.saveFigToAnimGif('3Dconvoxnet_testing' + test_number + '.gif', epoch==0, nargout=0)\n\t\t\tstruct = out\n\n\twith open(\"test_results/\" + \"model\" + str(model_type) + \"_\" + test_number + \"_\" + threshold + \"/3Dconvmodel_loss.txt\", \"wb\") as fp:\n\t\tpickle.dump(test_results, fp)\n\twith open(\"test_results/\" + \"model\" + str(model_type) + \"_\" + test_number + \"_\" + threshold + \"/generative_loss.txt\", \"wb\") as fp:\n\t\tpickle.dump(compare_results, fp)\n\twith open(\"test_results/\" + \"model\" + str(model_type) + \"_\" + test_number + \"_\" + threshold + \"/3Dconv_structures.txt\", \"wb\") as fp:\n\t\tpickle.dump([ogtruct[0].numpy(),out[0]], fp)\n\twith open(\"test_results/\" + \"model\" + str(model_type) + \"_\" + test_number + \"_\" + threshold + \"/generative_structures.txt\", \"wb\") as fp:\n\t\tpickle.dump([ogtruct[0].numpy(),np.array(structog)], fp)\n\n\tprint('Original max bending: %f | sum bending: %f' % (lossog[0], lossog[1]))\n\tprint(\"New max bending: %f | sum bending: %f\" % (loss[0], loss[1]))\n\teng.clf(nargout=0)\n\teng.plotVg_safe(matlab.int8(np.int8(np.ceil(out[0])).tolist()), 'edgeOff', 'col',collist, nargout=0)\n\tprint(\"Testing have been completed.\")\n\tinput(\"Press Enter to close...\")\n\ndef parse_args():\n\t\"\"\"Parse command line argument.\"\"\"\n\tparser = argparse.ArgumentParser(\"Train segmention model on 3D structures.\")\n\tparser.add_argument(\"model_type\", help=\"Choose model type to train, 0, 1 or 2\")\n\tparser.add_argument(\"train_dir\", help=\"Directory to put logs and saved model.\")\n\tparser.add_argument(\"test_number\", help=\"logs the result files to specific runs\")\n\tparser.add_argument(\"threshold\", help=\"Choose the threshold split to determine when a space is a voxel\")\n\n\treturn 
parser.parse_args()\n\nif __name__ == '__main__':\n\targs = parse_args()\n\trunstuff(args.train_dir, args.test_number, args.threshold, int(args.model_type))\n","repo_name":"AreNiko/Genarm","sub_path":"python_model/run_test_model.py","file_name":"run_test_model.py","file_ext":"py","file_size_in_byte":9080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37581098107","text":"#Implement a function called maxMin(lst) which will re-arrange the elements of a sorted list\r\n#such that the first position will have the largest number,\r\n#the second will have the smallest, and the third will have second largest, and so on\r\n#Input : [1,2,3,4,5]\r\n# Output : [5,1,4,2,3]\r\n\r\nimport pytest\r\n\r\nclass Solution:\r\n \r\n def reArrange(self, sortedList):\r\n result = []\r\n i , j = 0, len(sortedList)-1\r\n while i <= j:\r\n if i == j:\r\n # this will be the last element in an odd numbered list\r\n result.append(sortedList[i])\r\n break\r\n else:\r\n result.append(sortedList[j])\r\n result.append(sortedList[i])\r\n i += 1\r\n j -= 1\r\n return result\r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\nclass TestSolution:\r\n def test_rearrange_positivevaluessonly(self):\r\n s = Solution()\r\n assert s.reArrange([1,2,3,4,5]) == [5,1,4,2,3]\r\n \r\n def test_rearrange_allsamevalues(self):\r\n s = Solution()\r\n assert s.reArrange([1, 1, 1, 1, 1]) == [1, 1, 1, 1, 1]\r\n \r\n \r\n def test_rearrange_positiveandnegativevalues(self):\r\n s = Solution()\r\n assert s.reArrange([-10, -1, 1, 1, 1, 1]) == [1, -10, 1, -1, 1, 1]\r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"chefmohima/DS_Algo","sub_path":"rearrangeMaxMin.py","file_name":"rearrangeMaxMin.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25483210471","text":"n = int(input())#N筆資訊\r\ndatas = []#資訊\r\ndatas_change = []#小寫資訊\r\nfor i in range(n):\r\n data = input()\r\n 
datas.append(data)\r\n datas_change.append(data.lower())\r\nsearch = input().split()#關鍵字\r\nsearch_change = []#小寫關鍵字\r\nfor i in range(len(search)):\r\n search_change.append(search[i].lower())\r\nans = [[]for _ in range(n)]\r\nfor i in range(len(datas)):\r\n time = 0#關鍵字次數\r\n check = datas[i].split()\r\n check_change = datas_change[i].split()\r\n for j in range(len(search)):\r\n time += datas_change[i].count(search_change[j])\r\n for k in range(len(check)):\r\n for l in range(len(check_change[k])):\r\n\r\n pos = check_change[k].find(search_change[j])#關鍵字位置\r\n if pos != -1:\r\n check_change[k] = check_change[k][0:pos] + check_change[k][pos:pos+len(search_change[j]):].upper() + check_change[k][pos+len(search_change[j])::]\r\n check[k] = check[k].replace(check[k][pos:pos+len(search_change[j]):],search[j].upper())\r\n datas[i] = ' '.join(check)\r\n ans[i].append(0-time)\r\n ans[i].append(i)\r\n ans[i].append(datas[i])\r\nans.sort()\r\nfor i in range(len(ans)):\r\n print(ans[i][2])","repo_name":"Zixun55/ntut_python","sub_path":"計算機程式設計/046.py","file_name":"046.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"1905957732","text":"####################################################\n# FileName:functions.py\n# Author: Zorondras Rodriguez\n# Creation Date: December 5, 2020\n# Version: 0.13\n# Revision Date: December 9, 2020\n# Course: ENSF 310\n# Assignment: Final Project\n#\n# Description: module for functions for final project\n####################################################\n\n## container for required data to pass to the grabber for each commodity type\nclass commoData:\n ''' Class to hold commodity file and folder data for grabber'''\n def __init__(self,folderName:str,fileName:str):\n self.folderName = folderName\n self.fileName = fileName\n\n def __repr__(self):\n print(\"Folder: \" +self.folderName, \"FileName: \"+ self.fileName )\n\nclass timeInterval:\n 
'''\n Class / Data structure to hold two dates either as decimal dates or strings\n '''\n def __init__(self,T1:int,T2:int):\n self.T1 =T1\n self.T2 =T2\n\n def __repr__(self):\n print(\"Lower Time: \"+ self.T1 +\" Upper Time: \"+self.T2)\n\n\ndef getDateRange():\n ''' \n Ask for user input for a time range / date range of data to plot,\n return a timeInterval Object based on user specifications\n '''\n userResponse=input(\"Would you like to change the data range to plot? Y(y)/N(n):\\n\")\n\n if (userResponse == 'Y' or userResponse == 'y'): \n T1str = input (\"Enter the start date: YYYY-MM-DD:\\n\")\n T2str = input (\"Enter the end date: YYYY-MM-DD:\\n\")\n \n # now we are going to convert the strings to decimal date\n # and then load them into a timeInterval object and return it\n # we assume that the user entered a string in the proper format with year month day\n strSplit1=T1str.split('-',-1)\n strSplit2=T2str.split('-',-1)\n\n T1=int(strSplit1[0])+(int(strSplit1[1])-1)/12+int(strSplit1[2])/365\n T2=int(strSplit2[0])+(int(strSplit2[1])-1)/12+int(strSplit2[2])/365\n\n tRange=timeInterval(T1,T2)\n return tRange \n\n elif (userResponse == 'N' or userResponse == 'n'):\n return False \n else:\n print(\"Input was not Y,y,N or n, no action taken.\")\n return False\n\n\ndef applyDateRange(data):\n ''' Apply a date range to the input data set , return an output data set\n filters the input pandas dataframe and returns the filtered data\n '''\n dateRange = getDateRange() # get a date range from user input returns a timeScale object or boolean \n\n ## if user answered no then return the original data unfiltered\n if (dateRange == False):\n return data\n else: # otherwise filter the data\n dataOut = data[data['decimal_date'] >= dateRange.T1] # clip from the left range\n dataOut = dataOut[dataOut['decimal_date'] <= dateRange.T2] # clip from the right range \n return dataOut\n\n\ndef grabData( commodity: commoData, source:str):\n ''' \n Grab the dataset requested from the 
internet\n Input commodity is a string specific to the website datahub.io, or stock ticker symbol\n from the websites quandl.com,finance.yahoo.com, or marketstack.com \n '''\n import urllib.request, urllib.parse, urllib.error\n import requests\n import pandas as pd\n import os\n\n # These are seconds after 1970, used to index yahoo finance historical dataset (just go from 1970-2025)\n T1=0 # 1970 \n T2=(2025-1970)*(3600*24*(365)) # approximate number of seconds to 2021 excluding leap years etc close enough to full range\n\n quandlAPICode='BkxodE_kpxbAszq7rpPn' # My API code for quandl data\n marketStackAPICode='b0d09a6b303b8533e9f1b0091078fcfb' # My API code for marketstack data\n interval='15min' # 15min, 30min, 1h (Default), 3h, 6h,\n #interval='1h' # marketstack data interval\n ## setup the data link\n if (source == 'datahub'):\n resourceURL= 'https://datahub.io/core/'+ commodity.folderName + '/r/' + commodity.fileName +'.csv'\n hostname = 'datahub.io'\n elif (source =='quandl'):\n resourceURL = 'https://www.quandl.com/api/v3/datasets/' + commodity.folderName+'/'+commodity.fileName+'.csv?api_key='+quandlAPICode\n hostname = 'www.quandl.com'\n elif (source =='marketstack'):\n resourceURL ='http://api.marketstack.com/v1/'+commodity.folderName+'?access_key='+marketStackAPICode+'&symbols='+commodity.fileName+'&interval='+ interval\n hostname = 'api.marketstack.com'\n elif (source == 'yahoo'):\n resourceURL = \"https://query1.finance.yahoo.com/v7/finance/download/\"+commodity.fileName+\"?period1=\"+str(T1)+\"&period2=\"+str(T2)+\"&interval=1d&events=history&includeAdjustedClose=true\"\n\n if (os.name == 'nt'): # for Windows NT systems\n dataFolder=\"data\\\\\"\n else: # for unix/linux systems\n dataFolder=\"data/\"\n\n if (source == 'datahub' or source =='quandl' or source == 'yahoo' ):\n #https://docs.python.org/3/howto/urllib2.html\n #https://docs.python.org/3/library/urllib.request.html\n try: \n # Original strategy (based on the course notes)\n #fhand = 
urllib.request.urlopen(resourceURL) # open the resource and return a handle\n #read the data into a pandas dataframe\n #data = pd.read_csv(fhand) # read the data into a pandas file\n #save this as a text file csv in the directory for recall\n #data.to_csv(commodity.folderName+\"_\"+commodity.fileName +\".csv\") # This also writes the header to line 0\n #fhand.close() # close the connection\n \n ### Alternative strategy \n #https://docs.python.org/3/library/urllib.request.html\n local_filename, headers = urllib.request.urlretrieve(resourceURL)\n html = open(local_filename)\n data = pd.read_csv(html) # read into a pandas data frame from csv\n ## save the csv to the directory data/\n data.to_csv(dataFolder+commodity.folderName+\"_\"+commodity.fileName +\".csv\") # This also writes the header to line 0\n html.close() # close the conenction\n\n return data # return the pandas dataset data\n except urllib.error.HTTPError as e:\n print('The server couldn\\'t fulfill the request.')\n print('Error code: ', e.code)\n except urllib.error.URLError as e:\n print('We failed to reach a server.')\n print('Reason: ', e.reason)\n \n elif (source == 'marketstack'):\n #https: // marketstack.com/documentation\n api_result = requests.get(resourceURL) # , params)\n api_response = api_result.json() # convert from json into dict\n ## convert from json to pandas format\n data = pd.DataFrame(api_response['data'])\n #save this as a text file csv in the directory for recall\n data.to_csv(commodity.folderName+\"_\"+ commodity.fileName +\".csv\") # This also writes the header to line 0\n api_result.close() # close the connection\n #api_response.close() # close the response ( is this even required?)\n\n ### Alternative strategy (BROKEN)\n #https://docs.python.org/3/library/urllib.request.html\n #local_filename, headers = urllib.request.urlretrieve(resourceURL)\n #html = open(local_filename)\n #dataIn = html.json()\n ##dataIn = pd.read_json(html)\n #data = pd.DataFame(dataIn['data'])\n 
def playIntroMenu():
    '''Display the reduced first-run menu (no dataset loaded yet) and
    return the raw selection string typed by the user.'''
    print("Welcome to the Commodity and Stock Market Data Plotter:")
    print("1) Get data from online.")
    print("2) Open Data from file.")
    print("q) Quit Program\n")
    return input("Please select an Option:\n")


def playMenu():
    '''Display the full interactive menu and return the raw selection string.'''
    print("Welcome to the Commodity and Stock Market Data Plotter:")
    print("1) Get data from online.")
    print("2) Open Data from file.")
    print("3) Plot Data")
    print("4) Show head rows of loaded data")
    print("5) Show tail rows of loaded data")
    print("6) Crop Data")
    print("7) Save Current Data to Disk File")
    print("q) Quit Program\n")
    return input("Please select an Option:\n")


# Month abbreviations shared by the marketstack axis-label helpers below.
_MONTH_ABBREV = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
                 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]


def _month_lengths(year):
    '''Return the twelve month lengths for *year*.

    February is fixed up for leap years: divisible by 4, except century
    years that are not divisible by 400.  (This table was previously
    duplicated in both label generators.)
    '''
    lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
        lengths[1] = 29
    return lengths


def _next_calendar_day(day, month, lengths):
    '''Advance (day, month) by one calendar day, wrapping at month ends.

    The year itself is not tracked; a December 31 -> January 1 step simply
    wraps the month, which is all the short-window axis labels need.
    '''
    day = (day + 1) % lengths[month - 1]
    if day == 0:
        # the modulo mapped the last day of the month to 0; restore it
        day = lengths[month - 1]
    if day == 1:
        # rolled over into a new month; wrap December -> January
        month = (month + 1) % 12
        if month == 0:
            month = 12
    return day, month


def generateMarkStackHourLabels(startDay, endDay, data):
    '''Build hourly x-axis ticks and labels for very short (< ~4 day)
    marketstack windows.

    startDay/endDay : integer decimal-day bounds of the plot window
    data            : DataFrame with 'Year', 'Month' and 'Day' columns;
                      only the first row is read to seed the calendar.

    Midnight ticks are labelled "Mon D /00:00"; other hours "D /HH:00".
    Returns (tickLocations, labelStrings).
    '''
    import numpy as np

    ticks = np.arange(startDay, endDay, 1 / 24).tolist()  # one tick per hour
    lengths = _month_lengths(data['Year'].iloc[0])
    day = data['Day'].iloc[0]
    month = data['Month'].iloc[0]

    labels = []
    for k in range(len(ticks)):
        hour = k % 24
        if k > 0 and hour == 0:
            # crossed midnight: step the calendar one day forward
            day, month = _next_calendar_day(day, month, lengths)
        if hour == 0:
            # label the start of each day with its month and day number
            labels.append(f"{_MONTH_ABBREV[month - 1]} {day} /{hour:02d}:00")
        else:
            labels.append(f"{day} /{hour:02d}:00")
    return ticks, labels


def generateMarkStackDayLabels(startDay, endDay, data):
    '''Build daily x-axis ticks and "Mon D" labels for short (< 1 month)
    marketstack windows.  Calendar seeding matches the hourly variant.
    Returns (tickLocations, labelStrings).
    '''
    import numpy as np

    ticks = np.arange(startDay, endDay, 1).tolist()  # one tick per day
    lengths = _month_lengths(data['Year'].iloc[0])
    day = data['Day'].iloc[0]
    month = data['Month'].iloc[0]

    labels = []
    for k in range(len(ticks)):
        if k > 0:
            day, month = _next_calendar_day(day, month, lengths)
        labels.append(f"{_MONTH_ABBREV[month - 1]} {day}")
    return ticks, labels
[\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\",\n \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n # assume your not in a leap year # we can test for this and fix it if necessary\n daysInMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n startYear = data['Year'].iloc[0]\n # Decide if it is a leap year\n # divisible by 4 (except for years evenly divisible by 100, which are not leap years unless evenly divisible by 400\n if (startYear % 4 == 0 and startYear % 100 != 0 or startYear % 400 == 0): # if year is leap year\n daysInMonth[1] = 29 # fix the days in february to 29\n\n oldDay = data['Day'].iloc[0]\n oldMonth = data['Month'].iloc[0]\n tickDay = oldDay\n tickMonth = oldMonth\n ## For loop to construct the labelList with month dates\n ### MATH BASED SOLUTION\n label = \"\"\n for k in range(0, numTicks):\n ### if there is no data in the next tick period the tick day will stay constant\n ### This is an attempt at a bug fix for this problem\n if (k > 0):\n tickDay = (oldDay+1) % daysInMonth[oldMonth-1]\n if(tickDay == 0):\n # markup for non zero index of days\n tickDay = daysInMonth[oldMonth-1]\n if(tickDay == 1): # you must have rolled over to the next month\n # increment the month and wrap around if necessary\n tickMonth = (oldMonth+1) % 12\n if(tickMonth == 0): # if it mods to 0 change the month to 12\n tickMonth = 12 # markup for non zero indexing of months\n\n # store the oldDay and oldMonth as the new ones calculated\n oldDay = tickDay\n oldMonth = tickMonth\n ## construct the label as Month + day\n label = monthLabels[tickMonth - 1] + \" \" + str(tickDay)\n #Add the string label for the tick to the list\n labelList.append(label)\n\n return dayTicks, labelList\n\ndef generateDayAxisLabels(data):\n '''\n Helper function to generate x-axis day labels for dataPlotterPlus if the time scale range is < 4 months\n returns a list of tickmarks and x-axis labels based on the day data category in data which is a pandas dataframe\n '''\n 
labelList=list() # make a list for the labels\n tickLocations=data['decimal_date'].tolist() # make ticks at the location of the data\n monthLabels = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\",\n \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n numTicks=len(tickLocations)\n\n ## For loop to construct the labelList with days as labels\n label=\"\"\n oldMonth=0\n for k in range(0,numTicks):\n dayNum=data['Day'].iloc[k] # get the day of the year\n monthNum= data['Month'].iloc[k] # get the month number \n if ( dayNum==1 or k==0 or monthNum!=oldMonth): # for the first of the month label the month and the day\n year = data['Year'].iloc[k]\n label = str(dayNum)+\"\\n\\n\"+str(year)+\" \"+monthLabels[monthNum-1]\n else:\n label = str(dayNum) # otherwise the label is the day \n oldMonth=monthNum # update to the new month\n labelList.append(label) # add the string label for the tick to the list\n return tickLocations, labelList\n\ndef generateMonthAxisLabels(yearStart,yearEnd):\n '''\n Helper function to generate month axis labels to plot in dataPloterPlus \n if the time range is shorter than 3 years. 
def dataPlotterDouble(data, xCol: str, yCol1: str, yCol2: str, xLab: str, yLab1: str, yLab2: str,
                      newTicker: 'commoData', smaList: list, smaBool: bool, logBool: bool):
    '''Plot a two-pane figure: price-style series on top, volume below.

    data      : reformatted DataFrame (needs 'decimal_date', 'Month' and
                the xCol/yCol1/yCol2 columns)
    xCol      : x-axis column name (normally 'decimal_date')
    yCol1     : column for the large upper pane (e.g. 'Close')
    yCol2     : column for the narrow lower pane (e.g. 'Volume')
    xLab, yLab1, yLab2 : axis label strings
    newTicker : commoData whose folderName/fileName feed title and legend
    smaList   : SMA window lengths (days), drawn when smaBool is True
    logBool   : use a logarithmic y scale on the upper pane

    Tick style is picked from the visible time span:
      > 3 years                  -> plain numeric (decimal-year) axis
      1..3 years or > 3 months   -> month ticks (generateMonthAxisLabels)
      otherwise                  -> per-point day ticks (generateDayAxisLabels)
    Fixes vs. the original: the short-range branch plotted yCol1 on the
    volume pane instead of yCol2, and a span of exactly 3 years fell
    through to the short-range logic.
    '''
    import matplotlib.pyplot as plt
    from matplotlib.ticker import StrMethodFormatter

    colorList = ['b', 'g', 'r', 'c', 'm', 'y']

    fig = plt.figure(figsize=(16, 10))
    grid = plt.GridSpec(4, 1, wspace=0.4, hspace=0.4)
    ax1 = fig.add_subplot(grid[0:3, 0])  # price pane: top three grid rows
    ax2 = fig.add_subplot(grid[3, 0])    # volume pane: bottom row

    # Titles, axis labels and fonts.
    ax1.set(title=newTicker.folderName + '/' + newTicker.fileName)
    ax1.set(ylabel=yLab1)
    ax1.set(xlabel=xLab)
    ax1.title.set_fontsize(18)
    ax1.title.set_fontweight('bold')
    ax1.xaxis.label.set_fontsize(12)
    ax1.yaxis.label.set_fontsize(12)
    # No title on the lower pane: it would collide with ax1's x axis.
    ax2.set(ylabel=yLab2)
    ax2.set(xlabel=xLab)
    ax2.title.set_fontsize(14)
    ax2.title.set_fontweight('bold')
    ax2.xaxis.label.set_fontsize(12)
    ax2.yaxis.label.set_fontsize(12)
    ax1.grid('both')
    ax2.grid('both')

    nRows = int(len(data.index))
    if logBool:
        ax1.set_yscale('log')

    yearStart = int(data['decimal_date'].iloc[0])
    yearEnd = int(data['decimal_date'].iloc[nRows - 1]) + 1  # pad to the next whole year
    maxYears = int(data['decimal_date'].iloc[nRows - 1] - data['decimal_date'].iloc[0])

    if maxYears > 3:
        # Multi-year span: numeric decimal-year tick labels are enough.
        ax1.xaxis.set_major_formatter(StrMethodFormatter('{x:.2f}'))
        ax2.xaxis.set_major_formatter(StrMethodFormatter('{x:.2f}'))
    else:
        dMonths = data['Month'].iloc[nRows - 1] - data['Month'].iloc[0]
        if maxYears > 1 or dMonths > 3:
            ticks, labels = generateMonthAxisLabels(yearStart, yearEnd)
        else:
            ticks, labels = generateDayAxisLabels(data)
        for ax in (ax1, ax2):
            ax.set_xticks(ticks, minor=False)
            ax.set_xticklabels(labels, rotation=40)

    ax1.plot(data[xCol], data[yCol1], '-k')
    # BUG FIX: the day-label branch used to plot yCol1 on the volume pane.
    ax2.plot(data[xCol], data[yCol2], '-k')

    # Optional simple-moving-average overlays on the price pane only.
    legendList = [newTicker.fileName]
    if smaBool:
        for k in range(len(smaList)):
            smaDays = smaList[k]
            ax1.plot(data[xCol], data[yCol1].rolling(smaDays).mean(), '-' + colorList[k])
            legendList.append(str(smaDays) + " day SMA")
    ax1.legend(legendList, fontsize=16)

    # Clamp both panes to the data's x range; y limits follow the data.
    xL_lim = data[xCol].iloc[0]
    xU_lim = data[xCol].iloc[nRows - 1]
    ax1.set_xlim(xL_lim, xU_lim)
    ax2.set_xlim(xL_lim, xU_lim)

    plt.show()
    return
def dataPlotterPlus(data, xCol: str, yCol: str, xLab: str, yLab: str,
                    newTicker: 'commoData', smaList: list, smaBool: bool, logBool: bool):
    '''Plot one series against time with range-aware x-axis labelling.

    data      : reformatted DataFrame ('decimal_date' and 'Month' required)
    xCol/yCol : column names to plot (x is normally 'decimal_date')
    xLab/yLab : axis label strings
    newTicker : commoData whose folderName/fileName feed title and legend
    smaList   : SMA window lengths (days), drawn when smaBool is True
    logBool   : use a logarithmic y scale

    Tick style mirrors dataPlotterDouble:
      > 3 years                -> plain numeric (decimal-year) axis
      1..3 years or > 3 months -> month ticks (generateMonthAxisLabels)
      otherwise                -> per-point day ticks (generateDayAxisLabels)
    Fixes vs. the original: a span of exactly 3 years previously fell
    through to the short-range logic, and the four near-identical
    plot/SMA/legend branches are collapsed into one.
    '''
    import matplotlib.pyplot as plt
    from matplotlib.ticker import StrMethodFormatter

    colorList = ['b', 'g', 'r', 'c', 'm', 'y']

    plt.figure(figsize=(16, 10))
    plt.title(newTicker.folderName + '/' + newTicker.fileName, fontsize=18, fontweight='bold')
    plt.xlabel(xLab, fontsize=14, fontweight='bold')
    plt.ylabel(yLab, fontsize=14, fontweight='bold')
    plt.grid('both')

    nRows = int(len(data.index))
    ax = plt.gca()
    if logBool:
        ax.set_yscale('log')

    yearStart = int(data['decimal_date'].iloc[0])
    yearEnd = int(data['decimal_date'].iloc[nRows - 1]) + 1  # pad to the next whole year
    maxYears = int(data['decimal_date'].iloc[nRows - 1] - data['decimal_date'].iloc[0])

    if maxYears > 3:
        # Multi-year span: numeric decimal-year tick labels are enough.
        ax.xaxis.set_major_formatter(StrMethodFormatter('{x:.2f}'))
    else:
        dMonths = data['Month'].iloc[nRows - 1] - data['Month'].iloc[0]
        if maxYears > 1 or dMonths > 3:
            ticks, labels = generateMonthAxisLabels(yearStart, yearEnd)
        else:
            ticks, labels = generateDayAxisLabels(data)
        ax.set_xticks(ticks, minor=False)
        ax.set_xticklabels(labels, rotation=40)

    plt.plot(data[xCol], data[yCol], '-k')

    # Optional simple-moving-average overlays.
    legendList = [newTicker.fileName]
    if smaBool:
        for k in range(len(smaList)):
            smaDays = smaList[k]
            plt.plot(data[xCol], data[yCol].rolling(smaDays).mean(), '-' + colorList[k])
            legendList.append(str(smaDays) + " day SMA")
    plt.legend(legendList, fontsize=16)

    # Clamp x to the data range; y limits follow the data.
    ax.set_xlim(data[xCol].iloc[0], data[xCol].iloc[nRows - 1])

    plt.show()
    return
def dataPlotterMarketStack(data, xCol: str, yCol: str, xLab: str, yLab: str,
                           newTicker: 'commoData', smaList: list, smaBool: bool, logBool: bool):
    '''Plot short-window (intraday) marketstack data.

    data      : reformatted DataFrame ('decimal_day' and 'Year' required)
    xCol/yCol : column names to plot (x is normally 'decimal_day')
    xLab/yLab : axis label strings
    newTicker : commoData whose folderName/fileName feed title and legend
    smaList   : SMA window lengths, drawn when smaBool is True
    logBool   : use a logarithmic y scale

    Windows longer than 4 days get one labelled tick per day
    (generateMarkStackDayLabels); shorter windows get hourly ticks
    (generateMarkStackHourLabels).  The duplicated plot/SMA/legend
    branches of the original are collapsed into one path.
    '''
    import matplotlib.pyplot as plt

    colorList = ['b', 'g', 'r', 'c', 'm', 'y']

    plt.figure(figsize=(16, 10))
    plt.xlabel(xLab, fontsize=14, fontweight='bold')
    plt.ylabel(yLab, fontsize=14, fontweight='bold')
    plt.grid('both')
    ax = plt.gca()
    if logBool:
        ax.set_yscale('log')

    nRows = int(len(data.index))
    startDay = int(data['decimal_day'].iloc[0])          # lowest whole decimal day
    endDay = int(data['decimal_day'].iloc[nRows - 1]) + 1  # largest whole decimal day
    numDays = int(data['decimal_day'].iloc[nRows - 1] - data['decimal_day'].iloc[0])

    # Day-level ticks for ~week-long windows, hour-level ticks otherwise.
    if numDays > 4:
        ticks, labels = generateMarkStackDayLabels(startDay, endDay, data)
    else:
        ticks, labels = generateMarkStackHourLabels(startDay, endDay, data)
    ax.set_xticks(ticks, minor=False)
    ax.set_xticklabels(labels, rotation=40)

    plt.plot(data[xCol], data[yCol], '-k')

    # Optional simple-moving-average overlays.
    legendList = [newTicker.fileName]
    if smaBool:
        for k in range(len(smaList)):
            smaDays = smaList[k]
            plt.plot(data[xCol], data[yCol].rolling(smaDays).mean(), '-' + colorList[k])
            legendList.append(str(smaDays) + " day SMA")
    plt.legend(legendList, fontsize=16)

    # Clamp x to the data range; y limits follow the data.
    ax.set_xlim(data[xCol].iloc[0], data[xCol].iloc[nRows - 1])

    # Title set last so the year (read from the data) can be included.
    plt.title(newTicker.folderName + '/' + newTicker.fileName + " Year: " + str(data['Year'].iloc[0]),
              fontsize=18, fontweight='bold')

    plt.show()
    return
def dataPlotter(data, newTicker: 'commoData', smaList: list, smaBool: bool, logBool: bool):
    '''Basic plot of the 'Price' column against 'decimal_date', with
    optional SMA overlays and log y scale.  Used for datahub-style series
    that have a single Price column.'''
    import matplotlib.pyplot as plt

    colorList = ['b', 'g', 'r', 'c', 'm', 'y']

    plt.figure(figsize=(16, 10))
    plt.title(newTicker.folderName + '/' + newTicker.fileName, fontsize=18, fontweight='bold')
    plt.xlabel('Date', fontsize=14, fontweight='bold')
    plt.ylabel('Price [$]', fontsize=14, fontweight='bold')
    plt.grid('both')

    if logBool:
        plt.gca().set_yscale('log')

    plt.plot(data['decimal_date'], data['Price'], '-k')

    # Optional simple-moving-average overlays.
    legendList = [newTicker.fileName]
    if smaBool:
        for k in range(len(smaList)):
            smaDays = smaList[k]
            plt.plot(data['decimal_date'], data['Price'].rolling(smaDays).mean(), '-' + colorList[k])
            legendList.append(str(smaDays) + " day SMA")
    plt.legend(legendList, fontsize=16)

    plt.show()
    return


def createTicker():
    '''Interactively build a commodity descriptor.

    Returns (commoData(sourceFolder, tickerSymbol), source) where source
    is one of the vendor names the rest of the program dispatches on.
    '''
    source = input("Enter Data Source (quandl,datahub,marketstack,yahoo):\n")
    tickerSymbol = input("Enter Stock Ticker (e.g. MSFT):\n")
    sourceFolder = input("Enter Data Source Folder(eg. EOD,eod,intraday):\n")
    return commoData(sourceFolder, tickerSymbol), source


def _askYesNo(prompt):
    '''Return True when the user answers Y/y/yes (any case) to *prompt*.'''
    answer = input(prompt)
    return answer == 'Y' or answer == 'y' or answer.lower() == 'yes'


def _toInt(text, fallback):
    '''Parse *text* as an int, returning *fallback* on bad input instead
    of letting a ValueError crash the menu loop.'''
    try:
        return int(text)
    except ValueError:
        return fallback


def getPlotOptions():
    '''Prompt for plotting options.

    Returns (SMAlist, SMAbool, logBool):
      SMAlist : SMA window lengths in days (empty when SMAbool is False)
      SMAbool : whether to overlay simple moving averages
      logBool : whether the y axis should be logarithmic

    Robustness fix vs. the original: non-numeric answers no longer raise
    ValueError — the SMA count falls back to 1 and a bad window length
    falls back to the original 7+k day default.
    '''
    logBool = _askYesNo("Would you like the Y-axis to be in logarimic scale?:Y/N\n")

    SMAlist = list()
    if not _askYesNo("Would you like to plot a Simple Moving Average (SMA):Y/N\n"):
        return SMAlist, False, logBool

    numSMAs = _toInt(input("How many SMA's would you like to plot?:\n"), 1)
    numSMAs = max(1, min(numSMAs, 5))  # clamp to 1..5 overlays, as before

    for k in range(numSMAs):
        SMAdays = _toInt(input("How many days are in the rolling SMA for SMA" + str(k) + "?:\n"), 0)
        # windows below 1 day fall back to the original 7+k day default
        SMAlist.append(SMAdays if SMAdays >= 1 else 7 + k)
    return SMAlist, True, logBool


def openFileFromDisk():
    '''Prompt for a csv filename inside the data/ folder and return its
    contents as a pandas DataFrame, or None when it cannot be read.

    Fixes vs. the original: the file handle is closed via a context
    manager (it used to leak), and only OSError/ValueError are caught
    instead of a bare except that hid every failure.
    '''
    import pandas as pd
    import os

    if os.name == 'nt':   # Windows path separator
        dataFolder = "data\\"
    else:                 # unix/linux
        dataFolder = "data/"

    fileName = input("Enter the filename from folder:" + dataFolder + "\n")
    try:
        with open(dataFolder + fileName, 'r') as fhand:
            return pd.read_csv(fhand)
    except (OSError, ValueError):  # missing/unreadable file or unparsable csv
        print("The file " + fileName + " could not be openned for reading.")
        return None
def _splitIsoDates(dateStrings):
    '''Split ISO "YYYY-MM-DD" strings into parallel integer year, month
    and day lists plus an approximate decimal-year list.

    The decimal date uses (month-1)/12 + day/365 — deliberately the same
    coarse formula the plotters were built around (no leap handling).
    Previously this loop was duplicated in both reformat functions.
    '''
    years, months, days, decimals = [], [], [], []
    for text in dateStrings:
        y, m, d = (int(part) for part in text.split('-', -1)[:3])
        years.append(y)
        months.append(m)
        days.append(d)
        decimals.append(y + (m - 1) / 12 + d / 365)
    return years, months, days, decimals


def reformatQuandlInputData(data):
    '''Convert quandl-style data (datetime64 'Date' column) into a compact
    frame with integer Year/Month/Day, a float decimal_date, and the OHLCV
    columns.  Kept for backward compatibility; reformatGrabberInputData
    performs the same job for string dates.'''
    import pandas as pd
    import numpy as np

    # Render each datetime64 as "YYYY-MM-DD" before splitting.
    isoStrings = [np.datetime_as_string(item, unit='D') for item in data['Date'].values]
    years, months, days, decimals = _splitIsoDates(isoStrings)

    return pd.DataFrame({'Year': years, 'Month': months, 'Day': days, 'decimal_date': decimals,
                         'Open': data['Open'].values, 'High': data['High'], 'Low': data['Low'],
                         'Close': data['Close'], 'Volume': data['Volume']})


def reformatGrabberInputData(data, source: str):
    '''Reformat downloaded data (string 'Date' column) into the frame
    layout the plotters expect.

    source selects the value columns carried over:
      'quandl'  -> Open/High/Low/Close/Volume
      'datahub' -> Price
      'yahoo'   -> Open/High/Low/Close/Adj_Close/Volume
    Any other source returns None (unchanged behaviour, now explicit).
    '''
    import pandas as pd

    years, months, days, decimals = _splitIsoDates(data['Date'].values)
    columns = {'Year': years, 'Month': months, 'Day': days, 'decimal_date': decimals}

    if source == 'quandl':
        columns.update({'Open': data['Open'], 'High': data['High'], 'Low': data['Low'],
                        'Close': data['Close'], 'Volume': data['Volume']})
    elif source == 'datahub':
        columns.update({'Price': data['Price']})
    elif source == 'yahoo':
        columns.update({'Open': data['Open'], 'High': data['High'], 'Low': data['Low'],
                        'Close': data['Close'], 'Adj_Close': data['Adj Close'],
                        'Volume': data['Volume']})
    else:
        return None
    return pd.DataFrame(columns)


def reformatMarketStackInputData(data, folder: str):
    '''Reformat marketstack data ('date' strings like
    "YYYY-MM-DDTHH:MM:SS...") into a frame with calendar columns plus
    decimal_date (years) and decimal_day (approximate day-of-year with
    hour/minute fractions), sorted ascending by decimal_date.

    folder: 'intraday' keeps the extra 'Last' column; 'eod' omits it.
    Any other folder returns None (unchanged behaviour, now explicit).
    Resolution is one minute — rework if seconds ever matter.
    '''
    import pandas as pd

    years, months, days = [], [], []
    hours, minutes = [], []
    decimalDates, decimalDays = [], []

    for item in data['date'].values:
        datePart, timePart = item.split('T', 1)  # calendar date vs. clock time
        y, m, d = (int(part) for part in datePart.split('-', -1)[:3])
        clock = timePart.split(':', -1)
        hh = int(clock[0])
        mm = int(clock[1])

        years.append(y)
        months.append(m)
        days.append(d)
        hours.append(hh)
        minutes.append(mm)
        decimalDates.append(y + (m - 1) / 12 + d / 365 + hh / (24 * 365) + mm / (60 * 24 * 365))
        decimalDays.append(365 * (m - 1) / 12 + d + hh / 24 + mm / (24 * 60))

    columns = {'Year': years, 'Month': months, 'Day': days, 'Hour': hours, 'Minute': minutes,
               'decimal_date': decimalDates, 'decimal_day': decimalDays}
    if folder == "intraday":
        columns['Last'] = data['last'].values
    elif folder != "eod":
        return None
    columns.update({'Open': data['open'].values, 'High': data['high'], 'Low': data['low'],
                    'Close': data['close'], 'Volume': data['volume']})
    return pd.DataFrame(columns).sort_values(by='decimal_date', ascending=True)


def clearScreen():
    '''Clear the terminal window ('cls' on Windows NT, 'clear' elsewhere).'''
    import os
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
another data vendor like datahub\n data=pd.DataFrame([]) # point to Null\n newTicker=commoData(\"NULL\",\"NULL\")\n\n clearScreen()# clear the terminal on restart \n ### play the menu\n response=playIntroMenu()\n ### first run has no data handle this and get data or quit\n if (response == '1'):\n newTicker,source = createTicker() # Ask the user for the ticker info\n # download the data set selected by the user\n try:\n dataIn = grabData(newTicker, source)\n if (source == 'quandl' or source == 'datahub' or source=='yahoo'):\n data = reformatGrabberInputData(dataIn, source).sort_values(by='decimal_date', ascending=True) # fix up the data for plotting\n elif(source == 'marketstack'): \n data = reformatMarketStackInputData(dataIn,newTicker.folderName).sort_values(by='decimal_date',ascending=True)\n except:\n print(\"Data could not be retrieved. Try again\")\n pass\n elif (response == '2'):\n try:\n dataIn = openFileFromDisk() # open the data file from disk\n newTicker,source = createTicker() # Ask the user for the ticker info\n if (source == 'quandl' or source == 'datahub' or source == 'yahoo'):\n data = reformatGrabberInputData(dataIn, source).sort_values(by='decimal_date', ascending=True) # fix up the data for plotting\n elif(source == 'marketstack'):\n data = reformatMarketStackInputData(dataIn,newTicker.folderName).sort_values(by='decimal_date',ascending=True) \n except:\n print(\"Data could not be Loaded. Try again\")\n pass # return to loop start\n \n elif (response == 'q' or response == 'Q'):\n print(\"Thanks for playing, see you later!\\n\")\n quit() # exit the program\n else:\n print(\"Bad selection. 
Try again.\\n\")\n\n response='Y' # set this to enter the loop \n #### Menu Loop to open/grab/plot data\n while (response != 'q' and response != 'Q'): # changed from while True, could not seem to break the loop (bug)\n ### play the menu\n response=playMenu()\n if (response == '1'):\n clearScreen() # clear the terminal on each return to the Menu from plotting \n newTicker, source = createTicker() # Ask the user for the ticker info\n try:\n dataIn=grabData(newTicker,source) # download the data set selected by the user\n if (source == 'quandl' or source == 'datahub' or 'yahoo'):\n data = reformatGrabberInputData(dataIn, source).sort_values(by='decimal_date', ascending=True) # fix up the data for plotting\n elif(source == 'marketstack'): \n data = reformatMarketStackInputData(dataIn,newTicker.folderName).sort_values(by='decimal_date',ascending=True)\n except:\n print(\"Data could not be retrieved. Try again.\")\n print(newTicker.fileName,newTicker.folderName,source)\n continue\n elif (response == '2'):\n try:\n dataIn=openFileFromDisk() # open the data file from disk\n newTicker,source = createTicker() # Ask the user for the ticker info \n if (source == 'quandl' or source == 'datahub' or source == 'yahoo' ):\n data = reformatGrabberInputData(dataIn, source).sort_values(by='decimal_date', ascending=True) # fix up the data for plotting\n elif(source == 'marketstack'): \n data = reformatMarketStackInputData(dataIn,newTicker.folderName).sort_values(by='decimal_date',ascending=True)\n except:\n print(\"Data could not be loaded.Try again\")\n print(newTicker.fileName, newTicker.folderName, source)\n continue\n \n elif (response == '3'):\n ## we assume that the data already exists from step 1 or 2 above \n SMAdays,SMABool,logBool = getPlotOptions() # get some plotting options \n dataOut = applyDateRange(data) # Ask user if they want to plot a range of data\n #clearScreen() # clear the terminal on each return to the Menu from plotting \n\n if (source == 'quandl' ): \n try:\n 
dataPlotterPlus(dataOut,'decimal_date','Close','Date', 'Price [$]', newTicker, SMAdays, SMABool,logBool)\n except:\n print(\"Some Plotting Parameter is Wrong. Investigate and Try again.\")\n continue\n elif(source == 'datahub'):\n try:\n # dataPlotter(dataOut, newTicker, SMAdays, SMABool) # deprecated\n dataPlotterPlus(dataOut,'decimal_date','Price','Date','Price [$]',newTicker,SMAdays,SMABool,logBool)\n except:\n print(\"Some Plotting Parameter is Wrong. Investigate and Try again.\")\n continue\n elif (source =='marketstack' and newTicker.folderName == 'intraday'):\n try:\n dataPlotterMarketStack(dataOut,'decimal_day','Last','Date','Price [$]',newTicker,SMAdays,SMABool,logBool)\n except:\n print(\"Some Plotting Parameter is Wrong. Investigate and Try again.\")\n continue\n elif (source =='marketstack' and newTicker.folderName == 'eod'):\n try:\n dataPlotterPlus(dataOut,'decimal_date','Close','Date','Price [$]',newTicker,SMAdays,SMABool,logBool)\n except:\n print(\"Some Plotting Parameter is Wrong. Investigate and Try again.\")\n continue\n elif (source =='yahoo'):\n #try:\n dataPlotterDouble(dataOut,'decimal_date','Close','Volume','Date','Price [$]','Volume',newTicker,SMAdays,SMABool,logBool)\n #except:\n # print(\"Some Plotting Parameter is Wrong. 
Investigate and Try again.\")\n # continue\n elif (response == '4'):\n ## we assume that the data already exists from step 1 or 2 above\n clearScreen()\n print(data.head()) # This has stopped working\n #print(\"There should be data rows from head here!\")\n elif (response == '5'):\n ## we assume that the data already exists from step 1 or 2 above\n clearScreen()\n print(data.tail()) # This has stopped working....\n #print(\"There should be data rows from tail here!\")\n elif(response == '6'):\n clearScreen() # clear the terminal on each return to the Menu from plotting \n print(\"Data clipping will overwrite the current dataset!!\") \n print(\"This will return a crop of the original data for plotting or saving to disk\")\n # Ask user if they want to plot a range of data\n data = applyDateRange(data) \n elif(response == '7'):\n ### save the dataFrame to a csv file (usefull after data crop)\n clearScreen()\n fileOutName = input(\"Enter a filename to save:\\n\") \n data.to_csv(fileOutName)\n print(fileOutName + \" was saved to disk.\")\n elif (response == 'q' or response == 'Q'):\n break\n else:\n print(\"Bad selection. Try again.\\n\")\n\n print(\"Out of the Loop...Exiting Program\") # This works now\n print(\"Thanks for playing, see you later!\\n\")\n return True\n################################ END OF MAIN PROGRAM #####################################\n\n\n\n############################## DEPRECATED #############################################################\n\n## This is now handled in main\ndef handleIntroUserSelection(response): \n if (response == '1'):\n commodity,source=dataOptions()# show the options \n data =grabData(commodity,source) # download the data set selected by the user\n return data,commodity\n elif (response == '2'):\n data=openFileFromDisk() # open the data file from disk\n return data\n elif (response == 'q' or response == 'Q'):\n print(\"Thanks for playing, see you later!\\n\")\n quit() # exit the program\n else:\n print(\"Bad selection. 
Try again.\\n\")\n\n### This is now handled in main\ndef handleUserSelection(response, data): \n if (response == '1'):\n commodity=dataOptions()# show the options \n data=grabData(commodity) # download the data set selected by the user\n return data\n elif (response == '2'):\n data=openFileFromDisk() # open the data file from disk\n return data\n elif (response == '3'): \n kday,smaBool=getPlotOptions() \n dataPlotter(data)\n return data\n elif (response == 'q' or response == 'Q'):\n print(\"Thanks for playing, see you later!\\n\")\n quit() # exit the program\n else:\n print(\"Bad selection. Try again.\\n\")\n\n### No longer required (project switched to Quandl data)\ndef dataOptions():\n print(\"The following data sources are available:\\n\")\n print(\"0) Stock Ticker\")\n print(\"1) Natural Gas.\")\n print(\"2) West Texas Intermediate (WTI).\")\n print(\"3) Brent Crude\")\n print(\"4) Gold \")\n print(\"5) 10 Year T-bill\")\n print(\"6) S&P 500\")\n userSelection = input(\"Please select an Option:\\n\")\n if (userSelection == \"0\"):\n tickerSymbol = input(\"Enter Stock Ticker:\\n\")\n sourceFolder = input(\"Enter Quandl Data Source Folder:\\n\")\n commodity = commoData(sourceFolder, tickerSymbol)\n return commodity, \"quandl\"\n if (userSelection == \"1\"):\n commodity = commoData(\"natural-gas\", \"daily\")\n return commodity, \"datahub\"\n elif (userSelection == \"2\"):\n commodity = commoData(\"oil-prices\", \"wti-daily\")\n return commodity, \"datahub\"\n elif (userSelection == \"3\"):\n commodity = commoData(\"oil-prices\", \"brent-daily\")\n return commodity, \"datahub\"\n elif (userSelection == \"4\"):\n commodity = commoData(\"gold-prices\", \"monthly\")\n return commodity, \"datahub\"\n elif (userSelection == \"5\"):\n commodity = commoData(\"bond-yields-us-10y\", \"monthly\")\n return commodity, \"datahub\"\n elif (userSelection == \"6\"):\n commodity = commoData(\"s-and-p-500\", \"data\")\n return commodity, \"datahub\"\n else:\n print(\"Bad selection, 
please try again!\\n\")\n return\n\n############################################ END OF FILE #################################################\n","repo_name":"zarodrig/MarketPlots","sub_path":"marketPlotsModule.py","file_name":"marketPlotsModule.py","file_ext":"py","file_size_in_byte":54757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39173561967","text":"from __future__ import print_function\nimport copy\n\nimport torch.utils.data as data\nimport random\nfrom PIL import Image\nfrom dataloader import preprocess\nfrom dataloader import readpfm as rp\nimport numpy as np\nimport math\n\n# train/ validation image crop size constants\nDEFAULT_TRAIN_IMAGE_HEIGHT = 256\nDEFAULT_TRAIN_IMAGE_WIDTH = 512\n# DEFAULT_TRAIN_IMAGE_HEIGHT = 540\n# DEFAULT_TRAIN_IMAGE_WIDTH = 960\n\n\ndef default_loader(path):\n return Image.open(path).convert('RGB')\n\n\ndef disparity_loader(path):\n return rp.readPFM(path)\n\n\nclass SceneflowLoader(data.Dataset):\n def __init__(self, left_images, right_images, left_disparity, left_cam, right_cam, network_downsample_scale, training, loader=default_loader, dploader=disparity_loader):\n\n self.left_images = left_images\n self.right_images = right_images\n self.left_disparity = left_disparity\n self.left_cam = left_cam\n self.right_cam = right_cam\n self.loader = loader\n self.dploader = dploader\n self.training = training\n\n # network_downsample_scale denotes maximum times the image features are downsampled by the network.\n # Since the image size used for evaluation may not be divisible by the network_downsample_scale,\n # we pad it with zeros, so that it becomes divible and later unpad the extra zeros.\n self.downsample_scale = network_downsample_scale\n\n def __getitem__(self, index):\n left_img_fn = self.left_images[index]\n right_img_fn = self.right_images[index]\n left_disp = self.left_disparity[index]\n left_cam = copy.deepcopy(self.left_cam[index])\n right_cam = 
copy.deepcopy(self.right_cam[index])\n\n left_img = self.loader(left_img_fn)\n right_img = self.loader(right_img_fn)\n left_disp, left_scale = self.dploader(left_disp)\n left_disp = np.ascontiguousarray(left_disp, dtype=np.float32)\n\n # if self.training:\n w, h = left_img.size\n th, tw = DEFAULT_TRAIN_IMAGE_HEIGHT, DEFAULT_TRAIN_IMAGE_WIDTH\n\n x1 = random.randint(0, w - tw)\n y1 = random.randint(0, h - th)\n\n left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))\n right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))\n left_disp = left_disp[y1:y1 + th, x1:x1 + tw]\n left_cam['intrinsics']['cx'] -= x1\n left_cam['intrinsics']['cy'] -= y1\n right_cam['intrinsics']['cx'] -= x1\n right_cam['intrinsics']['cy'] -= y1\n\n processed = preprocess.get_transform()\n left_img = processed(left_img)\n right_img = processed(right_img)\n\n return left_img, right_img, left_disp, left_cam, right_cam, left_img_fn, right_img_fn\n # else:\n # w, h = left_img.size\n #\n # dw = w + (self.downsample_scale - (w%self.downsample_scale + (w%self.downsample_scale==0)*self.downsample_scale))\n # dh = h + (self.downsample_scale - (h%self.downsample_scale + (h%self.downsample_scale==0)*self.downsample_scale))\n #\n # # if w-dw < 0, crop() will pad with black pixels\n # left_img = left_img.crop((w - dw, h - dh, w, h))\n # right_img = right_img.crop((w - dw, h - dh, w, h))\n # left_disp_tmp = np.zeros((max(dh, h), max(dw, w)), dtype=np.float32)\n # sh = max(0, dh - h) // 2\n # sw = max(0, dw - w) // 2\n # left_disp_tmp[sh:(sh+h), sw:(sw+w)] = left_disp\n # sh = max(0, h - dh) // 2\n # sw = max(0, w - dw) // 2\n # left_disp = left_disp_tmp[sh:(sh+dh), sw:(sw+dw)]\n # left_cam['intrinsics']['cx'] += dw - w\n # left_cam['intrinsics']['cy'] += dh - h\n # right_cam['intrinsics']['cx'] += dw - w\n # right_cam['intrinsics']['cy'] += dh - h\n #\n # processed = preprocess.get_transform()\n # left_img = processed(left_img)\n # right_img = processed(right_img)\n #\n # return left_img, right_img, left_disp, 
dw-w, dh-h, left_cam, right_cam, left_img_fn, right_img_fn\n\n def __len__(self):\n return len(self.left_images)","repo_name":"McMvMc/790NLPRoboVision","sub_path":"790NLPRoboVision/code/dataloader/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"15202342631","text":"str = list(set(input()))\nstr.sort()\nfor item in str:\n print(item,end=\"\")\n\n# str1 = list(input())\n# k = -1\n# l = len(str1)\n\n\n# while k != (0 - l):\n# if str1.count(str1[k]) != 1:\n# temp = str1[k]\n# str1.remove(temp)\n# # temp = str1[k]\n# # #while j < str1.count(str1[k]):\n# # for j in range(0, str1.count(str1[k])):\n# # str1.remove(temp)\n# l = len(str1)\n# k -= 1\n# else:\n# k -= 1\n\n# str1.sort()\n# for i in range(len(str1)):\n# print(str1[i], end = '')\n# print(str1)\n# print(str1.count(str1[0]))\n# str1.remove('a')\n# print(str1)","repo_name":"CHENTHIRTEEN/PTA-PYTHON","sub_path":"chap3/3-16.py","file_name":"3-16.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"38878259226","text":"# funcion para la generacion y pasaje de manera comoda el vector idiomas\ndef idiomas():\n vec_idiomas = ['Español', 'Ingles', 'Frances', 'Italiano', 'otros']\n\n return vec_idiomas\n\n\n# funcion para la generacion y pasaje de manera comoda el vector de generos\ndef generos():\n vec_generos = ['auto ayuda', 'arte', 'ficcion', 'computacion', 'economia', 'escolar', 'ficcion',\n 'gastronomia', 'infatil', 'otros']\n\n return vec_generos\n\n\n# funcion para el ordenamiento de titulos de forma acendente\ndef shell_sort_for_titles(vec_libros):\n n = len(vec_libros)\n h = 1\n while h <= n // 9:\n h = 3*h + 1\n\n while h > 0:\n for j in range(h, n):\n y = vec_libros[j] # registro\n k = j - h\n while k >= 0 and y.titulo < vec_libros[k].titulo: # titulo(ordenar los 
titulos)\n vec_libros[k + h] = vec_libros[k]\n k -= h\n vec_libros[k + h] = y\n h //= 3\n\n\n# funcion para sacar y sumar el diez porciento del precio con ISBN\ndef diez_porciento(precio):\n diez_porcentaje = (10 * precio) / 100 + precio\n\n return diez_porcentaje\n\n\n# funcion para determinar el genero y contarlo\ndef contar_libros_por_genero(vector_registro):\n vec_conteo = [0] * len(generos())\n\n for i in range(len(vector_registro)):\n if vector_registro[i].genero == 0:\n vec_conteo[0] += 1\n\n elif vector_registro[i].genero == 1:\n vec_conteo[1] += 1\n\n elif vector_registro[i].genero == 2:\n vec_conteo[2] += 1\n\n elif vector_registro[i].genero == 3:\n vec_conteo[3] += 1\n\n elif vector_registro[i].genero == 4:\n vec_conteo[4] += 1\n\n elif vector_registro[i].genero == 5:\n vec_conteo[5] += 1\n\n elif vector_registro[i].genero == 6:\n vec_conteo[6] += 1\n\n elif vector_registro[i].genero == 7:\n vec_conteo[7] += 1\n\n elif vector_registro[i].genero == 8:\n vec_conteo[8] += 1\n\n elif vector_registro[i].genero == 9:\n vec_conteo[9] += 1\n\n return vec_conteo\n\n\n# funcion que define el mayor genero ofrecido del total de libros\ndef buscar_genero_mas_ofrecido(vector_conteo):\n indice_de_may = 0\n\n for i in range(len(vector_conteo)):\n if vector_conteo[i] > vector_conteo[indice_de_may]:\n indice_de_may = i\n\n return indice_de_may\n\n\n# funcion que busca el ISBN consultado\ndef linear_search_isbn(isbn_a_buscar, vec_libros):\n n = len(vec_libros)\n for i in range(n):\n if isbn_a_buscar == vec_libros[i].ISBN:\n return i\n return -1\n\n\n# funcion para aumentar el diez porciento del precio total\ndef aumentar_precio_libro(vec_libros, indice_de_libro):\n precio = vec_libros[indice_de_libro].precio\n vec_libros[indice_de_libro].precio = round((precio * 0.1) + precio, 2)\n\n\n# funcion que sirve para determinar el mayor precio para un idioma\ndef buscar_mayor_precio_de_libro_de_idioma(idioma, vec_libros):\n precio_may = 0\n indice_de_may = 0\n for i in 
range(len(vec_libros)):\n if vec_libros[i].idioma == idioma and vec_libros[i].precio > precio_may:\n precio_may = vec_libros[i].precio\n indice_de_may = i\n\n return indice_de_may\n\n\n# funcion para el ordenamiento de precios de mayor a menor\ndef shell_sort_for_precios(vec_libros):\n n = len(vec_libros)\n h = 1\n while h <= n // 9:\n h = 3*h + 1\n\n while h > 0:\n for j in range(h, n):\n y = vec_libros[j]\n k = j - h\n while k >= 0 and y.precio > vec_libros[k].precio:\n vec_libros[k + h] = vec_libros[k]\n k -= h\n vec_libros[k + h] = y\n h //= 3\n\n\n# funcion para buscar los ISBN cargados en el sistema\ndef linear_search_isbn_group(vec_libros, vec_isbn):\n n = len(vec_libros)\n indices_de_isbn_buscados = [-1] * len(vec_isbn)\n\n for i in range(n):\n for j in range(len(vec_isbn)):\n if vec_libros[i].ISBN == vec_isbn[j]:\n indices_de_isbn_buscados[j] = i\n\n return indices_de_isbn_buscados\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"shackGerc/2021_AED_TP3","sub_path":"logica.py","file_name":"logica.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38922689880","text":"import spectral\nimport numpy as np\nimport scipy.io as sio\nfrom torch import nn\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef map_result(data_class, data, out_dir, label, predicted, idx):\n if data_class == \"IP\":\n colors = np.array([[255, 255, 255],\n [255, 218, 185],\n [150, 205, 205],\n [0, 229, 238],\n [0, 139, 139],\n [0, 0, 205],\n [0, 255, 0],\n [255, 255, 0],\n [255, 105, 106],\n [255, 69, 0],\n [255, 0, 0],\n [205, 38, 38],\n [205, 0, 205],\n [139, 0, 139],\n [105, 105, 105]])\n elif data_class == \"PU\":\n colors = np.array([[255, 255, 255],\n [255, 218, 185],\n [150, 205, 205],\n [0, 229, 238],\n [0, 139, 139],\n [0, 0, 205],\n [0, 255, 0],\n [255, 255, 0],\n [255, 105, 106],\n [255, 69, 0]])\n elif data_class == \"SV\":\n colors = np.array([[255, 255, 255],\n 
[255, 218, 185],\n [150, 205, 205],\n [0, 229, 238],\n [0, 139, 139],\n [0, 0, 205],\n [0, 255, 0],\n [255, 255, 0],\n [255, 105, 106],\n [255, 69, 0],\n [255, 0, 0],\n [205, 38, 38],\n [205, 0, 205],\n [139, 0, 139],\n [105, 105, 105]])\n else:\n colors = np.array([[255, 255, 255],\n [255, 218, 185],\n [150, 205, 205],\n [0, 229, 238],\n [0, 139, 139],\n [0, 0, 205],\n [0, 255, 0],\n [255, 255, 0],\n [255, 105, 106],\n [255, 69, 0],\n [255, 0, 0],\n [205, 38, 38],\n [205, 0, 205],\n [139, 0, 139]])\n img = spectral.imshow(classes=data.astype(int), figsize=(9, 9), colors=colors)\n path_cam_labels = os.path.join(out_dir, \"cam_lab-%s_pre-%s_idx-%s.jpg\" % (label, predicted, idx))\n plt.savefig(path_cam_labels, dpi=100)\n plt.cla()\n plt.close()\n # plt.show()\n\n\ndef loadData(name):\n data_path = os.path.join(os.getcwd(), 'data')\n if name == 'IP':\n labels = sio.loadmat(os.path.join(data_path, 'indian_pines_gt.mat'))['indian_pines_gt']\n return labels\n elif name == 'SV':\n labels = sio.loadmat(os.path.join(data_path, 'salinas_gt.mat'))['salinas_gt']\n return labels\n elif name == 'UP':\n labels = sio.loadmat(os.path.join(data_path, 'paviaU_gt.mat'))['paviaU_gt']\n return labels\n elif name == 'KSC':\n labels = sio.loadmat(os.path.join(data_path, 'KSC_gt.mat'))['KSC_gt']\n return labels\n else:\n print(\"NO DATASET\")\n exit()\n\n\nclass eca_layer(nn.Module):\n \"\"\"Constructs a ECA module.\n Args:\n channel: Number of channels of the input feature map\n k_size: Adaptive selection of kernel size\n \"\"\"\n\n def __init__(self, channel, k_size=3):\n super(eca_layer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n # x: input features with shape [b, c, h, w]\n b, c, h, w = x.size()\n\n # feature descriptor on the global spatial information\n y = self.avg_pool(x)\n\n # Two different branches of ECA module\n y = 
self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)\n\n # Multi-scale information fusion\n y = self.sigmoid(y)\n\n return x * y.expand_as(x)\n","repo_name":"luolihrbeu/Lightweight-Spectral-Spatial-Attention-Network","sub_path":"HSI_UP/util/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20884357005","text":"#!/usr/bin/python3\nimport sys\nfrom parser import parse\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n if len(argv) != 3:\n print(\"Usage: \")\n inputName = argv[1]\n outputName = argv[2]\n inFile = open(inputName, 'r')\n sourceCode = inFile.read()\n try:\n tree = parse(sourceCode)\n compiledCode = tree.generateCode()\n print(compiledCode)\n except SyntaxError:\n print(\"Syntax error. Rest of tokens:\", tokens[pos:])\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"nedn/MinCompiler","sub_path":"minCompile.py","file_name":"minCompile.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8200926262","text":"import cupy as cp\nimport numpy as np\n\n\nfrom gpuaffman_networks import general_network, ragged_general_network\n\n\ndef test_ragged_k_state_update_extra_dims():\n batch_size = 100\n N = 15\n k_true = 3\n k_cont = 5\n states = np.random.binomial(1, 0.5, (batch_size, N)).astype(np.bool_)\n true_connectivity = np.random.randint(0, N, (batch_size, N, k_true))\n true_functions = np.random.binomial(1, 0.5, (batch_size, N, 1< str :\n return str(self.__list)\n \n def __repr__ (self) -> list :\n return self.__list\n\n def __len__(self) -> int :\n return len(self.__list)\n\n def __make_heap (self) : # O(n*log(n)) or O(n)\n n = len(self.__list)\n for k in range(n//2-1, -1, -1) :\n self.__heapify_down(k)\n\n def __heapify_down (self, k) : # O(log(n))\n n = 
len(self.__list)\n while k < n//2 :\n if k*2+2 <= n-1 : index_max = max([k,2*k+1,2*k+2], key = lambda i: self.__list[i])\n else : index_max = max([k,2*k+1], key = lambda i: self.__list[i])\n if not index_max == k : \n self.__list[index_max], self.__list[k] = self.__list[k], self.__list[index_max]\n k = index_max\n else : break\n \n def __heapify_up (self, k) : # O(log(n))\n while k > 0 :\n if self.__list[k] > self.__list[(k-1)//2] : \n self.__list[k], self.__list[(k-1)//2] = self.__list[(k-1)//2], self.__list[k]\n k = (k-1)//2\n else : break\n\n def insert (self, value) : # O(log(n))\n k = len(self.__list)\n self.__list.append(value)\n self.__heapify_up(k)\n \n def find_max (self) -> int : # O(1)\n return self.__list[0]\n \n def delete_max (self) -> int : # O(log(n))\n self.__list[0], self.__list[-1] = self.__list[-1], self.__list[0]\n return_val = self.__list.pop()\n self.__heapify_down(0)\n return return_val\n \n def heap_sort (self, reverse=False) -> list : # O(n*log(n))\n n = len(self.__list) # O(1)\n __list = self.__list[:] # O(n). 얕은 복사\n temp = [None] * n # O(1)\n if reverse : \n for i in range(n) : temp[i] = self.delete_max() # O(n*log(n))\n else :\n for i in range(n-1,-1,-1) : temp[i] = self.delete_max() # O(n*log(n))\n self.__list = __list # O(1)\n return temp\n\n def heap_plot (self) : # 나중에 그려봅시다.\n pass\n\ndef __main() :\n lst = [0,10,3,7,11,13,30,6,12]\n x = Heap(lst)\n print(x)\n x.insert(50)\n print(x)\n x.insert(70)\n print(x)\n print(x.delete_max())\n print(x)\n print(x.delete_max())\n print(x)\n print(x.find_max())\n print(x.heap_sort())\n print(x.heap_sort(reverse=True))\n print(x)\n\ndef __timechk () : # 그냥 sorted가 왜 더 빠를까?! 힙 의미가 있을까?? 
최대값을 하나씩 제외할 때 의미가 있을 지도\n from datetime import datetime\n lst = list(range(1000000))\n start = datetime.now()\n print(sorted(lst,reverse=True)[0])\n end = datetime.now()\n print(end - start)\n start = datetime.now()\n k = Heap(lst)\n print(k.find_max())\n end = datetime.now()\n print(end - start)\n\n\nif __name__ == \"__main__\" :\n __main()\n # __timechk()","repo_name":"KayyoungHL/DataStructure","sub_path":"data_structure/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42154458693","text":"from django.core.files.storage import FileSystemStorage\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom core import settings\nfrom . import serializers, models\nfrom .models import Material\n\n\n@api_view(['GET'])\ndef materials_get_range(request, start: int, end: int):\n if request.method == 'GET':\n materials = models.Material.objects.get_all_in_range(start, end)\n serializer_data = serializers.MaterialSerializer(materials, context={'request': request}, many=True)\n return Response({\n 'data': serializer_data.data\n }, status=200)\n\n\n@api_view(['GET'])\ndef material_get_by_id(request, id: int):\n if request.method == 'GET':\n material = models.Material.objects.find_by_id(id)\n serializer_data = serializers.MaterialSerializer(material, context={'request': request}, many=True)\n return Response({\n 'data': serializer_data.data\n }, status=200)\n\n\n@api_view(['GET'])\ndef game_get_by_id(request, id: int):\n if request.method == 'GET':\n game = models.Game.objects.find_by_id(id)\n serializer_data = serializers.GameSerializer(game, context={'request': request}, many=True)\n return Response({\n 'data': serializer_data.data\n }, 
status=200)\n","repo_name":"DanielDaVinci/Tabletop_Mania_Api","sub_path":"App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14141796685","text":"'''\nAuthor: Francis Laclé, Alexander Pinkerton\nLicense: MIT\nVersion: 0.1\n\nScript to compute \"true\" code churn of a Git repository.\n\nCode churn has several definitions, the one that to me provides the\nmost value as a metric is:\n\n\"Code churn is when an engineer\nrewrites their own code in a short period of time.\"\n\nReference: https://blog.gitprime.com/why-code-churn-matters/\n\nThis script looks at a range of commits per author. For each commit it\nbook-keeps the files that were changed along with the lines of code (LOC)\nfor each file. LOC are kept in a sparse structure and changes per LOC are taken\ninto account as the program loops. When a change to the same LOC is detected it\nupdates this separately to bookkeep the true code churn.\n\nResult is a print with aggregated contribution and churn per author for a\ngiven time period.\n\nTested with Python version 3.5.3 and Git version 2.20.1\n\n'''\n\n# Usage: python gitcodechurn.py --config --before=2020-05-30 --after=2020-05-20 --chart\n\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport datetime\nimport json\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n parser = argparse.ArgumentParser(\n description = 'Compute true git code churn (for project managers)'\n )\n parser.add_argument(\n '--before',\n type = str,\n help = 'before a certain date, in YYYY-MM-DD format'\n )\n parser.add_argument(\n '--after',\n type = str,\n help = 'after a certain date, in YYYY-MM-DD format'\n )\n parser.add_argument(\n '--author',\n type = str,\n help = 'author string (not committer). 
Use \\'ALL\\' for all authors'\n )\n parser.add_argument(\n '--dir',\n type = str,\n help = 'Git repository directory'\n )\n parser.add_argument(\n '--config',\n type = str,\n help = 'File containing various configuration information.'\n )\n parser.add_argument(\n '--chart',\n dest = \"chart\",\n action = \"store_true\",\n help = 'Show the churn chart.'\n )\n\n args = parser.parse_args()\n\n if not (args.author or args.config):\n parser.error('No action requested, add --author or --authorFile')\n\n before = args.before\n after = args.after\n author = args.author\n dir = args.dir\n configFile = args.config\n\n agg_results = {}\n\n # if a config file was provided\n if configFile:\n # Parse the data in the authorFile\n with open(configFile) as json_file:\n configData = json.load(json_file)\n authorData = configData.get(\"aliasMap\", None)\n repositories = configData.get(\"repositories\", None)\n\n if repositories:\n # Clone each repository and then brng them up to date\n for repo in repositories:\n # If the repo does not exist, clone it\n directory = repo.split(\"/\")[1].replace(\".git\",\"\")\n if not os.path.isdir(directory):\n command = f'git clone {repo}'\n print(\"Cloning repository: \", repo, command, \"\\n\")\n out = get_proc_out(command, \".\").splitlines()\n else:\n # otherwise, ensure it is up to date.\n command = 'git pull'\n print(\"Bringing repo up to date\", repo)\n out = get_proc_out(command, directory).splitlines()\n \n # Calculate the churn for the repo and aggregate into total\n repo_results = get_churn_for_repo(before, after, directory, authorData=authorData)\n for alias in repo_results:\n # Get the existing totals, if any\n existing = agg_results.get(alias, None)\n repo_total = repo_results[alias]\n if existing:\n # add em up\n agg_results[alias]['churn'] += repo_total['churn']\n agg_results[alias]['contribution'] += repo_total['contribution']\n else:\n agg_results[alias] = repo_total\n\n # If there was a specified author\n elif author == 
\"ALL\":\n authors = get_authors(dir)\n for name in authors:\n print(\"Calculating churn for \", name)\n name = name.replace(\"'\", \"\")\n data = calc_churn(before, after, name, dir)\n if(data[\"churn\"] != 0 or data[\"contribution\"] !=0 ):\n del data['name']\n agg_results[name] = data\n else:\n data = calc_churn(before, after, author, dir)\n del data['name']\n agg_results[author] = data\n\n repostr = \"\\n\".join(repositories)\n \n if args.chart == True:\n show_chart(agg_results, before, after, repostr)\n\n print(agg_results)\n\ndef get_churn_for_repo(before, after, directory, authorData=None):\n \n results = {}\n\n print(f\"Calculating churn for {directory}\")\n\n if authorData:\n for name, aliases in authorData.items():\n print(\"Calculating churn for \", name)\n total_contributions = 0\n total_churn = 0\n\n for alias in aliases:\n try:\n data = calc_churn(before, after, alias, directory)\n if(data[\"churn\"] != 0 or data[\"contribution\"] !=0 ):\n print(\"\\t\", alias, data[\"contribution\"], data[\"churn\"])\n total_contributions += data[\"contribution\"]\n total_churn += data[\"churn\"]\n except UnicodeDecodeError as e:\n print(\"Failed to calculate churn for \", alias, e)\n \n if(total_churn != 0 or total_contributions !=0):\n results[name] = {\"churn\":total_churn, \"contribution\":total_contributions}\n \n else:\n # default to all authors?\n pass\n \n return results\n\n\ndef show_chart(results, before, after, directory):\n x = [ k for k,v in results.items() ]\n y_1 = [ v[\"contribution\"] for k,v in results.items() ]\n y_2 = [ v[\"churn\"] for k,v in results.items() ]\n\n fig, ax = plt.subplots(num=\"Code Churn\")\n ax.set_title(\"Repositories\\n\" + directory + \"\\n\\n\" + after + \" to \" + before)\n ax.set_xlabel('Author')\n ax.set_ylabel('Contributions / Churn')\n\n ax.bar(x, y_1, color=(122/255, 219/255, 163/255, 0.8))\n ax.bar(x, y_2, color=(252/255, 97/255, 90/255, 0.8))\n ax.axhline(linewidth=1, color='gray')\n\n # Formatting x labels\n 
plt.xticks(rotation=90)\n plt.tight_layout()\n\n plt.show()\n\ndef calc_churn(before, after, name, dir):\n \n commits = get_commits(before, after, name, dir)\n\n # structured like this: files -> LOC\n files = {}\n contribution = 0\n churn = 0\n\n for commit in commits:\n [files, contribution, churn] = get_loc(\n commit,\n dir,\n files,\n contribution,\n churn\n )\n \n return {\"name\":name, \"contribution\":contribution, \"churn\":-churn}\n\ndef get_authors(directory):\n # Get all of the authors for this repository\n authors = subprocess.Popen([\"git\", \"log\", \"--format='%aN'\"], stdout=subprocess.PIPE, universal_newlines=True, cwd=directory)\n # Sort and remove duplicates\n sort = subprocess.Popen([\"sort\", \"-u\"], stdin=authors.stdout, stdout=subprocess.PIPE, universal_newlines=True, cwd=directory)\n names = []\n for output in sort.stdout.readlines():\n names.append(output.strip())\n\n return names\n\ndef get_loc(commit, dir, files, contribution, churn):\n # git show automatically excludes binary file changes\n command = 'git show --format= --unified=0 --no-prefix ' + commit\n results = get_proc_out(command, dir).splitlines()\n file = ''\n loc_changes = ''\n\n # loop through each row of output\n for result in results:\n new_file = is_new_file(result, file)\n if file != new_file:\n file = new_file\n if file not in files:\n files[file] = {}\n else:\n new_loc_changes = is_loc_change(result, loc_changes)\n if loc_changes != new_loc_changes:\n loc_changes = new_loc_changes\n locc = get_loc_change(loc_changes)\n for loc in locc:\n if loc in files[file]:\n files[file][loc] += locc[loc]\n churn += abs(locc[loc])\n else:\n files[file][loc] = locc[loc]\n contribution += abs(locc[loc])\n else:\n continue\n return [files, contribution, churn]\n\n# arrives in a format such as -13 +27,5 (no decimals == 1 loc change)\n# returns a dictionary where left are removals and right are additions\n# if the same line got changed we subtract removals from additions\ndef 
get_loc_change(loc_changes):\n # removals\n left = loc_changes[:loc_changes.find(' ')]\n left_dec = 0\n if left.find(',') > 0:\n comma = left.find(',')\n left_dec = int(left[comma+1:])\n left = int(left[1:comma])\n else:\n left = int(left[1:])\n left_dec = 1\n\n # additions\n right = loc_changes[loc_changes.find(' ')+1:]\n right_dec = 0\n if right.find(',') > 0:\n comma = right.find(',')\n right_dec = int(right[comma+1:])\n right = int(right[1:comma])\n else:\n right = int(right[1:])\n right_dec = 1\n\n if left == right:\n return {left: (right_dec - left_dec)}\n else:\n return {left : left_dec, right: right_dec}\n\n\n\ndef is_loc_change(result, loc_changes):\n # search for loc changes (@@ ) and update loc_changes variable\n if result.startswith('@@'):\n loc_change = result[result.find(' ')+1:]\n loc_change = loc_change[:loc_change.find(' @@')]\n return loc_change\n else:\n return loc_changes\n\ndef is_new_file(result, file):\n # search for destination file (+++ ) and update file variable\n if result.startswith('+++'):\n return result[result.rfind(' ')+1:]\n else:\n return file\n\ndef get_commits(before, after, author, dir):\n # note --no-merges flag (usually we coders do not overhaul contributions)\n # note --reverse flag to traverse history from past to present\n command = 'git log --author=\"'+author+'\" --format=\"%h\" --no-abbrev '\n command += '--before=\"'+before+'\" --after=\"'+after+'\" --no-merges --reverse'\n\n # print(command)\n\n return get_proc_out(command, dir).splitlines()\n\n# not used but still could be of value in the future\ndef get_files(commit, dir):\n # this also works in case --no-merges flag is ommitted prior\n command = 'git show --numstat --pretty=\"\" ' + commit\n results = get_proc_out(command, dir).splitlines()\n for i in range(len(results)):\n # remove the tabbed stats from --numstat\n results[i] = results[i][results[i].rfind('\\t')+1:]\n return(results)\n\ndef get_proc_out(command, dir):\n process = subprocess.Popen(\n command,\n 
stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=dir,\n shell=True\n )\n return process.communicate()[0].decode(\"utf-8\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"AlexanderPinkerton/truegitcodechurn","sub_path":"gitcodechurn.py","file_name":"gitcodechurn.py","file_ext":"py","file_size_in_byte":11204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"32343907516","text":"import interfazproducto, interfazcliente, interfazventa\n\n\ndef menuBienvenida():\n opcion = 0\n interfazproductoInstancia = interfazproducto.InterfazProducto()\n interfazclienteInstancia = interfazcliente.InterfazCliente()\n listaCliente = interfazclienteInstancia.devolverListaCliente()\n listaProducto = interfazproductoInstancia.devolverListaProductos()\n diccionarioCliente = interfazclienteInstancia.devolverDiccionarioCliente()\n diccionarioProducto = interfazproductoInstancia.devolverDiccionarioProductos()\n interfazventaInstancia = interfazventa.InterfazVenta(listaCliente, listaProducto, diccionarioCliente, diccionarioProducto)\n while opcion!= 9:\n print(\"---------------------------------------------------------------\")\n print(\"Bienvenido al sistema de compras, decida la opcion que necesite\")\n print(\"[1] Productos\\n[2] Clientes\\n[3] Ventas\\n[9] Salida\")\n print(\"---------------------------------------------------------------\")\n try:\n opcion = int(input(\"Opcion: \"))\n except ValueError:\n print(\"Opcion no valida\")\n if opcion == 1:\n interfazproductoInstancia.menuProductos()\n opcion = 0\n if opcion == 2:\n interfazclienteInstancia.menuClientes()\n opcion = 0\n if opcion == 3:\n interfazventaInstancia.menuVentas()\n opcion = 0\n\n\nmenuBienvenida()","repo_name":"VictorIsaacLK/SistemaVenta","sub_path":"interfaz.py","file_name":"interfaz.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} 
+{"seq_id":"41771962094","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext.audiorec import NativeVoiceClient\r\nimport asyncio\r\nimport random\r\nimport io\r\nimport wavelink\r\n\r\n\r\nintents = discord.Intents.all()\r\nclient = commands.Bot(command_prefix=\"pls \", intents=intents)\r\n\r\n\r\n\r\n#initiation#\r\n@client.event\r\nasync def on_ready():\r\n print(\"READY!\")\r\n asyncio.create_task(change_status())\r\n\r\n#change statuses#\r\nasync def change_status():\r\n status = [\"ayaka is the best\", \"28 days left\"] #\"ayaka is the best\",\"ayaka #1\"\r\n index = 0\r\n while True:\r\n status_chose = status[index]\r\n await client.change_presence(activity=discord.Game(name=status_chose))\r\n index += 1\r\n if index == len(status):\r\n index = 0\r\n await asyncio.sleep(60) \r\n\r\n\r\n#delete messages#\r\n@client.command(aliases = ['clean','del','delete'])\r\nasync def clear(ctx,amount=1):\r\n print(ctx.author,client.user)\r\n await ctx.channel.purge(limit=amount+1)\r\n await ctx.send(content = f\"{amount} messages deleted.\", delete_after = 2)\r\n \r\n\r\n#!csgo#\r\n@client.command()\r\nasync def csgo(ctx):\r\n with open(\"csgo.txt\",\"r\") as file:\r\n data = file.read()\r\n print(data)\r\n await ctx.send(data)\r\n\r\n#pls choose#\r\n@client.command()\r\nasync def choose(ctx,*arg):\r\n await ctx.send(f'{arg[random.randint(0,len(arg)-1)]}')\r\n\r\n#countdown#\r\n@client.command()\r\nasync def countdown(ctx,second):\r\n minute = 0\r\n second = int(second)\r\n if second < 0:\r\n await ctx.send(\"no negative numbers la\")\r\n else:\r\n while second > 60:\r\n second -= 60\r\n minute += 1\r\n message = await ctx.send(f\"time remaining: {minute} minute {second} second\")\r\n while second > -1 or minute != 0 :\r\n await message.edit(content = f\"time remaining: {minute} minute {second} second\") \r\n await asyncio.sleep(1)\r\n second -= 1\r\n if minute != 0 and second == 0:\r\n minute -= 1\r\n second += 60\r\n else:\r\n await 
ctx.send(\"Done!\")\r\n\r\n\r\n\r\n#join channel#\r\n@client.command(pass_context = True)\r\nasync def join(ctx):\r\n if (ctx.author.voice):\r\n global channel\r\n channel = ctx.message.author.voice.channel\r\n await channel.connect(cls = NativeVoiceClient)\r\n\r\n\r\n else:\r\n await ctx.send(content = \"Not In Channel\",delete_after = 2)\r\n\r\n#record audio#\r\n@client.command()\r\nasync def record(ctx):\r\n second = 0\r\n minute = 0\r\n global sw\r\n sw = True\r\n ctx.voice_client.record(lambda e: print(f\"Exception: {e}\"))\r\n message = await ctx.send(f\"Recording Started **[ Time Elapsed: {second} second ]**\")\r\n while sw:\r\n await asyncio.sleep(1)\r\n second += 1\r\n if second == 60:\r\n second = 0\r\n minute += 1\r\n if minute != 0:\r\n await message.edit(content = f\"Recording Started **[ Time Elapsed: {minute} minute {second} second ]**\")\r\n else:\r\n await message.edit(content = f\"Recording Started **[ Time Elapsed: {second} second ]**\")\r\n\r\n#stop recording#\r\n@client.command()\r\nasync def stop(ctx):\r\n\r\n try:\r\n if ctx.voice_client.is_recording() == True:\r\n global sw\r\n sw = False\r\n second = 5\r\n message = await ctx.send(\"Waiting for **5** second(s)\")\r\n for i in range(5):\r\n await asyncio.sleep(1)\r\n second -= 1\r\n await message.edit(content = f\"Waiting for **{second}** second(s)\")\r\n wav_bytes = await ctx.voice_client.stop_record()\r\n wav_file = discord.File(io.BytesIO(wav_bytes), filename=\"Recorded.wav\")\r\n await ctx.send(file = wav_file)\r\n except:\r\n await ctx.send(\"Not Recording\")\r\n\r\n#play song# ##java -jar Lavalink.jar\r\n@client.command()\r\nasync def play(ctx):\r\n await client.wait_until_ready()\r\n node = await wavelink.NodePool.create_node(bot=client,\r\n host = '127.0.0.1',\r\n port = 2333,\r\n password = 'youshallnotpass')\r\n\r\n \r\n#disconnect#\r\n@client.command(aliases = [\"disconnect\",\"disc\",\"quit\"], pass_context = True)\r\nasync def leave(ctx):\r\n try:\r\n await 
ctx.message.guild.voice_client.disconnect()\r\n except:\r\n await ctx.send(\"?\")\r\n\r\n\r\n\r\nclient.run('x')\r\n\r\n","repo_name":"ItsMeOX/Application1","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30772549630","text":"from django.test import TestCase\nfrom rest_framework.test import APIClient\n\nfrom decimal import Decimal\n\nfrom .models import Marker\n\nclass MarkerTestCase(TestCase):\n def test_create_marker(self):\n factory = APIClient()\n request_body = {'latitude': 1123.45678, 'longitude': -1901.23456, 'altitude': 1789.01234}\n \n actual = factory.post('/markers/', request_body, format='json').data\n expected = [{'id': 1, 'latitude': Decimal('1123.45678'), 'longitude': Decimal('-1901.23456'), 'altitude': Decimal('1789.01234')}]\n\n self.assertEqual(actual, expected)\n\n def test_get_markers(self):\n # Create marker\n factory = APIClient()\n request_body = {'latitude': 123.45678, 'longitude': -901.23456, 'altitude': 789.01234}\n\n factory.post('/markers/', request_body, format='json')\n\n # Get markers\n actual = factory.get('/markers/', format='json').data\n expected = [{'id': 1, 'latitude': Decimal('123.45678'), 'longitude': Decimal('-901.23456'), 'altitude': Decimal('789.01234')}]\n\n self.assertEqual(actual, expected)\n\n def test_delete_marker(self):\n # Create marker\n factory = APIClient()\n request_body = {'latitude': 123.45678, 'longitude': -901.23456, 'altitude': 789.01234}\n\n factory.post('/markers/', request_body, format='json')\n\n # Delete marker\n actual = factory.delete('/markers/1', format='json').data\n expected = []\n\n self.assertEqual(actual, expected)\n\n def test_clear_markers(self):\n # Create marker\n factory = APIClient()\n request_body = {'latitude': 123.45678, 'longitude': -901.23456, 'altitude': 789.01234}\n\n factory.post('/markers/', request_body, 
format='json')\n\n # Clear markers\n actual = factory.delete('/markers/', format='json').data\n expected = []\n\n self.assertEqual(actual, expected)","repo_name":"andrewrobles/3d-globe-visualizer","sub_path":"server/server/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"21911253790","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"auctions\"\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"listing\", views.listing,name=\"listing\"),\n path(\"closedBids\", views.closedBids,name=\"closedBids\"), \n path(\"listing/\",views.details, name=\"details\"),\n path(\"listing_category\", views.listing_category,name=\"listing_category\"),\n path(\"create_listing\", views.create_listing, name=\"create_listing\"),\n path(\"watchlist\",views.WatchList, name=\"WatchList\"),\n path(\"add_watchlist/\",views.add_watchlist, name=\"add_watchlist\"),\n path(\"remove_watchlist/\",views.remove_watchlist, name=\"remove_watchlist\"),\n path(\"add_bid/\",views.add_bid, name=\"add_bid\"),\n path(\"add_comment/\",views.add_comment, name=\"add_comment\"), \n path(\"closeBid/\", views.closeBid,name=\"closeBid\"), \n]\n# ClosedBids and closeBid/ are two different url for different use\r\n","repo_name":"abhinab-choudhury/CS50-Projects","sub_path":"Project-3/auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33489573442","text":"from django.contrib import messages\nfrom django.shortcuts import redirect, render\nfrom .forms import SignupForm_employee,SignupForm_owner,SignupForm_manager\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import CustomUser\nfrom verify_email.email_handler import send_verification_email\n\n\n# Create your views here.\ndef landing_view(request):\n if request.user.is_authenticated:\n return redirect(\"/dashboard\")\n return render(request, 'landing.html')\n\n\ndef login_view(request):\n if request.user.is_authenticated:\n return redirect(\"/dashboard\")\n\n context = {}\n if request.method == 'POST':\n email = request.POST[\"email\"]\n password = request.POST[\"password\"]\n\n user = CustomUser.objects.filter(email=email.lower()).first()\n\n if user is None:\n context = {\"error\": \"User not found, Register your account first.\"}\n return render(request, 'login.html', context)\n \n if user.is_active==False:\n context = {\"error\": \"Please verify your email address first.\"}\n return render(request, 'login.html', context)\n \n authUser = authenticate(request, email=email, password=password)\n \n if authUser is None:\n context = {\"error\": \"Invalid password\"}\n return render(request, 'login.html', context)\n \n \n login(request,authUser)\n msg = \"Welcome \" + CustomUser.objects.get(email=email).first_name + \" !!\"\n messages.success(request,msg)\n return redirect(\"/dashboard\")\n\n return render(request, \"login.html\", context)\n\n\n\ndef register_view(request):\n if request.user.is_authenticated:\n return redirect(\"/home\")\n return render(request, \"register.html\")\n\n\n\ndef register_view_owner(request):\n if request.user.is_authenticated:\n return redirect(\"/home\")\n form = SignupForm_owner()\n if request.method == 'POST':\n form = SignupForm_owner(request.POST)\n if form.is_valid():\n user = form.save()\n user.user_type = 'owner'\n user.save()\n send_verification_email(request, form)\n messages.success(request, 'Please verify your email for ProPlaning and login after that')\n return redirect('/login')\n return render(request, 
'owner/register_as_admin.html', {\"form\": form})\n\n\ndef register_view_manager(request):\n if request.user.is_authenticated:\n return redirect(\"/home\")\n form = SignupForm_manager()\n if request.method == 'POST':\n form = SignupForm_manager(request.POST)\n if form.is_valid():\n user = form.save()\n user.user_type = 'manager'\n user.save()\n send_verification_email(request, form)\n messages.success(request, 'Please verify your email for ProPlaning and login after that')\n return redirect('/login')\n return render(request, 'manager/register_as_manager.html', {\"form\": form})\n\n\ndef register_view_employee(request):\n if request.user.is_authenticated:\n return redirect(\"/home\")\n form = SignupForm_employee()\n if request.method == 'POST':\n form = SignupForm_employee(request.POST)\n if form.is_valid():\n user = form.save()\n user.user_type = 'employee'\n user.save()\n send_verification_email(request, form)\n messages.success(request, 'Please verify your email for ProPlaning and login after that')\n CustomUser.is_employee=True\n return redirect('/login')\n return render(request, 'employee/register_as_employee.html', {\"form\": form})\n\ndef Logout(request):\n request.session.flush()\n logout(request)\n return redirect('')\n\n\n\n@login_required\ndef home_view(request):\n return render(request, 'home.html')","repo_name":"shlok4803/project_management_system_group_19","sub_path":"projectManagementSystem/Project/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"27925929832","text":"# TO DOs: get last prices for every asset (need to wait until I get the data); find a way to consider current date as to\n# avoid lookahead bias and also control which day the portfolio is in\nimport os\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom GetStockData import download_stocks_data, get_correlation_list, 
construct_pair_df, get_todays_datetime\n\npd.options.mode.chained_assignment = None\n\n\n# parent class of all assets\nclass Asset:\n\n def __init__(self, name, current_date=get_todays_datetime()):\n self.name = name\n self.current_date = current_date\n\n\nclass Stock(Asset):\n\n def __init__(self, name, current_date=get_todays_datetime()):\n super().__init__(name, current_date)\n self.paper_type = 'stock'\n self.path = 'tickers_data'\n self.data = self.load_data()\n self.stddev = self.get_stddev()\n self.mean = self.get_mean()\n self.current_price = self.get_price()\n\n def load_data(self):\n\n path_exists = os.path.exists(f'{self.path}/{self.name}_data.csv')\n\n # checks if it is necessary to update all data\n if not path_exists:\n download_stocks_data(update_all=True)\n\n return pd.read_csv(f'{self.path}/{self.name}_data.csv', parse_dates=['Date']).sort_values('Date', ascending=False)\n\n def get_stddev(self, period_considered=90):\n\n temp_df = self.data[(self.data['Date'] < self.current_date) &\n (self.data['Date'] >= self.current_date - dt.timedelta(days=period_considered))]\n return np.std(temp_df[f'Close_{self.name}'])\n\n def get_mean(self, period_considered=90):\n\n temp_df = self.data[(self.data['Date'] < self.current_date) &\n (self.data['Date'] >= self.current_date - dt.timedelta(days=period_considered))]\n return temp_df[f'Close_{self.name}'].mean()\n\n def update_date(self, new_date):\n\n self.current_date = new_date\n print(f'Date updated to {self.current_date}')\n\n def get_price(self):\n\n return self.data.loc[self.data['Date'] == self.current_date, f'Close_{self.name}']\n\n\nclass Commodity(Asset):\n\n def __init__(self, name, current_date=get_todays_datetime()):\n super().__init__(name, current_date)\n self.paper_type = 'commodity'\n\n\n# parent class of all derivatives\n# TO DOs: get last prices for every derivative; needs to take into consideration the underlying asset\n\nclass Derivative():\n\n def __init__(self, name, asset_name, 
current_date=get_todays_datetime()):\n # super().__init__(asset_name, current_date)\n self.name = name\n self.asset_name = asset_name\n self.current_date = current_date\n # self.current_asset_price = self.current_price\n # self.current_price =\n\n\nclass Option(Derivative):\n def __init__(self, name, asset_name, current_date=get_todays_datetime()):\n super().__init__(name, asset_name, current_date)\n self.paper_type = 'option'\n\n\nclass Future(Derivative):\n def __init__(self, name, asset_name, current_date=get_todays_datetime()):\n super().__init__(name, asset_name, current_date)\n self.paper_type = 'future'\n\n\ndef create_paper(paper_type, name, current_date=get_todays_datetime(), asset_name=None):\n\n if paper_type == 'stock':\n return Stock(name, current_date)\n elif paper_type == 'commodity':\n return Commodity(name, current_date)\n elif paper_type == 'option':\n return Option(name, asset_name, current_date)\n elif paper_type == 'future':\n return Future(name, asset_name, current_date)\n else:\n print('Paper type not supported')\n\n\n# portfolio\n# TO DOs: risk management methods should be coded here\nclass Portfolio:\n\n def __init__(self, initial_date, initial_history=None, initial_cash=1e7):\n self.initial_date = initial_date\n self.current_date = self.initial_date\n\n if initial_history is not None:\n self.history = initial_history\n self.initial_history = self.history\n\n else:\n self.history = pd.DataFrame(data=['cash', 'cash', initial_cash, 1, initial_cash, initial_date, 'starting_portfolio'],\n columns=['paper_name', 'paper_type', 'amount', 'price', 'quantity', 'date', 'operation'])\n self.initial_history = self.history\n\n # needs to store net exposure somewhere\n def buy_paper(self, paper_name, paper_type, quantity, date=get_todays_datetime(), underlying_asset_name=None):\n\n paper = create_paper(paper_type, paper_name, underlying_asset_name)\n paper_row = pd.DataFrame(data=[paper_name, paper_type, paper.current_price * quantity, 
paper.current_price,\n quantity, date, 'buy_order'],\n columns=['paper_name', 'paper_type', 'amount', 'price', 'quantity', 'date', 'operation'])\n minus_cash_row = pd.DataFrame(data=['cash', 'cash', -paper.current_price * quantity, 1,\n -quantity*(paper.current_price), date, 'buy_order_cash_decrease'],\n columns=['paper_name', 'paper_type', 'amount', 'price', 'quantity', 'date', 'operation'])\n self.history = pd.concat([self.history, paper_row, minus_cash_row])\n\n print(f'Bought {quantity} units of {paper_name}')\n\n # needs to consider short-selling (margin call) and store net exposure somewhere\n def sell_paper(self, paper_name, paper_type, quantity, date=get_todays_datetime(), underlying_asset_name=None):\n\n paper = create_paper(paper_type, paper_name, underlying_asset_name)\n paper_row = pd.DataFrame(data=[paper_name, paper_type, -paper.current_price * quantity, paper.current_price, -quantity,\n date, 'sell_order'],\n columns=['paper_name', 'paper_type', 'amount', 'price', 'quantity', 'date', 'operation'])\n plus_cash_row = pd.DataFrame(data=['cash', 'cash', paper.current_price * quantity, 1, paper.current_price * quantity,\n date, 'sell_order_cash_increase'],\n columns=['paper_name', 'paper_type', 'amount', 'price', 'quantity', 'date', 'operation'])\n self.history = pd.concat([self.history, paper_row, plus_cash_row])\n\n print(f'Sold {quantity} units of {paper_name}')\n\n # find a way to calculate correlation of portfolio as a whole\n def get_portfolio_correlation(self):\n pass\n\n # need to set current date to be used in the portfolio; must remember to update this everyday in the data; maybe\n # also set a time of the day\n def set_current_date(self, new_date):\n\n self.current_date = new_date\n print(f'Portfolio date updated to {new_date}.')\n return self.current_date\n\n # should close positions, based on if the portfolio is long or short on it\n def liquidate_asset(self, paper_name, paper_type, date=None, underlying_asset_name=None):\n\n if date is 
None:\n date = self.set_current_date()\n\n asset_history = self.history[self.history['paper_name'] == paper_name]\n remaining_quantity = asset_history['quantity'].sum()\n\n if remaining_quantity != 0:\n\n if remaining_quantity > 0:\n self.sell_paper(paper_name, paper_type, remaining_quantity, date, underlying_asset_name)\n\n else:\n self.buy_paper(paper_name, paper_type, remaining_quantity, date, underlying_asset_name)\n\n print(f'Position on asset {paper_name} was closed on {date}.')\n\n # should return a dictionary in the form {'paper_name': current_price}, so we can use it below for mapping\n def get_current_prices(self, date=None):\n\n if date is None:\n date = self.set_current_date()\n\n current_prices_dict = {'cash': 1}\n pass\n\n # should return net exposure from both buy_paper and sell_paper\n def get_exposure(self):\n\n aggregate_df = self.history.groupby('paper_name').sum().reset_index(inplace=True)\n open_positions = aggregate_df[aggregate_df['quantity'] != 0]\n open_positions['current_price'] = open_positions['paper_name'].map(self.get_current_prices())\n open_positions['current_amount'] = open_positions['current_price'] * open_positions['quantity']\n\n return open_positions[['paper_name', 'current_amount']]\n\n # returns the current value of all assets under management (sum of the column from history)\n def current_aum(self):\n return self.get_exposure['current_amount'].sum()\n\n # returns the performance of the portfolio\n def return_to_date(self):\n return self.current_aum() / self.initial_history['amount'].sum()\n\n # plots the evolution of the AUM\n def plot_evolution(self):\n pass\n\n\n# strategies; need to consider the start and end dates\n# TO DOs: needs to manage risk in decisions (can't just buy everything possible); need to think of long bias strategies\n\nclass Strategy:\n\n def __init__(self, start_date, end_date, all_data):\n self.start_date = start_date\n self.end_date = end_date\n self.all_data = all_data\n\n def pair_correlation(self, 
paper1, paper2, period):\n pass\n\n def pair_statistical_arbitrage(self):\n pass\n\n\nif __name__ == '__main__':\n # USE FUNCTIONS TO GET A LIST OF POSITIVELY CORRELATED AND NEGATIVELY CORRELATED STOCKS;\n # BASED ON EACH LIST LOOK FOR OPPORTUNITIES (IF IT'S THE SUM OR DIFFERENCE OF Z-SCORES)\n\n print(len(get_correlation_list(dt.datetime(year=2021, month=1, day=1), abs_corr_coef=0.95, positive_corr=True)))","repo_name":"George-Lobo/quant","sub_path":"QuantObjects.py","file_name":"QuantObjects.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8623977328","text":"# 9205_맥주-마시면서-걸어가기_문제풀이\n# 2022-03-21\n\nimport sys\nsys.stdin = open('input.txt', 'r')\n\n\ndef BFS(i, j):\n global visited\n queue = [(i, j)]\n front = -1\n rear = 0\n while front != rear:\n front += 1\n r, c = queue[front]\n # 현재 위치에서 도착위치로 갈 수 있으면\n if abs(end_x - r) + abs(end_y - c) <= 1000:\n return 'happy'\n # 현재 위치에서 갈 수 있는 편의점 탐색\n for i in range(N):\n if visited[i] == 0:\n nr, nc = dir_lst[i]\n if abs(nr - r) + abs(nc - c) <= 1000:\n queue.append((nr, nc))\n visited[i] = 1\n rear += 1\n # 도착점 까지 못갔을 경우\n return 'sad'\n\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n start_x, start_y = map(int, input().split())\n result = 'happy'\n visited = [0 for i in range(N+1)]\n dir_lst = []\n for i in range(N):\n x, y = map(int, input().split())\n dir_lst.append((x, y))\n end_x, end_y = map(int, input().split())\n ans = BFS(start_x, start_y)\n print(ans)","repo_name":"jangchangwan/TIL","sub_path":"docs/03_baekjoon/BFS/9205_맥주-마시면서-걸어가기/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"21702009445","text":"from django.urls import path, include\r\nfrom ui_ux.views.admin_dashboard.admin_view import (\r\n chart, tables, index, update_product, update_user, 
add_users, add_products, add_promo,add_categories,update_category,\r\n add_products_bought, update_products_bought, issue_products, receive_products,\r\n del_categories, del_product_bought, del_users, del_categories, del_products, del_promo\r\n)\r\n\r\nurlpatterns = [\r\n path(\"\", include(\"ui_ux.views.eshop_ui.eshop_urls\")),\r\n \r\n path(\"admin/\", index, name=\"admin_dashboard\"),\r\n path(\"tables/\", tables, name=\"tables\"),\r\n path(\"chart/\", chart, name=\"charts\"),\r\n\r\n path(\"update_products/\", update_product, name=\"update_pdct\"),\r\n path(\"update_users/\", update_user, name=\"update_pdct\"),\r\n path(\"update_category/\", update_category, name=\"update_category\"),\r\n path(\"update_ordered/\", update_products_bought, name=\"update_products_bought\"),\r\n path(\"issue_products/\", issue_products, name=\"issue_products\"),\r\n path(\"receive_products/\", receive_products, name=\"receive_products\"),\r\n\r\n path(\"del_products/\", del_products, name=\"del_products\"),\r\n path(\"del_categories/\", del_categories, name=\"del_categories\"),\r\n path(\"del_product_bought/\", del_product_bought, name=\"del_product_bought\"),\r\n path(\"del_users/\", del_users, name=\"del_users\"),\r\n path(\"del_promo/\", del_promo, name=\"del_promo\"),\r\n\r\n path(\"add_category/\", add_categories, name=\"add_categories\"),\r\n path(\"add_products/\", add_products, name=\"add_products\"),\r\n path(\"add_promo/\", add_promo, name=\"add_promo\"),\r\n path(\"add_users/\", add_users, name=\"add_users\"),\r\n path(\"add_ordered/\", add_products_bought, name=\"add_products_bought\"),\r\n]","repo_name":"fathela-domm/eshop_","sub_path":"ui_ux/ui_ux_urls.py","file_name":"ui_ux_urls.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"14969848730","text":"import argparse\nfrom datetime import datetime\nfrom configparser import ConfigParser\nfrom 
revenue_analyzer.analyzer import RevenueAnalyzer\n\ndef main(args, config):\n analyzer = RevenueAnalyzer(args.input_file, config)\n analyzer.process_file()\n\n output_filename = f'output/{datetime.now().strftime(\"%Y-%m-%d\")}_SearchKeywordPerformance.tab'\n analyzer.write_output(output_filename)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Analyze revenue from search engines and keywords.')\n parser.add_argument('input_file', help='Input file containing hit level data.')\n args = parser.parse_args()\n\n config = ConfigParser()\n config.read('config.ini')\n\n main(args, config)\n","repo_name":"layakota01/Assessment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34950544720","text":"# Module imports\nimport sys\nimport time\nimport platform\nimport dateutil.parser\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom datetime import date\n\n# User imports\n \nclass Window(QtWidgets.QMainWindow):\n # Set defaults\n width = 700\n height = 550\n title = \"OpenJournal (alpha)\"\n\t\n def __init__(self, journal):\n super().__init__()\n \n self.firstView = True\n self.JournalController = journal\n\n QtWidgets.QShortcut(QtGui.QKeySequence(\"Ctrl+W\"), self, self.closeProgram)\n QtWidgets.QShortcut(QtGui.QKeySequence(\"Ctrl+E\"), self, self.changeView)\n\n self.buildUI()\n\n self.startDateLoop()\n\n def checkDate(self):\n if self.JournalController.journal.date != date.today():\n self.JournalController.reset()\n self.updateDateLabel()\n self.textEditor.setPlainText(self.JournalController.journal.text)\n\n def startDateLoop(self):\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.checkDate)\n self.timer.start(1)\n\n def buildUI(self):\n # Create our pages (stacks)\n self.editStack = QtWidgets.QWidget()\n self.viewStack = QtWidgets.QWidget()\n 
self.editStackUI()\n self.viewStackUI()\n\n # Put them in a stack widget so we can rotate between them\n self.stack = QtWidgets.QStackedWidget(self)\n self.stack.addWidget(self.editStack)\n self.stack.addWidget(self.viewStack)\n self.setCentralWidget(self.stack)\n\n # Set the initial size and title\n self.initSize()\n self.setWindowTitle(self.title)\n\n # Show our UI\n self.show()\n\n def editStackUI(self):\n grid = QtWidgets.QGridLayout()\n grid.setSpacing(0) \n grid.setContentsMargins(2,2,2,2)\n self.setStyleSheet(\"background-color: white\");\n\n # Text editor widget\n self.textEditor = QtWidgets.QPlainTextEdit(self)\n self.textEditor.setFrameStyle(QtWidgets.QFrame.NoFrame);\n self.textEditor.setPlainText(self.JournalController.journal.text)\n\n # Directions widget\n self.instructLabel = QtWidgets.QLabel()\n self.instructLabel.setText(\"Press \" + (\"Cmd\" if platform.system() == \"Darwin\" else \"Ctrl\") + \"+E to view journals\")\n self.instructLabel.setFixedHeight(18)\n self.instructLabel.setStyleSheet('color: gray')\n # Current date widget\n self.dateLabel = QtWidgets.QLabel()\n self.dateLabel.setFixedHeight(18)\n self.updateDateLabel()\n\n\n grid.addWidget(self.textEditor, 0, 0, 100, 1)\n grid.addWidget(self.instructLabel, 0, 1, 1, 1)\n grid.addWidget(self.dateLabel, 1, 1, 1, 1, QtCore.Qt.AlignRight)\n\n self.editStack.setLayout(grid)\n\n def viewStackUI(self):\n grid = QtWidgets.QGridLayout()\n grid.setSpacing(0) \n grid.setContentsMargins(2,2,2,2)\n self.setStyleSheet(\"background-color: white\");\n\n # Text viewer widget\n self.textViewer = QtWidgets.QPlainTextEdit(self)\n self.textViewer.setFrameStyle(QtWidgets.QFrame.NoFrame);\n\n self.textViewer.setReadOnly(True)\n\n # Directions widget\n self.instructLabel = QtWidgets.QLabel()\n self.instructLabel.setText(\"Press \" + (\"Cmd\" if platform.system() == \"Darwin\" else \"Ctrl\") + \"+E to edit your journal\")\n self.instructLabel.setFixedHeight(18)\n self.instructLabel.setStyleSheet('color: gray')\n # 
Current date widget\n self.dateLabel = QtWidgets.QLabel()\n self.dateLabel.setFixedHeight(18)\n self.updateDateLabel()\n\n self.backButton = QtWidgets.QPushButton(\"<\")\n self.backButton.clicked.connect(self.goBack)\n self.backButton.setFixedWidth(50)\n self.forwardButton = QtWidgets.QPushButton(\">\")\n self.forwardButton.clicked.connect(self.goForward)\n self.forwardButton.setFixedWidth(50)\n\n grid.addWidget(self.textViewer, 0, 0, 100, 1)\n grid.addWidget(self.instructLabel, 0, 1, 1, 2)\n grid.addWidget(self.dateLabel, 1, 1, 1, 2, QtCore.Qt.AlignRight)\n grid.addWidget(self.backButton, 2, 1, 1, 1, QtCore.Qt.AlignRight)\n grid.addWidget(self.forwardButton, 2, 2, 1, 1, QtCore.Qt.AlignRight)\n\n self.viewStack.setLayout(grid)\n\t\t\n def initSize(self):\n # Get user's computer resolution to properly size the window\n screen = QtWidgets.QDesktopWidget().screenGeometry()\n\n # Make the window height 9/10th's of the screen height\n self.height = (8/10) * screen.height()\n\t\t\n # Get the coordinates for a centered window\n centeredX = (screen.width() / 2) - (self.width / 2)\n centeredY = (screen.height() / 2) - (self.height / 2)\n\n # Change height based on screen\n self.setGeometry(centeredX, centeredY, self.width, self.height)\n\n def updateDateLabel(self):\n self.dateLabel.setText(time.strftime(\"%B %d, %Y\"))\n\n def goBack(self):\n journal = self.JournalController.back()\n\n if journal:\n self.textViewer.setPlainText(journal.text)\n self.dateLabel.setText(journal.date.strftime(\"%B %d, %Y\"))\n\n def goForward(self):\n journal = self.JournalController.forward()\n\n if journal:\n self.textViewer.setPlainText(journal.text)\n self.dateLabel.setText(journal.date.strftime(\"%B %d, %Y\"))\n\n def keyReleaseEvent(self, event):\n if self.stack.currentIndex() == 0:\n text = str(self.textEditor.toPlainText())\n self.JournalController.update(text)\n\n\n def closeProgram(self):\n QtWidgets.QApplication.quit() \n\n def changeView(self):\n if self.stack.currentIndex() == 0:\n 
self.stack.setCurrentIndex(1)\n journal = self.JournalController.back()\n journal = self.JournalController.forward()\n if journal:\n self.textViewer.setPlainText(journal.text)\n elif self.stack.currentIndex() == 1:\n self.stack.setCurrentIndex(0)\n","repo_name":"prattcmp/OpenJournal","sub_path":"UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27517961674","text":"import ldap\nfrom symantec.ssim.utils import ldaphelper\n\nclass Location:\n def __init__(self, search_result):\n self.dn = search_result.dn\n self.host = search_result.get_attr_values('host')[0]\n self.addresses = search_result.get_attr_values('symcIPAddresses')\n self.address = self.addresses[0]\n self.dn = search_result.get_dn()\n self.name = search_result.get_attr_values('dlmCaption')[0]\n #parse datetime\n self.install_date = search_result.get_attr_values('dlmInstallDate')[0]\n self.install_date = ldaphelper.parse_generalized_time(self.install_date)\n\n def __eq__(self,other):\n if other != None and self.dn == other.dn:\n return True\n else:\n return False\n \n def __hash__(self):\n return hash(self.dn)\n\n def __str__(self):\n return self.host\n\ndef all(l, base_dn, name_list = None):\n filter = '(objectclass=dlm1ComputerSystem)'\n if name_list and len(name_list) > 0:\n filter = '(&' + filter + '(|'\n for name in name_list:\n filter += '(dlmName=%s)' % name\n filter += '))'\n attrs = ['host','dlmInstallDate','dlmName', 'dlmCaption','symcIPAddresses']\n raw_res = l.search_s( \"ou=Locations,\"+base_dn, ldap.SCOPE_SUBTREE, filter, attrs)\n search_result = ldaphelper.get_search_results( raw_res )\n list = [Location(item) for item in search_result]\n return list\n\ndef client_dn(l, base_dn, clientAddress):\n filter = '(symcIPAddresses=%s)' % clientAddress\n attrs = ['host','dlmInstallDate','dlmName', 'dlmCaption', 'symcIPAddresses']\n raw_res = l.search_s( 
\"ou=Locations,\"+base_dn, ldap.SCOPE_SUBTREE, filter, attrs)\n search_result = ldaphelper.get_search_results( raw_res )\n return search_result[0].get_dn()\n\n\n","repo_name":"bioform/oldspice","sub_path":"ssim/utils/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"28628611628","text":"from bot.commons import gw2_guilds\nfrom bot.commons import discord_utils\nfrom bot.commons import discord_interactions\nfrom bot.commons import template_utils\nfrom . import templates\nfrom . import scheduled_lambda_utils\n\nannounce_prefix = '[ANNOUNCE]'\n\n\ndef handler_release_announcement(\n event,\n guilds_repo: gw2_guilds.Gw2GuildRepo,\n personality: discord_interactions.WebhookPersonality\n):\n \"\"\"\n Called when a successful deployment was made. The commit message (if marked for announcement)\n will be posted on the announcement channels.\n \"\"\"\n commit_message: str = event['commit_message']\n if commit_message.startswith(announce_prefix):\n trimmed_commit_message = commit_message.removeprefix(announce_prefix).strip()\n for guild in guilds_repo.find_all_guilds([\n gw2_guilds.announcement_channels_field_name,\n gw2_guilds.language_field_name\n ]):\n locale = scheduled_lambda_utils.get_guild_language_or_default(guild)\n announcement_message = template_utils.get_localized_template(templates.release_announcement, locale).format(\n emote_robot=discord_utils.default_emote('robot'),\n commit_message=trimmed_commit_message,\n emote_github=discord_utils.custom_emote('github', discord_utils.github_emote_id)\n )\n\n announcement_channels = scheduled_lambda_utils.get_guild_attribute_or_empty(guild, gw2_guilds.announcement_channels_field_name)\n scheduled_lambda_utils.post_to_announcement_channels(\n guild_id=scheduled_lambda_utils.get_guild_attribute_or_throw(guild, gw2_guilds.guild_id_field_name),\n 
announcement_channels=announcement_channels,\n message=announcement_message,\n personality=personality\n )\n else:\n print(f'The deployment with commit message \"{commit_message}\" was not marked for announcement, and is ignored.')\n","repo_name":"Gtomika/lambda-wvw-bot","sub_path":"bot/scheduled_lambda_function/release_handler.py","file_name":"release_handler.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39498799277","text":"from flask import Flask, request, jsonify, render_template\nimport load_nn_v2 as nn\nimport preprocess_v1 as pp\nfrom werkzeug.utils import secure_filename\n\n\napp = Flask(__name__)\n\n@app.route('/predict',methods=['GET', 'POST'])\ndef predict_api():\n '''\n For direct API calls through request\n '''\n f = request.files['file']\n filename = f.filename\n filePath = \"/tmp/\" + secure_filename(filename)\n f.save(filePath) \n \n #pre-process the audio file\n #pp.preprocess(filename)\n\n #prediction\n #predictions = nn.prediction(W, B)\n \n predictions = nn.predict(filename)\n \n return jsonify(predictions)\n \n@app.route('/prediction',methods=['GET', 'POST'])\ndef predict():\n '''\n For Testing\n '''\n f = request.files['file']\n filename = f.filename\n filePath = \"/tmp/\" + secure_filename(filename)\n f.save(filePath) \n \n #pre-process the audio file\n #pp.preprocess(filename)\n\n #prediction\n #predictions = nn.prediction(W, B)\n \n predictions = nn.predict(filename)\n \n return render_template('home.html',chat_in=predictions)\n\nW, B = nn.load()\n \nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"NehaDadarwala/Bird-Classification-Audio-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"588282902","text":"#User function Template for python3\n\nclass Solution:\n def 
AllParenthesis(self,n):\n #code here\n res = []\n def recurse(right,left,str1):\n if left > right:\n return\n if right == n and left == n:\n res.append(str1)\n return\n if right > n or left > n:\n return\n \n recurse(right+1,left,str1+\"(\")\n recurse(right,left+1,str1+\")\")\n \n recurse(0,0,\"\")\n return(list(set(res)))\n \n\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\n\n \nif __name__==\"__main__\":\n t=int(input())\n for i in range(0,t):\n n=int(input())\n ob=Solution()\n result=ob.AllParenthesis(n)\n result.sort()\n for i in range(0,len(result)):\n print(result[i])\n \n\n# } Driver Code Ends\n","repo_name":"UdhayaShan1/Geeksforgeeks","sub_path":"Recursion/Generate Parentheses.py","file_name":"Generate Parentheses.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"35361786544","text":"class Solution:\n def minDistance(self, word1, word2):\n \"\"\"\n >>> s=Solution()\n >>> s.minDistance(\"horse\",\"ros\")\n 3\n >>> s.minDistance(\"intention\", \"execution\")\n 5\n >>> s.minDistance(\"zoologicoarchaeologist\", \"zoogeologist\")\n 10\n\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n\n max_x = len(word1) + 1\n max_y = len(word2) + 1\n board = [[i] + list(range(1, max_x)) for i in range(max_y)]\n for y in range(1, max_y):\n for x in range(1, max_x):\n board[y][x] = min(board[y - 1][x] + 1, # 一波操作之后,word2 插入一个字符变 word1\n board[y][x - 1] + 1, # 一波操作之后,word1 插入一个字符变 word2\n board[y - 1][x - 1] + int(word1[x - 1] != word2[y - 1])) # 替换\n return board[-1][-1]\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n","repo_name":"CallMeNP/leetcode","sub_path":"solutions/072-edit-distance/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"14778387919","text":"\"\"\"\nОпределить, какие из слов 
«attribute», «класс», «функция», «type»\nневозможно записать в байтовом типе используя маркировку b.\n\"\"\"\n\n\ndef check_convert(word):\n \"\"\"\n функция проверки конвертации строки в байтовый тип, т.е. ��остоит ли она из символов ascii\n :param word: str\n :return: str, None\n \"\"\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n return word\n return None\n\n\nWORDS = ['attribute', 'класс', 'функция', 'type']\n\nNOT_CONVERTED = [word for word in WORDS if check_convert(word)]\n","repo_name":"alferovyuriy/geekbrains","sub_path":"Python(Advanced)/home_work_1/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"14207621164","text":"# Given an array of integers and an integer “K”. Return an array of integers keeping the\r\n# first element in the given array intact and the cyclical rotation of the successive elements. \r\n\r\n# For example:\r\n# Example 1:\r\n# Arr[] = {10, 20, 30, 40, 50} and K = 2 (Two cyclical rotations)\r\n# After 1st rotation = {10, 50, 20, 30, 40}\r\n# After 2nd rotation = {10, 40, 50, 20, 30}\r\n\r\n# Example 2:\r\n# Arr[] = {10, 20, 30, 40} and K = 1 (One cyclical rotation)\r\n# After 1st rotation = {10, 40, 20, 30}\r\n\r\n# Example 3:\r\n# Arr[] = {10, 20, 30} and K = 4 (four cyclical rotations)\r\n# After 1st rotation = {10, 30, 20}\r\n# After 2nd rotation = {10, 20, 30}\r\n# After 3rd rotation = {10, 30, 20}\r\n# After 4th rotation = {10, 20, 30}\r\n\r\n# Constraints:\r\n# 1 < N < = 100\r\n# -100 < = Arr[i] < = 100\r\n# 1 < = K < = 100\r\n\r\narr=[int(i) for i in input().split()] #list comprehension for list input in single line with spaces 10 20 30 40 50\r\nk=int(input())\r\nfor i in range(k):\r\n prev = arr[-1]\r\n for j in range(1,len(arr)):\r\n arr[j], prev = prev,arr[j] 
\r\nprint(arr)","repo_name":"JahnaviKommaraju/csd_sem5","sub_path":"Ex_02.py","file_name":"Ex_02.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"741438124","text":"# https://leetcode.com/problems/goal-parser-interpretation/\nclass Solution:\n def interpret(self, command: str) -> str:\n resp = []\n word = ''\n \n for letter in command:\n word += letter\n if word == \"G\":\n resp.append(\"G\")\n word = ''\n elif word == \"()\":\n resp.append('o')\n word = ''\n elif word == \"(al)\":\n resp.append('al')\n word = ''\n \n return ''.join(resp)\n","repo_name":"rodoufu/challenges","sub_path":"leetCode/string/GoalParserInterpretation.py","file_name":"GoalParserInterpretation.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"} +{"seq_id":"70681470580","text":"\"\"\"\r\nAdaptor for Raspberry Pi GPIO plugs.\r\n\r\nThe JSON config looks like this:\r\n\r\n { \"source\": {\r\n \"type\": \"gpio\",\r\n \"pin\": 18,\r\n \"low_state_block\": [0, 0],\r\n \"high_state_block\": [45, 0]\r\n },\r\n \r\n \"target\": {\r\n \"type\": \"gpio\",\r\n \"pin\": 12\r\n }\r\n }\r\n\"\"\"\r\n\r\nfrom rube_mc_pi.mcpi import block\r\nfrom rube_mc_pi.mcpi.block import Block\r\nimport RPi.GPIO as GPIO\r\nimport rube_mc_pi.rube as rube\r\n\r\n\r\n\r\nclass GpioSource(rube.Source): #pylint: disable=R0903\r\n \"\"\"\r\n Use the input from the raspberry pi GPIO pin.\r\n\t\tThe low_state_block is the block to report if the pin in low\r\n\t\tThe high_state_block is what to report if the pin is high\r\n \"\"\"\r\n \r\n @staticmethod\r\n def gpio_in_setup(pin):\r\n \"\"\"Set the pin up for input\"\"\"\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\r\n\r\n \r\n def __init__(self, attribs):\r\n super(GpioSource, self).__init__()\r\n self.pin = attribs[\"pin\"]\r\n 
self.low_state_block = Block(attribs[\"low_state_block\"][0], \r\n attribs[\"low_state_block\"][1])\r\n self.high_state_block = Block(attribs[\"high_state_block\"][0],\r\n attribs[\"high_state_block\"][1])\r\n GpioSource.gpio_in_setup(self.pin)\r\n\r\n def poll_state(self):\r\n if GPIO.input(self.pin):\r\n return self.low_state_block\r\n else:\r\n return self.high_state_block\r\n\r\nclass GpioTarget(rube.Target): #pylint: disable=R0903\r\n \"\"\"\r\n Simply if the state pass is Block.AIR (0,0) the turn output low.\r\n Any other block type put the output high\r\n \"\"\"\r\n \r\n @staticmethod\r\n def gpio_out_setup(pin):\r\n \"\"\"Set GPIO up for output and initialise it to low\"\"\"\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setup(pin, GPIO.OUT)\r\n GPIO.output(pin, GPIO.LOW)\r\n \r\n \r\n def __init__(self, attribs):\r\n super(GpioTarget, self).__init__()\r\n self.pin = attribs[\"pin\"]\r\n GpioTarget.gpio_out_setup(self.pin)\r\n\r\n def update_state(self, new_state):\r\n if new_state == block.AIR:\r\n GPIO.output(self.pin, GPIO.LOW)\r\n else:\r\n GPIO.output(self.pin, GPIO.HIGH)\n","repo_name":"davegoopot/rube-mc-pi","sub_path":"rube_mc_pi/gpio.py","file_name":"gpio.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"29888257929","text":"\"\"\"\nModule that contains the command line app.\n\nAssumed to be called with one command line argument -- the file to be checked.\nWhen called from the command line, the SigVerifyTooBig exception is not raised.\n\n\nLayout based on https://github.com/ionelmc/cookiecutter-pylibrary\n\"\"\"\n\nimport argparse\nimport subprocess\nimport sys\n# set up path for everything else\nimport fx_sig_verify\nfrom fx_sig_verify.validate_moz_signature import (MozSignedObject,\n SigVerifyException)\n\n\nclass MozSignedObjectViaCLI(MozSignedObject):\n def __init__(self, fname=None, *args, **kwargs):\n super(type(self), self).__init__(*args, 
**kwargs)\n self.artifact_name = fname\n self.url = \"file://{}\".format(fname)\n\n def get_location(self):\n \"For S3, we need the bucket & key names\"\n return self.bucket, self.key\n\n def report_validity(self, valid):\n \"\"\"\n For invoked cli functions, we have 2 report channels:\n 1. print to stdout\n 2. exit code\n\n The severity of any failure controls the what & where.\n Any filtering or special casing should probably be applied in this\n function. (E.g. excluding any artifacts from rules.)\n \"\"\"\n if self.verbose:\n print(self.format_message())\n\n def summary(self):\n json_info = {\n 'bucket': self.bucket_name,\n 'key': self.key_name,\n 'status': self.get_status(),\n 'results': self.errors + self.messages,\n }\n return json_info\n\n def get_flo(self):\n flo = open(self.artifact_name, 'rb')\n return flo\n\n def process_one_local_file(self):\n if self.verbose:\n print('Processing {}'.format(self.artifact_name))\n try:\n valid_sig = self.check_exe()\n except Exception as e:\n valid_sig = False\n if isinstance(e, SigVerifyException):\n self.add_error(\"Exception {}\".format(type(e).__name__))\n else:\n self.add_error(\"failed to process local file {} '{}'\"\n .format(self.artifact_name, repr(e)))\n self.set_status(\"pass\" if valid_sig else \"fail\")\n return valid_sig\n\n\ndef parse_args(cmd_line=None):\n parser = argparse.ArgumentParser(description='Check executable validity.')\n parser.add_argument('--version', action='version',\n version=\"%(prog)s \" + fx_sig_verify.__version__,\n help='print version and exit')\n parser.add_argument('suspect', help='file to check for validity',\n nargs=1)\n args = parser.parse_args(cmd_line)\n return args\n\n\ndef main(cmd_line=None):\n \"\"\"\n Check if the file specified on the command line is a valid Mozilla\n executable for Windows\n\n :param filename: path to ``exe`` file\n :returns result_code: 0 if no failure, per unix conventions\n \"\"\"\n MozSignedObject.set_verbose(True)\n 
MozSignedObject.set_production_criteria(False)\n found_bad_file = False\n args = parse_args(cmd_line=cmd_line)\n for arg in args.suspect:\n artifact = MozSignedObjectViaCLI(arg)\n try:\n valid = artifact.process_one_local_file()\n except SigVerifyException:\n valid = False\n artifact.report_validity(valid)\n if not valid:\n found_bad_file = True\n raise SystemExit(1 if found_bad_file else 0)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"mozilla-services/fx-sig-verify","sub_path":"src/fx_sig_verify/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"32373411608","text":"\"\"\"\nsolvency2: Life SCR radar chart\n=====================================\n\nThis script draws radar charts for selected policies.\nEach spoke of the radar chart represents a sub risk of SCR life risks,\nand the sizes of the sub risks by duration are drawn in the radar chart.\n\n.. 
seealso::\n * The :mod:`~solvency2` library\n\"\"\"\nimport modelx as mx\nimport pandas as pd\nfrom draw_charts_radar import draw_radar\n\nmodel = mx.read_model(\"model\")\nscr = model.SCR_life\nrisks = ('mort', 'longev', 'disab', 'exps', 'lapse')\nscenid = 1\n\n\ndef draw(polid):\n\n data = {}\n for t in range(0, 20, 5):\n data['t=' + str(t)] = scr[t, polid, scenid].Life.to_series(risks)\n \n draw_radar(pd.DataFrame(data), \n ax_title='Policy ID: ' + str(polid),\n fig_title='SCR Life Risks')\n\nfor i in (41, 171):\n draw(i)\n\n\n","repo_name":"lifelib-dev/lifelib","sub_path":"lifelib/projects/solvency2/plot_scr_radar.py","file_name":"plot_scr_radar.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"57"} +{"seq_id":"502528395","text":"from pyVmomi import vim\nfrom typing import List\n\nfrom .vsphere_base import VSphereBase\nfrom .vsphere_datastore import VSphereDatastore\nfrom .vsphere_network import VSphereNetwork\n\n\nclass VSphereHost(VSphereBase):\n def __init__(self, host: vim.HostSystem, **kwargs):\n if not isinstance(host, vim.HostSystem):\n raise ValueError(f\"Parameter 'host' is not of type 'vim.HostSystem'\")\n super().__init__(name=host.name, **kwargs)\n\n # Internal variables\n self.__vim_host = host\n self.__datastores = None\n self.__networks = None\n\n @property\n def datastores(self) -> List[VSphereDatastore]:\n if not self.__datastores:\n self.__datastores = sorted(\n [\n VSphereDatastore(datastore=entity)\n for entity in self.__vim_host.datastore\n ],\n key=lambda x: x.name,\n )\n return self.__datastores\n\n def find_datastore(self, name: str) -> VSphereDatastore:\n return next((x for x in self.datastores if x.name == name), None)\n\n @property\n def networks(self) -> List[VSphereNetwork]:\n if not self.__networks:\n self.__networks = sorted(\n [VSphereNetwork(network=entity) for entity in self.__vim_host.network],\n key=lambda x: x.name,\n )\n return 
self.__networks\n\n def find_network(self, name: str) -> VSphereNetwork:\n return next(\n (x for x in self.networks if x.name == name),\n None,\n )\n","repo_name":"cleeistaken/automation-mssql-linux","sub_path":"lib/mssql-linux-config/src/vsphere_host.py","file_name":"vsphere_host.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"10754608039","text":"from datetime import datetime, timedelta\nimport threading\n\n# For active windows\nfrom win32gui import GetWindowText, GetForegroundWindow\n\n# Listeners for Mouse and Keyboard Events\nfrom mouse_listener import MouseListener\nfrom keyboard_listener import KeyboardListener\n\n# Logging\nfrom terminaltables import AsciiTable\nfrom textwrap import wrap\nimport logging, time, requests\n\nfrom allFiles import FileManager\n\n'''\nfrom window_listener import WindowListener\n'''\n\n\n\nlogging.basicConfig(filename='app.log', format='%(message)s', level=logging.INFO)\n\nclass ActivityMonitor:\n\n\tdef __init__(self):\n self.FM = FileManager()\n self.FM.allFiles(self.FM.basepath)\n self.ML = MouseListener()\n self.KL = KeyboardListener()\n self.start_time = datetime.now()\n self.report_interval_secs = 30 # seconds\n self.update_interval_secs = 5 # seconds\n self.total_clicks = 0\n self.total_keystrokes = 0\n self.last_time_stamp = self.start_time\n self.application_id = '{}{}{}{}{}{}'.format(\n\t\t\t\t\t\t\t\tself.start_time.month,\n\t\t\t\t\t\t\t\tself.start_time.day,\n\t\t\t\t\t\t\t\tself.start_time.year,\n\t\t\t\t\t\t\t\tself.start_time.hour,\n\t\t\t\t\t\t\t\tself.start_time.minute,\n\t\t\t\t\t\t\t\tself.start_time.second\n\t\t\t\t\t\t\t)\n\n self.reports = []\n self.last_report_time = self.start_time\n self.url = 'http://lviv.ixioo.com:8001/ActivityTracking'\n print ('Application ID {} initialised at {}'.format(self.application_id, self.start_time))\n\n\tdef get_opened_files(self, active_window):\n res = []\n for file 
in self.FM.files:\n if file in active_window:\n res.append(self.FM.files[file])\n if res == []:\n return ['None']\n return res\n\n\tdef log(self):\n\n\t\ttable_data =[\n\t\t\t['Active Window', ''],\n\t\t\t['Keyboard Strokes', ''],\n\t\t\t['Mouse Clicks', ''],\n\t\t\t['Timestamp', ''],\n ['Files Open', '']\n\t\t]\n\n\n\t\ttable = AsciiTable(table_data)\n\t\tmax_width = table.column_max_width(1)\n\n\t\tactive_window = GetWindowText(GetForegroundWindow())\n\n\t\tactive_window_str = '\\n'.join(wrap(active, max_width))\n\t\ttable.table_data[0][1] = active_window_str\n\n\t\tkeyboard_strokes_str = '\\n'.join(wrap(str(self.KL.strokes), max_width))\n\t\ttable.table_data[1][1] = keyboard_strokes_str\n\n\t\tmouse_clicks_str = '\\n'.join(wrap(str(self.ML.clicks), max_width))\n\t\ttable.table_data[2][1] = mouse_clicks_str\n\n\t\ttime_stamp_str = '\\n'.join(wrap(str(self.last_time_stamp + timedelta(seconds=self.interval)), max_width))\n\t\ttable.table_data[3][1] = time_stamp_str\n\n\t\tfiles_opened_str = '\\n'.join(wrap('; '.join(self.get_opened_files(active)), max_width))\n\t\ttable.table_data[4][1] = files_opened_str\n\n\n\t\tlogging.info(table.table)\n\n\tdef send_report(self, report):\n\t\trequests.post(\n\t\t\turl=self.url,\n\t\t\tdata=report\n\t\t)\n\n\n\tdef send_reports(self):\n\t\tr = requests.post(\n url=self.url,\n\t\t \tjson=self.reports\n\t\t)\n\t\tprint ('TOTAL: {}'.format(len(self.reports)))\n\t\tprint (self.reports)\n\t\tprint (r.text)\n\t\tself.last_report_time = datetime.now()\n\t\tself.reports = []\n\n\tdef reset(self):\n\t\tself.ML.reset_clicks()\n\t\tself.KL.reset_strokes()\n\t\tself.last_time_stamp = datetime.now()\n\n\tdef update(self):\n\t\tself.total_clicks += self.ML.get_clicks()\n\t\tself.total_keystrokes += self.KL.get_strokes()\n\n\t\treport_time = datetime.now()\n\t\tactive_window = GetWindowText(GetForegroundWindow())\n\t\topened_files = [s.replace('\\\\', '\\\\\\\\') for s in self.get_opened_files(active_window)]\n\t\treport = 
{\n\t\t\t\"ApplicationID\": self.application_id,\n\t\t\t\"InfoDataTime\": str(report_time),\n\t\t\t\"InfoDuration\": (report_time - self.start_time).seconds,\n\t\t\t\"TitleActiveWindows\": str(active_window),\n\t\t\t\"MouseClicks\": self.total_clicks,\n\t\t\t\"KeysPressed\": self.total_keystrokes,\n\t\t\t\"OpenDocuments\": opened_files\n\t\t}\n\t\t#print (report)\n\n\t\t# send report\n\t\tself.reports.append(report)\n\n\n\n\tdef run(self):\n\t\twhile True:\n\t\t\tnow = datetime.now()\n\t\t\tupd_diff = now - self.last_time_stamp\n\t\t\trep_diff = now - self.last_report_time\n\t\t\t# print (upd_diff.seconds, rep_diff.seconds)\n\t\t\tif upd_diff.seconds >= self.update_interval_secs:\n\t\t\t\t# self.log()\n\t\t\t\tself.update()\n\t\t\t\tself.reset()\n\n\t\t\tif rep_diff.seconds >= self.report_interval_secs:\n\t\t\t\tself.send_reports()\n\t\t\t\t\n\t\t\ttime.sleep(1)\n\t\t\t\n\n\n\n\tdef monitor(self):\n\t\tt0 = threading.Thread(target=self.run).start()\n\t\tt1 = threading.Thread(target=self.ML.start).start()\n\t\tt2 = threading.Thread(target=self.KL.start).start()\n\nif __name__ == '__main__':\n\tam = ActivityMonitor()\n\tam.monitor()\n","repo_name":"arnavkohli/ActivityMonitorFinal","sub_path":"activity_monitor.py","file_name":"activity_monitor.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71164092980","text":"import PyQt5.uic\n\nimport os.path\nimport sys\n\nfrom krita import Resource\n\nfrom PyQt5.Qt import *\nfrom PyQt5.QtCore import (\n pyqtSignal as Signal\n )\nfrom PyQt5.QtWidgets import (\n QWidget\n )\n\nfrom ..modules.utils import loadXmlUi\nfrom ..modules.resutils import (\n ManagedResourceTypes,\n ManagedResource,\n DBManagedResources,\n ManagedResourcesModel\n )\nfrom ..modules.iconsizes import IconSizes\nfrom ..modules.imgutils import buildIcon\nfrom ..pktk import *\nfrom .wlineedit import WLineEdit\nfrom .wtaginput import WTagInput\n\n\nclass 
ManagedResourcesProxyModel(QSortFilterProxyModel):\n \"\"\"A proxy model to manage filtering\"\"\"\n\n FILTER_TAG_COMBINATION_AND = 0\n FILTER_TAG_COMBINATION_OR = 1\n\n def __init__(self, parent=None):\n super(ManagedResourcesProxyModel, self).__init__(parent)\n\n self.__tagIdList = []\n self.__tagCombination = ManagedResourcesProxyModel.FILTER_TAG_COMBINATION_AND\n\n self.setSortCaseSensitivity(Qt.CaseInsensitive)\n self.setFilterCaseSensitivity(Qt.CaseInsensitive)\n self.setRecursiveFilteringEnabled(False)\n\n def filterAcceptsRow(self, sourceRow, sourceParent):\n defaultRule = super(ManagedResourcesProxyModel, self).filterAcceptsRow(sourceRow, sourceParent)\n if not defaultRule:\n # default rule exclude row, no need to continue\n return False\n\n sourceModel = self.sourceModel()\n modelIndex = sourceModel.index(sourceRow, 0, sourceParent)\n\n if len(self.__tagIdList) == 0:\n # we have no search based on tags, then validate it\n return True\n\n tagList = modelIndex.data(ManagedResourcesModel.ROLE_TAGSID)\n if len(tagList) == 0:\n # no tag for item\n # we have a search based on tags, then exclude it\n return False\n\n for tag in self.__tagIdList:\n if self.__tagCombination == ManagedResourcesProxyModel.FILTER_TAG_COMBINATION_AND:\n if tag not in tagList:\n # expected tag not in resource tags\n # exclude item\n return False\n else:\n if tag in tagList:\n # expected tag in resource tags\n # validate item\n return True\n\n if self.__tagCombination == ManagedResourcesProxyModel.FILTER_TAG_COMBINATION_AND:\n # for AND combination, being here means that all tags were found in resource tags\n return True\n else:\n # for OR combination, being here means that no tags were found in resource tags\n return False\n\n def filterTags(self):\n \"\"\"Return list of tag id used for filter\"\"\"\n return self.__tagIdList\n\n def setFilterTag(self, tagIdList):\n \"\"\"Set list of tag id used for filter\"\"\"\n # tag id are stored as string, need an integer\n self.__tagIdList = 
[int(tagId) for tagId in tagIdList]\n self.invalidateFilter()\n\n def filterTagCombination(self):\n \"\"\"Return filter tag combination rule\"\"\"\n return self.__tagCombination\n\n def setFilterTagCombination(self, value):\n \"\"\"Set filter tag combination rule\"\"\"\n self.__tagCombination = value\n self.invalidateFilter()\n\n\nclass WManagedResourcesLv(QListView):\n\n iconSizeIndexChanged = Signal(int, QSize)\n\n def __init__(self, parent=None):\n super(WManagedResourcesLv, self).__init__(parent)\n\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)\n self.setResizeMode(QListView.Adjust)\n self.setUniformItemSizes(True)\n self.setAutoScroll(True)\n self.setSpacing(0)\n\n self.__sourceModel = ManagedResourcesModel()\n\n self.__managedResourcesProxyModel = ManagedResourcesProxyModel(self)\n self.__managedResourcesProxyModel.setSourceModel(self.__sourceModel)\n self.__managedResourcesProxyModel.setFilterRole(ManagedResourcesModel.ROLE_NAME)\n\n self.setModel(self.__managedResourcesProxyModel)\n\n self.__iconSize = IconSizes([32, 64, 96, 128, 192, 256, 384])\n self.setIconSizeIndex(3)\n self.setViewMode(QListView.IconMode)\n\n def __setSelectedItem(self, resource):\n \"\"\"Select item if found in model, otherwise do nothing\"\"\"\n index = self.__sourceModel.getResource(resource, True)\n if index is not None:\n self.selectionModel().select(index, QItemSelectionModel.Select)\n\n def wheelEvent(self, event):\n \"\"\"Manage zoom level through mouse wheel\"\"\"\n if event.modifiers() & Qt.ControlModifier:\n if event.angleDelta().y() > 0:\n # Zoom in\n sizeChanged = self.__iconSize.next()\n else:\n # zoom out\n sizeChanged = self.__iconSize.prev()\n\n if sizeChanged:\n self.setIconSizeIndex()\n else:\n super(WManagedResourcesLv, self).wheelEvent(event)\n\n def iconSizeIndex(self):\n \"\"\"Return current icon size index\"\"\"\n return self.__iconSize.index()\n\n def setIconSizeIndex(self, index=None):\n \"\"\"Set icon size from index 
value\"\"\"\n if index is None or self.__iconSize.setIndex(index):\n # new size defined\n iconSizeValue = self.__iconSize.value()\n if self.__sourceModel.resourceType() == ManagedResourceTypes.RES_GRADIENTS:\n iconSize = QSize(iconSizeValue << 1, iconSizeValue)\n else:\n iconSize = QSize(iconSizeValue, iconSizeValue)\n\n self.setGridSize(iconSize)\n self.setIconSize(iconSize)\n self.iconSizeIndexChanged.emit(self.__iconSize.index(), iconSize)\n\n def selectedItems(self):\n \"\"\"Return a list of selected brushes items\"\"\"\n returned = []\n if self.selectionModel():\n for item in self.selectionModel().selectedIndexes():\n resource = item.data(ManagedResourcesModel.ROLE_MANAGEDRESOURCE)\n if resource is not None:\n returned.append(resource)\n return returned\n\n def setSelectedItems(self, resources):\n \"\"\"Set selected resources\n\n given `resources` can be:\n - None (clear selection)\n - A list\n - An integer (then, represent an Id)\n - A tuple (name, fileName)\n - A ManagedResource\n - A Resource\n \"\"\"\n if not (isinstance(resources, (list, ManagedResource, int, tuple, Resource)) or resources is None):\n raise EInvalidType(\"Given `resources` is not valid\")\n\n if not self.selectionModel():\n return\n\n if isinstance(resources, list):\n self.selectionModel().clearSelection()\n for resource in resources:\n self.__setSelectedItem(resource)\n elif resources is None or isinstance(resources, ManagedResource) and resources.id() is None:\n self.selectionModel().clearSelection()\n else:\n self.__setSelectedItem(resources)\n\n def nbSelectedItems(self):\n \"\"\"Return number of selected items\"\"\"\n return len(self.selectedItems())\n\n def setViewMode(self, value):\n \"\"\"Set if if view is icon mode\"\"\"\n super(WManagedResourcesLv, self).setViewMode(value)\n if self.viewMode() == QListView.IconMode:\n self.__sourceModel.setDisplayName(False)\n else:\n self.__sourceModel.setDisplayName(True)\n\n def resourceType(self):\n \"\"\"return current managed resource 
type\"\"\"\n return self.__sourceModel.__resourceType()\n\n def setResourceType(self, value):\n \"\"\"set current managed resource type\"\"\"\n self.__sourceModel.updateResources(value)\n # force icon size to be recalculated\n self.setIconSizeIndex()\n\n\nclass WManagedResourcesSelector(QWidget):\n \"\"\"A widget to browse resources\n\n +---------------------------------------------------------------------------------- tags entry\n |\n |\n | +------------------------------------------------------------------------------ search text entry\n | |\n | |\n | | +----------------------------------+---+\n | | | | V | <--------------------------------- resource type combobox\n | | +----------------------------------+---+\n | | +-------------------------------+ +---+\n | +-> | xxxx | | | <--------------------------------- search text entry 'regular expression mode' option\n | +-------------------------------+ +---+\n | +-------------------------------+ +---+\n +-----> | xxxx | | | <--------------------------------- tag popup menu 'Match all tags (AND)' or 'Match on of tags (OR)'\n +-------------------------------+ +---+\n +--------------------------------------+\n | |\n | | <--------------------------------- Resources listview\n | |\n | |\n | |\n | |\n | |\n | |\n | |\n | |\n +--------------------------------------+\n +------+ +---+\n Found: XXX |..*...| | | <--------------------------------- listview popup menu (icon mode/list mode | square icon|rect icon)\n ^ +------+ +---+\n | ^\n | |\n | +--------------------------------------------- Slider to define icon size\n |\n +------------------------------------------------------------------ Number of found items\n\n \"\"\"\n # selected item changed, provide a list of ManagedResource\n selectionChanged = Signal(list)\n\n # Trigerred when filter is applied (value >= 0 provide number of found items)\n # Trigerred when filter is removed (value == -1)\n filterChanged = Signal(int)\n\n # Trigerred when resources are loaded\n 
resourcesLoaded = Signal(ManagedResourceTypes, int)\n\n def __init__(self, parent=None):\n super(WManagedResourcesSelector, self).__init__(parent)\n\n uiFileName = os.path.join(os.path.dirname(__file__), '..', 'resources', 'wmanagedresourcesselector.ui')\n\n # temporary add path to sys.path to let 'pktk.widgets.xxx' being accessible during xmlLoad()\n # because of WLineEdit and WTagInput, path that must be absolute in UI file\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n loadXmlUi(uiFileName, self)\n\n # remove temporary added path\n sys.path.pop()\n\n self.__model = self.lvManagedResources.model()\n self.__resourceType = ManagedResourceTypes.RES_GRADIENTS\n self.__resourceTypes = [ManagedResourceTypes.RES_GRADIENTS]\n\n self.__loadResources()\n self.cbResourceType.setVisible(False)\n self.leFilterName.textEdited.connect(self.__updateFilter)\n self.wtiFilterTag.tagSelection.connect(self.__updateFilter)\n self.tbFilterNameRegEx.toggled.connect(self.__updateFilter)\n self.hsManagedResourcesIconSize.valueChanged.connect(self.__iconSizeIndexSliderChanged)\n self.lvManagedResources.iconSizeIndexChanged.connect(self.__iconSizeIndexChanged)\n self.lvManagedResources.selectionModel().selectionChanged.connect(self.__selectionChanged)\n self.cbResourceType.currentIndexChanged.connect(self.__updateResourceType)\n\n self.__initPopupMenu()\n\n def __initPopupMenu(self):\n \"\"\"Initialise popup menu for toolbuttons\"\"\"\n self.__actionViewModeGroup = QActionGroup(self)\n self.__actionViewModeList = QAction(buildIcon('pktk:list_view_details'), i18n(\"List view\"))\n self.__actionViewModeList.setCheckable(True)\n self.__actionViewModeList.setChecked(False)\n self.__actionViewModeList.setActionGroup(self.__actionViewModeGroup)\n self.__actionViewModeList.toggled.connect(self.__viewModeChanged)\n self.__actionViewModeIcon = QAction(buildIcon('pktk:list_view_icon'), i18n(\"Icon view\"))\n self.__actionViewModeIcon.setCheckable(True)\n 
self.__actionViewModeIcon.setChecked(True)\n self.__actionViewModeIcon.setActionGroup(self.__actionViewModeGroup)\n self.__actionViewModeIcon.toggled.connect(self.__viewModeChanged)\n\n self.__menuViewMode = QMenu(self.tbManagedResourcesViewMode)\n self.__menuViewMode.addAction(self.__actionViewModeList)\n self.__menuViewMode.addAction(self.__actionViewModeIcon)\n self.tbManagedResourcesViewMode.setMenu(self.__menuViewMode)\n\n self.__viewModeChanged()\n\n self.__actionFilterTagModeGroup = QActionGroup(self)\n self.__actionFilterTagModeAnd = QAction(buildIcon('pktk:sign_logical_and'), i18n(\"Match all tags (AND)\"))\n self.__actionFilterTagModeAnd.setCheckable(True)\n self.__actionFilterTagModeAnd.setChecked(False)\n self.__actionFilterTagModeAnd.setActionGroup(self.__actionFilterTagModeGroup)\n self.__actionFilterTagModeAnd.toggled.connect(self.__FilterTagModeChanged)\n self.__actionFilterTagModeOr = QAction(buildIcon('pktk:sign_logical_or'), i18n(\"Match any tag (OR)\"))\n self.__actionFilterTagModeOr.setCheckable(True)\n self.__actionFilterTagModeOr.setChecked(True)\n self.__actionFilterTagModeOr.setActionGroup(self.__actionFilterTagModeGroup)\n self.__actionFilterTagModeOr.toggled.connect(self.__FilterTagModeChanged)\n\n self.__menuFilterTagMode = QMenu(self.tbManagedResourcesViewMode)\n self.__menuFilterTagMode.addAction(self.__actionFilterTagModeAnd)\n self.__menuFilterTagMode.addAction(self.__actionFilterTagModeOr)\n self.tbFilterTagRules.setMenu(self.__menuFilterTagMode)\n\n self.__FilterTagModeChanged()\n\n def __loadResources(self):\n \"\"\"Initialise resource listview\"\"\"\n self.lvManagedResources.setResourceType(self.__resourceType)\n sourceModel = self.__model.sourceModel()\n\n # build tag list from ALL resource (even filtered one, so use source model)\n allTags = []\n for rowNumber in range(sourceModel.rowCount()):\n modelIndex = sourceModel.index(rowNumber, 0)\n tagsList = sourceModel.data(modelIndex, ManagedResourcesModel.ROLE_TAGS)\n\n for tag in 
tagsList:\n if tag not in allTags:\n # tag id must be \n allTags.append((f\"{tag[0]}\", tag[1]))\n\n self.wtiFilterTag.setAvailableTags(allTags)\n self.wFilterTag.setVisible(len(allTags) > 0)\n self.__updateFilter()\n self.resourcesLoaded.emit(self.__resourceType, sourceModel.rowCount())\n\n def __updateFilter(self):\n \"\"\"Filter definition has been modified, need to apply it\"\"\"\n if self.tbFilterNameRegEx.isChecked():\n regEx = QRegularExpression(self.leFilterName.text(), QRegularExpression.CaseInsensitiveOption)\n self.__model.setFilterRegularExpression(regEx)\n else:\n self.__model.setFilterFixedString(self.leFilterName.text())\n\n self.__model.setFilterTag(self.wtiFilterTag.selectedTags())\n\n nbFound = self.lvManagedResources.model().rowCount()\n self.lblManagedResourcesFoundItems.setText(f\"{nbFound}\")\n\n if self.leFilterName.text() != '' or len(self.wtiFilterTag.selectedTags()) > 0:\n self.filterChanged.emit(nbFound)\n else:\n self.filterChanged.emit(-1)\n\n def __iconSizeIndexSliderChanged(self, newSize):\n \"\"\"Icon size has been changed from slider\"\"\"\n self.lvManagedResources.setIconSizeIndex(newSize)\n\n def __iconSizeIndexChanged(self, newSize, newQSize):\n \"\"\"Icon size has been changed from listview\"\"\"\n self.hsManagedResourcesIconSize.setValue(newSize)\n\n def __viewModeChanged(self):\n \"\"\"View mode Icon/List has changed\"\"\"\n if self.__actionViewModeList.isChecked():\n self.tbManagedResourcesViewMode.setIcon(self.__actionViewModeList.icon())\n self.lvManagedResources.setViewMode(QListView.ListMode)\n else:\n self.tbManagedResourcesViewMode.setIcon(self.__actionViewModeIcon.icon())\n self.lvManagedResources.setViewMode(QListView.IconMode)\n\n def __FilterTagModeChanged(self):\n \"\"\"Filter tag mode AND/OR has changed\"\"\"\n if self.__actionFilterTagModeAnd.isChecked():\n self.tbFilterTagRules.setIcon(self.__actionFilterTagModeAnd.icon())\n 
self.lvManagedResources.model().setFilterTagCombination(ManagedResourcesProxyModel.FILTER_TAG_COMBINATION_AND)\n else:\n self.tbFilterTagRules.setIcon(self.__actionFilterTagModeOr.icon())\n self.lvManagedResources.model().setFilterTagCombination(ManagedResourcesProxyModel.FILTER_TAG_COMBINATION_OR)\n\n def __updateResourceType(self, index):\n \"\"\"Update resource type from cbResourceType\"\"\"\n self.setResourceType(self.cbResourceType.currentData())\n\n def __selectionChanged(self, selected=None, deselected=None):\n \"\"\"Selected item has changed\"\"\"\n self.selectionChanged.emit(self.lvManagedResources.selectedItems())\n\n def resourceType(self):\n \"\"\"Return current managed resource type\"\"\"\n return self.__resourceType\n\n def setResourceType(self, value):\n \"\"\"Set current managed resource type\"\"\"\n if isinstance(value, ManagedResourceTypes) and value != self.__resourceType and value in self.__resourceTypes:\n self.__resourceType = value\n self.__loadResources()\n\n def resourceTypes(self):\n \"\"\"Return list of managed resource types \"\"\"\n return self.__resourceTypes\n\n def setResourceTypes(self, values):\n \"\"\"Set list of managed resource types\n\n Given `value` is a or a list of \n If more than one is provided, widget will display a combobox to let user chose resource type\n \"\"\"\n if isinstance(values, ManagedResourceTypes):\n values = [values]\n elif not isinstance(values, (list, tuple)):\n raise EInvalidType(\"Given `values` must be a or a list of \")\n\n self.__resourceTypes = []\n\n for value in values:\n if isinstance(value, ManagedResourceTypes) and value not in self.__resourceTypes:\n self.__resourceTypes.append(value)\n else:\n raise EInvalidType(\"Given `values` items must be \")\n\n if len(self.__resourceTypes) == 0:\n raise EInvalidValue(\"At least one resource type must be provided\")\n\n if len(self.__resourceTypes) > 1:\n self.cbResourceType.clear()\n for resource in sorted(self.__resourceTypes, key=lambda value: 
value.value):\n self.cbResourceType.addItem(resource.label(), resource)\n self.cbResourceType.setVisible(True)\n else:\n self.cbResourceType.setVisible(False)\n\n if self.__resourceType not in self.__resourceTypes:\n self.setResourceType(self.__resourceTypes[0])\n\n def selectionMode(self):\n \"\"\"Return current selection mode\"\"\"\n return self.lvManagedResources.selectionMode()\n\n def setSelectionMode(self, value):\n \"\"\"Set current selection mode\"\"\"\n self.lvManagedResources.setSelectionMode(value)\n\n def viewMode(self):\n \"\"\"Return current view mode\"\"\"\n return self.lvManagedResources.viewMode()\n\n def setViewMode(self, value):\n \"\"\"Set current selection mode\"\"\"\n if value == QListView.ListMode:\n self.__actionViewModeList.setChecked(True)\n else:\n self.__actionViewModeIcon.setChecked(True)\n\n def iconSizeIndex(self):\n \"\"\"Return current view mode\"\"\"\n return self.lvManagedResources.iconSizeIndex()\n\n def setIconSizeIndex(self, value):\n \"\"\"Set current selection mode\"\"\"\n self.lvManagedResources.setIconSizeIndex(value)\n\n def setSelectedResources(self, resources):\n \"\"\"Set selected resources\n\n given `resources` can be:\n - None\n - A list\n - An integer (then, represent an Id)\n - A tuple (name, fileName)\n - A ManagedResource\n - A Resource\n \"\"\"\n self.lvManagedResources.setSelectedItems(resources)\n\n def selectedResources(self):\n \"\"\"Return a list of selected resources\"\"\"\n return self.lvManagedResources.selectedItems()\n\n def selectedResourcesCount(self):\n \"\"\"Return number of selected resources\"\"\"\n return self.lvManagedResources.selectedItems()\n\n def resources(self, filtered=False):\n \"\"\"Return list of current resources\n\n If filtered is True, return only resources available through current filter, if any active/applied\n \"\"\"\n returned = []\n model = self.lvManagedResources.model()\n if not filtered:\n model = model.sourceModel()\n\n for row in range(model.rowCount()):\n index = 
model.index(row, 0)\n returned.append(model.data(index, ManagedResourcesModel.ROLE_MANAGEDRESOURCE))\n\n return returned\n","repo_name":"Grum999/BuliBrushSwitch","sub_path":"bulibrushswitch/bulibrushswitch/pktk/widgets/wmanagedresourcesselector.py","file_name":"wmanagedresourcesselector.py","file_ext":"py","file_size_in_byte":21863,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"57"} +{"seq_id":"17963095125","text":"from pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\n\n# For reading data from files on any file system compatible with the HDFS API\n# (that is, HDFS, S3, NFS, etc.), a DStream can be created as:\n# streamingContext.textFileStream(dataDirectory)\n#\n# Spark Streaming will monitor the directory dataDirectory and process any\n# files created in that directory (files written in nested directories not\n# supported). Note that\n# - The files must have the same data format.\n# - The files must be created in the dataDirectory by atomically moving or\n# renaming them into the data directory.\n# - Once moved, the files must not be changed. 
So if the files are being\n# continuously appended, the new data will not be read.\n#\n# File streams do not require running a receiver, hence does not require\n# allocating cores.\n\nsc = SparkContext(\"local[2]\", \"File Stream\")\nssc = StreamingContext(sc, 1)\n\nlines = ssc.textFileStream(\"fileStreamSource\")\n\nwords = lines.flatMap(lambda line: line.split(\" \"))\npairs = words.map(lambda word: (word, 1))\n\ncounts = pairs.reduceByKey(lambda x, y: x + y)\ncounts.pprint()\n\nssc.start()\nssc.awaitTermination()\n","repo_name":"chvillap/spark-scripts","sub_path":"spark_streaming/file_stream.py","file_name":"file_stream.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26012392949","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\nfrom bases import LogicError, InternalError\n\nFILE_INFO_DIR = 0\nFILE_INFO_FILE = 1\n\ndef is_address(ip, port):\n # 判断输入是否为一个IP地址\n assert type(ip) == str, 'input ip not a string'\n assert type(port) == int, 'input port not a number'\n ip_re = re.compile('^((25[0-5]|2[0-4]\\d|[01]?\\d?\\d)\\.){3}((25[0-5]|2[0-4]\\d|[01]?\\d?\\d))$')\n \n if not ip_re.match(ip):\n return False\n if port < 0 or port > 65535:\n return False\n return True\n\ndef parse_file_info(file_str):\n '''\n 解析形如\n -rw-r--r-- 1 1000 121 5689110 Oct 31 13:54 bbb.pdf\n 格式的字符串\n 返回json字典,type、size、name字段分别表示:类型,大小,名字\n '''\n file_re = re.compile('^(\\S*)\\s*\\S*\\s*\\S*\\s*\\S*\\s*(\\S*)\\s*\\S*\\s*\\S*\\s*\\S*\\s*(.*)$')\n match_ans = file_re.match(file_str)\n if len(match_ans.groups()) < 3:\n raise InternalError('parse file info accept wrong input: ' + file_str)\n \n info = {}\n if match_ans.groups()[0][0] == 'd':\n info['type'] = FILE_INFO_DIR\n else:\n info['type'] = FILE_INFO_FILE\n \n info['size'] = int(match_ans.groups()[1])\n info['name'] = match_ans.groups()[2]\n return 
info","repo_name":"wonderfulnx/FTP","sub_path":"py_client/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1890116541","text":"# encoding=utf-8\n\nimport os, sys, json, types\n\nDIR_ROOT = os.path.dirname(os.path.abspath(__file__))\nDIR_RES = os.path.join(DIR_ROOT, 'res/')\nRES_SKIP = ['csb/UI', 'csb/Default', 'csb/Font', 'Map/Block', 'UI']\nRES_JSON = os.path.join(DIR_ROOT, 'src/resource.json')\nRES_DICT = {}\nRES_SPEC = {'Map/MinMap': 'MinMap_'}\nSORT_TYPE = {\n # \"png\" : \"image\",\n \"jpg\" : \"Image\",\n \"fnt\" : \"fnt\",\n \"plist\": \"animate\",\n \"tmx\" : \"map\",\n \"pb\" : \"proto\",\n \"mp3\" : \"sound\",\n \"csb\" : \"csb\",\n \"sheet\": \"texture\",\n \"json\" : \"jsonfont\"\n}\n\n'''\n调用资源处理方法\n拆分方式:字体、图片、图集、地图、帧动画、Proto\n'''\ndef callRes(filepath):\n if filepath[-7:] == 'pvr.ccz':\n return\n\n if filepath.find(' ') != -1:\n return\n\n for item in RES_SKIP:\n dirname = os.path.join(DIR_RES, item)\n if filepath.find(dirname) != -1:\n return\n \n prefix = ''\n if 'Map/MinMap' in filepath:\n prefix = 'MinMap_'\n\n base = os.path.basename(filepath)\n ext = base.split('.')[1]\n path = filepath.replace(DIR_RES, '')\n sort = path.split('/')[0]\n\n if ext == 'plist' and sort.find('.plist') != -1:\n sort = ext = 'sheet'\n sort_type = SORT_TYPE.get(ext)\n\n if not sort_type:\n # print(ext, sort)\n return\n \n base = prefix + base\n RES_DICT[base] = {'path':path, 'type': sort_type}\n\n\n'''\n搜索文件,交给回调处理\n'''\ndef walk(rootdir, call):\n for root, dirs, files in os.walk(rootdir):\n for filename in files:\n if filename == '.DS_Store':\n continue\n filepath = os.path.join(root, filename)\n call(filepath)\n\n'''\n写入资源文件\n'''\ndef writeResJson():\n with open(RES_JSON, 'w') as f:\n f.write(json.dumps(RES_DICT, indent=4, ensure_ascii=False, sort_keys=True))\n\n\nif __name__ == '__main__':\n walk(DIR_RES, callRes)\n 
writeResJson()","repo_name":"DoooReyn/AssetsLoader","sub_path":"ResLoader.py","file_name":"ResLoader.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"75081665777","text":"import torch\nfrom dataclasses import dataclass\nfrom lib.dataspec import DataSpec\n\n\n@dataclass(frozen=True)\nclass DataSpiralsConfig:\n seed: int\n N: int\n angle_factor: float = 1.0\n\n def serialize_human(self):\n return dict(seed=self.seed, N=self.N)\n\n\n@dataclass\nclass Spiral:\n xs: torch.Tensor\n ys: torch.Tensor\n sample_ids: torch.Tensor\n\n\ndef generate_spiral_points(N, angle_factor):\n angles = 4 * 3 * torch.rand(N, 1)\n r = 1.0 + 0.1 * torch.randn(N, 1)\n\n xs1 = torch.stack(\n [\n r * angles / (4 * 3) * torch.cos(angle_factor * angles),\n r * angles / (4 * 3) * torch.sin(angle_factor * angles),\n ],\n dim=1,\n )\n ys1 = torch.zeros(N, dtype=torch.long)\n\n xs2 = torch.stack(\n [\n r * angles / (4 * 3) * torch.cos(angle_factor * angles + 3.14),\n r * angles / (4 * 3) * torch.sin(angle_factor * angles + 3.14),\n ],\n dim=1,\n )\n ys2 = torch.ones(N, dtype=torch.long)\n xs = torch.concat([xs1, xs2], dim=0)\n ys = torch.concat([ys1, ys2], dim=0)\n sample_ids = torch.arange(0, xs.shape[0], 1, dtype=torch.int32)\n return Spiral(xs, ys, sample_ids)\n\n\nclass DataSpirals(torch.utils.data.Dataset):\n def __init__(self, data_config: DataSpiralsConfig):\n torch.manual_seed(data_config.seed)\n self.spiral = generate_spiral_points(data_config.N, data_config.angle_factor)\n self.n_classes = 2\n\n def data_spec(self):\n return DataSpec(\n input_shape=self.spiral.xs.shape[1:],\n target_shape=self.spiral.ys.shape[1:],\n output_shape=torch.Size([self.n_classes]),\n )\n\n def __getitem__(self, idx):\n return self.spiral.xs[idx], self.spiral.ys[idx], self.spiral.sample_ids[idx]\n\n def __len__(self):\n return 
self.spiral.xs.shape[0]\n","repo_name":"hlinander/equivariant-posteriors","sub_path":"lib/datasets/spiral.py","file_name":"spiral.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8145310604","text":"# Parse the salad recipes XML file ccc_salad.xml. Return a list of recipes (Recipe objects).\r\n\r\nimport xml.etree.ElementTree as ET\r\n\r\n\r\nclass Ingredient:\r\n def __init__(self, name, quantity, unit, qualifier):\r\n self.name = name\r\n self.quantity = quantity\r\n self.unit = unit\r\n self.qualifier = qualifier\r\n\r\n def __str__(self):\r\n return str(self.quantity) + \" \" + self.unit + \" \" + self.name + \" \" + self.qualifier\r\n\r\n\r\nclass Recipe:\r\n def __init__(self, number, name, ingredients, steps, excludedDiets):\r\n self.id = number\r\n self.name = name\r\n self.ingredients = ingredients\r\n self.steps = steps\r\n self.excludedDiets = excludedDiets\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\ndef parseXMLSaladRecipes():\r\n print(\"\\n ----- Parsing salad recipes XML file ccc_salad.xml ... 
----- \\n\")\r\n\r\n tree = ET.parse('ccc_salad.xml')\r\n root = tree.getroot()\r\n\r\n recipeList = []\r\n for recipe in root:\r\n\r\n # 'recipeid' element\r\n recipeID = recipe[0].text\r\n\r\n # 'title' element\r\n recipeName = recipe[1].text\r\n\r\n # 'ingredients' element ('ingredient' elements)\r\n recipeIngredients = []\r\n for ingredientXmlElem in recipe[2]:\r\n ingredientObj = Ingredient(ingredientXmlElem.attrib['ingredient'],\r\n float(ingredientXmlElem.attrib['quantity']) if ingredientXmlElem.attrib['quantity'] != \"\" else 0,\r\n ingredientXmlElem.attrib['unit'],\r\n ingredientXmlElem.attrib['qualifiers'])\r\n recipeIngredients.append(ingredientObj)\r\n\r\n # 'preparation' element ('step' elements)\r\n recipeSteps = []\r\n for step in recipe[3]:\r\n recipeSteps.append(step.text)\r\n\r\n # 'diet' element ('exclude-for-diet' elements)\r\n recipeDiets = []\r\n for diet in recipe[4]:\r\n recipeDiets.append(diet.text)\r\n\r\n recipeObj = Recipe(recipeID, recipeName, recipeIngredients, recipeSteps, recipeDiets)\r\n\r\n recipeList.append(recipeObj)\r\n\r\n print(\"\\n ----- Parsing salad recipes XML file ccc_salad.xml DONE ----- \\n\")\r\n\r\n return recipeList\r\n","repo_name":"alexandru-cohal/Thesis-CBRSolPersonalizationCookingRecipes","sub_path":"code/parseXMLSaladRecipes.py","file_name":"parseXMLSaladRecipes.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6799804882","text":"import struct, string\n\nclass TicTacToeBoard:\n\n def __init__(self):\n self.board = (['N']*3,['N']*3,['N']*3)\n\n def PrintBoard(self):\n print(self.board[0][0] + \"|\" + self.board[1][0] + \"|\" + self.board[2][0])\n \n print(self.board[0][1] + \"|\" + self.board[1][1] + \"|\" + self.board[2][1])\n \n print(self.board[0][2] + \"|\" + self.board[1][2] + \"|\" + self.board[2][2])\n\n def get_empty_squares(self):\n emptySquares = []\n for i in range(3):\n for j in range(3):\n 
if(self.board[i][j]=='N'):\n emptySquares.append((i,j))\n return emptySquares\n\n def play_square(self, col, row, val):\n self.board[col][row] = val\n\n def get_square(self, col, row):\n return self.board[col][row]\n\n def full_board(self):\n for i in range(3):\n for j in range(3):\n if(self.board[i][j]=='N'):\n return False\n return True\n\n #if there is a winner this will return their symbol (either 'X' or 'O'),\n #otherwise it will return 'N'\n def winner(self):\n #check the cols\n for col in range(3):\n if(self.board[col][0]!='N' and self.board[col][0] == self.board[col][1] and self.board[col][0]==self.board[col][2] ):\n return self.board[col][0]\n #check the rows\n for row in range(3):\n if(self.board[0][row]!='N' and self.board[0][row] == self.board[1][row] and self.board[0][row]==self.board[2][row] ):\n return self.board[0][row]\n #check diagonals\n if(self.board[0][0]!='N' and self.board[0][0] == self.board[1][1] and self.board[0][0]==self.board[2][2] ):\n return self.board[0][0]\n if(self.board[2][0]!='N' and self.board[2][0] == self.board[1][1] and self.board[2][0]==self.board[0][2]):\n return self.board[2][0]\n return 'N'\n\ndef make_simple_cpu_move(board, cpuval):\n for i in range(3):\n for j in range(3):\n if(board.get_square(i,j)=='N'):\n board.play_square(i,j,cpuval)\n return True\n return False\n\ndef play():\n Board = TicTacToeBoard()\n humanval = 'X'\n cpuval = 'O'\n Board.PrintBoard()\n \n while( Board.full_board()==False and Board.winner() == 'N'):\n print(\"your move, pick a row (0-2)\")\n row = int(input())\n print(\"your move, pick a col (0-2)\")\n col = int(input())\n\n if(Board.get_square(col,row)!='N'):\n print(\"square already taken!\")\n continue\n else:\n Board.play_square(col,row,humanval)\n if(Board.full_board() or Board.winner()!='N'):\n break\n else:\n Board.PrintBoard()\n print(\"CPU Move\")\n ab_decision(Board,cpuval) ##change this\n Board.PrintBoard()\n\n Board.PrintBoard()\n if(Board.winner()=='N'):\n print(\"Cat game\")\n 
elif(Board.winner()==humanval):\n print(\"You Win!\")\n elif(Board.winner()==cpuval):\n print(\"CPU Wins!\")\n\ndef playAsO():\n Board=TicTacToeBoard()\n humanval='O'\n cpuval='X'\n Board.PrintBoard()\n print(\"\\n\")\n\n while( Board.full_board()==False and Board.winner() == 'N'):\n ab_decision(Board,cpuval) ### change this\n if(Board.full_board() or Board.winner()!='N'):\n break\n\n print(\"CPU Move\")\n Board.PrintBoard()\n\n print(\"your move, pick a row (0-2)\")\n row = int(input())\n print(\"your move, pick a col (0-2)\")\n col = int(input())\n\n while(Board.get_square(col,row)!='N'):\n print(\"square already taken!\")\n print(\"your move again, pick a row (0-2)\")\n row = int(input())\n print(\"your move again, pick a col (0-2)\")\n col = int(input())\n\n Board.play_square(col,row,humanval)\n if(Board.full_board() or Board.winner()!='N'):\n break\n else:\n Board.PrintBoard()\n print(\"\\n\")\n\n Board.PrintBoard()\n if(Board.winner()=='N'):\n print(\"Cat game\")\n elif(Board.winner()==humanval):\n print(\"You Win!\")\n elif(Board.winner()==cpuval):\n print(\"CPU Wins!\")\n\ndef minimax(board,cpVal):\n if cpVal == \"X\":\n move,score = maxMove(board)\n else:\n move,score = minMove(board)\n board.play_square(move[0],move[1],cpVal)\n\ndef maxMove(board):\n bestScore =None\n bestMove= None\n freeSquares = board.get_empty_squares()\n for square in freeSquares:\n board.play_square(square[0],square[1],\"X\")\n if board.full_board() and board.winner()=='N':\n score= 0\n elif board.winner()==\"X\":\n score= 1\n elif board.winner()==\"O\":\n score= -1\n else:\n move_pos,score = minMove(board)\n \n board.play_square(square[0],square[1],\"N\")\n\n if bestScore == None or score >bestScore:\n bestScore=score\n bestMove = square\n return bestMove,bestScore\n\ndef minMove(board):\n bestScore =None\n bestMove= None\n freeSquares = board.get_empty_squares()\n for square in freeSquares:\n board.play_square(square[0],square[1],\"O\")\n if board.full_board() and 
board.winner()=='N':\n score= 0\n elif board.winner()==\"X\":\n score= 1\n elif board.winner()==\"O\":\n score= -1\n else:\n move_pos,score = maxMove(board)\n \n board.play_square(square[0],square[1],\"N\")\n\n if bestScore == None or score bestScore:\n bestScore = score\n bestMove = square\n\n if bestScore >= b:\n return bestMove,bestScore\n a = max(a, bestScore)\n\n return bestMove,bestScore\n\ndef ab_min(board, a, b):\n bestScore =None\n bestMove= None\n freeSquares = board.get_empty_squares()\n for square in freeSquares:\n board.play_square(square[0],square[1],\"O\")\n if board.full_board() and board.winner()=='N':\n score= 0\n elif board.winner()==\"X\":\n score= 1\n elif board.winner()==\"O\":\n score= -1\n else:\n move_pos,score = ab_max(board, a, b)\n \n board.play_square(square[0],square[1],\"N\")\n\n if bestScore == None or score < bestScore:\n bestScore = score\n bestMove = square\n\n if bestScore <= a:\n return bestMove,bestScore\n b = min(b, bestScore)\n\n return bestMove,bestScore\n\ndef main():\n play()\n\nmain()","repo_name":"salilgupta1/tictactoe","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":7444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13535355062","text":"\n\nn, m, x = map(int, input().split())\nINF = int(1e9)\n\n# 방문했는지 확인\nvisited = [False]*(n+1)\n\n# 현재 단계의 최단거리 테이블\ndistance = [INF]*(n+1)\n\n# 지도 \nboard = [[] for i in range(n+1)]\n\nfor i in range(m) :\n y, x, t = map(int, input().split())\n board[y].append((x,t))\n \ndef get_small_node() :\n min_value = INF\n index = 0\n for i in range(1, n+1) :\n if distance[i] < min_value and not visited[i] :\n min = distance[i]\n index = i\n return index\n \ndef dijkstra(start) :\n # 방문지 체크 \n visited[start]=True\n \n # 시작 노드 제외하고 모든 노드에 대해 반복하기\n for i in range(n-1) : \n # 방문지에서 연결된 노드 중 아직 방문하지 않은 노드 중 가장 짧은 노드를 꺼내서 방문 처리 \n now = get_small_node()\n visited[now]=True\n # 선택된 노드에 연결된 노드 확인\n 
for next in board[now] :\n cost = distance[now]+next[1]\n \n # 선택된 노드를 거쳐서 다른 노드로 이동하는 경우가, 기존 최단경로보다 더 짧은 경우 갱신 \n if cost < distance[next[0]] :\n distance[next[0]] = cost\n \n \n\n\n# 도착 마을에서 시작 \ndijkstra(x)","repo_name":"sand116/python","sub_path":"algorithm/최단경로/Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74397297777","text":"# -*- coding: utf-8 -*-\n# pylint: disable-msg=E1103\n\"\"\"\nCreated on Mon Apr 13 12:47:53 2015\n\nThis module contains the main QRS delineation routines. It may be used as a\nlibrary or directly through the qrsdel command line tool.\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense as published by the Free Software Foundation; either\nversion 3.0 of the License, or (at your option) any later version.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library.\n\n@author: T. 
Teijeiro\n\"\"\"\n\nimport utils.signal_measures as sig_meas\nimport operator\nimport numpy as np\nimport math\nimport bisect\nfrom model import QRS, QRSShape, Interval as Iv\nfrom utils.constants import CONSTANTS as C\nfrom utils.constraints import verify, InconsistencyError\nfrom utils.wave_extraction import extract_waves\nfrom utils.units_helper import (msec2samples as ms2sp, phys2digital as ph2dg,\n digital2mm as dg2mm, samples2mm as sp2mm)\nfrom utils.signal_measures import get_peaks\nfrom collections import OrderedDict\nfrom scipy.cluster.vq import kmeans2, whiten\n\n\ndef _find_peak(siginfo):\n \"\"\"\n Obtains an estimation of the peak situation of a QRS complex, from the\n energy interval that forms the base evidence, a fragment of signal evidence,\n a reference time point, and the interval of valid points for the peak.\n \"\"\"\n dist = lambda p : 1.0 + 2.0 * abs(p - C.QRS_BANN_DMAX)/ms2sp(150)\n dist = np.vectorize(dist)\n peak = None\n #For each lead, the peak will be the maximum deviation point wrt the\n #baseline, and applying the distance function just defined. We give more\n #importance to the first leads, as they supposedly have more quality.\n for _, sig, points, baseline, _ in siginfo:\n if len(points) < 3:\n continue\n peaks = points[sig_meas.get_peaks(sig[points])]\n if len(peaks) == 0:\n continue\n peakscore = abs(sig[peaks]-baseline)/dist(peaks)\n lpeak = peaks[peakscore.argmax()]\n if peak is None:\n peak = lpeak\n elif abs(peak-lpeak) <= C.TMARGIN:\n peak = lpeak if lpeak < peak else peak\n return peak\n\n\ndef _combine_limits(limits, siginfo, peak):\n \"\"\"\n Combines the QRS limits detected in a set of leads, applying ad-hoc rules\n for the situation in which a paced beat is detected. 
This function raises\n an *InconsistencyError* exception if the limits cannot be properly combined.\n\n Parameters\n ----------\n limits:\n Dictionary, indexed by lead, with a tuple in each one indicating if a\n paced beat was detected in that lead, and an Interval instance with\n the delineation result.\n siginfo:\n List with the information about the signal we are dealing with. It is\n the result of the *_characterize_signal* function.\n peak:\n Situation of the QRS peak point.\n\n Returns\n -------\n (start, end):\n Absolute endpoints of the QRS complex obtained from the combination of\n the limits in all leads.\n \"\"\"\n start = end = None\n if any(v[0] for v in limits.itervalues()):\n #There is a pacing detection, we will check if the information of\n #all leads is consistent with detection.\n #First, all spikes must start within a 40ms margin.\n try:\n spkstart = [v[1].start for v in limits.itervalues() if v[0]]\n verify(max(spkstart)-min(spkstart) <= C.TMARGIN)\n #Second, all non-paced leads must start their QRS complex in the\n #40 ms after the first spike has appeared.\n spkstart = min(spkstart)\n verify(all(-C.TMARGIN <= v[1].start-spkstart <= C.TMARGIN\n for v in limits.itervalues() if not v[0]))\n #We have confirmed the beat is a paced beat, we set the limits\n start = spkstart\n end = max(v[1].end for v in limits.itervalues() if v[0])\n for _, endpoints in limits.itervalues():\n if (0 < endpoints.end - end <= C.TMARGIN and\n endpoints.end-start <= C.QRS_EANN_DMAX):\n end = endpoints.end\n except InconsistencyError:\n #We set the non-paced delineation for previously detected paced\n #leads.\n for lead in (k for k, v in limits.iteritems() if v[0]):\n _, sig, points, _, _ = ([info for info in siginfo\n if info[0]==lead][0])\n endpoints = _qrs_delineation(sig, points, peak)\n if endpoints is not None:\n limits[lead] = (False, endpoints)\n else:\n limits.pop(lead)\n #If we have discarded all limits, we raise an exception.\n verify(limits)\n #If there is no 
a paced beat, we join the limits estimation of every\n #lead, by order of quality.\n if start is None:\n start, end = limits.values()[0][1].start, limits.values()[0][1].end\n for _, endpoints in limits.itervalues():\n if (0 < start-endpoints.start <= C.TMARGIN and\n end-endpoints.start <= C.QRS_EANN_DMAX):\n start = endpoints.start\n if (0 < endpoints.end - end <= C.TMARGIN and\n endpoints.end-start <= C.QRS_EANN_DMAX):\n end = endpoints.end\n return (start, end)\n\n\ndef _qrs_delineation(signal, points, peak):\n \"\"\"\n Returns the interval points of a possible QRS complex in a signal fragment.\n\n Parameters\n ----------\n signal:\n Array containing a signal fragment with a possible QRS inside its limits\n points:\n Representative points candidates to be the limits..\n peak:\n Point of the determined QRS peak.\n\n Returns\n -------\n out:\n The interval of the QRS.\n \"\"\"\n try:\n verify(len(points) >= 3)\n #We get the slope of each segment determined by the relevant points\n slopes = ((signal[points][1:]-signal[points][:-1])/\n (points[1:]-points[:-1]))\n #We also get the peaks determined by the signal simplification.\n pks = points[sig_meas.get_peaks(signal[points])]\n verify(len(pks) > 0)\n #Now we perform a clustering operation over each slope, with a certain\n #set of features.\n features = []\n for i in xrange(len(slopes)):\n #We obtain the midpoint of the segment, and its difference with\n #respect to the peak, applying a temporal margin.\n #We get as representative point of the segment the starting point\n #if the segment is prior to the peak, and the ending point\n #otherwise.\n point = points[i] if points[i] < peak else points[i+1]\n #The features are the slope in logarithmic scale and the distance to\n #the peak.\n dist = abs(point - peak)\n features.append([math.log(abs(slopes[i])+1.0), dist])\n #We perform a clustering operation on the extracted features\n features = whiten(features)\n #We initialize the centroids in the extremes (considering what 
is\n #interesting of each feature for us)\n fmin = np.min(features, 0)\n fmax = np.max(features, 0)\n tags = kmeans2(features, np.array([[fmin[0], fmax[1]],\n [fmax[0], fmin[1]]]),\n minit = 'matrix')[1]\n valid = np.where(tags)[0]\n verify(np.any(valid))\n start = points[valid[0]]\n end = points[valid[-1]+1]\n #If the relation between not valid and valid exceeds 0.5, we take the\n #highest valid interval containing the peak.\n if _invalidtime_rate(points, valid) > 0.5:\n #We get the last valid segment before the peak, and the first valid\n #segment after the peak. We expand them with consecutive valid\n #segments.\n try:\n start = max(v for v in valid if points[v] <= peak)\n while start-1 in valid:\n start -= 1\n end = min(v for v in valid if points[v+1] >= peak)\n while end+1 in valid:\n end += 1\n start, end = points[start], points[end+1]\n except ValueError:\n return None\n #We ensure there is a peak between the limits.\n verify(np.any(np.logical_and(pks > start, pks < end)))\n #If there are no peaks, we don't accept the delineation\n return Iv(start, end)\n except InconsistencyError:\n return None\n\n\ndef _paced_qrs_delineation(signal, points, peak, baseline):\n \"\"\"\n Checks if a sequence of waves is a paced heartbeat. 
The main criteria is\n the presence of a spike at the beginning of the beat, followed by at least\n one significant wave.\n \"\"\"\n try:\n #Gets the slope between two points.\n slope = lambda a, b : abs(dg2mm((signal[b]-signal[a])/sp2mm(b-a)))\n #First we search for the spike.\n spike = _find_spike(signal, points)\n verify(spike)\n if not spike[-1] in points:\n points = np.insert(points, bisect.bisect(points, spike[-1]),\n spike[-1])\n #Now we get relevant points, checking some related constraints.\n bpts = points[points <= spike[0]]\n apts = points[points >= spike[-1]]\n verify(len(apts) >= 2)\n #Before and after the spike there must be a significant slope change.\n verify(slope(spike[0], spike[1]) > 2.0 * slope(bpts[-2], bpts[-1]))\n verify(slope(spike[1], spike[-1]) > 2.0 * slope(apts[0], apts[1]))\n #Now we look for the end of the QRS complex, by applying the same\n #clustering strategy than regular QRS, but only for the end.\n slopes = (signal[apts][1:]-signal[apts][:-1])/(apts[1:]-apts[:-1])\n features = []\n for i in xrange(len(slopes)):\n #The features are the slope in logarithmic scale and the distance to\n #the peak.\n features.append([math.log(abs(slopes[i])+1.0),\n abs(apts[i+1] - peak)])\n features = whiten(features)\n #We initialize the centroids in the extremes (considering what is\n #interesting of each feature for us)\n fmin = np.min(features, 0)\n fmax = np.max(features, 0)\n valid = np.where(kmeans2(features, np.array([[fmin[0], fmax[1]],\n [fmax[0], fmin[1]]]), minit = 'matrix')[1])[0]\n verify(np.any(valid))\n end = apts[valid[-1]+1]\n #The duration of the QRS complex after the spike must be more than 2\n #times the duration of the spike.\n verify((end-apts[0]) > 2.0 * (spike[-1]-spike[0]))\n #The amplitude of the qrs complex must higher than 0.5 the amplitude\n #of the spike.\n sgspike = signal[spike[0]:spike[-1]+1]\n sgqrs = signal[apts[0]:end+1]\n verify(np.ptp(sgqrs) > ph2dg(0.5))\n verify(np.ptp(sgqrs) > 0.5 * np.ptp(sgspike))\n #There 
must be at least one peak in the QRS fragment.\n qrspt = signal[apts[apts <= end]]\n verify(len(qrspt) >= 3)\n verify(abs(signal[end] - signal[spike[0]]) <= ph2dg(0.3)\n or len(get_peaks(qrspt)) > 0)\n #The area of the rest of the QRS complex must be higher than the spike.\n verify(np.sum(np.abs(sgspike-sgspike[0])) <\n np.sum(np.abs(sgqrs-sgspike[0])))\n #The distance between the beginning of the spike and the baseline\n #cannot be more than the 30% of the amplitude of the complex.\n verify(abs(signal[spike[0]]-baseline) <\n 0.3 * np.ptp(signal[spike[0]:end+1]))\n #At last, we have found the paced QRS limits.\n return Iv(spike[0], end)\n except InconsistencyError:\n return None\n\n\ndef _get_qrs_shape(signal, points, peak, baseline):\n \"\"\"\n Obtains the QRSShape object that best fits a signal fragment, considering\n the simplification determined by points, and the peak and baseline\n estimations. The detected QRS shape must collect the majority of the total\n energy of the waves present in the signal fragment.\n \"\"\"\n try:\n waves = extract_waves(signal, points, baseline)\n verify(waves)\n total_energ = sum(w.e for w in waves)\n #We find the longest valid sequence of waves with the highest energy.\n sequences = []\n for i in xrange(len(waves)):\n #Largest valid sequence starting in the i-th wave.\n seq = [waves[i]]\n j = i+1\n while j < len(waves) and _is_qrs_complex(waves[i:j+1]):\n seq.append(waves[j])\n j += 1\n #We add the valid sequence and the acumulated energy (we require\n #the peak to actually be inside the sequence.)\n tag = _tag_qrs(seq)\n energ = sum(w.e for w in seq)\n if (tag in C.QRS_SHAPES and energ/total_energ > 0.5 and\n any(w.l <= peak <= w.r for w in seq)):\n sequences.append((seq, tag, energ))\n #We get the sequence with the maximum value\n verify(sequences)\n seq, tag, energ = max(sequences, key= operator.itemgetter(2))\n shape = QRSShape()\n shape.energy = energ\n shape.tag = tag\n shape.waves = seq\n shape.sig = 
signal[seq[0].l:seq[-1].r+1] - signal[seq[0].l]\n shape.maxslope = np.max(np.abs(np.diff(shape.sig)))\n shape.amplitude = np.ptp(shape.sig)\n return shape\n except (ValueError, InconsistencyError):\n return None\n\n\ndef _get_paced_qrs_shape(signal, points, start, end):\n \"\"\"\n Obtains the QRSShape object corresponding to a paced QRS complex delimited\n inside a signal fragment.\n\n Parameters\n ----------\n signal:\n Signal fragment containing a paced QRS complex. The limits of the\n signal should be the limits determined by the *_paced_qrs_delineation*\n function.\n points:\n Relevant points in the signal fragment.\n start:\n Start point of the pace spike wrt the start of the signal.\n end:\n Finish point of the paced QRS wrt the start of the signal.\n\n Returns\n -------\n out:\n QRSShape object representing the paced beat.\n \"\"\"\n try:\n signal = signal[start:end+1]\n points = points[np.logical_and(points >= start, points <= end)] - start\n verify(len(points)>0)\n if points[0] != 0:\n points = np.insert(points, 0, 0)\n if points[-1] != len(signal) - 1:\n points = np.append(points, len(signal) - 1)\n verify(len(points) >= 3)\n #We assume the baseline level is the start signal value of the spike\n waves = extract_waves(signal, points, signal[points[0]])\n verify(waves)\n total_energ = sum(w.e for w in waves)\n #We get the longest wave sequence with a valid QRS tag.\n i = 0\n while i < len(waves) and _tag_qrs(waves[:i+1]) in C.QRS_SHAPES:\n i += 1\n tag = _tag_qrs(waves[:i])\n verify(tag in C.QRS_SHAPES)\n shape = QRSShape()\n shape.waves = waves[:i]\n shape.energy = sum(w.e for w in shape.waves)\n shape.tag = tag\n shape.sig = (signal[shape.waves[0].l:shape.waves[-1].r+1] -\n signal[shape.waves[0].l])\n shape.maxslope = np.max(np.abs(np.diff(shape.sig)))\n shape.amplitude = np.ptp(shape.sig)\n shape.move(start)\n verify(shape.energy/total_energ > 0.5)\n return shape\n except (ValueError, InconsistencyError):\n return None\n\n\ndef _tag_qrs(waves):\n 
\"\"\"\n Creates a new string tag for a QRS complex from a sequence of waves. This\n tag matches the name given by cardiologists to the different QRS waveforms.\n \"\"\"\n #This method consists in a concatenation of heuristic rules described with\n #more or less precision in \"European Heart Journal: Recommendations for\n #measurement standards in quantitative electrocardiography. (1985)\".\n result = ''\n waves = list(waves)\n while waves:\n wav = waves.pop(0)\n #If the first wave is negative...\n if not result and wav.sign == -1:\n if not waves:\n result = 'QS' if abs(wav.amp) > ph2dg(0.5) else 'Q'\n else:\n result = 'Q' if abs(wav.amp) > ph2dg(0.2) else 'q'\n else:\n newt = 'r' if wav.sign == 1 else 's'\n if abs(wav.amp) > ph2dg(0.5):\n newt = newt.upper()\n result += newt\n return result\n\n\ndef _reference_wave(shape):\n \"\"\"\n Obtains the index of the wave that must be taken as reference to\n establish the QRS complex reference point, based on the shape of the\n complex and the energy of the waves.\n \"\"\"\n #If one wave has more than twice the enrgy than any one else, it is the\n #reference.\n mxe = max(w.e for w in shape.waves)\n idx = -1\n for i in xrange(len(shape.waves)):\n wav = shape.waves[i]\n if wav.e == mxe:\n idx = i\n elif float(wav.e / mxe) > 0.5:\n idx = -1\n break\n if idx == -1:\n if shape.tag == 'QS':\n return len(shape.waves)-1\n if shape.tag in ('R', 'r', 'RS', 'Rs', 'rs', 'RSR', 'rsr', 'RsR',\n 'RrS', 'RR', 'Rr', 'rr', 'Q', 'Qr'):\n return 0\n elif shape.tag in ('qRs', 'QRs', 'rS', 'rSr', 'rR', 'qR', 'QR', 'qr',\n 'Qs', 'qS'):\n return 1\n elif shape.tag in ('QrS', 'rsR'):\n return 2\n raise ValueError('Unknown QRS shape {0}.'.format(shape))\n else:\n return idx\n\ndef _is_qrs_complex(wave_seq):\n \"\"\"\n Checks if a sequence of Wave objects conform a recognized QRS shape. 
For\n this, the waves must be consecutive, and conform a recongined pattern.\n \"\"\"\n #The waves must be consecutive.\n for i in xrange(1, len(wave_seq)):\n if wave_seq[i].l != wave_seq[i-1].r:\n return False\n #The shape must already be valid.\n return _tag_qrs(wave_seq) in C.QRS_SHAPES\n\ndef _find_spike(signal, points):\n \"\"\"\n Looks for a pacemaker spike in a signal fragment, applying fixed thresholds\n on wave duration, angles and amplitude. These thresholds are the following:\n\n - The duration of the spike must be shorter than 30ms.\n - The ascent and descent angles of the spike must be higher than 75º in\n common ECG scale.\n - The amplitude of the spike must be at least 0.2 mV (2mm) in the edge with\n lower amplitude.\n - The falling edge must be of lower amplitude than the rising edge.\n\n Parameters\n ----------\n signal:\n Numpy array containing the signal information referenced by the wave\n object.\n points:\n Relevant points detected on the signal.\n\n Returns\n -------\n out:\n Tuple with three integer values, which are the begin, peak, and\n end of the detected spike. 
If no spikes were detected, returns None.\n\n \"\"\"\n #Angle between two points\n angle = lambda a, b : math.atan(dg2mm(abs(signal[b]-signal[a])/sp2mm(b-a)))\n #First we search for the left edge of the spike.\n spike = []\n for i in xrange(1, len(points)-3):\n for j in xrange(i+1, len(points)-2):\n pts = points[i:j+1]\n llim = pts[-1]\n #There can be no peaks inside the left edge.\n if (llim-pts[0] > C.SPIKE_DUR or\n (len(pts) >= 3 and len(get_peaks(signal[pts])) > 0)):\n break\n #The end of the left edge must be a peak.\n if len(get_peaks(signal[llim-1:llim+2])) < 1:\n continue\n #Left edge candidate\n ledge = abs(signal[pts[0]] - signal[llim])\n if (ledge >= C.SPIKE_EDGE_AMP and\n angle(pts[0], llim) >= math.radians(85)):\n #Right edge delineation.\n ulim = min(int(pts[0]+C.SPIKE_DUR), points[-1])\n rsig = signal[llim:ulim+1]\n if len(rsig) < 3:\n break\n rpks = get_peaks(rsig)\n if np.any(rpks):\n ulim = llim + rpks[0]\n ulim = ulim-1 if ulim-1 in points else ulim\n ulim = ulim+1 if ulim+1 in points else ulim\n while ulim > llim:\n redge = abs(signal[ulim] - signal[llim])\n if redge < C.SPIKE_EDGE_AMP:\n break\n if (redge-ledge < C.SPIKE_ECGE_DIFF and\n angle(llim, ulim) >= math.radians(75)):\n #Spike candidate detected\n spike.append((pts[0], llim, ulim))\n break\n ulim -= 1\n if not spike or max(sp[0] for sp in spike) >= min(sp[-1] for sp in spike):\n return None\n #We get the spike with highest energy.\n return max(spike, key = lambda spk:\n np.sum(np.diff(signal[spk[0]:spk[-1]+1])**2))\n\ndef _invalidtime_rate(points, valid):\n \"\"\"\n Obtains the time rate between the points marked as not valid and the rest\n inside the whole valid domain.\n\n Parameters\n ----------\n points:\n Array with numerical values determining time points.\n valid:\n Array of boolean values, with the same shape of points, that determines\n if a point is valid or not. 
At least one value must be valid.\n\n Returns\n -------\n out:\n Float number with the time rate of not valid points vs valid.\n \"\"\"\n assert np.any(valid)\n validtime = 0.0\n invalidtime = 0.0\n idx = valid[0]\n while idx <= valid[-1]:\n if idx in valid:\n validtime += points[idx+1] - points[idx]\n else:\n invalidtime += points[idx+1] - points[idx]\n idx += 1\n return invalidtime/validtime\n\n#####################################\n### Proper QRS delineation method ###\n#####################################\n\ndef delineate_qrs(siginfo):\n \"\"\"\n Performs the multi-lead delineation of a QRS complex enclosed in a\n specific time interval, returning an instance of the QRS class.\n\n Parameters\n ----------\n siginfo:\n List-like structure containing all the necessary information of the\n ECG signal in the searching time interval. Each entry in this list\n is assumed to be a tuple of the **LeadInfo** class, and the list is\n assumed to be ordered by the quality of the signal in each lead.\n\n Returns\n -------\n out:\n QRS object with all the attributes properly set. If the delineation\n cannot be performed, an InconsistencyError is raised.\n \"\"\"\n verify(siginfo)\n qrs = QRS()\n #Peak point estimation.\n peak = _find_peak(siginfo)\n verify(peak is not None)\n #QRS start and end estimation\n #For each lead, we first check if it is a paced beat, whose delineation\n #process is different. 
In case of failure, we perform common delineation.\n limits = OrderedDict()\n for lead, sig, points, baseline, _ in siginfo:\n endpoints = _paced_qrs_delineation(sig, points, peak, baseline)\n if endpoints is None:\n endpoints = _qrs_delineation(sig, points, peak)\n if endpoints is None:\n continue\n limits[lead] = (False, endpoints)\n else:\n limits[lead] = (True, endpoints)\n #Now we combine the limits in all leads.\n start, end = _combine_limits(limits, siginfo, peak)\n verify(start is not None and end > start)\n #QRS waveform extraction for each lead.\n for lead, sig, points, baseline, _ in siginfo:\n #We constrain the area delineated so far.\n sig = sig[start:end+1]\n points = points[np.logical_and(points >= start,\n points <= end)] - start\n if len(points) == 0:\n continue\n if points[0] != 0:\n points = np.insert(points, 0, 0)\n if points[-1] != len(sig) - 1:\n points = np.append(points, len(sig) - 1)\n if len(points) < 3:\n continue\n #We define a distance function to evaluate the peaks\n dist = (lambda p : 1.0 + 2.0 * abs(start + p - C.QRS_BANN_DMAX)\n /ms2sp(150))\n dist = np.vectorize(dist)\n #We get the peak for this lead\n pks = points[sig_meas.get_peaks(sig[points])]\n if len(pks) == 0:\n continue\n peakscore = abs(sig[pks]-baseline)/dist(pks)\n peak = pks[peakscore.argmax()]\n #Now we get the shape of the QRS complex in this lead.\n shape = None\n #If there is a pace detection in this lead\n if lead in limits and limits[lead][0]:\n endpoints = limits[lead][1]\n shape = _get_paced_qrs_shape(sig, points,\n endpoints.start - start,\n min(endpoints.end-start,len(sig)))\n if shape is None:\n limits[lead] = (False, endpoints)\n if shape is None:\n shape = _get_qrs_shape(sig, points, peak, baseline)\n if shape is None:\n continue\n qrs.shape[lead] = shape\n #There must be a recognizable QRS waveform in at least one lead.\n verify(qrs.shape)\n #The detected shapes may constrain the delineation area.\n llim = min(qrs.shape[lead].waves[0].l for lead in 
qrs.shape)\n if llim > 0:\n start = start + llim\n for lead in qrs.shape:\n qrs.shape[lead].move(-llim)\n ulim = max(qrs.shape[lead].waves[-1].r for lead in qrs.shape)\n if ulim < end-start:\n end = start + ulim\n #The definitive peak is assigned to the first relevant wave\n #(each QRS shapeform has a specific peak point.)\n peak = start + min(s.waves[_reference_wave(s)].m\n for s in qrs.shape.itervalues())\n #Segmentation points set\n qrs.paced = any(v[0] for v in limits.itervalues())\n qrs.start, qrs.peak, qrs.end = start, peak, end\n ###################################################################\n #Amplitude conditions (between 0.5mV and 6.5 mV in at least one\n #lead or an identified pattern in most leads).\n ###################################################################\n verify(len(qrs.shape) > len(siginfo)/2.0 or\n C.QRS_MIN_AMP <= max(s.amplitude for s in qrs.shape.itervalues())\n <= C.QRS_MAX_AMP)\n return qrs\n\nif __name__ == \"__main__\":\n pass","repo_name":"citiususc/qrsdel","sub_path":"qrsdel/delineation.py","file_name":"delineation.py","file_ext":"py","file_size_in_byte":27363,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"57"} +{"seq_id":"17622366344","text":"__author__ = \"Vasyl Khomenko\"\n__copyright__ = \"Copyright 2013, Qubell.com\"\n__license__ = \"Apache\"\n__email__ = \"vkhomenko@qubell.com\"\n\nimport logging as log\nimport simplejson as json\nfrom qubell.api.private import exceptions\nfrom qubell.api.provider.router import InstanceRouter\nfrom qubell.api.private.common import QubellEntityList, Entity\n\n\nclass Role(Entity, InstanceRouter):\n\n # noinspection PyShadowingBuiltins\n def __init__(self, organization, id):\n self.organization = organization\n self.organizationId = self.organization.organizationId\n self.roleId = self.id = id\n\n @staticmethod\n def new(router, organization, name, permissions=\"\"):\n log.info(\"Creating role: %s\" % name)\n log.debug(\"Creating role: 
%s, permissions: %s\" % (name, permissions))\n resp = router.post_roles(org_id=organization.id,\n data=json.dumps({\"name\": name, \"permissions\": permissions}))\n role = Role(organization, resp.json()['id']).init_router(router)\n return role\n\n @property\n def name(self):\n return self.json()['name']\n\n @property\n def permissions(self):\n return self.json()['permissions']\n\n def __getattr__(self, key):\n resp = self.json()\n if key in resp:\n raise exceptions.NotFoundError('Cannot get property %s' % key)\n return resp[key] or False\n\n def json(self):\n return self._router.get_role(org_id=self.organizationId, role_id=self.roleId).json()\n\n def update(self, name=None, permissions=\"\"):\n name = name or self.name\n permissions = permissions or self.permissions\n self._router.put_role(org_id=self.organization.id,\n role_id=self.id,\n data=json.dumps({\"name\": name,\n \"permissions\": permissions}))\n return True\n\n def delete(self):\n self._router.delete_role(org_id=self.organizationId, role_id=self.roleId)\n return True\n\n\nclass RoleList(QubellEntityList):\n base_clz = Role","repo_name":"chemikadze/contrib-python-qubell-client","sub_path":"qubell/api/private/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73686241137","text":"# Udělejte hru, kde si počítač bude myslet jedno číslo a\n# uživatel bude hádat jaké to číslo je.\n# Pokud uživatel číslo uhádně, tak ho pochvalte\n\nfrom random import randint\n\nnum = randint(1, 6)\n# my_num = -999\n# while num != my_num:\n # my_num = int(input('Zadej cislo: '))\n # if num == my_num:\n # print('Uhadl si')\n # break\n # else:\n # print('Smula')\nwhile True:\n my_num = int(input('Zadej cislo: '))\n if num == my_num:\n print('Uhadl si')\n break\n else:\n 
print('Smula')\n","repo_name":"Bulva/python-2022","sub_path":"04-cykly_a_importy/ukoly/cislo.py","file_name":"cislo.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7963848714","text":"def comb(m): # 요리를 짝짓는 방법을 visited에 append 시키는 함수\n global n, visited\n for i in range(1< abs(count_a-count_b):\n min_diff = abs(count_a-count_b)\n print('#{0} {1}'.format(case, min_diff))","repo_name":"ChoneungSon/BeakJoon","sub_path":"A형보충문제/4012_모의요리사_손초능.py","file_name":"4012_모의요리사_손초능.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1231292433","text":"#!/usr/bin/python3 -u\nimport math\nimport os\nimport threading\nimport time\n\nimport RPi.GPIO as GPIO\nimport board\nimport neopixel\n\nimport wakeup\n\nswitch_pin = 13\nled_pin = board.D12\n\npixels = neopixel.NeoPixel(led_pin, 1)\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nprint(\"Starting...\")\ndimmer = .04\n\nfrom remote_service import RemoteService\n\ndef run_alarm(time):\n wakeup.set_alarmclock(time)\n\n\ndef set_alarm(alarm):\n threading.Thread(target=run_alarm, args=(alarm,)).start()\n\n\ndef timer(silent = False):\n timer_mins = 20\n\n if silent:\n os.system('sudo systemctl stop tuner')\n os.system(f\"say Silent\")\n else:\n os.system('sudo systemctl start tuner')\n\n os.system(f\"say Timer {timer_mins} minutes\")\n time.sleep(timer_mins * 60)\n\n if silent:\n os.system('sudo systemctl start tuner')\n else:\n os.system('sudo systemctl stop tuner')\n\n\ndef minute_countdown(duration):\n for i in range(0, duration):\n os.system(f\"say {duration - i}\")\n time.sleep(60)\n os.system('say You are done')\n\n\ndef custom_timer(duration):\n os.system(f\"say Timer {duration} minutes\")\n time.sleep(duration * 60)\n\n os.system(f\"say Timer {duration} minutes done\")\n\ndef 
exercise(title, change=False):\n os.system('say %s' % title)\n if change:\n time.sleep(30) \n os.system('say change')\n time.sleep(30)\n else:\n time.sleep(60)\n\ndef daily_workout():\n os.system('say workout start')\n exercise('Stand')\n exercise('Squat')\n exercise('Dog')\n exercise('Pigeon', change=True)\n exercise('Knee head', change=True)\n exercise('Cycle')\n exercise('Baby')\n exercise('Child')\n os.system('say You are done')\n\ndef playpause():\n pixels[0] = (0, 0, 0)\n os.system('/usr/bin/mpc pause &')\n\ndef button_pressed(channel):\n playpause()\n\ndef start_timer():\n threading.Thread(target=timer).start()\n\n\ndef start_minute_countdown(duration):\n threading.Thread(target=minute_countdown, args=[duration]).start()\n\ndef start_custom_timer(duration):\n threading.Thread(target=custom_timer, args=[duration]).start()\n\ndef start_daily_workout():\n threading.Thread(target=daily_workout, args=[]).start()\n\ndef start_silent_timer():\n threading.Thread(target=timer,args=(True,)).start()\n\ndef on_key_pressed(key):\n print(key)\n if key == 'KEY_': pass\n if key == 'KEY_FASTFORWARD': os.system('curl -X POST \"https://api.spotify.com/v1/me/player/next\" -H \"Authorization: Bearer BQDy-sPybW8wtFbDhA9VfbTN1PSnoNZ6RHTzQrykoQgbvSXiSjbpotv3Tx6QzVzFt0WtNYXBgANRULfVczCpq9tjNfw_wpSMRwFNhW4fLyBXODHcs-r_C8JSQwyhcSIHdjS7ntgGE7scyAg\" &')\n if key == 'KEY_REWIND': os.system('curl -X POST \"https://api.spotify.com/v1/me/player/previous\" -H \"Authorization: Bearer BQDy-sPybW8wtFbDhA9VfbTN1PSnoNZ6RHTzQrykoQgbvSXiSjbpotv3Tx6QzVzFt0WtNYXBgANRULfVczCpq9tjNfw_wpSMRwFNhW4fLyBXODHcs-r_C8JSQwyhcSIHdjS7ntgGE7scyAg\" &')\n\n if key == 'KEY_SEARCH':\n os.system('say `sudo python3 /home/pi/stereopi/time_to_speech.py` &')\n elif key == 'KEY_RED':\n pixels[0] = (int(255 * dimmer), 0, 0)\n elif key == 'KEY_GREEN':\n pixels[0] = (0, int(255 * dimmer), 0)\n elif key == 'KEY_YELLOW':\n pixels[0] = (int(255 * dimmer), int(255 * dimmer), 0)\n elif key == 'KEY_BLUE':\n pixels[0] = (0, 0, 
int(255 * dimmer))\n elif key == 'KEY_MUTE':\n playpause()\n elif key == 'KEY_PLAYPAUSE':\n playpause()\n elif key == 'KEY_VOLUMEUP':\n os.system(\"amixer set PCM 5%+\")\n elif key == 'KEY_RECORD':\n os.system(\"say record\")\n elif key == 'KEY_INFO':\n os.system(\"sudo systemctl stop tuner\")\n os.system(\"sudo systemctl restart shairport-sync\")\n os.system(\"sudo systemctl restart raspotify\")\n elif key == 'KEY_TUNER':\n os.system('sudo systemctl restart raspotify')\n os.system(\"sudo systemctl restart tuner\")\n os.system(\"say starting tuner\")\n elif key == 'KEY_VOLUMEDOWN':\n os.system(\"amixer set PCM 5%-\")\n elif key == 'KEY_1':\n start_daily_workout()\n elif key == 'KEY_2':\n start_custom_timer(2)\n elif key == 'KEY_3':\n start_custom_timer(3)\n elif key == 'KEY_4':\n start_custom_timer(4)\n elif key == 'KEY_5':\n start_custom_timer(5)\n elif key == 'KEY_0':\n start_custom_timer(10)\n elif key == 'KEY_PREVIOUSSONG':\n start_silent_timer()\n elif key == 'KEY_NEXTSONG':\n start_timer()\n elif key == 'KEY_HOMEPAGE':\n alarm = '6:15'\n os.system(f\"say setting alarm to {alarm}\")\n set_alarm(alarm)\n elif key == 'KEY_ENTER':\n import random\n result = 'yes' if random.randint(0, 1) == 1 else 'no'\n os.system(f'say {result}')\n\n else:\n pixels[0] = (int(255 * dimmer), 0, int(255 * dimmer))\n\nGPIO.add_event_detect(switch_pin, GPIO.FALLING, callback=button_pressed, bouncetime=250)\nservice = RemoteService()\nservice.start_listening(on_key_pressed) # This call is blocking so we never come here\n\nx = 0\nincrement = 0.1\nsleep = 0.01\n\nwhile True:\n\n if GPIO.input(switch_pin) == 0:\n pixels[0] = (0, 0, int(255 * dimmer))\n blue = abs(int(math.sin(x) * 255 * dimmer))\n red = abs(int(math.cos(x) * 255 * dimmer))\n # green = abs(int(math.cos(x + math.pi/4)*255*dimmer))\n pixels[0] = (red, 0, blue)\n x = x + increment\n 
time.sleep(sleep)\n","repo_name":"besi/stereopi","sub_path":"stereopi.py","file_name":"stereopi.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"18043687455","text":"import bleach\nfrom .models import *\n\ndef cleanAndCheckNumber(numberString):\n x = bleach.clean(str(numberString))\n try:\n x = int(x)\n if x > 100 or x < 1:\n raise ValueError\n except ValueError:\n raise ValueError(\"Input must be a valid number from 1 to 100.\")\n return x\n\n\ndef clearDatabase():\n x = GuessThis.objects.all()\n x.delete()\n x = Guesses.objects.all()\n x.delete()\n if len(GuessThis.objects.all()) != 0 or len(Guesses.objects.all()) != 0:\n raise ResourceWarning(\"clearDatabase() reads that database has not \\\n been fully cleared\")\n return\n\ndef setNumber(number):\n x = GuessThis(number=number)\n x.save()\n return\n\ndef getNumber():\n if len(GuessThis.objects.all()):\n return GuessThis.objects.all()[0].number\n return\n\ndef setAndCheckGuess(number):\n check = GuessThis.objects.all()[0].number\n current = Guesses(guesses=number)\n current.save()\n if number == check:\n return \"Win\"\n elif number < check:\n return \"Higher\"\n else:\n return \"Lower\"\n\ndef getGuessList():\n return [x.guesses for x in Guesses.objects.all()]\n\ndef isGameOver():\n return len(Guesses.objects.all()) >= 10\n","repo_name":"stajama/serverTest1","sub_path":"gameServer/guessingGame/workers.py","file_name":"workers.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33217213563","text":"import numpy as np\nfrom modules.module import Module\nfrom modules.lstm_layer import LSTMLayer\n\nclass LSTM(Module):\n def __init__(self, input_size, hidden_size):\n super(LSTM, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.cell = LSTMLayer(input_size, 
hidden_size)\n\n def forward(self, X, prev_h):\n '''\n Does the LSTM forward for a sequence `X`.\n\n Arguments:\n X: numpy array of shape (seq_len x batch_size x input_size)\n The input sequence.\n h_n: numpy array of shape (batch_size x hidden_size)\n The initial hidden state.\n c_n: numpy array of shape (batch_size x hidden_size)\n The initial cell state.\n\n Returns: output, (h_n, c_n)\n output: numpy array of shape (seq_len x batch_size x hidden_size)\n Stack of LSTM outputs (hidden states) at each timestep.\n h_n: numpy array of shape (batch_size x hidden_size)\n Final hidden state after consuming the input sequence.\n c_n: numpy array of shape (batch_size x hidden_size)\n Final cell state after consuming the input sequence.\n '''\n h_n, c_n = prev_h\n seq_len = X.shape[0]\n\n hiddens = []\n self.activations = []\n for t in range(seq_len):\n h_n, c_n, act_t = self.cell(X[t], (h_n, c_n))\n\n hiddens.append(h_n)\n self.activations.append(act_t)\n\n return np.stack(hiddens), (h_n, c_n)\n\n def backward_once(self, activations, dLdOut):\n dLdX = []\n\n dLdOut_t = (dLdOut, 0)\n for act_t in activations[::-1]:\n dLdx_t, dLdOut_t = self.cell.backward(act_t, dLdOut_t)\n dLdX.append(dLdx_t)\n\n return dLdX[::-1]\n\n def backward(self, dLdOut):\n '''\n Differentiate error w.r.t. weights and w.r.t. input.\n Steps:\n 1. differentiate outputs w.r.t. weights and inputs\n - delegate gradient computation to LSTMLayer\n 2. apply chain rule: accumulate \"outer\" gradient\n\n Arguments:\n dLdOut: numpy array of shape (batch_size x hidden_size)\n Gradient w.r.t. the LSTM output.\n\n Returns: dLdX\n dLdX: list of numpy arrays of shape (batch_size x hidden_size)\n Gradients for each timestep w.r.t. 
the LSTM inputs.\n '''\n if dLdOut.ndim == 2:\n return self.backward_once(self.activations, dLdOut)\n elif dLdOut.ndim == 3:\n seq_len = dLdOut.shape[0]\n batch_size = dLdOut.shape[1]\n dLdX = np.zeros([seq_len, batch_size, self.input_size])\n\n for t in range(dLdOut.shape[0], 0, -1):\n dX_t = self.backward_once(self.activations[:t], dLdOut[t-1])\n\n for x_acc, x_new in zip(dLdX[:t], dX_t):\n x_acc += x_new\n\n return dLdX\n","repo_name":"mullovc/LSTM-Task","sub_path":"modules/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22942430315","text":"# https://leetcode.com/explore/challenge/card/december-leetcoding-challenge/569/week-1-december-1st-december-7th/3555/\n\n# Can Place Flowers\n\n\n# You have a long flowerbed in which some of the plots are planted, and some are not. \n# However, flowers cannot be planted in adjacent plots.\n\n# Given an integer array flowerbed containing 0's and 1's, where 0 means empty and 1 means not empty, \n# and an integer n, return if n new flowers can be planted in the flowerbed without violating the no-adjacent-flowers rule.\n\n\n\n# Example 1:\n# Input: flowerbed = [1,0,0,0,1], n = 1\n# Output: true\n\n# Example 2:\n# Input: flowerbed = [1,0,0,0,1], n = 2\n# Output: false\n \n\n# Constraints:\n# 1 <= flowerbed.length <= 2 * 10^4\n# flowerbed[i] is 0 or 1.\n# There are no two adjacent flowers in flowerbed.\n# 0 <= n <= flowerbed.length\n\n\n# from typing import List\n# def canPlaceFlowers(flowerbed: List[int], n: int) -> bool:\n# \tif 0 not in flowerbed:\n# \t\treturn False\n# \ti = flowerbed.index(1)\n# \tn -= i // 2\n# \tcount = 0\n# \twhile i < len(flowerbed):\n# \t\tif flowerbed[i] == 0:\n# \t\t\tcount += 1\n# \t\telse:\n# \t\t\tn -= (max(0, count - 1)) // 2\n# \t\t\tcount = 0\n# \t\t\tif n <= 0:\n# \t\t\t\treturn True\n# \t\ti += 1\n# \tif count:\n# \t\tn -= count // 2\n# \treturn n <= 
0\n\n\n\nfrom typing import List\ndef canPlaceFlowers(flowerbed: List[int], n: int) -> bool:\n\tflowerbed = [1, 0] + flowerbed + [0, 1]\n\tcount = 0\n\tfor plot in flowerbed:\n\t\tif plot == 0:\n\t\t\tcount += 1\n\t\telse:\n\t\t\tn -= (max(0, count - 1)) // 2\n\t\t\tif n <= 0:\n\t\t\t\treturn True\n\t\t\tcount = 0\n\treturn False\n\n\nassert(canPlaceFlowers([1,0,0,0,1], 1) == True)\nassert(canPlaceFlowers([1,0,0,0,1], 2) == False)\nassert(canPlaceFlowers([0,0,0,1,1,0,0], 2) == True)\nassert(canPlaceFlowers([1], 2) == False)\nassert(canPlaceFlowers([0,0,0,0,1,0,0,1,0,0,1], 3) == False)\n\n\n\n\n\n\n\n","repo_name":"candyer/leetcode","sub_path":"2020 December LeetCoding Challenge/05_canPlaceFlowers.py","file_name":"05_canPlaceFlowers.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"19445564954","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Data Creation: Day Types\n# \n# This notebook builds a function that adds the type of day for each calendar day.\n# \n# Including type of day as a short term predictor in energy load forecasts has been shown as a useful predictor [here](https://www.mdpi.com/1996-1073/12/1/164/pdf) and [here](https://www.mdpi.com/1996-1073/11/5/1120/pdf). 
The types of days (exogenous varaible) that are generated by this function are:\n# \n# - named day of the week\n# - weekend or weekday\n# - holiday or special event\n# \n\n# In[1]:\n\n\n#import relevant libraries\nfrom datetime import date\nimport pandas as pd\nimport holidays\n\n\ndef get_holidays(start='1/1/2018', stop='31/12/2018', country='ES', frequency='H'):\n \"\"\"\n Takes in a start and stop date and a country.\n \n Produces a dataframe with a daily date time index and columns:\n day_of_week - numerical day of the week identifier 0 for monday\n holiday_bool - boolean true or false for holiday\n holiday_name - name of the holiday if holiday_bool is true\n \n Returns a dataframe\n \"\"\"\n \n #generate the range of daily dates\n dates = pd.date_range(start=start, end=stop, freq=frequency)\n \n #create the holiday object\n country_holidays = holidays.CountryHoliday(country)\n\n #create a list for the holiday bool and name\n holiday_list = []\n \n #loop through the dates\n for date in dates:\n #true if holiday in object, false otherwise\n holiday_bool = date in country_holidays\n holiday_names = country_holidays.get(date)\n \n holiday_list.append([holiday_bool, holiday_names])\n \n #create return dataframe\n holidays_data = pd.DataFrame(holiday_list, index=dates, columns=['holiday_bool', 'holiday_name'])\n \n return holidays_data\n \n\n\n# In[17]:\n\n\ndef get_days_dummies(start='1/1/2018', stop='31/12/2018', frequency='H'):\n \"\"\"\n Takes in a start and stop date and frequency.\n \n Produces a dataframe with dummy values for the day of the week with columns mon - sun:\n weekday_id - numerical day of the week identifier 0 for monday\n \n Returns a dataframe\n \"\"\"\n \n #generate the range of daily dates\n dates = pd.date_range(start=start, end=stop, freq=frequency)\n \n #create a dataframe of weekday categories\n days = pd.DataFrame(list(dates.weekday), index=dates, columns=['weekday_id'])\n \n days = pd.get_dummies(days['weekday_id'])\n \n columns = 
['mon', 'tue', 'wed', 'thur', 'fri', 'sat', 'sun']\n \n days.columns = columns\n \n return days\n\n\n\n\n","repo_name":"NaifAlqahtani/forcasting","sub_path":"prophet/create_day_types.py","file_name":"create_day_types.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37331899431","text":"# Author: Christian Olo\n# Program Description: A LOLCode interpreter developed using python\n\n# source code for the semantic analyzer\n\nfrom Symbol import Symbol # uses the symbol class to store valid symbol objects\nimport math\nimport re\n\nclass SymbolAnalyzer:\n def __init__(self, atnode_tree, stdin = []):\n self.atnode_tree = atnode_tree\n self.symbol_table = {'IT': Symbol('Noob', value = None)}\n self.stdin = stdin # pre-defined user inputs\n self.output = '' # stores the stdout\n self.err = '' \n self.line_number = 1\n\n # main function to start the analyzer\n def analyze(self):\n for node in self.atnode_tree.children_nodes:\n if node.type == 'Statement':\n self.analyzeStatement(node.children_nodes)\n self.line_number += 1\n elif node.type == 'Multiline Comment':\n self.line_number += len(node.children_nodes) - 1\n else:\n self.line_number += 1 \n \n return self.symbol_table, self.output\n \n # analyzes a statement\n def analyzeStatement(self, node):\n statement = node[0]\n if statement.type == 'Output Statement':\n self.visible(statement)\n elif statement.type == 'Assignment Statement':\n self.assignment(statement)\n elif statement.type == 'Typecast Statement':\n self.cast(statement)\n elif statement.type == 'Exprvar':\n self.expression(statement)\n elif statement.type == 'Input Statement':\n self.gimmeh(statement)\n elif statement.type == 'Loop Statement':\n self.loop(statement)\n elif statement.type == 'Switch Statement':\n self.switch(statement)\n elif statement.type == 'Declaration Statement':\n self.declaration(statement)\n elif statement.type == 'If-else 
Statement':\n return self.ifElse(statement)\n elif statement.type == 'Multiline Comment':\n self.line_number += len(statement.children_nodes)\n elif statement.type == 'Break Statement':\n return 1\n\n return 0\n\n # analyzes visible statement\n def visible(self, statement):\n printNodes = list(statement.children_nodes)[1:]\n toPrint = ''\n # iteratively checks the provided to be printed values for visible\n for i in printNodes:\n expression = i.children_nodes[0]\n expValue = self.getValue(expression.children_nodes[0])\n if expValue.type != 'Yarn Literal':\n expValue = self.strTypecast(expValue)\n toPrint += expValue.value\n self.output += toPrint + '\\n' # stores the result to later print to the GUI terminal\n\n # analyzes declaration statement\n def declaration(self, statement):\n variable = statement.children_nodes[1].value\n\n if len(statement.children_nodes) > 2: # value is initialized\n value = self.getValue(statement.children_nodes[3].children_nodes[0]) # gets the value of the expression\n self.symbol_table[variable] = Symbol(value.type, value.value)\n else:\n self.symbol_table[variable] = Symbol('Noob') # set as None\n \n # analyzes input statements (uses stdin)\n def gimmeh(self, statement):\n variable = statement.children_nodes[1].value\n self.lookup(variable) # checks if the variable is declared\n # uses the value provided in stdin\n if len(self.stdin) > 0:\n temp = self.stdin[0]\n self.stdin.pop(0)\n else:\n temp = ''\n # typecasts numeric value to float or int\n try:\n self.symbol_table[variable] = self.numTypecast(Symbol('Yarn Literal', temp))\n except:\n self.symbol_table[variable] = Symbol('Yarn Literal', temp)\n\n # analyzes assignment statement\n def assignment(self, statement):\n variable = statement.children_nodes[0].value\n self.lookup(variable)\n value = self.getValue(statement.children_nodes[2].children_nodes[0])\n self.symbol_table[variable] = value\n \n # analyzes expressions\n def expression(self, statement):\n value = 
self.getValue(statement.children_nodes[0])\n self.symbol_table['IT'] = value\n\n # analyzes typecase (IS NOW A) statement\n def cast(self, statement):\n variable = statement.children_nodes[0].value\n self.lookup(variable)\n value = self.symbol_table[variable]\n dataType = statement.children_nodes[2].value\n newValue = self.typecast(value, dataType)\n\n self.symbol_table[variable] = newValue\n\n # analyzes loop statement\n def loop(self, statement):\n operation = statement.children_nodes[2].value # uppin/nerfin\n variable = statement.children_nodes[4].value # variable to change\n self.lookup(variable)\n\n # typecasts the value to numerical types if not numbr or numbar\n if self.symbol_table[variable].type != 'Numbr Literal' and self.symbol_table[variable].type != 'Numbar Literal':\n self.symbol_table[variable] = self.numTypecast(self.symbol_table[variable])\n \n condition = False\n expression = None\n \n stIdx = 5\n\n # checks if there is a provided condition\n if statement.children_nodes[5].type == 'Condition Keyword':\n condition = statement.children_nodes[5].value\n expressionNode = statement.children_nodes[6]\n expression = self.evaluateExpression(expressionNode.children_nodes[0])\n stIdx = 7\n \n self.line_number += 1\n\n # code block\n statements = list(statement.children_nodes)[stIdx:]\n\n temp = self.symbol_table[variable].value\n stopExec = False\n\n tempLine = self.line_number\n init = True\n while True:\n # checks condition, breaks if condition is met\n if condition == 'TIL' or condition == 'WILE':\n expression = self.evaluateExpression(expressionNode.children_nodes[0])\n if expression.type != 'Troof Literal':\n expression = self.boolTypecast(expression)\n if ((condition == 'TIL' and expression.value == 'WIN') or\n (condition == 'WILE' and expression.value == 'FAIL')):\n break\n\n # analyzes statements within the loop\n for statement in statements:\n if statement.type == 'Loop End':\n break\n if not statement.children_nodes:\n self.line_number += 1 if 
init else 0\n continue\n if self.analyzeStatement(statement.children_nodes):\n stopExec = True\n self.line_number += 1 if init else 0\n break\n self.line_number += 1 if init else 0\n\n if stopExec:\n break\n if operation == 'UPPIN':\n temp += 1\n elif operation == 'NERFIN':\n temp -= 1\n self.symbol_table[variable] = Symbol(self.symbol_table[variable].type, temp)\n init = False\n self.line_number = tempLine + len(statements) - 2\n\n # analyzes switch statements\n def switch(self, statement):\n temp = self.symbol_table['IT'].value\n cases = list(statement.children_nodes)[1:]\n self.line_number += 1\n i = 0\n matched = False\n\n # analyzes the cases\n while cases[i].type == 'Case':\n omg = cases[i]\n value = self.getValue(omg.children_nodes[1])\n\n if value.value == temp:\n matched = True\n break\n self.line_number += len(omg.children_nodes) - 1\n i += 1\n \n # if a case is matched\n if matched:\n tempLine = self.line_number\n statements = list(cases[i].children_nodes)[2:]\n for statement in statements:\n if not statement.children_nodes:\n self.line_number += 1\n continue\n status = self.analyzeStatement(statement.children_nodes)\n self.line_number += 1\n if status == 1:\n break\n self.line_number = tempLine + len(cases[i].children_nodes) - 1\n i += 1\n while cases[i].type == 'Case' or cases[i].type == 'Default Case':\n self.line_number += len(cases[i].children_nodes) - 1 if cases[i].type == 'Case' else len(cases[i].children_nodes)\n i += 1\n elif cases[-2].type == 'Default Case': # default case\n tempLine = self.line_number\n statements = list(cases[-2].children_nodes)[1:]\n for statement in statements:\n if not statement.children_nodes:\n self.line_number += 1\n continue\n status = self.analyzeStatement(statement.children_nodes)\n self.line_number += 1\n if status == 1:\n break\n self.line_number = tempLine + len(cases[-2].children_nodes)\n\n # analyzes ifElse statements\n def ifElse(self, statement):\n temp = self.symbol_table['IT']\n \n if temp.type != 'Troof 
Literal':\n temp = self.boolTypecast(temp)\n \n self.line_number += 1\n\n status = 0\n\n # ya rly\n if temp.value == 'WIN':\n block = statement.children_nodes[1]\n self.line_number += 1\n\n statements = list(block.children_nodes)[1:]\n for s in statements:\n if not s.children_nodes:\n self.line_number += 1\n continue\n status = self.analyzeStatement(s.children_nodes)\n self.line_number += 1\n i = 2\n while statement.children_nodes[i].type == 'Else-if' or statement.children_nodes[i].type == 'Else':\n self.line_number += len(statement.children_nodes[i].children_nodes) - 1 if statement.children_nodes[i].type == 'Else-if' else len(statement.children_nodes[i].children_nodes)\n i += 1\n else:\n self.line_number += len(statement.children_nodes[1].children_nodes)\n i = 2\n matched = False\n # mebbe\n while statement.children_nodes[i].type == 'Else-if':\n conditionNode = statement.children_nodes[i].children_nodes[1]\n condition = self.getValue(conditionNode.children_nodes[0])\n\n if condition.type != 'Troof Literal':\n condition = self.boolTypecast(condition)\n\n if condition.value == 'WIN':\n self.line_number += 1\n matched = True\n block = statement.children_nodes[i]\n statements = list(block.children_nodes)[2:]\n for s in statements:\n if not s.children_nodes:\n self.line_number += 1\n continue\n status = self.analyzeStatement(s.children_nodes)\n self.line_number += 1\n j = i+1\n while statement.children_nodes[j].type == 'Else-if' or statement.children_nodes[j].type == 'Else':\n self.line_number += len(statement.children_nodes[j].children_nodes) - 1 if statement.children_nodes[j].type == 'Else-if' else len(statement.children_nodes[j].children_nodes)\n j += 1\n break\n else:\n self.line_number += len(statement.children_nodes[i].children_nodes) - 1\n i += 1\n if not matched:\n if statement.children_nodes[i].type == 'Else': # else\n block = statement.children_nodes[i]\n statements = list(block.children_nodes)[1:]\n for s in statements:\n if not s.children_nodes:\n 
self.line_number += 1\n continue\n status = self.analyzeStatement(s.children_nodes)\n self.line_number += 1\n return status\n # --------------------Getting Values of expresison/literal/variable-------------------\n # gets the value of an expression, literal, or variable, return a symbol\n def getValue(self, expression):\n expType = expression\n\n if expType.type == 'Literal':\n litType = expType.children_nodes[0]\n if litType.type == 'Yarn Literal':\n return Symbol('Yarn Literal', litType.children_nodes[0].value)\n elif litType.type == 'Numbr Literal':\n return Symbol(litType.type, int(litType.value))\n elif litType.type == 'Numbar Literal':\n return Symbol(litType.type, float(litType.value))\n else:\n return Symbol(litType.type, litType.value)\n elif expType.type == 'Variable Identifier' or expType.type == 'Implicit Variable':\n variable = expType.value\n self.lookup(variable)\n return self.symbol_table[variable]\n else: # expression\n return self.evaluateExpression(expType.children_nodes[0])\n \n # evaulates an expression\n def evaluateExpression(self, expression):\n if (expression.type == 'Addition' or \n expression.type == 'Subtraction' or\n expression.type == 'Multiplication' or\n expression.type == 'Division' or\n expression.type == 'Modulo' or\n expression.type == 'Min' or\n expression.type == 'Max'):\n return self.arithmetic(expression)\n elif (expression.type == 'And' or\n expression.type == 'Or' or\n expression.type == 'Xor' or\n expression.type == 'Not'):\n return self.boolean(expression)\n elif (expression.type == 'Infinite And' or\n expression.type == 'Infinite Or'):\n return self.infBoolean(expression)\n elif (expression.type == 'Equality Check' or\n expression.type == 'Inequality Check'):\n return self.comparison(expression)\n elif (expression.type == 'Concatenate'):\n return self.smoosh(expression)\n elif (expression.type == 'Maek'):\n return self.maek(expression)\n \n # evaulates arithmetic operation\n def arithmetic(self, expression):\n operands = 
expression.children_nodes\n op1Exp = self.getValue(operands[1].children_nodes[0])\n op2Exp = self.getValue(operands[2].children_nodes[0])\n\n # typcasts operands if not numeric type\n if op1Exp.type != 'Numbr Literal' and op1Exp.type != 'Numbar Literal':\n temp = self.numTypecast(op1Exp)\n op1 = temp.value\n else:\n op1 = op1Exp.value\n if op2Exp.type != 'Numbr Literal' and op2Exp.type != 'Numbar Literal':\n temp = self.numTypecast(op2Exp)\n op2 = temp.value\n else:\n op2 = op2Exp.value\n\n if expression.type == 'Addition':\n ans = op1 + op2\n elif expression.type == 'Subtraction':\n ans = op1 - op2\n elif expression.type == 'Multiplication':\n ans = op1 * op2\n elif expression.type == 'Division':\n if op1Exp.type == 'Numbr Literal' and op2Exp.type == 'Numbr Literal':\n ans = op1 // op2\n else:\n ans = op1 / op2\n elif expression.type == 'Modulo':\n ans = op1 % op2\n elif expression.type == 'Max':\n ans = max(op1, op2)\n elif expression.type == 'Min':\n ans = min(op1, op2)\n\n if isinstance(ans, int):\n return Symbol('Numbr Literal', ans)\n else:\n return Symbol('Numbar Literal', ans)\n\n # evaulates boolean expressions\n def boolean(self, expression):\n operands = expression.children_nodes\n op1Exp = self.getValue(operands[1].children_nodes[0])\n\n # typecasting if necessary\n if op1Exp.type == 'Troof Literal':\n if op1Exp.value == 'WIN':\n op1 = True\n else:\n op1 = False\n else:\n temp = self.boolTypecast(op1Exp)\n if temp.value == 'WIN':\n op1 = True\n else:\n op1 = False\n if expression.type != 'Not':\n op2Exp = self.getValue(operands[2].children_nodes[0])\n if op2Exp.type == 'Troof Literal':\n if op2Exp.value == 'WIN':\n op2 = True\n else:\n op2 = False\n else:\n temp = self.boolTypecast(op2Exp)\n if temp.value == 'WIN':\n op2 = True\n else:\n op2 = False\n \n if expression.type == 'And':\n ans = op1 and op2\n elif expression.type == 'Or':\n ans = op1 or op2\n elif expression.type == 'Not':\n ans = not op1\n elif expression.type == 'Xor':\n ans = op1 ^ op2\n 
\n if ans == True:\n return Symbol('Troof Literal', 'WIN')\n else:\n return Symbol('Troof Literal', 'FAIL')\n\n # evaluate all of an any of\n def infBoolean(self, expression):\n operations = list(expression.children_nodes)[2:]\n\n res = self.getValue(expression.children_nodes[1].children_nodes[0])\n if res.type != 'Troof Literal':\n res = self.boolTypecast(res)\n if res.value == 'WIN':\n ans = True\n else:\n ans = False\n for op in operations:\n res = self.getValue(op.children_nodes[0])\n if res.type != 'Troof Literal':\n res = self.boolTypecast(res)\n if res.value == 'WIN':\n temp = True\n else:\n temp = False\n if expression.type == 'Infinite And':\n ans = ans and temp\n else:\n ans = ans or temp\n \n if ans == True:\n return Symbol('Troof Literal', 'WIN')\n else:\n return Symbol('Troof Literal', 'FAIL')\n\n # evaluates smoosh operation\n def smoosh(self, expression):\n operations = list(expression.children_nodes)[1:]\n\n toConcat = ''\n for op in operations:\n temp = self.getValue(op.children_nodes[0])\n if temp.type != 'Yarn Literal':\n temp = self.strTypecast(temp)\n toConcat += temp.value\n \n return Symbol('Yarn Literal', value = toConcat)\n\n # analyze comparison expressions\n def comparison(self, expression):\n operands = expression.children_nodes\n op1Exp = self.getValue(operands[1].children_nodes[0])\n op2Exp = self.getValue(operands[2].children_nodes[0])\n\n if op1Exp.type == 'Troof Literal':\n op1 = True\n else:\n op1 = op1Exp.value\n \n if op2Exp.type == 'Troof Literal':\n op2 = True\n else:\n op2 = op2Exp.value\n\n if expression.type == 'Equality Check':\n ans = op1 == op2\n elif expression.type == 'Inequality Check':\n ans = op1 != op2\n\n if ans == True:\n return Symbol('Troof Literal', 'WIN')\n else:\n return Symbol('Troof Literal', 'FAIL')\n \n # evaluates maek expression (explicit typecasting)\n def maek(self, expression):\n value = self.getValue(expression.children_nodes[1].children_nodes[0])\n dataType = expression.children_nodes[2].value\n 
return self.typecast(value, dataType)\n # --------------------Getting Values of expresison/literal/variable-------------------\n\n # --------------------Implicit typecasting-------------------\n # typecast a value to boolean\n def boolTypecast(self, expression):\n if expression.type == 'Yarn Literal':\n if expression.value == '':\n return Symbol('Troof Literal', 'FAIL')\n else:\n return Symbol('Troof Literal', 'WIN')\n elif expression.type == 'Numbr Literal' or expression.type == 'Numbar Literal':\n if float(expression.value) == 0:\n return Symbol('Troof Literal', 'FAIL')\n else:\n return Symbol('Troof Literal', 'WIN')\n elif expression.type == 'Noob':\n return Symbol('Troof Literal', 'FAIL')\n else:\n self.err = f\"Semantic Error:{self.line_number}: {expression.value} cannot be typecasted to TROOF\"\n raise Exception()\n \n # typecasts a value to numerical type\n def numTypecast(self, expression):\n if expression.type == 'Yarn Literal':\n if re.match(r\"-?[0-9]+\\.[0-9]+$\", expression.value):\n return Symbol('Numbar Literal', float(expression.value))\n elif re.match(r\"-?[0-9]+$\", expression.value):\n return Symbol('Numbr Literal', int(expression.value))\n else:\n self.err = f\"Semantic Error:{self.line_number}: {expression.value} cannot be typecasted to numerical data type\"\n raise Exception()\n elif expression.type == 'Troof Literal':\n if expression.value == 'WIN':\n return Symbol('Numbr Literal', 1)\n else:\n return Symbol('Numbr Literal', 0)\n else:\n self.err = f\"Semantic Error:{self.line_number}: {expression.value} cannot be typecasted to numerical data type\"\n raise Exception()\n\n # typecasts the value to a string (yarn)\n def strTypecast(self, expression):\n if expression.type == 'Numbr Literal':\n return Symbol('Yarn Literal', str(expression.value))\n elif expression.type == 'Numbar Literal':\n return Symbol('Yarn Literal', str(math.floor(expression.value*100)/100))\n elif expression.type == 'Troof Literal':\n return Symbol('Yarn Literal', 
expression.value)\n else:\n self.err = f\"Semantic Error:{self.line_number}: {expression.value} cannot be typecasted to YARN\"\n raise Exception()\n # --------------------Implicit typecasting-------------------\n\n # --------------------Explicit typecasting-------------------\n # symbol is the to be typecasted and data type is the target data type\n def typecast(self, symbol, dataType):\n if symbol.type == 'Noob':\n if dataType == 'NUMBR':\n return Symbol('Numbr Literal', 0)\n elif dataType == 'NUMBAR':\n return Symbol('Numbar Literal', 0.0)\n elif dataType == 'YARN':\n return Symbol('Yarn Literal', '')\n elif dataType == 'TROOF':\n return Symbol('Troof Literal', 'FAIL')\n elif dataType == 'NOOB':\n return symbol\n elif symbol.type == 'Troof Literal':\n if dataType == 'NUMBR':\n temp = 1 if symbol.value == 'WIN' else 0\n return Symbol('Numbr Literal', temp)\n elif dataType == 'NUMBAR':\n temp = 1.0 if symbol.value == 'WIN' else 0.0\n return Symbol('Numbar Literal', temp)\n elif dataType == 'YARN':\n return Symbol('Yarn Literal', symbol.value)\n elif dataType == 'TROOF':\n return symbol\n else:\n self.err = f\"Semantic Error:{self.line_number}: {symbol.value} cannot be typecasted to {dataType}\"\n raise Exception()\n elif symbol.type == 'Numbar Literal':\n if dataType == 'NUMBR':\n return Symbol('Numbr Literal', int(symbol.value))\n elif dataType == 'YARN':\n return Symbol('Yarn Literal', str(math.floor(symbol.value*100)/100))\n elif dataType == 'TROOF':\n temp = 'WIN' if symbol.value != 0 else 'FAIL'\n return Symbol('Troof Literal', temp)\n elif dataType == 'NUMBAR':\n return symbol\n else:\n self.err = f\"Semantic Error:{self.line_number}: {symbol.value} cannot be typecasted to {dataType}\"\n raise Exception()\n elif symbol.type == 'Numbr Literal':\n if dataType == 'NUMBAR':\n return Symbol('Numbar Literal', float(symbol.value))\n elif dataType == 'YARN':\n return Symbol('Yarn Literal', str(symbol.value))\n elif dataType == 'TROOF':\n temp = 'WIN' if symbol.value 
!= 0 else 'FAIL'\n return Symbol('Troof Literal', temp)\n elif dataType == 'NUMBR':\n return symbol\n else:\n self.err = f\"Semantic Error:{self.line_number}: {symbol.value} cannot be typecasted to {dataType}\"\n raise Exception()\n elif symbol.type == 'Yarn Literal':\n if dataType == 'NUMBAR':\n if re.match(r\"-?[0-9]+\\.[0-9]+$\", symbol.value) or re.match(r\"-?[0-9]+$\", symbol.value):\n return Symbol('Numbar Literal', float(symbol.value))\n else:\n self.err = f\"Semantic Error:{self.line_number}: {symbol.value} cannot be typecasted to {dataType}\"\n raise Exception()\n elif dataType == 'NUMBR':\n if re.match(r\"-?[0-9]+$\", symbol.value):\n return Symbol('Numbr Literal', int(symbol.value))\n else:\n self.err = f\"Semantic Error:{self.line_number}: {symbol.value} cannot be typecasted to {dataType}\"\n raise Exception()\n elif dataType == 'YARN':\n return symbol\n elif dataType == 'TROOF':\n if symbol.value == '':\n return Symbol('Troof Literal', 'FAIL')\n else:\n return Symbol('Troof Literal', 'WIN')\n else:\n self.err = f\"Semantic Error:{self.line_number}: {symbol.value} cannot be typecasted to {dataType}\"\n raise Exception()\n # --------------------Explicit typecasting-------------------\n\n # --------------------Utils-------------------\n # checks if a variable is declared\n def lookup(self, key):\n if key in self.symbol_table.keys():\n return\n \n self.err = f\"Semantic Error:{self.line_number}: Variable \\'{key}\\' not declared\"\n raise Exception()\n # --------------------Utils-------------------","repo_name":"ChristianLois/CMSC124-LOLCODE-INTERPRETER","sub_path":"SymbolAnalyzer.py","file_name":"SymbolAnalyzer.py","file_ext":"py","file_size_in_byte":26776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7454190480","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils import weight_norm\nimport torch.nn.functional as F\nfrom lib.ResidualModule 
import ResidualModule\n\n\nclass Writer(nn.Module):\n def __init__(self, indim, hiddim, outdim, ds_times, normalize, nlayers=4):\n super(Writer, self).__init__()\n\n self.ds_times = ds_times\n\n if normalize == 'gate':\n ifgate = True\n else:\n ifgate = False\n\n self.decoder = ResidualModule(modeltype='decoder', indim=indim, hiddim=hiddim, outdim=hiddim,\n nres=self.ds_times, nlayers=nlayers, ifgate=ifgate, normalize=normalize)\n\n self.out_conv = nn.Conv2d(hiddim, outdim, 3, 1, 1)\n\n def forward(self, x):\n out = self.decoder(x)\n out = self.out_conv(out)\n\n return out\n","repo_name":"Lucas2012/ProbabilisticNeuralProgrammedNetwork","sub_path":"lib/modules/ResWriter.py","file_name":"ResWriter.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"57"} +{"seq_id":"43113874200","text":"from aiogram import types, Dispatcher\nfrom state import UserState\nfrom aiogram.dispatcher import FSMContext\nimport kb\n\nasync def s_botom_handler(callback: types.CallbackQuery):\n await callback.message.answer('Как эта карта отвечает на твой вопрос?\\n\\nДля более глубоко понимания и анализа отправь ответ в чат. Этот чат конфиденциальный, никто (включая меня) не увидит твои ответы',\n reply_markup=kb.zhelanie)\n await UserState.question1.set()\n\nasync def reply(msg:types.message, state:FSMContext):\n await state.update_data(question1 = msg.text)\n await msg.answer('Что ты видишь на карте?\\n\\nОпиши подробно что и кто на ней изображены? 
Какая ситуация проигрывается?',\n reply_markup=kb.zhelanie)\n await UserState.question2.set()\n\nasync def reply2(msg:types.message, state:FSMContext):\n await state.update_data(question2 = msg.text)\n await msg.answer('Что, на твой взгляд, чувствуют и переживают персонаж(и)?',\n reply_markup=kb.zhelanie)\n await UserState.question3.set()\n\nasync def reply3(msg:types.message, state:FSMContext):\n await state.update_data(question3 = msg.text)\n await msg.answer('Ты есть на этой карте? Если есть, то где? Если нет, то какова твоя роль в этой композиции?',\n reply_markup=kb.zhelanie)\n await UserState.question4.set()\n\nasync def reply4(msg:types.message, state:FSMContext):\n await state.update_data(question4 = msg.text)\n await msg.answer('Что ты чувствуешь, смотря на эту карту? Что тебе хочется сделать?',\n reply_markup=kb.zhelanie)\n await UserState.question5.set()\n\nasync def reply5(msg:types.message, state:FSMContext):\n await state.update_data(question5 = msg.text)\n await msg.answer('Как эта карта отвечает на твой основной вопрос?',\n reply_markup=kb.zhelanie)\n await UserState.question6.set()\n\ndef register_handler_help_with_card_description(dp:Dispatcher):\n dp.register_callback_query_handler(s_botom_handler, text='С ботом2', state=UserState.questi)\n dp.register_message_handler(reply, state=UserState.question1)\n dp.register_message_handler(reply2, state=UserState.question2)\n dp.register_message_handler(reply3, state=UserState.question3)\n dp.register_message_handler(reply4, state=UserState.question4)\n dp.register_message_handler(reply5, state=UserState.question5)","repo_name":"NLJS1509/Metaphora","sub_path":"handlers/help_with_card_description.py","file_name":"help_with_card_description.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"74134293297","text":"def is_valid_iterable(iterable):\n try:\n iter(iterable)\n return True\n except TypeError:\n 
return False\n\ndef ft_filter(function_to_apply, iterable):\n \"\"\"Filter the result of function apply to all elements of the iterable.\n Args:\n function_to_apply: a function taking an iterable.\n iterable: an iterable object (list, tuple, iterator).\n Return:\n An iterable.\n None if the iterable can not be used by the function.\n \"\"\"\n # ... Your code here ...\n if not is_valid_iterable(iterable):\n print(\"The object is not iterable\")\n return None\n return [item for item in iterable if function_to_apply(item)]\n\nx = [1, 2, 3, 4, 5]\n# Output:\nprint(list(ft_filter(lambda dum: not (dum % 2), x)))\nft_filter(lambda y: y + 2, 42)","repo_name":"armendes42/Piscine-Python-ML","sub_path":"PythonModule02/ex00/ft_filter.py","file_name":"ft_filter.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"42666416984","text":"\"\"\"\nSynthetic imaging / simulated observing of the NGC 1333/Perseus Herschel map at\n250um (selected for resolution matching) observed at the distance and\nsensitivity of W51\n\"\"\"\nfrom astropy.io import fits\nfrom astropy import units as u\nfrom astropy import constants\nimport paths\nimport requests\nimport os\nimport numpy as np\nimport dust_emissivity\n\ndperseus = 140.\ndw51 = 5410.\ndistance_scaling = dperseus/dw51\nfreq_alma = 226.6*u.GHz\nwave_alma = constants.c/(freq_alma)\nwave_herschel = 250*u.um\nfreq_herschel = constants.c/wave_herschel\nnu = u.Quantity([freq_herschel, freq_alma])\nflux = dust_emissivity.blackbody.modified_blackbody(nu, temperature=20*u.K, beta=1.5)\nalpha = np.log(flux[0]/flux[1])/np.log(nu[0]/nu[1])\nflux_scaling = (wave_herschel/wave_alma).decompose().value**alpha\n\n# Herschel 250um has ~18\" beam\n#in_bm = (2*np.pi*(18.*u.arcsec/206265./2.35)**2)\n# but we want to keep constant MJy/sr (surface brightness) out to a larger\n# distance, then convert that to Jy/(ALMA beam)\n# use 18.2\" from Konyves 
2015\nout_bm = (2*np.pi*((18.2*distance_scaling)*u.arcsec/2.35)**2)\n#MJySr_to_JyBm = (1*u.MJy/u.sr).to(u.Jy/in_bm).value\nMJySr_to_JyBm = (1*u.MJy/u.sr).to(u.Jy/out_bm).value\n\nim_perseus = paths.dpath('perseus04-250.fits.gz')\nperseus_rescaled = paths.dpath('perseus04-250-rescaled.fits')\n\nif not os.path.exists(im_perseus):\n result = requests.get('http://www.herschel.fr/cea/gouldbelt/en/archives/perseus04/perseus04-250.fits.gz')\n with open(im_perseus,'w') as f:\n f.write(result.content)\n\n# scale data to 1100um\nffile = fits.open(im_perseus)\nffile[0].header['CRVAL1'] = 290.92402\nffile[0].header['CRPIX1'] = 1100\nffile[0].header['CUNIT1'] = 'deg'\nffile[0].header['CDELT1'] = ffile[0].header['CDELT1'] * distance_scaling\nffile[0].header['CTYPE1'] = 'RA---TAN'\nffile[0].header['CRVAL2'] = 14.512736\nffile[0].header['CRPIX2'] = 1553\nffile[0].header['CUNIT2'] = 'deg'\nffile[0].header['CDELT2'] = ffile[0].header['CDELT2'] * distance_scaling\nffile[0].header['CTYPE2'] = 'DEC--TAN'\nffile[0].header['CRPIX3'] = 1\nffile[0].header['CRVAL3'] = 2.33946806e+11\nffile[0].header['CUNIT3'] = 'Hz'\nffile[0].header['CDELT3'] = 1e9\nffile[0].header['CTYPE3'] = 'FREQ'\nffile[0].header['CRPIX4'] = 1\nffile[0].header['CRVAL4'] = 1\nffile[0].header['CUNIT4'] = ''\nffile[0].header['CDELT4'] = 0\nffile[0].header['CTYPE4'] = 'STOKES'\nffile[0].header['RESTFRQ'] = 2.33946806e+11\nffile[0].header['BUNIT'] = 'Jy/beam'\nffile[0].data *= flux_scaling * MJySr_to_JyBm\n#ffile[0].data = np.expand_dims(ffile[0].data, axis=0)\nffile.writeto(perseus_rescaled, clobber=True)\nhdr = ffile[0].header\n\nperseus_casa_image = 'perseus_250_to_w51.image'\n# doesn't always work: unreliable = don't use.\n# rmresult = rmtables([perseus_casa_image])\nos.system('rm -rf {0}'.format(perseus_casa_image))\nimportfits(fitsimage=perseus_rescaled,\n imagename=perseus_casa_image,\n overwrite=True,\n defaultaxes=True,#['RA---TAN','DEC--TAN','FREQUENCY','STOKES'],\n defaultaxesvalues=['2.909234541667E+02deg',\n 
'1.451177222222E+01deg',\n '233.9468GHz','I'],\n # 18\" = 1.22 lambda/D\n beam=[\"{0}deg\".format(18/3600.*distance_scaling),\n \"{0}deg\".format(18/3600.*distance_scaling),\n \"0deg\"],\n )\nprint(\"FITS CDELT1={0}, CDELT2={1}\".format(hdr['CDELT1'], hdr['CDELT2']))\nprint(\"image CDELT1={0[value]}{0[unit]}, CDELT2={1[value]}{1[unit]}\"\n .format(imhead(imagename=perseus_casa_image, mode='get', hdkey='CDELT1'),\n imhead(imagename=perseus_casa_image, mode='get',\n hdkey='CDELT2'),)\n )\n\n#imhead(perseus_casa_image, mode='put', hdkey='CDELT1', hdvalue={'value':hdr['CDELT1'], 'unit':'deg'})\n#imhead(perseus_casa_image, mode='put', hdkey='CDELT2', hdvalue={'value':hdr['CDELT2'], 'unit':'deg'})\n#imhead(perseus_casa_image, mode='put', hdkey='CRVAL1', hdvalue={'value':hdr['CRVAL1'], 'unit':'deg'})\n#imhead(perseus_casa_image, mode='put', hdkey='CRVAL2', hdvalue={'value':hdr['CRVAL2'], 'unit':'deg'})\n#imhead(perseus_casa_image, mode='put', hdkey='CRPIX1', hdvalue=hdr['CRPIX1'])\n#imhead(perseus_casa_image, mode='put', hdkey='CRPIX2', hdvalue=hdr['CRPIX2'])\n#imhead(perseus_casa_image, mode='put', hdkey='CTYPE3', hdvalue='FREQ')\n# can convert units and use this\n#imhead(perseus_casa_image, mode='put', hdkey='BUNIT', hdvalue='Jy/beam')\n#imhead(perseus_casa_image, mode='put', hdkey='BUNIT', hdvalue='Jy/pixel') #WRONG!!!\nexportfits(perseus_casa_image, perseus_casa_image+\".fits\",\n overwrite=True)\nhdr = fits.getheader(perseus_casa_image+\".fits\")\nprint(\"CDELT1={0}, CDELT2={1}\".format(hdr['CDELT1'], hdr['CDELT2']))\n\nos.system('rm -rf {0}'.format(perseus_casa_image))\n# try re-importing (this definitely should not fix any outstanding issues)\nimportfits(fitsimage=perseus_rescaled,\n imagename=perseus_casa_image,\n overwrite=True,\n # The beam is OBVIOUSLY AND CORRECTLY in the header.\n # but if you leave this out, CASA complains loudly\n beam=[\"{0}deg\".format(18/3600.*distance_scaling),\n \"{0}deg\".format(18/3600.*distance_scaling),\n \"0deg\"],\n 
)\nia.open(perseus_casa_image)\nprint(\"BUNIT: \",ia.summary()['unit'])\nia.close()\n\n#sm.openfromms(\"w51_contvis_selfcal_0.ms\")\nsm.openfromms(\"continuum_7m12m_noflag.ms\")\n#sm.openfromms(\"w51_test_onechan.ms\")\nsm.setvp()\nsuccess = sm.predict(perseus_casa_image)\nassert success\n# TODO: get these from ASDM_CALWVR and WEATHER\nsuccess2 = sm.setnoise(mode='tsys-atm', relhum=60.0, pwv='2mm', tatmos=265.0, )\nsuccess3 = sm.corrupt()\nsm.done()\nsm.close()\n\n# problem:\n# plotms(vis='continuum_7m12m_noflag.ms', xaxis='uvdist', ydatacolumn='model')\n\nos.system('rm -rf perseus_250_model.ms')\nassert split(vis=\"continuum_7m12m_noflag.ms\", outputvis=\"perseus_250_model.ms\",\n datacolumn='data')\n\nphasecenter = 'J2000 19h23m41.580 +14d30m41.37'\n\ndelmod(vis='perseus_250_model.ms')\nos.system('rm -rf perseus_250_model_tclean_dirty*')\ntclean(vis='perseus_250_model.ms',\n imagename='perseus_250_model_tclean_dirty',\n field='',\n spw='',\n specmode='mfs',\n deconvolver='clark',\n imsize = [1536,1536],\n cell= '0.1arcsec',\n weighting = 'uniform',\n phasecenter=phasecenter,\n #scales=[0,3,9,27,81],\n robust = -2.0,\n niter = 0,\n threshold = '1.0mJy',\n interactive = False,\n gridder = 'mosaic',\n savemodel='none',\n )\nexportfits(imagename='perseus_250_model_tclean_dirty.residual', fitsimage='perseus_250_model_tclean_dirty.image.fits', dropdeg=True, overwrite=True)\n\n# dirtyimage = 'perseus_250_model_tclean_dirty.residual'\n# assert ia.open(dirtyimage)\n# assert ia.calcmask(mask=dirtyimage+\" > 0.1\", name='dirty_mask_100mJy')\n# assert ia.close()\n# makemask(mode='copy', inpimage=dirtyimage,\n# inpmask=dirtyimage+\":dirty_mask_100mJy\", output='dirty_100mJy.mask',\n# overwrite=True)\n# exportfits('dirty_100mJy.mask', 'dirty_100mJy.mask.fits', dropdeg=True, overwrite=True)\n# \n# os.system('rm -rf perseus_250_model_tclean_clean_masked*')\n# tclean(vis='perseus_250_model.ms',\n# imagename='perseus_250_model_tclean_clean_masked',\n# field='',\n# spw='',\n# 
specmode='mfs',\n# deconvolver='clark',\n# imsize = [1536,1536],\n# cell= '0.1arcsec',\n# weighting = 'uniform',\n# phasecenter=phasecenter,\n# #scales=[0,3,9,27,81],\n# robust = -2.0,\n# niter = 50000,\n# threshold = '7.0mJy',\n# interactive = False,\n# gridder = 'mosaic',\n# savemodel='none',\n# mask='dirty_100mJy.mask',\n# )\n# exportfits(imagename='perseus_250_model_tclean_clean.image', fitsimage='perseus_250_model_tclean_clean.image.fits', dropdeg=True, overwrite=True)\n# exportfits(imagename='perseus_250_model_tclean_clean.model', fitsimage='perseus_250_model_tclean_clean.model.fits', dropdeg=True, overwrite=True)\n\n\nos.system('rm -rf perseus_250_model_tclean_clean*')\ntclean(vis='perseus_250_model.ms',\n imagename='perseus_250_model_tclean_clean',\n field='',\n spw='',\n specmode='mfs',\n deconvolver='clark',\n imsize = [1536,1536],\n cell= '0.1arcsec',\n weighting = 'uniform',\n phasecenter=phasecenter,\n #scales=[0,3,9,27,81],\n robust = -2.0,\n niter = 50000,\n threshold = '0.5mJy',\n interactive = False,\n gridder = 'mosaic',\n savemodel='none',\n )\nexportfits(imagename='perseus_250_model_tclean_clean.image', fitsimage='perseus_250_model_tclean_clean.image.fits', dropdeg=True, overwrite=True)\nexportfits(imagename='perseus_250_model_tclean_clean.model', fitsimage='perseus_250_model_tclean_clean.model.fits', dropdeg=True, overwrite=True)\n\nos.system('rm -rf perseus_250_model_tclean_msclean*')\ntclean(vis='perseus_250_model.ms',\n imagename='perseus_250_model_tclean_msclean',\n field='',\n spw='',\n specmode='mfs',\n deconvolver='multiscale',\n imsize = [1536,1536],\n cell= '0.1arcsec',\n weighting = 'uniform',\n phasecenter=phasecenter,\n scales=[0,3,9,27],\n robust = -2.0,\n niter = 50000,\n threshold = '0.5mJy',\n interactive = False,\n gridder = 'mosaic',\n savemodel='none',\n )\nexportfits(imagename='perseus_250_model_tclean_msclean.image', fitsimage='perseus_250_model_tclean_msclean.image.fits', dropdeg=True, 
overwrite=True)\nexportfits(imagename='perseus_250_model_tclean_msclean.model', fitsimage='perseus_250_model_tclean_msclean.model.fits', dropdeg=True, overwrite=True)\n","repo_name":"keflavich/W51_ALMA_2013.1.00308.S","sub_path":"analysis/synth_imaging_perseus.py","file_name":"synth_imaging_perseus.py","file_ext":"py","file_size_in_byte":9652,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"30190180713","text":"import turtle\nturtle.tracer(0,0) # accélération du tracé\nturtle.screensize(2000,2000) # taille fenêtre graphique\nturtle.pu()\nturtle.goto(-500,0)\nturtle.pd()\n\ndef dessiner(courbe, longueur, angle): \n \"\"\" réalise une représentation graphique d'une courbe donnée par des chaines de caractères \"\"\"\n for caractere in courbe:\n if caractere == '+': turtle.left(angle)\n elif caractere == '-': turtle.right(angle)\n elif caractere in ['F', 'G']: turtle.forward(longueur)\n\n\n#dessiner('F', 50, 60)\n\ndef regleKoch(chaine):\n nouvelleChaine = '' # on crée une nouvelle chaine de caractères VIDE\n for lettre in chaine: # on épelle la chaine de caractères donnée en paramètres\n if lettre == 'F': # si dans l'ancienne chaine, il y a un 'F'\n nouvelleChaine = nouvelleChaine + 'F+F--F+F' # alors, on écrit F+F--F+F dans la nouvelle chaine \n else :\n nouvelleChaine = nouvelleChaine + lettre # sinon, on reporte la lettre telle quelle\n return nouvelleChaine\n\n\ndef courbeKoch(motifInitial, niter):\n \"\"\" \n appelle niter fois regleKoch pour créer la courbe de Koch\n \"\"\"\n courbe = motifInitial # on part du motif initial donné par l'utilisateur en paramètres\n for i in range(niter):\n nouveauMotif = regleKoch(courbe) # on trouve le nouveau Motif à partir du motif de départ\n courbe = nouveauMotif # on dit que le nouveau Motif est maintenant le motif de départ\n return courbe\n\n\n\n#courbe = courbeKoch('F',3)\n#dessiner(courbe,50, 60)\n\ndef flocon(motifInitial, niter):\n courbe = 
courbeKoch(motifInitial, niter)\n flocon = ''\n for _ in range(3):\n flocon += courbe\n flocon += '--' \n return flocon\n\nlongueur = 2\nangle = 60\nniter = 6\ndessiner(courbeKoch('F', niter), longueur, angle)\n\n\nturtle.update() # accélération du tracé\nturtle.exitonclick() # permet la fermeture de la fenêtre graphique","repo_name":"bouillotvincent/bouillotvincent.github.io","sub_path":"files/N1G/C03/TP_Note1_Corrige.py","file_name":"TP_Note1_Corrige.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"fr","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"33314481208","text":"__author__ = 'Michele Johnson'\r\n\r\n# Program Requirements\r\n# Write a program to do the following. Ask the user to enter two numbers. Compare them and display the larger one\r\n# and the smaller one. Define and use the following functions:\r\n#\r\n# get_numbers: Ask user to enter two numbers. Use a return statement to return them.\r\n#\r\n# large_small: Compare two numbers. Use a return statement to return the larger number and the smaller number\r\n#\r\n# Also write a main function to implement the mainline logic. Call the get_numbers function to get two numbers.\r\n# Then call the large_small function and pass the two numbers as arguments. 
Display the larger and the smaller\r\n# numbers in main.\r\n\r\n# Introduce Program\r\nprint(\"COMPARE NUMBERS\\n\")\r\nprint(\"This program will: \"\r\n \"\\n\\tAsk for two numbers, \"\r\n \"\\n\\tCompare them, and \"\r\n \"\\n\\tReport which is the largest and the smallest number.\\n\")\r\n\r\n\r\ndef main():\r\n # Main function for COMPARE NUMBERS program\r\n # Function will receive two numbers, and display if which is the larger and the smaller number, or\r\n # Will display if the numbers are equal\r\n num1, num2 = get_numbers()\r\n print()\r\n if num1 == num2:\r\n print(\"The numbers are equal.\")\r\n else:\r\n larger, smaller = compare_numbers(num1, num2)\r\n print(larger, \"is the larger number.\", )\r\n print(smaller, \"is the smaller number.\")\r\n\r\n\r\ndef get_numbers():\r\n # Function to obtain two numbers\r\n # Will return numbers obtained\r\n def valid_entry_num1():\r\n # Function to obtain valid entry per program requirements\r\n # Will ask user for input per program requirements\r\n # Will return an error message if entry is invalid\r\n\r\n # Initiate test variable to false\r\n is_entry_valid = False\r\n # Initiate entry variable to match correct variable type and value: number; 0.0\r\n entry = 0.0\r\n while is_entry_valid is False:\r\n try:\r\n entry = float(input(\"Enter the first number:\\t\\t\"))\r\n if entry >= 0:\r\n is_entry_valid = True\r\n else:\r\n print(\"Invalid Entry\\n\")\r\n except ValueError:\r\n pass\r\n print(\"\\nInvalid Entry\\n\")\r\n return entry\r\n\r\n def valid_entry_num2():\r\n # Function to obtain valid entry per program requirements\r\n # Will ask user for input per program requirements\r\n # Will return an error message if entry is invalid\r\n\r\n # Initiate test variable to false\r\n is_entry_valid = False\r\n # Initiate entry variable to match correct variable type and value: number; 0.0\r\n entry = 0.0\r\n while is_entry_valid is False:\r\n try:\r\n entry = float(input(\"Enter the second number:\\t\"))\r\n if entry 
>= 0:\r\n is_entry_valid = True\r\n else:\r\n print(\"Invalid Entry\\n\")\r\n except ValueError:\r\n pass\r\n print(\"\\nInvalid Entry\\n\")\r\n return entry\r\n\r\n entry1 = valid_entry_num1()\r\n entry2 = valid_entry_num2()\r\n return entry1, entry2\r\n\r\n\r\ndef compare_numbers(a_num1, a_num2):\r\n # Function to compare two numbers\r\n # Will return the largest first and smallest second\r\n if a_num1 > a_num2:\r\n a_large = a_num1\r\n a_small = a_num2\r\n else:\r\n a_large = a_num2\r\n a_small = a_num1\r\n return a_large, a_small\r\n\r\n\r\nmain()\r\n","repo_name":"mischelay2001/WTCSC121","sub_path":"CSC121Lab01_Lab08_Misc/CSC121Lab8Problem4_LargerOrSmaller.py","file_name":"CSC121Lab8Problem4_LargerOrSmaller.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"72811036658","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 30 09:31:13 2021\r\n\r\n@author: Barun\r\n\"\"\"\r\nfrom WeatherSensor import UMBError, WS_UMB\r\n\r\nfrom PyQt5.uic import loadUi\r\nfrom PyQt5 import QtWidgets, QtCore\r\nfrom PyQt5.QtWidgets import QDialog, QApplication, QWidget\r\n\r\nimport time, struct, sys\r\nimport serial.tools.list_ports\r\n\r\n#temp[degC], rel. humidity[%], rel. 
air pressure[hpa], wind speed[m/s], wind direction[deg]\r\n#MinMax = ['120', '140', '220', '240', '325', '345', '420', '440', '520', '540']\r\nChannels = ['100', '200', '305', '400', '500']\r\n\r\n\r\nclass WelcomeScreen(QDialog):\r\n def __init__(self):\r\n super(WelcomeScreen, self).__init__()\r\n loadUi(\"main.ui\", self)\r\n self.pushButton.clicked.connect(self.port_select)\r\n \r\n self.pushButton_2.clicked.connect(self.savedata)\r\n self.pushButton_3.clicked.connect(self.exit_program) \r\n \r\n \r\n def exit_program(self):\r\n sys.exit(0)\r\n \r\n def port_select(self):\r\n for i in serial.tools.list_ports.comports():\r\n self.comboBox.addItems(i)\r\n self.COM_Port = str(self.comboBox.currentText())\r\n return self.COM_Port\r\n \r\n def savedata(self):\r\n self.worker = WorkerThread()\r\n self.worker.start()\r\n #self.worker.finished.connect(self.evt_worker_finished)\r\n \r\n def evt_worker_finished(self):\r\n QtWidgets.QMessageBox.information(self, \"Done\", \"Worker Thread complete\")\r\n\r\nimport csv\r\n\r\ndef getdata():\r\n weatherData = []\r\n with WS_UMB() as umb:\r\n for channel in Channels:\r\n if 100 <= int(channel) <= 29999:\r\n value, status = umb.onlineDataQuery(channel)\r\n if status == 0:\r\n weatherData.append(value)\r\n return weatherData \r\n\r\ndef file_save():\r\n file = 'DataOutput.csv' ## Windows Python3 \r\n with open(file, 'a', newline='') as f:\r\n writer = csv.writer(f)\r\n try:\r\n writer.writerow(getdata())\r\n #print(getdata())\r\n except KeyboardInterrupt:\r\n print(\"Keyboard Interrupt\")\r\n except:\r\n print(\"Other exception\")\r\n \r\n\r\nclass WorkerThread(QtCore.QThread):\r\n def run(self):\r\n while True:\r\n file_save()\r\n time.sleep(5)\r\n \r\n \r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv) \r\n welcome = WelcomeScreen()\r\n widget = QtWidgets.QStackedWidget()\r\n widget.setWindowTitle(\"UMB\")\r\n widget.addWidget(welcome)\r\n widget.show()\r\n 
sys.exit(app.exec_())","repo_name":"benjamin2044/UMB-Protocol-Lufft-python-","sub_path":"UMB.py","file_name":"UMB.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71155935540","text":"# Converted the Jupyter Notebook code into Python code\n# Dependencies and Setup\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nimport pandas as pd\nimport datetime as dt\n\n# Set Executable Path & Initialize Chrome Browser\nexecutable_path = {\"executable_path\": \"./chromedriver.exe\"}\nbrowser = Browser(\"chrome\", **executable_path)\n\n\n# Scrape title and paragraph from NASA Mars News Site\ndef mars_news(browser):\n # Visit the NASA Mars News Site\n news_url = \"https://mars.nasa.gov/news/\"\n browser.visit(news_url)\n\n # Get the first list item & wait half a second if not immediately present\n browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=0.5)\n \n news_html = browser.html\n news_soup = bs(news_html, \"html.parser\")\n\n # Parse Results HTML with BeautifulSoup\n # Find Everything Inside:\n #