diff --git "a/5338.jsonl" "b/5338.jsonl" new file mode 100644--- /dev/null +++ "b/5338.jsonl" @@ -0,0 +1,638 @@ +{"seq_id":"35341006","text":"from app.objects.c_obfuscator import Obfuscator\nfrom app.utility.base_world import BaseWorld\nfrom plugins.stockpile.app.stockpile_svc import StockpileService\n\nname = 'Stockpile'\ndescription = 'A stockpile of abilities, adversaries, payloads and planners'\naddress = '/plugin/stockpile/gui'\naccess = BaseWorld.Access.APP\n\n\nasync def enable(services):\n stockpile_svc = StockpileService(services)\n services.get('app_svc').application.router.add_route('GET', '/plugin/stockpile/gui', stockpile_svc.splash)\n await services.get('file_svc').add_special_payload('.donut', 'plugins.stockpile.app.donut.donut_handler')\n await stockpile_svc.data_svc.store(\n Obfuscator(name='plain-text',\n description='Does no obfuscation to any command, instead running it in plain text',\n module='plugins.stockpile.app.obfuscators.plain_text')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='base64',\n description='Obfuscates commands in base64',\n module='plugins.stockpile.app.obfuscators.base64_basic')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='base64jumble',\n description='Obfuscates commands in base64, then adds characters to evade base64 detection. '\n 'Disclaimer: this may cause duplicate links to run.',\n module='plugins.stockpile.app.obfuscators.base64_jumble')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='caesar cipher',\n description='Obfuscates commands through a caesar cipher algorithm, which uses a randomly selected '\n 'shift value.',\n module='plugins.stockpile.app.obfuscators.caesar_cipher')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='base64noPadding',\n description='Obfuscates commands in base64, then removes padding',\n module='plugins.stockpile.app.obfuscators.base64_no_padding')\n )\n await stockpile_svc.data_svc.store(\n Obfuscator(name='steganography',\n description='Obfuscates commands through image-based steganography',\n module='plugins.stockpile.app.obfuscators.steganography')\n )\n","sub_path":"hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"578033595","text":"from telegram.ext import Updater, MessageHandler, Filters\nfrom telegram.ext import CallbackContext, CommandHandler\nimport time as times\n\n\ndef time(update, context):\n update.message.reply_text(times.asctime().split()[3])\n\n\ndef date(update, context):\n ans = ' '.join(times.asctime().split()[:3]) + \" \" + times.asctime().split()[-1]\n update.message.reply_text(ans)\n\n\ndef task(context):\n job = context.job\n context.bot.send_message(job.context, text='Вернулся')\n\n\ndef set_timer(update, context):\n chat_id = update.message.chat_id\n try:\n due = (context.args[0])\n if due < 0:\n update.message.reply_text('Прошлое не круто')\n return\n if 'job' in context.chat_data:\n old_job = context.chat_data['job']\n old_job.shedule_removal()\n new_job = context.job_queue.run_once(task, due, context=chat_id)\n context.chat_data['job'] = new_job\n update.message.reply_text(f'Вернусь через {due} секунд')\n except (IndexError, ValueError):\n update.message.reply_text('Глупышка')\n\ndef echo(update, context):\n text = update.message.text\n update.message.reply_text(f'Я получил сообщение \"{text}\"')\n\n\ndef main():\n updater = Updater('1121846344:AAH6Xmry_2HfNc9i6YjxvSoBnMJbJ0jKeLo',\n use_context=True)\n dp 
= updater.dispatcher\n dp.add_handler(CommandHandler(\"set_timer\", set_timer))\n dp.add_handler(CommandHandler(\"date\", date))\n dp.add_handler(CommandHandler(\"time\", time))\n text_handler = MessageHandler(Filters.text, echo)\n dp.add_handler(text_handler)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339285265","text":"#!/usr/bin/env python\n#\n# 2012-2014 (BSD) Ioannis Filippidis\n\"\"\"\ncreate/delete junctions based on \"dirlink\" files\n(win, mac, linux)\n\"\"\"\nimport logging\nlogging.basicConfig(level=logging.WARNING)\nlogger = logging.getLogger()\n\nimport sys\nimport os\nimport subprocess\nimport argparse\nimport shlex\nimport fnmatch\nimport platform\n\ndef locate(pattern, root=os.curdir):\n \"\"\"Recursively find files under given directory matching pattern.\n \"\"\"\n for path, dirs, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)\n\ndef create_delete_dirlinks_darwin(dirlink, args):\n fid = open(dirlink, 'r')\n igot = fid.readlines()\n fid.close()\n \n path = dirlink.replace('dirlink', '')\n path = path.replace('\\\\', '/')\n logger.debug('current dirlink file in: ' + str(path))\n \n n = 0;\n for line in igot:\n n = n +1\n logger.debug('Line No.' + str(n))\n \n line = line.replace('\\\\', '/')\n if line.find('target') > -1:\n target = line.replace('target = ', '')\n logger.debug('The target is: ' + target)\n elif line.find('link') > -1:\n junction = line.replace('link = ', '')\n logger.debug('The Link is: ' + junction)\n \n junction = path + junction\n \n if '$' in target:\n target = os.path.expandvars(target)\n else:\n target = path + target\n \n if args.create:\n args = shlex.split('ln -s ' + target + ' ' + junction);\n elif args.delete:\n args = shlex.split('rm -f -v ' + junction);\n else:\n raise sys.exit(\n 'Unknown operation. '\n 'Available operations: '\n '-c | --create | -d | --delete'\n )\n \n logger.debug(args)\n subprocess.call(args)\n\ndef create_delete_dirlinks_windows(dirlink, args):\n fid = open(dirlink, 'r')\n igot = fid.readlines()\n fid.close()\n \n path = dirlink.replace('dirlink', '')\n path = path.replace('\\\\', '\\\\\\\\')\n logger.info('current dirlink file in: ' + path)\n \n n = 0;\n for line in igot:\n n = n +1\n logger.debug('Line No.' + str(n))\n \n line = line.replace('\\\\', '\\\\\\\\')\n if line.find('target') > -1:\n target = line.replace('target = ', '')\n logger.info('The target is: ' + target)\n elif line.find('link') > -1:\n junction = line.replace('link = ', '')\n logger.info('The Link is :' + junction)\n \n junction = path +junction\n target = path +target\n \n if args.create:\n args = shlex.split('ln1 --junction ' + junction +\n ' ' + target)\n elif args.delete:\n args = shlex.split('junction -d ' + junction)\n else:\n raise sys.exit(\n 'Unknown operation. '\n 'Available operations: '\n '-c | --create | -d | --delete'\n )\n \n subprocess.call(args)\n\nif __name__ == '__main__':\n desc = (\n 'locate() is used in case of exporting only to a PDF '\n '(w/o latex export). '\n 'Only in that case \\includegraphics{} is still '\n 'able to find the PDF without a path. 
'\n 'If latex export is used, '\n 'then the produced .pdf_tex should be used, '\n 'with an \\input{} command in latex and '\n 'a relative path is mandatory for '\n 'the \\input{} command to work.'\n )\n \n parser = argparse.ArgumentParser(description=desc)\n \n g = parser.add_mutually_exclusive_group()\n \n g.add_argument('-c', '--create', help='add symbolic links',\n action='store_true')\n g.add_argument('-d', '--delete', help='erase symbolic links',\n action='store_true')\n \n parser.add_argument('-f', '--filename', help='filename',\n default='dirlink')\n parser.add_argument('-v', '--verbose', help='increase output verbosity',\n action='store_true')\n \n args = parser.parse_args()\n \n # no args ?\n if len(sys.argv)==1:\n parser.print_help()\n sys.exit(1)\n \n if args.verbose:\n logger.setLevel(logging.DEBUG)\n \n filename = args.filename\n logger.debug('instruction in filename = ' + str(filename))\n \n # create or delete junctions\n file_generator = locate(filename, './')\n osname = platform.system()\n for f in file_generator:\n flag = 1\n logger.debug('Found file named: ' + str(f))\n \n if osname == 'Darwin':\n create_delete_dirlinks_darwin(f, args)\n else:\n create_delete_dirlinks_windows(f, args)\n","sub_path":"junctions.py","file_name":"junctions.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66165241","text":"import cv2\nimport numpy as np\nfrom display_utils import *\n\nA = 0.75\nB = 1.25\n\ndef render_circle(mask, cx, cy, radius):\n y,x = np.ogrid[-radius:radius, -radius:radius]\n index = x**2 + y**2 <= radius**2\n mask[cy-radius:cy+radius, cx-radius:cx+radius][index] = 1\n\ndef render_butterworth_curve(mask, radius):\n sy,sx = mask.shape\n radius2 = radius ** 2\n sx /= 2\n sy /= 2\n y,x = np.ogrid[-sy:sy, -sx:sx]\n x2 = x ** 2\n y2 = y ** 2\n mask[:,:] = 1 / (1 + ((x2 + y2) / radius2) ** 3)\n\ndef render_gaussian_curve(mask, radius):\n sy,sx = mask.shape\n radius2 = 2 * (radius ** 2)\n sx /= 2\n sy /= 2\n y,x = np.ogrid[-sy:sy, -sx:sx]\n x2 = x ** 2\n y2 = y ** 2\n mask[:,:] = np.exp(-(x2 + y2) / radius2)\n\n\nimg = cv2.imread('trees.jpg')\norig_img = np.copy(img)\nimg = np.sum(img, 2) / 3\nimg = np.log1p(img)\n\nfft = np.fft.fft2(img)\nfshift = np.fft.fftshift(fft)\n\ncutoff_radius = 2\n\ndef scale(thing, clip=True):\n if clip:\n thing = np.maximum(0, np.minimum(thing, 255))\n else:\n thing -= np.min(thing)\n thing /= np.max(thing)\n thing *= 255\n thing = thing.astype(np.uint8)\n return thing\n\ndef apply_mask(mask):\n masked_fshift = (A + B * mask) * fshift\n return scale(\n np.exp(\n np.real(\n np.fft.ifft2(\n np.fft.ifftshift(masked_fshift)))) - 1)\n\ndef ideal(radius = cutoff_radius):\n mask = np.zeros(img.shape, np.uint8)\n center_x = img.shape[1] // 2\n center_y = img.shape[0] // 2\n render_circle(mask, center_x, center_y, radius)\n rmask = np.uint8(mask != 1)\n return (\n rmask * 255, apply_mask(rmask),\n )\n\ndef butterworth(radius = cutoff_radius):\n mask = np.zeros(img.shape, np.float)\n render_butterworth_curve(mask, radius)\n rmask = 1 - mask\n return (\n np.uint8(rmask * 255), apply_mask(rmask),\n )\n\ndef gaussian(radius = cutoff_radius):\n mask = np.zeros(img.shape, np.float)\n render_gaussian_curve(mask, radius)\n rmask = 1 - mask\n return (\n np.uint8(rmask * 255), apply_mask(rmask),\n )\n\n\ncv2.namedWindow('Output', cv2.WINDOW_NORMAL)\ncv2.resizeWindow('Output', 1000, 1000)\n\nview = nxm_matrix_view(range(6), ['ideal', 'butterworth', 'gaussian'], 1, 
2)\ncv2.imshow('Output', side_by_side(\n layout_with_names({\n \"Homomorphic Fourier Filters - AliMPFard\": Horizontal(6,6),\n '': view,\n }, Vertical), *ideal(), *butterworth(), *gaussian(), orig_img\n))\n\n\nwait_for_key('q', lambda: cv2.waitKey(0), cv2.destroyAllWindows)\n","sub_path":"homomorphic-filter.py","file_name":"homomorphic-filter.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"650633241","text":"\nimport fnmatch\n\nfrom HTMLParser import HTMLParser\n\nclass LinkExtractor(HTMLParser):\n\n def reset(self):\n HTMLParser.reset(self)\n self.links = []\n\n def setPattern(self, url, mask, type):\n self.mask = mask\n self.type = type\n if \"/\" in url:\n self.base = url[0:url.rfind(\"/\")] + \"/\"\n else:\n self.base = url + \"/\"\n\n def handle_starttag(self, tag, attrs):\n if tag == \"a\":\n attrs = dict(attrs) # store attributes in object\n if tag == \"a\" and \"href\" in attrs:\n href = attrs[\"href\"]\n if \"/\" in href:\n file = href[href.rfind(\"/\")+1:].lower()\n else:\n file = href.lower()\n# if href.lower().endswith(\".\"+self.type) and (href.lower().maskswith(self.mask) or (\"/\"+self.mask) in href.lower()):\n if file.endswith(\".\" + self.type) and fnmatch.fnmatch(file, self.mask):\n if not href.lower().startswith(\"http://\") and not href.lower().startswith(\"https://\"):\n href = self.base + href\n self.links.append(href)\n\n","sub_path":"openaddresses/lib/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"208393199","text":"from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check\n\n\nclass ShareHostPID(BaseK8Check):\n def __init__(self) -> None:\n # CIS-1.3 1.7.2\n # CIS-1.5 5.2.2\n name = \"Containers should not share the host process ID namespace\"\n id = \"CKV_K8S_17\"\n # Location: Pod.spec.hostPID\n # Location: CronJob.spec.jobTemplate.spec.template.spec.hostPID\n # Location: *.spec.template.spec.hostPID\n supported_kind = (\n \"Pod\",\n \"Deployment\",\n \"DaemonSet\",\n \"StatefulSet\",\n \"ReplicaSet\",\n \"ReplicationController\",\n \"Job\",\n \"CronJob\",\n )\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)\n\n def scan_spec_conf(self, conf: dict[str, Any]) -> CheckResult:\n spec = {}\n\n if conf[\"kind\"] == \"Pod\":\n if \"spec\" in conf:\n spec = conf[\"spec\"]\n elif conf[\"kind\"] == \"CronJob\":\n spec = conf.get(\"spec\")\n if spec:\n job_template = spec.get(\"jobTemplate\")\n if job_template:\n job_template_spec = job_template.get(\"spec\")\n if job_template_spec:\n template = job_template_spec.get(\"template\")\n if template:\n if \"spec\" in template:\n spec = template[\"spec\"]\n else:\n inner_spec = self.get_inner_entry(conf, \"spec\")\n spec = inner_spec if inner_spec else spec\n if spec:\n if \"hostPID\" in spec:\n if spec[\"hostPID\"]:\n return CheckResult.FAILED\n\n # This value is by default set to false\n return CheckResult.PASSED\n\n\ncheck = ShareHostPID()\n","sub_path":"checkov/kubernetes/checks/resource/k8s/ShareHostPID.py","file_name":"ShareHostPID.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"186651478","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom tensorflow import keras\n\nclass pictionary():\n\n\n\n# function to predict the drawing using the model\n def predict(self,im1):\n pred_probab=self.model.predict(im1)[0]\n pred_class = list(pred_probab).index(max(pred_probab))\n if(max(pred_probab)>0.5):\n print(max(pred_probab), self.pred[pred_class-1])\n\n# mouse callback function\n def pic_draw(self,event,former_x,former_y,flags,param):\n\n if event==cv2.EVENT_LBUTTONDOWN:\n self.drawing=True\n self.current_former_x,self.current_former_y=former_x,former_y\n\n elif event==cv2.EVENT_MOUSEMOVE:\n if self.drawing==True:\n if self.mode==True:\n cv2.line(self.im,(self.current_former_x,self.current_former_y),(former_x,former_y),(0,0,255),1)\n self.current_former_x = former_x\n self.current_former_y = former_y\n\n\n elif event==cv2.EVENT_LBUTTONUP:\n self.drawing=False\n if self.mode==True:\n cv2.line(self.im,(self.current_former_x,self.current_former_y),(former_x,former_y),(0,0,255),1)\n self.current_former_x = former_x\n self.current_former_y = former_y\n # return former_x,former_y\n\n\n# im=np.zeros((255,255), dtype='float32')\n\n def __init__(self):\n self.drawing=False # true if mouse is pressed\n self.mode=True # if True, draw rectangle.\n self.pred=[\"alarm clock\",\"bicycle\",\"bed\",\"airplane\",\"apple\",\"belt\",\"banana\",\"cake\"]\n self.model=keras.models.load_model('/home/diksha/pictionary/my_model2.h5')\n\n self.im=cv2.imread(r'./white.jpg')\n self.img1=cv2.imread(r'./black.jpg')\n cv2.namedWindow(\"Pictionary\")\n cv2.setMouseCallback('Pictionary',self.pic_draw)\n\n def run(self):\n while(1):\n cv2.imshow('Pictionary',self.im)\n img=cv2.cvtColor(self.im, cv2.COLOR_BGR2GRAY)\n th, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n blur1 = cv2.GaussianBlur(img, (5, 5), 0)\n cnts, heir= cv2.findContours(blur1, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n if(len(cnts)>1):\n cv2.drawContours(self.img1, cnts,-1, (255,255,255),5)\n\n cnt = max(cnts, key=cv2.contourArea)\n\n print(cv2.contourArea(cnt))\n if cv2.contourArea(cnt) > 2000:\n x, y, w, h = cv2.boundingRect(cnt)\n self.im1 = self.img1[y:y + h, x:x + w]\n\n self.im1 = cv2.resize(self.im1, (28, 28))\n self.im1 = np.array(self.im1, dtype=np.float32)\n self.im1 = np.reshape(self.im1, (-1, 28, 28, 1))\n self.predict(self.im1)\n \n\n k=cv2.waitKey(1)&0xFF\n if k==27:\n break\n\n elif k==97:\n self.im=cv2.imread(r'./white.jpg')\n self.img1=cv2.imread(r'./black.jpg')\n\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n pictionary().run()\n\n# /kaggle/input/quick-draw/full_numpy_bitmap_alarm clock.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_bicycle.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_bed.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_airplane.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_apple.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_belt.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_banana.npy\n# /kaggle/input/quick-draw/full_numpy_bitmap_birthday cake.npy\n","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"380654231","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\ndef merge(arrA, arrB):\n elements = len(arrA) + len(arrB)\n merged_arr = [0] * elements #creates a list of 0's \n #[0 for _ in range(elements)] is another way to do this\n\n # Your code here\n #start pointers 
at the start of both lists\n #compare value of pointers and smallest gets added to merge list\n #increment pointer to move onto next list\n \n #initialize each list for both pointers\n a = 0\n b = 0\n \n #loop through merged_arr\n for i in range(len(merged_arr)):\n #make sure pointer is in range of array\n #if it is out of range, copy other array\n if a >= len(arrA):\n merged_arr[i] = arrB[b]\n b += 1\n elif b >= len(arrB):\n merged_arr[i] = arrA[a]\n a += 1\n elif arrA[a] < arrB[b]: #both indices are in range of their own array\n merged_arr[i] = arrA[a]\n a += 1\n else:\n merged_arr[i] = arrB[b]\n b += 1\n\n \n return merged_arr\n\n# while a < len(arrA) and b < len(arrB):\n# if arrA[a] < arrB[b]:\n# combined.append(arrA[a])\n# a += 1\n# else:\n# combined.append(arrB[b])\n# b +=1\n \n# #this will create the combined list\n \n# while a < len(arrA):\n# combined.append(arrA[a])\n# a += 1\n# while b < len(arrB):\n# combined.append(arrB[b])\n# b += 1\n \n# return combined\n\n\n# TO-DO: implement the Merge Sort function below recursively\ndef merge_sort(arr):\n # Your code here\n if len(arr) > 1:\n left = merge_sort(arr[:len(arr) // 2])\n right = merge_sort(arr[len(arr) // 2:])\n arr = merge(left, right)\n\n return arr\n\n\n# STRETCH: implement the recursive logic for merge sort in a way that doesn't \n# utilize any extra memory\n# In other words, your implementation should not allocate any additional lists \n# or data structures; it can only re-use the memory it was given as input\n# def merge_in_place(arr, start, mid, end):\n# # Your code here\n# pass\n\n# def merge_sort_in_place(arr, l, r):\n# # Your code here\n# pass\n","sub_path":"src/sorting/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"172952026","text":"from _Helper import *\n\n# \n# Use three times of multiplication to calculate complex number, inspired by Strassen algorithm.\n# \n\n# \n# TIME: O(1)\n# SPACE: O(1)\n# \n\n\n# \nclass Solution(object):\n def CalculateComplexNumStrassen(self, a, b, c, d):\n A = a * (c + d)\n B = b * (c - d)\n C = c * (b - a)\n return (B - C, A + C)\n\n def CalculateComplexNumNaive(self, a, b, c, d):\n return (a * c - b * d, a * d + b * c)\n\n\n# \n\n# \ns = Solution()\na = 0.5\nb = 1\nc = 1.5\nd = 2\nexpected = (-1.25, 2.5)\nactual = s.CalculateComplexNumStrassen(a, b, c, d)\nnaive = s.CalculateComplexNumNaive(a, b, c, d)\nAreEqual(expected, actual, a, b, c, d)\nAreEqual(expected, naive, a, b, c, d)\n# another pair\ns = Solution()\na = -2\nb = 1\nc = 4\nd = 2\nexpected = (-10.0, 0.0)\nactual = s.CalculateComplexNumStrassen(a, b, c, d)\nnaive = s.CalculateComplexNumNaive(a, b, c, d)\nAreEqual(expected, actual, a, b, c, d)\nAreEqual(expected, naive, a, b, c, d)\n# ","sub_path":"assets/Introduction-to-Algorithms/Python/4_2_CalculateComplexNum.py","file_name":"4_2_CalculateComplexNum.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327459382","text":"import sys\nimport os.path\nsys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\nfrom cork import *\n\nEngine.init(\"Hello World!\", 800, 600)\nclass HelloWorld(Entity):\n def __init__(self):\n self.w = 180\n self.h = 50\n self.x = Engine.window[\"width\"]/2 - (self.w / 2)\n self.y = Engine.window[\"height\"]/2 - (self.h / 2)\n self.drawable = Text(\"Hello World\", self)\n super(HelloWorld, 
self).__init__()\n\nhw = HelloWorld()\n\nEngine.run()\n","sub_path":"examples/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266609711","text":"from tiger.stork import Stork, StorkConfigurationError\nfrom tiger.stork.component import BaseComponent\nfrom tiger.stork.font import FontComponent\nfrom tiger.stork.swatch import SwatchComponent\nfrom nose.tools import raises\n\n\ndef test_component_groups_contain_components():\n panels = Stork(config_path='stork/tests/fixtures/valid.yml')\n for panel in panels:\n for group in panel:\n for component in group:\n assert isinstance(component, BaseComponent)\n\n\ndef test_components_have_unique_keys():\n panels = Stork(config_path='stork/tests/fixtures/valid.yml')\n for panel in panels:\n for group in panel:\n for component in group:\n assert component.key == '%s-%s' % (panel.name.lower(), component.name.lower())\n\n\n@raises(StorkConfigurationError)\ndef test_no_component_type_fails():\n panels = Stork(config_path='stork/tests/fixtures/no_component_type.yml')\n\n\n@raises(StorkConfigurationError)\ndef test_invalid_component_type_fails():\n panels = Stork(config_path='stork/tests/fixtures/invalid_component_type.yml')\n\n\ndef test_all_components_are_in_cache():\n panels = Stork(config_path='stork/tests/fixtures/valid.yml')\n for panel in panels:\n for group in panel:\n for component in group:\n assert component in [v for k, v in panels.component_cache]\n assert panels[component.key] == component\n\n\ndef test_correct_subclass_is_assigned():\n panels = Stork(config_path='stork/tests/fixtures/valid.yml')\n assert isinstance(panels['foo-baz'], SwatchComponent)\n assert isinstance(panels['bar-quux'], FontComponent)\n","sub_path":"tiger/stork/tests/test_components.py","file_name":"test_components.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"169636419","text":"from tkinter import *\n\nclass Text_Settings(object):\n def __init__(self,master):\n tx = self.tx = Toplevel(master,bd=15)\n tx.title(\"Text Settings\")\n pad_x = 5\n pad_y = 5\n\n self.size_m = Message(tx,text = 'Input Text Size:',aspect = 500, justify = LEFT).pack(anchor = CENTER)\n self.size_e = Entry(tx)\n self.size_e.pack(anchor=CENTER)\n\n self.okay_button = Button(tx,text = \"OKAY\", command = self.cleanup)\n self.okay_button.pack(anchor=S,side = BOTTOM,pady=pad_y)\n\n def cleanup(self):\n text_size = self.size_e.get()\n self.settings = [text_size]\n self.tx.destroy()\n","sub_path":"Separation/Text_Settings.py","file_name":"Text_Settings.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376466695","text":"# Copyright 2015 OpenStack Foundation\n# Copyright 2015 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport collections\nimport json\nimport logging\nimport random\nimport socket\nimport threading\nimport time\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef default_echo_handler(message, ovsconn):\n logging.debug(\"responding to echo\")\n ovsconn.send({\"result\": message.get(\"params\", None),\n \"error\": None, \"id\": message['id']})\n\n\ndef default_message_handler(message, ovsconn):\n ovsconn.responses.append(message)\n\n\nclass OVSDBConnection(threading.Thread):\n \"\"\"Connects to an ovsdb server that has manager set using\n\n ovs-vsctl set-manager ptcp:5000\n\n clients can make calls and register a callback for results, callbacks\n are linked based on the message ids.\n\n clients can also register methods which they are interested in by\n providing a callback.\n \"\"\"\n\n def __init__(self, IP, PORT, **handlers):\n super(OVSDBConnection, self).__init__()\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((IP, PORT))\n self.responses = []\n self.callbacks = {}\n self.read_on = True\n self.handlers = handlers or {\"echo\": default_echo_handler}\n self.start()\n\n def send(self, message, callback=None):\n if callback:\n self.callbacks[message['id']] = callback\n self.socket.send(json.dumps(message))\n\n def response(self, id):\n return [x for x in self.responses if x['id'] == id]\n\n def set_handler(self, method_name, handler):\n self.handlers[method_name] = handler\n\n def _on_remote_message(self, message):\n try:\n json_m = json.loads(message,\n object_pairs_hook=collections.OrderedDict)\n handler_method = json_m.get('method', None)\n if handler_method:\n self.handlers.get(handler_method, default_message_handler)(\n json_m, self)\n elif json_m.get(\"result\", None) and json_m['id'] in self.callbacks:\n id = json_m['id']\n if not self.callbacks[id](json_m, self):\n self.callbacks.pop(id)\n\n else:\n default_message_handler(message, self)\n except Exception as e:\n logging.exception(\n \"exception [%s] in handling message [%s]\", e.message, message)\n\n def __echo_response(message, self):\n self.send({\"result\": message.get(\"params\", None),\n \"error\": None, \"id\": message['id']})\n\n def run(self):\n\n chunks = []\n lc = rc = 0\n while self.read_on:\n try:\n response = self.socket.recv(4096)\n if response:\n response = response.decode('utf8')\n message_mark = 0\n for i, c in enumerate(response):\n if c == '{':\n lc += 1\n elif c == '}':\n rc += 1\n\n if rc > lc:\n raise Exception(\"json string not valid\")\n\n elif lc == rc and lc is not 0:\n chunks.append(response[message_mark:i + 1])\n message = \"\".join(chunks)\n self._on_remote_message(message)\n lc = rc = 0\n message_mark = i + 1\n chunks = []\n\n chunks.append(response[message_mark:])\n except Exception:\n # Pass to avoid EOF error\n pass\n\n def stop(self, force=False):\n self.read_on = False\n if force:\n self.socket.close()\n\n def select_table(self, table):\n select_dict = {\"op\": \"select\", \"table\": table, \"where\": []}\n op_id = str(random.getrandbits(128))\n params = ['hardware_vtep']\n params.append(select_dict)\n query_select = {\"method\": \"transact\",\n \"params\": params,\n \"id\": op_id}\n return query_select\n\n def find_row(self, net_id, count, resp_dec):\n for i in range(count):\n row = str(resp_dec['result'][0]['rows'][i])\n if net_id in row:\n return row\n\n def get_response(self, OVSDB_IP, OVSDB_PORT, table):\n query = self.select_table(table)\n 
self.send(query)\n time.sleep(2)\n resp = self.responses\n resp = str(resp[0])\n return resp\n","sub_path":"networking_l2gw/tests/scenario/ovsdb_connections.py","file_name":"ovsdb_connections.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"484309155","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom decimal import Decimal\nimport datetime\nfrom .models import (\n RequestHeader,\n SelfReportedCashFlow,\n Address,\n Business,\n CFApplicationData,\n Owner,\n LoanApp,\n)\n\n# Request Header Model Test (Convert request_date to datetime)\nclass RequestHeaderModelTest(TestCase):\n def setUp(self):\n RequestHeader.objects.create(\n cf_request_id=\"500653901\",\n request_date=\"2019-06-26T23:05:41.2898238Z\",\n cf_api_user_id=None,\n cf_api_password=None,\n is_test_lead=True,\n )\n\n def test_model_created(self):\n request_header = RequestHeader.objects.first()\n self.assertEquals(request_header.cf_request_id, \"500653901\")\n self.assertEquals(request_header.request_date, \"2019-06-26T23:05:41.2898238Z\")\n self.assertEquals(request_header.cf_api_user_id, None)\n self.assertEquals(request_header.cf_api_password, None)\n self.assertTrue(request_header.is_test_lead)\n\n\nclass SelfReportedCashFlowModelTest(TestCase):\n def setUp(self):\n SelfReportedCashFlow.objects.create(\n annual_revenue=49999999.01,\n monthly_average_bank_balance=94941.0,\n monthly_average_credit_card_volume=18191.0,\n )\n\n def test_model_created(self):\n self_reported_cash_flow = SelfReportedCashFlow.objects.first()\n self.assertEquals(\n self_reported_cash_flow.annual_revenue, Decimal(\"49999999.01\")\n )\n self.assertEquals(\n self_reported_cash_flow.monthly_average_bank_balance, Decimal(\"94941.00\")\n )\n self.assertEquals(\n self_reported_cash_flow.monthly_average_credit_card_volume,\n Decimal(\"18191.00\"),\n )\n\n\nclass AddressModelTest(TestCase):\n def setUp(self):\n Address.objects.create(\n address1=\"1234 Red Ln\",\n address2=None,\n city=\"Santa Monica\",\n state=\"CA\",\n zipcode=\"45321\",\n )\n\n def test_model_created(self):\n address = Address.objects.first()\n self.assertEquals(address.address1, \"1234 Red Ln\")\n self.assertEquals(address.address2, None)\n self.assertEquals(address.city, \"Santa Monica\")\n self.assertEquals(address.state, \"CA\")\n self.assertEquals(address.zipcode, \"45321\")\n\n\nclass BusinessModelTest(TestCase):\n\n self_reported_cash_flow = SelfReportedCashFlow.objects.create(\n annual_revenue=49999999.01,\n monthly_average_bank_balance=94941.0,\n monthly_average_credit_card_volume=18191.0,\n )\n\n address = Address.objects.create(\n address1=\"1234 Red Ln\",\n address2=None,\n city=\"Santa Monica\",\n state=\"CA\",\n zipcode=\"45321\",\n )\n\n def setUp(self):\n self.self_reported_cash_flow.save()\n self.address.save()\n Business.objects.create(\n name=\"Wow Inc\",\n self_reported_cash_flow=self.self_reported_cash_flow,\n address=self.address,\n tax_id=\"839674398\",\n phone=\"6573248876\",\n naics=\"79232\",\n has_been_profitable=True,\n has_been_bankrupted_in_last_7_years=False,\n inception_date=datetime.datetime.now(),\n )\n\n def test_model_created(self):\n business = Business.objects.first()\n self.assertEquals(business.name, \"Wow Inc\")\n self.assertEquals(\n business.self_reported_cash_flow, self.self_reported_cash_flow\n )\n self.assertEquals(business.address, self.address)\n self.assertEquals(business.tax_id, 
\"839674398\")\n self.assertEquals(business.phone, \"6573248876\")\n self.assertEquals(business.naics, \"79232\")\n self.assertEquals(business.has_been_profitable, True)\n self.assertEquals(business.has_been_bankrupted_in_last_7_years, False)\n # self.assertTrue(business.inception_date < datetime.datetime.now())\n\n\nclass CFApplicationDataModelTest(TestCase):\n def setUp(self):\n CFApplicationData.objects.create(\n requested_loan_amount=\"49999999\",\n stated_credit_history=1,\n legal_entity_type=\"LLC\",\n filter_id=\"897079\",\n )\n\n def test_model_created(self):\n cf_application_data = CFApplicationData.objects.first()\n self.assertEquals(cf_application_data.requested_loan_amount, \"49999999\")\n self.assertEquals(cf_application_data.stated_credit_history, 1)\n self.assertEquals(cf_application_data.legal_entity_type, \"LLC\")\n self.assertEquals(cf_application_data.filter_id, \"897079\")\n\n","sub_path":"loanapplicationapi/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115729582","text":"# index的admin文件\r\n# 修改title和header\r\nfrom django.contrib import admin\r\nfrom .models import *\r\n\r\n# Register your models here.\r\n# 一:直接将模型注册到admin后台\r\n# admin.site.register(Product)\r\n# admin.site.register(Type)\r\n\r\n# 修改title和header\r\nadmin.site.site_header = 'MyDjango'\r\nadmin.site.site_title = 'MyDjango 后台管理'\r\n\r\n\r\n# 二:自定义ProductAdmin类并集成ModelAdmin\r\n# 注册方法一,使用Python装饰器讲ProductAdmin和模型Product绑定并注册到后台\r\n@admin.register(Product)\r\nclass ProductAdmin(admin.ModelAdmin):\r\n # 设置显示的字段\r\n list_display = ['id','name','weight','size','type',]\r\n list_display.append('colored_type')\r\n # 设置可搜索的字段并在Admin后台数据生成搜索框,如有外键,因使用双下划线连接两个模型地方字段\r\n search_fields = ['id', 'name', 'type__type_name']\r\n # 设置过滤器,在后台数据的右侧生成导航栏,如有外键,因使用双下划线连接两个模型的字段\r\n list_filter = ['name', 'type__type_name']\r\n # 设置排序方式,['id']为升序,降序为['-id']\r\n ordering = ['id']\r\n # 设置时间选择器,如字段中有时间格式才可以使用\r\n # date_hierarchy = Field\r\n # 添加新数据时,设置可添加数据的字段\r\n fields = ['name', 'weight', 'size', 'type']\r\n # 设置可读字段,在修改或新增数据时使其无法设置\r\n readonly_fields = ['name']\r\n # 重写get_readonly_fields()函数,设设置超级用户和普通用户 的权限\r\n def get_readonly_fields(self, request, obj=None):\r\n if request.user.is_superuser:\r\n self.readonly_fields = []\r\n return self.readonly_fields\r\n def get_queryset(self, request):\r\n qs = super(ProductAdmin,self).get_queryset(request)\r\n if request.user.is_superuser:\r\n return qs\r\n else:\r\n return qs.filter(id__lt=6)\r\n\r\n@admin.register(Type)\r\nclass TypeAdmin(admin.ModelAdmin):\r\n list_display = ['id', 'type_name']\r\n\r\n# 注册方法二\r\n# admin.site.register(Product, ProductAdmin)","sub_path":"django/MyDjango/index/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353567573","text":"import random, os, sys\n\ncardName = {1: 'As', 2: 'Dos', 3: 'Tres', 4: 'Quatre', 5: 'Cinc', 6: 'Sis', 7: 'Set', 8: 'Vuit', 9: 'Nou',\n 10: 'Deu', 11: 'Jota', 12: 'Reina', 13: 'Rei'}\ncardSuit = {'c': 'Piques', 'h': 'Cors', 's': 'Espases', 'd': 'Diamants'}\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\nclass Card:\n\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = 
suit\n\n def __str__(self):\n return (cardName[self.rank] + \" de \" + cardSuit[self.suit])\n\n def getRank(self):\n return (self.rank)\n\n def getSuit(self):\n return (self.suit)\n\n def BJValue(self):\n if self.rank > 9:\n return (10)\n else:\n return (self.rank)\n\n\ndef showHand(hand):\n for card in hand:\n print(\" \" + str(card))\n\n\ndef showCount(hand):\n print(\"Puntuació: \" + str(handCount(hand)))\n\n\ndef handCount(hand):\n handCount = 0\n for card in hand:\n handCount += card.BJValue()\n return (handCount)\n\ndef gameEnd(score):\n print(color.BOLD +\"Blackjack!! \"+ color.END + \"-*Puntuació*- maquina: \" + str(score['maquina']) + \" Puntuació jugador: \" + str(score['jugador']))\n sys.exit(0)\n\n\ndeck = []\nsuits = ['c', 'h', 'd', 's']\nscore = {'maquina': 0, 'jugador': 0}\nhand = {'maquina': [], 'jugador': []}\n\nfor suit in suits:\n for rank in range(1, 14):\n deck.append(Card(rank, suit))\n\nkeepPlaying = True\n\nwhile keepPlaying:\n\n random.shuffle(deck)\n random.shuffle(deck)\n random.shuffle(deck)\n\n # Deal Cards\n\n hand['jugador'].append(deck.pop(0))\n hand['maquina'].append(deck.pop(0))\n\n hand['jugador'].append(deck.pop(0))\n hand['maquina'].append(deck.pop(0))\n\n playHuman = True\n bustedHuman = False\n\n while playHuman:\n print(color.RED + \"---------------------------------------------------------\" + color.END)\n print(color.BOLD + \"_-*Blackjack*-_ \"+ color.BLUE + \"Maquina: \" + str(score['maquina']) + color.PURPLE + \" - Jugador: \" + str(score['jugador']) + color.END)\n\n print()\n\n print(color.BOLD + \"La maquina ha robat: \" + color.END + str(hand['maquina'][-1]) )\n print()\n\n print(color.BOLD + \"La teva ma:\" + color.END)\n\n showHand(hand['jugador'])\n print()\n showCount(hand['jugador'])\n\n print()\n\n inputCycle = True\n userInput = ''\n\n while inputCycle:\n userInput = input(\"(U)na carta mes, (R)es mes, or (S)ortir: \").upper()\n if userInput in ('U', 'R', 'S'):\n inputCycle = False\n\n if userInput == 'U':\n hand['jugador'].append(deck.pop(0))\n if handCount(hand['jugador']) > 21:\n playHuman = False\n bustedHuman = True\n elif userInput == 'R':\n playHuman = False\n else:\n gameEnd(score)\n\n playComputer = True\n bustedComputer = False\n\n while not bustedHuman and playComputer:\n print(handCount(hand['maquina']))\n if handCount(hand['maquina']) < 17:\n hand['maquina'].append(deck.pop(0))\n else:\n playComputer = False\n\n if handCount(hand['maquina']) > 21:\n playComputer = False\n bustedComputer = True\n\n if bustedHuman:\n print(color.PURPLE + 'El jugador ha perdut' + color.END)\n score['maquina'] += 1\n elif bustedComputer:\n print(color.BLUE + 'La maquina ha perdut' + color.END)\n score['jugador'] += 1\n elif handCount(hand['jugador']) > handCount(hand['maquina']):\n print(color.PURPLE +'El jugador guanya' + color.END)\n score['jugador'] += 1\n else:\n print(color.BLUE + 'La maquina guanya' + color.END)\n score['maquina'] += 1\n\n print()\n print('Ma de la maquina:')\n showHand(hand['maquina'])\n showCount(hand['maquina'])\n\n print()\n print('Ma de l\\'usuari')\n showHand(hand['jugador'])\n showCount(hand['jugador'])\n print()\n if input(\"(S)ortir o intro per jugar una nova ronda\").upper() == 'S':\n gameEnd(score)\n\n deck.extend(hand['maquina'])\n deck.extend(hand['jugador'])\n\n del hand['maquina'][:]\n del hand['jugador'][:]","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"48636531","text":"import sys\nimport os\nos.chdir('C:\\\\adb')\nif len(sys.argv) != 2:\n print('Enter the Filename: ')\n raise SystemExit(1)\nf = open(sys.argv[1])\nlines = f.readlines()\nf.close()\nflines = [float(line) for line in lines]\n\nprint('the max of two numbers is ' + max(flines))\nprint('the min of two numbers is' + min(flines))","sub_path":"Scripts-Pyc/argv.py","file_name":"argv.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190803242","text":"\"\"\"\nKurtosis-specific routines\n\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.signal import lfilter\nfrom obspy.core.stream import Stream\nfrom obspy.core.trace import Trace\n\nfrom .utils import smooth_filter\nfrom .pick_candidate import PickCandidate\n# from .logger import log\n\n\nclass Kurtosis_pick():\n \"\"\"\n Holds one Kurtosis-based pick and the values that chose it\n \"\"\"\n def __init__(self, time, k_jump, kurtosis):\n \"\"\"\n :param time: kurtosis pick time\n :param k_jump: jump in kurtosis associated with the pick\n :param kurtosis: kurtosis value at pick\n \"\"\"\n self.time = time\n self.k_jump = k_jump\n self.kurtosis = kurtosis\n\n\nclass Kurtosis():\n \"\"\"\n Class for picking seismograms using the Kurtosis\n\n Kurtosis picking is a n-step process whose goal is to identify jumps\n in the Kurtosis:\n 1) calculate the Kurtosis\n 2) calculate the cumulative sum of the kurtosis (kurto_cum)\n 3) remove a linear fit from the beginning to the end of the kurto_cum\n (corr_kurto_cum)\n 4) Identify peaks (highs) and subtract this from the preceding data. Set\n all positive values to zero\n 5) identify where the slope of the resulting function changes from positive\n to negative\n \"\"\"\n def __init__(self, freq_bands, wind_lengths, n_smooth, extrem_smooths=None,\n plot=False):\n \"\"\"\n :param FBs: list of [min, max] frequency bands\n :param window_lengths: list of window lengths (seconds)\n :param n_smooth: smoothings to apply in calculating Kurtosis (samples)\n :param extrem_smooth: smoothing sequence to use when calculating times\n of extrema\n :param plot: make a plot of Kurtosis parameters\n \"\"\"\n assert isinstance(freq_bands, list), \"freq_bands is not a list\"\n assert isinstance(wind_lengths, list), \"wind_lengths is not a list\"\n assert isinstance(n_smooth, int), \"n_smooth is not an int\"\n\n self.freq_bands = freq_bands\n self.wind_lengths = wind_lengths\n self.n_smooth = n_smooth\n self.plot = plot\n self.extrem_smooths = None # smoothings for extrema following\n\n # Mean trace over freq_bands, wind_lengths & smoothing\n self.mean_kurtosis = None\n # Mean trace, cumulated and detrended\n self.mean_cumulative_kurtosis = None\n # Gradients of mean_cumulative_kurtosis, for each extrem_smoothing\n self.kurto_gradients = None\n\n def __str__(self):\n s = \"Kurtosis:\\n\"\n s += f\" freq bands = {self.freq_bands}\\n\"\n s += f\" window_lengths = {self.wind_lengths}\\n\"\n s += f\" n_smooth = {self.n_smooth}\\n\"\n s += f\" extrem_smooths = {self.extrem_smooths}\\n\"\n s += f\" mean_kurtosis = {self.mean_kurtosis}\\n\"\n s += f\" mean_cumulative kurtosis = {self.mean_cumulative_kurtosis}\\n\"\n s += f\" len{self.kurto_gradients} kurto_gradients\"\n return s\n\n def pick_trace(self, trace, n_candidates, starttime=None, endtime=None,\n extrem_type='mini', extrem_smooths=[1, 5, 10],\n extrem_normalize=False, extrem_which='max'):\n \"\"\"\n Pick a trace using the Kurtosis\n\n :param trace: 
waveform trace\n :param n_candidates: maximum number of candidates to return\n :param starttime: first time of interest\n :param endtime: last time of interest\n :param extrem_type: 'mini' or 'maxi', depending on whether you want to\n follow minima or maxima\n :param extrem_smooths: smoothing to apply when following extrema\n :param extrem_normalize: normalize the cumulative Kurtosis gradient\n :param extrem_which:\n 'first': select first 'n_extrema' extrema > 0.1, order right to\n left\n 'max': select the 'n_extrema' biggest extrema, order biggest\n to smallest\n :returns: list of PickCandidates\n \"\"\"\n self.extrem_smooths = extrem_smooths\n self.calc_kurtocum(trace, starttime, endtime)\n candidates = self.follow_extrem(type_=extrem_type,\n n_extrema=n_candidates,\n normalize=extrem_normalize,\n sense=extrem_which)\n return candidates\n\n def calc_kurtocum(self, trace, starttime=None, endtime=None, debug=False):\n \"\"\"\n Calculate cumulative kurtosis over bandwidths and windows\n\n Puts mean kurtosis over all windows and freq_bands in\n self.mean_cumulative_kurtosis\n :param trace: the raw trace (works on one trace, not a stream)\n :param starttime: is the first time of interest\n :param endtime: is the last time of interest\n \"\"\"\n # trace2FWkurto.m:26\n assert isinstance(trace, Trace), \"trace is not an obspy Trace\"\n\n # sr = trace.stats.sampling_rate\n # data_length = trace.stats.endtime - trace.stats.starttime\n if starttime is None:\n starttime = trace.stats.starttime\n elif starttime < trace.stats.starttime:\n starttime = trace.stats.starttime\n if endtime is None:\n endtime = trace.stats.endtime\n elif endtime > trace.stats.endtime:\n endtime = trace.stats.endtime\n\n # Filter traces in different frequency bands\n B = []\n for FB in self.freq_bands:\n f = trace.copy()\n if debug:\n print(f'trace2FWkurto: filtering from {FB[0]} to {FB[1]} Hz')\n f.detrend('demean')\n f.filter(type='bandpass', freqmin=FB[0], freqmax=FB[1], corners=3)\n f = f.slice(starttime, endtime)\n f.data[np.arange(0, 50)] = 0\n B.append(f)\n\n # 2-level lists: 1st dim: window_lengths, 2nd dim: freq bands\n K, C = [], [] # all kurtoses and all cumulative, detrended kurtoses\n for win_len in self.wind_lengths:\n corr_cums, kurtos = self.calc_cum_kurtoses(B, win_len)\n C.append(corr_cums)\n K.append(kurtos)\n self.mean_kurtosis = _mean_trace(K)\n self.mean_cumulative_kurtosis = _mean_trace(C)\n\n def calc_cum_kurtoses(self, B, win_len):\n \"\"\"\n Calculate kurtoses and cumulative kurtoses for given window length\n\n :param B: list of data, filtered in different bands\n :param win_len: window length in seconds\n \"\"\"\n corr_cums, kurtos = [], []\n sr = B[0].stats.sampling_rate\n data_length = B[0].stats.endtime - B[0].stats.starttime\n if win_len > data_length:\n warnings.warn('Kurtosis window > data window ('\n f'{win_len:.3g}s > {data_length:.3g}s), skipping!')\n return [], []\n else:\n win_samps = int(np.floor(win_len * sr)) + 1\n win_samps = min(win_samps, len(B[0].data))\n for tr, fb in zip(B, self.freq_bands):\n k = _fast_kurtosis(tr, win_samps)\n filt = smooth_filter(k, self.n_smooth)\n corr_cum, _ = _f_cumul(filt)\n corr_cum.detrend('simple')\n # corr_cum = kurto_cum.copy()\n # line = _f_segment(corr_cum)\n # corr_cum.data -= line.data\n if self.plot:\n self._plot_kurtosis(win_len, fb, tr, k, corr_cum)\n kurtos.append(k)\n corr_cums.append(corr_cum)\n return corr_cums, kurtos\n\n @staticmethod\n def _plot_kurtosis(wl, fb, trace, kurtosis, corr_cum):\n print(f'Plot kurtosis for win_len={wl}, 
f_band={fb}', 'debug')\n cor = corr_cum.copy()\n cor.stats.channel = 'COR'\n kurtosis.stats.channel = 'KUR'\n Stream([trace, kurtosis, cor]).plot(\n size=(600, 600), equal_scale=False)\n\n def follow_extrem(self, type_='mini', n_extrema=2,\n normalize=False, sense=None, debug=False):\n \"\"\"\n Return extrema of the cumulative kurtosis\n\n Steps:\n 1) Smooth the cumulative kurtosis (self.mean_cumulative_kurtosis)\n using the windows specified in self.extrem_smooths\n 2) locate the first 'n' first extremas of the smoothest function\n 3) refine these extrema step by step through less and less smooth\n functions\n\n Uses self.mean_cumulative kurtosis\n :param type: 'mini' or 'maxi': follow minima or maxima\n :param n_extrema: number of extrema to follow\n :param normalize: normalize the gradient?\n :param sense:\n 'first': select first 'n_extrema' extrema > 0.1, ordered right\n to left\n 'max': select from biggest to smallest\n :returns: list of PickCandidates\n \"\"\"\n # follow_extrem.m:29\n # Parameters\n assert not len(self.mean_cumulative_kurtosis) == 0,\\\n 'no mean cumulative kurtosis!'\n st = self.mean_cumulative_kurtosis.stats.starttime\n sr = self.mean_cumulative_kurtosis.stats.sampling_rate\n\n all_extrema, self.kurto_gradients, _ =\\\n _get_extrema(self.extrem_smooths, self.mean_cumulative_kurtosis,\n type_, normalize)\n\n selected_extrema = _select_extrema(all_extrema[0],\n n_extrema, sense)\n\n # Sharpen the indices/values using the smaller smoothing values\n sharp_extrema = []\n for e in selected_extrema:\n extrema = e\n for finer_extrema in all_extrema[1:]:\n c_extrem = _find_close_extrema(\n extrema, finer_extrema, 40)\n if c_extrem is not None:\n extrema = c_extrem\n sharp_extrema.append(extrema)\n\n return [PickCandidate(st + x['index']/sr,\n 'kurtosis',\n x['value'],\n sampling_rate=sr)\n for x in sharp_extrema]\n\n\ndef _find_close_extrema(best_extrem, finer_extrema, max_diff=40):\n if len(finer_extrema) == 0:\n warnings.warn('No extrema found for {:d}-sample smoothing'.format(\n finer_extrema['smoothing']))\n return None\n i_diff = abs(best_extrem['index'] - [x['index'] for x in finer_extrema])\n if np.any(i_diff <= max_diff):\n return finer_extrema[np.argmin(i_diff)]\n return None\n\n\ndef _get_extrema(smoothing_list, cumul_k, sense, normalize, debug=False):\n \"\"\"\n Returns a list of extrema, from smoothest to roughest\n\n :param smoothing list: list of smoothings in number of samples\n :param cumul_k: cumulative kurtosis Trace\n :param sens: 'maxi' or 'mini', passed on to ext_indices\n :param normalize: passed on to _cum2grad()\n :returns: list of {indices: value, trace: value} from smoothest to\n roughest. 
value is the approximate height of the kurtosis jump\n \"\"\"\n extrema, gradients = [], []\n # Put the strongest smoothing first\n sorted_smooth = sorted(smoothing_list, reverse=True)\n for smoothing in sorted_smooth:\n v_smooth = smooth_filter(cumul_k, smoothing)\n v_smooth = _cum2grad(v_smooth, normalize)\n ext_indices, _ = _loca_ext(v_smooth, None, None, sense)\n extrema.append([{'index': i, 'value': -v_smooth.data[i]}\n for i in ext_indices])\n gradients.append(v_smooth)\n # create a list of {index: value:} dictionaries with all of the\n # detected extrema in the smoothest trace\n # extrema = [{'index': i, 'value': extrema[0]['trace'].data[i]}\n # for i in extrema[0]['indices']]\n return extrema, gradients, sorted_smooth\n\n\ndef _select_extrema(extrema, N, sense='max', threshold=0.1):\n \"\"\"\n Return N extrema, ordered by size or time\n :param extrema: list of {'value': val, 'index': i}\n :param sense: 'max' return from largest to smallest\n 'first': return from first to last\n :param threshold: minimum value to accept for sense=='first'\n \"\"\"\n if sense == 'first':\n big_extrema = [x for x in extrema if x['value'] >= threshold]\n if len(big_extrema) > 1:\n ext = sorted(big_extrema, key=lambda k: k['index'])\n else:\n ext = sorted(extrema, key=lambda k: k['index'])\n try:\n selected = [x for x in ext[:N:-1]]\n except Exception:\n selected = [x for x in ext[::-1]]\n elif sense == 'max':\n ext = sorted(extrema, key=lambda k: np.abs(k['value']), reverse=True)\n try:\n selected = [x for x in ext[:N]]\n except Exception:\n selected = [x for x in ext]\n else:\n raise NameError(\"sense not 'max' or 'first'\")\n return selected\n\n\ndef _fast_kurtosis(trace, win_samps):\n \"\"\"\n Compute kurtosis really quickly using \"filter\" function\n\n could I just use obspy kurtosis?\n\n :param trace: one trace\n :param win_samps: number of samples in the sliding window\n :returns: Kurtosis trace\n \"\"\"\n assert isinstance(trace, Trace), \"trace is not an obspy Trace\"\n win_samps = int(round(win_samps))\n # log(win_samps, 'debug')\n # fast_kurtosis.m:11\n if win_samps == 1:\n win_samps = 2\n\n # Set NaNs to 0 for computational stability\n f = trace.copy()\n f.detrend(type='demean')\n # f.data[np.isnan(f.data)] = 0\n\n # Compute kurtosis\n a = np.divide(np.ones(win_samps), float(win_samps))\n b = 1.\n m_2 = lfilter(a, b, f.data**2)\n m_4 = lfilter(a, b, f.data**4)\n out = trace.copy()\n out.data = np.divide(m_4, (m_2 ** 2))\n # Protect against edge effect\n out.data[:win_samps] = out.data[win_samps]\n\n # Set any kurtosis value to nan for any indices within win_samples of\n # an NaN in the original data.\n # I think this is outdated, should just trim\n for i in np.nonzero(trace.data == np.nan)[0]:\n if i:\n out.data[i: i + win_samps] = np.nan\n return out\n\n\ndef _f_cumul(f):\n \"\"\"\n Calculate the positive gradient cumulative of f\n\n If the gradient is positive we take the cumulative, if the gradient is\n negative then, as long as the gradient is negative, the value is that\n of the last positive gradient. 
The output has then only positive\n gradients\n ___/\n /\n ___/\n :param f: trace, stream or list of traces containing the data\n :returns: trace or list of cumulative output traces, first value = 0\n list of cumulative output traces\n \"\"\"\n # f_cumul.m:14\n bare_trace = False\n if isinstance(f, Trace):\n f = [f]\n bare_trace = True\n g = f.copy()\n for t in g:\n tdata = t.data.copy() # Backup copy for info\n t.data[np.isnan(t.data)] = 0\n t.differentiate(method='gradient')\n t.data[t.data < 0] = 0\n t.data = np.cumsum(t.data)\n t.data[np.isnan(tdata)] = np.nan\n\n p = g.copy()\n\n # Subtract first non-nan value from g\n for t in g:\n ind = np.nonzero(np.isfinite(t.data))[0]\n if len(ind) > 0:\n t.data -= t.data[ind[0]]\n\n if bare_trace:\n return g[0], p[0]\n else:\n return g, p\n\n\ndef _f_segment(f):\n \"\"\"\n Return a line segment between the first and last values of function.\n SHOULD BE REPLACEABLE BY DETREND('linear')\n\n Goes from first non-nan value to last non-nan value\n Input and output have the same size.\n :param f: trace, stream or list of data traces\n :returns: line segment trace or list of traces\n \"\"\"\n bare_trace = False\n if isinstance(f, Trace):\n f = [f]\n bare_trace = True\n assert isinstance(f[0], Trace), f'f is list of {type(f[0])}s!'\n # f_segment.m:12\n segments = []\n # for i in arange(1,n).reshape(-1):\n for trace in f:\n # print(type(trace), trace)\n # clear('a','b','ya','yb','lin')\n a = np.nonzero(np.isfinite(trace.data))[0][0]\n b = np.nonzero(np.isfinite(trace.data))[0][-1]\n ya = trace.data[a]\n yb = trace.data[b]\n lin = np.linspace(start=ya, stop=yb, num=b-a+1)\n segment = trace.copy()\n # Ugly handling of different cases\n before = np.empty(a)\n before[:] = np.nan\n after = np.empty(len(trace.data)-b-1)\n after[:] = np.nan\n segment.data = np.concatenate((before, lin, after))\n segments.append(segment)\n if bare_trace:\n return segments[0]\n else:\n return segments\n\n\ndef _cum2grad(f_in, normalize=False, debug=False):\n \"\"\"\n Transform cumulative into function that is shifted below 0 for all maxima\n\n Finds peaks, subtracts the next peak value from all data, gets rid of\n all values > 0. 
Give negative values that \"jump\" up to zero at the next\n peak\n\n :param f_in: cumulative trace\n :param normalize: 'True' will divide the output by it's min value\n :returns: f_out\n \"\"\"\n # cum2grad.m:7\n assert not len(f_in) == 0, 'f_in is empty!'\n\n tycalpha = np.zeros(len(f_in.data))\n tikxs, _ = _loca_ext(f_in, None, None, 'maxi')\n a = int(np.nonzero(np.isfinite(f_in.data))[0][0])\n b = int(np.nonzero(np.isfinite(f_in.data))[0][-1])\n # tikxs = tikxs.to_list()\n if len(tikxs) == 0:\n tikxs = np.array([a, b])\n else:\n tikxs = np.array([a] + tikxs.tolist() + [b])\n\n tikys = f_in.data[tikxs]\n # function equal to the next peak\n for ilo, ihi, y in zip(tikxs[:-2], tikxs[1:], tikys[1:]):\n tycalpha[ilo:ihi] = y\n # tycalpha[np.arange(tikx(j), tikx(j + 1))] = tiky(j + 1)\n\n f_out = f_in.copy()\n f_out.data -= tycalpha # input minus the next peak value\n f_out.data[f_out.data > 0] = 0 # Get rid of everything above zero\n if normalize:\n f_out.data /= abs(min(f_out.data))\n if debug:\n Stream([f_in, f_out]).plot()\n return f_out\n\n\ndef _loca_ext(trace, starttime, endtime, type_, debug=False):\n \"\"\"\n Returns local extrema\n\n Actually just returns where the trace slope changes from positive to\n negative (type_=='maxi'), or vice versa (type='mini')\n :param trace: waveform trace\n :param start_time: start of window to look at\n :param end_time: end of window to look at\n :param type_: 'maxi' or 'mini'\n :returns: indice, trace_value, diff_value\n \"\"\"\n diff = trace.copy()\n diff.data = np.diff(np.sign(np.diff(trace.data)))\n diff = diff.slice(starttime, endtime)\n if debug:\n diff.plot()\n if type_ == 'maxi':\n loc = (diff.data < 0)\n else:\n loc = (diff.data > 0)\n\n i_extremes = np.nonzero(loc)[0] + 1\n return i_extremes, trace.data[i_extremes]\n\n\ndef _mean_trace(traces):\n \"\"\"\n Calculate the mean trace\n\n :param traces: stream or list of traces\n :returns: trace object\n \"\"\"\n if isinstance(traces, Trace):\n return traces.copy()\n if isinstance(traces[0], Trace):\n if len(traces) == 1:\n return traces[0].copy()\n else:\n assert isinstance(traces[0][0], Trace),\\\n \"traces not a Trace, list of Traces, or list of lists of Traces\"\n traces = [t for x in traces for t in x]\n\n data_len = len(traces[0].data)\n for tr in traces[1:]:\n assert len(tr.data) == data_len, 'traces are not the same length'\n\n mean_tr = traces[0].copy()\n for tr in traces[1:]:\n mean_tr.data += tr.data\n mean_tr.data /= len(traces)\n return mean_tr\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"build/lib/ps_picker/kurtosis.py","file_name":"kurtosis.py","file_ext":"py","file_size_in_byte":19446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446850005","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def minDiffInBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n v = []\n def travel(tree):\n if not tree:\n return\n v.append(tree.val)\n travel(tree.left)\n travel(tree.right)\n travel(root)\n res = 1000000\n v.sort()\n for i in range(1, len(v)):\n if v[i] - v[i-1] < res:\n res = v[i] - v[i-1]\n if res == 1:\n break\n return res","sub_path":"181231/Minimum_Distance_Between_BST_Nodes.py","file_name":"Minimum_Distance_Between_BST_Nodes.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"338666598","text":"from django.test import LiveServerTestCase\ntry:\n from django.urls import reverse\nexcept:\n from django.core.urlresolvers import reverse\n\n\nclass TestMainUsage(LiveServerTestCase):\n def setUp(self):\n from selenium import webdriver\n self.selenium = webdriver.Chrome()\n self.selenium.implicitly_wait(10)\n\n def test_pay(self):\n from selenium.common.exceptions import NoSuchElementException\n self.selenium.get(self.live_server_url + reverse('home') + '?order_id=1')\n self.assertRaises(NoSuchElementException, self.selenium.find_element_by_css_selector, '.errorlist') # noqa\n\n self.selenium.find_element_by_css_selector('[type=submit]').click()\n self.assertEquals(self.selenium.current_url, 'https://secure.futubank.com/testing-pay/') # noqa\n self.assertEquals(self.selenium.title, '[ТЕСТ] Оплата покупки')\n\n def tearDown(self):\n self.selenium.quit()\n\n\n","sub_path":"example/app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"310719356","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n#author: tp7309\n#usage:\n#python showjar.py *.apk/*.aar/*.dex/*.jar\n\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport zipfile\n\ndex2jar=\"dex2jar-2.1-SNAPSHOT\\d2j-dex2jar.bat\"\nif not os.name == 'nt': dex2jar = \"./dex2jar-2.1-SNAPSHOT/d2j-dex2jar.sh\"\njdgui=\"jd-gui-1.4.0.jar\"\napktool=\"apktool_2.3.0.jar\"\nneedUnzipFiles=['patch.jar']\nneedDecompileResources=0\n\n#unpack\n#java -jar apktool_2.2.4.jar d test.apk\n#repack\n#java -jar apktool_2.2.4.jar b test\n\n\ndef sh(command):\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n print(p.stdout.read())\n\n\nif __name__==\"__main__\":\n f = sys.argv[1]\n # fix windows path\n if \":\\\\\" in f and not \":\\\\\\\\\" in f:\n f = f.replace(\"\\\\\", \"\\\\\\\\\")\n dexes = []\n jars = []\n if f.endswith(\".apk\") or f in needUnzipFiles:\n print(\"unzip %s...\"%(f))\n tempDir = os.path.splitext(f)[0]\n with zipfile.ZipFile(f, 'r') as zip:\n zip.extractall(tempDir)\n dexes = [f for f in os.listdir(tempDir) if f.endswith('.dex')]\n print(\"founded dexes: \" + ', '.join(dexes))\n for file in os.listdir(tempDir):\n if file.endswith('-dex2jar.jar') and os.path.exists(file):\n os.remove(file)\n for dex in dexes:\n sh(\"%s -f %s\"%(dex2jar, os.path.join(tempDir, dex)))\n jars.append(os.path.splitext(dex)[0] + \"-dex2jar.jar\")\n shutil.rmtree(tempDir)\n elif f.endswith(\".aar\"):\n print(\"unzip %s...\"%(f))\n tempDir = os.path.splitext(f)[0]\n with zipfile.ZipFile(f, 'r') as zip:\n \tzip.extractall(tempDir)\n dstPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"classes.jar\")\n shutil.move(os.path.join(tempDir, \"classes.jar\"), dstPath)\n shutil.rmtree(tempDir)\n jars.append(\"classes.jar\")\t\n elif f.endswith(\".dex\"):\n sh(\"%s -f %s\"%(dex2jar, f))\n jars.append(os.path.splitext(f)[0] + \"-dex2jar.jar\")\n elif f.endswith(\".jar\"):\n jars.append(f)\n else:\n print(\"error file extension!\")\n exit\n\n if needDecompileResources and f.endswith(\".apk\") or f in needUnzipFiles:\n print(\"decompile resources...\")\n sh(\"java -jar %s d %s\"%(apktool, f))\n print(\"decompile resources done\")\n \n sh(\"java -jar %s %s\"%(jdgui, ' '.join(jars) if len(jars)> 0 else jars[0]))\n 
print(\"Done\")\n","sub_path":"showjar.py","file_name":"showjar.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197161228","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'teacher_tool.views.index', name='index'),\n url(r'^my_cources/$', 'teacher_tool.views.my_cources_view', name='my_cources_list'),\n url(r'^groups/$', 'teacher_tool.views.groups_view', name='groups_list'),\n\n\n # url(r'^teacher_tool/', include('teacher_tool.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"teacher_tool/teacher_tool/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572862234","text":"# Default Imports\n\nimport pandas as pd\nimport numpy as np\n\nfrom greyatomlib.logistic_regression_project.q02_data_cleaning_all.build import data_cleaning\nfrom greyatomlib.logistic_regression_project.q01_outlier_removal.build import outlier_removal\n\nloan_data = pd.read_csv('data/loan_prediction_uncleaned.csv')\nloan_data = loan_data.drop('Loan_ID', 1)\nloan_data = outlier_removal(loan_data)\nX, y, X_train, X_test, y_train, y_test = data_cleaning(loan_data)\n\n\ndef data_cleaning_2(X_train,X_test,y_train,y_test) :\n\n for i in X_train.select_dtypes(include=[np.number]).columns.tolist() :\n np.sqrt(X_train[i])\n\n for i in X_test.select_dtypes(include=[np.number]).columns.tolist() :\n np.sqrt(X_test[i])\n\n X_test = pd.get_dummies(X_test, drop_first = True )\n #print(X_test.shape)\n\n X_train = pd.get_dummies(X_train, drop_first = True )\n\n #X_train.drop(mylist, axis=1, inplace = True)\n #print(X_train.shape)\n #print(list(X_test['Dependents_1'].value_counts()))\n\n return X_train, X_test, y_train, y_test\n\n# Write your solution here :\n","sub_path":"q02_data_cleaning_all_2/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261670648","text":"import os\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, RadioField\nfrom flask_migrate import Migrate\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'mysecretkey'\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir,'data.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nMigrate(app,db)\n\ndef decode_resp(s):\n pairs_with_equalto = s.split('&')\n resp = {}\n for item in pairs_with_equalto:\n key, value = item.split('=')\n resp[key] = value \n return resp\ndef day_in_month(month,year):\n month = int(month)\n year = int(year)\n if ((year%100!=0 and year%4==0) or (year%400==0)) and month == 2:\n return 29\n elif month==2:\n return 28\n elif month in [1,3,5,7,8,10,12]:\n return 31\n else:\n return 30\n\ndef blank_divs(year,month):\n return ['' for i in 
range(int(datetime(int(year),int(month),1).strftime(\"%w\")))]\n\ndef month_name(month):\n return datetime(2020,int(month),1).strftime(\"%B\")\n\nclass GoTo(FlaskForm):\n year = StringField('Year')\n month = StringField('Month')\n submit = SubmitField('GoTo')\n\nclass Task(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n year = db.Column(db.String)\n month = db.Column(db.String)\n day = db.Column(db.String)\n task = db.Column(db.Integer,default=0)\n def __init__(self,year,month,day,task):\n self.year = year\n self.month = month\n self.day = day\n self.task = task\n\n\n\n\n\n@app.route('/', methods=['GET','POST'])\ndef index():\n datalist = []\n year = str(datetime.now().strftime(\"%Y\"))\n month = str(datetime.now().strftime(\"%m\"))\n x_home = str(year) + str(int(month))\n days = day_in_month(month,year) \n form = GoTo()\n if request.method=='POST':\n try:\n # Insert new data\n data = decode_resp(request.data.decode('ascii'))\n db.session.add(Task(data['year'],data['month'],data['day'], 1))\n db.session.commit()\n except:\n # goto another month\n if form.validate_on_submit():\n year = form.year.data \n month = form.month.data \n\n days = day_in_month(month, year) # recompute: GoTo may have changed month/year above\n for day in range(1,days+1):\n x = Task.query.filter_by(day=str(day),month=str(int(month)),year=year).first()\n \n if x:\n datalist.append([day,x.task])\n \n else:\n \n datalist.append([day,0])\n \n y_home = str(year) + str(int(month))\n return render_template('calender.html', year=year, month=int(month), homebutton=(x_home==y_home) ,fullNameOfMonth=month_name(month),datalist=datalist, form=form, blank_divs=blank_divs(year,month))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61636452","text":"import re\nimport os\nimport sys\nimport logging\nimport functools\nfrom tests.common.devices.base import AnsibleHostBase\nfrom ansible.utils.unsafe_proxy import AnsibleUnsafeText\n\n# If the version of the Python interpreter is greater or equal to 3, set the unicode variable to the str class.\nif sys.version_info[0] >= 3:\n unicode = str\n\nlogger = logging.getLogger(__name__)\n\nSAMPLE_COMMAND_DATA = '''\nRP/0/RP0/CPU0:vlab-01#show operational LLDP NodeTable Node/NodeName/Rack=0;\nSlot=0;Instance=CPU0 Neighbors DeviceTable Device/DeviceID=vlab-02/Interf$\nWed Aug 10 08:45:43.126 UTC\n......\n \"Operational\": {\n \"LLDP\": {\n \"@MajorVersion\": \"1\",\n \"@MinorVersion\": \"2\",\n \"NodeTable\": {\n \"Node\": {\n \"Naming\": {\n \"NodeName\": {\n \"Rack\": \"0\",\n \"Slot\": \"0\",\n \"Instance\": \"CPU0\"\n }\n },\n \"Neighbors\": {\n \"DeviceTable\": {\n \"Device\": {\n \"Naming\": {\n \"DeviceID\": \"vlab-02\",\n \"InterfaceName\": \"GigabitEthernet0/0/0/1\"\n },\n \"Entry\": {\n \"ReceivingInterfaceName\": \"GigabitEthernet0/0/0/1\",\n \"ReceivingParentInterfaceName\": \"Bundle-Ether1\",\n \"DeviceID\": \"vlab-02\",\n \"ChassisID\": \"5254.0085.5c1c\",\n \"PortIDDetail\": \"fortyGigE0/4\",\n \"HeaderVersion\": \"0\",\n \"HoldTime\": \"120\",\n \"EnabledCapabilities\": \"B,R\",\n \"Detail\": {\n......\n'''\n\n\ndef adapt_interface_name(func):\n \"\"\"Decorator to adapt interface name used in topology to cisco interface name.\"\"\"\n @functools.wraps(func)\n def _decorated(self, *args):\n args_list = list(args)\n new_list = []\n for item in args_list:\n new_item = item\n if isinstance(new_item, str) or isinstance(new_item, unicode) or isinstance(new_item, 
AnsibleUnsafeText):\n if 'Ethernet' in new_item and 'GigabitEthernet' not in new_item:\n new_item = re.sub(r'(^|\\s)Ethernet', 'GigabitEthernet0/0/0/', new_item)\n elif 'Port-Channel' in new_item:\n new_item = re.sub(r'(^|\\s)Port-Channel', 'Bundle-Ether', new_item)\n new_list.append(new_item)\n new_args = tuple(new_list)\n return func(self, *new_args)\n return _decorated\n\n\nclass CiscoHost(AnsibleHostBase):\n \"\"\"\n @summary: Class for Cisco host\n \"\"\"\n def __init__(self, ansible_adhoc, hostname, ansible_user, ansible_passwd):\n '''Initialize an object for interacting with cisco device using ansible modules\n Args:\n ansible_adhoc (): The pytest-ansible fixture\n hostname (string): hostname of the cisco device\n ansible_user (string): Username for accessing the cisco CLI interface\n ansible_passwd (string): Password for the ansible_user\n '''\n self.ansible_user = ansible_user\n self.ansible_passwd = ansible_passwd\n AnsibleHostBase.__init__(self, ansible_adhoc, hostname)\n # Reserved for executing ansible commands on the local device\n self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern=\"localhost\")[\"localhost\"]\n\n def __getattr__(self, module_name):\n if module_name.startswith('iosxr_'):\n evars = {\n 'ansible_connection': 'network_cli',\n 'ansible_network_os': module_name.split('_', 1)[0],\n 'ansible_user': self.ansible_user,\n 'ansible_password': self.ansible_passwd,\n 'ansible_ssh_user': self.ansible_user,\n 'ansible_ssh_pass': self.ansible_passwd,\n }\n else:\n raise Exception(\"Does not have module: {}\".format(module_name))\n self.host.options['variable_manager'].extra_vars.update(evars)\n return super(CiscoHost, self).__getattr__(module_name)\n\n def __str__(self):\n return '<CiscoHost {}>'.format(self.hostname)\n\n def __repr__(self):\n return self.__str__()\n\n def commands(self, *args, **kwargs):\n return self.iosxr_command(*args, **kwargs)\n\n def config(self, *args, **kwargs):\n return self.iosxr_config(*args, **kwargs)\n\n @adapt_interface_name\n def shutdown(self, interface_name=None):\n out = self.config(\n lines=['shutdown'],\n parents=['interface {}'.format(interface_name)])\n logging.info('Shut interface [%s]' % interface_name)\n return out\n\n def shutdown_multiple(self, interfaces):\n intf_str = ','.join(interfaces)\n return self.shutdown(interface_name=intf_str)\n\n @adapt_interface_name\n def no_shutdown(self, interface_name):\n out = self.config(\n lines=['no shutdown'],\n parents=['interface {}'.format(interface_name)])\n logging.info('No shut interface [%s]' % interface_name)\n return out\n\n def no_shutdown_multiple(self, interfaces):\n intf_str = ','.join(interfaces)\n return self.no_shutdown(intf_str)\n\n @adapt_interface_name\n def rm_member_from_channel_grp(self, interface_name, channel_group):\n out = self.config(\n lines=['no bundle id {} mode active'.format(channel_group)],\n parents=['interface {}'.format(interface_name)])\n logging.info('Rm interface {} from bundle-ethernet {}'.format(interface_name, channel_group))\n return out\n\n @adapt_interface_name\n def add_member_to_channel_grp(self, interface_name, channel_group):\n out = self.config(\n lines=['bundle id {} mode active'.format(channel_group)],\n parents=['interface {}'.format(interface_name)])\n logging.info('Add interface {} to bundle-ethernet {}'.format(interface_name, channel_group))\n return out\n\n @adapt_interface_name\n def check_intf_link_state(self, interface_name):\n show_int_result = self.commands(\n commands=['show interfaces %s' % interface_name])\n 
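# stdout_lines[0] holds the output lines of the single 'show interfaces' command issued above\n        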
return 'line protocol is up' in show_int_result['stdout_lines'][0]\n\n @adapt_interface_name\n def set_interface_lacp_rate_mode(self, interface_name, mode):\n if mode == 'fast':\n command = 'lacp period short'\n else:\n command = 'no lacp period'\n\n out = self.config(\n lines=[command],\n parents='interface %s' % interface_name)\n return out\n\n def get_lldp_neighbor(self, local_iface=None, remote_device=None):\n try:\n if (local_iface is not None and remote_device is not None):\n # DeviceID identifies the remote neighbor; InterfaceName is the local port\n command = 'show operational LLDP NodeTable ' \\\n 'Node/NodeName/Rack=0;Slot=0;Instance=CPU0 Neighbors DeviceTable ' \\\n 'Device/DeviceID={}/InterfaceName={} json'.format(remote_device, local_iface)\n else:\n command = 'show operational LLDP json'\n output = self.commands(\n commands=[command],\n module_ignore_errors=True)\n logger.debug('cisco lldp output: %s' % (output))\n return output['stdout_lines'][0]['Response']['Get']['Operational'] if output['failed'] is False else False\n except Exception as e:\n logger.error('command {} failed. exception: {}'.format(command, repr(e)))\n return False\n\n def config_key_chain(self, name, key):\n # create key chain\n output = self.config(\n lines=['key chain {} key 1'.format(name)])\n logger.debug('config key chain: %s' % (output))\n\n # configure key chain parameters\n output = self.config(\n lines=['accept-lifetime 00:00:00 december 01 2014 infinite',\n 'send-lifetime 00:00:00 december 01 2014 infinite',\n 'cryptographic-algorithm HMAC-MD5',\n 'key-string clear {}'.format(key)],\n parents=['key chain {} key 1'.format(name)])\n logger.debug('config key chain parameters: %s' % (output))\n\n def remove_key_chain(self, name):\n # remove key chain\n output = self.config(lines=['no key chain {}'.format(name)])\n logger.debug('remove key chain: %s' % (output))\n\n def isis_config_auth(self, key):\n key_chain_name = 'ISIS'\n self.config_key_chain(key_chain_name, key)\n\n # configure key chain to isis\n output = self.config(\n lines=['lsp-password keychain {} level 2'.format(key_chain_name),\n 'interface Bundle-Ether1 hello-password keychain {}'.format(key_chain_name)],\n parents=['router isis test'])\n logger.debug('config key chain to isis: %s' % (output))\n\n def isis_remove_auth(self, key):\n key_chain_name = 'ISIS'\n # remove key chain from isis\n output = self.config(\n lines=['no lsp-password keychain {} level 2'.format(key_chain_name),\n 'no interface Bundle-Ether1 hello-password keychain {}'.format(key_chain_name)],\n parents=['router isis test'])\n logger.debug('remove key chain from isis: %s' % (output))\n\n self.remove_key_chain(key_chain_name)\n\n def ping_dest(self, dest):\n try:\n command = 'ping {} count 5'.format(dest)\n output = self.commands(commands=[command])\n logger.debug('ping result: %s' % (output))\n return re.search('!!!!!', output['stdout'][0]) is not None if output['failed'] is False else False\n except Exception as e:\n logger.error('command {} failed. 
exception: {}'.format(command, repr(e)))\n return False\n\n def show_command_to_json(self, command, lookup_key=None, lookup_val=None):\n \"\"\"\n This function will pull the show operational command output as json string and convert it json object and return\n \"\"\"\n try:\n json_command = command + \" json\"\n output = self.commands(commands=[json_command])\n if all([lookup_key, lookup_val]):\n return self.extract_key_val_pair_from_json(output['stdout_lines'], lookup_key)\n elif lookup_key is not None and lookup_val is None:\n return self.extract_val_from_json(output['stdout_lines'], lookup_key)\n else:\n return output['stdout_lines']\n except Exception as e:\n return {\"error\": e}\n\n def extract_key_val_pair_from_json(self, data, lookup_key):\n \"\"\"\n Function to recursively match provided key in all levels and return list of same level data\n \"\"\"\n result = []\n\n def _walk(data, lookup_key, result):\n # recurse through nested dicts/lists, appending matches to 'result';\n # the walker returns nothing, so recursion is a plain call\n if isinstance(data, dict):\n for k, v in data.items():\n if k == lookup_key:\n result.append(data)\n elif isinstance(v, (list, dict)):\n _walk(v, lookup_key, result)\n elif isinstance(data, list):\n for ele in data:\n if isinstance(ele, (list, dict)):\n _walk(ele, lookup_key, result)\n _walk(data, lookup_key, result)\n return result\n\n def extract_val_from_json(self, json_data, lookup_key):\n \"\"\"\n Function to recursively match provided key in all levels and return matched key's value into a list\n \"\"\"\n result = []\n\n def _walk(data, lookup_key, result):\n if isinstance(data, dict):\n for k, v in data.items():\n if k == lookup_key:\n result.append(v)\n elif isinstance(v, (list, dict)):\n _walk(v, lookup_key, result)\n elif isinstance(data, list):\n for ele in data:\n if isinstance(ele, (list, dict)):\n _walk(ele, lookup_key, result)\n _walk(json_data, lookup_key, result)\n return result\n\n def _has_cli_cmd_failed(self, cmd_output_obj):\n err_out = False\n if 'stdout' in cmd_output_obj:\n stdout = cmd_output_obj['stdout']\n msg = stdout[-1] if isinstance(stdout, list) else stdout\n err_out = 'Cannot advertise' in msg\n\n return ('failed' in cmd_output_obj and cmd_output_obj['failed']) or err_out\n\n def load_configuration(self, config_file, backup_file=None):\n if backup_file is None:\n out = self.config(\n src=config_file,\n replace='config',\n )\n else:\n out = self.config(\n src=config_file,\n replace='line',\n backup='yes',\n backup_options={\n 'filename': os.path.basename(backup_file),\n 'dir_path': os.path.dirname(backup_file),\n }\n )\n return not self._has_cli_cmd_failed(out)\n\n @adapt_interface_name\n def get_portchannel_by_member(self, member_intf):\n try:\n command = 'show lacp {}'.format(member_intf)\n output = self.commands(commands=[command])['stdout'][0]\n regex_pc = re.compile(r'Bundle-Ether([0-9]+)', re.U)\n for line in [item.strip().rstrip() for item in output.splitlines()]:\n if regex_pc.match(line):\n return re.sub('Bundle-Ether', 'Port-Channel', line)\n except Exception as e:\n logger.error('Failed to get PortChannel for member interface \"{}\", exception: {}'.format(\n member_intf, repr(e)\n ))\n return None\n\n @adapt_interface_name\n def no_isis_interface(self, isis_instance, interface):\n out = self.config(\n lines=['no interface {}'.format(interface)],\n parents=['router isis {}'.format(isis_instance)])\n return not 
self._has_cli_cmd_failed(out)\n","sub_path":"tests/common/devices/cisco.py","file_name":"cisco.py","file_ext":"py","file_size_in_byte":14492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227907602","text":"# -*- coding: utf-8 -*-\nimport time\nimport codecs\nimport math\nf_vectors = codecs.open('text_output_standard.txt', 'r', 'utf8')\nf_results = open('min_resWordsWord2vec.txt', 'w')\nf_group = codecs.open('phrases(200,250).txt', 'r', 'utf8')\ngroup = f_group.read().split()\ndict = {}\nfor i in range(len(group)//4):\n dict[group[4 * i + 1] + '_' + group[4 * i + 2]] = 0\n dict[group[4 * i + 1]] = 0\n dict[group[4 * i + 2]] = 0\nword_vector = f_vectors.readline().split()\nstart = time.time()\nwhile len(word_vector) != 0:\n if word_vector[0] in dict:\n dict[word_vector[0]] = word_vector[1:]\n word_vector = f_vectors.readline().split()\nprint (\"--- %s seconds ---\" % (time.time() - start))\nresults = {}\nfor i in range(len(group)//4):\n sq1, sq2, sq3 = 0.0, 0.0, 0.0\n #vector1 = dict[group[4 * i + 1] + '_' + group[i * 4 + 2]]\n vector2 = dict[group[4 * i + 1]]\n vector3 = dict[group[4 * i + 2]]\n if vector2 == 0 or vector3 == 0:\n results[group[4 * i] + ' ' + group[4 * i + 1] + ' ' + group[i * 4 + 2]] = 0.0\n continue\n for j in range(len(vector2)):\n #sq1 += float(vector1[j]) ** 2\n sq2 += float(vector2[j]) ** 2\n sq3 += float(vector3[j]) ** 2\n #sq1 = math.sqrt(sq1)\n sq2, sq3 = math.sqrt(sq2), math.sqrt(sq3)\n sum = 0.0\n sum_vector= 0.0\n for j in range(len(vector2)):\n sum += (float(vector2[j]) / sq2 + float(vector3[j]) / sq3) ** 2\n #sum_vector += float(vector1[j]) * (float(vector2[j]) / sq2 + float(vector3[j]) / sq3)\n sum_vector += float(vector2[j]) * float(vector3[j])\n #f_results.write(words_group[3 * i] + u' : ' + str(sum_vector/ (sq1 * math.sqrt(sum))) + '\\n')\n results[group[4 * i] + ' ' + group[4 * i + 1] + ' ' + group[i * 4 + 2]] = math.fabs(sum_vector/ (math.sqrt(sum)))\nfor i in sorted(results, key=results.__getitem__, reverse=False):\n f_results.write(str(i) + ' ' + str(results[i]) + '\\n')\nf_vectors.close()\nf_results.close()\nf_group.close()","sub_path":"src/2_step.py","file_name":"2_step.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"317682374","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef dl_img(user, current, limit):\n images = []\n while current <= limit:\n url = 'http://%s.tumblr.com/mobile/page/%d' % (user, current)\n source_code = requests.get(url)\n raw_text = source_code.text\n soup = BeautifulSoup(raw_text)\n for image in soup.find_all('img'):\n src = image.get('src')\n images.append(src)\n if not os.path.exists(user):\n os.makedirs(user)\n for image in images:\n dl = requests.get(image)\n content = dl.content\n fo = open('%s/%s' % (user, image[-34:]), 'wb')\n fo.write(content)\n fo.close()\n current += 1\ndef dl_txt(user, current, limit):\n text = []\n while current <= limit:\n url = 'http://%s.tumblr.com/mobile/page/%d' % (user, current)\n source_code = requests.get(url)\n raw_text = source_code.text\n soup = BeautifulSoup(raw_text)\n if not os.path.exists(user):\n os.makedirs(user)\n content = soup.get_text()\n fo = open('%s/%d.txt' % (user, current), 'w', encoding = 'utf8')\n fo.write(content)\n fo.close()\n current += 1\ndef num_pages(user):\n num = 0\n page = 1\n while True:\n url = 'http://%s.tumblr.com/mobile/page/%d' % (user, page)\n source_code = requests.get(url)\n 
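# a page with no <img> tags means we have run past the last page of the blog\n        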
raw_text = source_code.text\n soup = BeautifulSoup(raw_text)\n if len(soup.find_all('img')) > 0:\n num += 1\n else:\n break\n page += 1\n return num\n \n \n","sub_path":"crawlblr.py","file_name":"crawlblr.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"620388897","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport pandas as pd\nimport matplotlib\nimport getpass\nimport uuid\nimport argparse\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.colors as clr\nimport numpy as np\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nimport scipy\nimport glob\nimport datetime\n\n\"\"\"Plot heatmap given data frame\n\n\n\"\"\"\ncurrent_file_base_name = __file__.split(\"/\")[-1].split(\".\")[0]\n\ndef my_args():\n\tusername = getpass.getuser()\n\taddon_string = str(uuid.uuid4()).split(\"-\")[-1]\n\tmainParser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,description=\"plot heatmap given dataframe.\")\n\tmainParser.add_argument('-f',\"--input\", help=\"data table input\",required=True)\n\tmainParser.add_argument(\"--sort_by\", help=\"usually this column should be logFC\",default=None)\n\tmainParser.add_argument('-c1',\"--value_cutoff_max\", help=\"subset genes by filtering out values above the cutoff, usually it should be p-value, input should be: column_name,value\",default=None)\n\tmainParser.add_argument('-c2',\"--value_cutoff_min\", help=\"subset genes by filtering out values below the cutoff, usually it should be log p-value, input should be: column_name,value\",default=None)\n\tmainParser.add_argument(\"--index_by\", help=\"if the first column is not the correct index, which one?\",default=None)\n\tmainParser.add_argument(\"--remove_cols\",default=\"\")\n\tmainParser.add_argument('-o',\"--output\", help=\"output table name\",default=\"gene_exp_heatmap.\"+username+\"_\"+str(datetime.date.today()))\n\tmainParser.add_argument(\"-pdf\", help=\"plot pdf instead of png, can be slower for large dataset\", action='store_true')\n\tmainParser.add_argument(\"-W\", \"--width\", help=\"Figure width, maximal use 200, usually 8 to 20\",type=int,required=True)\n\tmainParser.add_argument(\"-H\", \"--height\", help=\"Figure height, maximal use 200, usually 10 to 50\",type=int,required=True)\n\tmainParser.add_argument(\"--fontsize\", help=\"you can choose from 8 to 20 \",type=int,default=10)\n\tmainParser.add_argument(\"--linewidths\", help=\"you can choose from 0, 0.1 \",type=float,default=0.1)\n\tmainParser.add_argument(\"--log2_transform\", help=\"input values will be log2 transformed\", action='store_true')\n\tmainParser.add_argument(\"--show_name\", help=\"by default, >100 genes, name will not be shown, this option enforce to show name\", action='store_true')\n\t\n\t\n\n\t##------- add parameters above ---------------------\n\targs = mainParser.parse_args()\t\n\treturn args\ndef guess_sep(x):\n\twith open(x) as f:\n\t\tfor line in f:\n\t\t\ttmp1 = len(line.strip().split(\",\"))\n\t\t\ttmp2 = len(line.strip().split(\"\\t\"))\n\t\t\t# print (tmp1,tmp2)\n\t\t\tif tmp1 > tmp2:\n\t\t\t\treturn \",\"\n\t\t\tif tmp2 > tmp1: \n\t\t\t\treturn \"\\t\"\n\t\t\telse:\n\t\t\t\tprint (\"Can't determine the separator. 
Please input manually\")\n\t\t\t\texit()\n\t\t\t\t\n\ndef plot_n_save(df,args):\n\tdf_z = df.T\n\tfor c in df_z:\n\t\tdf_z[c] = (df_z[c] - df_z[c].mean())/df_z[c].std()\n\tplt.figure(figsize=(args.width,args.height))\n\tif df_z.T.shape[0]<=100 or args.show_name:\n\t\tg=sns.heatmap(df_z.T,vmin=-2,vmax=2,cmap=\"RdBu_r\",linewidths=args.linewidths,yticklabels=df_z.columns.tolist())\n\t\tg.set_yticklabels(g.get_ymajorticklabels(), fontsize = args.fontsize)\n\telse:\n\t\tg=sns.heatmap(df_z.T,vmin=-2,vmax=2,cmap=\"RdBu_r\",yticklabels=\"\")\n\tif args.pdf:\n\t\tplt.savefig(\"%s_heatmap.pdf\"%(args.output), bbox_inches='tight')\n\telse:\n\t\tplt.savefig(\"%s_heatmap.png\"%(args.output), bbox_inches='tight')\n\ndef main():\n\n\targs = my_args()\n\t\"\"\"below is the same for very dataframe scripts\n\t\n\tby default our input dataframe is bed format, which is \\t separated with no header no index \n\t\n\t\"\"\"\n\n\tdf = pd.read_csv(args.input,sep=guess_sep(args.input),index_col=0)\n\tif args.value_cutoff_max:\n\t\tname,value = args.value_cutoff_max.split(\",\")\n\t\tvalue = float(value)\n\t\tdf = df[df[name]<=value]\n\t\tdf = df.drop([name],axis=1)\n\tif args.value_cutoff_min:\n\t\tname,value = args.value_cutoff_min.split(\",\")\n\t\tvalue = float(value)\n\t\tdf = df[df[name]>=value]\n\t\tdf = df.drop([name],axis=1)\n\tif args.index_by:\n\t\tdf = df.set_index(args.index_by)\n\tif args.args.sort_by:\n\t\tdf = df.sort_values(args.sort_by)\n\t\tdf = df.drop([args.sort_by],axis=1)\n\t#-------------- pre-processing ----------------------\n\tremove_cols = str(args.remove_cols).split(\",\")\n\ttry:\n\t\tremove_cols.remove(\"\")\n\texcept:\n\t\tpass\n\tif len(remove_cols)>0:\n\t\tdf = df.drop(remove_cols,axis=1)\n\tif args.log2_transform:\n\t\tdf = df.transform(lambda x:np.log2(x+1))\n\tprint (\"ploting size:\",df.shape)\n\tplot_n_save(df,args)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n\n","sub_path":"bin/plot_gene_exp_heatmap.py","file_name":"plot_gene_exp_heatmap.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401636093","text":"n = int(input())\na = list(map(int, input().split()))\ncash = 1000\nbill = 0\ntable = []\n\nfor i in range(n-1):\n if a[i] > a[i+1]:\n table.append(False)\n elif a[i] == a[i+1]:\n table.append(\"-\")\n else:\n table.append(True)\n# print(table)\nif table[0] == True:\n bill, cash = divmod(cash, a[0])\n# print(cash, bill)\ni = 0\nwhile i < n-1:\n if table[i] == True:\n while not table[i] == False:\n i += 1\n if i == n-1:\n break\n cash += bill * a[i]\n bill = 0\n # print(cash, bill)\n else:\n while not table[i] == True:\n i += 1\n if i == n-1:\n break\n bill, cash = divmod(cash, a[i])\n # print(cash, bill)\ncash += bill * a[-1]\nprint(cash)\n","sub_path":"ABC/M-SOLUTIONS2020/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537540820","text":"#! 
python\n\n\"\"\"This module defines functions for parsing C++ enums\"\"\"\n\nimport urllib.request\nimport re\nimport datetime\nfrom tooling.enum_converter import CxxEnum\n\n__author__ = \"Sebastiaan Saarloos\"\n__date__ = datetime.date(2019, 6, 3)\n__status__ = \"Developing\"\n__maintainer__ = \"Isha Geurtsen\"\n\n\ndef get_enum_strings(file_contents: str) -> list:\n \"\"\"Scrapes all the enum strings from a given string\"\"\"\n # Remove all comments from the file.\n file_contents = re.sub(r\"//.*\\n\", \"\", file_contents)\n # Remove redundant spaces\n file_contents = re.sub(r\"\\s+\", \" \", file_contents)\n # enum class regular expression\n pattern = re.compile(\"enum class\\\\s*\\\\w+\\\\s*\\\\:\\\\s*\\\\w+\\\\s*\\\\{[^\\\\{\\\\}]+\\\\};\")\n # Iterate over all the matches and yield them one at a time\n for item in pattern.finditer(file_contents):\n # yield hands back a single item; execution resumes here when the next one is requested\n yield item.group(0).replace(\"\\n\", \"\")\n\n\ndef get_enum_definition(enum_string: str) -> CxxEnum:\n \"\"\"Converts an enum definition represented as a string to a CxxEnum object\"\"\"\n # Get the name of the enum\n name = re.findall(r\"(?<=enum class )\\w+\", enum_string)[0]\n # Get the defined type of the enum\n inner_type = re.findall(r\":\\s*(\\w+)\", enum_string)[0]\n # Get all the items that are defined inside the enum class\n inner_items = re.findall(\n r\"\\{([^{}]+)\\}\", enum_string)[0].replace(\" \", \"\").split(\",\")\n return CxxEnum(name, inner_type, dict(zip(inner_items, [None]*len(inner_items))))\n\n\ndef get_github_file(repository: str, branch: str, file: str):\n \"\"\"Helper function to get a github file from a given repository\"\"\"\n url = (\n \"https://raw.githubusercontent.com/R2D2-2019/\" +\n f\"{repository}/{branch}/{file}\")\n return (\n urllib.request.urlopen(url)\n .read()\n .decode(\"utf-8\")\n )\n\n\ndef get_enum_definitions() -> list:\n \"\"\"Collect the enums defined in frame_enums.hpp in the internal_communication repository\"\"\"\n # Get the file from GitHub\n file_content = get_github_file(\n \"internal_communication\",\n \"master\",\n \"code/headers/frame_enums.hpp\"\n )\n # Split the file into separate enum strings\n enum_strings = get_enum_strings(file_content)\n # Loop over all enum strings\n for enum_string in enum_strings:\n yield get_enum_definition(enum_string)\n\n\nif __name__ == \"__main__\":\n for definition in get_enum_definitions():\n print(definition)\n","sub_path":"tooling/enum_parser.py","file_name":"enum_parser.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"473860626","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 27 11:25:46 2021\r\n\r\n@author: ChrisLe\r\n\"\"\"\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import QMainWindow, QAction, qApp, QApplication\r\nfrom PyQt5.QtGui import QIcon\r\n\r\nclass Example(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n \r\n def initUI(self):\r\n exitAct = QAction (QIcon ('exit.png'), '&Exit', self)\r\n exitAct.setShortcut('Ctrl+Q')\r\n exitAct.setStatusTip('Exit the application')\r\n exitAct.triggered.connect(qApp.quit)\r\n \r\n self.statusBar()\r\n menubar = self.menuBar()\r\n fileMenu = menubar.addMenu('&File')\r\n fileMenu.addAction(exitAct)\r\n \r\n self.setGeometry(400, 400, 350, 250)\r\n self.setWindowTitle('Simple Menu')\r\n self.show()\r\n \r\ndef main():\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n 
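# app.exec_() runs the Qt event loop; its exit status is forwarded to the shell via sys.exit\r\n    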
sys.exit(app.exec_())\r\n\r\nif __name__=='__main__':\r\n main()\r\n \r\n ","sub_path":"PyQt5Tutorial/menus-barrasHerramientas/ej8.py","file_name":"ej8.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479238212","text":"from RemoveDuplicatesfromSortedList import ListNode\nclass SortLi:\n def sortList(self, head: ListNode) -> ListNode:\n lis=[]\n while head:\n lis.append(head.val)\n head=head.next\n lis.sort()\n p=ListNode(0)\n s=p\n for i in range(len(lis)):\n p.next=ListNode(lis[i])\n p=p.next\n p.next=None\n return s.next\n","sub_path":"SolutionsPY/SortList.py","file_name":"SortList.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"22923430","text":"import random\n\ndef weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i\n\nfor i in range(10):\n weights= [0.2,0.3,0.5]\n idx = weighted_choice(weights) # returns the index of the chosen weight\n print (idx)\n\n#zipcodef = open(\"zipcodes.txt\",\"r\")\n#for lines in zipcodef:\n# values = lines.split()\n# number = random.randrange(int(values[1]),int(values[2]))\n# print (values[0],number)\n","sub_path":"rand03.py","file_name":"rand03.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495253167","text":"#!/usr/bin/python\n\n# (1) http://rosalind.info/problems/iprb/\n# (2) https://en.wikipedia.org/wiki/Punnett_square\n\n\ndef punnett_square(genotype_a, genotype_b):\n grid = []\n for i in genotype_a:\n for j in genotype_b:\n grid.append(i+j)\n\n total = 4\n dominant = sum(1 for l in grid if l.isupper())\n recessive = sum(1 for l in grid if l.islower())\n heterozygous = total-dominant-recessive\n return {\"not-recessive\": (dominant/4.0)+(heterozygous/4.0), \"recessive\": recessive/4.0}\n\n\ndef comb(elements):\n return (elements * (elements - 1))/2\n\n\ndef mendels_first_law(k, m, n):\n \"\"\"\n k individuals are homozygous dominant, m are heterozygous, and n are homozygous recessive.\n\n >>> mendels_first_law(2,2,2)\n '0.78333'\n \"\"\"\n ps = punnett_square \n a = ps(\"AA\", \"AA\")['not-recessive'] * comb(k)\n b = ps(\"AA\", \"Bb\")['not-recessive'] * (k*m)\n c = ps(\"AA\", \"cc\")['not-recessive'] * (k*n)\n d = ps(\"Bb\", \"Bb\")['not-recessive'] * comb(m)\n e = ps(\"Bb\", \"cc\")['not-recessive'] * (m*n)\n f = ps(\"cc\", \"cc\")['not-recessive'] * comb(n)\n return \"%.5f\" % ((a+b+c+d+e+f)/comb(k+m+n))\n\n\nif __name__ == \"__main__\":\n print(\"Running test case...\")\n import doctest\n doctest.testmod()","sub_path":"iprb/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643438328","text":"#!/usr/bin/python\n\n# Basic Ship Class\n\nclass ship:\n def __init__(self):\n # What is the name of the ship\n self.name = \"\"\n # How much Exp does this ship have\n self.exp = \"\"\n # What level is the ship\n self.level = \"\"\n # Max Level\n self.max_level = \"\"\n # What id number is the ship\n self.ident = \"\"\n # Rarity\n self.rarity = \"\"\n # How Many HP the ship has\n self.shield_points = \"\"\n # How much HP is recovered when heal gems are matched\n self.repair_rate = 
\"\"\n # Percentage of incoming damage reduced\n self.damage_reduction = \"\"\n # Type of damage that gets reduced\n self.damage_reduction_type = \"\"\n # How much damage the ship does when damage gems are matched\n self.attack_value = \"\"\n # How many turns between skill deployment\n self.speed = \"\"\n # Skill ability the ship possesses\n self.skill = \"\"\n # How much damage is increased or decreased via modifications\n self.weapon_mod = \"\"\n # How much armor is increased or decreased via modifications\n self.armor_mod = \"\"\n # How much shield is increased or decreased via modifications\n self.shield_mod = \"\"\n # How much skills speed is increased or decreased via modifications\n self.engine_mod = \"\"\n # What type of damage does this ship do\n self.damage_type = \"\"\n # How many weapon hardpoints does this ship have\n self.weapon_hardpoint = \"\"\n # How many armor hardpoints does this ship have\n self.armor_hardpoint = \"\"\n # How many shield hardpoints does this ship have\n self.shield_hardpoint = \"\"\n # How many engine hardpoints does this have\n self.engine_hardpoint = \"\"\n # What skill does this ship confer when assigned as wing leader\n self.wing_leader_skill = \"\"","sub_path":"Classes/ships.py","file_name":"ships.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362415939","text":"\n# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\n\nresults = []\nurl_list = []\nresponse = requests.get('https://www.borenius.com/references/')\nsoup = BeautifulSoup(response.content, \"html.parser\")\nurl_base = 'https://www.borenius.com'\nall_anchors = soup.find_all('a', href=True)\n\nfor single_anchor in all_anchors:\n link = single_anchor['href']\n if '/' == link[:1] and '//' != link[:2]:\n link = url_base + link\n if link not in url_list:\n url_list.append(link)\n elif 'www.borenius.com' in link[:len(url_base)]:\n if link not in url_list:\n url_list.append(link)\n\nfor single_url in url_list:\n try:\n response = requests.get(single_url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n page_title = soup.find_all('h1')[0].text\n page_content = soup.find_all(class_='col-sm-12 col-md-6 content-container')[0].text\n\n parsed_results = {}\n parsed_results['title'] = page_title\n parsed_results['content'] = page_content\n parsed_results['link'] = single_url\n results.append(parsed_results)\n except:\n print ('Crawling of URL ' + single_url + ' has failed')\n continue\n\nprint (json.dumps(results, indent=4))\nprint (len(results))\n\n","sub_path":"automatic_crawler_for_url_www_borenius_com.py","file_name":"automatic_crawler_for_url_www_borenius_com.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429764013","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/getml/hyperopt/load_hyperopt.py\n# Compiled at: 2020-03-16 07:21:38\n# Size of source mod 2**32: 10728 bytes\nimport json\nimport getml.communication as comm\nimport getml.models as models\nfrom .hyperopt import RandomSearch, LatinHypercubeSearch, GaussianHyperparameterSearch\n\ndef _decode_hyperopt(rawStr):\n \"\"\"A custom decoder function for\n :class:`~getml.hyperopt.RandomSearch`,\n :class:`~getml.hyperopt.LatinHypercubeSearch`, and\n 
:class:`~getml.hyperopt.GaussianHyperparameterSearch`.\n\n Args:\n rawStr (str): string containing a valid JSON message.\n\n Raises:\n KeyError: If not all required fields are present in `rawStr`\n to reconstruct a hyperparameter optimization search.\n ValueError: If not all keys in `rawStr` have a trailing\n underscore.\n TypeError: If `rawStr` is not of type :py:class:`dict`.\n\n Returns: \n Union[:class:`~getml.hyperopt.RandomSearch`,:class:`~getml.hyperopt.LatinHypercubeSearch`,:class:`~getml.hyperopt.GaussianHyperparameterSearch`]\n \"\"\"\n if type(rawStr) is not str:\n raise TypeError('_decode_hyperopt is expecting a str containing a valid JSON as input')\n else:\n rawDict = json.loads(rawStr)\n requiredFields = {\n 'name_', 'param_space_', 'session_name_',\n 'n_iter_', 'ratio_iter_',\n 'optimization_algorithm_',\n 'optimization_burn_in_algorithm_',\n 'optimization_burn_ins_',\n 'seed_',\n 'surrogate_burn_in_algorithm_',\n 'gaussian_kernel_',\n 'gaussian_optimization_algorithm_',\n 'gaussian_optimization_burn_in_algorithm_',\n 'gaussian_optimization_burn_ins_'}\n if set(rawDict.keys()).intersection(requiredFields) != requiredFields:\n raise KeyError('Not enough information contained in the response to reconstruct the hyperparameter optimization: ' + str(rawDict.keys()))\n decodingDict = dict()\n for kkey in rawDict:\n if kkey[(len(kkey) - 1)] != '_':\n raise ValueError('All keys in the JSON must have a trailing underscore.')\n elif kkey == 'name_':\n decodingDict['model'] = models.load_model(rawDict[kkey])\n elif kkey == 'param_space_':\n param_space = dict()\n for ddimension in rawDict[kkey]:\n param_space[ddimension[:len(ddimension) - 1]] = rawDict[kkey][ddimension]\n\n decodingDict['param_space'] = param_space\n else:\n if kkey in ('peripheral_names_', 'population_training_name_', 'population_validation_name_'):\n continue\n decodingDict[kkey[:len(kkey) - 1]] = rawDict[kkey]\n\n if decodingDict['ratio_iter'] == 1 and decodingDict['surrogate_burn_in_algorithm'] == 'latinHypercube':\n h = LatinHypercubeSearch(model=(decodingDict['model']), param_space=(decodingDict['param_space']),\n seed=(decodingDict['seed']),\n session_name=(decodingDict['session_name']),\n n_iter=(decodingDict['n_iter']))\n else:\n if decodingDict['ratio_iter'] == 1 and decodingDict['surrogate_burn_in_algorithm'] == 'random':\n h = RandomSearch(model=(decodingDict['model']), param_space=(decodingDict['param_space']),\n seed=(decodingDict['seed']),\n session_name=(decodingDict['session_name']),\n n_iter=(decodingDict['n_iter']))\n else:\n h = GaussianHyperparameterSearch(model=(decodingDict['model']), param_space=(decodingDict['param_space']),\n session_name=(decodingDict['session_name']),\n ratio_iter=(decodingDict['ratio_iter']),\n n_iter=(decodingDict['n_iter']),\n optimization_algorithm=(decodingDict['optimization_algorithm']),\n optimization_burn_in_algorithm=(decodingDict['optimization_burn_in_algorithm']),\n optimization_burn_ins=(decodingDict['optimization_burn_ins']),\n seed=(decodingDict['seed']),\n surrogate_burn_in_algorithm=(decodingDict['surrogate_burn_in_algorithm']),\n gaussian_kernel=(decodingDict['gaussian_kernel']),\n gaussian_optimization_algorithm=(decodingDict['gaussian_optimization_algorithm']),\n gaussian_optimization_burn_in_algorithm=(decodingDict['gaussian_optimization_burn_in_algorithm']),\n gaussian_optimization_burn_ins=(decodingDict['gaussian_optimization_burn_ins']))\n if 'score' in decodingDict:\n h.score = decodingDict['score']\n return h\n\n\ndef load_hyperopt(session_name):\n 
\"\"\"Loads a hyperparameter optimization run into the Python API.\n\n Args:\n session_name (string): Unique identifier of a particular\n hyperparameter optimization run.\n\n Returns:\n Union[:class:`~getml.hyperopt.RandomSearch`, :class:`~getml.hyperopt.LatinHypercubeSearch`, :class:`~getml.hyperopt.GaussianHyperparameterSearch`]\n\n Raises:\n IOError: If the messages received from the engine is not a\n valid JSON.\n TypeError: if `session_name` is not a string.\n ValueError: if `session_name` is an empty string.\n\n \"\"\"\n if type(session_name) is not string:\n raise TypeError('Only strings are allowed as session_name!')\n elif session_name == '':\n raise ValueError('The session_name must not be empty!')\n else:\n print('Not supported yet!')\n return\n h = _decode_hyperopt(msg)\n multithreaded = False\n if h.model.num_threads > 1:\n multithreaded = True\n if h.model.predictor is not None:\n if isinstance(h.model.predictor, predictors.XGBoostClassifier) or isinstance(h.model.predictor, predictors.XGBoostRegressor):\n if h.model.predictor.n_jobs > 1:\n multithreaded = True\n if not h.model.feature_selector is not None or isinstance(h.model.feature_selector, predictors.XGBoostClassifier) or isinstance(h.model.feature_selector, predictors.XGBoostRegressor):\n if h.model.feature_selector.n_jobs > 1:\n multithreaded = True\n if multithreaded:\n h.model.seed = None","sub_path":"pycfiles/getml-0.10.0-py3.7/load_hyperopt.cpython-37.py","file_name":"load_hyperopt.cpython-37.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296379012","text":"count = 0\ntotal = 0\navg = 0\n\nwhile True:\n entry = input('Enter a number: ')\n if entry == 'done':\n break\n try:\n total = int(entry) + total\n except Exception as e:\n print('Invalid input')\n continue\n count = count + 1\n\nif count != 0:\n avg = total / count\n\nprint(total, count, avg)\n","sub_path":"course1/week5/ex_05_01.py","file_name":"ex_05_01.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92415597","text":"from base.basetestcase import BaseTestCase\nimport unittest\nfrom selenium.webdriver.support.ui import Select\n\n\nclass CookiesTest(BaseTestCase):\n URL = 'http://demo-store.seleniumacademy.com/'\n ENV = 'LINUX'\n\n def test_store_cookie(self):\n driver = self.driver\n # get the your language dropdown as instance of select class\n select_language = Select(self.driver.\n find_element_by_id('select-language'))\n\n # check default selected option is english\n self.assertEqual('English', select_language.first_selected_option.text)\n # store cookies should be none\n store_cookie = driver.get_cookie('store')\n self.assertEqual(None, store_cookie)\n\n # select an option using select_by_visible text\n select_language.select_by_visible_text('French')\n\n # store cookie should be populated with selected country\n store_cookie = driver.get_cookie('store')['value']\n self.assertEqual('french', store_cookie)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"chapter_9/cookie_test.py","file_name":"cookie_test.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519827698","text":"import os\n\nfrom flask import render_template, send_from_directory, url_for, flash, redirect, request, abort\nfrom flask_login import login_user, current_user, 
logout_user, login_required\nfrom Course import app, db, bcrypt, qrcode\nfrom Course.forms import LoginForm, CompanyRegistrationForm, UserRegistrationForm, QuestionForm, ClassForm\nfrom Course.models import User, Question, Clas, UserClas, Company\nimport random\n\n\n@app.route('/')\ndef index():\n return redirect(url_for('home'))\n\n\n@app.route('/home')\n@login_required\ndef home():\n c = Clas.query.filter_by(company_id=current_user.company_id)\n mental = c.filter_by(category='Mental').all()\n physical = c.filter_by(category='Physical').all()\n verbal = c.filter_by(category='Verbal').all()\n if current_user.role == 'admin':\n return redirect(url_for('admin'))\n return render_template('home.html', c=[mental, physical, verbal])\n\n\n@app.route('/class')\n@login_required\ndef classs():\n records = current_user.clases\n return render_template('class.html', title='Class', classes=records)\n\n\n@app.route('/intro/<int:class_id>', methods=['GET', 'POST'])\n@login_required\ndef intro(class_id):\n course = Clas.query.filter_by(id=class_id).first()\n mark = -1\n enrolled = False\n\n for c in course.users:\n if c.user_id == current_user.id:\n enrolled = True\n mark = c.mark\n return render_template(\n 'intro.html',\n c=course,\n enrolled=enrolled,\n mark=mark,\n courses=Clas.query.all())\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user:\n if bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n if user.username == 'super_admin':\n return redirect(url_for('super_admin'))\n if next_page:\n return redirect(next_page)\n else:\n return redirect(url_for('home'))\n else:\n flash('Login Unsuccessful. Please check your password', 'danger')\n else:\n flash('Login Unsuccessful. 
Username does not exist', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(\n os.path.join(\n app.root_path,\n 'static'),\n 'favicon.ico',\n mimetype='image/vnd.microsoft.icon')\n\n\n@app.route(\"/class/<int:class_id>\", methods=['GET'])\n@login_required\ndef get_class(class_id):\n c = Clas.query.get_or_404(class_id)\n enrolled = False\n for user_class in c.users:\n if current_user.id == user_class.user_id:\n enrolled = True\n if not enrolled:\n user = User.query.filter_by(id=current_user.id).first()\n enroll = UserClas(mark=0, user=user)\n c.users.append(enroll)\n db.session.commit()\n flash(\n f'You have been successfully enrolled in {c.description}',\n 'success')\n return render_template('video.html', title='Class', c=c)\n\n\n@app.route(\"/admin\", methods=['GET', 'POST'])\n@login_required\ndef admin():\n if current_user.role != 'admin':\n abort(403)\n users = User.query.filter_by(company_id=current_user.company_id).all()\n form = UserRegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(\n form.password.data).decode('utf8')\n user = User(\n username=form.username.data,\n password=hashed_password,\n role='student',\n company_id=current_user.company_id)\n db.session.add(user)\n db.session.commit()\n return redirect(url_for('admin'))\n return render_template(\n 'user_admin.html',\n title='admin',\n form=form,\n users=users)\n\n\n@app.route(\"/test_admin\", methods=['GET', 'POST'])\n@login_required\ndef test_admin():\n if current_user.role != 'admin':\n abort(403)\n tests = Clas.query.filter_by(company_id=current_user.company_id).all()\n form = ClassForm()\n if form.validate_on_submit():\n c = Clas(\n category=form.category.data,\n description=form.description.data,\n introduction=form.introduction.data,\n content=form.content.data,\n outcome=form.outcome.data,\n lecturer=form.lecturer.data,\n share=form.share.data,\n video_link=form.video_link.data,\n limit_time=30,\n company_id=current_user.company_id)\n db.session.add(c)\n db.session.commit()\n return redirect(url_for('test_admin'))\n return render_template(\n 'test_admin.html',\n title='admin',\n tests=tests,\n form=form)\n\n\n@app.route(\"/test\", methods=['POST', 'GET'])\n@login_required\ndef exam():\n class_id = request.args.get('class_id', default=1, type=int)\n typ = request.args.get('type', default='', type=str)\n current_year = 2021\n current_semester = 2\n q = Question.query.filter_by(\n clas_id=class_id,\n year=current_year,\n semester=current_semester).all()[:5]\n time_limit = Clas.query.filter_by(id=class_id).first().limit_time\n if typ == 'mock':\n # SQLAlchemy cannot translate Python 'and'; exclude the current offering explicitly\n q = Question.query.filter_by(clas_id=class_id).filter(\n ~((Question.year == current_year) & (Question.semester == current_semester))).all()\n # q = random.choices(q, k=5)\n q = q[:5]\n return render_template(\n 'test.html',\n title='Test',\n q=q,\n class_id=class_id,\n type=typ,\n time=time_limit)\n\n\n@app.route(\"/super_admin\", methods=['GET', 'POST'])\n@login_required\ndef super_admin():\n c = Company.query.all()\n if current_user.username != 'super_admin':\n abort(403)\n form = CompanyRegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(\n form.password.data).decode('utf8')\n company = Company(name=form.username.data)\n db.session.add(company)\n db.session.commit()\n company_id = Company.query.filter_by(\n 
name=form.username.data).first().id\n user = User(\n username=form.username.data,\n password=hashed_password,\n role='admin',\n company_id=company_id)\n db.session.add(user)\n db.session.commit()\n return redirect(url_for('logout'))\n return render_template('register.html', title='Register', form=form, c=c)\n\n\n@app.route(\"/change_time/<int:clas_id>/<int:minutes>\", methods=['GET', 'POST'])\ndef change_time(clas_id, minutes):\n course = Clas.query.get_or_404(clas_id)\n if current_user.role != 'admin':\n abort(403)\n course.limit_time = minutes\n db.session.commit()\n return redirect(url_for('test_admin'))\n\n\n@app.route(\"/qrcode_login\")\ndef qrcode_login():\n abort(401)\n\n\n@app.route(\"/question_admin\", methods=['GET', 'POST'])\ndef question_admin():\n if current_user.role != 'admin':\n abort(403)\n clas_id = request.args.get('class_id', default=-1, type=int)\n semester = request.args.get('semester', default=-1, type=int)\n year = request.args.get('year', default=-1, type=int)\n questions = Question.query\n if clas_id != -1:\n questions = questions.filter_by(clas_id=clas_id)\n if year != -1:\n questions = questions.filter_by(year=year)\n if semester != -1:\n questions = questions.filter_by(semester=semester)\n\n questions = questions.all()\n form = QuestionForm()\n form.course.choices = list(\n map(lambda x: (x.id, x.description), current_user.owner.clases))\n if form.validate_on_submit():\n q = Question(\n question_type=form.question_type.data,\n year=form.year.data,\n semester=form.semester.data,\n question_text=form.question_text.data,\n clas_id=form.course.data)\n db.session.add(q)\n db.session.commit()\n flash('Question Added', 'success')\n return redirect(\n url_for(\n 'question_admin',\n class_id=clas_id,\n year=year,\n semester=semester))\n return render_template(\n 'question_admin.html',\n title='admin',\n form=form,\n questions=questions,\n dyear=year,\n dsemester=semester,\n dclas_id=clas_id,\n Q=Question)\n\n\n@app.route('/finish_course/<int:student_id>/<int:course_id>')\n@login_required\ndef finish_course(student_id, course_id):\n user_class = UserClas.query.filter_by(\n user_id=student_id, clas_id=course_id).first()\n user_class.mark = random.randint(60, 100)\n db.session.commit()\n return redirect(url_for('classs'))\n","sub_path":"Course/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34960761","text":"def CheckForTrainStation(station):\r\n\r\n\t#f = open('stops.txt', 'r')\r\n\tstationLocation = []\r\n\t\r\n\twith open('stops.txt', 'r') as f:\r\n\t\tfor line in f:\r\n\t\t\twords = line.split(',')\r\n\r\n\t\t\tif station in words[1]:\r\n\t\t\t\tstationLocation.append(words[2])\r\n\t\t\t\tstationLocation.append(words[3])\r\n\t\t\t\t\r\n\treturn stationLocation","sub_path":"CheckForTrainStation.py","file_name":"CheckForTrainStation.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130900137","text":"from functools import reduce\nfrom collections import Counter\n\n\nimport itertools\nimport operator\n\ndef most_common(L):\n # get an iterable of (item, iterable) pairs\n SL = sorted((x, i) for i, x in enumerate(L))\n # print 'SL:', SL\n groups = itertools.groupby(SL, key=operator.itemgetter(0))\n # auxiliary function to get \"quality\" for an item\n def _auxfun(g):\n item, iterable = g\n count = 0\n min_index = len(L)\n for _, where in iterable:\n count += 1\n min_index = min(min_index, where)\n # print 
'item %r, count %r, minind %r' % (item, count, min_index)\n return count, -min_index\n # pick the highest-count/earliest item\n return max(groups, key=_auxfun)[0]\n\nfd = open('data.txt', 'r')\nbook = fd.read().lower()\ncount=0\nfd.close()\n\n\n\nprint(\"Count of proletariat: \" + str(book.count('proletariat')))\nprint(\"Count of 'working class': \" + str(book.count('working class')))\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nwords = []\ncurrent = \"\"\nfor char in book:\n if char in alphabet:\n current+=char\n elif current!=\"\":\n words.append(current)\n current=\"\"\n\n#print(words)\n\n# count=0\n# for x in words:\n# if x==\"proletariat\":\n# count+=1\n# print(count)\n\ncommon_words = [\"the\", \"and\", \"to\", \"by\", \"is\", \"in\", \"of\", \"a\", \"that\", \"it\", \"them\"]\nmax_word = \"\"\nmax_count = 0\nfor x in words:\n if words.count(x)>max_count and x not in common_words:\n max_count = words.count(x)\n max_word = x\n\n\n# use a name that does not shadow the builtin 'list'\ncounts=[[x,words.count(x)] for x in words if x not in common_words]\n#print(most_common(counts))\n\nword=[x for x in counts if x[1]==max_count]\nw=word[0][0]\nwc=word[0][1]\nprint(\"Max uncommon word: \" + str(w) + \"; Count: \" + str(wc))\n","sub_path":"20_cw/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"493169979","text":"from ipykernel.kernelbase import Kernel\nfrom pexpect import replwrap, EOF, spawn\n\nfrom subprocess import check_output\n\nfrom os import path\nimport re\nimport signal\nimport bisect\n\nfrom codecs import open\ndef readfile(filename):\n with open(filename, encoding='utf-8') as f:\n return f.read()\n__version__ = readfile(path.join(path.dirname(__file__), 'VERSION'));\nversion_pat = re.compile(r'version (\\d+(\\.\\d+)+)')\n\n\n\nclass MagmaKernel(Kernel):\n implementation = 'magma_kernel'\n implementation_version = __version__\n\n\n\n language_info = {'name': 'magma',\n 'codemirror_mode': 'magma',\n 'mimetype': 'text/x-sh',\n 'file_extension': '.mgm'}\n _banner = None\n\n @property\n def banner(self):\n if self._banner is None:\n self._banner = check_output('magma').decode('utf-8').split('\\n')[0]\n return self._banner\n\n def __init__(self, **kwargs):\n Kernel.__init__(self, **kwargs)\n self._start_magma()\n\n def _start_magma(self):\n # Signal handlers are inherited by forked processes, and we can't easily\n # reset it from the subprocess. 
Since kernelapp ignores SIGINT except in\n # message handlers, we need to temporarily reset the SIGINT handler here\n # so that bash and its children are interruptible.\n sig = signal.signal(signal.SIGINT, signal.SIG_DFL)\n try:\n magma = spawn('magma', echo=False, encoding='utf-8')\n magma.expect(u'> ')\n magma.sendline(u'SetLineEditor(false);')\n magma.expect(u'> ')\n magma.sendline(u'')\n self.magmawrapper = replwrap.REPLWrapper(magma,\n u'> ', u'SetPrompt(\"{}\");')\n finally:\n signal.signal(signal.SIGINT, sig)\n\n\n def do_execute(self, code, silent, store_history=True,\n user_expressions=None, allow_stdin=False):\n if not code.strip():\n return {'status': 'ok', 'execution_count': self.execution_count,\n 'payload': [], 'user_expressions': {}}\n\n interrupted = False\n try:\n output = self.magmawrapper.run_command(code.rstrip(), timeout=None)\n except KeyboardInterrupt:\n self.magmawrapper.child.sendintr()\n interrupted = True\n self.magmawrapper._expect_prompt()\n output = self.magmawrapper.child.before\n except EOF:\n output = self.magmawrapper.child.before + 'Restarting Magma'\n self._start_magma()\n\n if not silent:\n # Send standard output\n stream_content = {'name': 'stdout', 'text': output}\n self.send_response(self.iopub_socket, 'stream', stream_content)\n\n if interrupted:\n return {'status': 'abort', 'execution_count': self.execution_count}\n\n return {'status': 'ok', 'execution_count': self.execution_count,\n 'payload': [], 'user_expressions': {}}\n\n _magma_builtins = None;\n def do_complete(self, code, cursor_pos):\n code = code[:cursor_pos];\n default = {'matches': [], 'cursor_start': 0,\n 'cursor_end': cursor_pos, 'metadata': dict(),\n 'status': 'ok'};\n if not code or code[-1] == ' ':\n return default;\n\n if self._magma_builtins is None:\n # the list of builtins is generated by:\n # $ magmadoc=/path/to/magma/doc/html\n # $ cat $magmadoc/*.htm | sed -nr \"s/^.*NAME = \\\"([A-Z][[:alnum:]]*)\\\".*$/\\1/p\" | sort > magma-builtins\n from os import path\n F = open(path.join(path.dirname(__file__), \"magma-builtins\"), 'r');\n self._magma_builtins = F.read().split('\\n');\n F.close();\n\n tokens = code.replace(';', ' ').split();\n if not tokens:\n return default;\n token = tokens[-1];\n start = cursor_pos - len(token);\n\n low = bisect.bisect_left(self._magma_builtins, token);\n #very hacky\n high = bisect.bisect_right(self._magma_builtins, token+chr(127), low); \n matches = self._magma_builtins[low:high];\n\n #TODO add global variables\n\n if not matches:\n return default;\n\n return {'matches': matches, 'cursor_start': start,\n 'cursor_end': cursor_pos, 'metadata': dict(),\n 'status': 'ok'};\n\n","sub_path":"magma_kernel/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642451212","text":"import os\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nEvaluation = pd.DataFrame(columns=['Date', 'StartTime', 'EndTime'])\ndata_list = []\nStartTime = []\nEndTime = []\n\n\ndef read_txt(file_path):\n i = 0\n i_list = []\n with open(file_path, 'r') as f:\n for line in f.readlines():\n line = line.strip('\\n')\n # print(line)\n if not 'PM1' in line:\n Starttime = line.split(' ')[0][:-3]\n Endtime = line.split(' ')[1][:-3]\n StartTime.append(Starttime)\n EndTime.append(Endtime)\n i += 1\n else:\n i_list.append(i)\n if len(i_list) > 1:\n for k in range(i_list[-1] - i_list[-2] - 1):\n data_list.append(data_list[-1])\n front = line.split('.')[0]\n 
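 # assumption: the 'PM1' line embeds a date stamp right before the extension, e.g. '..._2021_08_22.txt', so front[-10:] yields '2021_08_22' and is normalized to '2021-08-22' below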
Date = front[-10:].replace('_', '-')\n data_list.append(Date)\n\n data_list.append(data_list[-1])\n data_list.append(data_list[-1])\n data_list.append(data_list[-1])\n\n Evaluation['Date'] = data_list\n Evaluation['StartTime'] = StartTime\n Evaluation['EndTime'] = EndTime\n Evaluation['Value_Count'] = 1\n return Evaluation\n\n\ndef Evaluation_Process(PATH, Evaluation, analomy_data):\n Evaluation['Timestamp'] = Evaluation['Date'].apply(lambda x: int(time.mktime(time.strptime(x, '%Y-%m-%d'))))\n\n # Evaluation['Start'] = Evaluation['StartTime'].apply(lambda x: int((float(x.split(':')[0])*60)+float(x.split(':')[1])))\n # Evaluation['End'] = Evaluation['EndTime'].apply(lambda x: int((float(x.split(':')[0])*60)+float(x.split(':')[1])))\n\n Evaluation['Start'] = Evaluation['StartTime'].apply(\n lambda x: int((float(x.split(':')[0]) * 4) + float(x.split(':')[1]) // 15))\n Evaluation['End'] = Evaluation['EndTime'].apply(\n lambda x: int((float(x.split(':')[0]) * 4) + float(x.split(':')[1]) // 15))\n\n Evaluation['Start_End'] = Evaluation.apply(lambda row: StartEnd(row['Start'], row['End']), axis=1)\n Evaluation = Evaluation[['Timestamp', 'Start_End']]\n Evaluation = Evaluation.explode('Start_End')\n Evaluation.drop_duplicates(keep='first', inplace=True)\n\n anamaly_path = os.path.join(PATH, 'Analysis_PM1', 'Evaluation_Data.csv')\n # Evaluation.to_csv(anamaly_path)\n np.savetxt(anamaly_path, Evaluation, delimiter=',', fmt='%d')\n Common_Row = pd.merge(Evaluation, analomy_data, how='inner', on=['Timestamp', 'Start_End'])\n print('Common_Row', Common_Row.shape[0])\n print('Evaluation_Row', Evaluation.shape[0])\n print('analomy_data_Row', analomy_data.shape[0])\n # compare = datacompy.Compare(Evaluation,analomy_data,join_columns='Timestamp')\n # print(compare.report())\n print('DONE')\n\n\ndef StartEnd(start, end):\n item_list = []\n for item in range(start, end + 1):\n item_list.append(item)\n return item_list\n\n\nif __name__ == '__main__':\n PATH = os.path.abspath(os.path.dirname(os.getcwd()))\n folder = 'Analysis_PM1'\n file_name = 'CSVs_datalogger630094-CUBIC_IAQ_PM1_UpToAug22_GT.txt'\n analomy_file = 'anomaly_data(GMT)630094_Per15_Weekall_2021-02-28_2021-08-22.csv'\n analomy_data = pd.read_csv(os.path.join(PATH, folder, analomy_file), usecols=range(0, 2))\n file_path = os.path.join(PATH, folder, file_name)\n Evaluation = read_txt(file_path)\n Evaluation_Process(PATH, Evaluation, analomy_data)\n","sub_path":"pyfiles/Quantitative_Evaluation.py","file_name":"Quantitative_Evaluation.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397770554","text":"from app.database.sqlalchemy_extension import db\nfrom app.database.models.user import UserModel\nfrom app.database.models.application import ApplicationModel\nfrom app.database.models.institution import InstitutionModel\n\nfrom app.database.models.application_donor import application_donor\nfrom app.database.models.application_moderator import application_moderator\nfrom datetime import date, timedelta\n\nclass ReservedApplicationModel(db.Model):\n \n # Specifying table\n __tablename__ = \"reserved_application\"\n __table_args__ = {\"extend_existing\": True}\n \n \n id = db.Column(db.Integer, primary_key=True)\n application_id = db.Column(db.Integer, db.ForeignKey(\"application.id\"))\n application = db.relationship(\n ApplicationModel,\n backref='reserved',\n primaryjoin=\"ReservedApplicationModel.application_id == ApplicationModel.id\",\n )\n donor_id = 
db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n donor = db.relationship(\n UserModel,\n backref=\"reserved_as_donor\",\n primaryjoin=\"ReservedApplicationModel.donor_id == UserModel.id\",\n )\n moderator_id = db.Column(db.Integer, db.ForeignKey(\"user.id\"))\n moderator = db.relationship(\n UserModel,\n backref=\"reserved_as_moderator\",\n primaryjoin=\"ReservedApplicationModel.moderator_id == UserModel.id\",\n )\n \n is_active = db.Column(db.Boolean)\n verified = db.Column(db.Boolean)\n verification_date = db.Column(db.String(20))\n donation_date = db.Column(db.String(20))\n amount = db.Column(db.Integer)\n \n \n def save_to_db(self) -> None:\n '''Adds application to the database.'''\n db.session.add(self)\n db.session.commit()\n\n def delete_from_db(self) -> None:\n '''Deletes application from the database.'''\n db.session.delete(self)\n db.session.commit()\n \n \n @classmethod \n def find_by_id(cls, id: int) -> 'ReservedApplicationModel':\n '''Returns reserved application of given id.'''\n return cls.query.filter_by(id= id).first()\n \n @classmethod \n def find_by_application_id(cls, application_id: int) -> 'ReservedApplicationModel':\n '''Returns reserved application of given application id.'''\n return cls.query.filter_by(application_id= application_id).first()\n \n @classmethod \n def find_by_donor_id(cls, donor_id: int) -> 'ReservedApplicationModel':\n '''Returns reserved application of given donor id.'''\n return cls.query.filter_by(donor_id= donor_id).first()\n \n @classmethod \n def find_by_moderator_id(cls, moderator_id: int) -> 'ReservedApplicationModel':\n '''Returns reserved application of given moderator id.'''\n return cls.query.filter_by(moderator_id= moderator_id).first()\n \n @classmethod \n def find_reserved_application(cls, application_id: int, donor_id: int, moderator_id: int) -> 'ReservedApplicationModel':\n '''Returns the reserved application matching the given application, donor and moderator ids.'''\n return cls.query.filter_by(application_id=application_id, donor_id=donor_id, moderator_id= moderator_id).first()\n ","sub_path":"app/database/models/reserved_application.py","file_name":"reserved_application.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"191081712","text":"import random\nfrom collections.abc import Iterable\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\nfrom pytorch_lightning.metrics import Accuracy\nimport torch\nimport os\nfrom p_tqdm import p_umap\nfrom pathos.multiprocessing import ProcessingPool as Pool\nimport multiprocessing\n\ndef generate_random_params(params, n_iter, seed=10):\n \n random_params = []\n random.seed(seed)\n\n # loop for n_iter\n for _iter in range(0,n_iter):\n random_param = {}\n # loop over params\n for key, values in params.items():\n # assert values is a list\n assert isinstance(values,Iterable)\n\n # select a random item out of values\n value = random.choice(values)\n\n random_param[key] = value\n\n random_params.append(random_param)\n\n return random_params\n\n\ndef create_chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\ndef model_qualifier(model_type):\n return model_type[:2]\n\ndef save_ann_model(model,file_name):\n # save actual model\n import joblib\n import copy \n\n # copy the nn module\n torch_nn_module = copy.deepcopy(model.module_).to('cpu')\n\n # reset the net to cpu\n model.set_params(device=\"cpu\")\n model = model.initialize()\n \n # attach the old nn module\n model.module_ = torch_nn_module\n\n # dump to disk\n 
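 # (joblib-pickling the CPU copy keeps the artifact loadable on GPU-less machines; assumes a skorch-style net exposing .module_, set_params() and initialize())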
joblib.dump(model,file_name)\n \n\ndef load_ann_model(file_name):\n import joblib\n model = joblib.load(file_name)\n return model\n\n\ndef save_model(model, file_name):\n import joblib\n from sklearn.utils.validation import check_is_fitted\n check_is_fitted(model)\n joblib.dump(model,file_name)\n\ndef load_model(file_name):\n import joblib\n return joblib.load(file_name)\n\ndef save_scaler(scaler, file_name):\n import joblib\n from sklearn.utils.validation import check_is_fitted\n check_is_fitted(scaler)\n joblib.dump(scaler,file_name)\n \n\ndef load_scaler(file_name):\n import joblib\n return joblib.load(file_name)\n\ndef clean_proteins(protein):\n cleaned = protein.split(\"-\")[0]\n return cleaned\n\ndef get_embedding_vector_for_edge(edge,embeddings_pd,operator=\"l1\"):\n source_node = edge[0]\n target_node = edge[1]\n source_embedding = embeddings_pd[embeddings_pd[\"protein\"] == source_node].drop(columns=[\"protein\"]).to_numpy()\n\n target_embedding = embeddings_pd[embeddings_pd[\"protein\"] == target_node].drop(columns=[\"protein\"]).to_numpy()\n \n # if we get empty vectors then zero arrs\n if source_embedding.size == 0:\n source_embedding = np.zeros(target_embedding.shape)\n\n if target_embedding.size == 0:\n target_embedding = np.zeros(source_embedding.shape)\n\n if operator == \"l1\":\n embedding_np = operator_l1(source_embedding,target_embedding)\n elif operator == \"l2\":\n embedding_np = operator_l2(source_embedding,target_embedding)\n elif operator == \"hadamard\":\n embedding_np = operator_hadamard(source_embedding,target_embedding)\n elif operator == \"avg\":\n embedding_np = operator_avg(source_embedding,target_embedding)\n elif operator == \"cat\":\n embedding_np = operator_cat(source_embedding,target_embedding)\n else:\n raise Exception(f\"Unknown operator : {operator}\")\n \n return embedding_np\n\ndef get_embeddings_vector(edge_list,embeddings_pd,operator=\"l1\",show_progress=True):\n embeddings = []\n cpu_count = multiprocessing.cpu_count()\n pool = Pool(processes=cpu_count-1)\n do_work = lambda edge: get_embedding_vector_for_edge(edge,embeddings_pd,operator=operator)\n\n if show_progress == True:\n embeddings = p_umap(do_work,\n edge_list,\n num_cpus=cpu_count)\n else:\n embeddings = pool.map(do_work, edge_list)\n \n embeddings_vec = np.concatenate(embeddings,axis=0)\n return embeddings_vec\n\ndef calculate_accuracy(labels_predicted,labels_actual):\n accuracy = Accuracy()\n return accuracy(labels_predicted,labels_actual)\n\n\n# define the binary operators\ndef operator_hadamard(u, v):\n return u * v\n\ndef operator_l1(u, v):\n return np.abs(u - v)\n\ndef operator_l2(u, v):\n return (u - v) ** 2\n\ndef operator_avg(u, v):\n return (u + v) / 2.0\n\ndef operator_cat(u, v):\n return np.concatenate([u,v],axis=1)\n\nclass IKGDataSet(torch.utils.data.Dataset):\n\n def __init__(self,embeddings, labels):\n self.embedddings = embeddings\n self.labels = labels\n\n def __len__(self):\n return self.labels.shape[0]\n\n def __getitem__(self,index):\n # get embedding row\n embedding = self.embedddings[index]\n\n # get the label\n label = self.labels[index]\n\n return embedding, label\n\n\ndef get_best_experiment(results_dir):\n files = os.listdir(results_dir)\n \n # sort in descending ordert\n files.sort(reverse=True)\n\n best_result = files[0]\n\n # get the absolute path\n best_result = os.path.join(results_dir,best_result)\n\n return best_result\n\ndef extract_config(result_dict):\n config = {}\n for key,value in result_dict.items():\n # check if this is config key\n if 
\"config.\" in key:\n # if yes then extract the name\n key_name = key.replace(\"config.\",\"\")\n\n config[key_name] = value\n\n return config\n \n ","sub_path":"src/misc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"479573990","text":"import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntrain_balanced_data_file_preprocessed = \"traffic-signs-data/train_balanced_preprocessed.p\"\ntest_data_file_preprocessed = \"traffic-signs-data/test_preprocessed.p\"\nvalid_data_file_preprocessed = \"traffic-signs-data/valid_preprocessed.p\"\n\ndef load_data(path):\n with open(path, mode='rb') as f:\n data = pickle.load(f)\n X, y = data['features'], data['labels']\n return X, y\n\ndef normalize(data): \n data = (data - data.min()) / (data.max() - data.min())\n return data\n\nX_train, y_train = load_data(train_balanced_data_file_preprocessed)\nX_valid, y_valid = load_data(valid_data_file_preprocessed)\nX_test, y_test = load_data(test_data_file_preprocessed)\n \nX_train = normalize(X_train)\nX_valid = normalize(X_valid)\nX_test = normalize(X_test)\n\n# Implement LeNet-5\n# Implement the [LeNet-5](http://yann.lecun.com/exdb/lenet/) neural network architecture.\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\n\ndef conv2d(x, W, b, strides=1):\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='VALID')\n x = tf.nn.bias_add(x, b)\n # Activation.\n return tf.nn.relu(x)\n\ndef maxpool2d(x, k=2):\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='VALID')\n\ndef LeNet(x, is_training):\n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n output_size = 43\n \n # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x32.\n w1 = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 32), mean = mu, stddev = sigma))\n b1 = tf.Variable(tf.zeros(32))\n conv1 = conv2d(x, w1, b1)\n\n # Pooling. Input = 28x28x32. Output = 14x14x32.\n conv1 = maxpool2d(conv1, k=2)\n conv1 = tf.cond(is_training, lambda: tf.nn.dropout(conv1, keep_prob = 0.9), lambda: conv1)\n\n # Layer 2: Convolutional. Output = 10x10x64.\n w2 = tf.Variable(tf.truncated_normal(shape=(5, 5, 32, 64), mean = mu, stddev = sigma))\n b2 = tf.Variable(tf.zeros(64))\n conv2 = conv2d(conv1, w2, b2)\n\n # Pooling. Input = 10x10x64. Output = 5x5x64.\n conv2 = maxpool2d(conv2, k=2)\n conv2 = tf.cond(is_training, lambda: tf.nn.dropout(conv2, keep_prob = 0.75), lambda: conv2)\n\n # Flatten. Input = 5x5x64. Output = 1600.\n fc0 = flatten(conv2)\n \n # Layer 3: Fully Connected. Input = 1600. Output = 1024.\n wd1 = tf.Variable(tf.truncated_normal(shape=(1600, 1024), mean = mu, stddev = sigma))\n bd1 = tf.Variable(tf.zeros(1024))\n fc1 = tf.matmul(fc0, wd1) + bd1\n \n # Activation.\n fc1 = tf.nn.relu(fc1)\n\n # Layer 4: Fully Connected. Input = 1024. Output = 512.\n wd2 = tf.Variable(tf.truncated_normal(shape=(1024, 512), mean = mu, stddev = sigma))\n bd2 = tf.Variable(tf.zeros(512))\n fc2 = tf.matmul(fc1, wd2) + bd2\n \n # Activation.\n fc2 = tf.nn.relu(fc2)\n fc2 = tf.cond(is_training, lambda: tf.nn.dropout(fc2, keep_prob = 0.5), lambda: fc2)\n\n # Layer 5: Fully Connected. Input = 512. 
Output = 43.\n wd3 = tf.Variable(tf.truncated_normal(shape=(512, output_size), mean = mu, stddev = sigma))\n bd3 = tf.Variable(tf.zeros(output_size))\n logits = tf.matmul(fc2, wd3) + bd3\n weights = [wd1,wd2,wd3]\n \n return logits, weights\n\n\nfrom sklearn.utils import shuffle\n\nsave_file = './lenet'\nEPOCHS = 100\nBATCH_SIZE = 256\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\nis_training = tf.placeholder(tf.bool)\none_hot_y = tf.one_hot(y, 43)\n\nrate = 0.001\n\nlogits, weights = LeNet(x, is_training)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\n\n# L2 Regularization \nregularization = 0.0\nfor w in weights:\n regularization += tf.nn.l2_loss(w)\n\n#http://docs.aws.amazon.com/machine-learning/latest/dg/training-parameters.html\nL2_rate = 1e-4\n\nloss_operation = tf.reduce_mean(cross_entropy) + L2_rate*regularization\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, is_training: False})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\n# Train the model\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n \n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, is_training: True})\n \n validation_accuracy = evaluate(X_valid, y_valid)\n if i%5 == 0:\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, save_file)\n print(\"Model saved\")\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123826350","text":"import serial\nimport traceback\nimport threading\nimport serial.threaded\n\n# arguments\n# port – Device name or None.\n# baudrate (int) – Baud rate such as 9600 or 115200 etc.\n# bytesize – Number of data bits. Possible values: FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS\n# parity – Enable parity checking. Possible values: PARITY_NONE, PARITY_EVEN, PARITY_ODD PARITY_MARK, PARITY_SPACE\n# stopbits – Number of stop bits. Possible values: STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO\n# timeout (float) – Set a read timeout value.\n# xonxoff (bool) – Enable software flow control.\n# rtscts (bool) – Enable hardware (RTS/CTS) flow control. ALWAYS TRUE\n# dsrdtr (bool) – Enable hardware (DSR/DTR) flow control. ALWAYS TRUE\n# write_timeout (float) – Set a write timeout value.\n# inter_byte_timeout (float) – Inter-character timeout, None to disable (default).\n\n# Raises:\n# ValueError – Will be raised when parameter are out of range, e.g. 
baud rate, data bits.\n# SerialException – In case the device can not be found or can not be configured.\n\n\nclass Connection:\n \"\"\"\n Physical connection\n \"\"\"\n class PrintLines(serial.threaded.LineReader):\n \"\"\"\n Class for asynchronous reading from the port\n \"\"\"\n on_received = None\n on_exception = None\n buffer = bytearray()\n receiving_now = False\n\n def connection_made(self, transport):\n super(Connection.PrintLines, self).connection_made(transport)\n\n def data_received(self, data):\n # If the port is not set up to receive, ignore the data\n if self.on_received is None:\n return\n for b in data:\n if b == Connection.start_byte():\n self.receiving_now = True\n self.buffer = bytearray()\n elif b == Connection.stop_byte():\n self.receiving_now = False\n self.on_received(self.buffer)\n self.buffer = bytearray()\n elif self.receiving_now:\n self.buffer.append(b)\n\n def handle_line(self, line):\n pass\n\n def connection_lost(self, exc):\n # exc is the exception, if something went wrong\n if exc:\n if self.on_exception is not None:\n self.on_exception()\n else:\n traceback.print_exception(type(exc), exc, exc.__traceback__)\n\n def __init__(self):\n self.ser = None\n self.stop_event = threading.Event()\n self.thread = threading.Thread(target=self.reading, args=(self.stop_event, ))\n self.on_received = None\n self.on_exception = None\n\n @staticmethod\n def start_byte():\n return 0x02\n\n @staticmethod\n def stop_byte():\n return 0x04\n\n def connect(self, port_name, **kargs):\n self.ser = serial.Serial(port_name, rtscts=True, dsrdtr=True, **kargs)\n self.thread.start()\n\n def disconnect(self):\n # Stop the thread in which the data is being read\n self.stop_event.set()\n self.thread.join()\n # Close the connection\n if self.ser is not None:\n self.ser.close()\n self.ser = None\n\n def is_connected(self):\n return self.ser is not None\n\n def reading(self, stop_event):\n # Start reading in a new thread\n with serial.threaded.ReaderThread(self.ser, Connection.PrintLines) as protocol:\n protocol.on_received = self.on_received\n protocol.on_exception = self.on_exception\n while not stop_event.is_set():\n serial.time.sleep(0.05)\n\n def write(self, data):\n buffer = bytearray()\n buffer.append(Connection.start_byte())\n buffer.extend(data)\n buffer.append(Connection.stop_byte())\n try:\n self.ser.write(buffer)\n except serial.SerialException:\n if self.on_exception is not None:\n self.on_exception()\n else:\n traceback.print_exc()\n","sub_path":"physical_layer/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"512207712","text":"import cv2\nimport numpy as np\nimport sysv_ipc\n\n# Read image data from shared memory.\nmemory = sysv_ipc.SharedMemory(123456)\ndata = memory.read()\n\n# Construct image using numpy.\nframe = np.frombuffer(data, 'uint8')\nwidth = 640\nheight = 424\nshape = (height, width, 3)\nstrides = (width * 3, 3, 1)\nframe = np.lib.stride_tricks.as_strided(frame, shape, strides)\n\n# Show the image using OpenCV.\ncv2.imshow('image from shared memory', frame)\ncv2.waitKey()\ncv2.destroyAllWindows()\n","sub_path":"imshow.py","file_name":"imshow.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"473400803","text":"import os\n\n\ndef has_weights(root, path_to_check):\n \"\"\"Checks for model weight paths from weights folder\n\n Args:\n root (str): path of peekingduck root 
folder\n path_to_check (List[str]): list of files/directories to check\n to see if weights exist\n\n Returns:\n boolean: True if the needed files/directories exist, else False\n \"\"\"\n\n # Check whether the weights dir even exists. If not, make the directory\n # Empty directory should then return False\n weights_dir = os.path.join(root, '..', 'weights')\n if not os.path.isdir(weights_dir):\n os.mkdir(weights_dir)\n return False\n\n for check in path_to_check:\n if not os.path.exists(os.path.join(root, check)):\n return False\n return True","sub_path":"peekingduck/weights_utils/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"451910979","text":"############################################################################\r\n# #\r\n# Copyright (c)2008, 2009, Digi International (Digi). All Rights Reserved. #\r\n# #\r\n# Permission to use, copy, modify, and distribute this software and its #\r\n# documentation, without fee and without a signed licensing agreement, is #\r\n# hereby granted, provided that the software is used on Digi products only #\r\n# and that the software contain this copyright notice, and the following #\r\n# two paragraphs appear in all copies, modifications, and distributions as #\r\n# well. Contact Product Management, Digi International, Inc., 11001 Bren #\r\n# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #\r\n# opportunities for non-Digi products. #\r\n# #\r\n# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #\r\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #\r\n# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #\r\n# PROVIDED HEREUNDER IS PROVIDED \"AS IS\" AND WITHOUT WARRANTY OF ANY KIND. #\r\n# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #\r\n# ENHANCEMENTS, OR MODIFICATIONS. #\r\n# #\r\n# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #\r\n# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #\r\n# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #\r\n# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
#\r\n# #\r\n############################################################################\r\n\r\n\"\"\"\\\r\n The Xbee Device Manager Event Specs base class, as well as\r\n a few various classes built upon the base class.\r\n\r\n\r\n An event spec is a tuple:\r\n (addr, endpoint, profile_id, cluster_id)\r\n\r\n The mask is an (inverted) Boolean, so True means check this field.\r\n\r\n (True, False, False, True) means addr and cluster_id must match.\r\n\r\n\"\"\"\r\n\r\n# imports\r\nimport string\r\n\r\nfrom devices.xbee.common.addressing import addresses_equal\r\n\r\n# constants\r\n\r\n# exception classes\r\n\r\n# interface functions\r\n\r\n# classes\r\n\r\nclass XBeeDeviceManagerEventSpec:\r\n \"\"\"\\\r\n The Event Spec base class.\r\n\r\n This class is the foundation class to build various different\r\n Event Spec classes around.\r\n\r\n \"\"\"\r\n def __init__(self):\r\n self.__cb = None\r\n\r\n def cb_set(self, cb):\r\n self.__cb = cb\r\n\r\n\r\n def cb_get(self):\r\n return self.__cb\r\n\r\n\r\n def match_spec_set(self, *args, **kwargs):\r\n \"\"\"\\\r\n This function's calling interface is allowed to change, it is\r\n never called by the device manager.\r\n\r\n \"\"\"\r\n raise NotImplementedError(\"virtual function\")\r\n\r\n\r\n def match_spec_test(self, candidate):\r\n raise NotImplementedError(\"virtual function\")\r\n\r\n\r\nclass XBeeDeviceManagerRxEventSpec(XBeeDeviceManagerEventSpec):\r\n \"\"\"\\\r\n The Rx Event Spec class.\r\n\r\n XBeeDeviceManagerRxEventSpec is called when data comes\r\n in from a specific XBee device that matches the given\r\n specs.\r\n\r\n \"\"\"\r\n def __init__(self):\r\n self._match_spec = None\r\n self._match_mask = None\r\n self._listen_endpoint = None\r\n\r\n XBeeDeviceManagerEventSpec.__init__(self)\r\n\r\n def get_listen_endpoint(self):\r\n if self._listen_endpoint != None:\r\n return self._listen_endpoint\r\n else:\r\n return self._match_spec[1]\r\n\r\n def _check_addr(self, addr, varname=\"addr\"):\r\n if not (isinstance(addr, tuple) and len(addr) >= 4):\r\n raise AttributeError(\r\n \"bad %s, must be tuple of >= length 4. (%s given)\" \\\r\n % (varname, addr))\r\n\r\n if not isinstance(addr[0], str) and addr[0] != None:\r\n raise AttributeError(\r\n \"bad %s, first item must be string or None\" % (varname))\r\n\r\n if not reduce(lambda p, q: p and isinstance(q, int), addr[1:4], True):\r\n raise AttributeError(\r\n \"bad %s, [1:4] must be integers.\" % (varname))\r\n\r\n def match_spec_set(self, match_spec, match_mask, listen_endpoint=None):\r\n\r\n self._check_addr(match_spec, \"match_spec\")\r\n\r\n # is our listen endpoint different from the one passed in\r\n # the match spec? 
(0 is a valid endpoint)\r\n if listen_endpoint != None:\r\n self._listen_endpoint = listen_endpoint\r\n\r\n if not (isinstance(match_mask, tuple) and len(match_mask) == 4):\r\n raise AttributeError(\"bad match_mask, must be tuple of length 4.\")\r\n\r\n if not reduce(lambda p, q: p and \\\r\n isinstance(q, bool), match_mask, True):\r\n raise AttributeError(\"bad match_spec, [0:4] must be booleans.\")\r\n\r\n if match_spec[0] != None:\r\n addr = match_spec[0].lower()\r\n else:\r\n addr = match_spec[0]\r\n final_match_spec = (addr, match_spec[1], match_spec[2], match_spec[3])\r\n\r\n self._match_spec = final_match_spec\r\n self._match_mask = match_mask\r\n\r\n\r\n def match_spec_get(self):\r\n return (self._match_spec, self._match_mask)\r\n\r\n\r\n def match_spec_test(self, candidate, mac_prematch=False):\r\n if not mac_prematch:\r\n self._check_addr(candidate, \"candidate\")\r\n\r\n # TODO: move stripping logic into common addressing helper lib:\r\n if self._match_mask[0] and \\\r\n not addresses_equal(self._match_spec[0], candidate[0]):\r\n return False\r\n\r\n ##Couple of points to make here. We only match elements 1-3, and allow\r\n ##Short circuiting the if comparison. spec and mask are moved into local\r\n ##scope for performance boost instead of being class variables.\r\n\r\n ##Return False if not matched,\r\n ##Return True if matched.\r\n\r\n spec = self._match_spec\r\n mask = self._match_mask\r\n\r\n if not ((not mask[1] or (candidate[1] == spec[1])) and\r\n (not mask[2] or (candidate[2] == spec[2])) and\r\n (not mask[3] or (candidate[3] == spec[3]))):\r\n return False\r\n return True\r\n\r\n\r\nclass XBeeDeviceManagerRxConfigEventSpec(XBeeDeviceManagerRxEventSpec):\r\n \"\"\"\r\n The Rx Config Event Spec class.\r\n\r\n XBeeDeviceManagerRxConfigEventSpec is a XBeeDeviceManagerRxEventSpec\r\n except that its callbacks get processed when a device is in the\r\n config state instead of the running state.\r\n\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass XBeeDeviceManagerRunningEventSpec(XBeeDeviceManagerEventSpec):\r\n \"\"\"\r\n The Running Event Spec class.\r\n\r\n XBeeDeviceManagerRunningEventSpec is called when a device enters\r\n the running state.\r\n\r\n \"\"\"\r\n def match_spec_set(self):\r\n pass\r\n\r\n\r\n def match_spec_test(self):\r\n pass\r\n","sub_path":"src/devices/xbee/xbee_device_manager/xbee_device_manager_event_specs.py","file_name":"xbee_device_manager_event_specs.py","file_ext":"py","file_size_in_byte":7208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"244484908","text":"import folium\nfrom parser import parse_file, convert_to_coordinates, neighbors\n\n\ndef create_map(dct, user_input):\n '''\n dict(), tuple -> ()\n Function for creating a map with all layers.\n '''\n map = folium.Map(location=user_input, zoom_start=3)\n tooltip = 'Click me!'\n\n fg1 = folium.FeatureGroup(name='Locations')\n fg2 = folium.FeatureGroup(name='Movies')\n fg3 = folium.FeatureGroup(name=\"Area\")\n for key in dct:\n folium.Marker(user_input, popup=key, tooltip=tooltip).add_to(map)\n fg1.add_child(folium.CircleMarker(location=[key[0], key[1]],\n radius=10,\n popup=key,\n color='red',\n fill_opacity=0.5))\n\n fg2.add_child(folium.Marker(location=[key[0], key[1]],\n popup=dct[key],\n icon=folium.Icon()))\n\n fg3.add_child(folium.GeoJson(data=open('world.json', 'r',\n encoding='utf-8-sig').read(),\n style_function=lambda x: {'fillColor': 'white'\n if x['properties']['AREA'] < 1000\n else 'blue' if 1000 <= x['properties']['AREA'] < 100000\n 
else 'green' if 100000 <= x['properties']['AREA'] < 1000000\n else 'purple'}))\n map.add_child(fg1)\n map.add_child(fg2)\n map.add_child(fg3)\n map.add_child(folium.LayerControl())\n map.save(\"Map_movies.html\")\n\n\nif __name__ == \"__main__\":\n year = input(\"Please enter a year you would like to have a map for: \")\n coordinates = input(\"Please enter your location (format: lat, long): \")\n coordinates = eval(coordinates)\n inf = convert_to_coordinates(parse_file(year))\n loc = neighbors(coordinates, inf)\n create_map(loc, coordinates)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"301292726","text":"from keras.models import Sequential\r\nimport cv2\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\nfrom keras.models import load_model\r\nfrom keras.layers import Convolution2D as Conv2D, MaxPooling2D, Dropout\r\nfrom keras.layers.convolutional import Deconv2D as Conv2DTranspose\r\nfrom keras.layers import Dense, Activation, Flatten\r\n\r\ndef nothing(x):\r\n pass\r\n\r\ndef create_model():\r\n # Initialising the CNN\r\n classifier = Sequential()\r\n\r\n# Adding first convolutional layer, followed by pooling, and dropout\r\n classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))\r\n classifier.add(MaxPooling2D(pool_size = (2, 2)))\r\n classifier.add(Dropout(0.25))\r\n\r\n# Adding second convolutional layer, followed by pooling, and dropout\r\n classifier.add(Conv2D(32, (3, 3), activation = 'relu'))\r\n classifier.add(MaxPooling2D(pool_size = (2, 2)))\r\n classifier.add(Dropout(0.25))\r\n\r\n# Adding third convolutional layer, followed by pooling, and dropout\r\n classifier.add(Conv2D(32, (3, 3), activation = 'relu'))\r\n classifier.add(MaxPooling2D(pool_size = (2, 2)))\r\n classifier.add(Dropout(0.25))\r\n\r\n# Flattening\r\n classifier.add(Flatten())\r\n\r\n# Full connection\r\n classifier.add(Dense(units = 128, activation = 'relu'))\r\n classifier.add(Dense(units = 10, activation = 'softmax'))\r\n\r\n# Compiling the CNN\r\n classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\r\n return classifier\r\n\r\n\r\n\r\n\r\nimage_x, image_y = 64,64\r\nmodel = create_model()\r\nmodel.load_weights('model.h5')\r\n\r\ndef predictor():\r\n\r\n img = image.load_img('c2.jpg', target_size=(64, 64))\r\n img = image.img_to_array(img)\r\n img = np.expand_dims(img, axis = 0)\r\n result = model.predict(img)\r\n \r\n if result[0][0] == 1:\r\n return '0'\r\n elif result[0][1] == 1:\r\n return '1'\r\n elif result[0][2] == 1:\r\n return '2'\r\n elif result[0][3] == 1:\r\n return '3'\r\n elif result[0][4] == 1:\r\n return '4'\r\n elif result[0][5] == 1:\r\n return '5'\r\n elif result[0][6] == 1:\r\n return '6'\r\n elif result[0][7] == 1:\r\n return '7'\r\n elif result[0][8] == 1:\r\n return '8'\r\n elif result[0][9] == 1:\r\n return '9'\r\ncommand = predictor()\r\nprint(command)","sub_path":"NewModel.py","file_name":"NewModel.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"143023029","text":"# Copyright (c) 2014-2017, iocage\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted providing that the following conditions\n# are met:\n# 1. 
Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\nimport libiocage.lib.errors\nimport libiocage.lib.helpers\n\n\nclass ZFSShareStorage:\n @property\n def zfs_datasets(self):\n return self._get_zfs_datasets(auto_create=self.auto_create)\n\n def mount_zfs_shares(self, auto_create=False):\n self.logger.log(\"Mounting ZFS shares\")\n self._mount_procfs()\n self._mount_jail_datasets(auto_create=auto_create)\n\n def _get_zfs_datasets(self, auto_create=None):\n dataset_names = self.jail.config[\"jail_zfs_dataset\"]\n\n auto_create = self.auto_create if auto_create is None else auto_create\n\n datasets = set()\n for name in dataset_names:\n\n zpool = None\n try:\n zpool = self._get_pool_from_dataset_name(name)\n except:\n pass\n\n pool_name = f\"{self.jail.zfs_pool_name}/{name}\"\n try:\n # legacy support (datasets not prefixed with pool/)\n zpool = self._get_pool_from_dataset_name(pool_name)\n name = f\"{self.jail.zfs_pool_name}/{name}\"\n except:\n pass\n\n try:\n if auto_create:\n zpool.create(name, {}, create_ancestors=True)\n except:\n pass\n\n try:\n dataset = self.zfs.get_dataset(name)\n datasets.add(dataset)\n except:\n raise libiocage.lib.errors.DatasetNotAvailable(\n dataset_name=name,\n logger=self.logger\n )\n\n return datasets\n\n def _mount_jail_dataset(self, dataset_name):\n self.jail.exec(['zfs', 'mount', dataset_name])\n\n def _mount_jail_datasets(self, auto_create=None):\n\n auto_create = self.auto_create if auto_create is None else (\n auto_create is True)\n\n if self.safe_mode:\n self._require_datasets_exist_and_jailed()\n\n for dataset in self.zfs_datasets:\n\n self._unmount_local(dataset)\n\n # ToDo: bake jail feature into py-libzfs\n libiocage.lib.helpers.exec(\n [\"zfs\", \"jail\", self.jail.identifier, dataset.name])\n\n if dataset.properties['mountpoint']:\n for child in list(dataset.children):\n self._ensure_dataset_exists(child)\n self._mount_jail_dataset(child.name)\n\n def _get_pool_name_from_dataset_name(self, dataset_name):\n return dataset_name.split(\"/\", maxsplit=1)[0]\n\n def _get_pool_from_dataset_name(self, dataset_name):\n target_pool_name = self._get_pool_name_from_dataset_name(dataset_name)\n for zpool in list(self.zfs.pools):\n if zpool.name == target_pool_name:\n return zpool\n\n # silent exception, no logger defined\n raise libiocage.lib.errors.ZFSPoolUnavailable(\n pool_name=target_pool_name\n )\n\n def _require_datasets_exist_and_jailed(self):\n existing_datasets = self._get_zfs_datasets(auto_create=False)\n for existing_dataset in existing_datasets:\n if 
existing_dataset.properties[\"jailed\"] != \"on\":\n raise libiocage.lib.errors.DatasetNotJailed(\n dataset=existing_dataset,\n logger=self.logger\n )\n","sub_path":"libiocage/lib/ZFSShareStorage.py","file_name":"ZFSShareStorage.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"361004516","text":"#!/usr/bin/python3 \n\nimport os, glob \n\n# getting file list\ndef get_file_list(path):\n return glob.glob(path)\n\n\ndef filter_file_list(path, exp, dir_filter):\n\n def is_dir(fo):\n return True if os.path.isdir(fo) else False\n\n flist = glob.glob(os.path.join(path, exp))\n\n if dir_filter is None:\n return flist\n elif dir_filter:\n return list(filter(os.path.isdir, flist))\n else:\n return list(filter(lambda x:not os.path.isdir(x), flist))\n\n\ndef main():\n print(\"all list\")\n flist = get_file_list(\"./*\")\n print(flist, \"\\n\")\n\n print(\"all list with filter\")\n flist = filter_file_list(\"./\", \"*.py\", None)\n print(flist, \"\\n\")\n\n print(\"all list\")\n flist = filter_file_list(\"./\", \"*\", None)\n print(flist, \"\\n\")\n\n print(\"all dir list\")\n flist = filter_file_list(\"./\", \"*\", True)\n print(flist, \"\\n\")\n\n print(\"all file list\")\n flist = filter_file_list(\"./\", \"*\", False)\n print(flist, \"\\n\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fs/get_file_list.py","file_name":"get_file_list.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"116361730","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com), Abtion(abtion@outlook.com)\n@description: \n\"\"\"\nimport operator\nfrom abc import ABC\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport pytorch_lightning as pl\nfrom pycorrector.macbert import lr_scheduler\nfrom pycorrector.macbert.evaluate_util import compute_corrector_prf, compute_sentence_level_prf\nfrom pycorrector.utils.logger import logger\n\n\nclass FocalLoss(nn.Module):\n \"\"\"\n Softmax and sigmoid focal loss.\n copy from https://github.com/lonePatient/TorchBlocks\n \"\"\"\n\n def __init__(self, num_labels, activation_type='softmax', gamma=2.0, alpha=0.25, epsilon=1.e-9):\n super().__init__()\n self.num_labels = num_labels\n self.gamma = gamma\n self.alpha = alpha\n self.epsilon = epsilon\n self.activation_type = activation_type\n\n def forward(self, input, target):\n \"\"\"\n Args:\n logits: model's output, shape of [batch_size, num_cls]\n target: ground truth labels, shape of [batch_size]\n Returns:\n shape of [batch_size]\n \"\"\"\n if self.activation_type == 'softmax':\n idx = target.view(-1, 1).long()\n one_hot_key = torch.zeros(idx.size(0), self.num_labels, dtype=torch.float32, device=idx.device)\n one_hot_key = one_hot_key.scatter_(1, idx, 1)\n logits = torch.softmax(input, dim=-1)\n loss = -self.alpha * one_hot_key * torch.pow((1 - logits), self.gamma) * (logits + self.epsilon).log()\n loss = loss.sum(1)\n elif self.activation_type == 'sigmoid':\n multi_hot_key = target\n logits = torch.sigmoid(input)\n zero_hot_key = 1 - multi_hot_key\n loss = -self.alpha * multi_hot_key * torch.pow((1 - logits), self.gamma) * (logits + self.epsilon).log()\n loss += -(1 - self.alpha) * zero_hot_key * torch.pow(logits, self.gamma) * (1 - logits + self.epsilon).log()\n return loss.mean()\n\n\ndef make_optimizer(lr, weight_decay, optimizer_name, param_dict):\n params = 
[]\n for key, value in param_dict:\n if not value.requires_grad:\n continue\n # use per-parameter values so the base lr/weight_decay are not\n # mutated cumulatively for every bias parameter encountered\n param_lr, param_weight_decay = lr, weight_decay\n if \"bias\" in key:\n param_lr = lr * 2\n param_weight_decay = 0\n params += [{\"params\": [value], \"lr\": param_lr, \"weight_decay\": param_weight_decay}]\n optimizer = getattr(torch.optim, optimizer_name)(params)\n return optimizer\n\n\ndef build_lr_scheduler(optimizer):\n scheduler_args = {\n \"optimizer\": optimizer,\n\n # warmup options\n \"warmup_factor\": 0.01,\n \"warmup_epochs\": 1024,\n \"warmup_method\": \"linear\",\n\n # multi-step lr scheduler options\n \"milestones\": (10,),\n \"gamma\": 0.9999,\n\n # cosine annealing lr scheduler options\n \"max_iters\": 10,\n \"delay_iters\": 0,\n \"eta_min_lr\": 3e-7,\n\n }\n scheduler = getattr(lr_scheduler, \"WarmupExponentialLR\")(**scheduler_args)\n return {'scheduler': scheduler, 'interval': 'step'}\n\n\nclass BaseTrainingEngine(pl.LightningModule):\n def __init__(self, lr=5e-5, weight_decay=0.01, optimizer_name='AdamW', *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.lr = lr\n self.weight_decay = weight_decay\n self.optimizer_name = optimizer_name\n\n def configure_optimizers(self):\n optimizer = make_optimizer(self.lr, self.weight_decay, self.optimizer_name, self.named_parameters())\n scheduler = build_lr_scheduler(optimizer)\n\n return [optimizer], [scheduler]\n\n def on_validation_epoch_start(self) -> None:\n logger.info('Valid.')\n\n def on_test_epoch_start(self) -> None:\n logger.info('Testing...')\n\n\nclass CscTrainingModel(BaseTrainingEngine, ABC):\n \"\"\"TrainingModel for CSC; defines the training and prediction steps.\"\"\"\n\n def __init__(self, lr=5e-5, weight_decay=0.01, optimizer_name='AdamW',\n loss_coefficient=0.3, device=torch.device('cuda'), *args, **kwargs):\n super().__init__(lr=lr, weight_decay=weight_decay, optimizer_name=optimizer_name, *args, **kwargs)\n # loss weight\n self.w = loss_coefficient\n self._device = device\n\n def training_step(self, batch, batch_idx):\n ori_text, cor_text, det_labels = batch\n outputs = self.forward(ori_text, cor_text, det_labels)\n loss = self.w * outputs[1] + (1 - self.w) * outputs[0]\n return loss\n\n def validation_step(self, batch, batch_idx):\n ori_text, cor_text, det_labels = batch\n outputs = self.forward(ori_text, cor_text, det_labels)\n loss = self.w * outputs[1] + (1 - self.w) * outputs[0]\n det_y_hat = (outputs[2] > 0.5).long()\n cor_y_hat = torch.argmax((outputs[3]), dim=-1)\n encoded_x = self.tokenizer(cor_text, padding=True, return_tensors='pt')\n encoded_x.to(self._device)\n cor_y = encoded_x['input_ids']\n cor_y_hat *= encoded_x['attention_mask']\n\n results = []\n det_acc_labels = []\n cor_acc_labels = []\n for src, tgt, predict, det_predict, det_label in zip(ori_text, cor_y, cor_y_hat, det_y_hat, det_labels):\n _src = self.tokenizer(src, add_special_tokens=False)['input_ids']\n _tgt = tgt[1:len(_src) + 1].cpu().numpy().tolist()\n _predict = predict[1:len(_src) + 1].cpu().numpy().tolist()\n cor_acc_labels.append(1 if operator.eq(_tgt, _predict) else 0)\n det_acc_labels.append(det_predict[1:len(_src) + 1].equal(det_label[1:len(_src) + 1]))\n results.append((_src, _tgt, _predict,))\n\n return loss.cpu().item(), det_acc_labels, cor_acc_labels, results\n\n def validation_epoch_end(self, outputs) -> None:\n det_acc_labels = []\n cor_acc_labels = []\n results = []\n for out in outputs:\n det_acc_labels += out[1]\n cor_acc_labels += out[2]\n results += out[3]\n loss = np.mean([out[0] for out in outputs])\n self.log('val_loss', loss)\n logger.info(f'loss: {loss}')\n logger.info(f'Detection:\n'\n f'acc: 
{np.mean(det_acc_labels):.4f}')\n logger.info(f'Correction:\\n'\n f'acc: {np.mean(cor_acc_labels):.4f}')\n compute_corrector_prf(results, logger)\n compute_sentence_level_prf(results, logger)\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def test_epoch_end(self, outputs) -> None:\n logger.info('Test.')\n self.validation_epoch_end(outputs)\n\n def predict(self, texts):\n inputs = self.tokenizer(texts, padding=True, return_tensors='pt')\n inputs.to(self._device)\n with torch.no_grad():\n outputs = self.forward(texts)\n y_hat = torch.argmax(outputs[1], dim=-1)\n expand_text_lens = torch.sum(inputs['attention_mask'], dim=-1) - 1\n rst = []\n for t_len, _y_hat in zip(expand_text_lens, y_hat):\n rst.append(self.tokenizer.decode(_y_hat[1:t_len]).replace(' ', ''))\n return rst\n","sub_path":"pycorrector/macbert/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":7036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358471591","text":"#Displays the credits\r\n\r\nimport pygame\r\nimport constants\r\n\r\nfrom level_manager import *\r\nfrom art import *\r\n\r\nclass CreditsScreen():\r\n def __init__(self):\r\n \r\n title_font = pygame.font.SysFont('castellar', 48, True, False)\r\n credit_font = pygame.font.SysFont('mangal', 20, True, False)\r\n instruction_font = pygame.font.SysFont('mangal', 32, True, False)\r\n self._text = title_font.render(\"Credits\",True,constants.BLACK)\r\n self._instruct_text1 = instruction_font.render(\"Press escape to return to the title screen.\", True, constants.BLACK)\r\n self._credit_text1 = credit_font.render(\"Game designed by Monica Timmerman, Jack Olson,\"\r\n + \" Carrie Mannilla, Bryant Lennick, Camryn Roadley, & Dan Nygard\", True, constants.BLACK)\r\n self._credit_text2 = credit_font.render(\"Game background music is \\\"Awesome Call\\\" by Kevin MacLeod (incompetech.com) \",True, constants.BLACK)\r\n self._credit_text3 = credit_font.render(\"Licensed under Creative Commons: By Attribution 3.0 License http://creativecommons.org/licenses/by/3.0/\", True, constants.BLACK)\r\n self._credit_text4 = credit_font.render(\"Sound effects from free sounds library at https://www.partnersinrhyme.com/pir/PIRsfx.shtml:\", True, constants.BLACK)\r\n self._credit_text5 = credit_font.render(\"Rabbit attack: https://www.partnersinrhyme.com/soundfx/fight_sounds/fight_punch_wav.shtml\", True, constants.BLACK)\r\n self._credit_text6 = credit_font.render(\"Harvest sound: https://www.partnersinrhyme.com/soundfx/office_sounds/office_cash-register2_wav.shtml\", True, constants.BLACK)\r\n\r\n self._credit_text7 = credit_font.render(\"All game art by Bryant Lennick using Piskel (https://www.piskelapp.com/).\",True, constants.BLACK)\r\n self._credit_text8 = credit_font.render(\"Sprite animation created using PygAnim, Copyright 2014 Al Sweigart\", True, constants.BLACK)\r\n\r\n \r\n\r\n def handle_keyboard_event(self, event):\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n LevelManager().leave_level()\r\n\r\n #No need to do anything here, unless we've got some animation\r\n def update(self):\r\n pass\r\n \r\n def draw(self, screen):\r\n # Clear the screen\r\n screen.blit(pygame.image.load('images/background.png').convert_alpha(), [0, 0])\r\n \r\n # Draw my credits text!\r\n screen.blit(self._text, [380, 150])\r\n screen.blit(self._credit_text1, [210, 200])\r\n screen.blit(self._credit_text2, [210, 225])\r\n 
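 # the remaining credit lines below are stacked 25 px apart down the screen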
screen.blit(self._credit_text3, [225, 250])\r\n screen.blit(self._credit_text4, [210, 275])\r\n screen.blit(self._credit_text5, [225, 300])\r\n screen.blit(self._credit_text6, [225, 325])\r\n screen.blit(self._credit_text7, [210, 350])\r\n screen.blit(self._credit_text8, [210, 375])\r\n\r\n screen.blit(self._instruct_text1, [210, 450])\r\n \r\n","sub_path":"credits_screen.py","file_name":"credits_screen.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"640786965","text":"# -*- coding: utf-8 -*-\nfrom multiprocessing import Process, Manager\n\n# API CODE\napi_key = \"\"\napi_secret = \"\"\n\nimport time, base64, hmac, hashlib, urllib.parse, pycurl, json, io\n# Bithumb API call class\nclass XCoinAPI:\n def __init__(self):\n self.api_url = \"https://api.bithumb.com\"\n self.api_key = api_key\n self.api_secret = api_secret\n\n def xcoinApiCall(self, EP, Params, nonce = str(time.time()*1000)):\n Params = dict({\"endpoint\" : EP},**Params)\n str_data = urllib.parse.urlencode(Params)\n contents = io.BytesIO()\n\n utf8_data = (EP + chr(0) + str_data + chr(0) + nonce).encode('utf8')\n utf8_key = self.api_secret.encode('utf8')\n utf8_api_sign = base64.b64encode(hmac.new(bytes(utf8_key), utf8_data, hashlib.sha512).hexdigest().encode('utf8')).decode('utf8')\n\n curl_handle = pycurl.Curl()\n curl_handle.setopt(pycurl.POST, 1)\n curl_handle.setopt(pycurl.POSTFIELDS, str_data)\n curl_handle.setopt(curl_handle.URL, self.api_url + EP)\n curl_handle.setopt(curl_handle.HTTPHEADER, ['Api-Key: ' + self.api_key, 'Api-Sign: ' + utf8_api_sign, 'Api-Nonce: ' + nonce])\n curl_handle.setopt(curl_handle.WRITEFUNCTION, contents.write)\n curl_handle.setopt(pycurl.SSL_VERIFYPEER, False)\n curl_handle.setopt(pycurl.SSL_VERIFYHOST, False)\n curl_handle.perform()\n curl_handle.close()\n return json.loads(contents.getvalue().decode())\n\n#apicall\nclass ApiCall() :\n def __init__(self,Coin_List) :\n self.api = XCoinAPI()\n self.Coin_List = Coin_List\n self.Checker = 0\n self.mydict = Manager().dict()\n\n\n def Call(self) :\n start = time.time()\n for self.Coin in self.Coin_List :\n self.mydict[self.Coin] = {}\n self.pro = []\n #for self.Coin in self.Coin_List :\n # self.pro.append(Process(target=self.Odb, args = (self.Coin,)))\n self.pro.append(Process(target=self.Odb_All, args = (self.mydict,)))\n self.pro.append(Process(target=self.Ticker_All, args = (self.mydict,)))\n if (self.Checker % 100) == 0 :\n self.pro.append(Process(target=self.Account, args = (self.mydict,)))\n if (self.Checker % 3) == 0 :\n self.pro.append(Process(target=self.Balance, args = (self.mydict,)))\n self.Ran = len(self.pro)\n for i in range(0,self.Ran) :\n self.pro[i].daemon = True\n self.pro[i].start()\n for i in range(0,self.Ran) :\n self.pro[i].join()\n self.pro[i].terminate()\n self.end = round(time.time() - start,2)\n self.mydict[\"end\"] = self.end\n self.Checker += 1\n return self.mydict\n\n def Ticker_All(self,mydict) :\n self.result = self.api.xcoinApiCall(\"/public/ticker/All\",{}, str(time.time()*1000))\n if self.result['status'] == \"0000\" :\n for self.Coin in self.Coin_List :\n self.mydata = self.result[\"data\"][self.Coin.upper()]\n if self.mydict[self.Coin] == {} :\n mydict[self.Coin] = {\"ticker\" : dict( self.mydata, **{\"Message\" : None})}\n else :\n mydict[self.Coin] = dict(mydict[self.Coin], **{\"ticker\" : dict( self.mydata, **{\"Message\" : None})})\n else :\n for self.Coin in self.Coin_List :\n mydict[self.Coin] = {\"ticker\" : 
\"Error\"}\n\n def Odb_All(self,mydict) :\n self.result = self.api.xcoinApiCall(\"/public/orderbook/All\",{}, str(time.time()*1000))\n if self.result['status'] == \"0000\" :\n mydict[\"timestamp\"] = self.result[\"data\"][\"timestamp\"]\n for self.Coin in self.Coin_List :\n self.mydata = self.result[\"data\"][self.Coin.upper()]\n if self.mydict[self.Coin] == {} :\n mydict[self.Coin] = {\"orderbook\" : { \"asks\" : self.mydata[\"asks\"], \"bids\" : self.mydata[\"bids\"], \"Message\" : None }}\n else :\n mydict[self.Coin] = dict(mydict[self.Coin], **{\"orderbook\" : { \"asks\" : self.mydata[\"asks\"], \"bids\" : self.mydata[\"bids\"], \"Message\" : None }})\n else :\n for self.Coin in self.Coin_List :\n mydict[self.Coin] = dict(mydict[self.Coin], **{\"orderbook\" : { \"asks\" : \"Error\", \"bids\" : \"Error\", \"Message\" : self.result[\"message\"] }})\n\n def Balance(self,mydict) :\n self.result = self.api.xcoinApiCall(\"/info/balance\",{\"currency\" : \"ALL\"}, str(time.time()*1000))\n if self.result['status'] == \"0000\" :\n mydict[\"balance\"] = dict(self.result[\"data\"],**{ \"Message\" : None })\n else :\n mydict[\"balance\"] = \"Error;\"+str(self.result['message'])\n\n def Account(self,mydict) :\n self.result = self.api.xcoinApiCall(\"/info/account\",{}, str(time.time()*1000))\n if self.result['status'] == \"0000\" :\n mydict[\"account\"] = dict(self.result[\"data\"],**{ \"Message\" : None })\n else :\n mydict[\"account\"] = \"Error;\"+str(self.result['message'])\n\nif __name__ == \"__main__\" :\n ApiCall(['btc', 'eth', 'dash', 'ltc', 'etc', 'xrp', 'bch', 'xmr', 'zec', 'qtum', 'btg', 'eos'])\n","sub_path":"Bithumb_Api.py","file_name":"Bithumb_Api.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228737208","text":"# -*- coding: UTF-8 -*-\n\n\"\"\"\nThis file is part of Pondus, a personal weight manager.\nCopyright (C) 2007-11 Eike Nicklas \n\nThis program is free software licensed under the MIT license. For details\nsee LICENSE or http://www.opensource.org/licenses/mit-license.php\n\"\"\"\n\nimport pygtk\npygtk.require('2.0')\n\nimport gtk\n\nfrom pondus.core import parameters\n\n\ndef sort_function_weight(listmodel, iter1, iter2, data):\n \"\"\"Sorts the weight column correctly, i.e. 
interprets the weight\n data as floats instead of strings.\"\"\"\n weight1 = float(listmodel.get_value(iter1, 2))\n weight2 = float(listmodel.get_value(iter2, 2))\n return cmp(weight1, weight2)\n\n\ndef register_icons():\n \"\"\"Adds custom icons to the list of stock IDs.\"\"\"\n icon_info = {'pondus_plot': parameters.plot_button_path}\n iconfactory = gtk.IconFactory()\n stock_ids = gtk.stock_list_ids()\n for stock_id in icon_info:\n # only load image files when our stock_id is not present\n if stock_id not in stock_ids:\n icon_file = icon_info[stock_id]\n pixbuf = gtk.gdk.pixbuf_new_from_file(icon_file)\n iconset = gtk.IconSet(pixbuf)\n iconfactory.add(stock_id, iconset)\n iconfactory.add_default()\n\n\ndef get_tooltip(dataset):\n \"\"\"Returns a string containing the data of the optional parameters\n of the dataset.\"\"\"\n\n def append_to_tooltip(tooltip, text):\n if tooltip != '':\n tooltip += '\\n'\n return tooltip + text\n\n tooltip = ''\n if dataset.bodyfat is not None:\n text = _('Bodyfat') + ': ' + str(round(dataset.bodyfat, 1)) + '%'\n tooltip = append_to_tooltip(tooltip, text)\n if dataset.muscle is not None:\n text = _('Muscle') + ': ' + str(round(dataset.muscle, 1)) + '%'\n tooltip = append_to_tooltip(tooltip, text)\n if dataset.water is not None:\n text = _('Water') + ': ' + str(round(dataset.water, 1)) + '%'\n tooltip = append_to_tooltip(tooltip, text)\n if dataset.note is not None:\n text = _('Note') + ': ' + dataset.note\n tooltip = append_to_tooltip(tooltip, text)\n if tooltip == '':\n return None\n return tooltip\n","sub_path":"pondus/gui/guiutil.py","file_name":"guiutil.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227796129","text":"# Default Imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\nimport numpy as np\n\npath = 'data/ipl_matches_small.csv'\n\n# Enter Code Here\ndef get_total_extras():\n extras=0\n data=read_ipl_data_csv(path,\"|S50\")\n for i in range(0,data.shape[0]):\n extras+=int(data[i][17])\n\n return extras\n","sub_path":"q08_get_total_extras/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"562289580","text":"fin = open('short_house_list', 'r')\nlines = fin.readlines()\nname2info = dict()\nfor line in lines:\n\tls = line.split('\\t')\n\tif len(ls) == 7:\n\t\tname = line.split('\\t')[0]\n\t\tname2info[name] = line\nfin.close()\n\nfin = open('dict/house_id_match', 'r')\nfout = open('house_list_final', 'w')\nlines = fin.readlines()\nfor line in lines:\n\tname = line.split('\\t')[0]\n\ttid = line.split('\\t')[2]\n\ttid = tid.split('\\n')[0]\n\tif name in name2info:\n\t\tnewline = tid + '\\t' + name2info[name]\n\t\tfout.write(newline)\nfin.close()\nfout.close()\n","sub_path":"data/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"432667779","text":"'''\nImage Transformations\n1 Translate\n2 Rotate\n3 Resize\n4 Flip\n'''\nimport os \nimport cv2 as cv\nimport numpy as np \n\ndir_path = os.getcwd() + '/data/images/'\n\nimg=cv.imread(dir_path+'cat.jpeg')\ncv.imshow('Original cat',img)\n\n# x -->right\n# y -->down\n# -x -->left\n# -y -->up\n\n# Translation\ndef translate(img,x,y):\n\ttransMat=np.float32([[1,0,x],[0,1,y]])\n\tdimensions= (img.shape[1],img.shape[0])\n\treturn 
cv.warpAffine(img,transMat,dimensions)\n\n# Rotation\ndef rotate(img,angle,rotatePoint=None):\n\n\theight,width=img.shape[:2]\n\n\tif rotatePoint is None:\n\t\trotatePoint=(width/2,height/2)\n\n\trotMat=cv.getRotationMatrix2D(rotatePoint,angle,1.0)\n\tdimensions= (width,height)\n\treturn cv.warpAffine(img,rotMat,dimensions)\n\n\n# Translate\ntranslated=translate(img,100,100)\ncv.imshow('Translated',translated)\n\n# Rotate\nrotated=rotate(img,30)\ncv.imshow('Rotate',rotated)\n\n# Resize\nresized=cv.resize(img,(500,500),interpolation=cv.INTER_CUBIC)\ncv.imshow('Resized',resized)\n\n# Flip\n# 0 is vertical flip, 1 is horizontal flip, -1 is both\nflip=cv.flip(img,0)\ncv.imshow('flip',flip)\n\ncv.waitKey(0)","sub_path":"5 Transform.py","file_name":"5 Transform.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462492359","text":"\"\"\"\nTests the re-creation of object, using the UUID feature\n\"\"\"\nimport zproc\n\n\ndef test_random_addr():\n ctx = zproc.Context()\n ctx.state[\"foo\"] = 42\n\n ctx = zproc.Context(ctx.server_address)\n assert ctx.state.copy() == {\"foo\": 42}\n\n state = zproc.State(ctx.server_address)\n assert state.copy() == {\"foo\": 42}\n\n\nADDRESS = \"tcp://127.0.0.1:50000\"\n\n\ndef test_static_addr():\n zproc.start_server(ADDRESS)\n\n ctx = zproc.Context(ADDRESS)\n ctx.state[\"foo\"] = 42\n\n assert ctx.state.copy() == {\"foo\": 42}\n\n state = zproc.State(ADDRESS)\n assert state.copy() == {\"foo\": 42}\n","sub_path":"tests/test_server_address.py","file_name":"test_server_address.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594972297","text":"#coding:utf8\n\nfrom _base.orm import Model, Field\nfrom time import localtime, strftime\nfrom config.constant import DEFAULT_ROWS, PUBLISH\n\nclass Article(Model):\n \n id = Field()\n title = Field()\n summary = Field()\n content = Field()\n publishtime = Field()\n labelid = Field()\n status = Field()\n\n @classmethod\n def post(cls, title, summary, content, label_id, status, id=None):\n ''' post the article\n '''\n # set publish time of the article \n publishtime = strftime(\"%Y-%m-%d %H:%M:%S\", localtime())\n\n params = {'title':title, 'summary':summary, 'content':content, 'labelid':int(label_id), \\\n 'status':status, 'publishtime':publishtime}\n if id:\n Article().where(id=id).update(**params)\n else:\n Article(**params).save() \n\n @classmethod\n def get_articles_total_page(cls, rows=DEFAULT_ROWS, status=None, labelid=None):\n '''get total articles page by status\n '''\n total_records = 0\n if not status and not labelid:\n total_records = Article().where().count()[0]\n elif not labelid:\n total_records = Article().where(status=status).count()[0]\n else:\n total_records = Article().where(status=status, labelid=labelid).count()[0]\n total_page = total_records / rows\n # round up: one extra page when the count has a remainder (or there are no records yet)\n total_page = total_page + 1 if total_records % rows != 0 or total_records == 0 else total_page\n return total_page\n\n\n @classmethod\n def get_articles_limit(cls, current_page, status=None, rows=DEFAULT_ROWS, labelid=None):\n offset = (current_page - 1) * rows\n if status != None:\n if labelid:\n return Article().where(labelid=labelid, status=status) \\\n .order_by(('publishtime',),('desc',)) \\\n .limit(rows, offset).select_many()\n else: \n return 
Article().where(status=status).order_by(('publishtime',),('desc',)) \\\n .limit(rows, offset).select_many()\n else:\n return Article().where().order_by(('publishtime',),('desc',)) \\\n .limit(rows, offset).select_many()\n\n @classmethod\n def get_article(cls, id):\n return Article().where(id=id).select_one()\n\n","sub_path":"model/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266131947","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport Layer_class\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\nwith tf.Graph().as_default():\r\n BATCH_SIZE = 1\r\n N_CLASSES = 2\r\n val_src = 'images/validate/'\r\n filename = input('write file name that you wanna predict : ')\r\n\r\n image_file = Image.open(val_src + filename)\r\n image_file = image_file.resize([208, 208])\r\n plt.imshow(image_file)\r\n img_file1 = np.array(image_file)\r\n\r\n images = np.reshape(img_file1,newshape=[BATCH_SIZE,208,208,3])\r\n\r\n x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 208, 208, 3])\r\n keep_prob = tf.placeholder(tf.float32)\r\n\r\n logits = Layer_class._build_net(x, batch_size=BATCH_SIZE, n_classes=N_CLASSES, keep_prob=1)\r\n logits = tf.nn.softmax(logits)\r\n logs_train_dir = 'train/logs/'\r\n\r\n saver = tf.train.Saver()\r\n with tf.Session() as sess:\r\n print(\"Reading checkpoints...\")\r\n ckpt = tf.train.get_checkpoint_state(logs_train_dir)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\r\n saver.restore(sess, ckpt.model_checkpoint_path)\r\n print('Loading success, global_step is %s' % global_step)\r\n else:\r\n print('No checkpoint file found')\r\n prediction = sess.run(logits, feed_dict={x: images, keep_prob : 1})\r\n max_index = np.argmax(prediction)\r\n\r\n if max_index == 1 :\r\n print(\"Prediction : White\")\r\n if max_index == 0 :\r\n print(\"Prediction : Asian\")\r\n if max_index == 2 :\r\n print(\"Prediction : Black\")","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"519619702","text":"from django.db import models\nfrom django.db.models import fields\nfrom rest_framework import serializers\nfrom ..models import Products, Email\nfrom django.contrib.auth import get_user_model\nfrom ast import literal_eval\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Products\n fields = ['name', 'price', 'count_stock', 'brand']\n\nclass ProductCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Products\n fields = '__all__'\nclass ProductAddSerializer(serializers.ModelSerializer):\n userid = serializers.IntegerField()\n prodid = serializers.IntegerField()\n class Meta:\n model = Products\n fields = ['prodid', 'userid']\n def add_to_user(self):\n userid = self.validated_data['userid']\n prodid = self.validated_data['prodid']\n user = get_user_model().objects.get(id = userid)\n prod = Products.objects.get(id = prodid)\n count_check = prod.count_stock \n user.budget -= int(prod.price)\n if count_check < 1:\n res = \"Out of Stock\"\n return res\n elif user.budget < 0:\n res = \"Your Budget has been exceeded\"\n return res\n else:\n user.product_list.add(prod)\n prod.count_stock = count_check - 1\n prod.save()\n user.save()\n return True\n\nclass 
ProductReviewSerializer(serializers.ModelSerializer):\n review = serializers.CharField()\n prodid = serializers.IntegerField()\n class Meta:\n model = Products\n fields = ['prodid','review']\n \n def review_add(self):\n review = self.validated_data['review']\n prodid = self.validated_data['prodid']\n item = Products.objects.get(id=prodid)\n rlist = literal_eval(item.review_list)\n rlist.append(review)\n rstring = str(rlist)\n item.review_list = rstring\n item.save()\n\nclass AllReviewSerializer(serializers.ModelSerializer):\n class Meta:\n model = Products\n fields = ['review_list']\n\nclass ProductRateSerializer(serializers.ModelSerializer):\n rate = serializers.IntegerField()\n prodid = serializers.IntegerField()\n class Meta:\n model = Products\n fields = [ 'rate', 'prodid']\n\n def rate_prod(self):\n rate = self.validated_data['rate']\n prodid = self.validated_data['prodid']\n item = Products.objects.get(id=prodid)\n if rate > 0 and rate <= 5:\n new_rating = (item.rating + rate)/2\n item.rating = new_rating\n item.save()\n return True\n else:\n return False\n\n \nclass EmailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Email\n fields = '__all__'","sub_path":"products/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"434737143","text":"\"\"\"\n2019, day 5\n\"\"\"\nfrom day02 import read_intcode_into_dict\n\ndef update_pointer(pointer, opcode, new_pointer):\n if opcode in [1, 2, 7, 8]:\n pointer += 4\n elif opcode in [3, 4, 9]:\n pointer += 2\n elif opcode in [5, 6]:\n if pointer == new_pointer:\n pointer += 3\n else:\n pointer = new_pointer\n else:\n raise NotImplementedError\n return pointer\n\ndef retrieve_modes(raw_opcode):\n opcode = raw_opcode // 100\n return opcode % 10, (opcode // 10) % 10, opcode // 100\n\ndef read_opcode(raw_opcode):\n return raw_opcode % 100\n\ndef retrieve_destination(pointer, opcode, intcode):\n if opcode in [4, 5, 6]:\n return None\n elif opcode == 3:\n return intcode[pointer + 1]\n else:\n return intcode[pointer + 3]\n\ndef apply_parameter(pos, mode, intcode):\n #print(f'parameter: mode {mode} at position {pos}')\n if mode == 0:\n return intcode[intcode[pos]]\n else:\n return intcode[pos]\n\ndef retrieve_inputs(pointer, opcode, intcode):\n modes = retrieve_modes(intcode[pointer])\n inp1 = None\n inp2 = None\n if opcode != 3:\n inp1 = apply_parameter(pointer+1, modes[0], intcode)\n if opcode != 4:\n inp2 = apply_parameter(pointer+2, modes[1], intcode)\n return inp1, inp2\n\ndef run_program(intcode, inp0, pointer=0):\n opcode = read_opcode(intcode[pointer])\n out = None\n inp0_cnt = 0 # used for part7\n while opcode != 99:\n new_pointer = None\n to_store = None\n # eval inputs\n (inp1, inp2) = retrieve_inputs(pointer, opcode, intcode)\n # eval dest\n dest = retrieve_destination(pointer, opcode, intcode)\n # sum or factor two inputs\n if opcode in [1, 2]:\n to_store = inp1 + inp2 if opcode == 1 else inp1 * inp2\n # take input\n elif opcode == 3:\n if isinstance(inp0, int):\n to_store = inp0\n else:\n try:\n to_store = inp0[inp0_cnt]\n inp0_cnt += 1\n except IndexError:\n # we need a new input... 
exit loop, current pointer is returned\n break\n # give output\n elif opcode == 4:\n out = inp1\n # jump if true / jump if false\n elif opcode in [5, 6]:\n if opcode == 5 and inp1 != 0:\n new_pointer = inp2\n elif opcode == 6 and inp1 == 0:\n new_pointer = inp2\n else:\n new_pointer = pointer\n # less than or equal\n elif opcode in [7, 8]:\n cond_met = inp1 < inp2 if opcode == 7 else inp1 == inp2\n to_store = 1 if cond_met else 0\n else:\n raise NotImplementedError\n if to_store is not None:\n intcode[dest] = to_store\n\n pointer = update_pointer(pointer, opcode, new_pointer)\n opcode = read_opcode(intcode[pointer])\n #if out is not None and out != 0 and opcode != 99:\n # raise ValueError\n\n return out, pointer, opcode == 99\n\ndef part1(content):\n raw_intcode = read_intcode_into_dict(content)\n (out, _, _) = run_program(raw_intcode, 1)\n print(f'Part 1: Diagnostic code: {out}')\n\ndef part2(content):\n raw_intcode = read_intcode_into_dict(content)\n (out, _, _) = run_program(raw_intcode, 5)\n print(f'Part 2: Diagnostic code: {out}')\n","sub_path":"2019/day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"292069289","text":"LOWER = 33\nUPPER = 127\n\ndef main():\n for i in range(LOWER, UPPER):\n print(\"{:>3} {}\".format(i,chr(i)))\n entered_character = str(input(\"Enter a character: \"))\n print(\"The ASCII code for {0} is {1}\".format(entered_character,ord(entered_character)))\n entered_number = int(input(\"Enter a number between {0} and {1}: \".format(LOWER, UPPER)))\n print(\"The character for {0} is {1}\".format(entered_number,chr(entered_number)))\n\nmain()","sub_path":"prac_02/ascii_table/ascii_table.py","file_name":"ascii_table.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"516212359","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author: anne\n# datetime: 2020-12-02 09:39\n# filename: PythonProjectDemo/requests_user\n\n\nimport json\nimport logging\nimport time\n\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\nFORMAT = '%(asctime)s--%(levelname)s: %(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\n\n\ndef decorator(func):\n def wrapper(*params):\n print('%s方法 is running...' 
% func.__name__)\n func(*params)\n\n return wrapper\n\n\n@decorator\ndef get(host, url, args=None):\n r = requests.get(host + url, params=args)\n if r.status_code == 200:\n logging.info(r.json())\n else:\n logging.info('API returned an error')\n\n\n# with a proxy\n@decorator\ndef get_proxy(host, url, proxy):\n r = requests.get(host + url, proxies=proxy)\n logging.info(r.text)\n\n\n# with basic auth\n@decorator\ndef get_auth(host, url):\n r = requests.get(host + url, auth=HTTPBasicAuth('user', 'password'))\n logging.info(r.text)\n\n\n@decorator\ndef get_header(host, url, header):\n r = requests.get(host + url, headers=header)\n logging.info(r.text)\n\n\n@decorator\ndef post_data(host, url, params):\n if isinstance(params, dict):\n data = json.dumps(params) # convert the dict into a JSON string\n r = requests.post(host + url, data=data)\n logging.info(r.text)\n else:\n r = requests.post(host + url, data=params)\n # logging.info(f'{params} is not a dict')\n logging.info(r.text)\n\n\n@decorator\ndef post_json(host, url, params):\n before = time.time()\n r = requests.post(host + url, data=params, timeout=1) # a list of tuples is form-encoded by requests\n after = time.time()\n if r.status_code == 200:\n logging.info(r.text)\n logging.info(f'request took {after - before}s')\n else:\n r.raise_for_status()\n\n\n@decorator\ndef put(host, url, data):\n r = requests.put(host + url, data=data)\n logging.info(r.text)\n print(r.headers, r.cookies.get_dict()) # get headers and cookies\n\n\n@decorator\ndef delete(host, url, data):\n r = requests.delete(host + url, data=data)\n logging.info(r.content) # returns binary content, typically image/video/audio streams\n\n\ndef session():\n s = requests.Session() # keep certain parameters across requests\n s.get(\"https://httpbin.org/cookies/set/sessioncookie/anneyang\")\n r = s.get(\"https://httpbin.org/cookies\")\n logging.info(r.text)\n\n\nif __name__ == '__main__':\n host = 'https://httpbin.org/'\n args = {'name': 'anne', 'age': 20, 'attributes': ['eat', 'sing', 'run']}\n args1 = [('key1', 'value1'), ('key2', 'value2')]\n header = {'user_agent': 'my-app/0.0.1'}\n proxies = {'http': 'http://127.0.0.1:8080',\n 'https': 'http://127.0.0.1:8080'}\n\n # get(host, 'ip')\n # get(host, 'get', args)\n # get_header(host, 'get', header)\n # # get_proxy(host, 'get', proxies)\n # get_auth(host, 'user')\n # post_data(host, 'post', args)\n # post_json(host, 'post', args1)\n # put(host, 'put', args)\n # delete(host, 'delete', args)\n session()\n","sub_path":"UnittestFramework/requests_use.py","file_name":"requests_use.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298305051","text":"from twitter import api\nfrom urlShortener import urlShortener\nfrom logger import logger\nimport random \n\nclass Tweet():\n def __init__ (self):\n self.title = None\n self.link = None\n self.hashtags = None\n self.shortlink = None\n \n def _getTweetFormat(self):\n # 0 = title, 1 = hashtags, 2 = shortlink\n formatArray = [\n '{0} | {1} | {2}',\n '{0} {1} {2}',\n '{0} {1}\\n\\n{2}',\n '{0}\\n\\n{1}\\n\\n{2}',\n '{0}\\n\\n{1}\\n{2}'\n ]\n \n return random.choice(formatArray)\n \n def generate(self):\n self.shortlink = urlShortener.shorten(self.link)\n message = self._getTweetFormat().format(self.title, self.hashtags, self.shortlink)\n return message\n \n def getLength(self):\n return len('{} {}| '.format(self.title, self.hashtags)) + 20\n \n def tweet(self):\n api.tweet(self.generate())","sub_path":"twitter/Tweet.py","file_name":"Tweet.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"574705992","text":"import os\nimport requests\n\nclass GiantbombAPI():\n \"\"\"Simple implementation of the Giantbomb API that only offers GET /platforms/ call as a generator.\"\"\"\n\n base_url = \"http://www.giantbomb.com/api\"\n\n def __init__(self, api_key):\n self.api_key = api_key\n\n def get_platforms(self, sort=None, filter=None, field_list=None):\n \"\"\"Generator yielding platforms matching given criteria. Returns all platforms if no limit is specified.\"\"\"\n\n #Convert dictionary of criteria into a comma-separated list of key:value pairs.\n params = {}\n if sort is not None:\n params['sort'] = sort\n if field_list is not None:\n params['field_list'] = field_list\n if filter is not None:\n params['filter'] = filter\n parsed_filters = []\n for key, value in filter.iteritems():\n parsed_filters.append('{0}:{1}', format(key, value))\n params['filter'] = ','.join(parsed_filters)\n\n #Finally, append our API key to the list of params and tell the API to transfer the data as JSON.\n params['api_key'] = self.api_key\n params['format'] = 'json'\n\n incomplete_result = True\n num_total_results = None\n num_fetched_results = 0\n counter = 0\n\n while incomplete_result:\n params['offset'] = num_fetched_results\n result = requests.get(self.base_url + '/platforms/', params=params)\n result = result.json()\n \n if num_total_results is None:\n num_total_results = int(result['number_of_total_results'])\n num_fetched_results += int(result['number_of_total_results'])\n\n if num_fetched_results >= num_total_results:\n incomplete_result = False\n\n for item in result['results']:\n logging.debug(\"Yielding platform {0} of {1}\".format(counter + 1, num_total_results))\n \n #Convert values into a more useful format where appropriate.\n if 'original_price' in item and item['original_price']:\n item['original_price'] = float(item['original_price'])\n\n yield item\n counter += 1","sub_path":"Python-API-Testing/GiantbombAPI.py","file_name":"GiantbombAPI.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"322161449","text":"class Solution:\n def uniquePaths(self, m, n):\n import pdb;pdb.set_trace()\n dp = [[1]*n] + [[1]+[0] * (n-1) for _ in range(m-1)]\n print(dp)\n for i in range(1, m):\n for j in range(1, n):\n dp[i][j] = dp[i-1][j]+dp[i][j-1]\n return dp[-1][-1]\n\n\nins = Solution()\nprint(ins.uniquePaths(5, 4))\n","sub_path":"62.py","file_name":"62.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113139332","text":"\n\nfrom xai.brain.wordbase.nouns._convertible import _CONVERTIBLE\n\n#calss header\nclass _CONVERTIBLES(_CONVERTIBLE, ):\n\tdef __init__(self,): \n\t\t_CONVERTIBLE.__init__(self)\n\t\tself.name = \"CONVERTIBLES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"convertible\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_convertibles.py","file_name":"_convertibles.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121594020","text":"# 버블 정렬 알고리즘 구현-개선2(이미 정렬된 원소를 제외한 나머지만 비교,교환하도록 스캔 범위를 제한함)\n\nfrom typing import MutableSequence\n\n\ndef bubble_sort_verbose(a: MutableSequence) -> None:\n \"\"\"버블 정렬(스캔 범위를 제한)\"\"\"\n ccnt = 0 # 비교 횟수\n scnt = 0 # 교환 횟수\n n = len(a)\n k = 0\n while k < n - 1:\n last = n - 1\n exchng = 0\n print(f'패스 {k + 1}')\n for j in range(n - 1, k, 
-1):\n for m in range(0, n - 1):\n print(f'{a[m]:2}' + (' ' if m != j - 1 else\n ' +' if a[j - 1] > a[j] else ' -'), end='')\n print(f'{a[n - 1]:2}')\n ccnt += 1\n if a[j - 1] > a[j]:\n scnt += 1\n a[j - 1], a[j] = a[j], a[j - 1]\n last = j\n exchng += 1\n k = last\n if exchng == 0:\n break\n for m in range(0, n - 1):\n print(f'{a[m]:2}', end=' ')\n print(f'{a[n - 1]:2}')\n print(f'Performed {ccnt} comparisons.')\n print(f'Performed {scnt} swaps.')\n\n\nif __name__ == '__main__':\n print('Running bubble sort.')\n num = int(input('Enter the number of elements: '))\n x = [None] * num # create an array with num elements\n\n for i in range(num):\n x[i] = int(input(f'x[{i}]: '))\n\n bubble_sort_verbose(x) # bubble-sort the array x\n print('Sorted in ascending order.')\n for i in range(num):\n print(f'x[{i}] = {x[i]}')\n","sub_path":"Do_it!/6.정렬 알고리즘/1.버블정렬/개선2-스캔범위제한.py","file_name":"개선2-스캔범위제한.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301200759","text":"class GTCTelescope():\n\n __instrument_default = \"OSIRIS\"\n\n __params={\n \"OSIRIS\":{\n \"gain\":0.95, #e-/ADU\n \"readnoise\": 4.5, #15-,\n \"readtime\": 21.0, # 1.45 S(Non-destructive)\n \"pixelscale\": 0.127*2, #0.184 arcsec/pixel (unbinned)\n \"telescope\":\"gtc\"\n }\n }\n\n __key_band = {\"mag_u\": \"sdss-u\", \"mag_g\": \"sdss-g\", \"mag_r\": \"sdss-r\", \"mag_i\": \"sdss-i\", \"mag_z\": \"sdss-z\"}\n\n def getBand(self,band):\n band = str(band).lower()\n if band in self.__key_band.keys():\n return self.__key_band[band]\n elif band in self.__key_band.values():\n for mag, filter in self.__key_band.items():\n if filter == band:\n return mag\n else:\n return None\n\n @staticmethod\n def getParam(key):\n\n lv=GTCTelescope()\n\n if key in lv.__params:\n return lv.__params[key]\n else:\n raise KeyError(\"Not found: \"+key)\n\n @staticmethod\n def getParams(instrument=\"all\"):\n lv = GTCTelescope()\n if instrument == \"all\":\n return lv.__params\n else:\n instrument= instrument.replace(\":\",\"\")\n if instrument in lv.__params:\n return lv.__params[instrument]\n else:\n raise KeyError(\"Not found: \"+instrument)","sub_path":"telescopes/gtc.py","file_name":"gtc.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630751117","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\"\"\"\n\nFollow-sell maintenance\n\nIf both an initial floor price and a minimum floor price are set, the minimum floor price takes precedence when judging; if no minimum floor price is set, the initial floor price is used.\n\n\"\"\"\n\nimport traceback\nfrom fun import *\n\nstime = int(time.time())\ncdir = os.path.dirname(os.path.abspath(__file__))\nlogfile = cdir + \"/0gmwh.log\"\n\nlogtxt = \"{}\\n{}\\n\\n\".format(__file__, now())\nfile_write(logfile, logtxt, 'w')\n\nUARGS = sys.argv\n\nDIC['pool'] = mysqlc.pool(CONFIG, 3)\ndb = DIC['pool'].get()\n\ndb.query('SET wait_timeout=7200')\nDIC['db'] = db\nDIC['url_id'] = 0\nDIC['fp'] = 0\nDIC['country_filter'] = 1 # filter by shipping origin\nDIC['sform'] = {}\nDIC['referer'] = 'https://www.cdiscount.com'\nDIC['filter'] = ''\n\n# --------------------------------------------------------------------\n\ncond = 'where gm.status=2 and caiji=1 and p.addtime < {}'.format(days_before(1))\n# cond = \"where gm.id in(7749)\"\n\n# scrape a single P_ID given on the command line\nif len(UARGS) == 2 and UARGS[1].isdigit():\n cond = \"where gm.id={}\".format(UARGS[1])\n\nsql0 = \"\"\"\nselect count(*) from t_gm gm\nLEFT JOIN `cd_product` p ON p.`id` = gm.`pro_id`\n{}\n\"\"\".format(cond)\n\nsql1 = \"\"\"\nselect gm.id, s.store_code, p.id pid, p.cd_url, p.addtime from t_gm gm\nLEFT JOIN cd_product p ON p.id = gm.pro_id\nLEFT JOIN 
t_store s ON s.id = gm.store\n{}\nlimit 10\n\"\"\".format(cond)\n\nprint(sql1)\n\nnum = db.field(sql0)\npagesize = 10\npages = math.ceil(num / pagesize)\nprint(\"Total: {}, pages: {}\\n\".format(num, pages))\n\nfor i in range(1, pages + 1):\n rows = db.fetch(sql1)\n if not rows: continue\n args = [ [a[3], {}, 0, 0, a[1]] for a in rows ]\n # print(i, args)\n print(rows)\n\n threads_run(parse_product_page, args) # update product info in the cd_product table\n\n for row in rows:\n cond1 = \"where gm.id='{}' limit 1\".format(row[0])\n zds = \"gm.id, p.id, p.gm_price, gm.price_gm, gm.price_d, gm.min_dprice\"\n ret = db.one(\"select {} from t_gm gm LEFT JOIN `cd_product` p ON p.`id` = gm.`pro_id` {}\".format(zds, cond1), 1)\n # print(ret)\n\n cjgm_price = float(ret[\"gm_price\"]) # lowest follow-sell price scraped\n price_gm = float(ret[\"price_gm\"]) # follow-sell price entered when listing\n price_d = float(ret[\"price_d\"]) # floor price entered when listing\n min_dprice = float(ret[\"min_dprice\"]) # minimum floor price entered\n\n # if gm_price<=0: continue # when there are no follow-sell sellers\n\n ubase_price = price_d\n if min_dprice > 0: ubase_price = min_dprice\n\n gm_status = 1\n if cjgm_price > 0:\n\n print(price_gm, cjgm_price)\n\n if ubase_price > cjgm_price: # floor price above the lowest scraped follow-sell price: status = below floor price\n gm_status = 3\n elif price_gm > cjgm_price: # our follow-sell price above the lowest scraped price: status = no buy box\n gm_status = 2\n\n dd = {}\n dd['gm_status'] = gm_status\n cond2 = \"id='{}' limit 1\".format(row[0])\n db.update(\"t_gm\", dd, cond2, 0) # update the record in the follow-sell table\n\n print(\"Main loop {}: sleeping {}s\".format(i, 10))\n time.sleep(10)\n\nDIC['pool'].empty()\n\nrun_time = int(time.time()) - stime\nif run_time > 3600: run_time = datetime.timedelta(seconds=run_time)\n\nprint(\"Elapsed time: {} \".format(run_time ))\n\nlogtxt = \"Total time: {}, memory: {} \\n\".format(run_time, mem() )\nlogtxt += now() + \"\\n\"\nfile_write(logfile, logtxt, 'a')\n\n\n","sub_path":"cdiscount/gmwh.py","file_name":"gmwh.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312980357","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport sys\n\ndataset_dir = \"./img/\"\n \ndef hog_func():\n argv = sys.argv\n argc = len(argv)\n if (argc != 2):\n # check that exactly one argument was given\n # if not, print a usage message and exit\n print('Usage: python %s arg1' % argv[0])\n quit()\n # load the image\n img_name = dataset_dir + argv[1]\n im = cv2.imread(img_name)\n # compute HoG features\n hog = cv2.HOGDescriptor()\n # person detection using an SVM\n hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}\n # coordinates of detected people\n human, r = hog.detectMultiScale(im, **hogParams)\n # draw a rectangle around each detected person\n for (x, y, w, h) in human:\n cv2.rectangle(im, (x, y),(x+w, y+h),(0,50,255), 3)\n # show the detection result\n cv2.imshow(\"Human detection\",im)\n cv2.waitKey(0)\n # save the image\n out_file_name = \"after\" + argv[1]\n cv2.imwrite(out_file_name,im)\n print(\"saved\")\n\nif __name__ == '__main__':\n hog_func()\n\n","sub_path":"traincascade03.py","file_name":"traincascade03.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442103410","text":"#!/usr/bin/env python\n# license removed for brevity\n\nimport rospy\nimport random\n# Brings in the SimpleActionClient\nimport actionlib\n# Brings in the .action file and messages used by the move base action\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom pedsim_msgs.msg import AgentStates\n\ndef movebase_client():\n # Create an action client called \"move_base\" with action definition file \"MoveBaseAction\"\n client = 
actionlib.SimpleActionClient('move_base',MoveBaseAction)\n\n # Waits until the action server has started up and started listening for goals.\n client.wait_for_server()\n\n # Select a pedsim pedestrian\n data = rospy.wait_for_message(\"/pedsim_simulator/simulated_agents\", AgentStates)\n n = len(data.agent_states)\n ped_n = random.randint(1,n)\n ped_x = data.agent_states[ped_n-1].pose.position.x\n ped_y = data.agent_states[ped_n-1].pose.position.y\n print(\"Pedestrian Info: x = %.2f, y = %.2f\" % (ped_x, ped_y))\n\n # Creates a new goal with the MoveBaseGoal constructor\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = \"odom\"\n goal.target_pose.header.stamp = rospy.Time.now()\n # Move 0.5 meters forward along the x axis of the \"map\" coordinate frame\n goal.target_pose.pose.position.x = ped_x\n goal.target_pose.pose.position.y = ped_y\n # No rotation of the mobile base frame w.r.t. map frame\n goal.target_pose.pose.orientation.w = 1.0\n\n # Sends the goal to the action server.\n client.send_goal(goal)\n # Waits for the server to finish performing the action.\n wait = client.wait_for_result()\n # If the result doesn't arrive, assume the Server is not available\n if not wait:\n rospy.logerr(\"Action server not available!\")\n rospy.signal_shutdown(\"Action server not available!\")\n else:\n # Result of executing the action\n return client.get_result()\n\n# If the python node is executed as main process (sourced directly)\nif __name__ == '__main__':\n try:\n # Initializes a rospy node to let the SimpleActionClient publish and subscribe\n rospy.init_node('movebase_client_py')\n result = movebase_client()\n if result:\n rospy.loginfo(\"Goal execution done!\")\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Navigation test finished.\")\n","sub_path":"src/development/scripts/old/ped_goal_v1.py","file_name":"ped_goal_v1.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266251099","text":"from PIL import Image\nfrom PIL import ImageChops\nfrom sys import argv,exit\n \ndef compare_images(path_one, path_two):\n \"\"\"\n Compares to images and saves a diff image, if there\n is a difference\n \n @param: path_one: The path to the first image\n @param: path_two: The path to the second image\n \"\"\"\n image_one = Image.open(path_one)\n image_two = Image.open(path_two)\n \n diff = ImageChops.difference(image_one, image_two)\n pixels = list(diff.getdata())\n\n score = 0\n for p in pixels:\n r,g,b = p\n if r+g+b > 0:\n score += 1\n\n return score\n \n \nif __name__ == '__main__':\n if len(argv) < 3:\n exit()\n else:\n img1 = argv[1]\n img2 = argv[2]\n score = compare_images(img1,img2)\n print(score)","sub_path":"assignments/meme_generator/www/html/scripts/compare_image.py","file_name":"compare_image.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307504621","text":"#coding: utf-8\nimport sys\nimport re\nfrom collections import defaultdict\nmorpheme = []\nsentences = []\nfor line in open(\"neko.txt.mecab\"):\n if \"EOS\" not in line:\n line_s = re.split(\"\\s|,\", line)\n morpheme.append({\"surface\": line_s[0], \"base\":line_s[6], \"pos\":line_s[1], \"pos1\":line_s[2]})\n elif morpheme != []: \n sentences.append(morpheme)\n morpheme = []\ncount = defaultdict(int)\nfor sentence in sentences:\n for word in sentence:\n count[word[\"surface\"]] += 1\nfor k, v in sorted(count.items(), key = 
lambda x: x[1], reverse = True):\n print(\"{} {}\".format(k, v))\n","sub_path":"tomoya/chapter04/knock36.py","file_name":"knock36.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25711883","text":"from aoc_parser import Parser\n\n\ndef main():\n file_name = 'data'\n frequencies = Parser('{}.txt'.format(file_name)).int_lines()\n # Part 1: 540\n print('Part 1: {}'.format(sum(frequencies)))\n # Part 2: 73056\n print('Part 2: {}'.format(get_first_repeated(frequencies)))\n\n\ndef get_first_repeated(values):\n seen = set()\n result, i = 0, 0\n while True:\n if result in seen:\n return result\n seen.add(result)\n value = values[i % len(values)]\n result += value\n i += 1\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2018/01/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529806886","text":"#!/usr/bin/python3\n\"\"\"\n Exam from Week 2 (MITx: 6.00.1x): \n Introduction to Computer Science and Programming Using Python\n\n INFO:\n - When submitting the exercise, remove the lines that contains '###'\n (variables and prints). \n\n PROBLEM\n - Write a program to calculate the credit card balance \n after one year if a person only pays the minimum monthly payment\n required by the credit card company each month.\n\n FLOW\n - For each month, calculate statements on the monthly payment and\n remaining balance. At the end of 12 months, print out the remaining\n balance. Be sure to print out no more than two decimal digits of accuracy\n\n INPUTS\n - balance: the outstanding balance on the credit card\n - annualInterestRate: annual interest rate as a decimal\n - monthlyPaymentRate: minimum monthly payment rate as a decimal\n\n\"\"\"\n\nbalance = 42 ###\nannualInterestRate = 0.2 # annual interest ###\nmonthlyPaymentRate = 0.04 # % to pay, per month\nmonthlyInterestRate = annualInterestRate / 12.0 # annual interest, per month\n\nprevious_balance = balance\n\nprint(\"\\nINITIAL BALANCE:\", previous_balance) ###\n\nfor m in range(1, 13):\n print(\"MONTH\", m) ###\n minimum_payment = monthlyPaymentRate * previous_balance\n print(\" - minimum payment:\", minimum_payment) ###\n unpaid_balance = previous_balance - minimum_payment\n print(\" - unpaid balance:\", unpaid_balance) ###\n # calculating annual interest for the new balance... 
\n previous_balance = unpaid_balance + (monthlyInterestRate * unpaid_balance)\n print(\" - new balance\", previous_balance, \"\\n\") ###\n\nprint(\"Remaining balance: {:.2f}\".format(previous_balance))\n\n\n","sub_path":"edX/pset_02/week2_credit_card_balance.py","file_name":"week2_credit_card_balance.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526402048","text":"import serial\nqte = input().split()\nif len(qte) > 4:\n qte = qte[:4]\nelif len(qte) < 4:\n while len(qte) < 4:\n qte.append('0')\n qte[-1] = '13'\nqte[0] = '0'\nser = serial.Serial('COM3', 9600)\ns = ser.read(100)\n\n\nfor i in range(len(qte)):\n ser.write(int(qte[i]))\n\n\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"158124684","text":"from copy import deepcopy\nfrom reparser.regex_parser import parse\nfrom query import Query, allQuery\n\n\ndef cross(s, t):\n return {x + y for x in s for y in t}\n\n\ndef get_trigrams(raw_str):\n prev = raw_str[:3]\n if len(prev) == 3:\n yield prev\n for j in range(3, len(raw_str)):\n prev = prev[1:] + raw_str[j]\n yield prev\n\n\n# print(list(get_trigrams(\"ab\")))\n\n\ndef freeQuery(tree):\n # if tree is None:\n # return []\n\n re_type = tree[\"type\"]\n if re_type == \"literal\":\n trigram_list = list(get_trigrams(tree[\"value\"]))\n\n if len(trigram_list):\n return Query(o=Query.QAnd, trigram=trigram_list)\n else:\n return deepcopy(allQuery)\n\n if re_type == \"concat\":\n q = freeQuery(tree[\"value\"][0])\n r = freeQuery(tree[\"value\"][1])\n return q.andOr(r, Query.QAnd)\n # return {\"AND\": [freeQuery(tree[\"value\"][0]), freeQuery(tree[\"value\"][1])]}\n\n if re_type == \"union\":\n q = freeQuery(tree[\"value\"][0])\n r = freeQuery(tree[\"value\"][1])\n return q.andOr(r, Query.QOr)\n # return {\"OR\": [freeQuery(tree[\"value\"][0]), freeQuery(tree[\"value\"][1])]}\n\n if re_type == \"repetition\":\n return deepcopy(allQuery)\n\n\nif __name__ == \"__main__\":\n tests = [\n (r\"(abcd|efgh)(ijklm|x*)\", '(\"abc\" \"bcd\")|(\"efg\" \"fgh\")'),\n (r\"(abc|cba)def\", '(\"abc\" | \"cba\") \"def\"'),\n (r\"abc+de\", \"+\"),\n (r\"(abc*)+de\", \"+\"),\n (r\"ab(cd)*ef\", \"+\"),\n (r\"abc|def\", '(\"abc\" | \"def\")')\n ]\n\n for test in tests:\n print(\"test:\", test[0])\n parse_tree = parse(test[0])\n # print(\"tree:\", parse_tree)\n print(\"actual:\", freeQuery(tree=parse_tree))\n print(\"expected:\", test[1])\n","sub_path":"requery/free.py","file_name":"free.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"408595014","text":"from django.shortcuts import render\nfrom django.http import Http404\nfrom .models import WallImage, Catlog, HomePage\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.db.models import Q\n\n\ndef home(request):\n # home page top image\n random_head_image = HomePage.objects.order_by('?')[:1]\n # end\n catlog_item = Catlog.objects.order_by('?')[:8]\n popular_item = Catlog.objects.order_by('?')[:7]\n latest_uploaded = WallImage.objects.order_by('?')[:30]\n images_data = WallImage.objects.order_by('-id')[:30]\n context = {\n 'popular_item':popular_item,\n 'catlog_item': catlog_item,\n 'random_head_image': 
random_head_image,\n 'latest_uploaded': latest_uploaded,\n 'images_data': images_data,\n }\n return render(request, 'wall/index.html', context)\n\n\ndef images(request):\n catlog_small = Catlog.objects.order_by('?')[:18]\n catlog_items = Catlog.objects.order_by('id')[:29]\n images_data = WallImage.objects.order_by('-id')\n paginator = Paginator(images_data, 30)\n page = request.GET.get('page')\n images_data = paginator.get_page(page)\n context = {\n 'catlog_items': catlog_items,\n 'catlog_small': catlog_small,\n 'images_data': images_data,\n }\n return render(request, 'wall/images.html', context)\n\n\ndef picture(request, pic_id):\n catlog_item = Catlog.objects.order_by('?')[:18]\n picture_data = WallImage.objects.get(id=pic_id)\n # remove after installing search similar items\n random_image = WallImage.objects.order_by('?')[:15]\n context = {\n 'catlog_item': catlog_item,\n 'picture_data': picture_data,\n 'random_image': random_image\n }\n return render(request, 'wall/pic.html', context)\n\n\ndef catlog(request):\n catlog_item = Catlog.objects.order_by('id')\n paginator = Paginator(catlog_item, 21)\n page = request.GET.get('page')\n catlog_item = paginator.get_page(page)\n context = {\n 'catlog_item': catlog_item\n }\n return render(request, 'wall/catlog.html', context)\n\n\ndef explore(request):\n catlog_small = Catlog.objects.order_by('?')[:18]\n catlog_items = Catlog.objects.order_by('?')[:29]\n images_data = WallImage.objects.order_by('?')\n paginator = Paginator(images_data, 30)\n page = request.GET.get('page')\n images_data = paginator.get_page(page)\n context = {\n 'catlog_items': catlog_items,\n 'catlog_small': catlog_small,\n 'images_data': images_data,\n }\n return render(request, 'wall/explore.html', context)\n\ndef catlog_search(request, cat_item):\n catlog_small = Catlog.objects.order_by('?')[:18]\n catlog_items = Catlog.objects.order_by('id')[:29]\n template = 'wall/catlog_search.html'\n queryset_list = WallImage.objects.all()\n query = cat_item\n if query == '':\n return HttpResponseRedirect('/')\n else:\n if query:\n queryset_list = queryset_list.filter(\n Q(image_tags__icontains=query) |\n Q(image_type__icontains=query)\n ).order_by('-id')\n random_search = WallImage.objects.order_by('?')[:1]\n paginator = Paginator(queryset_list, 30)\n page = request.GET.get('page')\n result = paginator.get_page(page)\n context = {\n 'catlog_items': catlog_items,\n 'catlog_small': catlog_small,\n 'result': result,\n 'random_search': random_search,\n }\n return render(request, template, context)\n else:\n messages.error(request, 'no results found')\n\n\ndef search(request):\n template = 'wall/search.html'\n queryset_list = WallImage.objects.all()\n query = request.GET.get('q')\n if query == '':\n return HttpResponseRedirect('/')\n else:\n if query:\n queryset_list = queryset_list.filter(\n Q(image_type__icontains=query) |\n Q(image_tags__icontains=query)\n\n ).order_by('-id')\n catlog_small = Catlog.objects.order_by('?')[:18]\n catlog_items = Catlog.objects.order_by('id')[:29]\n paginator = Paginator(queryset_list, 30)\n page = request.GET.get('page')\n result = paginator.get_page(page)\n context = {\n 'catlog_items': catlog_items,\n 'catlog_small': catlog_small,\n 'result': result,\n }\n return render(request, template, context)\n else:\n messages.error(request, 'no results 
found')\n","sub_path":"wall/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79629913","text":"import os\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imread\nfrom scipy.misc import imsave\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.nets import inception\n\nfrom urllib.request import urlretrieve\nimport os\nimport json\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\n\nmodel = load_model('model_weights.h5')\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n\n# 重新定义数据格式,归一化\nx_train = x_train.reshape(x_train.shape[0], -1) / 255.0\nx_test = x_test.reshape(x_test.shape[0], -1) / 255.0\n\n# # 转one-hot标签\ny_train = np_utils.to_categorical(y_train, num_classes=10)\ny_test = np_utils.to_categorical(y_test, num_classes=10)\n\n\ndef fgm(x, preds, y=None, eps=0.3, ord=np.inf, clip_min=None, clip_max=None,targeted=False):\n \"\"\"\n TensorFlow implementation of the Fast Gradient Method.\n :param x: the input placeholder\n :param preds: the model's output tensor (the attack expects the\n probabilities, i.e., the output of the softmax)\n :param y: (optional) A placeholder for the model labels. If targeted\n is true, then provide the target label. Otherwise, only provide\n this parameter if you'd like to use true labels when crafting\n adversarial samples. Otherwise, model predictions are used as\n labels to avoid the \"label leaking\" effect (explained in this\n paper: https://arxiv.org/abs/1611.01236). Default is None.\n Labels should be one-hot-encoded.\n :param eps: the epsilon (input variation parameter) 邻域\n :param ord: (optional) Order of the norm (mimics NumPy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: Minimum float value for adversarial example components\n :param clip_max: Maximum float value for adversarial example components\n :param targeted: Is the attack targeted or untargeted? Untargeted, the\n default, will try to make the label incorrect. 
Targeted\n will instead try to move in the direction of being more\n like y.\n :return: a tensor for the adversarial example\n \"\"\"\n\n # if y is None:\n # # Using model predictions as ground truth to avoid label leaking\n # preds_max = tf.reduce_max(preds, 1, keep_dims=True)\n # y = tf.to_float(tf.equal(preds, preds_max))\n # y = tf.stop_gradient(y)\n # y = y / tf.reduce_sum(y, 1, keep_dims=True)\n\n # Compute loss\n # loss = utils_tf.model_loss(y, preds, mean=False)\n loss = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=preds)\n # if targeted:\n # loss = -loss\n\n # Define gradient of loss wrt input\n grad, = tf.gradients(loss, x)\n\n if ord == np.inf:\n # Take sign of gradient\n normalized_grad = tf.sign(grad)\n # The following line should not change the numerical results.\n # It applies only because `normalized_grad` is the output of\n # a `sign` op, which has zero derivative anyway.\n # It should not be applied for the other norms, where the\n # perturbation has a non-zero derivative.\n normalized_grad = tf.stop_gradient(normalized_grad)\n elif ord == 1:\n red_ind = list(range(1, len(x.get_shape())))\n normalized_grad = grad / tf.reduce_sum(tf.abs(grad),\n reduction_indices=red_ind,\n keep_dims=True)\n elif ord == 2:\n red_ind = list(range(1, len(x.get_shape())))\n square = tf.reduce_sum(tf.square(grad),\n reduction_indices=red_ind,\n keep_dims=True)\n normalized_grad = grad / tf.sqrt(square)\n else:\n raise NotImplementedError(\"Only L-inf, L1 and L2 norms are \"\n \"currently implemented.\")\n\n # Multiply by constant epsilon\n scaled_grad = eps * normalized_grad\n\n # Add perturbation to original example to obtain adversarial example\n adv_x = x + scaled_grad\n\n # If clipping is needed, reset all values outside of [clip_min, clip_max]\n if (clip_min is not None) and (clip_max is not None):\n adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)\n\n return adv_x\n\n# print(x_train[0])\n# print(y_train[0])\n#\n# print(len(x_train[0]))\n#\npred = model.predict(x_train[0:1])\n# print(pred)\n# print(y_train[0:1])\n# print(type(pred))\n# print(type(x_train[0]))\n# print(type(pred[0]))\n# print(type(y_train[0]))\n\nx = tf.convert_to_tensor(x_train[0])\npred = tf.convert_to_tensor(pred[0])\ny = tf.convert_to_tensor(y_train[0])\nprint(type(x), type(pred), type(y))\nadv_input = fgm(x=x, preds=pred, y=y, clip_min=-1.0, clip_max=1.0)\nprint(adv_input)\n\n\n","sub_path":"examples/mytest/fgsm.py","file_name":"fgsm.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497357127","text":"import os\nimport numpy as np\nimport tensorflow as tf\n\nimport collections\nfrom nltk.corpus import stopwords\n\n########################################################################\n\nwrite_dir = 'models_dir'\nomnibus = 'omnibus.zip'\ndefault_embedding_size = 20\ndefault_epochs = 10\ninitial_learning_rate = 0.15\nnumber_negative_examples = 20\nbatch_size_default = 16\nwindow = 3\ndefault_min_count = 3\nnum_samples_per_word = 2\nthreads = 2\nsubsample_threashold =1e-3\ninteractive_mode = False\nstatistics_interval = 2\nsummary_interval = 5\ncheckpoint_interval = 600\n\n###############################################################################\n\n\n###############################################################################\n\nclass Options():\n\n ###########################################################################\n\n emb_dim = None\n num_samples = None\n learning_rate = None\n 
epochs_to_train = None\n concurrent_steps = None\n batch_size = None\n window_size = None\n min_count = None\n subsample = None\n statistics_interval = None\n summary_interval = None\n checkpoint_interval = None\n save_path = None\n #eval_data = None\n\n ###########################################################################\n\n def __init__(self):\n\n self.emb_dim = default_embedding_size\n self.num_samples = number_negative_examples\n self.learning_rate = initial_learning_rate\n self.epochs_to_train = default_epochs\n self.concurrent_steps = threads\n self.batch_size = batch_size_default\n self.window_size = window\n self.min_count = default_min_count\n self.samples_window = 2*window - 2\n self.subsample = subsample_threashold\n self.statistics_interval = statistics_interval\n self.summary_interval = summary_interval\n self.checkpoint_interval = checkpoint_interval\n self.save_path = write_dir\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path)\n #self.eval_data = fake_analogies\n\n ###########################################################################\n\n###############################################################################\n\n\n###############################################################################\n\nclass WordMapper():\n \n min_count = None\n null_words = None\n word_list = None\n unk_str = 'UNK'\n word_index = None\n vocab_size = None\n vocab_counts = None\n \n ###########################################################################\n \n def __init__(self, min_count):\n \n self.min_count = min_count\n \n ###########################################################################\n \n ###########################################################################\n \n def make(self, tokens):\n \n count = collections.Counter(tokens)\n count_common = {word: number for word, number\n in count.items() \n if number>=self.min_count}\n self.null_words = set(count.keys())-set(count_common.keys())\n sum_of_uncommon = sum(count[unk_word] \n for unk_word in self.null_words)\n\n self.word_list = np.array([word for word in count_common.keys()])\n \n self.word_index = {word: index for index, word\n in enumerate(self.word_list)}\n self.word_index.update({self.unk_str: len(self.word_list)})\n self.vocab_size = len(self.word_list) + 1\n self.vocab_counts = list(count_common.values()) + [sum_of_uncommon]\n return self\n \n ###########################################################################\n \n ###########################################################################\n \n def run(self, tokens):\n \n return [self.word_index[token] if token \n in self.word_list else self.word_index[self.unk_str]\n for token in tokens]\n \n ###########################################################################\n \n############################################################################### \n\n###############################################################################\n\nclass SkipNGramInput():\n\n ###########################################################################\n\n _options = None\n _wordmap = None\n data = None \n \n words_per_epoch = None\n\n ###########################################################################\n\n def __init__(self, options):\n\n self._options = options\n\n ###########################################################################\n \n ###########################################################################\n \n def make(self, tokenized_data):\n \n min_count = self._options.min_count\n self.length_corpus = 
len(tokenized_data) \n tokens = tokenized_data\n \n tokens = self.filter_stop_words(tokens)\n wm = WordMapper(min_count).make(tokens)\n cat_tokens = wm.run(tokens)\n self._wordmap = wm\n\n subsamples = self._options.samples_window\n self.words_per_epoch = subsamples*len(cat_tokens)\n words_per_epoch = self.words_per_epoch\n window = self._options.window_size\n \n pad_data = (cat_tokens[-window:] \n + cat_tokens + cat_tokens[:window])\n \n data_labels = np.array([subsamples*[datum] for\n datum in pad_data[window: -window]])\n data_window = np.array(list(map(lambda i: np.random.choice(\n np.hstack((pad_data[i-window:i], pad_data[i+1:i+1+window])),\n size=subsamples, replace=False),\n np.arange(window, len(pad_data)-window))))\n \n data_labels = data_labels.flatten()\n data_window = data_window.flatten()\n \n data_labels_tf = tf.data.Dataset.from_tensor_slices(\n tf.constant(data_labels,\n dtype=tf.int32, shape=(words_per_epoch, )))\n data_window_tf = tf.data.Dataset.from_tensor_slices(\n tf.constant(data_window,\n dtype=tf.int32, shape=(words_per_epoch, ))) \n self.data = tf.data.Dataset.zip(\n (data_labels_tf,\n data_window_tf)).batch(batch_size_default,\n True).repeat(default_epochs)\n\t# temporary\n self.data_labels = data_labels\n self.data_window = data_window\n\n return self\n\n ###########################################################################\n\n ###########################################################################\n \n def filter_stop_words(self, tokens):\n \n stop_words = set(stopwords.words('english'))\n stop_words = stop_words | set('.')\n # should enocde to bytes\n tokens = [word.lower() for word in tokens if word.lower()\n not in stop_words]\n return tokens\n \n ###########################################################################\n \n###############################################################################\n","sub_path":"word_embeddings_util.py","file_name":"word_embeddings_util.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411573039","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Dylan Chen\n\nTo be continue (IV value)\n\n\"\"\"\n\nimport csv\nimport descriptive_statistics\nimport glob\nimport os\nfrom functools import partial\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom scipy.stats.stats import pearsonr\nfrom sklearn.feature_selection import chi2\n\n\nclass AdvancedEdd:\n \"\"\"\n\n split row in a vertical direction way, for low-memory computation\n ensure target y is in the last column\n\n header: boolean, default True [must be True :)]\n write each column to input_path directory\n\n \"\"\"\n\n def __init__(self, input_path, var_type, output_path, y_index):\n\n self.input_path = input_path\n self.var_type = var_type\n self.output_path = output_path\n self.y_index = y_index\n\n pass\n\n def row2fields(self, var_type=None, header=True):\n\n num_fields = 0\n tmp_list = []\n keys = []\n\n # ensure the input file containing header\n if not header:\n raise OSError('Ensure header exists in your file !!')\n\n if not var_type:\n raise OSError('Must provide each Variables type !!')\n\n with open(self.input_path, 'r') as csv_file:\n data = csv.reader(csv_file, delimiter=',')\n\n for row_num, line in enumerate(data):\n\n if row_num == 0 and header:\n\n num_fields = len(line)\n keys = line\n print('Label Y Column Name: %s' % keys[self.y_index])\n\n for i in range(num_fields - 1):\n # create columns temp file and write header (one column, 
y label) to each file\n                        tmp_list.append(open('%s/field' % os.path.dirname(self.input_path) + str(i + 1) + '.tmp', 'w'))\n                        # neutralise embedded commas so each tmp row stays exactly three fields\n                        tmp_list[i].write('{0},{1},{2}'.format(keys[i].replace(',', ';'), 'variable_type', 'target_y') + \"\\n\")\n\n                    continue\n\n                # write each column together with y to the corresponding tmp file\n                for i in range(num_fields - 1):\n                    try:\n                        tmp_list[i].write(\n                            ','.join([line[i].replace(',', ';'), var_type[keys[i]], line[self.y_index]]) + \"\\n\")\n                    except IndexError:\n                        tmp_list[i].write(\"\\n\")\n\n        for i in range(num_fields - 1):\n            tmp_list[i].close()\n\n    def feature_explore(self, output_path, input_file):\n        \"\"\"\n        Notes:\n            1. ensure all missing values are represented by ''\n            2. the statistic score is computed without nan values\n            3. this part is run in parallel\n            4. refer to function (descriptive_statistics) in edd.py\n\n        \"\"\"\n\n        col_x = []\n        tar_y = []\n        key = ''\n        var_type = ''\n\n        type_algorithm = {'categorical': [chi2],\n                          'numeric': [pearsonr]}\n\n        with open(input_file) as csv_file:\n            print(input_file)\n            file = csv.reader(csv_file, delimiter=',')\n\n            # collect column x and target y into list\n            for row_num, line in enumerate(file):\n                x, var_type, y = line\n\n                if row_num == 0:\n                    key = x\n                    continue\n\n                col_x.append(x)\n                tar_y.append(float(y))\n\n        # get each variable type's corresponding statistic score\n        des_dict, header = descriptive_statistics.desc_stat(col_x)\n\n        # convert categorical columns to numeric\n        if var_type == 'categorical':\n            col_x = [col_x.index(x) if x != '' else '' for x in col_x]\n\n        value_mask = [i for i, x in enumerate(col_x) if x != '']\n\n        array_x = np.array(col_x)[value_mask].astype(np.float64)\n        array_y = np.array(tar_y)[value_mask].astype(np.float64)\n\n        for num, method in enumerate(type_algorithm[var_type]):\n            if method == chi2:\n                array_x = array_x.reshape(len(array_x), 1)\n\n            try:\n                statistic, p_value = method(array_x, array_y)\n            except ValueError:\n                statistic = ' '\n                p_value = ' '\n\n            if method == chi2:\n                statistic = statistic[0]\n                p_value = p_value[0]\n            des_dict.update({'statistic_%s' % num: statistic, 'p_value_%s' % num: p_value})\n            header += ['statistic_%s' % num, 'p_value_%s' % num]\n\n        header = ['var_name'] + header\n        des_dict.update({'var_name': key})\n\n        output = []\n        with open(output_path, 'a') as out_file:\n            for name in header:\n                output.append(str(des_dict[name]))\n            out_file.write(','.join(output) + '\\n')\n\n    def run(self):\n\n        print(\"Getting each variables' type ...\")\n        type_dict = {}\n        with open(self.var_type) as csv_file:\n\n            dict_file = csv.reader(csv_file, delimiter=',')\n\n            for row_index, row_data in enumerate(dict_file):\n                if row_index == 0:\n                    continue\n                k, v = row_data\n                type_dict.update({k: v})\n\n        print(\"Splitting TXT file in a vertical direction way ...\")\n        self.row2fields(var_type=type_dict)\n\n        output_header = ['var_name', 'mean', 'std', 'No_obs', 'No_uniq', 'sum', 'missrate', 'misscnt', 'mod1', 'mod2',\n                         'mod3', 'max', 'min', 'p1', 'p5', 'p10', 'p25', 'p33', 'p50', 'p66', 'p75', 'p90', 'p95',\n                         'p99',\n                         'statistic_0', 'p_value_0', 'statistic_1', 'p_value_1']\n\n        print(\"Creating output header ...\")\n        with open(self.output_path, 'w') as header_file:\n            header_file.write(','.join(output_header) + '\\n')\n\n        file_list = glob.glob(os.path.join(os.path.dirname(self.input_path), 'field*.tmp'))\n\n        print(\"Computing each Column's descriptive statistics ... \")\n        func = partial(self.feature_explore, self.output_path)\n        pool = Pool(processes=10)\n        pool.map(func, file_list)\n        pool.close()\n        pool.join()\n\n        print(\"Removing temporary files ...\")\n        for remove_file in file_list:\n            os.remove(remove_file)\n\n        print('Done')\n","sub_path":"advanced_edd.py","file_name":"advanced_edd.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"131359626","text":"# Store in d[i] the larger of (current value) and (d[i - 1] + current value),\n# since d[i - 1] may be negative and would only drag the sum down.\n# e.g. nums = [10, -4, 3, 1, 5] gives d = [10, 6, 9, 10, 15], so the answer is 15.\n\nn = int(input())\nnums = list(map(int, input().split()))\n\nd = [0] * n\nfor ix, num in enumerate(nums):\n    if ix == 0:\n        d[0] = num\n        continue\n\n    if num > num + d[ix - 1]:\n        d[ix] = num\n    else:\n        d[ix] = num + d[ix - 1]\n\nprint(max(d))\n","sub_path":"BJ/DP/BJ_1912_continuous_sum.py","file_name":"BJ_1912_continuous_sum.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"578252411","text":"from collections import OrderedDict\n\nfrom sqlalchemy.sql import text as sql_text\n\nfrom service_lib.database import DatabaseEngine\n\n\nclass PermissionsQuery:\n    \"\"\"PermissionsQuery base class\n\n    Query permissions for GDI resources.\n    \"\"\"\n\n    # name of public iam.role\n    PUBLIC_ROLE_NAME = 'public'\n\n    def __init__(self, config_models, logger):\n        \"\"\"Constructor\n\n        :param ConfigModels config_models: Helper for ORM models\n        :param Logger logger: Application logger\n        \"\"\"\n        self.config_models = config_models\n        self.logger = logger\n        self.db_engine = DatabaseEngine()\n\n    def public_role(self):\n        \"\"\"Return public role name.\"\"\"\n        return self.PUBLIC_ROLE_NAME\n\n    def resource_ids(self, table_names, role, session):\n        \"\"\"Query permissions for multiple GDI resource types.\n\n        Return set of permitted GDI resource IDs.\n\n        :param list[str] table_names: List of table names for GDI resource types\n        :param str role: Role name\n        :param Session session: DB session\n        \"\"\"\n        ResourcePermission = self.config_models.model('resource_permission')\n        GDIResource = self.config_models.model('gdi_resource')\n\n        # base query for all permissions of user\n        query = self.role_permissions_query(role, session)\n\n        # filter permissions by GDI resource types\n        query = query.join(ResourcePermission.resource) \\\n            .filter(GDIResource.table_name.in_(table_names))\n\n        # execute query and pluck resource IDs\n        resource_ids = [p.gdi_oid_resource for p in query.all()]\n\n        return set(resource_ids)\n\n    def resource_permissions(self, table_name, resource_name, role, session):\n        \"\"\"Query permissions for a GDI resource type and optional name.\n\n        Return resource permissions sorted by priority.\n\n        :param str table_name: Table name for a GDI resource type\n        :param str resource_name: optional GDI resource name (None for all)\n        :param str role: Role name\n        :param Session session: DB session\n        \"\"\"\n        ResourcePermission = self.config_models.model('resource_permission')\n        GDIResource = self.config_models.model('gdi_resource')\n\n        # base query for all permissions of user\n        query = self.role_permissions_query(role, session)\n\n        # filter permissions by GDI resource types\n        query = query.join(ResourcePermission.resource) \\\n            .filter(GDIResource.table_name == table_name)\n\n        if resource_name is not None:\n            # filter by resource name\n            query = query.filter(GDIResource.name == resource_name)\n\n        # order by priority, highest first\n        query = query.order_by(ResourcePermission.priority.desc())\n\n        # execute query and return results\n        
return query.all()\n\n def role_permissions_query(self, role, session):\n \"\"\"Create base query for all permissions of a role.\n\n :param str role: Role name\n :param Session session: DB session\n \"\"\"\n ResourcePermission = self.config_models.model('resource_permission')\n Role = self.config_models.model('role')\n\n # create query for permissions of role\n query = session.query(ResourcePermission). \\\n join(ResourcePermission.role). \\\n filter(Role.name == role)\n\n return query\n\n def all_resource_ids(self, table_names, session):\n \"\"\"Query multiple GDI resource types.\n\n Return set of GDI resource IDs.\n\n :param list[str] table_names: List of table names for GDI resource types\n :param Session session: DB session\n \"\"\"\n ResourcePermission = self.config_models.model('resource_permission')\n GDIResource = self.config_models.model('gdi_resource')\n\n # create query with any permissions\n query = session.query(ResourcePermission) \\\n .join(ResourcePermission.resource) \\\n .filter(GDIResource.table_name.in_(table_names))\n\n # execute query and pluck resource IDs\n resource_ids = [p.gdi_oid_resource for p in query.all()]\n\n return set(resource_ids)\n\n def postgis_metadata(self, conn_str, schema, table_name, geometry_column):\n \"\"\"Return primary key, geometry column, type and srid from a PostGIS\n table.\n\n :param str conn_str: DB connection URL\n :param str schema: DB schema name\n :param str table_name: DB table name\n :param str geometry_column: optional geometry column name if not unique\n \"\"\"\n info = {}\n\n conn = None\n try:\n # connect to GeoDB\n engine = self.db_engine.db_engine(conn_str)\n conn = engine.connect()\n\n # get primary key\n\n # build query SQL\n sql = sql_text(\"\"\"\n SELECT a.attname\n FROM pg_index i\n JOIN pg_attribute a ON a.attrelid = i.indrelid\n AND a.attnum = ANY(i.indkey)\n WHERE i.indrelid = '{schema}.{table}'::regclass\n AND i.indisprimary;\n \"\"\".format(schema=schema, table=table_name))\n\n # execute query\n result = conn.execute(sql)\n for row in result:\n info['primary_key'] = row['attname']\n\n # get geometry column and srid\n\n # build query SQL\n where_clause = \"\"\n if geometry_column:\n # select specific geometry column\n where_clause = sql_text(\"\"\"\n AND f_geometry_column = '{geom_column}'\n \"\"\".format(geom_column=geometry_column))\n\n sql = sql_text(\"\"\"\n SELECT f_geometry_column, srid, type\n FROM geometry_columns\n WHERE f_table_schema = '{schema}' AND f_table_name = '{table}'\n {where_clause};\n \"\"\".format(\n schema=schema, table=table_name, where_clause=where_clause)\n )\n\n # execute query\n result = conn.execute(sql)\n for row in result:\n info['geometry_column'] = row['f_geometry_column']\n info['geometry_type'] = row['type']\n info['srid'] = row['srid']\n\n # close database connection\n conn.close()\n except Exception as e:\n self.logger.error(\n \"Could not get PostGIS metadata for table '%s.%s':\\n%s\" %\n (schema, table_name, e)\n )\n if conn:\n conn.close()\n\n return info\n\n def attribute_metadata(self, conn_str, schema, table_name, column):\n \"\"\"Return data type and any constraints for a table column.\n\n :param str conn_str: DB connection URL\n :param str schema: DB schema name\n :param str table_name: DB table name\n :param str column: Column name\n \"\"\"\n data_type = 'text'\n # NOTE: use ordered keys\n constraints = OrderedDict()\n\n try:\n # connect to GeoDB\n geo_db = self.db_engine.db_engine(conn_str)\n conn = geo_db.connect()\n\n # build query SQL\n sql = sql_text(\"\"\"\n SELECT 
data_type, character_maximum_length, numeric_precision,\n numeric_scale\n FROM information_schema.columns\n WHERE table_schema = '{schema}' AND table_name = '{table}'\n AND column_name = '{column}'\n ORDER BY ordinal_position;\n \"\"\".format(schema=schema, table=table_name, column=column))\n\n # execute query\n result = conn.execute(sql)\n for row in result:\n data_type = row['data_type']\n\n # constraints from data type\n if (data_type in ['character', 'character varying'] and\n row['character_maximum_length']):\n constraints['maxlength'] = row['character_maximum_length']\n elif data_type in ['double precision', 'real']:\n # NOTE: use text field with pattern for floats\n constraints['pattern'] = '[0-9]+([\\\\.,][0-9]+)?'\n elif data_type == 'numeric' and row['numeric_precision']:\n step = pow(10, -row['numeric_scale'])\n max_value = pow(\n 10, row['numeric_precision'] - row['numeric_scale']\n ) - step\n constraints['numeric_precision'] = row['numeric_precision']\n constraints['numeric_scale'] = row['numeric_scale']\n constraints['min'] = -max_value\n constraints['max'] = max_value\n constraints['step'] = step\n elif data_type == 'smallint':\n constraints['min'] = -32768\n constraints['max'] = 32767\n elif data_type == 'integer':\n constraints['min'] = -2147483648\n constraints['max'] = 2147483647\n elif data_type == 'bigint':\n # NOTE: JSON/Javascript may reduce precision\n constraints['min'] = -9223372036854775808\n constraints['max'] = 9223372036854775807\n\n if not data_type:\n self.logger.warning(\n \"Could not find data type of column '%s' \"\n \"of table '%s.%s'\" % (column, schema, table_name)\n )\n data_type = 'text'\n\n # close database connection\n conn.close()\n except Exception as e:\n self.logger.error(\n \"Could not get data type of column '%s' \"\n \"of table '%s.%s':\\n%s\" % (column, schema, table_name, e)\n )\n data_type = 'text'\n\n return {\n 'data_type': data_type,\n 'constraints': constraints\n }\n","sub_path":"permissions_query.py","file_name":"permissions_query.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"181940904","text":"from pymongo import MongoClient\nimport pprint\nclient=MongoClient()\nsets={\"name\":\"monika\",\"surname\":\"singh\",\"id\":2314026,\"year\":[2012,2017],\"layout\":[\"good\",\"bad\"],\"designer\":{\"first\":\"tata\",\"last\":\"motor\"},\"work\":\"student\"}\ndb=client.examples #examples is the database\ndb.sets_table.insert(sets)#sets_table is the collection(table)\nfor a in db.sets_table.find():\n pprint.pprint(a)\n''' \ndef find():\n sets_table=db.sets_table.find({\"surname\":\"singh\"})\n for a in sets_table:\n pprint.pprint(a)\n\nif __name__==\"__main__\":\n find()\n'''\n","sub_path":"insert1_mongo.py","file_name":"insert1_mongo.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"431609185","text":"import os\nimport re\nfrom inspect import getmembers, getmodule, ismethod\nfrom typing import Callable, Union\n\nfrom .connection import get_connection\n\n\nclass ImportNotFoundException(Exception):\n pass\n\n\nclass ModuleNotfoundException(Exception):\n pass\n\n\ndef sql_table_name(queue_table: str) -> str:\n return ('.').join([f'\"{x}\"' for x in queue_table.split('.')]) \n\n\nasync def get_fn_name(func: Union[str, Callable]) -> str:\n try:\n if isinstance(func, str):\n return func\n if ismethod(func):\n module_name = 
await get_fn_name(dict(getmembers(func))['__self__'])\n        else:\n            module_name = getmodule(func).__name__\n        name = func.__name__\n        return '.'.join([module_name, name])\n    except AttributeError as e:\n        raise ModuleNotfoundException(e)\n\n\nasync def import_fn(fn_name) -> Callable:\n    access_path = fn_name.split(\".\")\n    module = None\n    try:\n        for index in range(1, len(access_path)):\n            try:\n                # import top level module\n                module_name = \".\".join(access_path[:-index])\n                module = __import__(module_name)\n            except ImportError:\n                continue\n            else:\n                for step in access_path[1:-1]:  # walk down it\n                    module = getattr(module, step)\n                break\n        if module:\n            return getattr(module, access_path[-1])\n        else:\n            return globals()[\"__builtins__\"][fn_name]\n    except KeyError as e:\n        raise ImportNotFoundException(e)\n\n\nasync def create_table(name: str):\n    if not name:\n        name = os.getenv('MOD_NGARN_TABLE', 'modngarn_job')\n    print(f\"Creating table {name}...\")\n    cnx = await get_connection()\n    async with cnx.transaction():\n        await cnx.execute(\n            \"\"\"CREATE TABLE IF NOT EXISTS {queue_table} (\n                id TEXT NOT NULL CHECK (id !~ '\\\\|/|\\u2044|\\u2215|\\u29f5|\\u29f8|\\u29f9|\\ufe68|\\uff0f|\\uff3c'),\n                fn_name TEXT NOT NULL,\n                args JSON DEFAULT '[]',\n                kwargs JSON DEFAULT '{{}}',\n                priority INTEGER DEFAULT 0,\n                created TIMESTAMP WITH TIME ZONE DEFAULT NOW(),\n                scheduled TIMESTAMP WITH TIME ZONE,\n                executed TIMESTAMP WITH TIME ZONE,\n                canceled TIMESTAMP WITH TIME ZONE,\n                result JSON,\n                reason TEXT,\n                processed_time TEXT,\n                PRIMARY KEY (id)\n            );\n        \"\"\".format(\n                queue_table=name\n            )\n        )\n\n        # partial index: only un-executed (pending) jobs are indexed, so the\n        # queue scan stays small as completed jobs accumulate\n        await cnx.execute(\n            f\"\"\"CREATE INDEX IF NOT EXISTS idx_pending_jobs ON {name} (executed) WHERE executed IS NULL;\"\"\"\n        )\n    print(f\"Done\")\n","sub_path":"mod_ngarn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"82428226","text":"\"\"\"\nTic-Tac-Toe Game\n\"\"\"\n\nimport random\n\ngame_board = ['#', 'X', 'O', 'X', 'O', 'X', 'O', 'X', 'O', 'X']\n\n\"\"\"\nDisplay board to user.\n\"\"\"\n\n\ndef display_board(board):\n\n    print(board[1]+'|'+board[2]+'|'+board[3])\n    print(board[4]+'|'+board[5]+'|'+board[6])\n    print(board[7]+'|'+board[8]+'|'+board[9])\n\n\n\"\"\"\nCheck to see what marker the players will have\n\"\"\"\n\n\ndef player_input():\n\n    marker = ''\n\n    while not (marker == 'X' or marker == 'O'):\n        marker = input('Player 1: Choose X or O: ').upper()\n\n    if marker == 'X':\n        return ('X', 'O')\n    else:\n        return ('O', 'X')\n\n\n\"\"\"\nTakes in board list object, marker and assigns it to the board\n\"\"\"\n\n\ndef place_marker(board, marker, position):\n    board[position] = marker\n\n\n\"\"\"\nTake in board list and checks to see if the marker has won\n\"\"\"\n\n\"\"\"\nChecking board to see if there is a winner\n\"\"\"\n\n\ndef win_check(board, mark):\n    return ((board[1] == mark and board[2] == mark and board[3] == mark) or\n            (board[4] == mark and board[5] == mark and board[6] == mark) or\n            (board[7] == mark and board[8] == mark and board[9] == mark) or\n            (board[1] == mark and board[4] == mark and board[7] == mark) or\n            (board[2] == mark and board[5] == mark and board[8] == mark) or\n            (board[3] == mark and board[6] == mark and board[9] == mark) or\n            (board[1] == mark and board[5] == mark and board[9] == mark) or\n            (board[3] == mark and board[5] == mark and board[7] == mark))\n\n\n\"\"\"\nPicking the first player to make a move\n\"\"\"\n\n\ndef choose_first():\n\n    
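# (added note) coin flip: randint(0, 1) == 0 means Player 1 moves first\n    flip = random.randint(0, 1)\n\n    if flip == 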
0:\n return 'Player 1'\n else:\n return 'Player 2'\n\n\n\"\"\"\nCheck to see if the position on a board is free to place\n\"\"\"\n\n\ndef space_check(board, position):\n return board[position] == ' '\n\n\n\"\"\"\nCheck if board is full and return a boolean value\n\"\"\"\n\n\ndef full_board_check(board):\n\n for i in range(1, 10):\n if space_check(board, i):\n return False\n return True\n\n\n\"\"\"\nAsk player for next position as a number and use previous function to check if\nthat play can be made\n\"\"\"\n\n\ndef player_choice(board):\n\n position = 0\n\n while position not in range(1, 10) or not space_check(board, position):\n position = int(input('Choose a position: 1-9: '))\n\n return position\n\n\n\"\"\"\nWrite a function that asks the player if they want to play again\n\"\"\"\n\n\ndef replay():\n\n choice = input('Play again? Enter Yes or No ')\n\n return choice == 'Yes'\n\n\n\"\"\"\nLogic\n\"\"\"\n\nprint('Welcome to Tic Tac Toe')\n\nwhile True:\n\n the_board = [' ']*10\n player1_marker, player2_marker = player_input()\n\n turn = choose_first()\n print(turn + ' will play first.')\n\n play_game = input('Ready to play? y or n?')\n\n if play_game == 'y':\n game_on = True\n else:\n game_on = False\n\n while game_on:\n\n if turn == 'Player 1':\n display_board(the_board)\n position = player_choice(the_board)\n place_marker(the_board, player1_marker, position)\n\n if win_check(the_board, player1_marker):\n display_board(the_board)\n print('Player 1 has won the game.')\n game_on = False\n else:\n if full_board_check(the_board):\n display_board(the_board)\n print('The game is a draw.')\n game_on = False\n else:\n turn = 'Player 2'\n\n else:\n display_board(the_board)\n position = player_choice(the_board)\n place_marker(the_board, player2_marker, position)\n\n if win_check(the_board, player2_marker):\n display_board(the_board)\n print('Player 2 has won the game.')\n game_on = False\n else:\n if full_board_check(the_board):\n display_board(the_board)\n print('The game is a draw.')\n game_on = False\n else:\n turn = 'Player 1'\n\n if not replay():\n print('Thanks for playing Tic Tac Toe.')\n break\n","sub_path":"Syllabus/06-Python-Milestone-Project-1/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"648285008","text":"import unittest\nimport os\nfrom selenium.webdriver import DesiredCapabilities, Remote\nfrom pages.auth_page import AuthPage\nfrom pages.userinfo_page import UserinfoPage\n\nclass WrongTownTest(unittest.TestCase):\n WRONG_TOWN_NAME = 'qwertyuiop'\n TOP_MESSAGE = 'Некоторые поля заполнены неверно'\n TOWN_ERROR = 'Проверьте название города'\n\n def setUp(self):\n browser = os.environ.get('BROWSER', 'CHROME')\n\n self.driver = Remote(\n command_executor='http://127.0.0.1:4444/wd/hub',\n desired_capabilities=getattr(DesiredCapabilities, browser).copy()\n )\n\n def tearDown(self):\n self.driver.quit()\n\n def test(self):\n auth_page = AuthPage(self.driver)\n auth_page.open()\n auth_page.authorize()\n\n userinfo_page = UserinfoPage(self.driver)\n userinfo_page.open()\n userinfo_form = userinfo_page.form\n userinfo_form.set_town(self.WRONG_TOWN_NAME)\n userinfo_form.wait_for_suggests_invisible()\n userinfo_form.save()\n self.assertEqual(self.TOP_MESSAGE, userinfo_form.get_top_message())\n self.assertEqual(self.TOWN_ERROR, 
userinfo_form.get_town_message())","sub_path":"tests/userinfo/wrongtown_test.py","file_name":"wrongtown_test.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"321620945","text":"'''Write a program that reads the lengths of 3 line segments and tells the user whether or not they can form a triangle (e.g. 3, 4 and 5 can; 1, 2 and 8 cannot).'''\r\n\r\nreta1 = float(input('Enter the length of the first segment: '))\r\nreta2 = float(input('Enter the length of the second segment: '))\r\nreta3 = float(input('Enter the length of the third segment: '))\r\n\r\nif reta1 < reta2 + reta3 and reta2 < reta1 + reta3 and reta3 < reta1 + reta2:\r\n    print('The given lengths form a triangle!')\r\nelse:\r\n    print('The given lengths cannot form a triangle')","sub_path":"Desafio35.py","file_name":"Desafio35.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"618469380","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDate created: Tue Mar 7 15:38:34 2017\nAuthor: S. Hutchins\n\nScript description: Testing different Clustal Omega parameters.\n\n\"\"\"\nimport os\nfrom ClustalParams import COP1, COP2, COP3, COP4, COP5, COP6, COP7\n\n# Assess current working directories and establish home and output directories.\n\na = '/work5/r2295/bin/ClustalParamsTest/' # Home directory\nhome = a\nos.chdir(home) # Directory change to output directory\n\nparamnames = [\"params1\", \"params2\", \"params3\", \"params4\", \"params5\", \"params6\", \"params7\"]\nparameters = [COP1, COP2, COP3, COP4, COP5, COP6, COP7]\n\nfor name, param in zip(paramnames, parameters):\n    # Create directories for alignment files.\n    b = a + name + \"-output\"\n    os.makedirs('%s' % b, exist_ok=True) # Create a directory or don't if it exists.\n\n    # Go to the CDS directory\n    c = a + \"InputCDS\"\n\n    os.chdir(c) # Change to cds.fasta file directory\n    os.listdir(c) # Make a list of the files in the current directory\n    print(\"➜ Current CDS/Gene directory: \"+ os.getcwd() + \"\\n\") # Print current working directory\n\n    # Echoes all commands in the current shell.\n    os.system(\"set -x\")\n\n    # Copies the profile.fasta and concatenated cds.fasta file to output dir.\n    os.system(\"cp {APOL1_cds_nucl.fasta,profile.fasta} \" + b + \"/\")\n\n    # Directory change to output directory\n    os.chdir(b)\n\n    # Run Clustal Omega Commandline tool\n    param(in_file=\"APOL1_cds_nucl.fasta\", out_file=name + \".fasta\", outfmt=\"fasta\", logfile=name + \".log\")","sub_path":"{{ cookiecutter.project_slug }}/misc/Tests and Examples/ClustalOmega/ParametersTest/ClustalParamsTest1/params_test.py","file_name":"params_test.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"132240001","text":"from itertools import groupby\nimport operator\nimport time\nfrom datetime import datetime\n\ndef repeat_func(func, times, *args):\n    for i in range(times):\n        yield func(*args)\n\ndef to_range_str(iterable):\n    '''\n    Convert an iterable object of integers to a string with consecutive\n    numbers combined into a range.\n    '''\n    result = ''\n    sorted_list = sorted(iterable)\n    for (_, g) in groupby(enumerate(sorted_list), lambda t : t[0] - t[1]):\n        rlist = list(map(operator.itemgetter(1), g))\n        if len(rlist) > 1:\n            single = '{}-{}'.format(rlist[0], rlist[-1])\n        else:\n            single = '{}'.format(rlist[0])\n        if not result:\n            result = single\n        else:\n            
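# e.g. [1, 2, 3, 7] -> '1-3 7': later pieces are appended space-separated (added note)\n            result += ' 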
{}'.format(single)\n\n return result\n\ndef range_str_to_list(s):\n ret = []\n for f in s.strip(' ,\\n\\t').replace(',', ' ').split():\n r = list(map(int, f.split('-')))\n ret.extend(range(r[0], r[-1] + 1))\n\n return ret\n\ndef get_milliseconds():\n return int(time.time()) * 1000 + (datetime.now().microsecond) // 1000\n\ndef is_trade_time():\n now = datetime.now()\n return now.isoweekday() in (1, 2, 3, 4, 5) and now.hour in range(9, 16)\n\n# This must be put at the last line!\n__all__ = [x for x in locals() if not x.startswith('__')]\n","sub_path":"python/tools/binary-message/ouou/util/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"212639598","text":"from threading import Thread\nimport time\nimport threading\n\n\ndef test1():\n g_num = 100\n name = threading.current_thread().name\n if name == \"Thread-1\":\n g_num += 1\n else:\n time.sleep(2)\n print(\"--Thread is %s--g_num=%d\" % (name, g_num))\n\n\nt1 = Thread(target=test1)\nt1.start()\n\nt2 = Thread(target=test1)\nt2.start()","sub_path":"PycharmProjects/PythonCodes/02-Senior/11-线程/08-多进程使用非共享变量.py","file_name":"08-多进程使用非共享变量.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"331874288","text":"# -*- coding: utf-8 -*-\n# perimeter form tests\nimport datetime\n\nfrom django.core.exceptions import ValidationError\nfrom django.test import TestCase, RequestFactory, override_settings\nfrom django.utils.timezone import now\n\nfrom perimeter.forms import GatewayForm\nfrom perimeter.models import AccessToken, AccessTokenUse\n\nYESTERDAY = now().date() - datetime.timedelta(days=1)\n\nclass GatewayFormTests(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.payload = {\n 'token': \"test\",\n 'email': \"hugo@yunojuno.com\",\n 'name': \"Hugo Rodger-Brown\"\n }\n self.request = self.factory.post('/',\n data=self.payload,\n REMOTE_ADDR='127.0.0.1',\n HTTP_USER_AGENT='test_agent'\n )\n self.token = AccessToken(token=\"test\").save()\n self.form = GatewayForm(self.request.POST)\n\n def test_post_valid_token(self):\n self.assertTrue(self.form.is_valid())\n self.assertEqual(self.form.token, self.token)\n\n def test_clean_inactive_token(self):\n self.token.is_active = False\n self.token.save(update_fields=['is_active'])\n self.assertFalse(self.form.is_valid())\n self.assertRaises(ValidationError, self.form.clean_token)\n\n def test_clean_expired_token(self):\n self.token.expires_on = YESTERDAY\n self.token.save(update_fields=['expires_on'])\n self.assertFalse(self.form.is_valid())\n self.assertRaises(ValidationError, self.form.clean_token)\n\n def test_no_matching_token(self):\n AccessToken.objects.all().delete()\n self.assertFalse(self.form.is_valid())\n self.assertRaises(ValidationError, self.form.clean_token)\n\n def test_save(self):\n self.request.session = {}\n self.assertTrue(self.form.is_valid())\n au = self.form.save(self.request)\n self.assertTrue(AccessTokenUse.objects.get(), au)\n self.assertEqual(au.user_email, self.payload['email'])\n self.assertEqual(au.user_name, self.payload['name'])\n self.assertEqual(au.token, self.token)\n self.assertEqual(au.client_ip, '127.0.0.1')\n self.assertEqual(au.client_user_agent, 
'test_agent')\n","sub_path":"perimeter/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"614363516","text":"import pygame;\nimport random;\nimport sys;\nimport pickle;\nimport numpy as np;\nfrom pygame.locals import *;\n\n### define game logic methods ####\n\ndef collide(x1, x2, y1, y2, w1, w2, h1, h2):\n\tif x1+w1>x2 and x1<x2+w2 and y1+h1>y2 and y1<y2+h2:\n\t\treturn True;\n\treturn False;\n\n## NOTE: the helper definitions below were eaten by markup mangling in the\n## source dump; they are reconstructed here from how the rest of this file\n## uses them, so treat them as a best-effort restoration.\ntrain_file = None;\n\ndef rand_apple_pos():\n\treturn (random.randint(0, grid_size-1)*obj_size, random.randint(0, grid_size-1)*obj_size);\n\n## direction encoding: 0 = down, 1 = right, 2 = up, 3 = left\ndef forward(d):\n\treturn d;\n\ndef left_turn(d):\n\treturn (d+1)%4;\n\ndef right_turn(d):\n\treturn (d-1)%4;\n\ndef move(xs, ys, dirs, flag):\n\ti = len(xs)-1;\n\tif(flag and xs[i]/obj_size < grid_size and ys[i]/obj_size < grid_size and xs[i]/obj_size > -1 and ys[i]/obj_size > -1):\n\t\tgrid[xs[i]/obj_size][ys[i]/obj_size] = 0;\n\t\n\twhile i >= 1:\n\t\txs[i] = xs[i-1];\n\t\tys[i] = ys[i-1];\n\t\ti -= 1\n\n\tif dirs==0:\n\t\tys[0] += obj_size;\n\telif dirs==1:\n\t\txs[0] += obj_size;\n\telif dirs==2:\n\t\tys[0] -= obj_size;\n\telif dirs==3:\n\t\txs[0] -= obj_size;\n\n\tif(flag and xs[i]/obj_size < grid_size and ys[i]/obj_size < grid_size and xs[i]/obj_size > -1 and ys[i]/obj_size > -1):\n\t\tgrid[xs[i]/obj_size][ys[i]/obj_size] = 1;\n\ndef check_boundries(xs, ys):\n\tif xs[0] < 0 or xs[0] > (grid_size-1)*obj_size or ys[0] < 0 or ys[0] > (grid_size-1)*obj_size: \n\t\treturn True;\n\treturn False;\n\ndef check_self(xs,ys):\n\ti = len(xs)-1\n\twhile i >= 2:\n\t\tif collide(xs[0], xs[i], ys[0], ys[i], obj_size, obj_size, obj_size, obj_size):\n\t\t\treturn True;\n\t\ti-= 1\n\treturn False;\n\ndef check_apple(xs,ys):\n\treturn collide(xs[0], applepos[0], ys[0], applepos[1], obj_size, obj_size, obj_size, obj_size);\n\n### define init and main game loop ###\ndef init_game():\n\tcurr_it = 0;\n\t\n\tif TRAIN:\n\t\tglobal Q, Q_eps, Q_alpha, Q_gamma, Q_count, Q_runs, train_file;\n\t\t\n\t\ttrain_file = open(\"train\",\"rb+\");\n\n\t\tdump_limit = 500;\n\t\tdump_time = 0;\n\t\tcurr_it = 0;\n\n\t\tobj = pickle.load(train_file);\n\t\tcurr_it = obj[\"curr_it\"];\n\t\tQ_eps = obj[\"eps\"];\n\t\tQ_alpha = obj[\"alpha\"];\n\t\tQ_gamma = obj[\"gamma\"];\n\t\tQ_count = obj[\"count\"];\n\t\tQ_runs = obj[\"runs\"];\n\t\tQ = obj[curr_it];\n\n\t\tprint(\"loaded training at \" + str(curr_it));\n\n\tfor i in range(curr_it,100000):\n\t\tcurr_it += 1;\n\n\t\tif TRAIN:\n\t\t\tdump_time += 1;\n\n\t\t\tif(dump_time >= dump_limit):\n\t\t\t\tdump_time = 0;\n\n\t\t\t\tobj[\"runs\"] = Q_runs;\n\t\t\t\tobj[\"curr_it\"] = curr_it;\n\t\t\t\tobj[\"eps\"] = Q_eps;\n\t\t\t\tobj[\"alpha\"] = Q_alpha;\n\t\t\t\tobj[\"gamma\"] = Q_gamma;\n\t\t\t\tobj[\"count\"] = Q_count;\n\t\t\t\tobj[curr_it] = Q;\n\n\t\t\t\ttrain_file.seek(0);\n\t\t\t\tpickle.dump(obj,train_file,pickle.HIGHEST_PROTOCOL);\n\n\t\t\t\tprint(\"wrote training until now\");\n\n\t\t## reset variables\n\t\tglobal grid, xs, ys, dirs, score, applepos;\n\n\t\txs = np.copy(xs_orig);\n\t\tys = np.copy(ys_orig);\n\t\tdirs = 0;\n\t\tscore = snake_init_length;\n\t\tapplepos = rand_apple_pos();\n\t\tgrid = np.copy(grid_orig);\n\n\t\trun();\n\n\t\tQ_runs = np.append(Q_runs,score);\n\n\t\tprint(str(curr_it) + \" : \" + str(score));\n\ndef run():\n\tglobal xs, ys, applepos, score, dirs;\n\n\twhile True:\n\t\tif not DEBUG:\n\t\t\tif USER:\n\t\t\t\tclock.tick(20);\n\t\t\telse:\n\t\t\t\tclock.tick(FPS);\n\t\t\tfor e in pygame.event.get():\n\t\t\t\tif e.type == QUIT:\n\t\t\t\t\tif train_file:\n\t\t\t\t\t\ttrain_file.close();\n\t\t\t\t\tsys.exit(0)\n\t\t\t\telif e.type == KEYDOWN and USER:\n\t\t\t\t\tif e.key == K_UP and dirs != 0:\n\t\t\t\t\t\tdirs = 2\n\t\t\t\t\telif e.key == K_DOWN and dirs != 2:\n\t\t\t\t\t\tdirs = 0\n\t\t\t\t\telif e.key == K_LEFT and dirs != 1:\n\t\t\t\t\t\tdirs = 3;\n\t\t\t\t\telif e.key == K_RIGHT and dirs != 3:\n\t\t\t\t\t\tdirs = 1\n
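\n\t\t## (added note) ALGO and ALGO_PARAMS are picked from argv at the bottom of this\n\t\t## file; for the weighted random walker the params are [weights, smart_flag].\n\t\t## ai 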
here\n\t\tif(ALGO_PARAMS==None):\n\t\t\tdirs = ALGO();\n\t\telse:\n\t\t\tdirs = ALGO(ALGO_PARAMS);\n\n\t\tmove(xs,ys,dirs,True);\n\n\t\t## check collisions (self, apple, boundaries)\n\t\tif(check_self(xs,ys)):\n\t\t\treturn;\n\t\tif check_apple(xs,ys):\n\t\t\tscore+=1;\n\t\t\txs = np.append(xs,grid_size*obj_size + 100);\n\t\t\tys = np.append(ys,grid_size*obj_size + 100);\n\t\t\tapplepos=rand_apple_pos();\n\t\tif check_boundries(xs, ys): \n\t\t\treturn;\n\n\t\tif not DEBUG:\n\t\t\ts.fill((255, 255, 255));\n\t\t\ts.blit(img_head, (xs[0],ys[0]));\n\t\t\tfor i in range(1, len(xs)):\n\t\t\t\ts.blit(img_body, (xs[i], ys[i]))\n\t\t\ts.blit(appleimage, applepos);\n\t\t\tt=f.render(\"length: \" + str(score), True, (0, 0, 0));\n\t\t\ts.blit(t, (10, 10));\n\t\t\tt=f.render(\"eps: \" + str(Q_eps), True, (0,0,0));\n\t\t\ts.blit(t, (10, 30));\n\t\t\tpygame.display.update()\n\n### define ai algos ###\n\ndef ai_weighted_rand_walk(params):\n\t\n\tw = params[0]; # weights\n\ts = params[1]; # smart or not (crash into self/walls ok?)\n\t\n\ttries = [(forward,w[0]), (left_turn,w[1]), (right_turn,w[2])] # forward, left, right\n\n\twhile(len(tries)>0):\n\t\tfake_xs = np.copy(xs);\n\t\tfake_ys = np.copy(ys);\n\t\tfake_dirs = dirs;\n\t\t\n\t\t## weighted choice of what to do next\n\t\ttotal = sum(w for c,w in tries);\n\t\tr = random.uniform(0, total);\n\t\tupto = 0;\n\t\tfor c,w in tries:\n\t\t\t\tif upto + w >= r:\n\t\t\t\t\t\tfunc = c;\n\t\t\t\t\t\tweight = w;\n\t\t\t\t\t\tbreak;\n\t\t\t\tupto += w;\n\n\t\tfake_dirs = func(fake_dirs);\n\t\tmove(fake_xs,fake_ys,fake_dirs,False);\n\t\t\n\t\tif not s:\n\t\t\treturn fake_dirs;\n\n\t\tif not (check_boundries(fake_xs,fake_ys) or check_self(fake_xs,fake_ys)):\n\t\t\treturn fake_dirs;\n\t\t\n\t\ttries.remove((func,weight));\n\n\t## at this point we are in a corner and whatever choice will lead us to die\n\t## might as well just return the original direction\n\treturn dirs;\n\ndef ai_smart():\n\ttries = [forward, left_turn, right_turn];\n\twork = [];\n\n\twhile(len(tries)>0):\n\t\tfake_xs = np.copy(xs);\n\t\tfake_ys = np.copy(ys);\n\t\tfake_dirs = dirs;\n\n\t\tfunc = random.choice(tries);\n\t\tfake_dirs = func(fake_dirs);\n\n\t\tmove(fake_xs, fake_ys, fake_dirs, False);\n\n\t\tif(check_boundries(fake_xs,fake_ys)):\n\t\t\ttries.remove(func);\n\t\t\tcontinue;\n\n\t\tif(check_self(fake_xs,fake_ys)):\n\t\t\ttries.remove(func);\n\t\t\tcontinue;\n\n\t\tdist = np.sqrt(pow(fake_xs[0]-applepos[0],2) + pow(fake_ys[0]-applepos[1],2));\n\t\twork.append([dist,fake_dirs]);\n\t\ttries.remove(func);\n\n\tif(len(work)>0):\n\t\tind = 0;\n\t\tmin_dist = 10000000;\n\t\t\n\t\tfor i in range(len(work)):\n\t\t\tif work[i][0] < min_dist:\n\t\t\t\tind = i;\n\t\t\t\tmin_dist = work[i][0];\n\n\t\treturn work[ind][1];\n\n\t## at this point we are in a corner and whatever choice will lead us to die\n\t## might as well just return the original direction\n\treturn dirs;\n\n# Q learning AI\nQ = {};\nQ_runs = np.array([]);\nQ_actions = [forward, left_turn, right_turn];\nQ_eps = .3;\nQ_alpha = .3;\nQ_gamma = .7;\nQ_count = 0;\n\n### find apple quadrant relative to the snake's head\ndef Q_apple_quad(dirs):\n\tdx = 1 if applepos[0] >= xs[0] else -1\n\tdy = 1 if grid_size*obj_size - applepos[1] >= grid_size*obj_size - ys[0] else -1;\n\n\tif dirs == 0: #down\n\t\tqx = -dx;\n\t\tqy = -dy;\n\telif dirs == 1: #right\n\t\tqx = -dy;\n\t\tqy = dx;\n\telif dirs == 2: #up (does nothing)\n\t\tqx = dx;\n\t\tqy = dy;\n\telif dirs == 3: #left\n\t\tqx = dy;\n\t\tqy = -dx;\n\n\treturn (qx,qy);\n
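\n# Example (illustrative): moving right (dirs == 1) with the apple above and\n# behind the head gives (dx, dy) = (-1, 1), hence (qx, qy) = (-1, -1).\n\ndef Q_get_state(dirs):\n\tspc = 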
[0,0,0] # forward, left, right\n\tspc_dist = [10000000, 10000000, 10000000];\n\tfake_dirs = None;\n\n\tfor j in range(3):\n\t\tfake_xs = np.copy(xs);\n\t\tfake_ys = np.copy(ys);\n\t\tfake_dirs = dirs;\n\n\t\tfake_dirs = Q_actions[j](fake_dirs);\n\t\tmove(fake_xs, fake_ys, fake_dirs, False);\n\t\t\n\t\tif(check_boundries(fake_xs, fake_ys) or check_self(fake_xs, fake_ys)):\n\t\t\tspc[j] = -1;\n\n\t\tif check_apple(fake_xs, fake_ys):\n\t\t\tspc[j] = 1;\n\n\t\tspc_dist[j] = np.sqrt(pow(fake_xs[0]-applepos[0],2) + pow(fake_ys[0]-applepos[1],2));\n\n\t(qx, qy) = Q_apple_quad(dirs);\n\t# state: outcome of stepping (forward, left, right) plus the apple quadrant\n\tstate = (spc[0],spc[1],spc[2],qx,qy);\n\n\treturn { \"spc\" : spc, \"spc_dist\" : spc_dist, \"state\" : state};\n\ndef ai_qlearn():\n\tglobal Q, Q_count, Q_eps;\n\t\n\t## due to artifacts of implementation, instead of having a current state\n\t## and previous state, like the Q-learn algorithm requires, we instead have\n\t## a current state and a next state, where the current state effectively\n\t## acts like the previous state, and the next state acts as the current state\n\t## in terms of the Q-Learn Algo.\n\n\t## epsilon greedy exploration\n\tQ_count += 1;\n\tif Q_count == 10000:\n\t\tQ_eps = (.5)*Q_eps;\n\t\tQ_count = 0;\n\n\t## init some variables\n\tepisode_ended = False;\n\treward = 0;\n\tdistance = 10000000;\n\taction = -1;\n\n\t## get current state\n\tstate_space = Q_get_state(dirs);\n\tstate = state_space[\"state\"];\n\tspc = state_space[\"spc\"];\n\tspc_dist = state_space[\"spc_dist\"];\n\n\t# calculate distance to apple\n\tdist = np.sqrt(pow(xs[0]-applepos[0],2) + pow(ys[0]-applepos[1],2));\n\n\t## take best action (or explore)\n\tif state not in Q:\n\t\tQ[state] = [10,10,10]; # assume optimistic actions\n\n\tq = Q[state];\n\tmaxQ = max(q);\n\n\tif random.random() < Q_eps:\n\t\taction = random.choice([0,1,2]);\n\telse:\n\t\tbest = [i for i,x in enumerate(q) if x == maxQ];\n\t\taction = random.choice(best);\n\n\t# get next action\n\tfunc = Q_actions[action];\n\tfake_dirs = func(dirs);\n\n\t## get reward from next state\n\t## action corresponds to the action the snake will take\n\t## spc[action] corresponds to the outcome in next state\n\tif spc[action] == -1: # crashed\n\t\treward = -100;\n\t\tepisode_ended = True;\n\telif spc[action] == 1: # ate apple\n\t\treward = 500;\n\telif spc_dist[action] < dist: # closer to apple\n\t\treward = 20;\n\telse: # anything else\n\t\treward = -10;\n\n\t## update q of current state\n\tif episode_ended:\n\t\tQ[state][action] += Q_alpha * (reward - Q[state][action]);\n\telse:\n\t\tstate_next = Q_get_state(fake_dirs)[\"state\"];\n\t\tif state_next not in Q:\n\t\t\tQ[state_next] = [10,10,10];\n\t\t# TD update on every step, bootstrapping from the best next-state value\n\t\tQ[state][action] += Q_alpha * (reward + Q_gamma * max(Q[state_next]) - Q[state][action]);\n\t\n\treturn fake_dirs;\n\n## 'ai' which lets the user play, basically identity function\ndef ai_user():\n\treturn dirs;\n\n### set environment ###\nDEBUG = False;\nUSER = False;\nTRAIN = False;\nALGO = ai_weighted_rand_walk;\nALGO_PARAMS = [[1/3,1/3,1/3], False]; # completely random walk\nFPS = 0;\n\nfor i in range(1,len(sys.argv)):\n\tcurr = sys.argv[i];\n\tif curr == \"-d\":\n\t\tDEBUG = True;\n\telif curr == \"-r\":\n\t\tALGO = ai_weighted_rand_walk;\n\t\tALGO_PARAMS = [[1/3,1/3,1/3], False];\n\telif curr == \"-wr\":\n\t\tALGO = ai_weighted_rand_walk;\n\t\tALGO_PARAMS = [[.8,.1,.1], False];\n\telif curr == '-sr':\n\t\tALGO = ai_weighted_rand_walk;\n\t\tALGO_PARAMS = [[1/3,1/3,1/3], True];\n\telif curr == '-swr':\n\t\tALGO = ai_weighted_rand_walk;\n\t\tALGO_PARAMS = [[.8,.1,.1], True];\n
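\t# (added note) remaining flags: -s = greedy apple chaser, -q = tabular\n\t# Q-learning, -usr = manual play, -t = resume training, bare digits set FPS.\n\telif 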
curr == \"-s\":\n\t\tALGO = ai_smart;\n\t\tALGO_PARAMS = None;\n\telif curr == \"-q\":\n\t\tALGO = ai_qlearn;\n\t\tALGO_PARAMS = None;\n\telif curr == \"-usr\":\n\t\tALGO = ai_user;\n\t\tALGO_PARAMS = None;\n\t\tUSER = True;\n\telif curr == \"-t\":\n\t\tTRAIN = True;\n\telif curr.isdigit():\n\t\tFPS = float(curr);\n\telse:\n\t\traise Exception(\"Unrecognized parameter \" + curr)\n\n### set up game variables ###\n\nobj_size = 10;\ngrid_size = 30;\ngrid_orig = np.zeros((grid_size,grid_size));\n\nsnake_head_start = (np.floor(grid_size/2)*obj_size,np.floor(grid_size/2)*obj_size);\nsnake_init_length = 5;\n\nxs_orig = np.array([snake_head_start[0]]);\nys_orig = np.array([snake_head_start[1]]);\n\ngrid_orig[xs_orig[0]/obj_size][ys_orig[0]/obj_size] = 1;\n\nfor i in range(1,snake_init_length):\n\txs_orig = np.append(xs_orig, snake_head_start[0]);\n\tys_orig = np.append(ys_orig, snake_head_start[1] - i * obj_size);\n\tgrid_orig[xs_orig[0]/obj_size][ys_orig[i-1]/obj_size] = 1;\n\nxs = np.array([]);\nys = np.array([]);\ngrid = np.zeros((grid_size,grid_size));\n\ndirs = 0;\nscore = 0;\napplepos = (0,0);\n\n### set up non debug environment ###\n\nif not DEBUG:\n\tpygame.init();\n\ts=pygame.display.set_mode((grid_size*obj_size, grid_size*obj_size));\n\tpygame.display.set_caption('SnakeAI');\n\tappleimage = pygame.Surface((obj_size, obj_size));\n\tappleimage.fill((0, 255, 0));\n\timg_body = pygame.Surface((obj_size, obj_size));\n\timg_body.fill((255, 0, 0));\n\timg_head = pygame.Surface((obj_size, obj_size));\n\timg_head.fill((125, 0, 0));\n\tf = pygame.font.SysFont('Arial', 20);\n\tclock = pygame.time.Clock()\n\n### run game\ninit_game();\n","sub_path":"Machine Learning/Final Project - Snake AI (Python)/code/snake_old/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":11485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370892015","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 13 19:02:40 2011\n\n@author: Dai\n\"\"\"\n\nimport pylab as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport numpy as np\nimport time\nimport pickle\n\n#from instruments import AWG81180A\nfrom slab.instruments import InstrumentManager\nfrom .spectrum_analyzer import *\nfrom .sa_calibration_manager import *\n\nfrom .single_sideband_mod import *\n\n\ndef int2amp(intensity):\n \"\"\"convert intensity (in dBm) to amplitude (or more precisely sqrt(intensity))\"\"\"\n return 10.0**(intensity/20.0)\n\nif __name__ == '__main__':\n #awg = AWG81180A(name='awg', address='GPIB::04::INSTR')\n im = InstrumentManager(\"ssm_optimization.cfg\" ) \n awg = im['AWG']\n #sa = SpectrumAnalyzer(protocol='serial', port=2)\n #Agilent analog waveform generator \n #sacm = SACalibrationManager(pickle.load(open('10dBm_cali.data')))\n \n sa = im['SA']\n rf = im['RF'] \n lo = im['LO']\n sacm = SACalibrationManager(pickle.load(open('10dBm_cali.data')))\n \n \"\"\"arbitrary setup parameters\"\"\"\n c_freq = 7e9\n m_freq = 0.1e9\n # amplitude in mV\n m_amp = 200.0\n \n \"\"\"setup analog waveform generators\"\"\"\n rf.set_frequency(c_freq)\n # +13dBm LO to drive IQB0618\n rf.set_power(13)\n lo.set_frequency(c_freq+sa.lo_offset)\n lo.set_power(10)\n \n \"\"\"awg global setup\"\"\"\n \"\"\"synchronize the two channels\"\"\"\n awg.write('inst:coup:stat on')\n \"\"\"turn on both channels\"\"\"\n set_output(awg, 1)\n set_output(awg, 2)\n\n \"\"\"sweep DC offsets and collect carrier amplitudes\"\"\"\n# s_step = 0.001\n# s_start = -0.5\n# s_end = 0.5\n# \n# d1s, d2s = 
np.meshgrid(np.arange(s_start, s_end, s_step), np.arange(s_start, s_end, s_step))\n# \n# t1 = []\n# cs = []\n# for d2 in d2s:\n# set_sin(awg, 2, m_amp, m_freq, d2[0])\n# for d1 in d1s[0]:\n# set_sin(awg, 1, m_amp, m_freq, d1, 90.0)\n# time.sleep(0.003)\n# \"\"\"carrier frequency\"\"\"\n# try:\n# c = int2amp(sacm.get_rf_power(c_freq-m_freq, get_power_at(sa, lo, c_freq-m_freq)))\n# except OutputOutOfRangeError as e:\n# c = int2amp(e.lower_bound_pwr)\n# \n# t1.append(c)\n# print 'd1, d2, c = '+repr((d1, d2[0], c))\n# cs.append(t1)\n# t1 = []\n# \n \n \"\"\"sweep phase and collect sidebands amplitudes\"\"\" \n s_step = 0.1\n s_start = -20.0\n s_end = 20.0\n\n ps = np.arange(s_start, s_end, s_step)\n\n plsbs = []\n pusbs = []\n i = 0\n for p in ps:\n set_sin(awg, 2, m_amp, m_freq, 0.0, 0.0)\n set_sin(awg, 1, m_amp, m_freq, 0.0, 90.0+p)\n time.sleep(0.1)\n \"\"\"lower side band\"\"\"\n #print 'raw # = '+repr(get_power_at(sa, lo, c_freq-m_freq))\n try:\n plsbs.append(sacm.get_rf_power(c_freq-m_freq, \n get_power_at(sa, lo, c_freq-m_freq)))\n except OutputOutOfRangeError as e:\n plsbs.append(e.lower_bound_pwr)\n\n \"\"\"upper side band\"\"\"\n #print 'raw # = '+repr(get_power_at(sa, lo, c_freq+m_freq))\n try:\n pusbs.append(sacm.get_rf_power(c_freq+m_freq, \n get_power_at(sa, lo, c_freq+m_freq)))\n except OutputOutOfRangeError as e:\n pusbs.append(e.lower_bound_pwr)\n \n print('p, lsb, usb = '+repr((p, plsbs[i], pusbs[i])))\n i += 1\n\n# \"\"\"sweep I, Q and collect sidebands amplitudes\"\"\" \n# s_step = 5.0\n# s_start = 50.0\n# s_end = 200.0\n#\n# iis, qqs = np.meshgrid(np.arange(s_start, s_end, s_step), np.arange(s_start, s_end, s_step))\n#\n# t1 = []\n# t2 = []\n# lsbs = []\n# usbs = []\n# for q in qqs:\n# set_sin(awg, 2, q[0], m_freq, 0.0, 0.0)\n# for i in iis[0]:\n# set_sin(awg, 1, i, m_freq, 0.0, 90.0)\n# time.sleep(0.01)\n# \"\"\"lower side band\"\"\"\n# #print 'raw # = '+repr(get_power_at(sa, lo, c_freq-m_freq))\n# try:\n# lsb = int2amp(sacm.get_rf_power(c_freq-m_freq, \n# get_power_at(sa, lo, c_freq-m_freq)))\n# except OutputOutOfRangeError as e:\n# lsb = int2amp(e.lower_bound_pwr)\n# t1.append(lsb)\n# \n# \"\"\"upper side band\"\"\"\n# #print 'raw # = '+repr(get_power_at(sa, lo, c_freq+m_freq))\n# try:\n# usb = int2amp(sacm.get_rf_power(c_freq+m_freq, \n# get_power_at(sa, lo, c_freq+m_freq)))\n# except OutputOutOfRangeError as e:\n# usb = int2amp(e.lower_bound_pwr)\n# t2.append(usb) \n# \n# print 'i, q, lsb, usb = '+repr((i, q[0], lsb, usb))\n# \n# lsbs.append(t1)\n# usbs.append(t2)\n# t1 = []\n# t2 = [] \n\n\n \"\"\"numpified for plotting\"\"\"\n# lsbs = np.array(lsbs) \n# usbs = np.array(usbs)\n# cs = np.array(cs)\n plsbs = np.array(plsbs)\n pusbs = np.array(pusbs)\n \n \"\"\"plots\"\"\" \n \"\"\"carrier amplitude over offsets\"\"\"\n# fig = plt.figure()\n# ax = p3.Axes3D(fig)\n# ax.set_xlabel('Q offset (V)')\n# ax.set_ylabel('I offset (V)')\n# ax.set_zlabel('Carrier Amplitude')\n# #ax.scatter(d1, d2, cs)\n# ax.plot_wireframe(d1s, d2s, cs)\n# plt.show()\n \n# \"\"\"lsb over IQ\"\"\"\n# fig = plt.figure()\n# ax = p3.Axes3D(fig)\n# ax.set_xlabel('Q amplitude')\n# ax.set_ylabel('I amplitude')\n# ax.set_zlabel('LSB Amplitude')\n# #ax.scatter(d1, d2, cs)\n# ax.plot_wireframe(iis, qqs, lsbs)\n# plt.show()\n# \n# \"\"\"usb over IQ\"\"\"\n# fig = plt.figure()\n# ax = p3.Axes3D(fig)\n# ax.set_xlabel('Q amplitude')\n# ax.set_ylabel('I amplitude')\n# ax.set_zlabel('USB Amplitude')\n# #ax.scatter(d1, d2, cs)\n# ax.plot_wireframe(iis, qqs, usbs)\n# plt.show()\n\n \"\"\"lsb over phase\"\"\"\n# 
ax.set_xlabel('phase (deg)')\n# ax.set_ylabel('LSB amplitude')\n plt.plot(ps, plsbs)\n plt.show()\n \n \"\"\"usb over phase\"\"\"\n# ax.set_xlabel('phase (deg)')\n# ax.set_ylabel('LSB amplitude')\n plt.plot(ps, pusbs)\n plt.show()","sub_path":"slab/instruments/spec_analyzer/mixer_std_calibration.py","file_name":"mixer_std_calibration.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283553922","text":"import asyncio\nimport contextlib\nimport socket\n\nimport pytest\n\nimport asyncio_dgram\n\n\n@contextlib.contextmanager\ndef loop_exception_handler():\n \"\"\"\n Replace the current event loop exception handler with one that\n simply stores exceptions in the returned dictionary.\n\n @return - dictionary that is updated with the last loop exception\n \"\"\"\n context = {}\n\n def handler(self, c):\n context.update(c)\n\n loop = asyncio.get_event_loop()\n orig_handler = loop.get_exception_handler()\n loop.set_exception_handler(handler)\n yield context\n\n loop.set_exception_handler(orig_handler)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"addr,family\",\n [((\"127.0.0.1\", 0), socket.AF_INET), ((\"::1\", 0), socket.AF_INET6)],\n ids=[\"INET\", \"INET6\"],\n)\nasync def test_connect_sync(addr, family):\n # Bind a regular socket, asyncio_dgram connect, then check asyncio send and\n # receive.\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n sock.bind(addr)\n client = await asyncio_dgram.connect(sock.getsockname()[:2])\n\n assert client.peername == sock.getsockname()\n\n await client.send(b\"hi\")\n got, client_addr = sock.recvfrom(4)\n assert got == b\"hi\"\n assert client_addr == client.sockname\n assert client.peername == sock.getsockname()\n\n sock.sendto(b\"bye\", client.sockname)\n got, server_addr = await client.recv()\n assert got == b\"bye\"\n assert server_addr == sock.getsockname()\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(client.recv(), 0.05)\n\n # Same as above but reversing the flow. 
Bind a regular socket, asyncio_dgram\n # connect, then check asyncio receive and send.\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n sock.bind(addr)\n client = await asyncio_dgram.connect(sock.getsockname()[:2])\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(client.recv(), 0.05)\n\n assert client.peername == sock.getsockname()\n\n sock.sendto(b\"hi\", client.sockname)\n got, server_addr = await client.recv()\n assert got == b\"hi\"\n assert server_addr == sock.getsockname()\n\n await client.send(b\"bye\")\n got, client_addr = sock.recvfrom(4)\n assert got == b\"bye\"\n assert client_addr == client.sockname\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"addr,family\",\n [((\"127.0.0.1\", 0), socket.AF_INET), ((\"::1\", 0), socket.AF_INET6)],\n ids=[\"INET\", \"INET6\"],\n)\nasync def test_bind_sync(addr, family):\n # Bind an asyncio_dgram, regular socket connect, then check asyncio send and\n # receive.\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n server = await asyncio_dgram.bind(addr)\n sock.connect(server.sockname)\n\n assert server.peername is None\n\n await server.send(b\"hi\", sock.getsockname())\n got, server_addr = sock.recvfrom(4)\n assert got == b\"hi\"\n assert server_addr == server.sockname\n\n sock.sendto(b\"bye\", server.sockname)\n got, client_addr = await server.recv()\n assert got == b\"bye\"\n assert client_addr == sock.getsockname()\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(server.recv(), 0.05)\n\n # Same as above but reversing the flow. Bind an asyncio_dgram, regular\n # socket connect, then check asyncio receive and send.\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n server = await asyncio_dgram.bind(addr)\n sock.connect(server.sockname)\n\n assert server.peername is None\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(server.recv(), 0.05)\n\n sock.sendto(b\"hi\", server.sockname)\n got, client_addr = await server.recv()\n assert got == b\"hi\"\n assert client_addr == sock.getsockname()\n\n await server.send(b\"bye\", sock.getsockname())\n got, server_addr = sock.recvfrom(4)\n assert got == b\"bye\"\n assert server_addr == server.sockname\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"addr,family\",\n [((\"127.0.0.1\", 0), socket.AF_INET), ((\"::1\", 0), socket.AF_INET6)],\n ids=[\"INET\", \"INET6\"],\n)\nasync def test_from_socket_streamtype(addr, family):\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n sock.bind(addr)\n stream = await asyncio_dgram.from_socket(sock)\n\n assert stream.sockname is not None\n assert sock.getsockname() == stream.sockname\n assert stream.peername is None\n assert stream.socket.fileno() == sock.fileno()\n assert isinstance(stream, asyncio_dgram.aio.DatagramServer)\n\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n sock.bind(addr)\n\n with socket.socket(family, socket.SOCK_DGRAM) as tsock:\n tsock.connect(sock.getsockname())\n stream = await asyncio_dgram.from_socket(tsock)\n\n assert stream.sockname is not None\n assert tsock.getsockname() == stream.sockname\n assert isinstance(stream, asyncio_dgram.aio.DatagramClient)\n assert stream.peername == sock.getsockname()\n assert stream.socket.fileno() == tsock.fileno()\n\n # Make sure that the transport stored the peername\n with loop_exception_handler() as context:\n await stream.send(b\"abc\")\n assert context == {}\n\n\n@pytest.mark.asyncio\nasync def test_from_socket_bad_socket():\n with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) 
as sock:\n with pytest.raises(TypeError, match=\"either AddressFamily.AF\"):\n await asyncio_dgram.from_socket(sock)\n\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n with pytest.raises(TypeError, match=\"must be SocketKind.SOCK_DGRAM\"):\n await asyncio_dgram.from_socket(sock)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n \"addr,family\",\n [((\"127.0.0.1\", 0), socket.AF_INET), ((\"::1\", 0), socket.AF_INET6)],\n ids=[\"INET\", \"INET6\"],\n)\nasync def test_no_server(addr, family):\n with socket.socket(family, socket.SOCK_DGRAM) as sock:\n sock.bind(addr)\n free_addr = sock.getsockname()\n\n client = await asyncio_dgram.connect(free_addr[:2])\n await client.send(b\"hi\")\n\n for _ in range(20):\n try:\n await client.send(b\"hi\")\n except ConnectionRefusedError:\n break\n await asyncio.sleep(0.01)\n else:\n pytest.fail(\"ConnectionRefusedError not raised\")\n\n assert client.peername == free_addr\n client.close()\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"addr\", [(\"127.0.0.1\", 0), (\"::1\", 0)], ids=[\"INET\", \"INET6\"])\nasync def test_echo(addr):\n server = await asyncio_dgram.bind(addr)\n client = await asyncio_dgram.connect(server.sockname[:2])\n\n await client.send(b\"hi\")\n data, client_addr = await server.recv()\n assert data == b\"hi\"\n assert client_addr == client.sockname\n\n await server.send(b\"bye\", client_addr)\n data, server_addr = await client.recv()\n assert data == b\"bye\"\n assert server_addr == server.sockname\n\n assert server.peername is None\n assert client.peername == server.sockname\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"addr\", [(\"127.0.0.1\", 0), (\"::1\", 0)], ids=[\"INET\", \"INET6\"])\nasync def test_echo_bind(addr):\n server = await asyncio_dgram.bind(addr)\n client = await asyncio_dgram.bind(addr)\n\n await client.send(b\"hi\", server.sockname)\n data, client_addr = await server.recv()\n assert data == b\"hi\"\n assert client_addr == client.sockname\n\n await server.send(b\"bye\", client_addr)\n data, server_addr = await client.recv()\n assert data == b\"bye\"\n assert server_addr == server.sockname\n\n assert server.peername is None\n assert client.peername is None\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"addr\", [(\"127.0.0.1\", 0), (\"::1\", 0)], ids=[\"INET\", \"INET6\"])\nasync def test_unconnected_sender(addr):\n # Bind two endpoints and connect to one. Ensure that only the endpoint\n # that was connected to can send.\n ep1 = await asyncio_dgram.bind(addr)\n ep2 = await asyncio_dgram.bind(addr)\n connected = await asyncio_dgram.connect(ep1.sockname[:2])\n\n await ep1.send(b\"from-ep1\", connected.sockname)\n await ep2.send(b\"from-ep2\", connected.sockname)\n data, server_addr = await connected.recv()\n assert data == b\"from-ep1\"\n assert server_addr == ep1.sockname\n\n with pytest.raises(asyncio.TimeoutError):\n await asyncio.wait_for(connected.recv(), 0.05)\n","sub_path":"test/test_aio.py","file_name":"test_aio.py","file_ext":"py","file_size_in_byte":8593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82843608","text":"\"\"\"\nRelationship between array indexes and tree element\nA complete binary tree has an interesting property that we can use to find the children and parents of any node.\n\nIf the index of any element in the array is i, the element in the index 2i+1 will become the left child and element in 2i+2 index will become the right child. 
Also, the parent of any element at index i is given by the lower bound of (i-1)/2.\"\"\"\nclass Node:\n\n def __init__(self, key):\n self.key = key\n self.left = None\n self.right = None\n\n\ndef countNodes(root):\n if root is None:\n return 0\n return (1 + countNodes(root.left) + countNodes(root.right))\n\n\ndef isComplete(root, index, numberNodes):\n\n if root is None:\n return True\n\n if index >= numberNodes:\n return False\n\n return (isComplete(root.left, 2 * index + 1, numberNodes)\n and isComplete(root.right, 2 * index + 2, numberNodes))\n\n\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.left = Node(4)\nroot.left.right = Node(5)\nroot.right.left = Node(6)\n\nnode_count = countNodes(root)\nindex = 0\n\nif isComplete(root, index, node_count):\n print(\"The tree is a complete binary tree\")\nelse:\n print(\"The tree is not a complete binary tree\")\n","sub_path":"Trees/complete binary tree1.py","file_name":"complete binary tree1.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41582687","text":"import ply.lex as lex\nimport ply.yacc as yacc\nimport sys\n\n# Create a list to hold all of the token names\ntokens = [\n 'BOOL',\n 'NAME',\n 'LPAREN',\n 'RPAREN',\n 'NEG',\n 'AND',\n 'OR',\n 'COND',\n 'BICOND',\n 'EQUALS'\n]\n\n# Use regular expressions to define what each token is\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_NEG = r'\\~'\nt_AND = r'\\&'\nt_OR = r'\\|'\nt_COND = r'\\->'\nt_BICOND = r'\\<->'\nt_EQUALS = r'\\='\n\n# Ply's special t_ignore variable allows us to define characters the lexer will ignore.\n# We're ignoring spaces.\nt_ignore = r' '\n\n# More complicated tokens, such as tokens that are more than 1 character in length\n# are defined using functions.\n\n# An int is 1 or more numbers.\ndef t_BOOL(t):\n r'(F|T)'\n if t.value == 'F':\n t.value = False\n else:\n t.value = True\n return t\n\n# A NAME is a variable name. A variable can be 1 or more characters in length.\n# The first character must be in the ranges a-z A-Z or be an underscore.\n# Any character following the first character can be a-z A-Z 0-9 or an underscore.\ndef t_NAME(t):\n r'(?!(F|T))[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = 'NAME'\n return t\n\n# Skip the current token and output 'Illegal characters' using the special Ply t_error function.\ndef t_error(t):\n #print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n raise CalcError(\"Illegal character '%s'\" % t.value[0])\n\n# Build the lexer\nlexer = lex.lex()\n\n# Ensure our parser understands the correct order of operations.\n# The precedence variable is a special Ply variable.\nprecedence = (\n ('left', 'BICOND'),\n ('left', 'COND'),\n ('left', 'OR'),\n ('left', 'AND'),\n ('left', 'NEG'),\n ('left', 'LPAREN','RPAREN')\n)\n\n# Convert a boolean element to their char representation\ndef boolToChar(boolean):\n if boolean == True:\n return 'T'\n else:\n return 'F'\n\n# Define our grammar. 
We allow expressions, var_assign's and empty's.\ndef p_calc(p):\n    '''\n    calc : expression\n         | var_assign\n         | empty\n    '''\n    result = run(p[1])\n    # assignments and empty input yield no boolean to display\n    if result is None or result == '':\n        return\n    if isinstance(result, bool):\n        print(boolToChar(result))\n    else:\n        print(result)\n\ndef p_var_assign(p):\n    '''\n    var_assign : NAME EQUALS expression\n    '''\n    # Build our tree\n    p[0] = ('=', p[1], p[3])\n\ndef p_expression_negation(p):\n    '''\n    expression : NEG expression\n    '''\n    # build a tree node so variables are resolved at evaluation time\n    p[0] = ('~', p[2])\n\ndef p_expression_parentesis(p):\n    '''\n    expression : LPAREN expression RPAREN\n    '''\n    p[0] = p[2]\n\n# Expressions are recursive.\ndef p_expression(p):\n    '''\n    expression : expression BICOND expression\n               | expression COND expression\n               | expression OR expression\n               | expression AND expression\n    '''\n    # Build our tree.\n    p[0] = (p[2], p[1], p[3])\n\ndef p_expression_bool(p):\n    '''\n    expression : BOOL\n    '''\n    p[0] = p[1]\n\ndef p_expression_var(p):\n    '''\n    expression : NAME\n    '''\n    p[0] = ('var', p[1])\n\n# Output to the user that there is an error in the input as it doesn't conform to our grammar.\n# p_error is another special Ply function.\ndef p_error(p):\n    #print(\"Syntax error found!\")\n    raise CalcError(\"Syntax error found!\")\n\ndef p_empty(p):\n    '''\n    empty :\n    '''\n    p[0] = None\n\n# Build the parser\nparser = yacc.yacc(debug=0, write_tables=0)\n# Create the environment upon which we will store and retrieve variables from.\nenv = {}\n# The run function is our recursive function that 'walks' the tree generated by our parser.\ndef run(p):\n    global env\n    if type(p) == tuple:\n        if p[0] == '&':\n            return run(p[1]) and run(p[2])\n        elif p[0] == '|':\n            return run(p[1]) or run(p[2])\n        elif p[0] == '~':\n            return not run(p[1])\n        elif p[0] == '->':\n            return not run(p[1]) or run(p[2])\n        elif p[0] == '<->':\n            return (not run(p[1]) or run(p[2])) and (not run(p[2]) or run(p[1]))\n        elif p[0] == '=':\n            env[p[1]] = run(p[2])\n            return ''\n        elif p[0] == 'var':\n            if p[1] not in env:\n                return 'Undeclared variable found!'\n            else:\n                return env[p[1]]\n    else:\n        return p\n
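\n# Example (illustrative): the input '~F & T' parses to ('&', ('~', False), True),\n# which run() evaluates to True, so the REPL prints T.\n\n# Create a REPL to provide a way to interface with our interpreter.\n'''\nwhile True:\n    try:\n        s = input('>> ')\n        \n    except EOFError:\n        break\n    parser.parse(s)\n'''\n\nclass CalcError(Exception):\n    def __init__(self, message):\n        self.message = message\n\ntry:\n    while True:\n        s = input('>>> ')\n        try:\n            answer = parser.parse(s)\n            if answer is not None:\n                print(answer)\n        except CalcError as e:\n            print(e.message)\nexcept EOFError:\n    print(\"XD\")","sub_path":"prologic.py","file_name":"prologic.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"512773401","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    pyrseas.dbobject.rule\n    ~~~~~~~~~~~~~~~~~~~~~\n\n    This defines two classes, Rule and RuleDict, derived from\n    DbSchemaObject and DbObjectDict, respectively.\n\"\"\"\nfrom pyrseas.dbobject import DbObjectDict, DbSchemaObject\nfrom pyrseas.dbobject import quote_id, commentable\n\n\nclass Rule(DbSchemaObject):\n    \"\"\"A rewrite rule definition\"\"\"\n\n    keylist = ['schema', 'table', 'name']\n    objtype = \"RULE\"\n\n    def identifier(self):\n        \"\"\"Return a full identifier for a rule object\n\n        :return: string\n        \"\"\"\n        return \"%s ON %s\" % (quote_id(self.name), self._table.qualname())\n\n    def to_map(self):\n        \"\"\"Convert rule to a YAML-suitable format\n\n        :return: dictionary\n        \"\"\"\n        dct = self._base_map()\n        del dct['_table']\n        return {self.name: dct}\n\n    @commentable\n    def create(self):\n        \"\"\"Return SQL statements to CREATE the rule\n\n        :return: SQL statements\n        \"\"\"\n        where = instead = ''\n        if hasattr(self, 'condition'):\n            where = ' 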
WHERE %s' % self.condition\n if hasattr(self, 'instead'):\n instead = 'INSTEAD '\n return [\"CREATE RULE %s AS ON %s\\n TO %s%s\\n DO %s%s\" % (\n quote_id(self.name), self.event.upper(),\n self._table.qualname(), where, instead, self.actions)]\n\n\nclass RuleDict(DbObjectDict):\n \"The collection of rewrite rules in a database.\"\n\n cls = Rule\n query = \\\n \"\"\"SELECT nspname AS schema, relname AS table, rulename AS name,\n split_part('select,update,insert,delete', ',',\n ev_type::int - 48) AS event, is_instead AS instead,\n pg_get_ruledef(r.oid) AS definition,\n obj_description(r.oid, 'pg_rewrite') AS description\n FROM pg_rewrite r JOIN pg_class c ON (ev_class = c.oid)\n JOIN pg_namespace n ON (relnamespace = n.oid)\n WHERE relkind = 'r'\n AND (nspname != 'pg_catalog' AND nspname != 'information_schema')\n ORDER BY nspname, relname, rulename\"\"\"\n\n def _from_catalog(self):\n \"\"\"Initialize the dictionary of rules by querying the catalogs\"\"\"\n for rule in self.fetch():\n do_loc = rule.definition.index(' DO ')\n if 'WHERE' in rule.definition:\n rule.condition = rule.definition[rule.definition.index(\n ' WHERE ') + 7:do_loc]\n if hasattr(rule, 'instead') and rule.instead:\n do_loc += 8\n rule.actions = rule.definition[do_loc + 4:-1]\n del rule.definition\n self[rule.key()] = rule\n\n def from_map(self, table, inmap):\n \"\"\"Initialize the dictionary of rules by examining the input map\n\n :param inmap: the input YAML map defining the rules\n \"\"\"\n for rul in inmap:\n inrule = inmap[rul]\n rule = Rule(table=table.name, schema=table.schema, name=rul,\n **inrule)\n if inrule:\n if 'oldname' in inrule:\n rule.oldname = inrule['oldname']\n del inrule['oldname']\n if 'description' in inrule:\n rule.description = inrule['description']\n self[(table.schema, table.name, rul)] = rule\n\n def diff_map(self, inrules):\n \"\"\"Generate SQL to transform existing rules\n\n :param input_map: a YAML map defining the new rules\n :return: list of SQL statements\n\n Compares the existing rule definitions, as fetched from the\n catalogs, to the input map and generates SQL statements to\n transform the rules accordingly.\n \"\"\"\n stmts = []\n # check input rules\n for rul in inrules:\n inrul = inrules[rul]\n # does it exist in the database?\n if rul in self:\n stmts.append(self[rul].diff_map(inrul))\n else:\n # check for possible RENAME\n if hasattr(inrul, 'oldname'):\n oldname = inrul.oldname\n try:\n stmts.append(self[oldname].rename(inrul.name))\n del self[oldname]\n except KeyError as exc:\n exc.args = (\"Previous name '%s' for rule '%s' \"\n \"not found\" % (oldname, inrul.name), )\n raise\n else:\n # create new rule\n stmts.append(inrul.create())\n # check database rules\n for (sch, tbl, rul) in self:\n # if missing, drop it\n if (sch, tbl, rul) not in inrules:\n stmts.append(self[(sch, tbl, rul)].drop())\n\n return stmts\n","sub_path":"pyrseas/dbobject/rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592807597","text":"import json\nimport requests\n\nclass RestAgent:\n\n def __init__(self, url, key):\n self.__session = requests.Session()\n self.base_url = url\n self.client_list = {}\n self.session = requests.Session()\n self.api_secret = key\n\n def send_request(self, request, param=None, method='POST', private=False):\n url = self.base_url + request\n headers = {}\n if private:\n headers = {\n 'Content-Type': \"application/json\",\n 'Authorization': 'Bearer 
{}'.format(self.api_secret)\n }\n\n try:\n if method == 'GET':\n response = self.session.get(url, headers=headers)\n elif method == 'DELETE':\n response = self.session.delete(url, headers=headers)\n elif method == 'POST' and param is not None:\n response = self.session.post(url, headers=headers, data=json.dumps(param))\n elif method == 'POST' and param is None:\n response = self.session.post(url, headers=headers)\n return response.json()\n except IndexError as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n settings = {\n 'key': '',\n 'secret': ''\n }\n rest_wrapper = RestAgent('https://api.exchange.ripio.com/api/v1', 'a963ae2fccf59bbaae607b1a65b3ca2d3305378b2dc59a0659a02b3b675a6513')\n print('Depth test:')\n resp = rest_wrapper.send_request(request='/orderbook/btc_usdc/', param=None, method='GET')\n print(resp)\n\n print('Ticker test:')\n resp = rest_wrapper.send_request(request='/rate/btc_usdc', param=None, method='GET')\n print(resp)\n\n print('Pairs test:')\n resp = rest_wrapper.send_request(request='/pair/', param=None, method='GET')\n print(resp)\n\n print('Balance test:')\n resp = rest_wrapper.send_request(request='/balances/exchange_balances/', param=None, method='GET', private=True)\n print(resp)\n\n while True:\n pass","sub_path":"test/ripio_tests/test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84019066","text":"from __future__ import unicode_literals, division, print_function\n\nimport numpy as np\nimport re\nimport os\nimport csv\n\nfrom vgg16 import Vgg16\nfrom utils import save_array, load_array, get_data, get_batches, onehot\nfrom toolbox import extract_images_and_classes, MODELS_PATH\nfrom keras.models import Sequential\nfrom keras.layers import Input\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\nfrom keras.optimizers import SGD, RMSprop\nfrom keras import backend as K\n\n\ndef write_submission_csv(data, columns):\n \"\"\"\n Write data according to the Kaggle submission format.\n \"\"\"\n with open('submission.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerow(columns)\n for key in data.keys():\n w.writerow([key[8:]] + data[key])\n\n\ndef main():\n path = 'sample/' # In order to use the big dataset set it to empty string.\n batch_size = 1 # If you are using a GPU, please change it to 64\n # We are going to use the 1000 categories from the IMAGENET model,\n # so we need to re-use an instance of it.\n vgg = Vgg16()\n # First, we check if we have saved arrays with the data and classes.\n train_data, val_data, batches_classes, val_classes = extract_images_and_classes(\n path, batch_size=batch_size\n )\n\n # We need the labels in \"hot encoding form\". 
e.g [1, 0] or [0, 1]\n train_labels = onehot(batches_classes)\n val_labels = onehot(val_classes)\n\n # It's always good to see an overview of the whole model.\n vgg.model.summary()\n # Now we need to create the model.\n # --------------------------------------------------------\n # 0) Replace and train the last Dense layer as it doesn't really\n # fit our purposes.\n print('Let\\'s remove the last layer as it\\'s already DENSE...')\n vgg.model.pop()\n # Here we say that we only want to train the last layer\n for layer in vgg.model.layers: layer.trainable = False\n print('And train a new layer that outputs 10 classes (c0..c9)')\n vgg.model.add(Dense(10, activation='softmax'))\n opt = RMSprop(lr=0.1)\n vgg.model.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n vgg.model.fit(train_data, train_labels, nb_epoch=1,\n validation_data=(val_data, val_labels), batch_size=batch_size)\n # --------------------------------------------------------\n # 1) Backpropagate and train the previous layers.\n print('Now let\\'s train the previous layers using backpropagation')\n layers = vgg.model.layers\n # Get the index of the first dense layer...\n first_dense_idx = [index for index, layer in enumerate(layers) if type(layer) is Dense][0]\n # ...and set this and all subsequent layers to trainable\n for layer in layers[first_dense_idx:]: layer.trainable = True\n K.set_value(opt.lr, 0.01)\n vgg.model.fit(train_data, train_labels, nb_epoch=1,\n validation_data=(val_data, val_labels), batch_size=batch_size)\n\n print('Saving weights')\n vgg.model.save_weights(MODELS_PATH + 'weights.bc')\n #vgg.model.load_weights(MODEL_PATH + 'weights.bc')\n\n test_batches, predictions = vgg.test(path+'test', batch_size=batch_size)\n\n d = {}\n for idx, filename in enumerate(test_batches.filenames):\n # We use a trick to never show 0 or 1, but 0.05 and 0.95.\n # This is required becase log loss penalizes predictions that are confident and wrong.\n d[filename] = [prediction.clip(min=0.05, max=0.95) for prediction in predictions[idx]]\n write_submission_csv(d, ['img', 'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'])\n\nif __name__ == '__main__':\n main()\n","sub_path":"state-farm-distracted-driver-detection/generate_submission_statefarm.py","file_name":"generate_submission_statefarm.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481397604","text":"# ----------------------------------------------------------------\n# Copyright 2016 Cisco Systems\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------\n\n\"\"\"test_sanity_rpc.py\nsanity test for netconf\n\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\nimport unittest\n\nfrom ydk.errors import YPYModelError, YPYError, YPYServiceError\nfrom ydk.models.ydktest import ydktest_sanity as ysanity\nfrom ydk.providers import NetconfServiceProvider\nfrom ydk.services import 
NetconfService, Datastore\n\nfrom test_utils import ParametrizedTestCase\nfrom test_utils import get_device_info\n\n\nclass SanityNetconf(ParametrizedTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.ncc = NetconfServiceProvider(\n cls.hostname,\n cls.username,\n cls.password,\n cls.port,\n cls.protocol,\n not cls.on_demand,\n cls.common_cache,\n cls.timeout)\n cls.netconf_service = NetconfService()\n\n def setUp(self):\n from ydk.services import CRUDService\n crud = CRUDService()\n runner = ysanity.Runner()\n crud.delete(self.ncc, runner)\n\n def tearDown(self):\n pass\n\n def test_edit_commit_get(self):\n runner = ysanity.Runner()\n runner.one.number = 1\n runner.one.name = 'runner:one:name'\n\n get_filter = ysanity.Runner()\n\n op = self.netconf_service.edit_config(self.ncc, Datastore.candidate, runner)\n self.assertEqual(True, op)\n\n result = self.netconf_service.get_config(self.ncc, Datastore.candidate, get_filter)\n self.assertEqual(runner, result)\n\n op = self.netconf_service.commit(self.ncc)\n self.assertEqual(True, op)\n\n result = self.netconf_service.get(self.ncc, get_filter)\n self.assertEqual(runner, result)\n\n def test_lock_unlock(self):\n op = self.netconf_service.lock(self.ncc, Datastore.running)\n self.assertEqual(True, op)\n\n op = self.netconf_service.unlock(self.ncc, Datastore.running)\n self.assertEqual(True, op)\n\n # Failing - NetconfService glue code needed\n def test_lock_unlock_fail(self):\n op = self.netconf_service.lock(self.ncc, Datastore.candidate)\n self.assertEqual(True, op)\n\n try:\n op = self.netconf_service.unlock(self.ncc, Datastore.running)\n except Exception as e:\n self.assertIsInstance(e, YPYError)\n\n def test_validate(self):\n op = self.netconf_service.validate(self.ncc, source=Datastore.candidate)\n self.assertEqual(True, op)\n\n runner = ysanity.Runner()\n runner.one.number = 1\n runner.one.name = 'runner:one:name'\n op = self.netconf_service.validate(self.ncc, source=runner)\n self.assertEqual(True, op)\n\n def test_validate_fail(self):\n # should have been handled by YDK local validation\n pass\n\n def test_commit_discard(self):\n runner = ysanity.Runner()\n runner.two.number = 2\n runner.two.name = 'runner:two:name'\n get_filter = ysanity.Runner()\n\n op = self.netconf_service.edit_config(self.ncc, Datastore.candidate, runner)\n self.assertEqual(True, op)\n\n op = self.netconf_service.discard_changes(self.ncc)\n self.assertEqual(True, op)\n\n op = self.netconf_service.edit_config(self.ncc, Datastore.candidate, runner)\n self.assertEqual(True, op)\n\n op = self.netconf_service.commit(self.ncc)\n self.assertEqual(True, op)\n\n result = self.netconf_service.get(self.ncc, get_filter)\n self.assertEqual(runner, result)\n\n @unittest.skip('No message id in cancel commit payload')\n def test_confirmed_commit(self):\n runner = ysanity.Runner()\n runner.two.number = 2\n runner.two.name = 'runner:two:name'\n get_filter = ysanity.Runner()\n\n op = self.netconf_service.edit_config(self.ncc, Datastore.candidate, runner)\n self.assertEqual(True, op)\n\n op = self.netconf_service.commit(self.ncc, confirmed=True, confirm_timeout=120)\n self.assertEqual(True, op)\n\n result = self.netconf_service.get(self.ncc, get_filter)\n self.assertEqual(runner, result)\n\n op = self.netconf_service.cancel_commit(self.ncc)\n self.assertEqual(True, op)\n\n def test_copy_config(self):\n op = self.netconf_service.copy_config(self.ncc, Datastore.candidate, Datastore.running)\n self.assertEqual(True, op)\n\n runner = ysanity.Runner()\n runner.two.number = 2\n 
runner.two.name = 'runner:two:name'\n        get_filter = ysanity.Runner()\n\n        op = self.netconf_service.edit_config(self.ncc, Datastore.candidate, runner)\n        self.assertEqual(True, op)\n\n        op = self.netconf_service.copy_config(self.ncc, Datastore.running, Datastore.candidate)\n        self.assertEqual(True, op)\n\n        result = self.netconf_service.get_config(self.ncc, Datastore.running, get_filter)\n        self.assertEqual(runner, result)\n\n        runner.two.name = '%smodified' % runner.two.name\n\n        op = self.netconf_service.copy_config(self.ncc, Datastore.running, runner)\n        self.assertEqual(True, op)\n\n        result = self.netconf_service.get_config(self.ncc, Datastore.running, get_filter)\n        self.assertEqual(runner, result)\n\n    def test_delete_config(self):\n        pass\n        # startup and candidate cannot be both enabled in ConfD\n        # op = self.netconf_service.delete_config(self.ncc, Datastore.startup)\n        # self.assertEqual(True, op)\n\n    # Error not thrown by TCP client, YPYError is populated instead\n    def test_delete_config_fail(self):\n        found = False\n        try:\n            self.netconf_service.delete_config(self.ncc, Datastore.running)\n        except (YPYError, YPYModelError):\n            found = True\n        self.assertEqual(found, True)\n\n    # Failing - NetconfService glue code needed\n    def test_copy_config_fail(self):\n        self.assertRaises(YPYServiceError,\n                          self.netconf_service.copy_config,\n                          self.ncc,\n                          target=123,\n                          source=456)\n\n    # Failing - NetconfService glue code needed\n    def test_edit_config_fail(self):\n        self.assertRaises(YPYServiceError,\n                          self.netconf_service.edit_config,\n                          self.ncc,\n                          Datastore.startup,\n                          Datastore.candidate)\n\n    # Failing - NetconfService glue code needed\n    def test_get_config_fail(self):\n        runner = ysanity.Runner()\n        self.assertRaises(YPYServiceError,\n                          self.netconf_service.get_config,\n                          self.ncc,\n                          \"invalid-input\",\n                          runner)\n\n    # Failing - NetconfService glue code needed\n    def test_lock_fail(self):\n        self.assertRaises(YPYServiceError,\n                          self.netconf_service.lock,\n                          self.ncc,\n                          \"invalid-input\")\n\n    # Failing - NetconfService glue code needed\n    def test_unlock_fail(self):\n        self.assertRaises(YPYServiceError,\n                          self.netconf_service.unlock,\n                          self.ncc,\n                          \"invalid-input\")\n\n\nif __name__ == '__main__':\n    device, non_demand, common_cache, timeout = get_device_info()\n\n    suite = unittest.TestSuite()\n    suite.addTest(ParametrizedTestCase.parametrize(\n        SanityNetconf,\n        device=device,\n        non_demand=non_demand,\n        common_cache=common_cache,\n        timeout=timeout))\n    ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()\n    sys.exit(ret)\n","sub_path":"sdk/python/core/tests/test_sanity_netconf.py","file_name":"test_sanity_netconf.py","file_ext":"py","file_size_in_byte":8267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"340534587","text":"import time # for the timer\n\n# Function definition\ndef f(x):\n    return x**2 -10\n\n## Without recursion\n\n\n\ndef regula_falsi(f, a, b, epsilon):\n    nIter = 0 # number of iterations\n\n    if f(a) * f(b) > 0:\n        return None # the IVT does not apply here\n    u, v = float(a), float(b)\n    while (abs(f(u)) > epsilon):\n        nIter = nIter + 1 # increment the loop counter\n        w = u -f(u)*(v-u)/(f(v)-f(u))\n        if f(u) * f(w) <= 0: # w replaces v\n            v = w\n        else: # w replaces u\n            u = w\n    print(\"Number of iterations:\", nIter)\n    return w\n\n\n\n# Read the interval\na = 0\nb = 10\n\n\nepsilon=1e-12\n\n\n# Results\nstart_time = time.perf_counter_ns() # start the timer\nprint(\"The zero of this function on (a,b) is:\",regula_falsi(f,a,b,epsilon))\n\ninterval = time.perf_counter_ns() - start_time # compute the elapsed time\n\nprint(\"Function execution time:\", interval*10**(-6))\n\n## With recursion\n\n\ndef regu_rec(f,a, b,prec):\n    a, b = float(a), float(b) # convert the bounds to floats\n    if (abs(f(a))<=prec): # a is already a root (within tolerance)\n        return a -f(a)*(b-a)/(f(b)-f(a))\n    else:\n        c = a -f(a)*(b-a)/(f(b)-f(a)) # compute the new bound\n\n        if f(a)*f(c) <= 0: # if the IVT applies between a and c, c replaces b\n            return regu_rec(f,a,c,prec)\n        else: # otherwise c replaces a\n            return regu_rec(f,c,b,prec)\n\n\n\n# Results\nstart_time = time.perf_counter_ns() # start the timer\nprint(\"The zero of this function on (a,b) is:\", regu_rec(f,a,b,epsilon))\n\ninterval = time.perf_counter_ns() - start_time # compute the elapsed time\n\nprint(\"Function execution time:\", interval*10**(-6))","sub_path":"Grandes méthode d'optimisation/Regula Falsi/regula.py","file_name":"regula.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"387582885","text":"# read data file\nf = open('d16-input.txt', 'r')\ndata = [line.rstrip() for line in f.readlines()]\n# Test data\n# data = [\"class: 1-3 or 5-7\",\"row: 6-11 or 33-44\",\"seat: 13-40 or 45-50\",\"your ticket:\",\"7,1,14\",\"\",\"nearby tickets:\",\"7,3,47\",\"40,4,50\",\"55,2,20\",\"38,6,12\"]\n\n# Split data into areas - fields, my ticket, nearby tickets\nfields, myTicket, nearbyTickets = {}, [], []\n# split fields into dictionary with class type, min and max values\nfor idx, line in enumerate(data):\n    if line == \"\": continue\n    if line[:11] == \"your ticket\":\n        break\n    # split the line into text before the colon to form the key, then the value pairs after the colon separated by ' or ' to form value pairs\n    fields[line.split(\": \")[0]] = (line.split(\": \")[1].split(\" or \")[0], line.split(\": \")[1].split(\" or \")[1])\n    # the min of the min value\n    min_min = int(fields[line.split(\": \")[0]][0].split(\"-\")[0])\n    # the max of the min value\n    min_max = int(fields[line.split(\": \")[0]][0].split(\"-\")[1])\n    # the min of the max value\n    max_min = int(fields[line.split(\": \")[0]][1].split(\"-\")[0])\n    # the max of the max value\n    max_max = int(fields[line.split(\": \")[0]][1].split(\"-\")[1])\n    # redefine the dictionary entry with new min/max values\n    fields[line.split(\": \")[0]] = (min_min, min_max, max_min, max_max)\n# split myTicket data into list\nfor line in data[idx+1:]:\n    if line == \"\": continue\n    if line[:14] == \"nearby tickets\":\n        break\n    myTicket.append(line)\n# split nearbyTicket data into list\nfor line in data[idx+4:]:\n    if line == \"\": continue\n    nearbyTickets.append(line)\n\n# ignore my ticket\n# loop through each field, and compare ticket values to given parameters\n# store sum of invalid parameters\n# can combine all parameters into one set to compare with each line of list\nvalidParams = set() # define empty set\nticketScanningErrorRate = 0\n# add parameters to set as union of sets of values from fields\nfor keys in fields:\n    for idx in range(0,len(fields[keys])-1,2):\n        validParams = validParams | set(range(fields[keys][idx],fields[keys][idx+1]+1))\n# loop through nearbyTickets and sum values not in parameters\nfor line in nearbyTickets:\n    values = line.split(\",\")\n    for value in values:\n        ticketScanningErrorRate += int(not(int(value) in validParams)) * int(value)\n
print(\"ticket scanning error rate =\",ticketScanningErrorRate)","sub_path":"d16p1-solution.py","file_name":"d16p1-solution.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"237020033","text":"from unittest import TestCase\n\nfrom app import app\nfrom models import db, Pet\n\n# Use test database and don't clutter tests with SQL\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///pet_shop_test'\napp.config['SQLALCHEMY_ECHO'] = False\n\n# Make Flask errors be real errors, rather than HTML pages with error info\napp.config['TESTING'] = True\n\n# This is a bit of a hack, but don't use Flask DebugToolbar\napp.config['DEBUG_TB_HOSTS'] = ['dont-show-debug-toolbar']\n\ndb.drop_all()\ndb.create_all()\n\n\nclass PetViewsTestCase(TestCase):\n    \"\"\"Tests for views for Pets.\"\"\"\n\n    def setUp(self):\n        \"\"\"Add sample pet.\"\"\"\n\n        Pet.query.delete()\n\n        pet = Pet(name=\"TestPet\", species=\"dog\", hunger=10)\n        db.session.add(pet)\n        db.session.commit()\n\n        self.pet_id = pet.id\n        self.pet = pet\n\n    def tearDown(self):\n        \"\"\"Clean up any fouled transaction.\"\"\"\n\n        db.session.rollback()\n\n    def test_list_pets(self):\n        with app.test_client() as client:\n            resp = client.get(\"/\")\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn('TestPet', html)\n\n    def test_show_pet(self):\n        with app.test_client() as client:\n            resp = client.get(f\"/{self.pet_id}\")\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn('<h1>TestPet</h1>', html)\n            self.assertIn(self.pet.species, html)\n\n    def test_add_pet(self):\n        with app.test_client() as client:\n            d = {\"name\": \"TestPet2\", \"species\": \"cat\", \"hunger\": 20}\n            resp = client.post(\"/\", data=d, follow_redirects=True)\n            html = resp.get_data(as_text=True)\n\n            self.assertEqual(resp.status_code, 200)\n            self.assertIn(\"<h1>TestPet2</h1>\
\", html)\n","sub_path":"test_flask.py","file_name":"test_flask.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"22165861","text":"# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Python source file include Iris pipeline functions and necessary utils.\n\nThe utilities in this file are used to build a model with native Keras.\nThis module file will be used in Transform and generic Trainer.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Text\n\nimport absl\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_transform as tft\n\nfrom tfx.components.trainer.executor import TrainerFnArgs\n\n_FEATURE_KEYS = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\n_LABEL_KEY = 'variety'\n\n\ndef _transformed_name(key):\n return key + '_xf'\n\n\ndef _gzip_reader_fn(filenames):\n \"\"\"Small utility returning a record reader that can read gzip'ed files.\"\"\"\n return tf.data.TFRecordDataset(filenames, compression_type='GZIP')\n\n\ndef _get_serve_tf_examples_fn(model, tf_transform_output):\n \"\"\"Returns a function that parses a serialized tf.Example.\"\"\"\n\n @tf.function\n def serve_tf_examples_fn(serialized_tf_examples):\n \"\"\"Returns the output to be used in the serving signature.\"\"\"\n feature_spec = tf_transform_output.raw_feature_spec()\n feature_spec.pop(_LABEL_KEY)\n parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n\n transformed_features = tf_transform_output.transform_raw_features(\n parsed_features)\n transformed_features.pop(_transformed_name(_LABEL_KEY))\n\n outputs = model(transformed_features)\n return {'outputs': outputs}\n\n return serve_tf_examples_fn\n\n\ndef _input_fn(file_pattern: Text,\n tf_transform_output: tft.TFTransformOutput,\n batch_size: int = 200) -> tf.data.Dataset:\n \"\"\"Generates features and label for tuning/training.\n\n Args:\n file_pattern: input tfrecord file pattern.\n tf_transform_output: A TFTransformOutput.\n batch_size: representing the number of consecutive elements of returned\n dataset to combine in a single batch\n\n Returns:\n A dataset that contains (features, indices) tuple where features is a\n dictionary of Tensors, and indices is a single Tensor of label indices.\n \"\"\"\n transformed_feature_spec = (\n tf_transform_output.transformed_feature_spec().copy())\n\n dataset = tf.data.experimental.make_batched_features_dataset(\n file_pattern=file_pattern,\n batch_size=batch_size,\n features=transformed_feature_spec,\n reader=_gzip_reader_fn,\n label_key=_transformed_name(_LABEL_KEY))\n\n return dataset\n\n\ndef _build_keras_model() -> tf.keras.Model:\n \"\"\"Creates a DNN Keras model for classifying iris data.\n\n Returns:\n A Keras Model.\n \"\"\"\n # The model below is built with Functional API, please 
refer to\n # https://www.tensorflow.org/guide/keras/overview for all API options.\n inputs = [\n keras.layers.Input(shape=(1,), name=_transformed_name(f))\n for f in _FEATURE_KEYS\n ]\n d = keras.layers.concatenate(inputs)\n for _ in range(3):\n d = keras.layers.Dense(8, activation='relu')(d)\n output = keras.layers.Dense(3, activation='softmax')(d)\n\n model = keras.Model(inputs=inputs, outputs=output)\n model.compile(\n optimizer=keras.optimizers.Adam(lr=0.001),\n loss='sparse_categorical_crossentropy',\n metrics=[keras.metrics.SparseCategoricalAccuracy(name='accuracy')])\n\n absl.logging.info(model.summary())\n return model\n\n\n# TFX Transform will call this function.\ndef preprocessing_fn(inputs):\n \"\"\"tf.transform's callback function for preprocessing inputs.\n\n Args:\n inputs: map from feature keys to raw not-yet-transformed features.\n\n Returns:\n Map from string feature key to transformed feature operations.\n \"\"\"\n outputs = {}\n\n for key in _FEATURE_KEYS:\n outputs[_transformed_name(key)] = tft.scale_to_z_score(inputs[key])\n outputs[_transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY]\n\n return outputs\n\n\n# TFX Trainer will call this function.\ndef run_fn(fn_args: TrainerFnArgs):\n \"\"\"Train the model based on given args.\n\n Args:\n fn_args: Holds args used to train the model as name/value pairs.\n \"\"\"\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)\n eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)\n\n # To use distribution strategy, create an appropriate tf.distribute.Strategy\n # and move the creation and compiling of Keras model inside `strategy.scope`.\n #\n # For example, replace `model = _build_keras_model()` with:\n # mirrored_strategy = tf.distribute.MirroredStrategy()\n # with mirrored_strategy.scope():\n # model = _build_keras_model()\n model = _build_keras_model()\n\n model.fit(\n train_dataset,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps)\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)\n","sub_path":"tfx/examples/iris/iris_utils_native_keras.py","file_name":"iris_utils_native_keras.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165715591","text":"#!/usr/bin/env python\n\nfrom Bio import SeqIO\nimport click\nfrom copy import deepcopy\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom models.danq import DanQ\nfrom utils.io import parse_fasta_file, write\nfrom utils.data import one_hot_encode, reverse_complement\n\nCONTEXT_SETTINGS = {\n \"help_option_names\": [\"-h\", \"--help\"],\n}\n\n@click.command(context_settings=CONTEXT_SETTINGS, no_args_is_help=True)\n@click.option(\n \"-f\", \"--fasta-file\",\n help=\"FASTA file with sequences.\",\n metavar=\"FILENAME\",\n required=True\n)\n@click.option(\n \"-o\", \"--out-file\",\n help=\"Output file.\",\n metavar=\"FILENAME\",\n required=True\n)\n@click.option(\n \"-r\", \"--rev-complement\",\n help=\"Predict on reverse complement sequences.\",\n is_flag=True,\n default=False\n)\n@click.option(\n \"-s\", \"--state-dict\",\n 
help=\"Model state dict to use.\",\n metavar=\"FILENAME\",\n required=True\n)\n@click.option(\n \"-t\", \"--threads\",\n default=1,\n help=\"Number of CPU threads to use.\",\n show_default=True\n)\n\ndef predict(fasta_file, out_file, state_dict, rev_complement=False, threads=1):\n\n _predict(fasta_file, out_file, state_dict, rev_complement, threads)\n\ndef _predict(fasta_file, out_file, state_dict, rev_complement=False, threads=1):\n\n # Sequences\n sequences = []\n for seq_record in parse_fasta_file(fasta_file):\n sequences.append((seq_record.id, str(seq_record.seq).upper()))\n df = pd.DataFrame(sequences, columns=[\"Id\", \"Sequence\"])\n\n # One-hot encode\n encoded_sequences = []\n for seq in df[\"Sequence\"]:\n encoded_sequences.append(one_hot_encode(seq))\n encoded_sequences = np.array(encoded_sequences)\n\n # TensorDataset\n ix = np.array([[i] for i in range(len(sequences))])\n dataset = TensorDataset(torch.Tensor(encoded_sequences), torch.Tensor(ix))\n if rev_complement:\n encoded_sequences_rc = np.array(reverse_complement(encoded_sequences))\n dataset_rc = TensorDataset(\n torch.Tensor(encoded_sequences_rc), torch.Tensor(ix)\n )\n\n # DataLoader\n parameters = dict(batch_size=64, num_workers=threads)\n dataloader = DataLoader(dataset, **parameters)\n if rev_complement:\n dataloader_rc = DataLoader(dataset_rc, **parameters)\n\n # Predict\n sequence_length = len(sequences[0][1])\n predictions = __predict(sequence_length, 1, dataloader, state_dict)\n if rev_complement:\n predictions_rc = __predict(\n sequence_length, 1, dataloader_rc, state_dict\n )\n else:\n predictions_rc = np.empty((len(predictions)))\n predictions_rc[:] = np.NaN\n\n # Save predictions\n zipped_predictions = np.array(\n list(zip(df[\"Id\"].to_list(), predictions, predictions_rc[:]))\n )\n df = pd.DataFrame(zipped_predictions, columns=[\"Id\", \"Fwd\", \"Rev\"])\n df.to_csv(out_file, compression=\"gzip\", index=False)\n\ndef __predict(sequence_length, n_features, dataloader, state_dict):\n\n predictions = None\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = DanQ(sequence_length, n_features).to(device)\n model.load_state_dict(torch.load(state_dict))\n model.eval() # set the model in evaluation mode\n\n for seqs, labels in dataloader:\n x = seqs.to(device) # shape = (batch_size, 4, 200)\n labels = labels.to(device)\n with torch.no_grad():\n # Forward pass\n outputs = model(x)\n # Save predictions\n if predictions is None:\n predictions = outputs.data.cpu().numpy()\n else:\n predictions = np.append(\n predictions, outputs.data.cpu().numpy(), axis=0\n )\n\n return(predictions.flatten())\n\nif __name__ == \"__main__\":\n predict()","sub_path":"DanQ/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"614973472","text":"import requests\nfrom lxml import html\n\n\ndef user_info(user_id):\n url = \"https://www.shiyanlou.com/user/{}/\".format(user_id)\n response = requests.get(url)\n rep = response.content.decode()\n tree = html.fromstring(rep, 'lxml')\n user_name = tree.xpath(\"//div/span[@class='username']/text()\")\n user_level = tree.xpath(\"//div/span[@class='user-level']/text()\")\n join_date = tree.xpath(\"//div/span[@class='join-date']/text()\")\n\n try:\n user_name = user_name[0]\n user_level = int(user_level[0][1:])\n join_date = join_date[0].split()[0]\n except:\n user_name = None\n user_level = None\n join_date = None\n return user_name, 
user_level, join_date\n\n\nuser_info(\"214893\")","sub_path":"test/test4/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"503796354","text":"import tweepy\nimport sys\n\n\nconsumer_key = \"ZCYgdNKycYTWUB6IReeqmqish\"\nconsumer_secret = \"xm4gKvCaQVxTnAHlbXmH46MhqJbw0FCVAq72hsayMSo06rdndP\"\naccess_key=\"1426498003-PMr1Fds4lCJulrjRMd3wsYZfKLXXeIez0uOveqt\"\naccess_secret=\"3JmD7iedgX2auVOsQQPdEF5E0mlZsLp6mtPsJZ8nWxwar\"\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_key, access_secret)\napi=tweepy.API(auth)\n\ndef gnm(name):\n    try:\n        user =api.get_user(name)\n        foll=(user.friends_count)\n        stuff = api.user_timeline(id = name, count = 10, include_rts = False,tweet_mode='extended')\n        sum_tweet=0\n        for tweet in stuff:\n            text=(tweet.full_text).split()# split each post into words\n            sum_tweet+=len(text)\n        return(sum_tweet*foll)\n    except:\n        print('The username \"%s\" is not valid.'%(name))\n        return(1)\n\n\nname1=input(\"First user: \")\nname2=input(\"Second user: \")\nginomeno1=gnm(name1)\nginomeno2=gnm(name2)\nif((ginomeno1!=1) and (ginomeno2!=1)): # proceed only if both usernames are valid\n    if(ginomeno1>ginomeno2):\n        print(\"User %s has a larger product than user %s.\"%(name1,name2))\n    elif(ginomeno1==ginomeno2):\n        print(\"The two users have the same product.\")\n    else:\n        print(\"User %s has a larger product than user %s.\"%(name2,name1))\n","sub_path":"ERGASIES_PYTHON/Askisi8.py","file_name":"Askisi8.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"242552584","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-intel/egg/etk/docking/util.py\n# Compiled at: 2011-01-15 06:50:51\nimport gtk\n\ndef rect_contains(rect, x, y):\n    \"\"\"\n    The rect_contains function checks if a point, defined by x and y falls\n    within the gdk.Rectangle defined by rect.\n\n    Note: Unlike rect_overlaps defined below, this function ignores a 1 pixel border.\n    \"\"\"\n    if x > rect.x and x < rect.x + rect.width and y > rect.y and y < rect.y + rect.height:\n        return True\n    else:\n        return False\n\n\ndef rect_overlaps(rect, x, y):\n    \"\"\"\n    The rect_overlaps function checks if a point, defined by x and y overlaps\n    the gdk.Rectangle defined by rect.\n\n    Note: Unlike rect_contains defined above, this function does not ignore a 1 pixel border.\n    \"\"\"\n    if x >= rect.x and x <= rect.x + rect.width and y >= rect.y and y <= rect.y + rect.height:\n        return True\n    else:\n        return False\n\n\ndef load_icon(icon_name, size):\n    icontheme = gtk.icon_theme_get_default()\n    if not icontheme.has_icon(icon_name):\n        icon_name = 'gtk-missing-image'\n    return icontheme.load_icon(icon_name, size, gtk.ICON_LOOKUP_USE_BUILTIN)\n\n\ndef load_icon_image(icon_name, size):\n    icontheme = gtk.icon_theme_get_default()\n    if not icontheme.has_icon(icon_name):\n        icon_name = 'gtk-missing-image'\n    return gtk.image_new_from_icon_name(icon_name, size)\n\n\ndef flatten(w, child_getter=gtk.Container.get_children):\n    \"\"\"\n    Generator function that returns all items in a hierarchy.\n    Default `child_getter` returns children in a GTK+ widget hierarchy.\n    \"\"\"\n    yield w\n    try:\n        for c in child_getter(w):\n            for d in flatten(c, child_getter):\n                
yield d\n\n except TypeError:\n pass","sub_path":"pycfiles/etk.docking-0.2-py2.7/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226061573","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('learn', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='contract',\n name='create_time',\n field=models.DateTimeField(default=datetime.datetime.now),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='contract',\n name='message',\n field=models.CharField(max_length=5000),\n preserve_default=True,\n ),\n ]\n","sub_path":"learn/migrations/0002_auto_20150925_0431.py","file_name":"0002_auto_20150925_0431.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162058732","text":"#!/usr/bin/python3\n\nimport sys\n\nfrom os import system as cmd\nfrom os import name as name\nfrom string import Template\n\nclass Updater(object):\n def __init__(self):\n # Init to inherit classes\n super(Updater, self).__init__()\n\n def generate(self):\n\n form=\"html\"\n source=\"docs/source/\"\n target=\"classes/\"\n build=\"docs/build/\"\n\n # Generate documentation\n print(\"\\n - Building documentation\")\n cmd_doc = Template(\"sphinx-apidoc -f -o $source $target\")\n cmd(cmd_doc.substitute(source=source, target=target))\n\n # Generate documentation\n print(\"\\n - Generating html documentation\")\n cmd_build = Template(\"sphinx-build -b $form $source $build$form\")\n cmd(cmd_build.substitute(form=form, source=source, build=build))\n\n def sphinx(self):\n \"\"\"Initialize sphinx documentation.\n\n \"\"\"\n print(\"\\n - Initializing sphinx\")\n cmd(\"./docs/sphinx-quickstart\")\n\n def ghpages(self):\n \"\"\"push to github gh-pages branch\n\n \"\"\"\n print(\"\\n - Uploading documentation to gh-pages branch\")\n cmd(\"git subtree push --prefix docs/build/html origin gh-pages\")\n\n def master(self, msg):\n print(\"\\n - Uploading to git master branch: \" + sys.argv[1])\n cmd(\"git add -A\")\n cmd('git commit -m \"' + msg +'\"')\n cmd(\"git pull origin master\")\n cmd(\"git push origin master\")","sub_path":"classes/app/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"76988310","text":"from builtins import str\nimport six\n\n\ndef dict_to_query_params(d):\n if d is None or len(d) == 0:\n return ''\n\n param_list = [param + '=' + (str(value).lower() if type(value) == bool else str(value))\n for param, value in six.iteritems(d) if value is not None]\n return '?' 
+ \"&\".join(param_list)","sub_path":"calendar_event_app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69722401","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 16 20:38:07 2018\n\n@author: ztw1e12\n\"\"\"\n\nfrom sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn import decomposition \n# from textblob import TextBlob \nfrom keras.models import Sequential\nfrom keras.preprocessing.text import Tokenizer\n\nimport pandas, numpy, string\nfrom keras import layers, models, optimizers\nfrom sklearn.linear_model import LogisticRegression\n\n\nfrom sklearn.linear_model import Ridge\nimport random\n\n\n\n# load the dataset\n\ndf = pandas.read_csv(\"sentiment labelled sentences/yelp_labelled.txt\", names=['sentence', 'label'], sep=\"\\t\")\n\n\n# Splitting the data (sentence and labbels) into training and testing data \n\nsentences = df['sentence'].values\ny = df['label'].values\n\n\n# create a dataframe using texts and lables\ntrainDF = pandas.DataFrame()\ntrainDF['text'] = sentences\ntrainDF['label'] = y\n\n\n\n# split the dataset into training and validation datasets \ntrain_x, valid_x, train_y, valid_y = model_selection.train_test_split(trainDF['text'], trainDF['label'])\n\n\n\n# Will be used to train models\n\ntrainDF['char_count'] = trainDF['text'].apply(len)\ntrainDF['word_count'] = trainDF['text'].apply(lambda x: len(x.split()))\ntrainDF['word_density'] = trainDF['char_count'] / (trainDF['word_count']+1)\ntrainDF['punctuation_count'] = trainDF['text'].apply(lambda x: len(\"\".join(_ for _ in x if _ in string.punctuation))) \ntrainDF['title_word_count'] = trainDF['text'].apply(lambda x: len([wrd for wrd in x.split() if wrd.istitle()]))\ntrainDF['upper_case_word_count'] = trainDF['text'].apply(lambda x: len([wrd for wrd in x.split() if wrd.isupper()]))\n\n\n'''\npos_family = {\n 'noun' : ['NN','NNS','NNP','NNPS'],\n 'pron' : ['PRP','PRP$','WP','WP$'],\n 'verb' : ['VB','VBD','VBG','VBN','VBP','VBZ'],\n 'adj' : ['JJ','JJR','JJS'],\n 'adv' : ['RB','RBR','RBS','WRB']\n}\n\n\n# function to check and get the part of speech tag count of a words in a given sentence\ndef check_pos_tag(x, flag):\n cnt = 0\n try:\n wiki = TextBlob(x)\n for tup in wiki.tags:\n ppo = list(tup)[1]\n if ppo in pos_family[flag]:\n cnt += 1\n except:\n pass\n return cnt\n\ntrainDF['noun_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'noun'))\ntrainDF['verb_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'verb'))\ntrainDF['adj_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'adj'))\ntrainDF['adv_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'adv'))\ntrainDF['pron_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'pron'))\n'''\n\n\n\n# Our list of functions to apply.\ntransform_functions = [\n trainDF['char_count'],\n trainDF['word_count'],\n trainDF['word_density'],\n trainDF['punctuation_count'],\n trainDF['title_word_count'],\n trainDF['upper_case_word_count']\n]\n\n# Apply each function and put the results into a list.\ncolumns = []\nfor func in transform_functions:\n columns.append(func)\n \n# Convert the meta features to a numpy array.\nmeta = numpy.asarray(columns).T\n\nprint (meta)\n\n\ntrain_rows = 750\n# Set a seed to get the same \"random\" shuffle every 
time.\nrandom.seed(1)\n\n# Shuffle the indices for the matrix.\nindices = list(range(meta.shape[0]))\nrandom.shuffle(indices)\n\n# Create train and test sets.\ntrain = meta[indices[:train_rows], :]\ntest = meta[indices[train_rows:], :]\n# train = numpy.nan_to_num(train)\n'''\n# Run the regression and generate predictions for the test set.\nreg = Ridge(alpha=.1)\nreg.fit(train,train_y)\npredictions = reg.predict(test)\n'''\n\nclassifier = LogisticRegression()\nclassifier.fit(train, train_y)\nscore = classifier.score(test, valid_y)\n\nprint (\"Test acurracy: \", score)\n\n\n# =============================================================================\n# building sequential model layer by layer \n# =============================================================================\n\ninput_dim = train.shape[1]\nmodel = Sequential ()\nmodel.add(layers.Dense(10, input_dim=input_dim, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()\n\n\nhistory = model.fit(train, train_y, epochs=20, verbose=False, validation_data=(test, valid_y), batch_size=10)\n\n\n\nloss, accuracy = model.evaluate(train, train_y, verbose=False)\nprint(\"Training Accuracy: {:.4f}\".format(accuracy))\nloss, accuracy = model.evaluate(test, valid_y, verbose=False)\nprint(\"Testing Accuracy: {:.4f}\".format(accuracy))\n\n\ntokenizer = Tokenizer(num_words=5000)\ntokenizer.fit_on_texts(train_x)\n\nX_train = tokenizer.texts_to_sequences(train_x)\nX_test = tokenizer.texts_to_sequences(valid_x)\n\nvocab_size = len(tokenizer.word_index) + 1 # Adding 1 because of reserved 0 index\n\n\nembedding_dim = 6\nmaxlen = 6\n\nmodel = Sequential()\nmodel.add(layers.Embedding(vocab_size, embedding_dim, input_length=maxlen))\nmodel.add(layers.Conv1D(128, 5, activation='relu'))\nmodel.add(layers.GlobalMaxPooling1D())\nmodel.add(layers.Dense(10, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\n\nhistory = model.fit(train, train_y, epochs=10, verbose=False,validation_data=(test, valid_y),batch_size=10)\nloss, accuracy = model.evaluate(train, train_y, verbose=False)\nprint(\"Training Accuracy: {:.4f}\".format(accuracy))\nloss, accuracy = model.evaluate(test, valid_y, verbose=False)\nprint(\"Testing Accuracy: {:.4f}\".format(accuracy))\n\n\n\n","sub_path":"nlp-features.py","file_name":"nlp-features.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546368860","text":"#!/opt/libreoffice5.2/program/python\n# -*- coding: utf-8 -*-\nIMPLE_NAME = \"pq.Tcu\"\nSERVICE_NAME = \"pq.Tcu\"\ndef create(ctx, *args): \n from inoxt import component\n return component.create(ctx, *args, imple_name=IMPLE_NAME, service_name=SERVICE_NAME)\n# Registration\nimport unohelper\ng_ImplementationHelper = unohelper.ImplementationHelper()\ng_ImplementationHelper.addImplementation(create, IMPLE_NAME, (SERVICE_NAME,),)\n","sub_path":"TCU/src/pyunocomponent.py","file_name":"pyunocomponent.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643663971","text":"# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. 
All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass Route(Mo):\n \"\"\"\n The EIGRP route information.\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.eigrp.Route\")\n\n meta.moClassName = \"eigrpRoute\"\n meta.rnFormat = \"rt-[%(pfx)s]\"\n meta.category = MoCategory.REGULAR\n meta.label = \"EIGRP Route\"\n meta.writeAccessMask = 0x8008020040001\n meta.readAccessMask = 0x8008020040001\n meta.isDomainable = False\n meta.isReadOnly = True\n meta.isConfigurable = False\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.childClasses.add(\"cobra.model.eigrp.Nexthop\")\n\n meta.childNamesAndRnPrefix.append((\"cobra.model.eigrp.Nexthop\", \"nh-\"))\n\n meta.parentClasses.add(\"cobra.model.eigrp.Db\")\n\n meta.superClasses.add(\"cobra.model.nw.DbRec\")\n meta.superClasses.add(\"cobra.model.l3.DbRec\")\n meta.superClasses.add(\"cobra.model.eigrp.DbRec\")\n meta.superClasses.add(\"cobra.model.nw.Conn\")\n meta.superClasses.add(\"cobra.model.nw.Item\")\n meta.superClasses.add(\"cobra.model.nw.GEp\")\n\n meta.rnPrefixes = [\n ('rt-', True),\n ]\n\n prop = PropMeta(\"str\", \"actStQual\", \"actStQual\", 17384, PropCategory.REGULAR)\n prop.label = \"Query Origin Qualifier\"\n prop.isOper = True\n prop.range = [(0, 512)]\n meta.props.add(\"actStQual\", prop)\n\n prop = PropMeta(\"str\", \"chgQual\", \"chgQual\", 17385, PropCategory.REGULAR)\n prop.label = \"Last route state transition Qualifier\"\n prop.isOper = True\n prop.range = [(0, 512)]\n meta.props.add(\"chgQual\", prop)\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"fDist\", \"fDist\", 17383, PropCategory.REGULAR)\n prop.label = \"Feasible Distance\"\n prop.isOper = True\n meta.props.add(\"fDist\", prop)\n\n prop = PropMeta(\"str\", \"flags\", \"flags\", 17388, PropCategory.REGULAR)\n prop.label = \"Route Flags\"\n prop.isOper = True\n prop._addConstant(\"stuck-act\", \"stuck-in-active\", 1)\n meta.props.add(\"flags\", prop)\n\n prop = PropMeta(\"str\", \"lastActTs\", \"lastActTs\", 17386, PropCategory.REGULAR)\n prop.label = \"Last active state timestamp\"\n prop.isOper = True\n meta.props.add(\"lastActTs\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = 
\"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"name\", \"name\", 16437, PropCategory.REGULAR)\n prop.label = \"Name\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 128)]\n meta.props.add(\"name\", prop)\n\n prop = PropMeta(\"str\", \"operSt\", \"operSt\", 17382, PropCategory.REGULAR)\n prop.label = \"Operational State\"\n prop.isOper = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"passive\"\n prop._addConstant(\"active\", \"active\", 1)\n prop._addConstant(\"passive\", \"passive\", 0)\n meta.props.add(\"operSt\", prop)\n\n prop = PropMeta(\"str\", \"pfx\", \"pfx\", 17381, PropCategory.REGULAR)\n prop.label = \"Address\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n meta.props.add(\"pfx\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"siaQueryCnt\", \"siaQueryCnt\", 17387, PropCategory.REGULAR)\n prop.label = \"Number of Stuck-in-active queries sent for the active route\"\n prop.isOper = True\n meta.props.add(\"siaQueryCnt\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"pfx\"))\n getattr(meta.props, \"pfx\").needDelimiter = True\n\n def __init__(self, parentMoOrDn, pfx, markDirty=True, **creationProps):\n namingVals = [pfx]\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n","sub_path":"venv/Lib/site-packages/cobra/modelimpl/eigrp/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"594751408","text":"from django.urls import path\nfrom .views import (NewsListView , NewsDetailView , \n\t\t\t\t\t#NewsCreateT, \n\t\t\t\t\tNewsCreateView , \n\t\t\t\t\tNewsUpdateView , NewsDeleteView)\n#app_name = 'news' ##\nurlpatterns = [ \n\t\n\tpath('////delete', NewsDeleteView.as_view(), name='sub_news_delete'),\n\tpath('////edit/', NewsUpdateView.as_view(), name='sub_news_edit'),\n\tpath('create/', NewsCreateView.as_view(), name='news_create'),\n\t#path('salam/', NewsCreateT.as_view() ,name='news_test'),\n\tpath('////', NewsDetailView.as_view(), name='sub_news_url'),\n\tpath('', NewsListView.as_view(), name='main_news'),\n\t ] \n","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"200514612","text":"from PIL import Image\nfrom numpy import *\n\ndef varianceEdge(im,n):\n\tfor i in range(0,len(im),1):\n\t\tfor j in range(0,len(im[0]),1):\n\t\t\tsub = im[i:i+n+1,j:j+n+1]\n\t\t\tim[i,j] = std(sub)\n\treturn im\n\ndef main():\n\tname = 'parking'\n\tim = array(Image.open(name + '.jpg').convert('L'))\n\tim2 = Image.fromarray(varianceEdge(im,2))\n\tim2.convert('RGB').save('var' + name + 
'.png','png')\n\nmain()\n\n\n\n","sub_path":"Exercises/Exercise_3/ex3q2_variance.py","file_name":"ex3q2_variance.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"367146526","text":"from enum import Enum\n\nimport numpy as np\n\nimport hiv_model_econ_eval.input_data as data\n\n\nclass Therapies(Enum):\n \"\"\" mono vs. combination therapy \"\"\"\n MONO = 0\n COMBO = 1\n\n\nclass Parameters:\n def __init__(self, therapy):\n\n # selected therapy\n self.therapy = therapy\n\n # initial health state\n self.initialHealthState = data.HealthStates.CD4_200to500\n\n # annual treatment cost\n if self.therapy == Therapies.MONO:\n self.annualTreatmentCost = data.Zidovudine_COST\n else:\n self.annualTreatmentCost = data.Zidovudine_COST + data.Lamivudine_COST\n\n # transition probability matrix of the selected therapy\n self.probMatrix = []\n\n # calculate transition probabilities between hiv states\n if self.therapy == Therapies.MONO:\n # calculate transition probability matrix for the mono therapy\n self.probMatrix = get_prob_matrix_mono(trans_matrix=data.TRANS_MATRIX)\n\n elif self.therapy == Therapies.COMBO:\n # calculate transition probability matrix for the combination therapy\n self.probMatrix = get_prob_matrix_combo(\n prob_matrix_mono=get_prob_matrix_mono(trans_matrix=data.TRANS_MATRIX),\n combo_rr=data.TREATMENT_RR)\n\n # annual state costs and utilities\n self.annualStateCosts = data.ANNUAL_STATE_COST\n self.annualStateUtilities = data.ANNUAL_STATE_UTILITY\n\n # discount rate\n self.discountRate = data.DISCOUNT\n\n\ndef get_prob_matrix_mono(trans_matrix):\n \"\"\"\n :param trans_matrix: transition matrix containing counts of transitions between states\n :return: transition probability matrix\n \"\"\"\n\n # initialize transition probability matrix\n trans_prob_matrix = []\n\n # for each row in the transition matrix\n for row in trans_matrix:\n # calculate the transition probabilities\n prob_row = np.array(row)/sum(row)\n # add this row of transition probabilities to the transition probability matrix\n trans_prob_matrix.append(prob_row)\n\n return trans_prob_matrix\n\n\ndef get_prob_matrix_combo(prob_matrix_mono, combo_rr):\n \"\"\"\n :param prob_matrix_mono: (list of lists) transition probability matrix under mono therapy\n :param combo_rr: relative risk of the combination treatment\n :returns (list of lists) transition probability matrix under combination therapy \"\"\"\n\n # create an empty list of lists\n matrix_combo = []\n for row in prob_matrix_mono:\n matrix_combo.append(np.zeros(len(row))) # adding a row [0, 0, 0, 0]\n\n # populate the combo matrix\n # calculate the effect of combo-therapy on non-diagonal elements\n for s in range(len(matrix_combo)):\n for next_s in range(s + 1, len(prob_matrix_mono[s])):\n matrix_combo[s][next_s] = combo_rr * prob_matrix_mono[s][next_s]\n\n # diagonal elements are calculated to make sure the sum of each row is 1\n for s in range(len(matrix_combo)):\n matrix_combo[s][s] = 1 - sum(matrix_combo[s][s+1:])\n\n return matrix_combo\n\n\n# tests\nif __name__ == '__main__':\n matrix_mono = get_prob_matrix_mono(data.TRANS_MATRIX)\n matrix_combo = get_prob_matrix_combo(matrix_mono, data.TREATMENT_RR)\n\n print(matrix_mono)\n print(matrix_combo)\n","sub_path":"hiv_model_econ_eval/param_classes.py","file_name":"param_classes.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"651684861","text":"from random import randrange\nfrom mininet.log import info\n\nclass Placer( object ):\n \"The base object of the placer classes.\"\n\n def __init__( self, servers=None, nodes=None, hosts=None,\n switches=None, controllers=None, links=None ):\n \"\"\"Initialize necessary info,\n They are optional.\n If not feed, they will be empty.\n \"\"\"\n self.servers = servers or []\n self.nodes = nodes or []\n self.hosts = hosts or []\n self.switches = switches or []\n self.controllers = controllers or []\n self.links = links or []\n\n def place( self, node ):\n \"This is going to be overridden. It should return the server to place the node.\"\n return None\n\n\nclass RandomPlacer( Placer ):\n \"Random placement\"\n def place( self, nodename ):\n \"\"\"\n Random placement function\n Nodename is the name of node.\n \"\"\"\n # This may be slow with lots of servers\n return self.servers[ randrange( 0, len( self.servers ) ) ]\n\n\nclass RoundRobinPlacer( Placer ):\n \"\"\"Round-robin placement\n Note this will usually result in cross-server links between\n hosts and switches\"\"\"\n\n def __init__( self, *args, **kwargs ):\n Placer.__init__( self, *args, **kwargs )\n self.next = 0\n\n def place( self, nodename ):\n \"\"\"\n Return the server to place the node.\n Place nodes circularly.\n Nodename is the name of node.\n \"\"\"\n server = self.servers[ self.next ]\n self.next = ( self.next + 1 ) % len( self.servers )\n return server\n\n\nclass SwitchBinPlacer( Placer ):\n \"\"\"Place switches (and controllers) into evenly-sized bins,\n and attempt to co-locate hosts and switches\"\"\"\n\n def __init__( self, *args, **kwargs ):\n Placer.__init__( self, *args, **kwargs )\n self.servdict = dict( enumerate( self.servers ) )\n self.hset = frozenset( self.hosts )\n self.sset = frozenset( self.switches )\n self.cset = frozenset( self.controllers )\n self.placement = self.calculatePlacement()\n\n @staticmethod\n def bin( nodes, servers ):\n \"\"\"Distribute nodes evenly over servers.\"\"\"\n # Calculate base bin size\n nlen = len( nodes )\n slen = len( servers )\n quotient = int( nlen / slen )\n binsizes = { server: quotient for server in servers }\n # Distribute remainder\n remainder = nlen % slen\n for server in servers[ 0 : remainder ]:\n binsizes[ server ] += 1\n # Create binsize[ server ] tickets for each server\n tickets = sum( [ binsizes[ server ] * [ server ]\n for server in servers ], [] )\n # And assign one ticket to each node\n return { node: ticket for node, ticket in zip( nodes, tickets ) }\n\n def calculatePlacement( self ):\n \"Pre-calculate node placement\"\n placement = {}\n # Create host-switch connectivity map,\n # associating host with last switch that it's\n # connected to\n switchFor = {}\n for src, dst in self.links:\n if src in self.hset and dst in self.sset:\n switchFor[ src ] = dst\n if dst in self.hset and src in self.sset:\n switchFor[ dst ] = src\n # Place switches\n placement = self.bin( self.switches, self.servers )\n # Place controllers and merge into placement dict\n placement.update( self.bin( self.controllers, self.servers ) )\n # Co-locate hosts with their switches\n for h in self.hosts:\n if h in placement:\n # Host is already placed - leave it there\n continue\n if h in switchFor:\n placement[ h ] = placement[ switchFor[ h ] ]\n else:\n raise Exception(\n \"SwitchBinPlacer: cannot place isolated host \" + h )\n return placement\n\n def place( self, node ):\n \"\"\"Simple placement algorithm:\n place switches into evenly sized bins,\n and place 
hosts near their switches\"\"\"\n        return self.placement[ node ]\n\n\nclass HostSwitchBinPlacer( Placer ):\n    \"\"\"Place switches *and hosts* into evenly-sized bins\n    Note that this will usually result in cross-server\n    links between hosts and switches\"\"\"\n\n    def __init__( self, *args, **kwargs ):\n        Placer.__init__( self, *args, **kwargs )\n        # Calculate bin sizes\n        scount = len( self.servers )\n        self.hbin = max( int( len( self.hosts ) / scount ), 1 )\n        self.sbin = max( int( len( self.switches ) / scount ), 1 )\n        self.cbin = max( int( len( self.controllers ) / scount ) , 1 )\n        info( 'scount:', scount )\n        info( 'bins:', self.hbin, self.sbin, self.cbin, '\\n' )\n        self.servdict = dict( enumerate( self.servers ) )\n        self.hset = frozenset( self.hosts )\n        self.sset = frozenset( self.switches )\n        self.cset = frozenset( self.controllers )\n        self.hind, self.sind, self.cind = 0, 0, 0\n\n    def place( self, nodename ):\n        \"\"\"Simple placement algorithm:\n            place nodes into evenly sized bins\"\"\"\n        # Place nodes into bins; floor division keeps the server index an int\n        if nodename in self.hset:\n            server = self.servdict[ self.hind // self.hbin ]\n            self.hind += 1\n        elif nodename in self.sset:\n            server = self.servdict[ self.sind // self.sbin ]\n            self.sind += 1\n        elif nodename in self.cset:\n            server = self.servdict[ self.cind // self.cbin ]\n            self.cind += 1\n        else:\n            info( 'warning: unknown node', nodename )\n            server = self.servdict[ 0 ]\n        return server\n\n","sub_path":"scalablemininet/scalabletopo.py","file_name":"scalabletopo.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"213205340","text":"######################################################\n# RTRATProd.py\n# ---------\n# Author: Matt Mottram\n#        \n#\n# Description:\n#    Ganga runtime handlers for SNO+ RATProd application.\n#\n#    Prepares RATProd application for given backend.\n#\n#    - RTHandler: handles submission to local/batch backends\n#    - WGRTHandler: handles submission to WestGrid backend\n#    - LCGRTHandler: handles submission to LCG backend\n#\n# Revision History:\n#  - 26/08/14: M. 
Mottram - moved from RATProd (as with RTRATUser)\n#\n######################################################\n\nimport os\nimport shutil\n\nfrom Ganga.GPIDev.Adapters.IRuntimeHandler import IRuntimeHandler\n\nfrom Ganga.GPIDev.Lib.File import *\n\n# Standard Ganga plugin logger (assumed import; 'logger' is referenced throughout the handlers below)\nfrom Ganga.Utility.logging import getLogger\nlogger = getLogger()\n\n\n# Assume that the applications should come from the GangaSNOplus/Lib/Applications directory\n_app_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../Applications\"))\n\nclass RTHandler(IRuntimeHandler):\n    '''Standard RTHandler (for all batch/local submission).\n    '''\n    def prepare(self,app,appconfig,appmasterconfig,jobmasterconfig):\n\n        logger.debug('RAT::RTHandler prepare ...')\n        from Ganga.GPIDev.Adapters.StandardJobConfig import StandardJobConfig\n\n        #create the backend wrapper script\n        job=app.getJobObject()\n\n        # Check that all is OK in the application\n        app.verify()\n\n        #Check whether we're looking for a non-default sw dir\n        if app.softwareEnvironment is None:\n            logger.error('Must specify a RAT environment')\n            raise Exception\n\n        #we need to know the name of the file to run\n        macroFile = None\n        prodFile = None\n        if app.ratMacro!='':\n            decimated=app.ratMacro.split('/')\n            macroFile=decimated[len(decimated)-1]\n        else:\n            decimated=app.prodScript.split('/')\n            prodFile=decimated[len(decimated)-1]\n\n        foutList='[%s]' % (','.join(f for f in app.outputFiles))\n        finList='[%s]' % (','.join(f for f in app.inputFiles))\n\n        if app.environment==None:\n            args = []\n            args += ['-e',app.softwareEnvironment]\n            args += ['-d',app.outputDir]\n            args += ['-o',foutList]\n            args += ['-i',finList]\n            if app.ratMacro!='':\n                args += ['-m',macroFile]\n            else:\n                args += ['-k','-m',prodFile]\n            if app.useDB:\n                args += ['--dbuser',app.rat_db_user]\n                args += ['--dbpassword',app.rat_db_pswd]\n                args += ['--dbname',app.rat_db_name]\n                args += ['--dbprotocol',app.rat_db_protocol]\n                args += ['--dburl',app.rat_db_url]\n            if app.discardOutput:\n                args += ['--nostore']\n            \n            app._getParent().inputsandbox.append('%s/job_tools.py' % _app_directory)\n            return StandardJobConfig(File('%s/ratProdRunner.py' % _app_directory),\n                                     inputbox = app._getParent().inputsandbox,\n                                     outputbox = app._getParent().outputsandbox,\n                                     args = args)\n        else:#need a specific environment setup \n            #can either use a specific file or a list of strings. 
the latter needs to be converted to a temp file and shipped.\n envFile=None\n if type(app.environment)==list:\n tempname = 'tempRATProdEnv_%s'%os.getlogin()\n tempf = file('/tmp/%s'%(tempname),'w')\n for line in app.environment:\n tempf.write('%s \\n' % line)\n tempf.close()\n app._getParent().inputsandbox.append('/tmp/%s'%(tempname))\n envFile=tempname\n else:\n app._getParent().inputsandbox.append(app.environment)\n envFile=os.path.basename(app.environment)\n args = ''\n args += '-e %s '%(app.softwareEnvironment)\n args += '-d %s '%(app.outputDir)\n args += '-o %s '%(foutList)\n args += '-i %s '%(finList)\n if app.ratMacro!='':\n args += '-m %s '%(macroFile)\n else:\n args += '-k -m %s '%(prodFile)\n if app.useDB:\n args += '--dbuser %s '%(app.rat_db_user)\n args += '--dbpassword %s '%(app.rat_db_pswd)\n args += '--dbname %s '%(app.rat_db_name)\n args += '--dbprotocol %s '%(app.rat_db_protocol)\n args += '--dburl %s '%(app.rat_db_url)\n if app.discardOutput:\n args += '--nostore '\n\n wrapperArgs = ['-f', envFile, '-a', '%s' % args]\n wrapperArgs += ['ratProdRunner.py', 'misc']\n\n app._getParent().inputsandbox.append('%s/ratProdRunner.py' % _app_directory)\n app._getParent().inputsandbox.append('%s/job_tools.py' % _app_directory)\n\n return StandardJobConfig(File('%s/sillyPythonWrapper.py' % _app_directory),\n inputbox = app._getParent().inputsandbox,\n outputbox = app._getParent().outputsandbox,\n args = wrapperArgs)\n\n###################################################################\n\nclass WGRTHandler(IRuntimeHandler):\n '''WGRTHandler for WestGrid submission.\n '''\n def prepare(self,app,appconfig,appmasterconfig,jobmasterconfig):\n\n logger.debug('RAT::RTHandler prepare ...')\n from Ganga.GPIDev.Adapters.StandardJobConfig import StandardJobConfig\n\n #create the backend wrapper script\n job=app.getJobObject()\n\n # Check that all is OK in the application\n app.verify()\n\n #Check whether we're looking for a non-default sw dir\n if app.softwareEnvironment is None:\n logger.error('Must specify a RAT directory')\n raise Exception\n\n voproxy = job.backend.voproxy\n if voproxy==None:\n #use the proxy from the environment (default behaviour) \n try:\n voproxy = os.environ[\"X509_USER_PROXY\"]\n if not voproxy.startswith(os.environ[\"HOME\"]):\n # If the proxy is not on the home directory, copy if there (for jobs on worker nodes)\n cacheDir = os.path.join(os.environ[\"HOME\"], \"gaspCache\")\n if not os.path.exists(os.path.join(cacheDir)):\n os.makedirs(cacheDir)\n proxy_path = os.path.join(cacheDir, os.path.basename(voproxy))\n shutil.copy(voproxy, proxy_path) # Maintains the permissions: important!\n logger.warn(\"Proxy not stored in home directory, transferring across to %s\" % proxy_path)\n logger.warn(\"Note that this proxy may expire during the job lifetime!\")\n voproxy = proxy_path\n except:\n logger.error('Cannot run without voproxy either in environment (X509_USER_PROXY) or specified for WG backend')\n raise Exception\n if not os.path.exists(voproxy): \n logger.error('Valid WestGrid backend voproxy location MUST be specified: %s'%(voproxy))\n raise Exception\n\n #we need to know the name of the file to run\n macroFile = None\n prodFile = None\n if app.ratMacro!='':\n decimated=app.ratMacro.split('/')\n macroFile=decimated[len(decimated)-1]\n else:\n decimated=app.prodScript.split('/')\n prodFile=decimated[len(decimated)-1]\n\n foutList='[%s]' % (','.join(f for f in app.outputFiles))\n finList='[%s]' % (','.join(f for f in app.inputFiles))\n\n args = ''\n args += '-g srm '\n 
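# remaining runner arguments: RAT environment script, output directory, and the output/input file lists\n        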
args += '-e %s '%(app.softwareEnvironment)\n        args += '-d %s '%(app.outputDir)\n        args += '-o %s '%(foutList)\n        args += '-i %s '%(finList)\n        if app.ratMacro!='':\n            args += '-m %s '%(macroFile)\n        else:\n            args += '-k -m %s '%(prodFile)\n        args += '--voproxy %s '%(voproxy)\n        if app.useDB:\n            args += '--dbuser %s '%(app.rat_db_user)\n            args += '--dbpassword %s '%(app.rat_db_pswd)\n            args += '--dbname %s '%(app.rat_db_name)\n            args += '--dbprotocol %s '%(app.rat_db_protocol)\n            args += '--dburl %s '%(app.rat_db_url)\n        if app.discardOutput:\n            args += '--nostore '\n        args += '--se %s ' % job.backend.se\n        args += '--surl %s ' % job.backend.baseSURL\n\n\n        wrapperArgs = ['-a', '%s' % args]\n        wrapperArgs += ['ratProdRunner.py', 'wg']\n\n        app._getParent().inputsandbox.append('%s/ratProdRunner.py' % _app_directory)\n        app._getParent().inputsandbox.append('%s/job_tools.py' % _app_directory)\n\n        return StandardJobConfig(File('%s/sillyPythonWrapper.py' % _app_directory),\n                                 inputbox = app._getParent().inputsandbox,\n                                 outputbox = app._getParent().outputsandbox,\n                                 args = wrapperArgs)\n\n###################################################################\n\nclass LCGRTHandler(IRuntimeHandler):\n    '''RTHandler for Grid submission.\n    Could include CE options and tags here.\n    '''\n    def prepare(self,app,appconfig,appmasterconfig,jobmasterconfig):\n\n        logger.debug('RAT::LCGRTHandler prepare ...')\n        from Ganga.Lib.LCG import LCGJobConfig\n\n        #create the backend wrapper script\n        job=app.getJobObject()\n\n        # Check that all is OK in the application\n        app.verify()\n\n        # Check the current worker node permissions\n        # RATUtil is assumed to be provided by the GangaSNOplus Applications package\n        grid_config = RATUtil.GridConfig.get_instance()\n        for ce in grid_config.get_excluded_worker_nodes():\n            job.backend.requirements.excludedCEs += ' %s ' % ce\n\n        #Check whether we're looking for a non-default sw dir\n        if app.softwareEnvironment is None:\n            # The relative path for CVMFS, SNOPLUS_CVMFS_DIR will be set at the backend\n            # Note the extra \\ to escape the dollar in the initial python wrapper\n            app.softwareEnvironment = '\\$SNOPLUS_CVMFS_DIR/sw/%s/env_rat-%s.sh' % (app.ratVersion, app.ratVersion)\n\n        #we need to know the name of the file to run\n        macroFile = None\n        prodFile = None\n        if app.ratMacro!='':\n            decimated=app.ratMacro.split('/')\n            macroFile=decimated[len(decimated)-1]\n        else:\n            decimated=app.prodScript.split('/')\n            prodFile=decimated[len(decimated)-1]\n\n        foutList='[%s]' % (','.join(f for f in app.outputFiles))\n        finList='[%s]' % (','.join(f for f in app.inputFiles))\n\n        args = ''\n        args += '-g lcg '\n        args += '-e %s '%(app.softwareEnvironment)\n        args += '-d %s '%(app.outputDir)\n        args += '-o %s '%(foutList)\n        args += '-i %s '%(finList)\n        if app.ratMacro!='':\n            args += '-m %s '%(macroFile)\n        else:\n            args += '-k -m %s '%(prodFile)\n        if app.useDB:\n            args += '--dbuser %s '%(app.rat_db_user)\n            args += '--dbpassword %s '%(app.rat_db_pswd)\n            args += '--dbname %s '%(app.rat_db_name)\n            args += '--dbprotocol %s '%(app.rat_db_protocol)\n            args += '--dburl %s '%(app.rat_db_url)\n        if app.discardOutput:\n            args += '--nostore '\n\n        wrapperArgs = ['-a', '\"%s\"' % (args)]\n        wrapperArgs += ['ratProdRunner.py', 'lcg']\n\n        app._getParent().inputsandbox.append('%s/ratProdRunner.py' % _app_directory)\n        app._getParent().inputsandbox.append('%s/job_tools.py' % _app_directory)\n\n        return LCGJobConfig(File('%s/sillyPythonWrapper.py' % _app_directory),\n                            inputbox = app._getParent().inputsandbox,\n                            outputbox = app._getParent().outputsandbox,\n                            args = 
wrapperArgs)\n\n###################################################################\n","sub_path":"gasp/GangaSNOplus/Lib/RTHandlers/RTRATProd.py","file_name":"RTRATProd.py","file_ext":"py","file_size_in_byte":12025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364691607","text":"def take_in(num): return [float(input()) for _ in range(num)] \r\nsample=int(input())\r\nx=int(input())\r\ninstance=take_in(x)\r\ny=int(input())\r\nprices=take_in(y)\r\ndef interpolate(instance,prices,sample):\r\n try:\r\n position=instance.index(list(filter(lambda x:x>sample,instance))[0])\r\n except:\r\n position= len(instance)-1\r\n if position<1:\r\n xs=instance[0:2]\r\n ys=prices[0:2]\r\n else:\r\n xs=instance[position-1:position+1]\r\n ys=prices[position-1:position+1]\r\n ans=((ys[1]-ys[0])*(sample-xs[1])/(xs[1]-xs[0]) +ys[1])*100\r\n #round and format failed to approx 6.125 to 6.13\r\n print(ans)\r\n return (ans//1)/100 if ans%1 <0.5 else ((ans+1)//1)/100\r\nprint(interpolate(instance,prices,sample))\r\n","sub_path":"alg.py","file_name":"alg.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612077587","text":"from __future__ import print_function\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport Read_Data as RD\n\n#dir = \"wine-5-fold/wine-5-1tra.dat\"\ndir = \"yeast4.dat\"\n\nRD.Initialize_Data(dir)\n\n#Features_Attribute = [0, 13 , 14, 15, 16, 17, 18]\nFeatures_Attribute = np.arange(0, RD.Num_Features, 1)\n\nl = len(Features_Attribute)\n\nfor i in range(0, l):\n for j in range(i+1, l):\n for k in range(j+1, l):\n X_index = Features_Attribute[i]\n Y_index = Features_Attribute[j]\n Z_index = Features_Attribute[k]\n print(X_index, Y_index, Z_index)\n ax = plt.subplot(111, projection='3d')\n ax.scatter(RD.Negative_Feature[:,X_index], RD.Negative_Feature[:,Y_index], RD.Negative_Feature[:,Z_index], marker = 'o', color = '#539caf', label='1', s = 30, alpha=0.3)\n ax.scatter(RD.Positive_Feature[:,X_index], RD.Positive_Feature[:,Y_index], RD.Positive_Feature[:,Z_index], marker = '+', color = 'r', label='2', s = 50)\n plt.show()","sub_path":"3D_Scatter_Plot.py","file_name":"3D_Scatter_Plot.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"475880618","text":"# Given some integer, find the maximal number you can obtain by deleting exactly one digit of the given number.\n#\n# Example\n#\n# For n = 152, the output should be\n# deleteDigit(n) = 52;\n# For n = 1001, the output should be\n# deleteDigit(n) = 101.\n\n\ndef deleteDigit(n):\n num = 0\n\n s = str(n)\n for i in range(len(s)):\n if int(s[:i] + s[i + 1:]) > num:\n num = int(s[:i] + s[i + 1:])\n\n return num\n\n\nprint(deleteDigit(1001))\nprint(deleteDigit(152))\n\n","sub_path":"arcade/deleteDigit.py","file_name":"deleteDigit.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38458274","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. 
See license.txt\n\n\nfrom __future__ import unicode_literals\nimport unittest\nimport frappe\nfrom erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order\nfrom erpnext.accounts.doctype.sales_invoice.test_sales_invoice import create_sales_invoice\nfrom erpnext.stock.get_item_details import get_item_details\nfrom frappe import MandatoryError\n\nclass TestPricingRule(unittest.TestCase):\n\tdef test_pricing_rule_for_discount(self):\n\t\tfrom erpnext.stock.get_item_details import get_item_details\n\t\tfrom frappe import MandatoryError\n\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\n\t\ttest_record = {\n\t\t\t\"doctype\": \"Pricing Rule\",\n\t\t\t\"title\": \"_Test Pricing Rule\",\n\t\t\t\"apply_on\": \"Item Code\",\n\t\t\t\"item_code\": \"_Test Item\",\n\t\t\t\"selling\": 1,\n\t\t\t\"price_or_discount\": \"Discount Percentage\",\n\t\t\t\"price\": 0,\n\t\t\t\"discount_percentage\": 10,\n\t\t\t\"company\": \"_Test Company\"\n\t\t}\n\t\tfrappe.get_doc(test_record.copy()).insert()\n\n\t\targs = frappe._dict({\n\t\t\t\"item_code\": \"_Test Item\",\n\t\t\t\"company\": \"_Test Company\",\n\t\t\t\"price_list\": \"_Test Price List\",\n\t\t\t\"currency\": \"_Test Currency\",\n\t\t\t\"doctype\": \"Sales Order\",\n\t\t\t\"conversion_rate\": 1,\n\t\t\t\"price_list_currency\": \"_Test Currency\",\n\t\t\t\"plc_conversion_rate\": 1,\n\t\t\t\"order_type\": \"Sales\",\n\t\t\t\"customer\": \"_Test Customer\",\n\t\t\t\"name\": None\n\t\t})\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 10)\n\n\t\tprule = frappe.get_doc(test_record.copy())\n\t\tprule.applicable_for = \"Customer\"\n\t\tprule.title = \"_Test Pricing Rule for Customer\"\n\t\tself.assertRaises(MandatoryError, prule.insert)\n\n\t\tprule.customer = \"_Test Customer\"\n\t\tprule.discount_percentage = 20\n\t\tprule.insert()\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 20)\n\n\t\tprule = frappe.get_doc(test_record.copy())\n\t\tprule.apply_on = \"Item Group\"\n\t\tprule.item_group = \"All Item Groups\"\n\t\tprule.title = \"_Test Pricing Rule for Item Group\"\n\t\tprule.discount_percentage = 15\n\t\tprule.insert()\n\n\t\targs.customer = \"_Test Customer 1\"\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 10)\n\n\t\tprule = frappe.get_doc(test_record.copy())\n\t\tprule.applicable_for = \"Campaign\"\n\t\tprule.campaign = \"_Test Campaign\"\n\t\tprule.title = \"_Test Pricing Rule for Campaign\"\n\t\tprule.discount_percentage = 5\n\t\tprule.priority = 8\n\t\tprule.insert()\n\n\t\targs.campaign = \"_Test Campaign\"\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 5)\n\n\t\tfrappe.db.sql(\"update `tabPricing Rule` set priority=NULL where campaign='_Test Campaign'\")\n\t\tfrom erpnext.accounts.doctype.pricing_rule.pricing_rule\timport MultiplePricingRuleConflict\n\t\tself.assertRaises(MultiplePricingRuleConflict, get_item_details, args)\n\n\t\targs.item_code = \"_Test Item 2\"\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 15)\n\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\n\tdef test_pricing_rule_for_margin(self):\n\t\tfrom erpnext.stock.get_item_details import get_item_details\n\t\tfrom frappe import MandatoryError\n\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\n\t\ttest_record = {\n\t\t\t\"doctype\": \"Pricing Rule\",\n\t\t\t\"title\": \"_Test Pricing 
Rule\",\n\t\t\t\"apply_on\": \"Item Code\",\n\t\t\t\"item_code\": \"_Test FG Item 2\",\n\t\t\t\"selling\": 1,\n\t\t\t\"price_or_discount\": \"Discount Percentage\",\n\t\t\t\"price\": 0,\n\t\t\t\"margin_type\": \"Percentage\",\n\t\t\t\"margin_rate_or_amount\": 10,\n\t\t\t\"company\": \"_Test Company\"\n\t\t}\n\t\tfrappe.get_doc(test_record.copy()).insert()\n\t\t\n\t\titem_price = frappe.get_doc({\n\t\t\t\"doctype\": \"Item Price\",\n\t\t\t\"price_list\": \"_Test Price List 2\",\n\t\t\t\"item_code\": \"_Test FG Item 2\",\n\t\t\t\"price_list_rate\": 100\n\t\t})\n\t\t\n\t\titem_price.insert(ignore_permissions=True)\n\n\t\targs = frappe._dict({\n\t\t\t\"item_code\": \"_Test FG Item 2\",\n\t\t\t\"company\": \"_Test Company\",\n\t\t\t\"price_list\": \"_Test Price List\",\n\t\t\t\"currency\": \"_Test Currency\",\n\t\t\t\"doctype\": \"Sales Order\",\n\t\t\t\"conversion_rate\": 1,\n\t\t\t\"price_list_currency\": \"_Test Currency\",\n\t\t\t\"plc_conversion_rate\": 1,\n\t\t\t\"order_type\": \"Sales\",\n\t\t\t\"customer\": \"_Test Customer\",\n\t\t\t\"name\": None\n\t\t})\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"margin_type\"), \"Percentage\")\n\t\tself.assertEquals(details.get(\"margin_rate_or_amount\"), 10)\n\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\n\tdef test_pricing_rule_for_variants(self):\n\t\tfrom erpnext.stock.get_item_details import get_item_details\n\t\tfrom frappe import MandatoryError\n\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\n\t\tif not frappe.db.exists(\"Item\", \"Test Variant PRT\"):\n\t\t\tfrappe.get_doc({\n\t\t\t\t\"doctype\": \"Item\",\n\t\t\t\t\"item_code\": \"Test Variant PRT\",\n\t\t\t\t\"item_name\": \"Test Variant PRT\",\n\t\t\t\t\"description\": \"Test Variant PRT\",\n\t\t\t\t\"item_group\": \"_Test Item Group\",\n\t\t\t\t\"is_stock_item\": 1,\n\t\t\t\t\"variant_of\": \"_Test Variant Item\",\n\t\t\t\t\"default_warehouse\": \"_Test Warehouse - _TC\",\n\t\t\t\t\"stock_uom\": \"_Test UOM\",\n\t\t\t\t\"attributes\": [\n\t\t\t\t\t{\n\t\t\t\t\t \"attribute\": \"Test Size\",\n\t\t\t\t\t \"attribute_value\": \"Medium\"\n\t\t\t\t\t}\n\t\t\t\t],\n\t\t\t}).insert()\n\n\t\tfrappe.get_doc({\n\t\t\t\"doctype\": \"Pricing Rule\",\n\t\t\t\"title\": \"_Test Pricing Rule 1\",\n\t\t\t\"apply_on\": \"Item Code\",\n\t\t\t\"item_code\": \"_Test Variant Item\",\n\t\t\t\"selling\": 1,\n\t\t\t\"price_or_discount\": \"Discount Percentage\",\n\t\t\t\"price\": 0,\n\t\t\t\"discount_percentage\": 7.5,\n\t\t\t\"company\": \"_Test Company\"\n\t\t}).insert()\n\n\t\targs = frappe._dict({\n\t\t\t\"item_code\": \"Test Variant PRT\",\n\t\t\t\"company\": \"_Test Company\",\n\t\t\t\"price_list\": \"_Test Price List\",\n\t\t\t\"currency\": \"_Test Currency\",\n\t\t\t\"doctype\": \"Sales Order\",\n\t\t\t\"conversion_rate\": 1,\n\t\t\t\"price_list_currency\": \"_Test Currency\",\n\t\t\t\"plc_conversion_rate\": 1,\n\t\t\t\"order_type\": \"Sales\",\n\t\t\t\"customer\": \"_Test Customer\",\n\t\t\t\"name\": None\n\t\t})\n\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 7.5)\n\n\t\t# add a new pricing rule for that item code, it should take priority\n\t\tfrappe.get_doc({\n\t\t\t\"doctype\": \"Pricing Rule\",\n\t\t\t\"title\": \"_Test Pricing Rule 2\",\n\t\t\t\"apply_on\": \"Item Code\",\n\t\t\t\"item_code\": \"Test Variant PRT\",\n\t\t\t\"selling\": 1,\n\t\t\t\"price_or_discount\": \"Discount Percentage\",\n\t\t\t\"price\": 0,\n\t\t\t\"discount_percentage\": 17.5,\n\t\t\t\"company\": \"_Test 
Company\"\n\t\t}).insert()\n\n\t\tdetails = get_item_details(args)\n\t\tself.assertEquals(details.get(\"discount_percentage\"), 17.5)\n\n\tdef test_pricing_rule_for_stock_qty(self):\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\n\t\ttest_record = {\n\t\t\t\"doctype\": \"Pricing Rule\",\n\t\t\t\"title\": \"_Test Pricing Rule\",\n\t\t\t\"apply_on\": \"Item Code\",\n\t\t\t\"item_code\": \"_Test Item\",\n\t\t\t\"selling\": 1,\n\t\t\t\"price_or_discount\": \"Discount Percentage\",\n\t\t\t\"price\": 0,\n\t\t\t\"min_qty\": 5,\n\t\t\t\"max_qty\": 7,\n\t\t\t\"discount_percentage\": 17.5,\n\t\t\t\"company\": \"_Test Company\"\n\t\t}\n\t\tfrappe.get_doc(test_record.copy()).insert()\n\n\t\tif not frappe.db.get_value('UOM Conversion Detail',\n\t\t\t{'parent': '_Test Item', 'uom': 'box'}):\n\t\t\titem = frappe.get_doc('Item', '_Test Item')\n\t\t\titem.append('uoms', {\n\t\t\t\t'uom': 'Box',\n\t\t\t\t'conversion_factor': 5\n\t\t\t})\n\t\t\titem.save(ignore_permissions=True)\n\n\t\t# With pricing rule\n\t\tso = make_sales_order(item_code=\"_Test Item\", qty=1, uom=\"Box\", do_not_submit=True)\n\t\tso.items[0].price_list_rate = 100\n\t\tso.submit()\n\t\tso = frappe.get_doc('Sales Order', so.name)\n\t\tself.assertEquals(so.items[0].discount_percentage, 17.5)\n\t\tself.assertEquals(so.items[0].rate, 82.5)\n\n\t\t# Without pricing rule\n\t\tso = make_sales_order(item_code=\"_Test Item\", qty=2, uom=\"Box\", do_not_submit=True)\n\t\tso.items[0].price_list_rate = 100\n\t\tso.submit()\n\t\tso = frappe.get_doc('Sales Order', so.name)\n\t\tself.assertEquals(so.items[0].discount_percentage, 0)\n\t\tself.assertEquals(so.items[0].rate, 100)\n\n\tdef test_pricing_rule_with_margin_and_discount(self):\n\t\tfrappe.delete_doc_if_exists('Pricing Rule', '_Test Pricing Rule')\n\t\tmake_pricing_rule(selling=1, margin_type=\"Percentage\", margin_rate_or_amount=10)\n\t\tsi = create_sales_invoice(do_not_save=True)\n\t\tsi.items[0].price_list_rate = 1000\n\t\tsi.insert(ignore_permissions=True)\n\n\t\titem = si.items[0]\n\t\tself.assertEquals(item.rate, 1100)\n\t\tself.assertEquals(item.margin_rate_or_amount, 10)\n\n\t\t# With discount\n\t\titem.discount_percentage = 10\n\t\tsi.save()\n\t\titem = si.items[0]\n\t\tself.assertEquals(item.rate, 990)\n\t\tself.assertEquals(item.discount_percentage, 10)\n\t\tfrappe.db.sql(\"delete from `tabPricing Rule`\")\n\ndef make_pricing_rule(**args):\n\targs = frappe._dict(args)\n\n\tdoc = frappe.get_doc({\n\t\t\"doctype\": \"Pricing Rule\",\n\t\t\"title\": args.title or \"_Test Pricing Rule\",\n\t\t\"company\": args.company or \"_Test Company\",\n\t\t\"apply_on\": args.apply_on or \"Item Code\",\n\t\t\"item_code\": args.item_code or \"_Test Item\",\n\t\t\"applicable_for\": args.applicable_for,\n\t\t\"selling\": args.selling or 0,\n\t\t\"buying\": args.buying or 0,\n\t\t\"min_qty\": args.min_qty or 0.0,\n\t\t\"max_qty\": args.max_qty or 0.0,\n\t\t\"price_or_discount\": args.price_or_discount or \"Discount Percentage\",\n\t\t\"discount_percentage\": args.discount_percentage or 0.0,\n\t\t\"price\": args.price or 0.0,\n\t\t\"margin_type\": args.margin_type,\n\t\t\"margin_rate_or_amount\": args.margin_rate_or_amount or 0.0\n\t}).insert(ignore_permissions=True)\n\n\tapply_on = doc.apply_on.replace(' ', '_').lower()\n\tif args.get(apply_on) and apply_on != \"item_code\":\n\t\tdoc.db_set(apply_on, args.get(apply_on))\n\n\tapplicable_for = doc.applicable_for.replace(' ', '_').lower()\n\tif args.get(applicable_for):\n\t\tdoc.db_set(applicable_for, 
args.get(applicable_for))","sub_path":"python/erpnext/2017/8/test_pricing_rule.py","file_name":"test_pricing_rule.py","file_ext":"py","file_size_in_byte":9368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257167137","text":"\nimport os\nimport datetime as dt\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\"\"\"\nmy_assets_selection: according to OI_chg or Vol_chg or fundamental change\nsymbols_selection: according to the most traded/OI contract\nmethods:\n    get_backtest_cc_info(asset)\n    get_backtest_data_cc(asset)\n    get_backtest_data_adjusted_cc(asset)\n    \n\"\"\"\ndict_asset_exchange = {\n    'cu': 'SHFE', 'al': 'SHFE', 'zn': 'SHFE', 'pb': 'SHFE',\n    'ni': 'SHFE', 'sn': 'SHFE', 'au': 'SHFE', 'ag': 'SHFE',\n    'rb': 'SHFE', 'wr': 'SHFE', 'hc': 'SHFE', 'sc': 'SHFE',\n    'fu': 'SHFE', 'bu': 'SHFE', 'ru': 'SHFE', 'sp': 'SHFE',\n    'm': 'DCE', 'y': 'DCE', 'a': 'DCE', 'b': 'DCE', 'p': 'DCE',\n    'c': 'DCE', 'cs': 'DCE', 'jd': 'DCE', 'bb': 'DCE', 'fb': 'DCE',\n    'l': 'DCE', 'v': 'DCE', 'eg': 'DCE', 'pp': 'DCE', 'j': 'DCE',\n    'jm': 'DCE', 'i': 'DCE', 'SR': 'CZCE', 'CF': 'CZCE',\n    'CY': 'CZCE', 'ZC': 'CZCE', 'FG': 'CZCE', 'TA': 'CZCE',\n    'MA': 'CZCE', 'WH': 'CZCE', 'PM': 'CZCE', 'RI': 'CZCE',\n    'LR': 'CZCE', 'JR': 'CZCE', 'RS': 'CZCE', 'OI': 'CZCE',\n    'RM': 'CZCE', 'SF': 'CZCE', 'SM': 'CZCE', 'AP': 'CZCE',\n    'CJ': 'CZCE'\n    }\n\ndict_contract_size = {\n    'cu': 5, 'al': 5, 'zn': 5, 'pb': 5, 'ni': 1,\n    'sn': 1, 'au': 1000, 'ag': 15, 'rb': 10,\n    'wr': 10, 'hc': 10, 'sc': 1000, 'fu': 10,\n    'bu': 10, 'ru': 10, 'sp': 10, 'm': 10,\n    'y': 10, 'a': 10, 'b': 10, 'p': 10, 'c': 10,\n    'cs': 10, 'jd': 10, 'bb': 500, 'fb': 500,\n    'l': 5, 'v': 5, 'eg': 10, 'pp': 5, 'j': 100,\n    'jm': 60, 'i': 100, 'SR': 10, 'CF': 5, 'CY': 5,\n    'ZC': 100, 'FG': 20, 'TA': 5, 'MA': 10, 'WH': 20,\n    'PM': 50, 'RI': 20, 'LR': 20, 'JR': 20, 'RS': 10,\n    'OI': 10, 'RM': 10, 'SF': 5, 'SM': 5, 'AP': 10,\n    'CJ': 5\n    }\n\n# data_path = 'D://BaiduNetdiskDownload//Data'\ndata_path = 'C://Users//nealz//Documents//Data'\n\n\ndef my_assets_selection():\n    \"\"\"\n    Does +/- indicate the long/short direction? Incorporate fundamental information?\n    :return:\n    \"\"\"\n    dict_assets = {\n        'long': ['rb'],\n        'short': ['MA']\n    }\n    return dict_assets\n\n\ndef symbols_selection(dict_assets):\n    \"\"\"\n\n    :param dict_assets:\n    :return:\n    \"\"\"\n    dict_symbols = {\n        'long': ['SHFE.rb1905'],\n        'short': ['CZCE.MA1905']\n    }\n    return dict_symbols\n\n\ndef get_backtest_cc_info(asset):\n    str_exchange = dict_asset_exchange[asset]\n    df_cc = pd.read_excel(data_path + '//' + str_exchange + '_Hist//' + asset + '.xlsx')\n    df_cc.reset_index(inplace=True)\n    if '日期' not in df_cc.columns.tolist():\n        df_cc.drop(columns='index', inplace=True)\n        df_cc.rename(columns={'Unnamed: 0': 'index'}, inplace=True)\n        df_cc.loc[:, '日期'] = df_cc.loc[:, 'index'].apply(lambda x: str(x))\n    df_cc.set_index(keys='日期', inplace=True)\n    ls_idx = df_cc.index.tolist()\n    ls_periods = [np.nan] * len(ls_idx)\n    for num in range(1, len(ls_idx)):\n        if df_cc.cc0[num - 1] != df_cc.cc0[num]:\n            ls_periods[num] = ls_idx[num]\n\n    ls_periods_start = [x for x in ls_periods if type(x) == str]\n    ls_periods_end = [ls_idx[ls_idx.index(x)-1] for x in ls_periods_start]\n    ls_periods_end = ls_periods_end[1:]\n    ls_periods_end.append(ls_idx[-1])\n\n    ls_contracts = list(df_cc.cc0.dropna().unique())\n    dict_info = {\n        'contracts': ls_contracts,\n        'periods_start': ls_periods_start,\n        'periods_end': ls_periods_end,\n        'df_cc': df_cc\n    }\n\n    return dict_info\n\n\ndef get_backtest_data_cc(asset):\n    str_exchange = 
dict_asset_exchange[asset]\n    dict_info = get_backtest_cc_info(asset)\n    df_cc0 = dict_info['df_cc'].loc[:, 'cc0']\n\n    if str_exchange == 'DCE':\n        df_tmp = pd.read_csv(data_path + '//' + str_exchange +'_Hist//' + asset + '_hist.csv',\n                             encoding='gbk')\n        df_tmp.loc[:, '日期'] = df_tmp.loc[:, '日期'].apply(lambda x: str(int(x)))\n        df_tmp.set_index(['日期', '合约'], inplace=True)\n        list_tuple_cc = [(x, df_cc0[x]) for x in df_cc0.index.tolist()]\n        df_data_cc = df_tmp.reindex(list_tuple_cc)\n    else:\n        if str_exchange == 'SHFE':\n            df_tmp = pd.read_excel(data_path + '//' + str_exchange + '_Hist//' + 'SHFE_2009_2018.xlsx')\n            df_tmp.loc[:, '日期'] = df_tmp.loc[:, '日期'].apply(lambda x: str(x))\n            df_tmp.set_index(['日期', '合约'], inplace=True)\n            list_tuple_cc = [(x, df_cc0[x]) for x in df_cc0.index.tolist()]\n            df_data_cc = df_tmp.reindex(list_tuple_cc)\n        elif str_exchange == 'CZCE':\n            df_tmp = pd.read_excel(data_path + '//' + str_exchange + '_Hist//' + 'CZCE_2015_2018.xlsx')\n            df_tmp.loc[:, '交易日期'] = df_tmp.loc[:, '交易日期'].apply(lambda x: str(x))\n            df_tmp.set_index(['交易日期', '品种代码'], inplace=True)\n            list_tuple_cc = [(x, df_cc0[x]) for x in df_cc0.index.tolist()]\n            df_data_cc = df_tmp.reindex(list_tuple_cc)\n\n    return df_data_cc\n\n\ndef get_backtest_data_adjusted_cc(asset):\n    # TODO\n    df_data_adjusted_cc = pd.DataFrame()\n    return df_data_adjusted_cc\n\n\nif __name__ == '__main__':\n    asset = 'rb'\n    # asset = 'SR'\n    # asset = 'j'\n    dict_info = get_backtest_cc_info(asset)\n    df_data = get_backtest_data_cc(asset)\n    print(df_data.head(20))\n","sub_path":"Version2/trading_selection.py","file_name":"trading_selection.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92075193","text":"\nfrom PIL import Image\n\nimport os.path\n\ndef findSecret(file):\n    # fields of 10 x 10 pixels\n    os.chdir(\"Images\")\n    message = \"Nein, gar nicht. 
Es war ganz leicht.\"\n    im = Image.open(file, 'r')\n\n    width, height = im.size\n    pixels = []\n    for x in range(int(width/2)):\n\n        pixels.append((im.getpixel((x, height/2))))\n\n    print(pixels)\n    # iterate over a snapshot so removing items does not skip elements\n    for pixel in list(pixels):\n        count = 0\n        for toTest in pixels:\n            if pixel == toTest:\n                count +=1\n        if count < 10:\n            pixels.remove(pixel)\n\n    print(pixels) # still does not quite work\n\n\nif __name__ == '__main__':\n    filename = \"mi.png\"\n    findSecret(filename)","sub_path":"pyprog/UEB_9/aufg1.py","file_name":"aufg1.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612268704","text":"import sys\nsys.stdin = open('input.txt','r')\n\nN = int(input())\nspec = []\nfor _ in range(N):\n    spec.append(list(map(int,input().split())))\n# for a in spec : print(*a)\n\ntarget = N // 2\nnumli = [a for a in range(1,N+1)]\ncombset = set()\nQ = [(0,set())]\n\nwhile Q :\n    cnt , tempset = Q.pop(0)\n    if cnt == target :\n        combset.add(tuple(tempset))\n        continue\n    else :\n        for num in numli :\n            if num not in tempset:\n                Q.append((cnt+1, tempset | {num}))\n# print(combset)\n\ndef permutation(memli):\n    result = 0\n    for i in memli:\n        for j in memli:\n            result += spec[i-1][j-1]\n    return result\n\nminvs = 999999999\n# teamset = set()\nwhile combset and minvs:\n    tA = combset.pop()\n    # if tuple(tA) not in teamset :\n    tB = tuple(set(numli) - set(tA))\n    # teamset.add(tuple(tA))\n    combset.remove(tuple(tB))\n    Ateamworks = permutation(tA)\n    Bteamworks = permutation(tB)\n    # print(Ateamworks, ' vs ', Bteamworks)\n    vs = abs(Bteamworks - Ateamworks)\n    # print(vs)\n    if vs < minvs : minvs = vs\nprint(minvs)\n","sub_path":"ForNovTest/BeakJoon/14889.py","file_name":"14889.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"597209616","text":"# -*- coding: utf8 -*-\nfrom django.db import models\n\n\nclass SignUp(models.Model):\n    first_name = models.CharField(max_length=120, null=True, blank=True)\n    last_name = models.CharField(max_length=120, null=True, blank=True)\n    email = models.EmailField()\n    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)\n    updated = models.DateTimeField(auto_now_add=False, auto_now=True)\n    boobs = models.BooleanField(default=True, verbose_name=\"В лесу родилась елочка, в лесу она росла.\")\n\n    def __unicode__(self):\n        return self.email\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569095130","text":"# -*- coding:utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport requests\nfrom sqlalchemy import Column, String, Integer, CHAR\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nBase = declarative_base()\nclass Baike(Base):\n    __tablename__=\"baike\"\n    id = Column(Integer, primary_key=True)\n    bai_ke_name = Column(CHAR(255))\n    bai_ke_content = Column(String(1024))\n\nclass Crawl(Baike):\n    def __init__(self):\n        self.root_url = \"http://baike.baidu.com/subview/14806/8904138.html\"\n        self.db_url = \"mysql://root@localhost:3306/test?charset=utf8\"\n\n    def get_content(self, url):\n        r = requests.get(url)\n        if r.status_code != 200:\n            return -1\n        else:\n            html = r.content\n            return html\n\n\n    def parse_content(self,content):\n        html_doc = content\n        Soup = BeautifulSoup(html_doc, \"lxml\", 
from_encoding=\"utf-8\")\n        all_content = Soup.find(\"h2\")\n        name_1 = all_content.get_text()\n        describe = Soup.find(\"div\", class_=\"para\").get_text()\n        return name_1, describe\n\n    def mysql_insert(self, id, name, content):\n        db = self.db_url\n        engine = create_engine(db, echo=True)\n        DBSession = sessionmaker(bind=engine)\n        session = DBSession()\n        new_Baike = Baike(id=id, bai_ke_name=name, bai_ke_content=content)\n        session.add(new_Baike)\n        session.commit()\n        session.close()\n\n\nif __name__ == \"__main__\":\n    Crawler = Crawl()\n    url = Crawler.root_url\n    html = Crawler.get_content(url)\n    content = Crawler.parse_content(html)\n    id = 1\n    name = content[0]\n    content = content[1]\n    print(name)\n    print(content)\n\n","sub_path":"001/001Baike.py","file_name":"001Baike.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"31057577","text":"# -*- coding: utf-8 -*-\n'''\n\tBoth class-based and function-based views are used\n\tfor demonstration purposes.\n\t\n\tPlease note that the code is written to optimize for\n\ttime and effort, as this is demonstration work only.\n'''\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView,ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.views.decorators.csrf import csrf_protect\nfrom models import Genre as GenreModel\nfrom models import Track as TrackModel\nfrom models import UserRating\nfrom .forms import GenreForm, TrackForm\nimport json\nimport simplejson\n\n\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required(login_url='/login/')\ndef index(request):\n\tcontext = {}\n\treturn render(request, 'music/index.html', context)\n\n\n##############################\n#                            #\n#     Class based views      #\n#                            #\n##############################\n\n#create genre\nclass GenreAdd(CreateView):\n\ttemplate_name = \"music/genre_form.html\"\n\tform_class = GenreForm\n\n\tdef form_valid(self, form):\n\t\treturn super(GenreAdd, self).form_valid(form)\n\n\tdef get_success_url(self):\n\t\treturn reverse('genres')\n\n#read genre\nclass Genre(ListView):\n\tmodel = GenreModel\n\tpaginate_by = 3\n\ttemplate_name='music/genres.html'\n\n\tdef get_queryset(self):\n\t\tqs = super(Genre, self).get_queryset().order_by('created_at')\n\t\tqs = qs.reverse()\n\t\treturn qs\n\n#update genre\nclass GenreUpdate(UpdateView):\n\tmodel = GenreModel\n\tform_class = GenreForm\n\ttemplate_name = \"music/genre_update.html\"\n\n\tdef get_success_url(self):\n\t\treturn reverse('genre_detail', kwargs={\n\t\t\t'pk': self.object.pk,\n\t\t})\n\n#delete genre\nclass GenreDelete(DeleteView):\n\tmodel = GenreModel\n\ttemplate_name = \"music/confirm_delete.html\"\n\tsuccess_url = reverse_lazy('genres')\n\nclass GenreDetail(DetailView):\n\tmodel = GenreModel\n\n###################################\n#                                 #\n#\tFunction based views      #\n#                                 #\n###################################\n\n@csrf_protect\n@login_required\ndef add_tracks(request):\n\tcontext = {}\n\tif request.method == \"POST\":\n\t\tdata = json.loads(request.body)\n\t\ttitle, genre_string, rating = data.get('title', ''), data.get('genre', ''), data.get('rating', '')\n\t\tgenre_names = 
genre_string.split(',')\n\t\ttrack = TrackModel(title=title, rating=rating)\n\t\ttrack.save()\n\t\tfor genre_name in genre_names:\n\t\t\tgenre_obj = GenreModel.objects.get(name=genre_name)\n\t\t\ttrack.genre.add(genre_obj)\n\t\tcontext['status'] = True\n\treturn HttpResponse(simplejson.dumps(context), content_type=\"application/json\")\n\n@login_required\ndef get_tracks(request):\n\tcontext = {}\n\ttotal_tracks = TrackModel.objects.all().order_by('created_at').reverse()\n\tpaginator = Paginator(total_tracks, 3)\n\tpage = request.GET.get('page', None)\n\ttracks = None\n\ttry:\n\t\ttracks = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\ttracks = paginator.page(1)\n\texcept EmptyPage:\n\t\ttracks = paginator.page(paginator.num_pages)\n\tgenres = GenreModel.objects.all()\n\tcontext['genres'] = genres\n\tcontext['tracks'] = tracks\n\tcontext['is_paginated'] = True\n\treturn render(request, 'music/tracks.html', context)\n\ndef track_detail(request, pk):\n\tcontext = {}\n\tif request.method == \"GET\":\n\t\ttrack = TrackModel.objects.get(id=pk)\n\t\tcontext['track'] = track\n\t\ttry:\n\t\t\tuserrating = UserRating.objects.get(user=request.user, track=track)\n\t\t\tcontext['user_rating'] = userrating.rating\n\t\texcept UserRating.DoesNotExist:\n\t\t\tcontext['user_rating'] = \"NA\"\n\t\treturn render(request, 'music/track_detail.html', context)\n\n@csrf_protect\n@login_required\ndef update_track(request, id):\n\tcontext = {}\n\tcurrent_user = request.user\n\ttrack = TrackModel.objects.get(id=id)\n\tgenres = GenreModel.objects.all()\n\trating, title, genre_string = request.POST.get('rating', ''), request.POST.get('title', ''), request.POST.get('updated_genre', '')\n\ttry:\n\t\ttrack.title = title\n\t\tuserrating = UserRating.objects.get_or_create(user=current_user, track=track)\n\n\t\tif userrating[1]:\n\t\t\ttrack.rating = (track.rating + int(rating)) / (track.votes + 1)\n\t\t\ttrack.votes = track.votes + 1\n\t\telse:\n\t\t\tnew_rating = (track.rating * track.votes) - float(userrating[0].rating) + float(rating)\n\t\t\ttrack.rating = new_rating / track.votes\n\n\t\ttrack.save()\n\n\t\tuserrating[0].rating = int(rating)\n\t\tuserrating[0].save()\n\n\t\told_genres = track.genre.all()\n\t\told_genres_list = [genre.name for genre in old_genres]\n\t\tgenres_list = genre_string.split(',')\n\t\tadd_diff = list(set(genres_list) - set(old_genres_list))\n\n\t\tfor genre in add_diff:\n\t\t\tgenre_obj = GenreModel.objects.get(name=genre)\n\t\t\ttrack.genre.add(genre_obj)\n\t\trem_diff = list(set(old_genres_list) - set(genres_list))\n\n\t\tfor genre in rem_diff:\n\t\t\tgenre_obj = GenreModel.objects.get(name=genre)\n\t\t\ttrack.genre.remove(genre_obj)\n\t\treturn redirect('/tracks/')\n\texcept Exception as e:\n\t\tcontext = {'track': track, 'genres':genres}\n\t\treturn render(request, 'music/edit_track.html', context)\n\n@login_required\ndef delete_track(request, id, template_name='music/confirm_delete.html'):\n\ttrack = get_object_or_404(TrackModel, id=id) \n\tif request.method=='POST':\n\t\ttrack.delete()\n\t\treturn redirect('tracks')\n\treturn render(request, template_name, {'object':track})\n\n@login_required\ndef edit_track(request, track_id):\n\ttrack = TrackModel.objects.get(id=track_id)\n\tgenres = GenreModel.objects.all()\n\tcontext = {'track': track, 'genres':genres}\n\ttry:\n\t\tuserrating = UserRating.objects.get(user=request.user, track=track)\n\t\tcontext['user_rating'] = userrating.rating\n\texcept UserRating.DoesNotExist:\n\t\tcontext['user_rating'] = 5\n\treturn render(request, 
'music/edit_track.html', context)\n","sub_path":"music/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"267171562","text":"from __future__ import print_function\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\n\r\nimport copy\r\n\r\nfrom compas.utilities import pairwise\r\n\r\nfrom compas.utilities import geometric_key\r\nfrom compas.utilities import reverse_geometric_key\r\n\r\n__all__ = [\r\n 'mesh_unweld_vertices',\r\n 'mesh_weld',\r\n 'meshes_join',\r\n 'meshes_join_and_weld',\r\n]\r\n\r\ndef mesh_unweld_vertices(mesh, fkey, where=None):\r\n \"\"\"Unweld a face of the mesh.\r\n\r\n Parameters\r\n ----------\r\n mesh : Mesh\r\n A mesh object.\r\n fkey : hashable\r\n The identifier of a face.\r\n where : list (None)\r\n A list of vertices to unweld.\r\n Default is to unweld all vertices of the face.\r\n\r\n Examples\r\n --------\r\n .. plot::\r\n :include-source:\r\n\r\n import compas\r\n\r\n from compas.datastructures import Mesh\r\n from compas.plotters import MeshPlotter\r\n from compas.geometry import subtract_vectors\r\n\r\n mesh = Mesh.from_obj(compas.get('faces.obj'))\r\n\r\n vertices = set(mesh.vertices())\r\n\r\n fkey = 12\r\n where = mesh.face_vertices(fkey)[0:1]\r\n centroid = mesh.face_centroid(fkey)\r\n\r\n face = mesh.unweld_vertices(fkey, where)\r\n\r\n for key in face:\r\n if key in vertices:\r\n continue\r\n xyz = mesh.vertex_coordinates(key)\r\n v = subtract_vectors(centroid, xyz)\r\n mesh.vertex[key]['x'] += 0.3 * v[0]\r\n mesh.vertex[key]['y'] += 0.3 * v[1]\r\n mesh.vertex[key]['z'] += 0.3 * v[2]\r\n\r\n plotter = MeshPlotter(mesh, figsize=(10, 7))\r\n\r\n plotter.draw_vertices()\r\n plotter.draw_faces(text={fkey: fkey for fkey in mesh.faces()})\r\n\r\n plotter.show()\r\n\r\n \"\"\"\r\n face = []\r\n vertices = mesh.face_vertices(fkey)\r\n\r\n if not where:\r\n where = vertices\r\n\r\n for u, v in pairwise(vertices + vertices[0:1]):\r\n if u in where:\r\n x, y, z = mesh.vertex_coordinates(u)\r\n u = mesh.add_vertex(x=x, y=y, z=z)\r\n if u in where or v in where:\r\n mesh.halfedge[v][u] = None\r\n face.append(u)\r\n\r\n mesh.add_face(face, fkey=fkey)\r\n\r\n return face\r\n\r\ndef mesh_weld(mesh, precision = None, cls = None):\r\n \"\"\"Weld vertices of a mesh within some precision distance.\r\n\r\n Parameters\r\n ----------\r\n mesh : Mesh\r\n A mesh.\r\n\r\n precision: str\r\n Tolerance distance for welding.\r\n\r\n Returns\r\n -------\r\n mesh\r\n The welded mesh.\r\n\r\n \"\"\"\r\n\r\n if cls is None:\r\n cls = type(mesh)\r\n\r\n # create vertex map based on geometric keys in dictionary without duplicates\r\n vertex_map = {geometric_key(mesh.vertex_coordinates(vkey), precision): vkey for vkey in mesh.vertices()}\r\n # list vertices with coordinates\r\n vertices = [reverse_geometric_key(geom_key) for geom_key in vertex_map.keys()]\r\n # reorder vertex keys in vertex map\r\n vertex_map = {geom_key: i for i, geom_key in enumerate(vertex_map.keys())}\r\n # modify vertex indices in the faces\r\n faces = [ [vertex_map[geometric_key(mesh.vertex_coordinates(vkey), precision)] for vkey in mesh.face_vertices(fkey)] for fkey in mesh.faces()]\r\n\r\n return cls.from_vertices_and_faces(vertices, faces)\r\n\r\ndef meshes_join_and_weld(meshes, precision = None, cls = None):\r\n \"\"\"Join and and weld meshes within some precision distance.\r\n\r\n Parameters\r\n ----------\r\n meshes : list\r\n A list of meshes.\r\n\r\n 
precision: str\r\n Tolerance distance for welding.\r\n\r\n Returns\r\n -------\r\n mesh\r\n The joined and welded mesh.\r\n\r\n \"\"\"\r\n\r\n if cls is None:\r\n cls = type(meshes[0])\r\n\r\n # create vertex map based on geometric keys in dictionary without duplicates\r\n vertex_map = {geometric_key(mesh.vertex_coordinates(vkey), precision): vkey for mesh in meshes for vkey in mesh.vertices()}\r\n # list vertices with coordinates\r\n vertices = [reverse_geometric_key(geom_key) for geom_key in vertex_map.keys()]\r\n # reorder vertex keys in vertex map\r\n vertex_map = {geom_key: i for i, geom_key in enumerate(vertex_map.keys())}\r\n # modify vertex indices in the faces\r\n faces = [ [vertex_map[geometric_key(mesh.vertex_coordinates(vkey), precision)] for vkey in mesh.face_vertices(fkey)] for mesh in meshes for fkey in mesh.faces()]\r\n\r\n return cls.from_vertices_and_faces(vertices, faces)\r\n\r\ndef meshes_join(meshes, cls = None):\r\n \"\"\"Join meshes without welding.\r\n\r\n Parameters\r\n ----------\r\n meshes : list\r\n A list of meshes.\r\n\r\n Returns\r\n -------\r\n mesh\r\n The joined mesh.\r\n\r\n \"\"\"\r\n\r\n if cls is None:\r\n cls = type(meshes[0])\r\n\r\n vertices = []\r\n faces = []\r\n\r\n for mesh in meshes:\r\n # create vertex map based on geometric keys in dictionary with duplicates\r\n vertex_map = ({vkey: len(vertices) + i for i, vkey in enumerate(mesh.vertices())})\r\n # list vertices with coordinates\r\n vertices += [mesh.vertex_coordinates(vkey) for vkey in mesh.vertices()]\r\n # modify vertex indices in the faces\r\n faces += [ [vertex_map[vkey] for vkey in mesh.face_vertices(fkey)] for fkey in mesh.faces()]\r\n\r\n return cls.from_vertices_and_faces(vertices, faces)\r\n\r\n# ==============================================================================\r\n# Main\r\n# ==============================================================================\r\n\r\nif __name__ == \"__main__\":\r\n\r\n import compas\r\n\r\n from compas.datastructures import Mesh\r\n from compas.plotters import MeshPlotter\r\n from compas.geometry import subtract_vectors\r\n\r\n mesh = Mesh.from_obj(compas.get('faces.obj'))\r\n\r\n vertices = set(mesh.vertices())\r\n\r\n fkey = 12\r\n where = mesh.face_vertices(fkey)[0:2]\r\n centroid = mesh.face_centroid(fkey)\r\n\r\n face = mesh.unweld_vertices(fkey, where)\r\n\r\n for key in face:\r\n if key in vertices:\r\n continue\r\n xyz = mesh.vertex_coordinates(key)\r\n v = subtract_vectors(centroid, xyz)\r\n mesh.vertex[key]['x'] += 0.3 * v[0]\r\n mesh.vertex[key]['y'] += 0.3 * v[1]\r\n mesh.vertex[key]['z'] += 0.3 * v[2]\r\n\r\n plotter = MeshPlotter(mesh, figsize=(10, 7))\r\n\r\n plotter.draw_vertices()\r\n plotter.draw_faces(text={fkey: fkey for fkey in mesh.faces()})\r\n\r\n plotter.show()","sub_path":"src/compas/datastructures/mesh/operations/weld.py","file_name":"weld.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"392342376","text":"# -*- coding: utf-8 -*-\nimport math\nn=int(input('digite n: '))\nnum=1\nden=n\nS=num/den\nfor i in range(1,n-1,1):\n num=num+i\n den=den-i\n S=S+(num/den)\nprint(S)","sub_path":"moodledata/vpl_data/92/usersdata/203/37711/submittedfiles/atividade.py","file_name":"atividade.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616469274","text":"#!/usr/bin/env python\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n 
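# '--all' additionally bootstraps the third-party pieces: UnixBench 5.1.2 (download, patch, make) and a GChartWrapper SVN checkout\n    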
if \"--all\" in sys.argv:\n u_dir = \"unixbench-5.1.2\"\n if not os.path.isdir(u_dir):\n u = \"https://byte-unixbench.googlecode.com/files/unixbench-5.1.2.tar.gz\"\n cmd = \"wget %s && tar xzf %s.tar.gz && \" + \\\n \"patch -p0 -i %s.patch && cd %s && make all\"\n os.system(cmd % (u, u_dir, u_dir, u_dir))\n\n g_dir = \"GChartWrapper\"\n if not os.path.isdir(g_dir):\n u = \"http://google-chartwrapper.googlecode.com/svn/trunk/%s\" % g_dir\n os.system(\"svn co %s\" % u)\n","sub_path":"bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157310245","text":"from collections import OrderedDict\nfrom enum import Enum\nfrom typing import Tuple\n\nimport torch\nfrom torch.nn import Sequential, Linear, Softmax, ReLU\n\nfrom .general import Endianness, DataTypesSize, int_to_bytes, string_to_bytes, list_to_bytes, to_int, to_string, to_list_int, to_list_float\n\n\ndef state_dict_to_bytes(state_dict: OrderedDict, endianness: Endianness = Endianness.Native):\n result = int_to_bytes(len(state_dict), endianness)\n for key in state_dict:\n result += string_to_bytes(key, endianness)\n result += tensor_to_bytes(state_dict[key], endianness)\n return result\n\n\ndef to_state_dict(value: bytes, start_index: int = 0, endianness: Endianness = Endianness.Native) -> Tuple[OrderedDict, int]:\n entries_length = to_int(value, start_index, endianness)\n total_read = DataTypesSize.Int\n\n d = OrderedDict()\n for i in range(entries_length):\n key, read_bytes = to_string(value, start_index + total_read, endianness)\n total_read += read_bytes\n tensor, read_bytes = to_tensor_float(value, start_index + total_read, endianness)\n total_read += read_bytes\n d[key] = tensor\n\n return d, total_read\n\n\ndef tensor_to_bytes(tensor_value: torch.Tensor, endianness: Endianness = Endianness.Native) -> bytes:\n return list_to_bytes(list(tensor_value.shape), endianness) + list_to_bytes(tensor_value.flatten().tolist(), endianness)\n\n\ndef to_tensor_float(value: bytes, start_index: int = 0, endianness: Endianness = Endianness.Native) -> Tuple[torch.Tensor, int]:\n shape, bytes_read = to_list_int(value, start_index, endianness)\n total_read = bytes_read\n\n items, bytes_read = to_list_float(value, start_index + total_read, endianness)\n total_read += bytes_read\n\n return torch.tensor(items).reshape(shape), total_read\n\n\ndef to_tensor_int(value: bytes, start_index: int = 0, endianness: Endianness = Endianness.Native) -> Tuple[torch.Tensor, int]:\n shape, bytes_read = to_list_int(value, start_index, endianness)\n total_read = bytes_read\n\n items, bytes_read = to_list_int(value, start_index + total_read, endianness)\n total_read += bytes_read\n\n return torch.tensor(items).reshape(shape), total_read\n\n\nclass LayerName(int, Enum):\n Linear = 0\n ReLU = 1\n Softmax = 2\n Sequential = 3\n\n\ndef layout_to_bytes(model: Sequential):\n if not isinstance(model, Sequential):\n raise RuntimeError(\"Works only with Sequential models\")\n\n result = int_to_bytes(len(model))\n\n for child in model.children():\n if isinstance(child, Linear):\n result += int_to_bytes(LayerName.Linear)\n result += int_to_bytes(child.in_features)\n result += int_to_bytes(child.out_features)\n elif isinstance(child, Softmax):\n result += int_to_bytes(LayerName.Softmax)\n elif isinstance(child, ReLU):\n result += int_to_bytes(LayerName.ReLU)\n else:\n raise RuntimeError(f\"Layer is not supported yet: {type(child)}\")\n\n return 
result\n","sub_path":"python/src/serialization/torch_serialization.py","file_name":"torch_serialization.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"574160352","text":"import tensorflow as tf\nfrom tqdm import tqdm\nimport pandas as pd\nimport pickle\nimport numpy as np\nimport spacy\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Embedding, LSTM\nfrom tensorflow.keras.layers import Dense\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nimport string\n\nprint(\"OKAY LETS STAAAART!!!\")\n\ndef encodeY(Y):\n    '''create one-hot (dummies) for output, see also https://machinelearningmastery.com/multi-class-classification-tutorial-keras-deep-learning-library/\n    encode class values as integers\n    '''\n    encoder = LabelEncoder()\n    encoder.fit(Y)\n    encoded_Y = encoder.transform(Y)\n    dummy_y = tf.keras.utils.to_categorical(encoded_Y)\n    return dummy_y\n\n\nAEM = '../tmpanne/fullsample/w2v_model_nr_7_window_10_size_300_negsample_15.txt'\nPATH_TO_DATA = \"../tmpanne/AEM_data/dataset_vermeer.pkl\"\n\ndf = pd.read_pickle(PATH_TO_DATA)\n\nX_train, X_test, y_train, y_test = train_test_split([t.translate(str.maketrans('', '', string.punctuation)) for t in df['text']], encodeY(df['topic'].map(int)), test_size = 0.2)\n\n# TOKENIZATION + PAD\n\n# create the tokenizer\ntokenizer = Tokenizer()\n# fit the tokenizer on the documents\ntokenizer.fit_on_texts(X_train)\n\n# sequence encode\nencoded_docs = tokenizer.texts_to_sequences(X_train)\n# pad sequences\nmax_length = max([len(s.split()) for s in X_train])\nXtrain = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\n# THINK THIS OVER (the training max_length is re-used for the test set)\nencoded_docs = tokenizer.texts_to_sequences(X_test)\n# pad sequences\nXtest = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\n\nembeddings_index = {}\nwith open(AEM) as f:\n    numberofwordvectors, dimensions = [int(e) for e in next(f).split()]\n    for line in tqdm(f):\n        values = line.split()\n        embeddings_index[values[0]] = np.asarray(values[1:], dtype='float32')\n\n        # word = values[0]\n        # coefs = np.asarray(values[1:], dtype='float32')\n        # embeddings_index[word] = coefs\n\nprint('Found %s word vectors.' % len(embeddings_index))\nprint('Should be {} vectors with {} dimensions'.format(numberofwordvectors, dimensions))\n\n\n# create a weight matrix for the Embedding layer from a loaded embedding\n\ndef get_weight_matrix(embedding, vocab):\n    words_not_found = 0\n    total_words = 0\n    DEBUG_lijstmetwoorden = []\n    # total vocabulary size plus 0 for unknown words\n    vocab_size = len(vocab) + 1\n    # define weight matrix dimensions with all 0\n    weight_matrix = np.zeros((vocab_size, 300))\n    # step vocab, store vectors using the Tokenizer's integer mapping\n    for word, i in tqdm(vocab.items()):\n        total_words+=1\n        e = embedding.get(word, None)\n        if e is not None: # if we do not find the word, we do not want to replace anything but leave the zero's\n            weight_matrix[i] = e\n        else:\n            words_not_found+=1\n            DEBUG_lijstmetwoorden.append(word)\n    print('Weight matrix created. 
For {} out of {} words, we did not have any embedding.'.format(words_not_found, total_words))\n return DEBUG_lijstmetwoorden, weight_matrix\n\n\nmissingwords, embedding_vectors = get_weight_matrix(embeddings_index, tokenizer.word_index)\n\nembedding_layer = Embedding(len(tokenizer.word_index)+1, 300, weights=[embedding_vectors], input_length=max_length, trainable=False)\nprint(\"created embedding layer\")\n\n\n\n# alternatief model\nnumberoflabels = 4\nmodel = Sequential()\nmodel.add(embedding_layer)\nmodel.add(Conv1D(128, 4, activation='relu'))\nmodel.add(MaxPooling1D(4))\nmodel.add(MaxPooling1D(4))\nmodel.add(Flatten())\nmodel.add(Dense(units=64, activation='relu'))\nmodel.add(Dense(units=numberoflabels, activation='softmax')) # voor twee categorien sigmoid, voor 1 tanh\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\n\nVALIDATION_SIZE=1000\n\nmodel.fit(Xtrain[:-VALIDATION_SIZE], y_train[:-VALIDATION_SIZE],\n epochs=30, verbose=True,\n validation_data=(Xtrain[-VALIDATION_SIZE:], y_train[-VALIDATION_SIZE:]))\n\nloss, acc = model.evaluate(Xtest, y_test, verbose=True)\nprint('Test Accuracy: %f' % (acc*100))\n","sub_path":"deeplearning.py","file_name":"deeplearning.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"648788260","text":"from flask_restful import Resource, reqparse\nfrom models.Hotel import HotelModel\nfrom flask_jwt_extended import jwt_required\n\n\nclass Hoteis(Resource):\n def get(self):\n return {'hoteis': [hotel.json() for hotel in HotelModel.query.all()]}\n\nclass Hotel(Resource):\n argumentos = reqparse.RequestParser()\n argumentos.add_argument('nome', type = str, required = True, help = 'The field \"nome\" cannot be blank.')\n argumentos.add_argument('estrelas', type = int, required = True, help = 'The field \"estrelas\" cannot be blank.')\n argumentos.add_argument('diaria', type = float, required = True, help = 'The field \"diaria\" cannot be blank.')\n argumentos.add_argument('cidade', type = str, required = True, help = 'The field \"cidade\" cannot be blank.')\n\n def findHotel(id):\n for hotel in hoteis:\n if hotel['id'] == id:\n return hotel\n return None\n\n def get(self, id):\n hotel = HotelModel.findHotel(id)\n if hotel:\n return hotel.json()\n \n return {'message': 'Hotel não encontrado!'}, 404 #Not Found.\n\n @jwt_required\n def post(self, id):\n if HotelModel.findHotel(id):\n return {\"message\": \"ID '{}', ja existe.\".format(id)}, 400 #Not Found.\n\n dados = Hotel.argumentos.parse_args()\n hotel = HotelModel(id, **dados)\n try:\n hotel.saveHotel()\n except:\n return {'message': 'Ocorreu um erro interno ao tentar gravar o registro.'}, 500\n\n return hotel.json()\n\n @jwt_required\n def put(self, id):\n dados = Hotel.argumentos.parse_args()\n hotelFind = HotelModel.findHotel(id)\n if hotelFind:\n hotelFind.updateHotel(**dados) #Alterando hotel\n hotelFind.saveHotel()\n return hotelFind.json(), 200 #Ok\n hotel = HotelModel(id, **dados)\n try:\n hotel.saveHotel()\n except:\n return {'message': 'Ocorreu um erro interno ao tentar gravar o registro.'}, 500 \n return hotel.json(), 201\n\n @jwt_required\n def delete(self, id):\n hotel = HotelModel.findHotel(id)\n if hotel:\n try:\n hotel.deleteHotel()\n except:\n return {'message': 'Ocorreu um erro interno ao tentar deletar o registro.'}, 500\n return {'message': 'Hotel deletado!'}\n return {'message': 'Hotel não encontrado'}, 404 
\n","sub_path":"resources/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166993894","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import SGD , Adam, RMSprop\nfrom tensorflow.keras.layers import Concatenate, Dense, LSTM, Input, concatenate\nfrom tensorflow.keras.layers import LeakyReLU \nfrom matplotlib import pyplot\nimport matplotlib.pyplot as plt\n\nimport pickle \nimport copy\nimport os\nimport gym\nfrom feedback import *\nimport time\nfrom collections import deque\nimport random\nfrom rl_dqn import DeepQNetwork\nfrom kinematic_model import Kinematic_Model\n\ntrial_no = '1'\nENV = \"CartPole-v0\"\nenv = gym.make(ENV)\n\nstate_dim = env.observation_space.shape[0]\naction_dim = env.action_space.n\n\n########################################################\n# RL Agent for Oracle\nweights_file = 'rldqn_cartpole.h5'\noracle = DeepQNetwork(state_dim, action_dim, 0.001, 0.95, 1, 0.001, 0.995 )\noracle.load_weights(weights_file)\n'''\nTEST_Episodes=10\nrewards = [] #Store rewards for graphing\n#Test the agent that was trained\n# In this section we ALWAYS use exploit don't train any more\nfor e_test in range(TEST_Episodes):\n state,done = env.reset(), False\n state = np.reshape(state, [1, state_dim])\n tot_rewards = 0\n while not done:\n env.render()\n action = oracle.test_action(state)\n KM_nstate = Kinematic_Model(state[0],action)\n nstate, reward, done, _ = env.step(action)\n nstate = np.reshape( nstate, [1, state_dim])\n print(\"KM_NS : {}\\t \\t true_NS: {} Diff {}\".format(KM_nstate, nstate[0],KM_nstate-nstate[0]))\n\n tot_rewards += reward\n #DON'T STORE ANYTHING DURING TESTING\n state = nstate\n #done: CartPole fell. 
\n #t_test == 209: CartPole stayed upright\n rewards.append(tot_rewards)\n print(\"episode: {}/{}, score: {}, e: {}\".format(e_test, TEST_Episodes, tot_rewards, 0))\n\nenv.close()\n\n'''\n\n\n########################################################\npause = 0 \nwhile pause == 1:\n pause = 1\n\n# This model maps an input to its next state\n# Input state\nAE_state = keras.Input(shape=(state_dim,),name=\"AE_state\")\n# 2layer neural network to predict the next state\nencoded = Dense(32,name=\"dense1_NS\")(AE_state)\nencoded = LeakyReLU(alpha=0.2,name=\"LeakyRelu1_NS\")(encoded)\nencoded = Dense(32,name=\"dense2_NS\")(encoded)\nencoded = LeakyReLU(alpha=0.2,name=\"LeakyRelu2_NS\")(encoded)\nn_state = layers.Dense(state_dim,name=\"dense3_NS\")(encoded)\nAE = keras.Model(inputs=AE_state, outputs=n_state,name=\"AE\")\n\n#print(AE.summary())\n#tf.keras.utils.plot_model(AE, to_file='AE_model_plot.png', show_shapes=True, show_layer_names=True)\n\nopt_AE = tf.keras.optimizers.RMSprop(learning_rate=0.00015)\nAE.compile(loss='mean_squared_error', optimizer=opt_AE, metrics=['mse'])\n\n\n\n\n\n# This model maps an input & action to its next state\n# Input state\ncurr_state = keras.Input(shape=(state_dim,),name=\"curr_state\")\ncurr_action = keras.Input(shape=(action_dim,),name=\"curr_action\")\n# FDM model\ncurr_state_action = concatenate([curr_state, curr_action])\nfdm_h1 = Dense(16,name=\"dense1_FDM\")(curr_state_action)\nfdm_h1 = LeakyReLU(alpha=0.2,name=\"LeakyRelu1_FDM\")(fdm_h1)\nfdm_h2 = Dense(16,name=\"dense2_FDM\")(fdm_h1)\nfdm_h2 = LeakyReLU(alpha=0.2,name=\"LeakyRelu2_FDM\")(fdm_h2)\nfdm_pred_state = layers.Dense(state_dim,name=\"dense3_FDM\")(fdm_h2)\nFDM = keras.Model(inputs=[curr_state,curr_action], outputs=fdm_pred_state,name=\"FDM\")\n\n#print(FDM.summary())\n#tf.keras.utils.plot_model(FDM, to_file='FDM_model_plot.png', show_shapes=True, show_layer_names=True)\n\nopt_FDM = tf.keras.optimizers.RMSprop(learning_rate=0.00015)\nFDM.compile(loss='mean_squared_error', optimizer=opt_FDM, metrics=['mse'])\n\npause = 0 \nwhile pause == 1:\n pause = 1\n\np_state = np.zeros((1,state_dim))\n# Action for FDM to sample from\nleft = np.zeros((1,action_dim))\nleft[0][0] = 1\nright = np.zeros((1,action_dim))\nright[0][1] = 1\n\nverbose = False\nimpove_FDM = True\n#Buffer for FMD online improvement\nFDM_buff_s =[]\nFDM_buff_a =[]\nFDM_buff_ns =[]\nFDM_loss = 1000.0\n\nAE_buff_s = []\nAE_buff_ns = []\nAE_loss = 1000.0\n\nfeedback_dict = {\n H_NULL: 0,\n H_UP: 0,\n H_DOWN: 0,\n H_LEFT: 1,\n H_RIGHT: 1,\n H_HOLD: 0,\n DO_NOTHING: 0\n }\n\n\n# set which game to play\nenv = gym.make('CartPole-v0')\nenv.reset()\nenv.render() # Make the environment visible\n\n# Initialise Human feedback (call render before this)\nhuman_feedback = Feedback(env)\n\nnum_steps = 3000\nsteps = 0\nEpisode = 1\ntotal_reward = []\nfeedback_rate = []\n\nstate = env.reset()\n#state = np.reshape(state, [-1, state_dim])\n\nprev_s = state\na = np.random.uniform(-1,1,action_dim)\n\nprint(\"3\")\ntime.sleep(1)\nprint(\"2\")\ntime.sleep(1)\nprint(\"1\")\ntime.sleep(1)\n\n\n\nwhile Episode < 50:\n obs, terminal = env.reset(), False\n prev_state = obs\n print(\"Episode# \",Episode)\n \n episode_rew = 0\n h_counter = 0\n t_counter = 0\n\n # Iterate over the episode\n while((not terminal) and (not human_feedback.ask_for_done()) ):\n \n env.render() # Make the environment visible\n time.sleep(0.1)\n \n # Get feedback signal\n h_fb = human_feedback.get_h()\n if h_fb == 1:\n print(\"Oracle\")\n oracle_action = oracle.test_action(np.reshape(obs, [1, 
state_dim]))\n h_fb = oracle_action + 3\n \n \n if (feedback_dict.get(h_fb) != 0): # if feedback is not zero i.e. is valid\n # Update policy\n #oracle_action = oracle.test_action(np.reshape(obs, [1, state_dim]))\n #h_fb = oracle_action + 3\n #print(\"Feedback\", h_fb)\n\n h_counter += 1\n\n # Get new state transition label using feedback\n state_corrected = copy.deepcopy(obs)\n if (h_fb == H_LEFT): # PUSH CART TO LEFT\n print(\"Feedback left\\t\",end =\" \")\n state_corrected = Kinematic_Model(state,0)\n #state_corrected[0] -= 0.01 # correction in pos\n #state_corrected[1] -= 0.2 # correction in vel\n #state_corrected[2] += 0.01 # correction in angle\n #state_corrected[3] += 0.27 # correction in anglar vel\n elif (h_fb == H_RIGHT):# PUSH CART TO RIGHT\n print(\"Feedback right\\t\",end =\" \")\n state_corrected = Kinematic_Model(state,1)\n #state_corrected[0] += 0.01 # correction in pos\n #state_corrected[1] += 0.2 # correction in vel\n #state_corrected[2] -= 0.01 # correction in angle\n #state_corrected[3] -= 0.27 # correction in anglar vel\n \n # Add state transition pair to demo buffer\n AE_buff_s.append(obs)\n AE_buff_ns.append(state_corrected)\n\n #print(\"State \",obs) \n #print(\"state_corrected \",state_corrected)\n pred_ns = np.reshape(state_corrected, [-1, state_dim])\n\n '''\n # Update policy (immediate)\n temp_s = np.array(p_state)\n temp_ns = np.array(state_corrected)\n history_AE = AE.fit(x=temp_s, y=temp_ns, verbose=1) \n\n\n # Train with batch from Demo buffer (if enough entries exist)\n num = len(AE_buff_s)\n print(\"num\",num)\n if(num >= 64): # batch size 64\n print(\"Training AE\")\n temp_s = np.array(AE_buff_s)\n temp_ns = np.array(AE_buff_ns)\n # do you need to run for 5 epochs\n history_AE = AE.fit(x=temp_s, y=temp_ns,batch_size=64,shuffle=True, verbose=1)\n '''\n\n else:\n # Use current policy\n print(\"Policy \\t\\t\",end =\" \")\n p_state = np.reshape(obs, [-1, state_dim])\n pred_ns = AE.predict(p_state)\n\n # Get action from ifdm\n FDM_ns_l = np.squeeze(FDM.predict([p_state,left]))\n FDM_ns_r = np.squeeze(FDM.predict([p_state,right]))\n FDM_ns_both = np.array([FDM_ns_l,FDM_ns_r])\n state_diff = np.abs(FDM_ns_both-pred_ns)\n cost = np.sum(state_diff,axis=1)\n action_from_IDM = np.argmin(cost, axis=0)\n action = action_from_IDM\n\n prev_state = obs\n obs, reward, terminal, _ = env.step(action)\n episode_rew += reward\n\n # Add state transition pair to demo buffer\n FDM_buff_s.append(prev_state)\n FDM_buff_a.append(action)\n FDM_buff_ns.append(obs)\n\n if verbose ==True:\n print(\"Curr state \",prev_state)\n print(\"True next state \",obs)\n print(\"AE pred Nstate \",pred_ns)\n print(\"FDM both next state \",FDM_ns_both)\n print(\"cost \",cost)\n print(\"partial cost \",par_cost)\n print(\"action from IDM full cost \",action_from_IDM)\n if action == 0:\n print(\"FDM left\") #0 Push cart to the left\n else:\n print(\"FDM right\") #1 Push cart to the right\n print(\"\")\n \n steps += 1\n t_counter+=1\n\n \n \n feedback_rate.append(h_counter/t_counter)\n total_reward.append(episode_rew)\n\n #print('Episode #%d Reward %d' % (Episode, episode_rew))\n print(\"## episode: {}, Reward: {}, h_counter: {}, t_counter: {}, feedback_rate: {} \".format(Episode, episode_rew,h_counter,t_counter,h_counter/t_counter))\n Episode +=1\n\n #Train Next State predictor\n # Train with batch from Demo buffer (if enough entries exist)\n num = len(AE_buff_s)\n if(num >= 64) and AE_loss > 0.0001: # batch size 64\n print(\"Training AE\")\n # do you need to run for 5 epochs\n history_AE = 
AE.fit(x=np.array(AE_buff_s), y=np.array(AE_buff_ns),batch_size=64,epochs=5,shuffle=True, verbose=False)\n AE_loss = history_AE.history['loss'][-1]\n print(\"AE loss\",AE_loss)\n\n\n if FDM_loss > 0.0001:\n print(\"Training FDM\")\n #Train FDM every episode,\n temp_s = np.array(FDM_buff_s)\n temp_a = np.zeros((len(FDM_buff_a),action_dim))\n for i in range(len(FDM_buff_a)):\n temp_a[i][FDM_buff_a[i]] =1\n history_FDM=FDM.fit(x=[temp_s,temp_a], y=np.array(FDM_buff_ns),epochs=5,batch_size=32, shuffle=True,verbose=False)\n FDM_loss = history_FDM.history['loss'][-1]\n print(\"FDM loss\",FDM_loss)\n\nfor i in range(Episode):\n print(\"episode: {}, Reward: {}\".format(i, total_reward[i-1]))\n\ntotal_reward =np.array(total_reward)\nrolling_average = np.convolve(total_reward, np.ones(100)/100)\nfeedback_rate = np.array(feedback_rate)\n\nplt.plot(total_reward)\nplt.plot(rolling_average, color='black')\nplt.plot(feedback_rate, color='green')\nplt.axhline(y=195, color='r', linestyle='-') #Solved Line\nplt.xlim( (0,Episode) )\nplt.ylim( (0,220) )\nplt.show()\n\n\nprint(\"Saving Reward\")\nfilename = 'log_files/'+trial_no+'/total_reward.npy'\npickle.dump(total_reward, open(filename, 'wb'))\nfilename = 'log_files/'+trial_no+'/feedback_rate.npy'\npickle.dump(feedback_rate, open(filename, 'wb'))\n\nAE.save('log_files/'+trial_no+'/AE')\nFDM.save('log_files/'+trial_no+'/FDM')\n\n'''\n\nelse: \n # save state, action, nstate\n filename = 'Data/State.npy'\n pickle.dump(s, open(filename, 'wb'))\n filename = 'Data/Action.npy'\n pickle.dump(a, open(filename, 'wb'))\n filename = 'Data/NState.npy'\n pickle.dump(ns, open(filename, 'wb'))\n filename = 'Data/Diff.npy'\n pickle.dump(d,open(filename,'wb'))\n'''","sub_path":"Cartpole/live_improve_v8a.py","file_name":"live_improve_v8a.py","file_ext":"py","file_size_in_byte":11460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577774077","text":"from apps.eur_basketball_spider.eur_playbyplay import *\nimport queue\nimport traceback\nfrom apps.send_error_msg import dingding_alter\nfrom apps.eur_basketball_spider.tools import *\nfrom orm_connection.eur_basketball import *\nfrom orm_connection.orm_session import MysqlSvr\nfrom common.libs.log import LogMgr\n\nlogger = LogMgr.get('eur_basketball_boxscore_live')\n\n\nclass EurLeagueSpider_boxscore(object):\n def __init__(self):\n self.data_queue_svr = queue.Queue()\n self.headers = {\n 'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36',\n }\n self.get_team_id = get_team_id_box()\n self.get_player_id = get_player_id_key()\n self.get_player_shirt_number = get_player_shirt_number()\n\n def start_requests(self, data_queue, gamecode):\n while True:\n time.sleep(10)\n try:\n seasoncode = 2019\n url = 'https://live.euroleague.net/api/Boxscore?gamecode=%s&seasoncode=E%s&disp=' % (\n gamecode, seasoncode)\n logger.info(url)\n boxscore_api_res = requests.get(url, headers=self.headers)\n if boxscore_api_res.text == '':\n logger.info('box比赛未开赛。。。 %s' % str(gamecode))\n else:\n boxscore_json_dict = json.loads(boxscore_api_res.text)\n boxscore_player = boxscore_json_dict['Stats']\n team_stats_list = []\n player_stats_list = []\n belong = 1\n for index in boxscore_player:\n BkMatchTeamStats = {}\n BkMatchTeamStats['belong'] = belong\n name_en = index['Team']\n BkMatchTeamStats['team_id'] = self.get_team_id[name_en.lower()]\n BkMatchTeamStats['team_name'] = name_en\n BkMatchTeamStats['three_point_goals'] 
= safe_get(index,'totr.FieldGoalsMade3')\n BkMatchTeamStats['three_point_field'] = safe_get(index,'totr.three_point_field')\n BkMatchTeamStats['goals'] = safe_get(index,'totr.FieldGoalsMade2') + BkMatchTeamStats['three_point_goals']\n BkMatchTeamStats['field'] = safe_get(index,'totr.FieldGoalsAttempted2') + BkMatchTeamStats['three_point_field']\n BkMatchTeamStats['free_throw_goals'] = safe_get(index,'totr.FreeThrowsMade')\n BkMatchTeamStats['free_throw_field'] = safe_get(index,'totr.FreeThrowsAttempted')\n BkMatchTeamStats['offensive_rebounds'] = safe_get(index,'totr.OffensiveRebounds')\n BkMatchTeamStats['defensive_rebounds'] = safe_get(index,'totr.DefensiveRebounds')\n BkMatchTeamStats['rebounds'] = safe_get(index,'totr.TotalRebounds')\n BkMatchTeamStats['assists'] = safe_get(index,'totr.Assistances')\n BkMatchTeamStats['steals'] = safe_get(index,'totr.Steals')\n BkMatchTeamStats['blocks'] = safe_get(index,'totr.BlocksFavour')\n BkMatchTeamStats['turnovers'] = safe_get(index,'totr.Turnovers')\n BkMatchTeamStats['personal_fouls'] = safe_get(index,'totr.FoulsCommited')\n BkMatchTeamStats['point'] = safe_get(index,'totr.Points')\n BkMatchTeamStats['score_difference'] = safe_get(index,'totr.Valuation')\n team_stats_list.append(BkMatchTeamStats)\n belong += 1\n for i in range(2):\n for home_player in boxscore_json_dict['Stats'][i]['PlayersStats']:\n player = {}\n player['belong'] = i + 1\n player['player_name'] = translate_player_name(home_player['Player'])\n player_key = home_player['Player_ID'][1:]\n id = safe_get(self.get_player_id,player_key.strip())\n if id == 0:\n url = 'https://www.euroleague.net/competition/players/showplayer?pcode=%s&seasoncode=E2019' % player_key\n player_res = requests.get(url, headers=self.headers)\n player_tree = tree_parse(player_res)\n player['sport_id'] = 2\n try:\n player['name_en'] = player_tree.xpath('//div[@class=\"name\"]/text()')[0]\n except:\n player['name_en'] = ''\n player['key'] = player_key\n try:\n player['logo'] = player_tree.xpath('//div[@class=\"player_img-img\"]/img/@src')[0]\n except:\n player['logo'] = ''\n logger.info('没有该球员的图片...')\n try:\n player['shirt_number'] = player_tree.xpath('//span[@class=\"dorsal\"]/text()')[0]\n except:\n player['shirt_number'] = 0\n try:\n position = \\\n player_tree.xpath(\n '//div[@class=\"summary-first\"]/span[last()]/span[last()]/text()')[\n 0]\n player['position'] = position.encode('utf-8').decode('utf-8')[0]\n except:\n player['position'] = ''\n if 'Height' in \\\n player_tree.xpath('//div[@class=\"summary-second\"]/span[1]/text()')[0].split(\n ':')[0]:\n player['height'] = float(\n player_tree.xpath('//div[@class=\"summary-second\"]/span[1]/text()')[0].split(\n ':')[-1]) * 100\n time_birthday = player_tree.xpath('//div[@class=\"summary-second\"]/span[2]/text()')[\n 0]\n player['birthday'], player['age'] = time_stamp(time_birthday)\n player['nationality'] = \\\n player_tree.xpath('//div[@class=\"summary-second\"]/span[last()]/text()')[\n 0].split(\n ':')[-1]\n else:\n player['height'] = 0\n time_birthday = player_tree.xpath('//div[@class=\"summary-second\"]/span[1]/text()')[\n 0]\n player['birthday'], player['age'] = time_stamp(time_birthday)\n player['nationality'] = \\\n player_tree.xpath('//div[@class=\"summary-second\"]/span[last()]/text()')[\n 0].split(':')[-1]\n try:\n player['name_zh'] = translate_dict[player['name_en']]\n except:\n player['name_zh'] = ''\n data = {\n 'key': player['key'],\n 'name_en': player['name_en'],\n 'name_zh': player['name_zh'],\n 'sport_id': player['sport_id'],\n 'age': 
player['age'],\n 'birthday': player['birthday'],\n 'nationality': player['nationality'],\n 'height': player['height'],\n 'shirt_number': player['shirt_number'],\n 'position': player['position'],\n }\n spx_dev_session = MysqlSvr.get('spider_zl')\n BleagueEurBasketballPlayer.upsert(\n spx_dev_session,\n 'key',\n data\n )\n player['player_id'] = self.get_player_id[player_key.strip()]\n player['shirt_number'] = self.get_player_shirt_number[player_key.strip()]\n times = home_player['Minutes']\n if times != 'DNP':\n player['enter_ground'] = 1\n minutes = times.split(':')[0]\n seconds = times.split(':')[-1]\n if int(seconds) >= 30:\n player['minutes'] = int(minutes) + 1\n else:\n player['minutes'] = int(minutes)\n else:\n player['enter_ground'] = 0\n player['minutes'] = 0\n player['two_points_goals'] = home_player['FieldGoalsMade2']\n player['two_points_goals'] = safe_get(home_player,'FieldGoalsMade2')\n player['two_points_total'] = safe_get(home_player,'FieldGoalsAttempted2')\n player['three_point_goals'] = safe_get(home_player,'FieldGoalsMade3')\n player['three_point_field'] = safe_get(home_player,'FieldGoalsAttempted3')\n player['goals'] = int(player['two_points_goals']) + int(player['three_point_goals'])\n player['field'] = int(player['two_points_total']) + int(player['three_point_field'])\n player['free_throw_goals'] = safe_get(home_player,'FreeThrowsMade')\n player['free_throw_field'] = safe_get(home_player,'FreeThrowsAttempted')\n player['offensive_rebounds'] = safe_get(home_player,'OffensiveRebounds')\n player['defensive_rebounds'] = safe_get(home_player,'DefensiveRebounds')\n player['rebounds'] = safe_get(home_player,'TotalRebounds')\n player['assists'] = safe_get(home_player,'Assistances')\n player['steals'] = safe_get(home_player,'Steals')\n player['blocks'] = safe_get(home_player,'BlocksFavour')\n player['turnovers'] = safe_get(home_player,'Turnovers')\n player['personal_fouls'] = safe_get(home_player,'FoulsCommited')\n player['score_difference'] = safe_get(home_player,'Valuation')\n player['point'] = safe_get(home_player,'Points')\n player['first_publish'] = safe_get(home_player,'IsStarter')\n first = safe_get(home_player,'IsPlaying')\n if first == 1:\n player['on_ground'] = 0\n elif first == 0:\n player['on_ground'] = 1\n else:\n player['on_ground'] = 0\n player_data = {\n 'belong': int(player['belong']),\n 'player_id': int(player['player_id']),\n 'player_name': player['player_name'],\n 'minutes': int(player['minutes']),\n 'goals': int(player['goals']),\n 'field': int(player['field']),\n 'three_point_goals': int(player['three_point_goals']),\n 'three_point_field': int(player['three_point_field']),\n 'free_throw_goals': int(player['free_throw_goals']),\n 'free_throw_field': int(player['free_throw_field']),\n 'offensive_rebounds': int(player['offensive_rebounds']),\n 'defensive_rebounds': int(player['defensive_rebounds']),\n 'rebounds': int(player['rebounds']),\n 'assists': int(player['assists']),\n 'steals': int(player['steals']),\n 'blocks': int(player['blocks']),\n 'turnovers': int(player['turnovers']),\n 'personal_fouls': int(player['personal_fouls']),\n 'score_difference': int(player['score_difference']),\n 'point': int(player['point']),\n 'first_publish': int(player['first_publish']),\n 'enter_ground': int(player['enter_ground']),\n 'on_ground': int(player['on_ground']),\n 'shirt_number': int(player['shirt_number']),\n }\n player_stats_list.append(player_data)\n match_id = int(str(13) + '0000') + int(gamecode)\n match_data_boxscore = {'match': {'id': int(match_id),\n 
'basketball_items': {\n 'player_stat': {\n 'items': player_stats_list},\n 'team_stat': {'items': team_stats_list}\n }}}\n if player_stats_list:\n data_queue.put(match_data_boxscore)\n logger.info('球员技术统计推送完成。。。 %s' % str(gamecode))\n minutes_team = boxscore_json_dict['Stats'][0]['totr']['Minutes']\n if minutes_team == '225:00' or minutes_team == '200:00':\n break\n except:\n dingding_alter(traceback.format_exc())\n logger.error(traceback.format_exc())\n continue\n","sub_path":"apps/eur_basketball_spider/eur_boxscore.py","file_name":"eur_boxscore.py","file_ext":"py","file_size_in_byte":14940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"91162491","text":"import numpy as np\n\ndef getTestFold(data, idx):\n ends = [1273, 2225, 2501, 3482, 3888, 4387, 5343, 6051, 6606, 7702, 8215, 9527, 10068, 11265, 11346]\n\n start = 0\n if (idx > 0):\n start = ends[idx - 1]\n end = ends[idx]\n\n return data[start:end]\n\n\ndef getTrainFold(data, idx):\n ends = [1273, 2225, 2501, 3482, 3888, 4387, 5343, 6051, 6606, 7702, 8215, 9527, 10068, 11265, 11346]\n\n start = 0\n if (idx > 0):\n start = ends[idx - 1]\n end = ends[idx]\n\n take = data[:]\n take = np.delete(take, list(range(start, end)), axis=0)\n\n return take\n\n\ndef getFold(data, idx):\n ends = [1273, 2225, 2501, 3482, 3888, 4387, 5343, 6051, 6606, 7702, 8215, 9527, 10068, 11265, 11346]\n\n start = 0\n if (idx > 0):\n start = ends[idx - 1]\n end = ends[idx]\n\n return data[start:end]\n\n\ndef getFolds(data, foldsIndices):\n take = getFold(data, foldsIndices[0])\n\n for i in range(1, len(foldsIndices)):\n idx = foldsIndices[i]\n take = np.concatenate((take, getFold(data, idx)), axis=0)\n\n return take\n\n\ndef getSubTrainFold(data, idx):\n ends = [1273, 2225, 2501, 3482, 3888, 4387, 5343, 6051, 6606, 7702, 8215, 9527, 10068, 11265, 11346]\n\n remove = idx\n remove2 = idx - 1\n if (remove2 < 0):\n remove2 = 14\n\n indices = []\n for i in range(15):\n if (i != remove and i != remove2):\n indices.append(i)\n\n take = getFold(data, indices[0])\n\n for i in indices[1:]:\n take = np.concatenate((take, getFold(data, i)), axis=0)\n\n return take\n\n\ndef getSubTestFold(data, idx):\n ends = [1273, 2225, 2501, 3482, 3888, 4387, 5343, 6051, 6606, 7702, 8215, 9527, 10068, 11265, 11346]\n\n idx -= 1\n if (idx < 0):\n idx = 14\n\n take = getFold(data, idx)\n\n return take\n\n\ndef loadLines(path):\n with open(path, \"r\") as f:\n lines = [l.strip(\"\\r\\n\") for l in f.readlines()]\n\n return lines\n\n\ndef loadPlainMatrix(path):\n lines = loadLines(path)\n\n gt = []\n for l in lines:\n parts = l.split(\" \")\n current = []\n for p in parts:\n if (p != \"\"):\n current.append(float(p))\n gt.append(current)\n\n return np.array(gt)\n\n\ndef dotProduct(v1, v2):\n return np.sum(np.multiply(v1, v2))\n\n\ndef calculateAngle(v1, v2):\n return 180.0 * np.arccos(dotProduct(v1, v2) / np.sqrt(dotProduct(v1, v1) * dotProduct(v2, v2))) / np.pi\n\n\ndef calculateAngularStatistics(angles1, angles2):\n n = angles1.shape[0]\n\n angles = []\n for i in range(n):\n angles.append(calculateAngle(angles1[i, :], angles2[i, :]))\n\n return (np.mean(angles), np.median(angles), np.max(angles))\n\n\ndef getBatches(data, batchSize=64):\n from math import ceil\n\n n = int(ceil(float(len(data)) / batchSize))\n\n batches = []\n\n for i in range(n):\n currentIdx = i * batchSize\n nextIdx = (i + 1) * batchSize\n batches.append(data[currentIdx:nextIdx, :])\n\n return 
batches\n","sub_path":"tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425418027","text":"class UserMessage:\n def __init__(self, ical_url, id):\n self.ical_url = ical_url\n self.id = id\n\n\nclass ScheduleRequestMessage:\n def __init__(self, length, date_range, time_range, ideal_days=0, ideal_times=0):\n self.length = length\n self.date_range = date_range\n self.time_range = time_range\n self.ideal_days = ideal_days\n self.ideal_times = ideal_times","sub_path":"Messages.py","file_name":"Messages.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154191628","text":"from flexmock import flexmock\nfrom utils import utils\nfrom infrastructure_manager import *\ntry:\n from unittest import TestCase\nexcept ImportError:\n from unittest.case import TestCase\n\nclass TestInfrastructureManager(TestCase):\n def setUp(self):\n flexmock(utils).should_receive('get_secret').and_return('secret')\n\n def tearDown(self):\n flexmock(utils).should_receive('get_secret').reset()\n\n def test_initialize(self):\n i = InfrastructureManager()\n self.assertEquals('secret', i.secret)\n\n def test_describe_instances(self):\n i = InfrastructureManager()\n params1 = {}\n result1 = i.describe_instances(params1, 'secret1')\n self.assertFalse(result1['success'])\n self.assertEquals(result1['reason'],\n InfrastructureManager.REASON_BAD_SECRET)\n\n # test the scenario where we fail to give describe_instances a\n # reservation id\n params2 = {}\n result2 = i.describe_instances(params2, 'secret')\n self.assertFalse(result2['success'])\n self.assertEquals(result2['reason'],\n 'no ' + InfrastructureManager.PARAM_RESERVATION_ID)\n\n # test what happens when a caller fails to give describe instances\n # a reservation id that's in the system\n params3 = {InfrastructureManager.PARAM_RESERVATION_ID: 'boo'}\n result3 = i.describe_instances(params3, 'secret')\n self.assertFalse(result3['success'])\n self.assertEquals(result3['reason'],\n InfrastructureManager.REASON_RESERVATION_NOT_FOUND)\n\n # test what happens when a caller gives describe_instances a reservation\n # id that is in the system\n id = '0000000000'\n params4 = {InfrastructureManager.PARAM_RESERVATION_ID: id}\n vm_info = {\n 'public_ips': ['public-ip'],\n 'private_ips': ['private-ip'],\n 'instance_ids': ['i-id']\n }\n status_info = {\n 'success': True,\n 'reason': 'received run request',\n 'state': InfrastructureManager.STATE_RUNNING,\n 'vm_info': vm_info\n }\n i.reservations.put(id, status_info)\n result4 = i.reservations.get(id)\n self.assertEquals(result4, i.describe_instances(params4, \"secret\"))\n\n params5 = json.dumps(params4)\n self.assertEquals(result4, i.describe_instances(params5, \"secret\"))\n\n try:\n i.describe_instances('foo', 'bar')\n self.fail('Must throw an exception')\n except Exception:\n pass\n\n try:\n i.describe_instances({'reservation_id': 'foo'}, {})\n self.fail('Must throw an exception')\n except TypeError:\n pass\n\n def test_run_instances(self):\n i = InfrastructureManager()\n\n params1 = {}\n result1 = i.run_instances(params1, 'secret1')\n self.assertFalse(result1['success'])\n self.assertEquals(result1['reason'],\n InfrastructureManager.REASON_BAD_SECRET)\n\n params2 = {}\n result2 = i.run_instances(params2, 'secret')\n self.assertFalse(result2['success'])\n self.assertEquals(result2['reason'], 'no 
infrastructure')\n\n params3 = {'infrastructure': 'ec2'}\n result3 = i.run_instances(params3, 'secret')\n self.assertFalse(result3['success'])\n self.assertEquals(result3['reason'], 'no num_vms')\n\n params4 = {'infrastructure': 'ec2', 'num_vms': 0}\n result4 = i.run_instances(params4, 'secret')\n self.assertFalse(result4['success'])\n self.assertEquals(result4['reason'],\n InfrastructureManager.REASON_BAD_VM_COUNT)\n\n try:\n result5 = i.run_instances('foo', 'bar')\n self.fail('Must throw an exception')\n except Exception:\n pass\n\n def test_terminate_instances(self):\n i = InfrastructureManager()\n\n params1 = {}\n result1 = i.terminate_instances(params1, 'secret1')\n self.assertFalse(result1['success'])\n self.assertEquals(result1['reason'],\n InfrastructureManager.REASON_BAD_SECRET)\n\n params2 = {}\n result2 = i.terminate_instances(params2, 'secret')\n self.assertFalse(result2['success'])\n self.assertEquals(result2['reason'], 'no infrastructure')\n\n try:\n result3 = i.terminate_instances('foo', 'bar')\n self.fail('Must throw an exception')\n except Exception:\n pass\n","sub_path":"InfrastructureManager/tests/test_infrastructure_manager.py","file_name":"test_infrastructure_manager.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523893301","text":"import spacy\nfrom nltk.stem import SnowballStemmer\n\nfrom etl.scholarship import AcademicLevel\n\n\nnlp = spacy.load('es_core_news_md')\nstemmer = SnowballStemmer('spanish')\n\n\ndef calculate_academic_level(item: dict):\n if 'description' not in item:\n return item\n\n item['academicLevel'] = match_academic_level(item['description']).value\n\n return item\n\n\ndef match_academic_level(raw_text):\n doc = first_sentence(raw_text)\n word = subtree_matcher(doc)[1]\n\n if word is None:\n return AcademicLevel.OTHERS\n\n for child in word.children:\n if child.dep_ == 'nmod':\n word = child\n break\n\n return academic_level(word.text)\n\n\ndef first_sentence(text):\n doc = nlp(text)\n return list(doc.sents)[0]\n\n\ndef subtree_matcher(doc):\n subject_ = None\n object_ = None\n\n for token in doc:\n if token.dep_ == 'nsubj':\n subject_ = token\n\n if token.dep_ == 'obj':\n object_ = token\n\n if subject_ and object_:\n break\n\n return (subject_, object_)\n\n\ndef academic_level(token):\n token = stemmer.stem(token)\n\n if is_undergraduate(token):\n return AcademicLevel.UNDERGRADUATE\n\n if is_postgraduate(token):\n return AcademicLevel.POSTGRADUATE\n\n return AcademicLevel.OTHERS\n\n\ndef is_undergraduate(token):\n words = ['estudiantes', 'bachilleres']\n words = map(stemmer.stem, words)\n return token in set(words)\n\n\ndef is_postgraduate(token):\n words = ['profesionales', 'doctorado', 'maestria', 'postdoctorado', 'posdoctorado']\n words = map(stemmer.stem, words)\n return token in set(words)\n","sub_path":"etl/icetex/tasks/calculate_academic_level.py","file_name":"calculate_academic_level.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74029445","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author : Lysander Tseng\n# Email : qingshan.tseng@gmail.com\n# Time : 2020/2/18 14:48\n# User : Magic\n# Product : PyCharm\n# Project : 030\n# File : 030.3.py\n# Intro : 编写一个程序,用户输入开始搜索的路径,\n# 查找该路径下(包含子文件夹内)所有的视频格式文件\n# (要求查找mp4, rmvb, avi的格式即可),并创建一\n# 个文件(videoList.txt)存放所有找到的文件的路径\nimport os # 引入os模块\n\n\ndef format_find(this_path, ext_list, 
target_path):\n \"\"\"定义一个格式查找函数,用于查找指定目录中指定拓展名的文件并将其路径记录在一个文本文件中\"\"\"\n\n path_list = []\n for i in os.walk(this_path):\n for j in i[2]:\n for k in ext_list:\n if k in j:\n path_list.append(i[0] + '\\\\' + j)\n else:\n path_str = '\\n\\n'.join(path_list)\n with open(target_path, 'w', encoding='utf-8') as f:\n f.write(path_str)\n print(\"保存成功!\")\n\n\ndef path_trans(raw_path):\n \"\"\"定义一个路径字符串转换函数,用于将当前操作系统中的路径字符串转换成标准路径\"\"\"\n\n path_list = raw_path.split('\\\\') # 将传入的原始路径字符串以\\为标志切割并保存在列表path_list中\n standard_path = os.sep.join(path_list) # 再将os.sep插入到每个元素之间并将其重新拼接成标准字符串\n return standard_path\n\n\nactual_this_path = path_trans(input(\"请输入待查找的初始目录:\"))\nactual_ext_list = input(\"请输入需要查找的拓展名[以','号间隔]:\").split(',')\nactual_target_path = path_trans(input(\"请输入需要保存的路径:\"))\n\nformat_find(actual_this_path, actual_ext_list, actual_target_path)\n","sub_path":"Python/Homework/030/030.3.py","file_name":"030.3.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618563410","text":"import os\nimport random\nimport time\nimport json\nimport torch\nimport torchvision\nimport numpy as np\nimport pandas as pd\nimport warnings\nfrom datetime import datetime\nfrom torch import nn, optim\nfrom collections import OrderedDict\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom sklearn.model_selection import train_test_split, StratifiedKFold\nfrom timeit import default_timer as timer\nfrom IPython import embed\nfrom pipelineTrain import *\nfrom MosNet import *\nfrom Xception import Xception\nfrom configTrain import configTrain\n\n# set cudnn\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = configTrain.gpus\n# torch.backends.cudnn.benchmark = True\n# warnings.filterwarnings('ignore')\nrandom.seed(configTrain.seed)\nnp.random.seed(configTrain.seed)\ntorch.manual_seed(configTrain.seed)\ntorch.cuda.manual_seed_all(configTrain.seed)\n\n\ndef main():\n fold = 0\n # mkdirs\n if not os.path.exists(configTrain.weights):\n os.mkdir(configTrain.weights)\n if not os.path.exists(configTrain.best_models):\n os.mkdir(configTrain.best_models)\n if not os.path.exists(configTrain.logs):\n os.mkdir(configTrain.logs)\n if not os.path.exists(configTrain.weights + configTrain.model_name + os.sep + str(fold) + os.sep):\n os.makedirs(configTrain.weights + configTrain.model_name + os.sep + str(fold) + os.sep)\n if not os.path.exists(configTrain.best_models + configTrain.model_name + os.sep + str(fold) + os.sep):\n os.makedirs(configTrain.best_models + configTrain.model_name + os.sep + str(fold) + os.sep)\n # get model and optimizer\n # model = MosNet()\n model = Xception()\n # model = torch.nn.DataParallel(model)\n model.cuda()\n\n # set optimizer and loss function\n optimizer = optim.Adam(model.parameters(), lr=configTrain.lr, amsgrad=False, weight_decay=configTrain.weight_decay)\n criterion = nn.CrossEntropyLoss().cuda()\n\n # some parameters\n start_epoch = 0\n best_precision1 = 0\n best_precision_save = 0\n resume = False\n\n # restart the training process\n if resume:\n checkpoint = torch.load(configTrain.best_models + str(fold) + \"/model_best.pth.tar\")\n start_epoch = checkpoint[\"epoch\"]\n fold = checkpoint[\"fold\"]\n best_precision1 = checkpoint[\"best_precision1\"]\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n\n # read image files\n train_data_list = get_files(configTrain.train_data, \"train\")\n val_data_list = 
get_files(configTrain.val_data, \"val\")\n\n # load train_data and validation_data with DataLoader\n train_dataloader = DataLoader(FaceDataset(train_data_list), batch_size=configTrain.batch_size, shuffle=True,\n pin_memory=True)\n val_dataloader = DataLoader(FaceDataset(val_data_list, train=False), batch_size=configTrain.batch_size * 2,\n shuffle=True, pin_memory=False)\n\n # Decays the learning rate by gamma every step_size epochs\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=configTrain.stepLR_size, gamma=0.1)\n\n # define metrics for loss and accuracy\n train_losses = AverageMeter()\n train_top1 = AverageMeter()\n valid_loss = [np.inf, 0, 0]\n\n # set the module in training mode\n model.train()\n\n # train\n for epoch in range(start_epoch, configTrain.epochs):\n scheduler.step(epoch)\n train_progressor = ProgressBar(mode=\"Train\", epoch=epoch, total_epoch=configTrain.epochs,\n model_name=configTrain.model_name, total=len(train_dataloader))\n # global iter\n for iter, (input, target) in enumerate(train_dataloader):\n # switch to continue train process\n train_progressor.current = iter\n\n # get loss and accuracy\n model.train()\n input = Variable(input).cuda()\n target = Variable(torch.from_numpy(np.array(target)).long()).cuda()\n output = model(input)\n loss = criterion(output, target)\n\n precision1_train, precision2_train = accuracy(output, target, topk=(1, 2))\n\n # update batch's loss and accuracy to total\n train_losses.update(loss.item(), input.size(0))\n train_top1.update(precision1_train[0], input.size(0))\n\n # set loss and accuracy to progress bar\n train_progressor.current_loss = train_losses.avg\n train_progressor.current_top1 = train_top1.avg\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # progress bar vision\n train_progressor()\n train_progressor.done()\n\n # evaluate every epoch\n valid_loss_accuracy = evaluate(val_dataloader, model, criterion, epoch)\n\n # determine whether to save model\n is_best = valid_loss_accuracy[1] > best_precision1\n\n # save the best precision\n best_precision1 = max(valid_loss_accuracy[1], best_precision1)\n try:\n best_precision_save = best_precision1.cpu().data.numpy()\n except:\n pass\n save_checkpoint({\n \"epoch\": epoch + 1,\n \"model_name\": configTrain.model_name,\n \"state_dict\": model.state_dict(),\n \"best_precision1\": best_precision1,\n \"optimizer\": optimizer.state_dict(),\n \"fold\": fold,\n \"valid_loss\": valid_loss_accuracy,\n }, is_best, fold)\n\n\nif __name__ ==\"__main__\":\n main()\n\n\n","sub_path":"models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288072760","text":"\n\nfrom xai.brain.wordbase.nouns._mania import _MANIA\n\n#calss header\nclass _MANIAS(_MANIA, ):\n\tdef __init__(self,): \n\t\t_MANIA.__init__(self)\n\t\tself.name = \"MANIAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mania\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_manias.py","file_name":"_manias.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"209459872","text":"import cv2\n\ndef kaze_match(im1_path, im2_path):\n # load the image and convert it to grayscale\n im1 = cv2.imread(im1_path)\n im2 = cv2.imread(im2_path)\n gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n\n # initialize the AKAZE descriptor, 
then detect keypoints and extract\n    # local invariant descriptors from the image\n    detector = cv2.AKAZE_create()\n    (kps1, descs1) = detector.detectAndCompute(gray1, None)\n    (kps2, descs2) = detector.detectAndCompute(gray2, None)\n\n    print(\"keypoints: {}, descriptors: {}\".format(len(kps1), descs1.shape))\n    print(\"keypoints: {}, descriptors: {}\".format(len(kps2), descs2.shape))\n\n    # Match the features\n    bf = cv2.BFMatcher(cv2.NORM_HAMMING)\n    # matches = bf.match(descs1, descs2)\n    matches = bf.knnMatch(descs1, descs2, k=2)  # typo fixed\n    # print(matches)\n\n    # Apply ratio test\n    good = []\n    for m, n in matches:\n\n        if m.distance < 0.9*n.distance:\n            good.append([m])\n\n    # cv2.drawMatchesKnn expects list of lists as matches.\n    im3 = cv2.drawMatchesKnn(im1, kps1, im2, kps2, good[1:20], None, flags=2)\n    cv2.imshow(\"AKAZE matching\", im3)\n    cv2.waitKey(0)\n\nkaze_match('img/graf1.png', 'img/graf3.png')","sub_path":"mytask1/akaze_test2.py","file_name":"akaze_test2.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548414577","text":"from flask import Flask, render_template, redirect, request\nimport os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom pytz import timezone\nfrom datetime import datetime\napp=Flask(__name__)\nengine=create_engine(os.getenv(\"DATABASE_URL\"))\ndb=scoped_session(sessionmaker(bind=engine))\n\n\n@app.route(\"/\")\n\ndef index():\n\treturn render_template(\"index.html\")\n\n@app.route(\"/<query>\")\ndef query(query):\n\ttemplate=\"
<h1>Authors</h1><ul>\"\n\tauthors=db.execute(\"SELECT * FROM authors WHERE name LIKE :query\", {\"query\": query+\"%\"}).fetchall()\n\tbooks=db.execute(\"SELECT * FROM books WHERE name LIKE :query\", {\"query\": query+\"%\"}).fetchall()\n\tif authors:\n\t\tfor i in authors:\n\t\t\ttemplate+='<li><a href=\"/author?q='+str(i[0])+'\">'+str(i[1])+\"</a></li>\"\n\ttemplate+=\"</ul><h1>Books!</h1><ul>\"\n\tif books:\n\t\tfor i in books:\n\t\t\ttemplate+='<li><a href=\"/book?q='+str(i[0])+'\">'+str(i[1])+\", \"+str(i[3])+\"</a></li>\"\n\n\treturn template\n@app.route(\"/book\")\ndef book():\n\tid=int(request.args.get(\"q\"))\n\tbook=db.execute(\"SELECT * FROM books WHERE id=:id\", {\"id\":id}).fetchall()\n\tauthor=db.execute(\"SELECT * FROM authors WHERE id=:author_id\", {\"author_id\": book[0][2]}).fetchall()\n\treturn render_template(\"book.html\", book=book[0], author=author[0])\n@app.route(\"/author\")\ndef author():\n\tid=request.args.get(\"q\")\n\tid=int(id)\n\tauthor=db.execute(\"SELECT * FROM authors WHERE id=:id\", {\"id\":id}).fetchall()\n\tbooks=db.execute(\"SELECT * FROM books WHERE author_id=:id\", {\"id\": author[0][0]}).fetchall()\n\treturn render_template(\"author.html\", author=author, books=books)\n@app.route(\"/create_book\", methods=[\"GET\", \"POST\"])\ndef create_book():\n\tif request.method==\"GET\":\n\t\treturn render_template(\"add_books.html\")\n\telse:\n\t\tname=request.form.get(\"name\")\n\t\tauthor=request.form.get(\"author\")\n\t\tyear=request.form.get(\"year\")\n\t\n\t\tif not name or not author or not year:\n\t\t\treturn render_template(\"add_books.html\")\n\t\ttry:\n\t\t\ty=int(year)\n\t\t\tif y<0:\n\t\t\t\treturn render_template(\"add_books.html\")\n\t\t\tif y>datetime.now(timezone(\"US/Eastern\")).year:\n\t\t\t\treturn render_template(\"add_books.html\")\n\n\t\texcept:\n\t\t\treturn render_template(\"add_books.html\")\n\t\tauthor_id=db.execute(\"SELECT * FROM authors WHERE name=:author\", {\"author\": author}).fetchall()\n\t\tif len(author_id)==0:\n\t\t\treturn render_template(\"add_books.html\")\n\t\tsame=db.execute(\"SELECT * FROM books WHERE name=:name AND author_id=:author_id AND year=:year\",{\"name\":name, \"author_id\": author_id[0][0], \"year\":year}).fetchall()\n\t\tif len(same)!=0:\n\t\t\treturn render_template(\"add_books.html\")\n\t\tdb.execute(\"INSERT INTO books(name, author_id, year) VALUES(:name, :author_id, :year)\", {\"name\": name, \"author_id\": author_id[0][0], \"year\": year})\n\t\tdb.commit()\n\t\treturn redirect(\"/\")\n\n@app.route(\"/create_author\", methods=[\"GET\", \"POST\"])\ndef create_author():\n\tif request.method==\"GET\":\n\t\treturn render_template(\"add_authors.html\")\n\tname=request.form.get(\"name\")\n\tif not name:\n\t\treturn render_template(\"add_authors.html\")\n\tsame= db.execute(\"SELECT * FROM authors WHERE name=:name\", {\"name\": name}).fetchall()\n\tif len(same)!=0:\n\t\treturn render_template(\"add_authors.html\")\n\tdb.execute(\"INSERT INTO authors(name) VALUES(:name)\", {\"name\": name})\n\tdb.commit()\n\treturn redirect(\"/\")\n@app.route(\"/delete_author\")\ndef delete_author():\n\tid=int(request.args.get(\"q\"))\n\tdb.execute(\"DELETE FROM books WHERE author_id=:id\", {\"id\": id})\n\tdb.execute(\"DELETE FROM authors WHERE id=:id\", {\"id\": id})\n\tdb.commit()\n\treturn redirect(\"/\")\n\n@app.route(\"/delete_book\")\ndef delete_book():\n\tid=int(request.args.get(\"q\"))\n\tdb.execute(\"DELETE FROM books WHERE id=:id\", {\"id\":id})\n\tdb.commit()\n\treturn redirect(\"/\")\n\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424257622","text":"\nactive = \"#45ABc5\"\ngreen = \"#00ff00\"\nerror = \"#ff0000\"\nalert = \"#fa8700\"\nwarn = \"#faf700\"\nwhite = \"#ffffff\"\n\n\nforeground = \"#858585\"\nbackground = \"#202020\"\n\nsep = \" \"\n\nws_active = \"#3e6700\"\nws_active_txt = \"#cccccc\"\nws_urgent = alert\nws_inactive = \"#303030\"\nws_inactive_txt = 
\"#888888\"\nws_sep_col = background\n\n\ninterval = 0.2\nwifi_name = \"wlp3s0\"\n\ndzen_args = [\"-dock\", \"-ta\", \"l\", \"-fn\", \"monospace:bold:size=10\", \\\n \"-bg\", background, \"-fg\", foreground, \"-h\", \"15\"]\n","sub_path":"i3/.pybar/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"227752097","text":"import json\nimport pprint\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nvectorizer = TfidfVectorizer()\n\n\npp = pprint.PrettyPrinter(indent=4)\nwith open('dev-v1.1.json') as json_file:\n data = json.load(json_file)\n\n # print(data['data'][0]['paragraphs'][0]['qas'][0]['answers'][0]['text'].keys())\n Database = []\n for articles in data['data']:\n article = {}\n article['Q'] = []\n for index, paragraph in enumerate(articles['paragraphs']):\n article[index] = paragraph['context']\n for question in paragraph['qas']:\n article['Q'].append([question['question'],index])\n Database.append(article)\n break\n\n resultList = {}\n for index in range(1,15):\n resultList[index] = []\n numCorrect = 0\n num = 0\n for article in Database:\n Questions = article['Q']\n del article['Q']\n paragraphs = [article[ele] for ele in article.keys()]\n paragraphs.insert(0,'')\n for question in Questions:\n paragraphs[0] = question[0]\n X = vectorizer.fit_transform(paragraphs)\n simi = cosine_similarity(X[0:1], X)\n simi[0][0] = 0\n correct = simi[0][question[1]+1]\n\n\n simi[0].sort()\n reverse = simi[0][::-1]\n mean = sum(reverse[0:10])/10\n\n # if (correct - mean)>0.05:\n # numCorrect +=1\n num+=1\n\n # print(top5)\n # resultList.append( 1 if np.argmax(simi) == question[1]+1 else 0)\n # plt.ylabel('cosine similarity')\n # # plt.xlabel('rank')\n for i in range(0,10):\n if reverse[i] == correct and i==0:\n numCorrect+=1\n # # plt.plot(i, reverse[i], 'ro',label = 'correct')\n # else:\n # # plt.plot(i, reverse[i], 'bo')\n #\n # plt.legend()\n #\n # plt.show()\n\n print(num,numCorrect,numCorrect/num)\n\n\n\n\n\n","sub_path":"explore_squad.py","file_name":"explore_squad.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"582776813","text":"import glob\n\nimport pytest\n\nimport archetypal as ar\n\n# configure archetypal\nar.config(log_console=True, log_file=True, use_cache=True,\n data_folder='.temp/data', logs_folder='.temp/logs',\n imgs_folder='.temp/imgs', cache_folder='.temp/cache',\n umitemplate='../data/BostonTemplateLibrary.json')\n\n# # Uncomment this block to test different file variations\n# files_to_try = ['./input_data/problematic/*.idf',\n# './input_data/regular/*.idf',\n# './input_data/umi_samples/*.idf']\n# ids = ['problematic', 'regular', 'umi_samples']\n\nfiles_to_try = ['./input_data/regular/*.idf']\nids = ['regular']\n\n\n@pytest.fixture(scope='module', params=files_to_try, ids=ids)\ndef template(fresh_start, request):\n \"\"\"Instantiate an umi template placeholder. 
Calls in the fresh_start\n function to clear the cache folder\"\"\"\n idf = glob.glob(request.param)\n idf = ar.copy_file(idf)\n # idf = './input_data/AdultEducationCenter.idf'\n wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'\n a = ar.Template(idf, wf)\n\n yield a\n\n\n@pytest.fixture(scope='module')\ndef test_template_withcache():\n \"\"\"Instantiate an umi template placeholder. Does note call fresh_start\n function so that caching can be used\"\"\"\n idf = glob.glob('/Users/samuelduchesne/Dropbox/Polytechnique/Doc/software'\n '/archetypal-dev/data/necb/NECB_2011_Montreal_idf/*.idf')\n idf = ar.copy_file(idf)\n # idf = './input_data/AdultEducationCenter.idf'\n wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'\n a = ar.Template(idf, wf)\n\n yield a\n\n\n@pytest.fixture(scope='module')\ndef sql(test_template_withcache):\n sql = test_template_withcache.run_eplus(silent=False, processors=-1)\n yield sql\n\n\ndef test_materials_gas(template):\n template.materials_gas = ar.materials_gas(template.idfs)\n assert not template.materials_gas.empty\n\n\ndef test_materials_glazing(template):\n template.materials_glazing = ar.materials_glazing(template.idfs)\n template.materials_glazing = ar.newrange(template.materials_gas,\n template.materials_glazing)\n return template.materials_glazing\n\n\ndef test_materials_opaque(template):\n template.materials_opaque = ar.materials_opaque(template.idfs)\n template.materials_opaque = ar.newrange(template.materials_glazing,\n template.materials_opaque)\n return template.materials_opaque\n\n\ndef test_constructions_opaque(template):\n template.constructions_opaque = ar.constructions_opaque(template.idfs,\n template.materials_opaque)\n template.constructions_opaque = ar.newrange(template.materials_opaque,\n template.constructions_opaque)\n return template.constructions_opaque\n\n\ndef test_constructions_windows(template):\n template.constructions_windows = ar.constructions_windows(template.idfs,\n template.materials_glazing)\n template.constructions_windows = ar.newrange(template.constructions_opaque,\n template.constructions_windows)\n return template.constructions_windows\n\n\ndef test_day_schedules(template):\n template.day_schedules = ar.day_schedules(template.idfs)\n return template.day_schedules\n\n\ndef test_week_schedules(template):\n template.week_schedules = ar.week_schedules(template.idfs,\n template.day_schedules)\n template.week_schedules = ar.newrange(template.day_schedules,\n template.week_schedules)\n return template.week_schedules\n\n\ndef test_year_schedules(template):\n template.year_schedules = ar.year_schedules(template.idfs,\n template.week_schedules)\n template.year_schedules = ar.newrange(template.week_schedules,\n template.year_schedules)\n return template.year_schedules\n\n\n# Zones\ndef test_zone_information(template, sql):\n template.zone_details = ar.zone_information(sql)\n\n\ndef test_zone_loads(template, sql):\n template.zone_loads = ar.zone_loads(sql)\n\n\ndef test_zone_ventilation(template, sql):\n template.zone_ventilation = ar.zone_ventilation(sql)\n\n\ndef test_zone_condition(template, sql):\n template.zone_conditioning = ar.zone_conditioning(sql)\n\n\ndef test_zone_condition_dev(test_template_withcache, sql):\n test_template_withcache.zone_conditioning = ar.zone_conditioning(sql)\n print(test_template_withcache.zone_conditioning)\n\n\ndef test_to_json(test_template_withcache):\n test_template_withcache.read()\n json = test_template_withcache.to_json(orient='records')\n print(json)\n\n\ndef 
test_to_json_std():\n files = glob.glob(\"./input_data/STD/*idf\")\n files = ar.copy_file(files)\n wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'\n a = ar.Template(files, wf)\n a.read()\n json = a.to_json(orient='records')\n print(json)\n","sub_path":"tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"456170456","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# In[ ]:\r\n\r\n\r\n# Import required packages\r\nimport pybullet as p\r\nimport pybullet_data\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\np.connect(p.GUI) \r\np.setAdditionalSearchPath(pybullet_data.getDataPath())\r\nplane = p.loadURDF(\"plane.urdf\")\r\np.setGravity(0, 0, -30)\r\n\r\nhuskypos = [0, 0, 1]\r\n\r\n# Load URDFs\r\nhusky = p.loadURDF(\"husky/husky.urdf\", huskypos[0], huskypos[1], huskypos[2])\r\ncheetah = p.loadURDF(\"mini_cheetah/mini_cheetah.urdf\",-5,+2, huskypos[2])\r\n\r\ntarget= p.loadURDF(\"block0.urdf\",3, +2, huskypos[2])\r\n\r\n\r\nmaxForce = 55#Force of car #Newton.m\r\ndist_init=0#initial distance\r\nf=50#force of cheetah #Newton.m\r\nm1=0.01\r\nd=50\r\ns=0.15# distance travelled by cheetah for calling runcheetah() once\r\nl=0\r\nl1=0 #adjust the time.sleep(l1) \r\n# Function to set cheetah's initial orientation(need was understood by experiment)\r\ndef setPos():\r\n l2=0.1\r\n n=0\r\n while(n=-np.pi:\r\n speed= -sp\r\n elif error>0.5 and error3:\r\n sp= int((mag(targetVec)-1.5)*10)\r\n m= (sp//10)-1 # To set kp and kd according to the basespeed\r\n else:\r\n sp=0\r\n sx= (mag(targetVec)/3)*h-1# Dependence on speed of enemy\r\n print(sp)\r\n speed_correction, last_error=speedCorrection(targetVec, frontVec, last_error, m) \r\n turn(speed_correction, sp, sx)\r\n\r\n for k, v in keys.items():\r\n if (k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN) and PID_CONTROL==False):\r\n targetVel = 20\r\n for joint in range(2,6):\r\n p.setJointMotorControl2(husky,joint, p.VELOCITY_CONTROL, targetVelocity =targetVel, force = maxForce)\r\n\r\n p.stepSimulation()\r\n\r\n if (k == p.B3G_UP_ARROW and (v & p.KEY_WAS_RELEASED) and PID_CONTROL==False):\r\n targetVel = 0\r\n for joint in range(2, 6):\r\n p.setJointMotorControl2(husky, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel,force = maxForce)\r\n\r\n if (k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN) and PID_CONTROL==False):\r\n targetVel = -20\r\n for joint in range(2,6):\r\n p.setJointMotorControl2(husky,joint, p.VELOCITY_CONTROL, targetVelocity =targetVel, force = maxForce)\r\n\r\n p.stepSimulation()\r\n\r\n if (k == p.B3G_DOWN_ARROW and (v & p.KEY_WAS_RELEASED) and PID_CONTROL==False):\r\n targetVel = 0\r\n for joint in range(2, 6):\r\n p.setJointMotorControl2(husky, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel,force = maxForce)\r\n\r\n if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN) and PID_CONTROL==False):\r\n targetVel = 5\r\n for joint in range(1,3):\r\n p.setJointMotorControl2(husky,2*joint, p.VELOCITY_CONTROL, targetVelocity =targetVel,force = maxForce)\r\n for joint in range(1,3):\r\n p.setJointMotorControl2(husky,2*joint+1, p.VELOCITY_CONTROL,targetVelocity =-1*targetVel,force = maxForce)\r\n\r\n p.stepSimulation()\r\n if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_WAS_RELEASED)):\r\n targetVel = 0\r\n for joint in range(2, 6):\r\n p.setJointMotorControl2(husky, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel,force = maxForce)\r\n\r\n if (k 
== p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN)and PID_CONTROL==False):\r\n targetVel = 5\r\n for joint in range(1,3):\r\n p.setJointMotorControl2(husky,2* joint+1, p.VELOCITY_CONTROL,targetVelocity = targetVel,force = maxForce)\r\n for joint in range(1,3):\r\n p.setJointMotorControl2(husky,2* joint, p.VELOCITY_CONTROL,targetVelocity =-1* targetVel,force = maxForce)\r\n\r\n p.stepSimulation()\r\n if (k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_RELEASED)):\r\n targetVel = 0\r\n for joint in range(2, 6):\r\n p.setJointMotorControl2(husky, joint, p.VELOCITY_CONTROL, targetVelocity =targetVel,force = maxForce)\r\n\r\n p.stepSimulation()\r\n ### Press c to start auto mode\r\n if (k == ord('c') and (v & p.KEY_WAS_TRIGGERED)):\r\n print(\"\\nPID Control-on/Auto mode on\")\r\n PID_CONTROL = True\r\n if (k == ord('r') and (v & p.KEY_WAS_TRIGGERED)):\r\n print(\"\\nPID Control-off ,back to manual\")\r\n PID_CONTROL = False\r\n ### Press m to change mode \r\n if (k == ord('m') and (v & p.KEY_WAS_TRIGGERED)):\r\n b=~b\r\n print(\"\\nVisual mode\")\r\n ### Press p to take a picture from driver's view \r\n if (k == ord('p') and (v & p.KEY_IS_DOWN)):\r\n rear_bump= p.getLinkState(husky,9)\r\n front_bump= p.getLinkState(husky,8)\r\n v1= front_bump[0][0]-rear_bump[0][0]\r\n v2= front_bump[0][1]-rear_bump[0][1]\r\n v3= front_bump[0][2]-rear_bump[0][2]\r\n frontVec= np.array(list((v1, v2, v3)))\r\n t= (frontVec[0]**2 + frontVec[1]**2 + frontVec[2]**2)**(0.5)\r\n frontDrn= frontVec/t\r\n s1=20\r\n eyePos= [front_bump[0][0], front_bump[0][1], front_bump[0][2]]\r\n targetPos= list(frontDrn*s1)\r\n upVec= [0,0,1]\r\n view_matrix= p.computeViewMatrix(eyePos, targetPos, upVec)\r\n\r\n width= 512\r\n height= 512\r\n aspect_ratio= width/height\r\n near= 0.1\r\n far= 10\r\n fov= 120\r\n projection_matrix= p.computeProjectionMatrixFOV(fov, aspect_ratio, near, far)\r\n\r\n images= p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True, renderer= p.ER_BULLET_HARDWARE_OPENGL)\r\n image= images[2].reshape((height, width, 4))\r\n image= cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA) \r\n cv2.imshow('Driver_View', image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n ## Visual instructing mode(a little slower due to waitkey, background should be light coloured than instrucor's hand)\r\n else:\r\n #update your droid cam code for better control and visualisation\r\n cap= cv2.VideoCapture(\"http://192.168.43.53:4747/video\")\r\n \r\n while(True):\r\n _,image= cap.read()\r\n #gray= cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n #mask= cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 641, 2)\r\n #mask= cv2.erode(mask, (5,5), iterations=10)\r\n image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB) \r\n image=cv2.rotate(image,cv2.ROTATE_90_CLOCKWISE)\r\n lower=np.array([80,60,50])\r\n high=np.array([165,126,110])\r\n mask=cv2.inRange(image,lower,high)\r\n length= mask.shape[1]\r\n bredth= mask.shape[0]\r\n ex= length//3\r\n screen1= mask[:, 0:ex]\r\n screen2= mask[:, ex:(length-ex)]\r\n screen3= mask[:, (length-ex):]\r\n c1,_= cv2.findContours(screen1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n c2,_= cv2.findContours(screen2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n c3,_= cv2.findContours(screen3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n \r\n \r\n \r\n if len(c1)!=0:\r\n c= sorted(c1, key= cv2.contourArea, reverse= True)[0]\r\n a1= cv2.contourArea(c)\r\n else:\r\n a1=0\r\n if len(c2)!=0:\r\n c= sorted(c2, key= cv2.contourArea, reverse= True)[0]\r\n a2= cv2.contourArea(c)\r\n 
else:\r\n a2=0\r\n if len(c3)!=0:\r\n c= sorted(c3, key= cv2.contourArea, reverse= True)[0]\r\n a3= cv2.contourArea(c)\r\n else:\r\n a3=0\r\n a4= max(a1, a2, a3)\r\n if a4==a3:\r\n ##speed= 40##((sp+sx)/3)*2\r\n speed1= 160\r\n speed2= -160\r\n print(\"R\")\r\n\r\n elif a4==a1:\r\n #speed= -40#-((sp+sx)/3)*2\r\n speed1= -160\r\n speed2= 160\r\n print(\"L\")\r\n else:\r\n speed1= 160\r\n speed2= 160\r\n print(\"F\")\r\n targetVel_R = speed1\r\n targetVel_L = speed2\r\n for joint in range(1,3):\r\n p.setJointMotorControl2(husky,2* joint, p.VELOCITY_CONTROL, targetVelocity =targetVel_R,force = maxForce)\r\n for joint in range(1,3):\r\n p.setJointMotorControl2(husky,2* joint+1, p.VELOCITY_CONTROL,targetVelocity =targetVel_L,force = maxForce)\r\n p.stepSimulation()\r\n mask[:,ex]=255\r\n mask[:, length-ex]=255\r\n cv2.imshow(\"mask\", mask)\r\n k= cv2.waitKey(1)\r\n if k==27:\r\n b=0\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n break\r\np.disconnect()\r\n","sub_path":"Robotics_Summer_camp.py","file_name":"Robotics_Summer_camp.py","file_ext":"py","file_size_in_byte":23516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85284448","text":"# ------------------------------------------------------------------------------------------------------------------------------------------\n# region FEATURE SELECTION\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_selection import SelectKBest, chi2\n\n\ndef get_ds_best_features(dfX, dfY, dataset_name, cluster_topic, get_past_years_x_y, kept_years, n_best_features):\n x_data, scaler = scale_array_between_0_1(dfX)\n\n le = LabelEncoder()\n y_data = le.fit_transform(dfY[cluster_topic]).tolist()\n\n select_k_best = SelectKBest(chi2, k=n_best_features)\n select_k_best.fit_transform(x_data, y_data)\n selected_cols = select_k_best.get_support()\n\n # List best features\n cols = list(dfX.columns[selected_cols])\n print_check(dataset_name, cols)\n\n folder_name = sanitize_string(dataset_name)\n sub_folder_name = 'BEST_FEATURES'\n cluster_by = sanitize_string(cluster_topic)\n file_path = '{}/{}/COLS_{}'.format(folder_name, sub_folder_name, cluster_by)\n save_as_pickle(cols, file_path)\n\n return cols\n\n\ndef get_best_features():\n for ds_name in DS_NAMES:\n # --\n df = read_pickle(ds_name)\n dfX, dfY = get_past_years_x_y(df, MyCols.NB_OF_PAST_YRS_KEPT)\n\n # --\n for cluster_topic in CLUSTER_TOPICS:\n for n in N_BEST_FEATURES:\n # pass the loop variables so the call matches the function signature\n COLS = get_ds_best_features(dfX, dfY, ds_name, cluster_topic, get_past_years_x_y, MyClusters.NB_MODELING_YEARS, n)\n\n # Get scree charts\n best_cols = COLS + [wka.cat_type_of_program_fr, wka.yr_treatment]\n BEST_DS_COLS['{}-{}-{}'.format(sanitize_string(ds_name), sanitize_string(cluster_topic), n)] = best_cols\n\n # -- Customization\n # --\n # df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'BIN_REFERRAL_'), errors='ignore')\n # df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'NUM_PROD_'), errors='ignore')\n # df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'BIN_MAIN_PROD_'), errors='ignore')\n df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'COUNT_'), errors='ignore')\n # df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'BIN_PROD_'), errors='ignore')\n df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'BIN_NATIONALITY_'), errors='ignore')\n df_x = drop_columns(df_x, get_cols_with_prefix(df_x, 'BIN_PREV_TREAT_'), errors='ignore')\n","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"312458236","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author:Fangyang\n@User:haizhi\n@File:JUDGEMENT_WENSHU_for_jilin\n@time:2018/8/8下午5:57\n@Software:PyCharm\n\n\"\"\"\nimport time\nimport random\nimport re\n\nimport json\nimport requests\nimport pymysql\nimport pymongo\nfrom bs4 import BeautifulSoup\n#config_of_mysql = pymysql.connect(\"localhost\", \"root\", \"fangyang\", \"test\")\nconfig_of_mongodb = pymongo.MongoClient('mongodb://localhost:27017')\n\n\nheader = {'User-Agent:': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 \\\n (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\n\n\nclass Judgement_wenshu(object):\n def __init__(self, database_name, url):\n self.database_name = database_name\n self.url = url\n self.id_list = []\n self.title_list = []\n self.Pageurl_list = []\n self.source_page_list = []\n self.wenshu_content = []\n\n def link_database(self):#默认使用mongodb\n # noinspection PyBroadException\n try:\n if self.database_name == \"mysql\":\n return 0\n elif self.database_name == \"mongodb\":\n return config_of_mongodb\n except Exception:\n print(\"缺少数据库名称,默认使用mongodb\")\n return config_of_mongodb\n\n def start_request(self, ip):\n proxy = {\n 'http': 'http://{}'.format(ip),\n 'https': 'https:{}'.format(ip)\n }\n try:\n context = requests.get(self.url).text\n except Exception as e:\n print(e + \"访问失败\")\n return -1\n soup = BeautifulSoup(context, \"html.parser\")\n content_url = \"http://www.jlsfy.gov.cn:8080/susong51/cpws/loadPrintContent.htm?id=\"#详情页的获取页面\n list_context = soup.find_all(onclick=re.compile('javascript:cpwsDetail'))\n for tag in list_context:\n self.id_list.append(tag['onclick'][23:-3])\n temp = str(tag.contents[1]).split('>')\n self.title_list.append(temp[3][:-3])\n for id in self.id_list:\n url1 = content_url + id\n self.Pageurl_list.append(url1)\n if len(self.id_list) == 0:\n return 0\n page_content = requests.get(url1, header).text\n print(\"获取到\"+url1+\"的数据\")\n temp_js = json.loads(page_content)\n self.source_page_list.append(temp_js['data'])\n print(str(url1) + \" \" + \"ok\")\n # 获取到信息以后剔除HTML标签\n pat = re.compile('>(.*?)<')\n p = ''.join(pat.findall(temp_js['data']))\n self.wenshu_content.append(p)\n return 1\n\n def write_to_database(self):\n global config_of_mongodb\n if self.databasename == \"mysql\":\n for i in range(len(self.title_list)):\n sql = \"INSERT IGNORE INTO judgement_wenshu(wenshu_content, \\\n case_name, _site_record_id,topic,source,source_page,PageUrl,_in_time) \\\n VALUES ('%s', '%s', '%s','%s','%s','%s','%s','%s')\" % \\\n (self.wenshu_content[i],\n self.title_list[i],\n \"www.jlsfy.gov.cn:8080\",\n \"0\",\n \"judgement_wenshu\",\n \"诉讼无忧新版采集脚本v0_吉林高院省\",\n self.source_page_list[i],\n self.Pageurl_list[i],\n time.time())\n #cursor = config_of_mysql.cursor()\n # cursor.execute(sql)\n # config_of_mysql.commit()\n return 1\n elif self.database_name == \"mongodb\":\n for i in range(len(self.title_list)):\n mycol = config_of_mongodb['test']['judgement_wenshu']\n mycol.insert_one({\"wenshu_content\": self.wenshu_content[i], 'case_name': self.title_list[i],\n 'site_record_id': 'www.jlsfy.gov.cn:8080',\n 'topic': 'judgement_wenshu',\n 'source': '诉讼无忧新版采集脚本v0',\n 'source_page': self.source_page_list[i],\n 'PageUrl': self.Pageurl_list[i],\n '_in_time': time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n }\n )\n return 1\n\n def work(self):\n myip=config_of_mongodb['proxy']\n ip = []\n myproxy = myip['raw_proxy'].find()\n for i in myproxy:\n ip.append(i['proxy'])\n 
self.start_request(random.choice(ip))\n self.write_to_database()\n\n\nif __name__ == '__main__':\n\n cjws = Judgement_wenshu('mongodb', 'http://www.jlsfy.gov.cn:8080/susong51/fymh/750/cpws.htm?page=1')\n cjws.work()\n","sub_path":"JUDGEMENT_WENSHU_for_jilin.py","file_name":"JUDGEMENT_WENSHU_for_jilin.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15909120","text":"import torch\nimport torch.nn as nn\n\nimport speechbrain as sb\nfrom speechbrain.nnet.pooling import StatisticsPooling\nfrom speechbrain.nnet.CNN import Conv1d\nfrom speechbrain.nnet.linear import Linear\nfrom speechbrain.nnet.normalization import BatchNorm1d\nfrom speechbrain.lobes.models.dual_path import Decoder\n\n\nclass PASEEncoder(torch.nn.Module):\n def __init__(\n self,\n activation=torch.nn.PReLU,\n use_sincnet=True,\n in_channels=1,\n blocks_channels=[128,256,100],\n blocks_kernel_sizes=[11,11,1],\n blocks_strides=[2,2,1],\n ):\n super().__init__()\n self.blocks = nn.ModuleList()\n\n first_hidden_layer = Conv1d\n self.blocks.append(\n first_hidden_layer(\n in_channels=in_channels,\n out_channels=blocks_channels[0],\n kernel_size=blocks_kernel_sizes[0],\n stride=blocks_strides[0],\n )\n )\n in_channels = blocks_channels[0]\n \n for block_index in range(1, len(blocks_channels)-1):\n out_channels = blocks_channels[block_index]\n self.blocks.extend(\n [\n Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=blocks_kernel_sizes[block_index],\n stride=blocks_strides[block_index],\n ),\n BatchNorm1d(input_size=out_channels),\n activation(),\n ]\n )\n in_channels = blocks_channels[block_index]\n self.blocks.extend(\n [\n Conv1d(\n in_channels=in_channels,\n out_channels=blocks_channels[block_index + 1],\n kernel_size=blocks_kernel_sizes[block_index + 1],\n stride=blocks_strides[block_index + 1],\n ),\n BatchNorm1d(input_size=blocks_channels[block_index + 1], affine=False),\n ]\n )\n\n def forward(self, x, *args, **kwargs):\n for layer in self.blocks:\n try:\n x = layer(x, *args, **kwargs)\n except TypeError:\n x = layer(x)\n return x\n","sub_path":"transfer/models/transfer_model.py","file_name":"transfer_model.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14799376","text":"# Standard imports\nimport cv2\nimport sys\nimport numpy as np\n\n# Read images\nsrc = cv2.imread(sys.argv[1])\ndst = cv2.imread(sys.argv[2])\n\n\n# Create a rough mask around the airplane.\nsrc_mask = np.zeros(src.shape, src.dtype)\nsrc_mask[16:190,134:310] = 255\n#poly = np.array([ [16,80], [30,54], [151,63], [254,37], [298,90], [272,134], [43,122] ], np.int32)\n#cv2.fillPoly(src_mask, [poly], (255, 255, 255))\n\n# This is where the CENTER of the airplane will be placed.\n# (0, 0) would fall outside the destination image, so use the centre of dst instead.\ncenter = (dst.shape[1] // 2, dst.shape[0] // 2)\n\n# Clone seamlessly.\noutput = cv2.seamlessClone(src, dst, src_mask, center, cv2.NORMAL_CLONE)\n\n# Save result\ncv2.imwrite(\"opencv-seamless-cloning-example.jpg\", output)\n","sub_path":"New_Driver/seamless_clone.py","file_name":"seamless_clone.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"329145062","text":"from django.contrib import admin\n\nfrom blog.models import Entry, UserProfile\n\n\nclass EntryAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['title']}),\n (None, {'fields': ['author']}),\n ('Date 
Published', {'fields': ['pub_date']}),\n ('Dose of Happy', {'fields': ['content']}),\n ('Happy Level', {'fields': ['rating']}),\n ]\n list_display = ('title', 'pub_date', 'author',)\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields': ['user']}),\n ('About', {'fields': ['bio']}),\n ('Picture', {'fields': ['picture']}),\n ]\n list_display = ('user', 'bio', 'picture',)\n\n\nadmin.site.register(Entry, EntryAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\n","sub_path":"blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"589498336","text":"\"\"\"\nAn implementation of the Probabilistic Ensembles with Trajectory Sampling (PETS) algorithm\nfrom Chua et al (2018).\n\n\"\"\"\nimport sys\nimport gym\nimport torch\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.torch.PETS import Model, MPCPolicy, PETSTrainer\nfrom rlkit.envs.wrappers import NormalizedBoxEnv\nfrom rlkit.data_management.env_replay_buffer import EnvReplayBuffer\nfrom rlkit.launchers.launcher_util import setup_logger\nfrom rlkit.samplers.data_collector import MdpPathCollector\nfrom rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\nfrom custom.mountain_car_continuous import mountain_car_continuous_reward\nfrom custom.cartpole_swingup import CartPoleSwingUpEnv, cartpole_swingup_reward_v1\n\nptu.set_gpu_mode(True)\n\n\ndef experiment(variant):\n # expl_env = NormalizedBoxEnv(gym.make('BipedalWalker-v3'))\n # eval_env = NormalizedBoxEnv(gym.make('BipedalWalker-v3'))\n # expl_env = NormalizedBoxEnv(CartPoleSwingUpEnv())\n # eval_env = NormalizedBoxEnv(CartPoleSwingUpEnv())\n from custom.mjcartpole import CartpoleEnv, get_cp_reward # This import will fail if you don't have Mujoco.\n expl_env = CartpoleEnv()\n eval_env = CartpoleEnv()\n # expl_env = NormalizedBoxEnv(gym.make('MountainCarContinuous-v0'))\n # eval_env = NormalizedBoxEnv(gym.make('MountainCarContinuous-v0'))\n assert variant['policy']['num_particles'] % variant['model']['num_bootstrap'] == 0, \"There must be an even number of particles per bootstrap\" # NOQA\n assert variant['algorithm_kwargs']['num_trains_per_train_loop'] % variant['model']['num_bootstrap'] == 0, \"Must be an even number of train steps per bootstrap\" # NOQA\n obs_dim = expl_env.observation_space.low.size\n action_dim = expl_env.action_space.low.size\n\n model = Model(\n hidden_sizes=variant['model']['hidden_sizes'],\n obs_dim=obs_dim,\n action_dim=action_dim,\n num_bootstrap=variant['model']['num_bootstrap'],\n rew_function=get_cp_reward,\n # env=expl_env,\n # rew_function=mountain_car_continuous_reward # for now\n )\n policy = MPCPolicy(\n model=model,\n obs_dim=obs_dim,\n action_dim=action_dim,\n num_particles=variant['policy']['num_particles'],\n cem_horizon=variant['policy']['cem_horizon'],\n cem_iters=variant['policy']['cem_iters'],\n cem_popsize=variant['policy']['cem_popsize'],\n cem_num_elites=variant['policy']['cem_num_elites'],\n sampling_strategy=variant['policy']['sampling_strategy'],\n optimizer=variant['policy']['optimizer'],\n opt_freq=variant['policy']['opt_freq'],\n cem_alpha=variant['policy']['cem_alpha'],\n )\n trainer = PETSTrainer(expl_env,\n policy,\n model,\n lr=variant['lr'])\n eval_path_collector = MdpPathCollector(\n eval_env,\n policy,\n )\n expl_path_collector = MdpPathCollector(\n expl_env,\n policy,\n )\n replay_buffer = EnvReplayBuffer(\n variant['replay_buffer_size'],\n expl_env,\n )\n algorithm 
= TorchBatchRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n **variant['algorithm_kwargs']\n )\n algorithm.to(ptu.device)\n algorithm.train()\n\n\nif __name__ == '__main__':\n name = sys.argv[1]\n torch.set_num_threads(8)\n variant = dict(\n policy=dict(\n num_particles=20,\n cem_horizon=25,\n cem_iters=5,\n cem_popsize=400,\n cem_num_elites=40,\n sampling_strategy='TSinf',\n optimizer='CEM',\n cem_alpha=0.1,\n opt_freq=1,\n ),\n algorithm_kwargs=dict(\n num_epochs=5,\n num_eval_steps_per_epoch=200,\n num_trains_per_train_loop=100000,\n num_expl_steps_per_train_loop=500,\n min_num_steps_before_training=10000,\n max_path_length=200,\n batch_size=256,\n ),\n # policy=dict(\n # num_particles=10,\n # cem_horizon=25,\n # cem_iters=2,\n # cem_popsize=10,\n # cem_num_elites=3,\n # sampling_strategy='TS1',\n # optimizer='CEM',\n # cem_alpha=0.1,\n # opt_freq=25,\n # ),\n # algorithm_kwargs=dict(\n # num_epochs=3000,\n # num_eval_steps_per_epoch=400,\n # num_trains_per_train_loop=100,\n # num_expl_steps_per_train_loop=200,\n # min_num_steps_before_training=200,\n # max_path_length=200,\n # batch_size=256,\n # ),\n model=dict(\n num_bootstrap=5,\n hidden_sizes=[500, 500, 500],\n ),\n replay_buffer_size=int(1e7),\n lr=0.001,\n )\n\n setup_logger(name, variant=variant)\n # import pudb; pudb.set_trace()\n experiment(variant)\n","sub_path":"examples/pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79443023","text":"import copy\n\nfrom pure_mcts.keyset import KeySet\nfrom pure_mcts.mcts_dpw import MCTSStochastic, StochasticAction, StochasticState\nimport numpy as np\nfrom race_components.race_ol_uct import strategic_rollout\n\nfrom helpers import (argmax, is_atari_game, copy_atari_state, restore_atari_state, stable_normalizer)\n\n\nclass RaceStochasticState(StochasticState):\n def __init__(self, index, r, terminal, parent_action, na, signature, budget, env=None, max_depth=200, owner=None):\n assert owner is not None, \"Owner parameter must be specified for RaceStochasticState class constructor\"\n self.owner = owner\n self.end_turn = env.has_transitioned()\n\n super(RaceStochasticState, self).__init__(index, r, terminal, parent_action, na, signature, budget,\n env=env, max_depth=max_depth)\n\n if self.terminal or terminal:\n self.V = np.zeros(env.agents_number)\n\n action_list = env.get_available_actions(owner)\n self.child_actions = [RaceStochasticAction(a, parent_state=self, owner=owner) for a in action_list]\n\n def random_rollout(self, budget, env, max_depth=200):\n # forward the caller's max_depth instead of hard-coding 200\n return strategic_rollout(env, budget, max_depth=max_depth, terminal=self.terminal, root_owner=self.owner)\n\nclass RaceStochasticAction(StochasticAction):\n def __init__(self, index, parent_state, Q_init=0.0, owner=None):\n assert owner is not None, \"Owner must be specified for constructor of RaceStochasticAction class\"\n self.owner = owner\n super(RaceStochasticAction, self).__init__(index, parent_state, Q_init=Q_init)\n\n def add_child_state(self, s1, r, terminal, signature, budget, env=None, max_depth=200):\n child_state = RaceStochasticState(s1, r, terminal, self, self.parent_state.na, signature, budget, env=env,\n max_depth=max_depth, owner=env.get_next_agent())\n self.child_states.append(child_state)\n\n sk = KeySet(s1)\n\n # s1_hash = 
s1.tostring()\n # self.state_indeces[sk.__hash__()] = self.n_children\n self.state_indices[sk] = self.n_children\n self.n_children += 1\n return child_state, child_state.remaining_budget\n\nclass RaceMCTSStochastic(MCTSStochastic):\n def __init__(self, root, root_index, model, na, gamma, alpha=0.6, depth_based_bias=False, owner=None):\n super(RaceMCTSStochastic, self).__init__(root, root_index, model, na, gamma,\n alpha=alpha, depth_based_bias=depth_based_bias)\n\n assert owner is not None, \"Owner must be specified for RaceMCTSStochastic constructor\"\n self.owner = owner\n\n def search(self, n_mcts, c, Env, mcts_env, budget, max_depth=200):\n ''' Perform the MCTS search from the root '''\n is_atari = is_atari_game(Env)\n if is_atari:\n snapshot = copy_atari_state(Env) # for Atari: snapshot the root at the beginning\n else:\n mcts_env = copy.deepcopy(Env) # copy original Env to rollout from\n # else:\n # restore_atari_state(mcts_env, snapshot)\n\n # Check that the environment has been copied correctly\n try:\n sig1 = mcts_env.get_signature()\n sig2 = Env.get_signature()\n if sig1.keys() != sig2.keys():\n raise AssertionError\n if not all(np.array_equal(sig1[key], sig2[key]) for key in sig1):\n raise AssertionError\n except AssertionError:\n print(\"Something wrong while copying the environment\")\n sig1 = mcts_env.get_signature()\n sig2 = Env.get_signature()\n print(sig1.keys(), sig2.keys())\n exit()\n\n if self.root is None:\n # initialize new root\n self.root = RaceStochasticState(self.root_index, r=0.0, terminal=False, parent_action=None,\n na=self.na, signature=Env.get_signature(),\n env=mcts_env, budget=budget, owner=self.owner)\n else:\n self.root.parent_action = None # continue from current root\n if self.root.terminal:\n raise (ValueError(\"Can't do tree search from a terminal state\"))\n\n while budget > 0:\n state = self.root # reset to root for new trace\n if not is_atari:\n mcts_env = copy.deepcopy(Env) # copy original Env to rollout from\n else:\n restore_atari_state(mcts_env, snapshot)\n mcts_env.seed()\n st = 0\n while not state.terminal:\n bias = c * self.gamma ** st / (1 - self.gamma) if self.depth_based_bias else c\n action = state.select(c=bias)\n k = np.ceil(self.beta * action.n ** self.alpha)\n if k >= action.n_children:\n s1, r, t, _ = mcts_env.step(action.index)\n # if action.index == 0 and not np.array_equal(s1.flatten(), action.parent_state.index.flatten()):\n # print(\"WTF\")\n if mcts_env.has_transitioned():\n budget -= 1\n if action.get_state_ind(s1) != -1:\n state = action.child_states[action.get_state_ind(s1)] # select\n state.r = r\n else:\n state, budget = action.add_child_state(s1, r, t, mcts_env.get_signature(), budget, env=mcts_env,\n max_depth=max_depth - st) # expand\n break\n else:\n state = action.sample_state()\n mcts_env.set_signature(state.signature)\n if state.terminal:\n budget -= 1\n\n if mcts_env.has_transitioned():\n st += 1\n\n # Back-up\n R = np.zeros(mcts_env.agents_number)\n\n if not state.terminal:\n R = copy.deepcopy(state.V)\n state.update()\n agents_reward = copy.deepcopy(state.r)\n while state.parent_action is not None: # loop back-up until root is reached\n owner = state.parent_action.owner # rewards are stored in the state following the action, which has different owner\n if not state.terminal:\n if state.end_turn:\n agents_reward = copy.deepcopy(state.r)\n R[owner] = agents_reward[owner] + self.gamma * R[owner]\n else:\n R = copy.deepcopy(state.r)\n action = state.parent_action\n action.update(R[action.owner])\n state = 
action.parent_state\n state.update()","sub_path":"race_components/race_dpw.py","file_name":"race_dpw.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319885443","text":"# Program to flatten a nested list\n\ndef flattenMat1(mat):\n # Nested list, naive loops\n flatten_matrix = []\n for sublist in mat:\n for val in sublist:\n flatten_matrix.append(val)\n return flatten_matrix\n\ndef flattenMat2(mat):\n # Nested list comprehension\n return [val for sublist in mat for val in sublist]\n\n# checking the program\nmatrix = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]\nprint(flattenMat2(matrix))\n","sub_path":"Shimon_Labs/flattenMatrix.py","file_name":"flattenMatrix.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351621295","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException\nimport time\nimport tweepy\nimport datetime\n\n\nclass Scraper:\n def __init__(self, query, delay_scrolling, limit):\n \"\"\"\n :param query:\n :param delay_scrolling:\n :param limit:\n \"\"\"\n self.query = query.lower()\n self.delay_scrolling = delay_scrolling\n self.limit = limit\n self.driver = webdriver.Firefox()\n self.tweet_selector = 'li.js-stream-item'\n self.id_selector = '.time a.tweet-timestamp'\n self.ids = []\n\n def format_day(self, date):\n return date.strftime('%Y-%m-%d')\n\n def form_url(self, query, since, until):\n p1 = 'https://twitter.com/search?q='\n p2 = query + '%20since%3A' + since + '%20until%3A' + until + '&src=typd'\n return p1 + p2\n\n def end_dates(self):\n date_today = datetime.date.today()\n dates_list = [datetime.date.today()]\n index = 1\n month_temp = date_today.month\n year_temp = date_today.year\n flag = False\n while (month_temp != date_today.month) or (year_temp != (date_today.year - 1)):\n index = index + 1\n month_temp = date_today.month - index\n if month_temp <= 0:\n month_temp = month_temp + 12\n if not flag:\n year_temp = year_temp - 1\n flag = True\n dates_list.append(datetime.date(year_temp, month_temp,\n self.last_day_of_month(datetime.date(year_temp, month_temp, 1)).day))\n del dates_list[-1]\n return dates_list\n\n def last_day_of_month(self, any_day):\n next_month = any_day.replace(day=28) + datetime.timedelta(days=4) # this will never fail\n return next_month - datetime.timedelta(days=next_month.day)\n\n def run_scrapping(self):\n for date in self.end_dates():\n start_date = self.format_day(datetime.date(date.year, date.month, 1))\n end_date = self.format_day(date)\n url = self.form_url(self.query, start_date, end_date)\n self.scrap_period(url)\n\n def extract_ids(self, stamps):\n ids = []\n for tweet in stamps:\n try:\n id = tweet.get_attribute('data-conversation-id')\n # id = tweet.find_element_by_css_selector(id_selector).get_attribute('href').split('/')[-1]\n ids.append(id)\n except StaleElementReferenceException as e:\n print('lost element reference', tweet)\n return ids\n\n def scrap_period(self, url):\n self.driver.get(url)\n time.sleep(self.delay_scrolling)\n try:\n found_tweets = self.driver.find_elements_by_css_selector(self.tweet_selector)\n # scroll down the browser until the results end\n increment = 10\n while len(found_tweets) <= self.limit and len(found_tweets) >= increment:\n print('scrolling down to load more tweets')\n 
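# scroll to the page bottom so Twitter lazy-loads the next batch of results\n 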
self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n time.sleep(self.delay_scrolling)\n found_tweets = self.driver.find_elements_by_css_selector(self.tweet_selector)\n increment = increment + 10\n # collect tweets id\n tweet_stamps = self.driver.find_elements_by_css_selector(self.id_selector)\n self.ids.extend(self.extract_ids(tweet_stamps))\n except NoSuchElementException:\n print('no tweets in this period')\n\n\nclass WebTweets:\n def __init__(self, query, delay, limit, class_type, api):\n self.scraper = Scraper(query, delay, limit)\n self.scraper.run_scrapping()\n self.ids = self.scraper.ids\n self.api = api\n self.class_type = class_type\n self.tweets = []\n\n def mine_scraped_ids(self, scrap_ids):\n ids_number = len(scrap_ids)\n scrapped_tweets = []\n start_i = 0\n end_i = 100\n stop = False\n while not stop:\n if end_i > ids_number:\n end_i = ids_number\n stop = True\n # statuses_lookup accepts at most 100 ids per call; step in contiguous 100-id chunks\n scrapped_tweets.extend(self.api.statuses_lookup(scrap_ids[start_i:end_i], tweet_mode='extended'))\n start_i = end_i\n end_i = end_i + 100\n return scrapped_tweets\n\n def filter_cls(self, tweet, cls):\n item_dict = tweet._json\n item_dict['class'] = cls\n return item_dict\n\n def get_tweets(self):\n temp_tweets = self.mine_scraped_ids(self.ids)\n for scraped_tweet in temp_tweets:\n self.tweets.append(self.filter_cls(scraped_tweet, self.class_type))\n return self.tweets\n\n\ndef set_api():\n with open('tokens', mode='r', encoding='utf-8') as myfile:\n lines = myfile.readlines()\n consumer_key = lines[0].strip()\n consumer_secret = lines[1].strip()\n access_token = lines[2].strip()\n access_secret = lines[3].strip()\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n return api\n\n\nif __name__ == '__main__':\n scraper = WebTweets('bdsfail', 5, 10, 'against', set_api())\n tweets = scraper.get_tweets()\n print(tweets)\n","sub_path":"tweets_collect_tool/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"406193868","text":"class Settings():\n\n def __init__(self):\n self.screen_width = 1200\n self.screen_height = 600\n self.bg_color = (187,255,255)\n\n self.bullet_speed_factor = 20\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = 60, 60, 60\n\n self.alien_speed_factor = 20\n self.fleet_drop_speed = 10\n # fleet_direction of 1 means move right; -1 means move left\n self.fleet_direction = 1\n\n self.ship_limit = 3  # the player has three ships (lives) left\n self.alien_points = 50  # points per alien\n","sub_path":"Alien_Invasion/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307238331","text":"#!/usr/bin/python3\n\"\"\"\nDefine city routes.\n\"\"\"\n\n\nfrom flask import request, abort, jsonify\nfrom models import storage\nfrom models.city import City\nfrom models.state import State\nfrom api.v1.views import app_views\n\n\n@app_views.route(\"/states/<state_id>/cities\", methods=[\"GET\", \"POST\"])\ndef cities(state_id):\n \"\"\"Define /cities route with GET and POST methods\n\n POST - Create a new city\n GET - Get a list of all cities\n \"\"\"\n state = storage.get('State', state_id)\n if state is None:\n return abort(404)\n\n # GET\n if request.method == \"GET\":\n return jsonify([city.to_dict() for city in state.cities])\n\n # POST\n doc = request.get_json(silent=True)\n if doc is None:\n 
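# 400 when the request body is missing or is not valid JSON\n 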
return \"Not a JSON\", 400\n if doc.get(\"name\") is None:\n return \"Missing name\", 400\n doc['state_id'] = state_id\n city = City(**doc)\n city.save()\n return jsonify(city.to_dict()), 201\n\n\n@app_views.route(\"/cities/\", methods=[\"GET\", \"DELETE\", \"PUT\"])\ndef city(city_id):\n \"\"\"Define /cities/ with GET, PUT and DELETE methodes\n\n GET - get a city with the given id\n PUT - Update the city with the given id\n DELETE - Deletes the cityy with the givem id\n \"\"\"\n city = storage.get('City', city_id)\n if city is None:\n abort(404)\n\n # GET\n if request.method == \"GET\":\n return jsonify(city.to_dict())\n\n # PUT\n elif request.method == \"PUT\":\n doc = request.get_json(silent=True)\n if doc is None:\n return \"Not a JSON\", 400\n\n for k, v in doc.items():\n if k not in (\"id\", \"created_at\", \"updated_at\"):\n setattr(city, k, v)\n city.save()\n return jsonify(city.to_dict())\n\n # DELETE\n city.delete()\n city.save()\n return jsonify({})\n","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487752269","text":"#!/usr/bin/python3\n\"\"\"Gets data from an API\"\"\"\nimport json\nimport requests\n\n\ndef recurse(subreddit, hot_list=[], after=None):\n \"\"\"Shows hot post\"\"\"\n hot_post = []\n url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n agent = {'User-Agent': 'requested'}\n parameters = {'after': after, 'limit': '100'}\n response = requests.get(url, headers=agent,\n params=parameters, allow_redirects=False)\n if response.status_code in [302, 404]:\n return None\n else:\n posts = response.json()['data']['children']\n after = response.json()['data']['after']\n for hot in posts:\n hot_list.append(hot)\n if after is not None:\n recurse(subreddit, hot_list, after)\n return hot_list\n","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638133861","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 11 21:41:48 2017\n\n@author: ram\n\"\"\"\n\nimport serial\n\nclass value:\n def __init__(self, strPort):\n self.ser = serial.Serial(strPort, 9600)\n\n def update(self,i):\n try: \n print(i)\n line = self.ser.readline()\n print(line)\n data = [float(val) for val in line.split(',')]\n print(data)\n print(rects)\n print(data)\n for rect, yi in zip(rects, data):\n print(yi)\n print(rect.set_height(yi))\n \n except KeyboardInterrupt:\n print('exiting')\n \n\n return rects\n\n # clean up\n def close(self):\n # close serial\n self.ser.flush()\n self.ser.close() ","sub_path":"graph_matplot/class/serialvalue.py","file_name":"serialvalue.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"105059751","text":"from Panda import *\r\n# When you create a triangle or rectangle, you can choose which points in texture space each vertex goes with\r\n# In texture space, the lower left corner is P2(0,0) and the upper right is P2(1,1).\r\n\r\n\r\n\r\n# Here are four points in space. Build two triangles and texture them with realpanda.jpg\r\np1 = P3(-1, 0, -1)\r\np2 = P3(1, 0, -1)\r\np3 = P3(-1, 0, 1)\r\np4 = P3(1,0, 1)\r\n# For a triangle, the texture points are P2(0,0), P2(1,0), and P2(0, 1). You can alter\r\n# these by keyword parameters . 
Create a slider to move the second triangle to the side.\r\n# Change the texP3 coordinate below and see what happens.\r\nm = triangle(p1, p2, p3, texture = \"realpanda.jpg\")\r\nn = triangle(p3, p4, p2, texture = \"realpanda.jpg\", texP1 = P2(0, 1), texP2 = P2(1,1), texP3 = P2(1,0), position = P3(slider(), 0, 0))\r\nstart()","sub_path":"CompletedPandaHandouts/src/Texture and Geometry/06-texturecoords.py","file_name":"06-texturecoords.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353791074","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\nfrom __future__ import print_function,division\n\nimport rospy\n\nfrom std_msgs.msg import Empty\n\nfrom larvae_behavior_classifier.msg import Behavior\n\nfrom larvae_behavior_classifier.msg import Features\n\nfrom mightex_controller.msg import CmdCurrent,CmdChannel\n\nfrom lavis_stimuli_controller.msg import StochasticStimuli\n\nimport random\n\n\nclass StochasticController(object):\n def __init__(self,*args,**kwargs):\n rospy.loginfo('Initializing stochastic_node...')\n self._initialized = False\n\n self._behavior_sub = rospy.Subscriber('behavior',Behavior,self._behavior_callback)\n self._features_sub = rospy.Subscriber('features',Features,self._features_callback)\n\n self._cmd_light_current_pub = rospy.Publisher('cmd_current',CmdCurrent,queue_size=1)\n self._cmd_light_off_pub = rospy.Publisher('cmd_off',CmdChannel,queue_size=1)\n self._cmd_all_lights_off_pub = rospy.Publisher('cmd_all_off',Empty,queue_size=1)\n\n self._start_sub = rospy.Subscriber('~start',StochasticStimuli,self._start_callback)\n self._stop_sub = rospy.Subscriber('~stop',Empty,self._stop_callback)\n\n self._feedback_period = rospy.get_param('~feedback_period')\n\n self._last_update_time = 0\n self._last_bandit = 0\n self._running = False\n self._stopped = False\n self._begin = False\n self.firstindex = 0\n\n\n self._stochastic_stimulus_channel = 0\n self._stochastic_stimulus_current = 0\n self._stochastic_left_probability = 0\n self._stochastic_right_probability = 0\n self._stochastic_background_channel = 0\n self._stochastic_background_current = 0\n self._stochastic_basal_current = 0\n\n self.impulse_width = 10\n self.game_length = 500\n self.impulse_period = 1500\n self.s_filtered = 0\n self.bend_state = 0\n self.left_current = 0\n self.right_current = 0\n\n self.begin_run_forward_two_sec = 0\n self.run_forward_two_sec = False\n self.begin_thirteen_sec_wait = 0\n self.wait_thirteen_sec = False\n self.begin_bandit_game = 0\n self.play_bandit_game = False\n\n rospy.loginfo('stochastic_node initialized!')\n self._initialized = True\n\n def _features_callback(self,data):\n if (self._begin == True):\n self.firstindex = data.index\n rospy.loginfo('first index: %d', self.firstindex)\n self._begin = False\n self.s_filtered = data.s_filtered\n self.index = data.index\n\n def _behavior_callback(self,data):\n\n if self._initialized and self._running:\n time_now = rospy.get_time()\n if (time_now - self._last_update_time) >= self._feedback_period:\n self._last_update_time = time_now\n\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_background_channel\n cmd_current.current = 0\n self._cmd_light_current_pub.publish(cmd_current)\n\n # See if bandit is being played.\n # self._features_pub(play_bandit_game)\n if (self.play_bandit_game == False):\n # See if neither wait sequence is running.\n # There is a 2 second buffer where the larva must run in a forward line.\n # After completion of the 2 second run, there is a 13 second wait.\n
 if (self.run_forward_two_sec == False and self.wait_thirteen_sec == False):\n # If not, then attempt to initiate the 2-sec forward/run.\n if (data.b_bend == False and data.b_run):\n self.run_forward_two_sec = True\n self.begin_run_forward_two_sec = self.index\n rospy.loginfo('Initiate 2-sec run: %d', self.begin_run_forward_two_sec)\n # Beginning the 13-sec wait after finishing the 2-sec forward run\n # ensures both wait sequences never occur simultaneously\n elif (self.run_forward_two_sec == True):\n # See if it's been 2 seconds\n if (self.index - self.begin_run_forward_two_sec > 50*2):\n self.run_forward_two_sec = False\n # Initialize 13-sec wait sequence\n self.wait_thirteen_sec = True\n self.begin_thirteen_sec_wait = self.index\n rospy.loginfo('Begin 13-sec wait, Index: %d', self.begin_thirteen_sec_wait)\n # See if the larva still runs forward. Otherwise reset.\n else:\n if (data.b_bend == True or data.b_run == False):\n self.run_forward_two_sec = False\n # In general, it is good to ensure that the 13 second wait does not begin falsely.\n self.wait_thirteen_sec = False\n\n # 13 second wait.\n elif (self.wait_thirteen_sec == True):\n # See if it's been 13 seconds. If so, begin bandit game.\n if (self.index - self.begin_thirteen_sec_wait > 50*13):\n self.wait_thirteen_sec = False\n self.play_bandit_game = True\n self.begin_bandit_game = self.index\n rospy.loginfo('End 13-sec wait, Index: %d', self.index)\n # If not, then we do nothing.\n # Every time we don't play the bandit game, we make sure everything is quiescent.\n # rospy.loginfo(\"STOPPED: %d\", (self.index - self.firstindex + 1 + 500) % 1500)\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n cmd_current.current = 0\n self._cmd_light_current_pub.publish(cmd_current)\n\n # The bandit game is being played. 
Now we count indices:\n else:\n if ((self.index - self.begin_bandit_game) <= self.impulse_width ):\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_background_channel\n # I might be able to speed up the frame-rate here if I sample from a small array, approximately gaussian.\n cmd_current.current = int(random.gauss(self._stochastic_background_current,20)) + self._stochastic_basal_current\n self._cmd_light_current_pub.publish(cmd_current)\n rospy.loginfo(\"BLUE: %d\", cmd_current.current)\n\n # Now we want to play the game for as long as the larva does not run forward for 2 seconds, after at least 10 seconds of gameplay.\n elif (self.index - self.begin_bandit_game) > self.impulse_width and (self.index - self.begin_bandit_game) <= self.game_length :\n # else:\n if data.b_bend and data.b_left == False and data.b_right == False:\n self.bend_state = 0\n elif data.b_bend and data.b_left and (self.s_filtered < 0.8):\n if self.bend_state == 0 or self.bend_state == 1:\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n self.bernoulli_left = random.random()\n self.left_current = self._stochastic_stimulus_current*(self.bernoulli_left < self._stochastic_left_probability)\n cmd_current.current = self.left_current\n self._cmd_light_current_pub.publish(cmd_current)\n self.bend_state = -1\n rospy.loginfo('Bernoulli left: %f, Current Left: %d',self.bernoulli_left, self.left_current)\n elif self.bend_state == -1:\n # Here we make sure sampling is every cast, not every frame.\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n cmd_current.current = self.left_current\n self._cmd_light_current_pub.publish(cmd_current)\n # rospy.loginfo('Current Left: %d',self.left_current)\n elif data.b_bend and data.b_right and (self.s_filtered < 0.8):\n if self.bend_state == 0 or self.bend_state == -1:\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n self.bernoulli_right = random.random()\n self.right_current = self._stochastic_stimulus_current*(self.bernoulli_right < self._stochastic_right_probability)\n cmd_current.current = self.right_current\n self.bend_state = 1\n self._cmd_light_current_pub.publish(cmd_current)\n rospy.loginfo('Bernoulli Right: %f, Current Right: %d',self.bernoulli_right, self.right_current)\n elif self.bend_state == 1:\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n cmd_current.current = self.right_current\n self._cmd_light_current_pub.publish(cmd_current)\n # rospy.loginfo('Current Right: %d',self.right_current)\n else:\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n cmd_current.current = 0\n self._cmd_light_current_pub.publish(cmd_current)\n else:\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_stimulus_channel\n cmd_current.current = 0\n self._cmd_light_current_pub.publish(cmd_current)\n self.play_bandit_game = False\n self.wait_thirteen_sec = False\n self.run_forward_two_sec = False\n rospy.loginfo('Bandit game is finished. 
Index: %d', self.index)\n\n # Below is the protocol for extending the game past self.game_length\n # if (self.index - self.begin_bandit_game) > self.game_length:\n # if (self.run_forward_two_sec == False):\n # if (data.b_bend == False and data.b_run == True):\n # # We begin keeping track of how long larvae has been running forward, but only after 10 seconds of game play.\n # # There are reasons for and against this, but the case that wins this line of reasoning for me\n # # is the one where the larvae learns to perform a single-cast. Say a larva single-casts in ~2 seconds, begins running\n # # forward for 2 seconds. During the next 6 seconds, it might decide to cast once more. This extra sample is extremely useful\n # # because it informs the larva whether it made the right decision. Without it, the larva will learn that the best decision\n # # is the one that is made quickly. This is something I would like to avoid.\n #\n # # Mainly, this more-than-10-seconds condition is for extended multi-casts, and so we should enforce a 10-second game.\n # self.run_forward_two_sec = True\n # self.begin_run_forward_two_sec = self.index\n # else:\n # # We send protocol to thirteen second wait once larva has begun running forward for two seconds.\n # if (self.index - self.begin_run_forward_two_sec > 50*1):\n # self.play_bandit_game = False\n # self.wait_thirteen_sec = True\n # self.run_forward_two_sec = False\n # self.begin_thirteen_sec_wait = self.index\n # rospy.loginfo('Bandit game is finished. Index: %d', self.index)\n # else:\n # if (data.b_bend == True or data.b_run == False):\n # self.run_forward_two_sec = False\n\n def _start_callback(self,stimuli):\n self._stopped = False\n self._begin = True\n # rospy.loginfo('begin here')\n self._stochastic_stimulus_channel = stimuli.stochastic_stimulus_channel\n self._stochastic_stimulus_current = stimuli.stochastic_current\n self._stochastic_left_probability = stimuli.stochastic_left_probability\n self._stochastic_right_probability = stimuli.stochastic_right_probability\n self._stochastic_background_channel = stimuli.stochastic_background_channel\n self._stochastic_background_current = stimuli.stochastic_background_current\n\n cmd_current = CmdCurrent()\n cmd_current.channel = self._stochastic_background_channel\n cmd_current.current = 0\n self._cmd_light_current_pub.publish(cmd_current)\n\n rospy.loginfo('stochastic sleeping for {0} secs before starting'.format(stimuli.wait.to_sec()))\n rospy.sleep(stimuli.wait)\n # check to see if stopped while waiting\n if not self._stopped:\n rospy.loginfo('head_stochastic starting with stochastic_stimulus_current = {0}, stochastic_left_probability = {1}, stochastic_right_probability = {2}'.format(stimuli.stochastic_current,stimuli.stochastic_left_probability,stimuli.stochastic_right_probability))\n self._running = True\n\n def _stop_callback(self,data):\n self._running = False\n self._stopped = True\n rospy.loginfo('stochastic stopping')\n self._cmd_all_lights_off_pub.publish(Empty())\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('stochastic_node')\n hcc = StochasticController()\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"lavis_stimuli_controller/nodes/stochastic_node.py","file_name":"stochastic_node.py","file_ext":"py","file_size_in_byte":14787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"46808157","text":"import sqlite3\n\nclass DbDateTime:\n def __init__(self):\n self._db = None\n\n @property\n def db(self):\n if 
self._db is None:\n self._db = sqlite3.connect(\":memory:\")\n return self._db\n\n def datetime(self,time,modifier=None):\n if modifier is not None:\n parameters=(time,modifier)\n sql = \"select julianday(datetime(?,?))\"\n else:\n parameters=(time,)\n sql = \"select julianday(datetime(?))\"\n cursor = self.db.execute(sql,parameters)\n return (list(cursor))[0][0]\n\n def format(self,julianday,format='%Y-%m-%d %H:%M:%S utc'):\n parameters=(format,julianday)\n sql = \"select strftime(?,?)\"\n cursor = self.db.execute(sql,parameters)\n return (list(cursor))[0][0]\n\n def now(self):\n return self.datetime('now')\n","sub_path":"dbdatetime.py","file_name":"dbdatetime.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621845190","text":"import io\n\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\nfrom mondas.bot import Bot, Answer\n\n\nclass BotTest(TestCase):\n\n def setUp(self):\n self.bot = Bot(\"123\")\n\n def testYouCanAskAQuestion(self):\n bot = self.bot\n\n response = bot.ask(\"This is a question\")\n\n self.assertIsNotNone(response)\n\n def testItCanSayThings(self):\n bot = self.bot\n\n bot.say(\"This is a response\")\n\n self.assertEqual([\"This is a response\"], bot.output)\n\n def testItCanBeCleared(self):\n bot = self.bot\n\n bot.say(\"This is a response\")\n bot.say(\"This is another response\")\n\n self.assertEqual(\n [\"This is a response\", \"This is another response\"],\n bot.output\n )\n bot.clear()\n\n self.assertEqual([], bot.output)\n\n def testItHasASession(self):\n self.assertIsNotNone(self.bot.session_id)\n\n def testItHasAnApiAIObject(self):\n bot = self.bot\n\n self.assertIsNotNone(bot.ai)\n self.assertEqual(\"123\", bot.ai.client_access_token)\n\n def testAskFiresAnApiAIRequest(self):\n bot = self.bot\n mockApi = Mock()\n mockRequest = Mock()\n mockApi.text_request.return_value = mockRequest\n mockRequest.getresponse.return_value = io.StringIO(\"{}\")\n bot.ai = mockApi\n\n bot.ask(\"This is a test\")\n\n mockApi.text_request.assert_called()\n mockRequest.getresponse.assert_called()\n self.assertEqual(\"This is a test\", mockRequest.query)\n\n def testHandlesResponses(self):\n handler = Mock()\n handler.action = \"TestAction\"\n\n bot = self.bot\n\n bot.handlers[\"TestAction\"] = handler\n\n answer = Answer()\n answer.status = True\n answer.action = \"TestAction\"\n\n bot.handle(answer)\n\n handler.handle.assert_called_with(answer)\n","sub_path":"tests/bot/test_bot.py","file_name":"test_bot.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116335127","text":"#!/usr/bin/python3\n\n\"\"\"Runs data against the cl model.\n\"\"\"\n\n__author__ = \"Martin Eigenmann\"\n__license__ = \"unlicence\"\n__version__ = \"0.0.1\"\n__email__ = \"martin.eigenmann@unisg.ch\"\n__status__ = \"Prototype\"\n\nimport pandas as pd\nimport sys\nimport torch\nimport numpy as np\n\nclass NN(torch.nn.Module):\n def __init__(self, input_dimension,number_of_target_classes):\n super().__init__()\n # stack layers\n self.module_list = torch.nn.ModuleList()\n ## stacks consist of linear + nonlinear layers\n self.module_list.append(torch.nn.Linear(input_dimension,20))\n self.module_list.append(torch.nn.Tanh())\n #\n self.module_list.append(torch.nn.Linear(20,20))\n self.module_list.append(torch.nn.Tanh())\n #\n self.module_list.append(torch.nn.Linear(20,10))\n 
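# nonlinearity for the narrowed 10-unit layer\n 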
self.module_list.append(torch.nn.Tanh())\n # map to output layer\n self.module_list.append(torch.nn.Linear(10,number_of_target_classes))\n \n def forward(self,x):\n for module in self.module_list:\n x = module(x)\n return x\n\n\nwith torch.no_grad():\n df = pd.read_csv(sys.stdin)\n\n device = torch.device('cpu')\n model2 = NN(17,2)\n model2.load_state_dict(torch.load('/processors/torch', map_location=device))\n model2.eval()\n data_to_predict = df.drop(['b_start', 'b_duration', 'e_start', 'e_duration', 'e_distance','study','subject'], axis=1).to_numpy()[0]\n prediction = model2(torch.from_numpy(data_to_predict).float())\n predicted_class = np.argmax(prediction)\n\n b_start = int(df.iloc[0].b_start)\n e_start = int(df.iloc[0].e_start)\n study = df.iloc[0].study\n subject = df.iloc[0].subject\n\n print(f'prediction_class,b_start,e_start,study,subject')\n print(f'{predicted_class},{b_start},{e_start},{study},{subject}')\n","sub_path":"nifi/processors/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263842425","text":"#coding=utf-8\nfrom django.conf.urls import patterns, include, url\nfrom ansibleweb import views\n\nurlpatterns = [\n #url(r'$', views.ansible,name='ansible'),\n url(r'^cmd/$', views.ansiblecmd,name='ansiblecmd'),\n url(r'^batch/$',views.ansiblebatch, name='ansiblebatch'),\n url(r'^detail/(?P\\d+)/$',views.ansibledetail, name='ansibledetail'),\n url(r'^retry/(?P\\d+)/$', views.ansibleretry, name='ansibleretry'),\n url(r'^rerun/(?P\\d+)/$', views.ansiblererun, name='ansiblererun'),\n url(r'^group/$',views.ansiblegroup,name='ansiblegroup'),\n url(r'^log/$', views.ansiblelog,name='ansiblelog'),\n url(r'^inspection/$', views.ansibleinspection,name='ansibleinspection'),\n #url(r'^key/$', views.ansiblekey,name='ansiblekey'),\n #url(r'^playbook/$', views.ansibleplaybook,name='ansibleplaybook'),\n]","sub_path":"ansibleweb/ansible_urls.py","file_name":"ansible_urls.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244835067","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport streamlit as st\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LinearRegression \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error\n\n@st.cache()\ndef prediction(car_df, car_width, engine_size, horse_power, drive_wheel_fwd, car_comp_buick):\n\tX = car_df.iloc[:, :-1]\n\ty = car_df[\"price\"]\n\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)\n\tlin_reg = LinearRegression()\n\tlin_reg.fit(X_train, y_train)\n\tscore = lin_reg.score(X_train, y_train)\n\tprice = lin_reg.predict([[car_width, engine_size, horse_power, drive_wheel_fwd, car_comp_buick]])\n\tprice = price[0]\n\ty_test_pred = lin_reg.predict(X_test)\n\ttest_r2_score = r2_score(y_test, y_test_pred)\n\ttest_mae = mean_absolute_error(y_test, y_test_pred)\n\ttest_msle = mean_squared_log_error(y_test, y_test_pred)\n\ttest_rmse = np.sqrt(mean_squared_error(y_test, y_test_pred))\n\treturn(price, score, test_r2_score, test_mae, test_msle, test_rmse)\ndef app(car_df):\n\tst.subheader(\"Select Values\")\n\tcar_width = st.slider(\"Car Width\", 
float(car_df[\"carwidth\"].min()), float(car_df[\"carwidth\"].max()))\n\tengine_size = st.slider(\"Engine Size\", float(car_df[\"enginesize\"].min()), float(car_df[\"enginesize\"].max()))\n\thorse_power = st.slider(\"Horse Power\", float(car_df[\"horsepower\"].min()), float(car_df[\"horsepower\"].max()))\n\td_fwd = st.radio(\"Is it a Forward Drive Wheel Car?\", (\"Yes\", \"No\"))\n\tif d_fwd == \"No\":\n\t\td_fwd = 0\n\telse:\n\t\td_fwd = 1\n\tcom_bui = st.radio(\"Is the car manufactured by Buick\", (\"Yes\", \"No\"))\n\tif com_bui == \"No\" :\n\t\tcom_bui = 0\n\telse:\n\t\tcom_bui = 1\n\tif st.button(\"Predict\"):\n\t\tst.subheader(\"Prediction Results\")\n\t\tprice, score, car_r2, car_mae, car_msle, car_rmse = prediction(car_df, car_width, engine_size, horse_power, d_fwd, com_bui)\n\t\tst.success(f\"The predicted price of the car: {int(price)}\")\n\t\tst.info(f\"Accuracy score of the model: {score}\")\n\t\tst.info(f\"R2 Score: {car_r2}\")\n\t\tst.info(f\"Mean Absolute Error: {car_mae}\")\n\t\tst.info(f\"Root Mean Squared Error: {car_rmse}\")\n\t\tst.info(f\"Mean Squared Log Error: {car_msle}\")","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88422758","text":"\"\"\"Contains all events of a given day.\"\"\"\nclass EventList:\n\n errors = [] # Contains errors that occur during scrape\n\n def __init__(self, day, events):\n \"\"\"Construct an event list object.\n\n Args:\n day (str): The calendar date of the events\n events (list): A list of events\n\n Returns:\n EventList: An event list object.\n \"\"\"\n self.day = day\n self.events = events\n\n def to_json(self):\n \"\"\"Returns a JSON representation of the object.\n\n Returns: dict: A JSON representation of the object.\n \"\"\"\n j = {}\n j['day'] = self.day\n j['events'] = [e.to_json() for e in self.events]\n j['errors'] = self.errors\n return j","sub_path":"python/modules/EventList.py","file_name":"EventList.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182767292","text":"import boto3\n\ndynamo = boto3.resource('dynamodb',\n endpoint_url='http://localhost:8000',\n region_name='dummy',\n aws_access_key_id='dummy',\n aws_secret_access_key='dummy')\n\nt = dynamo.Table('CS411')\nt.delete()\n","sub_path":"tools/drop_local_table.py","file_name":"drop_local_table.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198758304","text":"# from . import cv2_transforms as transforms\n# import torchvision.transforms as transforms\n\n\ndef build_transforms(cfg_transforms):\n cfg_transforms = cfg_transforms.copy()\n transforms_list = list()\n for item in cfg_transforms:\n transforms_type = item.pop(\"type\")\n\n if transforms_type == 'transforms_type':\n if item['backend'] == 'cv2':\n from . import cv2_transforms as transforms\n else:\n # import torchvision.transforms as transforms\n from . 
import transforms as transforms\n else:\n transforms_params = item\n # for debug\n # print(transforms_params)\n if hasattr(transforms, transforms_type):\n # for debug\n # print('transforms: ', transforms)\n # print(getattr(transforms, transforms_type))\n transforms_list.append(getattr(transforms, transforms_type)(**transforms_params))\n else:\n raise ValueError(\"'type' {} of transforms is not defined!\".format(transforms_type))\n\n # print(transforms_list)\n return transforms.Compose(transforms_list)\n","sub_path":"classification/data/transforms/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220845901","text":"import pymysql\nimport logging\nimport datetime\nimport xmltodict\n\n\nfile = 'purchaseNotice_Brianskaya_obl_20160801_000000_20160831_235959_001.xml'\n\nfile_log = './log_tenders223/tenders223_ftp_' + str(datetime.date.today()) + '.log'\nlogging.basicConfig(level=logging.DEBUG, filename=file_log,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n\ndef generator_univ(c):\n if isinstance(c, list):\n for i in c:\n yield i\n else:\n yield c\n\n\nclass Tender223:\n\n def __init__(self, tender223):\n self.tender223 = None\n tender_body = None\n tender_purchase = None\n tender_root = list(tender223.keys())\n tender_child = tender223[tender_root[0]]\n tender_child_2_list = list(tender_child.keys())\n for i in tender_child_2_list:\n if i.startswith('body') or i.startswith('ns2:body') or i.startswith('oos:body'):\n tender_body = tender_child[i]\n break\n if tender_body is not None:\n tender_item_list = list(tender_body.keys())\n for j in tender_item_list:\n if j.startswith('item') or j.startswith('ns2:item') or j.startswith('oos:item'):\n tender_purchase = tender_body[j]\n break\n if tender_purchase is not None:\n tender_purchase_list = list(tender_purchase.keys())\n for m in tender_purchase_list:\n if m.startswith('purchase') or m.startswith('ns2:purchase') or m.startswith('oos:purchase'):\n self.tender223 = tender_purchase[m]\n break\n\n @property\n def id_t(self):\n id_ten = 0\n if 'ns2:guid' in self.tender223:\n id_ten = self.tender223['ns2:guid']\n elif 'oos:guid' in self.tender223:\n id_ten = self.tender223['oos:guid']\n elif 'guid' in self.tender223:\n id_ten = self.tender223['guid']\n if id_ten is None:\n id_ten = 0\n return id_ten\n\n @property\n def purchaseNumber(self):\n purchaseNumber = ''\n if 'ns2:registrationNumber' in self.tender223:\n purchaseNumber = self.tender223['ns2:registrationNumber']\n elif 'oos:registrationNumber' in self.tender223:\n purchaseNumber = self.tender223['oos:registrationNumber']\n elif 'registrationNumber' in self.tender223:\n purchaseNumber = self.tender223['registrationNumber']\n if purchaseNumber is None:\n purchaseNumber = ''\n return purchaseNumber\n\n @property\n def docPublishDate(self):\n docPublishDate = ''\n if 'ns2:publicationDateTime' in self.tender223:\n docPublishDate = self.tender223['ns2:publicationDateTime']\n elif 'oos:publicationDateTime' in self.tender223:\n docPublishDate = self.tender223['oos:publicationDateTime']\n elif 'publicationDateTime' in self.tender223:\n docPublishDate = self.tender223['publicationDateTime']\n if docPublishDate is None:\n docPublishDate = ''\n return docPublishDate\n\n def organizer_mainInfo(self, d):\n try:\n organizer_mainInfo = self.tender223['ns2:placer']['mainInfo'][d]\n except Exception:\n try:\n organizer_mainInfo = 
self.tender223['oos:placer']['mainInfo'][d]\n except Exception:\n try:\n organizer_mainInfo = self.tender223['placer']['mainInfo'][d]\n except Exception:\n organizer_mainInfo = ''\n\n if organizer_mainInfo is None:\n organizer_mainInfo = ''\n return organizer_mainInfo\n\n @property\n def placingWay_code(self):\n placingWay_code = ''\n if 'ns2:purchaseMethodCode' in self.tender223:\n placingWay_code = self.tender223['ns2:purchaseMethodCode']\n elif 'oos:purchaseMethodCode' in self.tender223:\n placingWay_code = self.tender223['oos:purchaseMethodCode']\n elif 'purchaseMethodCode' in self.tender223:\n placingWay_code = self.tender223['purchaseMethodCode']\n if placingWay_code is None:\n placingWay_code = ''\n return placingWay_code\n\n def electronicPlaceInfo(self, d):\n try:\n electronicPlaceInfo = self.tender223['ns2:electronicPlaceInfo'][d]\n except Exception:\n try:\n electronicPlaceInfo = self.tender223['oos:electronicPlaceInfo'][d]\n except Exception:\n try:\n electronicPlaceInfo = self.tender223['electronicPlaceInfo'][d]\n except Exception:\n electronicPlaceInfo = ''\n\n if electronicPlaceInfo is None:\n electronicPlaceInfo = ''\n return electronicPlaceInfo\n\n def get_attachments(self):\n if 'ns2:attachments' in self.tender223:\n if self.tender223['ns2:attachments'] is None:\n return []\n elif 'document' in self.tender223['ns2:attachments']:\n return generator_univ(self.tender223['ns2:attachments']['document'])\n else:\n return []\n elif 'oos:attachments' in self.tender223:\n if self.tender223['oos:attachments'] is None:\n return []\n elif 'document' in self.tender223['oos:attachments']:\n return generator_univ(self.tender223['oos:attachments']['document'])\n else:\n return []\n elif 'attachments' in self.tender223:\n if self.tender223['attachments'] is None:\n return []\n elif 'document' in self.tender223['attachments']:\n return generator_univ(self.tender223['attachments']['document'])\n else:\n return []\n else:\n return []\n\n def attach(self, attachment, d):\n try:\n ret_attach = attachment[d]\n except Exception:\n ret_attach = ''\n if ret_attach is None:\n ret_attach = ''\n return ret_attach\n\n def customer(self, d):\n try:\n customer_ret = self.tender223['ns2:customer']['mainInfo'][d]\n except Exception:\n try:\n customer_ret = self.tender223['oos:customer']['mainInfo'][d]\n except Exception:\n try:\n customer_ret = self.tender223['customer']['mainInfo'][d]\n except Exception:\n customer_ret = ''\n\n if customer_ret is None:\n customer_ret = ''\n return customer_ret\n\n def contact(self, d):\n try:\n contact_ret = self.tender223['ns2:contact'][d]\n except Exception:\n try:\n contact_ret = self.tender223['oos:contact'][d]\n except Exception:\n try:\n contact_ret = self.tender223['contact'][d]\n except Exception:\n contact_ret = ''\n\n if contact_ret is None:\n contact_ret = ''\n return contact_ret\n\n def get_lots(self):\n if 'ns2:lots' in self.tender223:\n if self.tender223['ns2:lots'] is None:\n return []\n elif 'lot' in self.tender223['ns2:lots']:\n return generator_univ(self.tender223['ns2:lots']['lot'])\n else:\n return []\n elif 'ns2:lot' in self.tender223:\n if self.tender223['ns2:lot'] is None:\n return []\n else:\n return generator_univ(self.tender223['ns2:lot'])\n elif 'oos:lots' in self.tender223:\n if self.tender223['oos:lots'] is None:\n return []\n elif 'oos:lot' in self.tender223['oos:lots']:\n return generator_univ(self.tender223['oos:lots']['oos:lot'])\n else:\n return []\n elif 'oos:lot' in self.tender223:\n if self.tender223['oos:lot'] is None:\n return []\n 
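The `ns2:` / `oos:` / bare-key lookups repeat throughout `Tender223`, both as if/elif chains and as nested try/except blocks. A minimal sketch of one helper that could replace them, assuming the xmltodict-style dicts used above; the name `ns_get` is hypothetical and not part of the original file:

def ns_get(mapping, key, default=''):
    # Try the three namespace prefixes used by the 223-FZ feeds in turn.
    for prefix in ('ns2:', 'oos:', ''):
        value = mapping.get(prefix + key)
        if value is not None:
            return value
    return default

# e.g. placingWay_code could then reduce to:
#     return ns_get(self.tender223, 'purchaseMethodCode')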
else:\n return generator_univ(self.tender223['oos:lot'])\n elif 'lots' in self.tender223:\n if self.tender223['lots'] is None:\n return []\n elif 'lot' in self.tender223['lots']:\n return generator_univ(self.tender223['lots']['lot'])\n else:\n return []\n elif 'lot' in self.tender223:\n if self.tender223['lot'] is None:\n return []\n else:\n return generator_univ(self.tender223['lot'])\n else:\n return []\n\n def get_lotitems(self, lot):\n if 'lotData' in lot:\n if 'lotItems' in lot['lotData']:\n if lot['lotData']['lotItems'] is None:\n return []\n elif 'lotItem' in lot['lotData']['lotItems']:\n return generator_univ(lot['lotData']['lotItems']['lotItem'])\n else:\n return []\n else:\n return []\n else:\n return []\n\n def lot_max_price(self, lot):\n try:\n lot_max_price = lot['lotData']['initialSum']\n except Exception:\n lot_max_price = ''\n if lot_max_price is None:\n lot_max_price = ''\n return lot_max_price\n\n def okpd2_code(self, item):\n try:\n okpd2_code = item['okpd2']['code']\n except Exception:\n okpd2_code = ''\n if okpd2_code is None:\n okpd2_code = ''\n return okpd2_code\n\n\ndef parser_type_223(doc, path_xml, filexml, reg, reg_id):\n tender = Tender223(doc)\n id_t = tender.id_t\n lots = tender.get_lots()\n for lot in lots:\n lot_max_price = tender.lot_max_price(lot)\n print(lot_max_price)\n lotitems = tender.get_lotitems(lot)\n for lotitem in lotitems:\n okpd2_code = tender.okpd2_code(lotitem)\n print(okpd2_code)\n\n\n\ndef parser(doc, path_xml, filexml, reg, reg_id):\n global file_log\n try:\n parser_type_223(doc, path_xml, filexml, reg, reg_id)\n except Exception:\n logging.exception(\"Error while parsing a type-223 tender: \")\n with open(file_log, 'a') as flog:\n flog.write('Error while parsing a type-223 tender ' + ' ' + path_xml + ' ' + '\\n\\n\\n')\n\nwith open(file) as f:\n doc = xmltodict.parse(f.read())\nparser(doc, file, file, 32, 32)","sub_path":"test223.py","file_name":"test223.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"631359078","text":"from flask import Flask, redirect, render_template, request, jsonify\n\napp = Flask(__name__)\n\napp.config['DEBUG'] = True\n\n@app.route(\"/\")\ndef hello():\n return \"Hello World\"\n\n@app.route(\"/greeting\")\ndef greeting():\n return render_template('index.html', name='Akilah')\n\n@app.route(\"/pie\")\ndef pie():\n ingredients=['apples', 'butter', 'sugar', 'flour']\n return jsonify({'pie ingredient': ingredients[0]})\n\ningredients=['apples', 'butter', 'sugar', 'flour', 'baking soda']\n@app.route(\"/recipe\", methods=['GET','POST'])\ndef recipe():\n if request.method == 'POST':\n new_ingredient = request.form[\"ingredient\"]\n ingredients.append(new_ingredient)\n return redirect('/recipe')\n else:\n return render_template('pie.html', name=\"apple pie\", ingredients=ingredients)\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"295183505","text":"#!/usr/bin/env python3\nimport pathlib\nimport sys\n\nimport setuptools\nfrom setuptools.command.install import install\nimport os\nfrom genos.version import VERSION\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to 
verify that the git tag matches our version\"\"\"\n description = 'verify that the git tag matches our version'\n\n def run(self):\n tag = os.getenv('CIRCLE_TAG')\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}. \" \\\n \"Update version in src/genos/version.py\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name='genos',\n version=VERSION,\n description='Instantiate objects and call functions using dictionary configs in Python using Genos.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/Neural-Space/genos',\n author='Kushal Jain',\n author_email='kushal@neuralspace.ai',\n keywords='instantiation, objects, recursive instantiation, function call, config instantiate',\n license='MIT',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Software Development :: Build Tools\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Internet\",\n \"Programming Language :: Python :: 3.7\",\n ],\n install_requires=[\n \"omegaconf~=2.0.4\"\n ],\n python_requires='>=3.7, <4',\n package_dir={'': 'src'},\n packages=setuptools.find_packages(where='src'),\n cmdclass={\n 'verify': VerifyVersionCommand,\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"42468811","text":"# -*- coding: utf-8 -*-\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom pandas import ExcelWriter\r\nimport pandas as pd\r\nimport requests\r\nimport time\r\nimport re\r\n\r\nptt='https://www.ptt.cc'\r\nurlget=[]\r\npo=[]\r\nhot_name=[]\r\n# get the hot sub-board URLs\r\nhot_url='https://www.ptt.cc/bbs/hotboards.html'\r\nres=requests.get(hot_url)\r\n\r\nhot_soup=BeautifulSoup(res.text,'lxml')\r\n# hot sub-board URLs + board names\r\nfind_hot_url=hot_soup.find_all('div',class_='b-ent')\r\nfor show_hot_url in find_hot_url:\r\n\tlist_hot_url=show_hot_url.find('a')['href']\r\n\turlget.append(list_hot_url)\r\n\tlist_hot_name=show_hot_url.find('div',class_='board-name').text\r\n\thot_name.append(list_hot_name)\r\nprint('got hot sub-board URLs')\r\n# total post counts of the hot sub-boards\r\nfind_hot_push=hot_soup.find_all('div','board-nuser')\r\nfor show_hot_push in find_hot_push:\r\n\tlist_hot_push=show_hot_push.find('span').text\r\n\tpo.append(list_hot_push)\r\nfor x in urlget:\r\n\turl2=(ptt+x)\t\r\n\r\n# target URL to scrape\r\nurl = 'https://www.ptt.cc/bbs/Gossiping/index38933.html' \r\n\r\n#response = requests.get(url)\r\n# confirm age 18+ to enter the page\r\n#response = requests.get(url, cookies={'over18': '1'}) \r\n#content = response.content\r\n\r\n# pagination / this differs from the usual way of getting links\r\n#next=[]\r\n#next_page=soup.find(\"div\",class_=\"btn-group btn-group-paging\")\r\n#next2=next_page.find_all(\"a\",href=True)\r\n#for p in next2:\r\n\t#u=p['href']\r\n\t#next.append(u) \r\n#next_url=\"next :\"+ptt+next[1]\r\n\r\n# collect info: empty lists
\r\ntheme=[]\r\ndate=[]\r\nauthor_id=[]\r\nrecommend=[]\r\nhttp=[]\r\nabout_author=[]\r\n#---\r\n\r\narticle_date=[]\r\narticle_author=[]\r\narticle=[]\r\narticle_ip=[]\r\narticle_msg=[]\r\narticle_old=[]\r\narticle_old_author=[]\r\nmsg_date=[]\r\nmsg_id=[]\r\nmsg_rec=[]\r\n\r\n\r\n# single-record test\r\n#data_all{'title':entry.find}\r\n#a=soup.find(\"div\",class_=\"r-ent\")\r\n#print(a.find(\"div\",class_=\"title\").text)# title\r\n#print(ptt+a.find(\"a\")[\"href\"])# URL\r\n#print(a.find(\"div\",class_=\"author\").text)# author\r\n#print(a.find(\"span\"))# push count; .text only works when != 0\r\n#print(a.find(\"div\",class_=\"date\").text)\r\n#b=a.find(\"div\",class_=\"item\")\r\n#print(ptt+b.find(\"a\")[\"href\"])# other articles by this author\r\n\r\n# marker string for deleted posts\r\ndel_word=\"刪除\"\r\ntargetIP = u'※ 發信站: 批踢踢實業坊'\r\n# headless (hidden browser)\r\noption = webdriver.ChromeOptions()\r\noption.add_argument('headless')\r\n# browser automation setup\r\ndriver=webdriver.Chrome( )\r\ndriver.get(url)\r\ndriver.find_element_by_name('yes').click()\r\nprint(\"opening url: \"+url)\r\nsoup = BeautifulSoup(driver.page_source,'lxml')\r\nprint(\"browsing board: \"+soup.title.text)# board title\r\n\r\n\r\n\r\n# find-data loop\r\ncount=1\r\npage=0\r\nwhile page<1: # number of pages to scrape\r\n\t \r\n\tinfo_all =soup.find_all(\"div\", class_=\"r-ent\")\r\n\tprint(\"page \"+str(count))\r\n\tcount=count+1\r\n\t \r\n\t# author's related articles; empty value means not shown (test)\r\n\tinfo_about=soup.find_all(\"div\",class_=\"item\")\r\n\tfor infos3 in info_about:\r\n\t\tabout=ptt+infos3.find(\"a\")[\"href\"]\r\n\t\tif about is None:\r\n\t\t\tabout=\"can't find\"\r\n\t\t\tabout_author.append(about)\r\n\t\telse:\r\n\t\t\tabout_author.append(about)\r\n\t# push (upvote) count\r\n\tfor infos2 in info_all:\r\n\t\trec=infos2.find(\"span\")\r\n\t\tif rec is None:\r\n\t\t\trec=\"0\"\r\n\t\t\trecommend.append(rec)\r\n\t\telse:\r\n\t\t\trecommend.append(rec.text)\r\n\t\t\t\r\n\tfor infos in info_all:\r\n\t    title1=infos.find(\"div\",class_=\"title\").text \r\n\t    #theme.append(title1)\r\n\t    author1=infos.find(\"div\", class_=\"author\").text\r\n\t    author2=author1\r\n\t    author_id.append(author2)\r\n\t    date1=infos.find(\"div\",class_=\"date\").text\r\n\t    date2=date1\r\n\t    date.append(date2) \r\n\t# deleted posts: show a placeholder for the title and URL \r\n\t    if (del_word) in title1:\r\n\t    \tweb=\"[None]文章被刪除\"\r\n\t    \thttp_none=\"[None]網址被移除\"\r\n\t    \ttheme.append(web)\r\n\t    \thttp.append(http_none)\r\n\t    \r\n\t    else:\r\n\t    \ttheme.append(title1)\r\n\t    \tweb2=infos.find(\"a\")[\"href\"]\r\n\t    \thttp.append(ptt+web2)\r\n\t# fetch article body\r\n\t\r\n\r\n\t\t#print()\r\n\t\r\n\t# next-page XPath location; error-prone\r\n\tdriver.find_element_by_xpath('//*[@id=\"action-bar-container\"]/div/div[2]/a[2]').click()\r\n\t#driver.find_element_by_link_text('上頁').click()\r\n\tpage=page+1\r\n\ttime.sleep(0.1)\r\n\t\r\n\r\nprint(\"done reading all index pages, fetching article contents\")\r\ndriver.close()\r\nfor http_web in http:\r\n\tif http_web==\"[None]網址被移除\":\r\n\t\tarticle_date.append(\"-\")\r\n\t\tarticle_author.append(\"-\")\r\n\t\tarticle.append(\"-\")\r\n\t\tarticle_ip.append(\"-\")\r\n\t\tarticle_msg.append(\"-\")\r\n\t\tarticle_old.append(\"-\")\r\n\t\tarticle_old_author.append(\"-\")\r\n\t\tmsg_date.append(\"-\")\r\n\t\tmsg_id.append(\"-\")\r\n\t\tmsg_rec.append(\"-\")\r\n\telse:\r\n\t\thttp_get=requests.get(http_web,cookies={'over18': '1'})\r\n\t\tsoup_http=BeautifulSoup(http_get.text,'lxml')\r\n\t\tfind_push_content=soup_http.find_all('div',class_='push')\r\n\t\tptt_content=soup_http.find_all('meta', property=\"og:description\")\r\n\t\tip = soup_http.find(string=re.compile(targetIP))\r\n\t\tip = re.search(r\"[0-9]*\\.[0-9]*\\.[0-9]*\\.[0-9]*\", ip).group()\r\n\t\tarticle_ip.append(ip)\r\n\tfor p_content in 
find_push_content:\r\n\t\tpush_content=p_content.find('span',class_='f3 push-content').text\r\n\t\tarticle_msg.append(push_content)\r\n\t\t\r\n\tfor ptt_content2 in ptt_content:\r\n\t\tptt_content3=ptt_content2.get('content')\r\n\t\tarticle.append(ptt_content3)\r\n\t\r\n\t\t\r\n \r\n\r\nprint(\"writing csv\")\t\t\r\n\r\n\r\n# output\r\n\r\n#for a in theme:\r\n\t#print(a)\r\n#for b in http:\r\n\t#print(b)\r\n#for c in author_id:\r\n\t#print(c)\r\n#for d in date:\r\n\t#print(d)\r\n#for e in about_author:\r\n\t#print(e)\r\n#for f in recommend:\r\n\t#print(f)\t \r\n#---\r\n#for g in article:\r\n\t#print(g)\r\n#for h in article_msg:\r\n\t#print(h)\r\n#for i in article_ip:\r\n\t#print(i)\t \r\n#dict={\"recommend\":recommend,\"title\":theme,\"author\":author_id,\t\"date\":date,\t\"url\":http,\"post_ip\":article_ip}\t\r\n#print(len(recommend), len(theme), len(author_id),len(date),len(http),len(about_author),len(article))\r\n#df=pd.DataFrame(dict)\r\n#writer=pd.ExcelWriter('pttdata.xlsx')\r\n#df.to_excel(writer,index=False,encoding='utf8')\r\n#writer.save()\r\n#print(\"csv file written\")\r\n\r\n\r\n","sub_path":"python爬蟲/ptt_data.py","file_name":"ptt_data.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"303001866","text":"import string\n#read in and set up \nread('results/*contree')\ninTrees = var.trees\n\nvar.trees = []\nMRP=func.readAndPop('supertrees/mrpStrictConsTree.nex')\nMajR=func.readAndPop('supertrees/mrpMajRuleConsTree.nex')\nSR2008=func.readAndPop('SR2008_cons.nex')\nSR2008.name='SR2008'\n#SPA=func.readAndPop('SPA_f.nex')\n#SPA.name='SPA'\n#SPA_support=func.readAndPop('SPA_supp_f.nex')\n#SPA_support.name='SPA_support'\n#MRP.name='MRP'\nMajR.name='MajR'\na=func.readAndPop('supertrees/mrpStrictConsTree.nex')\n#SPA.taxNames=a.taxNames\nMRP.taxNames=a.taxNames\n#inTrees.taxNames = list(string.uppercase[:10])\n#MajR.taxNames=a.taxNames\n\n\n#trees=[total_evidence, MRP,MajR, SR2008,SPA, SPA_support]\ntrees=[MRP, MajR]\ntaxNames=list(string.uppercase[:10])\n\n\n\n#tt=Trees(trees=trees, taxNames=MRP.taxNames)\ntt=Trees(trees=trees, taxNames=taxNames)\ntt.inputTreesToSuperTreeDistances(inTrees)\n\n#dm = tt.topologyDistanceMatrix('scqdist')\n#dm.writeNexus()\n#dm.writeNexus('qd_distances.nex')\n#paup execute rf_dist nj save\n\n\n\n\n\n","sub_path":"SIMULATED_DATASET/100_B_inputdistances.py","file_name":"100_B_inputdistances.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"316503227","text":"class ListNode(object):\n\n def __init__(self, val):\n self.val = val\n self.next = None\n\n\nclass LinkedList(object):\n\n def __init__(self, first, *k):\n self.head = ListNode(first)\n temp = self.head\n for i in k:\n temp.next = ListNode(i)\n temp = temp.next\n\n def reverse(self):\n prev = None\n while self.head:\n temp = self.head.next\n self.head.next = prev\n prev = self.head\n self.head = temp\n self.head = prev\n return repr(self)\n\n def __repr__(self):\n s = '{}'.format(self.head.val)\n temp = self.head.next\n while temp:\n s += ','\n s += str(temp.val)\n temp = temp.next\n return \"LinkedList({})\".format(s)\n\n\"\"\"\nBecause each node of a singly linked list has only a successor pointer, some operations are expensive:\nfor example, traversing the list in reverse, or finding the element that precedes a given one. Each of\nthese requires extra full traversals of a singly linked list, which wastes resources.\nThis is why we introduce the doubly linked list.\nIts key difference from a singly linked list is that every node holds not only a successor pointer to\nthe next node but also a predecessor pointer to the previous node. For a doubly linked list, reverse\ntraversal and access to an element's predecessor are therefore easy via the predecessor pointer.\n\n\"\"\"\nclass DoubleLinked():\n\n def __init__(self, val):\n self.val = val\n self.pre = self.next = None\n\n\nclass DoubleList():\n\n def __init__(self, first, *k):\n self.head = DoubleLinked(first)\n temp = self.head\n for i in k:\n item = DoubleLinked(i)\n item.pre = temp\n temp.next = item\n temp = temp.next\n self.tail = temp\n\n def reverse(self):\n cur = self.head\n self.head, self.tail = self.tail, self.head  # the old tail becomes the new head\n while cur:\n # 1. visit every node  2. swap its pre and next pointers\n cur.pre, cur.next = cur.next, cur.pre\n cur = cur.pre  # follow what used to be the next pointer\n\n def __repr__(self):\n s = 'head:{}'.format(self.head.val)\n temp = self.head.next\n while temp:\n s += ','\n s += str(temp.val)\n temp = temp.next\n return s","sub_path":"BasicDataStructure/LinkedList/pyLinked.py","file_name":"pyLinked.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"228419989","text":"\nfrom flask_sqlalchemy import SQLAlchemy\nfrom app import db\nfrom app.auditorium.models import Auditorium\nfrom app.movie.models import Movie\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import time\n\n\nclass Screening(db.Model):\n __tablename__ = \"screening\"\n id = db.Column('id', db.Integer, primary_key = True)\n movie_id = db.Column('movie_id', db.Integer, db.ForeignKey('movie.id'))\n auditorium_id = db.Column('auditorium_id', db.Integer, db.ForeignKey('auditorium.id'))\n screening_start_time = db.Column('screening_start_time', db.Time)\n screening_date = db.Column('screening_date',db.Date)\n\n db.relationship('Auditorium', foreign_keys='auditorium_id')\n db.relationship('Movie', foreign_keys='movie_id')\n\n def __init__(self,movie_id,auditorium_id,screening_start_time,screening_date):\n self.movie_id = movie_id\n self.auditorium_id = auditorium_id\n self.screening_start_time = screening_start_time\n self.screening_date = screening_date\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'movie_id': self.movie_id,\n 'auditorium_id': self.auditorium_id,\n 'screening_start_time': str(self.screening_start_time),\n 'screening_date': str(self.screening_date)\n }\n\n def to_dict_dates(self):\n return {\n 'date' : str(self.screening_date)\n }\n\n def to_dict_shows(self):\n return {\n 'screening_start_time' : str(self.screening_start_time)\n }\n def __repr__(self):\n return \"'Screening' { 'movie_id': %r , 'auditorium_id': %r, 'screening_start_time':%r , 'screening_date': %r}\"%(self.movie_id,self.auditorium_id,str(self.screening_start_time),str(self.screening_date))\n","sub_path":"src/app/screening/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"641388496","text":"import numpy as np\nimport tensorflow as tf\nimport time\nfrom matplotlib.pyplot import imshow\nimport matplotlib.pyplot as plt\nimport tensorflow.contrib.keras as keras\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import mean_squared_error\nfrom tf_models import KERAS\nimport preprocessing\nimport scoring\nimport output\nfrom sklearn.model_selection import train_test_split\nimport math\n\n############################################ Configuration ###################################################\n# Decide whether self-evaluation or final submission\nfinal_submission = True\nTrain_split = 8./10\n\n## General\nBLoadData = 1\nBFinalPrediction = 1\n\n# Hyperparameters\n# epochs = 100\n# param = 30\n# layers = 2\n# batch_size = 32\n\nepochs = 90\nparam = 42\nlayers = 3\nbatch_size = 32\n\n# 
Gridsearch\nBGridSearch = 1\nepoch_list = [75, 90, 100, 120]\nparam_list = [24, 30, 36, 42]\nlayer_list = [1, 2, 3, 4]\nbatch_size_list = [16, 32, 64, 128]\n\n## Postprocessing\n## Score\nBRMSEScore = 0\nBAccuracy = 1\n##############################################################################################################\n\n## Load Data\nif BLoadData == 1:\n dataloader = preprocessing.loadfiles('C:\\\\Users\\\\fabri\\\\git\\\\Intro-to-ML\\\\Task2\\\\Raw_Data') # Define datafolder - HomePC\n # dataloader = preprocessing.loadfiles('C:\\\\Users\\\\fabri\\\\git\\\\Intro-to-ML\\\\Task0\\\\Raw_Data') # Surface\n X_train = dataloader.loadX_train()\n y_train = dataloader.loady_train()\n X_test = dataloader.loadX_test()\n print('X_train shape: ', X_train.shape)\n print('y_train shape: ', y_train.shape)\n \n\n## Train / Test Split \nif BFinalPrediction == 0:\n X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.33, random_state=32) \n\nif BGridSearch == 0 or BFinalPrediction == 1: \n ## Create one hot format\n y_train_onehot = keras.utils.to_categorical(y_train)\n print('First 3 labels: ', y_train[:3])\n print('First 3 onehot labels:\\n', y_train_onehot[:3])\n\n # build model:\n model = KERAS.build(X_train, y_train_onehot, param, layers)\n # train model:\n trained_model, losses = KERAS.fit(model, X_train, y_train_onehot, epochs, batch_size)\n # predict labels:\n y_pred = KERAS.predict(trained_model, X_test)\n\n ## Score\n if BAccuracy == 1 and BFinalPrediction == 0:\n scorer = scoring.score()\n score = scorer.Accuracy(y_test, y_pred)\n print('Accuracy score is = ', repr(score))\n\nif BGridSearch == 1 and BFinalPrediction == 0: \n iters = len(epoch_list)*len(param_list)*len(layer_list)*len(batch_size_list)\n i = 0\n score_best = 0\n while i < iters:\n epochs = epoch_list[i%len(epoch_list)]\n param = param_list[math.floor((i/len(param_list))%len(epoch_list))]\n layers = layer_list[math.floor((i/(len(param_list)*len(layer_list)))%len(epoch_list))]\n batch_size = batch_size_list[math.floor((i/(len(param_list)*len(layer_list)*len(batch_size_list)))%len(epoch_list))]\n print('epoch = ', repr(epochs), '| param = ', repr(param), '| layers = ', repr(layers), '| batch_size = ', repr(batch_size))\n ## Create one hot format\n y_train_onehot = keras.utils.to_categorical(y_train)\n print('First 3 labels: ', y_train[:3])\n print('First 3 onehot labels:\\n', y_train_onehot[:3])\n\n # build model:\n model = KERAS.build(X_train, y_train_onehot, param, layers)\n # train model:\n trained_model, losses = KERAS.fit(model, X_train, y_train_onehot, epochs, batch_size)\n # predict labels:\n y_pred = KERAS.predict(trained_model, X_test)\n\n ## Score\n if BAccuracy == 1 and BFinalPrediction == 0:\n scorer = scoring.score()\n score = scorer.Accuracy(y_test, y_pred)\n print('Accuracy score is = ', repr(score))\n \n if score > score_best:\n epoch_best = epochs\n param_best = param\n layers_best = layers\n batch_size_best = batch_size\n score_best = score\n \n i += 1\n print('epoch = ', repr(epoch_best), '| param = ', repr(param_best), '| layers = ', repr(layers_best), '| batch_size = ', repr(batch_size_best), '| score = ', repr(score_best))\n\n## Output Generation\nif BFinalPrediction == 1:\n datasaver = output.savetask2('C:\\\\Users\\\\fabri\\\\git\\\\Output', 'C:\\\\Users\\\\fabri\\\\git\\\\Intro-to-ML\\\\Task2\\\\Raw_Data') # Savepath, Datapath\n 
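The grid-search loop above decodes the flat counter `i` into a grid position with modulo/floor arithmetic, but the divisor and modulo terms mix `len(epoch_list)` with the other list lengths, so it only enumerates the full grid because all four hyperparameter lists happen to have length 4. A length-independent alternative is `itertools.product`; a sketch only, assuming the same `*_list` variables defined above (not part of the original script):

import itertools

for epochs, param, layers, batch_size in itertools.product(
        epoch_list, param_list, layer_list, batch_size_list):
    # train and score one hyperparameter combination here
    print(epochs, param, layers, batch_size)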
datasaver.saveprediction(y_pred)","sub_path":"Task2/CodeFabrice/gridsearch.py","file_name":"gridsearch.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185502568","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\nimport time\n\n# pdftk Whetten1989.pdf output uncompressed.pdf uncompress\n# LANG=C sed -n '/^\\/Annots/!p' uncompressed.pdf > stripped.pdf\n# pdftk stripped.pdf output final.pdf compress\n\nos.chdir('remove_comments')\n\npdfs = [line for line in subprocess.check_output(\n \"find -name '*.pdf'\", shell=True).splitlines()]\n\nif os.path.isfile('uncompressed.pdf') or os.path.isfile('stripped.pdf'):\n input('remove uncompressed.pdf or stripped.pdf')\n\nfor pdf in pdfs:\n pdf = pdf.decode('UTF-8').replace('./', '')\n os.rename(pdf, '_' + pdf)\n command = \"pdftk \" + '_' + pdf + \" output uncompressed.pdf uncompress && LANG=C \" + \\\n \"sed -n '/^\\/Annots/!p' uncompressed.pdf > stripped.pdf && pdftk stripped.pdf output \" \\\n + pdf + \" compress\"\n # print(command)\n subprocess.run(command, shell=True, check=True)\n time.sleep(2)\n os.remove('uncompressed.pdf')\n os.remove('stripped.pdf')\n os.remove('_' + pdf)\n","sub_path":"remove-pdf-comments.py","file_name":"remove-pdf-comments.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502912634","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv(r\"C:\\Users\\User\\PycharmProjects\\ML_Project\\data\\Pre-processed_Sarcasm_Headlines_Dataset.csv\")\n\nheadlines = df['headline']\ny = df['is_sarcastic']\n\nheadlines_train, headlines_test, y_train, y_test = train_test_split(headlines, y, test_size=0.10, random_state=1000)\n\n# Create dictionary of words from training set\nvectorizer = CountVectorizer()\nvectorizer.fit(headlines_train)\n\nX_train = vectorizer.transform(headlines_train)\nX_test = vectorizer.transform(headlines_test)\n\n# Note: class1 is not sarcastic and class2 is sarcastic\n# y_train_array = y_train\ny_train_class1_inds = np.where(y_train==0) # Indices of all non sarcastic elements\ny_train_class2_inds = np.where(y_train==1) # Indices of all sarcastic elements\nX_train_class1 = X_train[y_train_class1_inds]\nX_train_class2 = X_train[y_train_class2_inds]\n\n# X_train_np = X_train.toarray()\n# get shape of matrix - no of rows = no of elements\nclass1_shapeR, class1_shapeC = X_train_class1.shape\nclass2_shapeR, class2_shapeC = X_train_class2.shape\n\nprob_class1 = class1_shapeR/(class1_shapeR+class2_shapeR)\nprob_class2 = class2_shapeR/(class1_shapeR+class2_shapeR)\n\n# Less efficient but easier to code... Might change later\nX_train_class1_arr = X_train_class1.toarray()\nX_train_class2_arr = X_train_class2.toarray()\n\n\n","sub_path":"Naive_Bayes/Naive_Bayes.py","file_name":"Naive_Bayes.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"139400829","text":"import pytest\nimport ujson\nfrom company_management.models import Company\nfrom . 
models import MLData\nfrom rest_framework.test import APIClient\nfrom contextlib import contextmanager\nimport os\nimport csv\n\n\n@pytest.fixture\ndef api():\n return APIClient()\n\n\n@contextmanager\ndef generate_csv():\n with open('temp.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, dialect='excel')\n spamwriter.writerow(['cust_ID', 'owner_first_name'])\n spamwriter.writerow(['1', 'qw'])\n spamwriter.writerow(['2', 'qw1'])\n spamwriter.writerow(['3', 'qw2'])\n try:\n yield\n finally:\n os.remove('temp.csv')\n\n\n@contextmanager\ndef generate_csv_2():\n with open('temp1.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, dialect='excel')\n spamwriter.writerow(['cust_ID', 'owner_first_name'])\n spamwriter.writerow(['1', 'qe'])\n spamwriter.writerow(['2', 'qe1'])\n spamwriter.writerow(['3', 'qe2'])\n try:\n yield\n finally:\n os.remove('temp1.csv')\n\n\ndef create_ml_data(api):\n com_instance = Company.objects.create(name=\"test1\")\n with generate_csv():\n with open('temp.csv') as fp:\n response = api.post('/api/ml_data/data_set/%d/' % 0, {\"company\": com_instance.id,\n \"file\": fp, \"name\": \"test1\"})\n return response\n\n\n@pytest.mark.django_db\ndef test_all_data_sets(api):\n com_instance_1 = Company.objects.create(name=\"test1\")\n MLData.objects.create(name=\"test1\", company=com_instance_1)\n MLData.objects.create(name=\"test2\", company=com_instance_1)\n com_instance_2 = Company.objects.create(name=\"test2\")\n MLData.objects.create(name=\"test1\", company=com_instance_2)\n MLData.objects.create(name=\"test2\", company=com_instance_2)\n request = api.get('/api/ml_data/all_data_sets/', )\n\n assert 200 == request.status_code\n\n body = ujson.loads(request.content)\n assert 2 == len(body)\n assert body[0] == {'label': 'test1', 'data': com_instance_1.id, 'children': [{'label': 'test1', 'data': 1},\n {'label': 'test2', 'data': 2}]\n }\n assert body[1] == {'label': 'test2', 'data': com_instance_2.id, 'children': [{'label': 'test1', 'data': 3},\n {'label': 'test2', 'data': 4}]\n }\n\n\n@pytest.mark.django_db\ndef test_data_set_get(api):\n com_instance = Company.objects.create(name=\"test1\")\n data_instance = MLData.objects.create(name=\"test1\", company=com_instance)\n request = api.get('/api/ml_data/data_set/%d/' % data_instance.id)\n assert 200 == request.status_code\n assert ujson.loads(request.content) == {'id': 5, 'name': 'test1', 'company': {'id': com_instance.id, 'name': 'test1'},\n 'key_data': {'name': ''}, 'data': None}\n\n\n@pytest.mark.django_db\ndef test_data_set_post(api):\n request = create_ml_data(api)\n assert 200 == request.status_code\n data = request.json()\n ml_instance = MLData.objects.get(name=\"test1\")\n assert ml_instance.id == data[\"id\"]\n\n\n@pytest.mark.django_db\ndef test_data_set_put(api):\n com_instance = Company.objects.create(name=\"test1\")\n data_instance = MLData.objects.create(name=\"test1\", company=com_instance)\n with generate_csv():\n with open('temp.csv') as fp:\n request = api.put('/api/ml_data/data_set/%d/' % data_instance.id, {\"company\": com_instance.id,\n \"file\": fp, \"name\": \"test1\"})\n assert 200 == request.status_code\n data = request.json()\n assert data_instance.id == data[\"id\"]\n\n\n@pytest.mark.django_db\ndef test_data_set_delete(api):\n com_instance = Company.objects.create(name=\"test1\")\n data_instance = MLData.objects.create(name=\"test1\", company=com_instance)\n request = api.delete('/api/ml_data/data_set/%d/' % data_instance.id)\n assert 200 == request.status_code\n 
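The `generate_csv` context managers above write into the working directory and clean up with a manual `os.remove`. pytest's built-in `tmp_path` fixture gives each test an isolated directory that pytest prunes on later runs; a sketch only, with a hypothetical test name, not part of the original suite:

import csv

def test_upload_with_tmp_csv(api, tmp_path):
    csv_file = tmp_path / "temp.csv"  # tmp_path is a pathlib.Path provided by pytest
    with csv_file.open("w", newline="") as f:
        writer = csv.writer(f, dialect="excel")
        writer.writerow(["cust_ID", "owner_first_name"])
        writer.writerow(["1", "qw"])
    with csv_file.open() as fp:
        pass  # pass fp to api.post(...) as in create_ml_data above
    # no manual os.remove: pytest cleans up old tmp_path directories itself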
assert 0 == len(MLData.objects.filter(name=\"test1\"))\n\n\n@pytest.mark.django_db\ndef test_data_set_row(api):\n create_ml_data(api)\n ml_instance = MLData.objects.get(name=\"test1\")\n request = api.put('/api/ml_data/data_set_row/%d/' % ml_instance.id, {\"row_index\": 1,\n \"col_name\": \"owner_first_name\",\n \"value\": 2})\n assert 200 == request.status_code\n ml_instance = MLData.objects.get(name=\"test1\")\n ml_data = ml_instance.data\n for i in ml_data:\n if i[\"row_index\"] == 1:\n assert i[\"owner_first_name\"] == '2'\n if i[\"row_index\"] == 2:\n assert i[\"owner_first_name\"] != '2'\n\n\n@pytest.mark.django_db\ndef test_map_new_data_set(api):\n create_ml_data(api)\n mapped_columns = ujson.dumps([{\"origin_column\": \"owner_first_name\", \"new_set_column\": \"owner_first_name\"}])\n primary_mapping = ujson.dumps({\"origin_column\": \"cust_ID\", \"new_set_column\": \"cust_ID\"})\n origin_column = ujson.dumps([\"cust_ID\", \"owner_first_name\"])\n new_set_column = ujson.dumps([\"cust_ID\", \"owner_first_name\"])\n context = {\n \"mapped_columns\": mapped_columns,\n \"primary_columns\": primary_mapping,\n \"origin_column\": origin_column,\n \"new_set_column\": new_set_column\n }\n ml_instance = MLData.objects.get(name=\"test1\")\n with generate_csv_2():\n with open('temp1.csv') as fp:\n context[\"file\"] = fp\n request = api.post('/api/ml_data/map_new_data_set/%d/' % ml_instance.id, context)\n\n assert 200 == request.status_code\n\n ml_instance = MLData.objects.get(name=\"test1\")\n ml_data = ml_instance.data\n for i in ml_data:\n if i[\"row_index\"] == 1:\n assert i[\"owner_first_name\"] == 'qe'\n if i[\"row_index\"] == 2:\n assert i[\"owner_first_name\"] == 'qe1'\n if i[\"row_index\"] == 3:\n assert i[\"owner_first_name\"] == 'qe2'\n","sub_path":"ml_data_care/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"375333842","text":"import pprint\nimport time\n\nfrom luminateapi.luminate_python import LuminateV2Client\n\nLUMINATE_URL = 'https://api..luminatesec.com'\nAPI_KEY = ''\nAPI_SECRET = ''\nVERIFY_CERTIFICATE = True\n\nTEST_USER_EMAIL = \"\"\n\nif __name__ == '__main__':\n # Create a V2 Client\n luminate_client = LuminateV2Client(LUMINATE_URL,\n API_KEY,\n API_SECRET,\n VERIFY_CERTIFICATE)\n\n # Block all users with this email\n block_res = luminate_client.block_user_by_email(TEST_USER_EMAIL)\n pprint.pprint(block_res)\n\n # Unblock the same user\n unblock_res = luminate_client.unblock_user_by_email(TEST_USER_EMAIL)\n pprint.pprint(unblock_res)\n\n # Creating SSH Application\n ssh_app_res = luminate_client.create_app(\"client-ssh-test\",\n \"description\",\n \"SSH\",\n \"tcp://127.0.0.1:8000\",\n [\"some_linux_user\"])\n pprint.pprint(ssh_app_res)\n\n # Creating HTTP Application\n http_app_res = luminate_client.create_app(\"client-http-app\",\n \"description\",\n \"HTTP\",\n \"http://127.0.0.1:8080\",\n None)\n pprint.pprint(http_app_res)\n\n # Creating a Site\n site_res = luminate_client.create_site(\"site-test-client\", \"description\")\n pprint.pprint(site_res)\n\n # Binding the Application to Site\n luminate_client.bind_app_to_site(http_app_res['id'], site_res['id'])\n\n # Get user information\n user_info_res = luminate_client.get_user(TEST_USER_EMAIL)\n pprint.pprint(user_info_res)\n\n # Finding local IDP\n local_idp = None\n for idp in user_info_res:\n if idp['name'] == 'local':\n local_idp = idp\n assert (local_idp is not None)\n\n local_user = 
local_idp['users'][0]\n\n # Assigning the user to the created application\n assignment_res = luminate_client.assign_entity_to_app(http_app_res['id'],\n local_user['id'],\n local_user['identity_provider_id'],\n local_user['repository_type'],\n \"User\")\n print(assignment_res)\n\n # Destroy User Session\n destroy_res = luminate_client.destroy_user_sessions_by_email(TEST_USER_EMAIL)\n pprint.pprint(destroy_res)\n\n # Getting HTTP and SSH logs\n query = {\n \"free_text\": \"\",\n \"from_date\": int((time.time() - 1000) * 1000),\n \"to_date\": int(time.time() * 1000),\n }\n http_logs_res = luminate_client.get_access_logs(2, query, None)\n pprint.pprint(http_logs_res)\n\n ssh_logs_res = luminate_client.get_ssh_access_logs(2, query, None)\n pprint.pprint(ssh_logs_res)\n","sub_path":"luminateapi/examples/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500148387","text":"import gym\nimport numpy as np\nimport time\nfrom IPython.display import clear_output\n\nenv = gym.make(\"FrozenLake-v0\")\nactionSpace_size = env.action_space.n\nstateSpace_size = env.observation_space.n\n\nq_table = np.zeros((stateSpace_size, actionSpace_size))\n\nalpha = 0.1 # learning rate\nepsilon = 1 # e-greedy parameter\nepsilon_min = 0.0\nepsilon_max = 1\nepsilon_decay =0.001\n\ngamma = 0.99 # discount rate\n\nnum_eps = 10000\nmax_steps = 100\n\nreward_list = []\n\nfor episode in range(num_eps):\n current_state = env.reset()\n done = False\n episode_reward = 0\n for j in range(max_steps):\n if np.random.rand() > epsilon:\n action = np.argmax(q_table[current_state, :])\n else:\n action = env.action_space.sample() #random sample\n\n new_state, reward, done, info = env.step(action) #single time step\n #print(reward)\n new_action = np.argmax(q_table[new_state, :])\n q_star = reward + gamma*np.max(q_table[new_state, :]) #bellman equation\n\n q_table[current_state, action] = (1-alpha)*q_table[current_state,action] +(alpha)*q_star\n\n current_state = new_state\n episode_reward += reward\n\n if done:\n break\n\n\n epsilon = epsilon_min + (epsilon_max-epsilon_min)*np.exp(-epsilon_decay*episode)\n reward_list.append(episode_reward)\n\nreward_perThousand = np.split(np.array(reward_list), num_eps/1000)\nprint(\"Average reward per thousand episodes \\n \", np.mean(reward_perThousand, 1))\n\nfor episode in range(3):\n current_state = env.reset()\n done = False\n print(\"Episode number: \", episode+1, \"\\n\\n\")\n time.sleep(1)\n for step in range(max_steps):\n clear_output(wait= True)\n env.render()\n\n action = np.argmax(q_table[current_state, :])\n new_state, reward, done, info = env.step(action)\n time.sleep(0.3)\n if done:\n clear_output(wait=True)\n env.render()\n if reward:\n print(\"***** You found the frisbee *****\")\n time.sleep(3)\n elif not reward:\n print(\"***** You fell into a hole *****\")\n time.sleep(3)\n clear_output(wait= True)\n break\n current_state = new_state\n\n'''\n#create environment\nactions =\nstates =\n\nr= np.random.rand()\nepsilon= 1\nalpha= 01\ngamma=0.9\nq_table= zeros(statesize, actionsize)\nr_table=\n\n# start game\nfor num iters:\n reset environment\n for each episode:\n s= current state\n if r>epsilon:\n #exploitation- choose action with maximum qval\n else:\n #explore - take random action\n a= env.actionspace.sample\n q_star= \n \n loss= q_star-q[s,a]\n \n qtable[s,a]= (1-alpha)*qtable[s,a]+(alpha)*q_star\n \n epsilon *=0.9 
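# --- Illustrative sketch, not part of the original file: one tabular Q-update.
# The training loop above uses the Bellman target
#     q_star = r + gamma * max_a' Q(s', a')
# Worked example on a 2-state, 2-action table:
import numpy as np
q = np.zeros((2, 2))
alpha, gamma = 0.1, 0.99
s, a, r, s_next = 0, 1, 1.0, 1
q_star = r + gamma * np.max(q[s_next, :])         # 1.0 + 0.99 * 0.0 = 1.0
q[s, a] = (1 - alpha) * q[s, a] + alpha * q_star  # 0.9 * 0.0 + 0.1 * 1.0 = 0.1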
#decay\n\n'''","sub_path":"Q-learning/first_game.py","file_name":"first_game.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"307143572","text":"import sys\n#Ex:1\n\n# for i in range(5):\n# for j in range(i):\n# print (\"* \", end=\"\")\n# print(\"\")\n\n# for i in range(5,0,-1):\n# for j in range(i):\n# print(\"* \", end=\"\")\n# print('')\n\n\nnum__ = int(input(\"Enter a number: \"))\nnum__ = num__ + 1 if num__ % 2 == 0 else num__\nmid_row = num__ // 2 + 1 \nfor i in range(1, num__ + 1):\n\tif i < mid_row:\n\t\tprint(i * \"* \")\n\telse:\n\t\tj = mid_row * 2 - i\n\t\tprint(j * \"* \")\n\n#Ex:2\nfor i in range(1,11):\n\tif i == 1:\n\t\tprevious = 1\n\telse:\n\t\tprevious = i - 1\n\tprint(f\"{previous} + {i} = {previous + i}\")\n\n#Ex:3\n\nnum = input(\"Enter a number:\\n\")\n\nif not (num and num.isdigit()):\n\tsys.exit()\nelse:\n\tnum = int(num)\n\nfor i in range(1, num // 2 + 1):\n\tif num % i == 0:\n\t\tprint(i, \"is a divisor of\", num)\n\n\n\n#Ex:4\n\nfactorial_num = input(\"Tell me a number:\\n\")\nfactorial = 1\n\nif not (factorial_num and factorial_num.isdigit()):\n\tsys.exit()\nelse:\n\tfactorial_num = int(factorial_num)\n\nif factorial_num < 0:\n print(\"Factorial does not exist for negative numbers\")\nelif factorial_num == 0:\n print(\"The factorial of 0 is 1\")\nelse:\n for i in range(1,factorial_num + 1):\n factorial = factorial*i\n print(\"The factorial of\",factorial_num,\"is\",factorial)\n\n\n\n#Ex:5\n\nnum_1, num_2 = 0, 1\n\nwhile num_2 <= 50:\n\tprint(num_2, \" \", end=\"\")\n\tnum_1, num_2 = num_2, num_1 + num_2\n\n","sub_path":"homework_4.py","file_name":"homework_4.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"53384093","text":"from pyplasm import *\nfrom scipy import *\n\ngrigio_granito = [0.803,0.752,0.690];\n\n# define a column\ndef circR2(p): \n\treturn [2*COS(p[0]),0]\n\ndef circR1(p): \n\treturn [COS(p[0]),0]\ndef domaincirc(n): \n\treturn INTERVALS(2*PI)(n)\n\ncircBase=MAP(circR2)(domaincirc(25))\ncircBase=JOIN([circBase])\ncircBase=T([1,2,3])([12,12,6])(circBase)\n\ncircSopra=MAP(circR1)(domaincirc(25))\ncircSopra=JOIN([circSopra])\ncircSopra=T([1,2,3])([12,12,20])(circSopra)\ncolonna=JOIN([circBase,circSopra])\n\ncolonna=COLOR(grigio_granito)(STRUCT([colonna]))\n#VIEW(STRUCT([colonna,scalinata]))\n\n# replicate the columns and create the colonnades\nxmov=[T(1)(5),colonna]\nymov=[T([1,2])([0,4.64]),colonna]\ncolonnato1=STRUCT(NN(7)(xmov))\ncolonnato2=STRUCT(NN(17)(ymov))\nx2mov=[T([1,2])([0,4.64]),T([1,2])([35,0])(colonna)]\ncolonnato3=STRUCT(NN(17)(x2mov))\ny2mov=[T([1])([5]),T([1,2])([0,79])(colonna)]\ncolonnato4=STRUCT(NN(7)(y2mov))\ncolonnati=STRUCT([colonna,colonnato1,colonnato2,colonnato3,colonnato4])\n\n# second row of columns\ndef circ(p): \n\treturn 
[0.5*COS(p[0]),0]\n\ncircBasedentro=MAP(circR1)(domaincirc(25))\ncircBasedentro=JOIN([circBasedentro])\ncircBasedentro=T([1,2,3])([12,12,6])(circBasedentro)\n\ncircSopradentro=MAP(circ)(domaincirc(25))\ncircSopradentro=JOIN([circSopradentro])\ncircSopradentro=T([1,2,3])([12,12,20])(circSopradentro)\n\ncolonnadentro=COLOR(grigio_granito)(T([1,2,3])([5,5])(JOIN([circBasedentro,circSopradentro])))\n\nxcapitello=QUOTE([2.2])\nycapitello=QUOTE([2.5])\ncapitello=COLOR(grigio_granito)(T([1,2,3])([15.9,15.3,20.2])(INSR(PROD)([xcapitello,ycapitello,QUOTE([0.5])])))\n\ncolonnadentro=STRUCT([colonnadentro,capitello])\nmovint=[T(1)(5),colonnadentro]\ncolonnatointerno=COLOR(grigio_granito)(STRUCT(NN(5)(movint)))\n\n# columns in the smaller room\ncircBasedentro2=MAP(circR1)(domaincirc(25))\ncircBasedentro2=JOIN([circBasedentro2])\ncircBasedentro2=T([1,2,3])([12,12,6])(circBasedentro2)\ncircSopradentro2=MAP(circ)(domaincirc(25))\ncircSopradentro2=JOIN([circSopradentro2])\ncircSopradentro2=T([1,2,3])([12,12,20])(circSopradentro2)\ncolonnadentro2=JOIN([circBasedentro2,circSopradentro2])\ncolonnastanza1=COLOR(grigio_granito)(T([1,2,3])([12,25])(colonnadentro2))\ncolonnastanza1_2=COLOR(grigio_granito)(T([1,2,3])([22,25])(colonnadentro2))\ncolonnastanza1_3=COLOR(grigio_granito)(T([1,2,3])([12,30])(colonnadentro2))\ncolonnastanza1_4=COLOR(grigio_granito)(T([1,2,3])([22,30])(colonnadentro2))\ncolonnatostanza1=STRUCT([colonnastanza1,colonnastanza1_2,colonnastanza1_3,colonnastanza1_4])\n\n# columns in the larger room; these columns are smaller\ndef circpicc(p): \n\treturn [0.25*COS(p[0]),0]\ncircsoprastanza2=MAP(circpicc)(domaincirc(25)) # radius 0.25\ncircsoprastanza2=JOIN([circsoprastanza2])\ncircsoprastanza2=T([1,2,3])([20,53,20])(circsoprastanza2)\ncircbasestanza2=MAP(circ)(domaincirc(25)) # radius 0.5\ncircbasestanza2=JOIN([circbasestanza2])\ncircbasestanza2=T([1,2,3])([20,53,9])(circbasestanza2)\ncolonnastanza2=JOIN([circbasestanza2,circsoprastanza2])\n\nxmov=[T([1,2])([0,3]),colonnastanza2]\ncolonnatostanza2=COLOR(grigio_granito)(STRUCT(NN(10)(xmov)))\n\nxmov2=[T([1,2])([3,0]),colonnastanza2]\ncolonnatostanza2_2=COLOR(grigio_granito)(STRUCT(NN(5)(xmov2)))\n\nxmov3=[T([1,2])([0,3]),T(1)(18)(colonnastanza2)]\ncolonnatostanza2_3=COLOR(grigio_granito)(STRUCT(NN(10)(xmov3)))\n\ncolonnatistanza2=STRUCT([colonnatostanza2,colonnatostanza2_2,colonnatostanza2_3])\n\n\nr=STRUCT([colonnatostanza1,colonnatistanza2])\nVIEW(r)\n","sub_path":"2014-03-21/python/Exercise2.py","file_name":"Exercise2.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"129506704","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport os\nimport argparse\nfrom torchvision.datasets import MNIST\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, datasets\nfrom torch.optim.lr_scheduler import StepLR\nimport utils\n\ncuda = True\ncnt = 0\nlr = 0.001\nout_dir = \"out_aae3\"\nbatch_size = 100\n\nnc = 1 # number of channels\nnz = 8 # size of latent vector\nngf = 128 # decoder (generator) filter factor\nndf = 128 # encoder filter factor\nh_dim = 512 # discriminator hidden size\nlam = 10 # regularization coefficient\n\ntrainset = 
MNIST(root='./data/',train=True,transform=transforms.ToTensor(),download=True)\ndata_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=20, drop_last=True)\n\n\ndef load_args():\n parser = argparse.ArgumentParser(description='PyTorch MNIST WAE-GAN')\n\n parser.add_argument('--batch-size', type=int, default=100, help='')\n parser.add_argument('--epochs', type=int, default=100, help='')\n parser.add_argument('--dim', type=int, default=8, help='')\n parser.add_argument('--epsilon', type=float, default=1e-15, help='')\n parser.add_argument('--l', type=int, default=10, help='')\n parser.add_argument('--lr', type=float, default=.001, help='')\n parser.add_argument('--e_dropout', type=bool, default=False, help='')\n parser.add_argument('--resume', type=bool, default=False, help='')\n parser.add_argument('--pretrain_epochs', type=int, default=200, help='')\n parser.add_argument('--loss', type=str, default='l2sq', help='')\n parser.add_argument('--dataset', type=str, default='mnist', help='')\n\n\n args = parser.parse_args()\n return args\n\n\nclass Decoder(nn.Module):\n def __init__(self):\n super(Decoder, self).__init__()\n # input is Z, going into a convolution\n self.linear = nn.Linear(nz, ngf*8*7*7)\n\n self.deconv1 = nn.ConvTranspose2d(ngf*8 , ngf*4, 4, 1, 0, 0, bias=False)\n self.bn1 = nn.BatchNorm2d(ngf*4)\n self.deconv2 = nn.ConvTranspose2d(ngf*4, ngf*2, 4, 1, 0, 0, bias=False)\n self.bn2 = nn.BatchNorm2d(ngf*2)\n self.deconv_out = nn.ConvTranspose2d(ngf*2, nc, 4, 2, 0, 0, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n # print ('G in: ', x.shape)\n x = x.view(-1, nz) \n x = F.relu(self.linear(x))\n x = x.view(-1, ngf*8, 7, 7)\n x = F.relu(self.bn1(self.deconv1(x)))\n x = F.relu(self.bn2(self.deconv2(x)))\n x = self.deconv_out(x)\n # print ('G out: ', x.shape)\n x_act = self.sigmoid(x)\n\n return (x_act, x)\n\n\nclass Encoder(nn.Module):\n def __init__(self, is_training=True):\n super(Encoder, self).__init__()\n self.is_training = is_training\n # input is (nc) x 64 x 64\n self.conv1 = nn.Conv2d(nc, ndf, 4, 2, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(ndf)\n # state size. (ndf) x 32 x 32\n self.conv2 = nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)\n self.bn2 = nn.BatchNorm2d(ndf * 2)\n # state size. (ndf*2) x 16 x 16\n self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False)\n self.bn3 = nn.BatchNorm2d(ndf * 4)\n # state size. (ndf*4) x 8 x 8\n self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)\n self.bn4 = nn.BatchNorm2d(ndf * 8)\n # state size. 
(ndf*8) x 4 x 4\n self.linear = nn.Linear(ndf*8, nz)\n\n def forward(self, x):\n #print ('E in: ', x.shape)\n if self.is_training:\n z = torch.normal(torch.zeros_like(x.data), std=0.01)\n x.data += z\n x = F.leaky_relu(self.bn1(self.conv1(x)))\n x = F.leaky_relu(self.bn2(self.conv2(x)))\n x = F.leaky_relu(self.bn3(self.conv3(x)))\n x = F.leaky_relu(self.bn4(self.conv4(x)))\n # x = self.conv5(x)\n x = x.view(-1, ndf*8)\n x = self.linear(x)\n #print ('E out: ', x.shape)\n return x\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.linear1 = nn.Linear(nz, h_dim)\n self.linear2 = nn.Linear(h_dim, h_dim)\n self.linear3 = nn.Linear(h_dim, h_dim)\n self.linear4 = nn.Linear(h_dim, h_dim)\n self.logits = nn.Linear(h_dim, 1)\n self.sigmoid = nn.Sigmoid()\n self.a = nn.ReLU()\n\n def forward(self, x):\n x = F.relu(self.linear1(x))\n x = F.relu(self.linear2(x))\n x = F.relu(self.linear3(x))\n x = F.relu(self.linear4(x))\n x = self.logits(x)\n x = self.sigmoid(x)\n return x\n\n\ndef load_networks():\n netE = Encoder().cuda()\n netG = Decoder().cuda()\n netD = Discriminator().cuda()\n print (netE, netG, netD)\n \n return netE, netG, netD\n\n\ndef reset_grad():\n netE.zero_grad()\n netG.zero_grad()\n netD.zero_grad()\n\n\ndef param_switch(module: nn.Module, state):\n if state == \"freeze\":\n for p in module.parameters():\n p.requires_grad = False\n else:\n for p in module.parameters():\n p.requires_grad = True\n\n\nargs = load_args()\n\nnetE, netG, netD = load_networks()\n\n# optimG = optim.Adam(netG.parameters(), lr=lr)\noptimE = optim.Adam(netE.parameters(), lr=lr)\noptimD = optim.Adam(netD.parameters(), lr=lr*0.5)\noptimAE = optim.Adam(list(netE.parameters())+list(netG.parameters()), lr=lr)\n\n# LRgen = StepLR(optimG, step_size=30, gamma=0.5)\nLRenc = StepLR(optimE, step_size=30, gamma=0.5)\nLRdis = StepLR(optimD, step_size=30, gamma=0.5)\nLRae = StepLR(optimAE, step_size=30, gamma=0.5)\n\nprint (args.resume)\nif args.resume is True:\n print (\"\\n==> Loading old weights if possible\")\n netE, optimE, _ = utils.load_model(netE, optimE, \"E_latest.pth\")\n netG, optimG, _ = utils.load_model(netG, optimG, \"G_latest.pth\")\n netD, optimD, _ = utils.load_model(netD, optimD, \"D_latest.pth\")\n optimAE = utils.load_model(optimAE, \"AE_latest.pth\")\n\n\ndef pretrain_e():\n print (\"==> Pretraining Encoder\")\n for epoch in range(args.pretrain_epochs):\n for i, images in enumerate(data_loader):\n x = Variable(images[0].cuda())\n noise = Variable(torch.rand(batch_size, nz)).cuda()\n latent = netE(x).view(batch_size, nz)\n #noise = noise.view(*noise.size(), 1, 1)\n loss = utils.pretrain_loss(latent, noise)\n loss.backward()\n optimE.step()\n reset_grad()\n print (\"Pretrain Enc iter: {}, Loss: {}\".format(i, loss.data[0]))\n if loss.data[0] < 0.1:\n print (\"Finished Pretraining Encoder\")\n return\n\nif not args.resume:\n pretrain_e()\n\none = torch.Tensor([1]).cuda()\nmone = (one * -1).cuda()\n\nfor epoch in range(args.epochs):\n for batch_idx, batch_images in enumerate(data_loader):\n \"\"\" var creation \"\"\"\n X = Variable(batch_images[0]).cuda()\n z_sample = Variable(torch.randn(batch_size, nz)).cuda()\n \n z_enc = netE(X)\n X_sample, X_logits = netG(z_enc)\n reset_grad()\n\t\n \"\"\" Regularization phase \"\"\"\n param_switch(netG, \"freeze\")\n param_switch(netE, \"freeze\")\n param_switch(netD, \"free\")\n # z adversary\n D_fake = netD(z_sample)\n z_real = netE(X)\n D_real = netD(z_real)\n D_loss_fake = torch.log(D_fake).mean()\n D_loss_real = 
torch.log(1-(D_real+args.epsilon)).mean()\n D_loss = D_loss_fake + D_loss_real\n D_loss_fake.backward(mone)\n D_loss_real.backward(mone)\n optimD.step()\n \n \"\"\" WAE update \"\"\"\n param_switch(netG, \"free\")\n param_switch(netE, \"free\")\n param_switch(netD, \"freeze\")\n\n z_enc = netE(X)\n\n # recon_loss = utils.ae_loss(args, X+args.epsilon, X_sample+args.epsilon)\n recon_loss = F.mse_loss(X_sample, X)\n penalty = utils.gan_loss2(args, z_sample, z_enc, netD)\n recon_loss.backward(one, retain_graph=True)\n penalty.backward(mone)\n optimAE.step()\n\n \"\"\"\n z_fake = netE(X).view(batch_size,-1)\n D_fake = netD(z_fake)\n\n #G_loss = -torch.mean(torch.log(D_fake))\n G_loss = -torch.mean(D_fake)\n\n G_loss.backward()\n optimE.step()\n reset_grad()\n\t\"\"\"\n if batch_idx % 50 == 0:\n losses = [D_loss.data[0], penalty.data[0], recon_loss.data[0]]\n print('Epoch {}; iter {}; D_loss: {:.4}; G_loss: {:.4}; recon_loss: {:.4}'\n .format(epoch, batch_idx, losses[0], losses[1], losses[2]))\n utils.save_model(netE, optimE, epoch, \"E_latest.pth\")\n utils.save_model(netG, optimAE, epoch, \"G_latest.pth\")\n utils.save_model(netD, optimD, epoch, \"D_latest.pth\")\n\n # Print and plot every now and then\n if batch_idx % 100 == 0:\n utils.save_image(netG, epoch, batch_idx, orthogonal=False)\n \n","sub_path":"test_mnist_waegan.py","file_name":"test_mnist_waegan.py","file_ext":"py","file_size_in_byte":9036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"82320134","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 05 04:35:49 2015\n\n@author: DavidDobor\n\"\"\"\n\n# part 1: an implementation of a brute-force pattern-matching algorithm\n\ndef find_brute(T, P):\n \"\"\" Return the lowest index of T at which substring P begins (or else -1).\"\"\"\n n, m = len(T), len(P)\n for i in range(n-m+1): # try every potential starting index within T\n k = 0\n while k < m and T[i + k] == P[k]:\n k += 1\n if k == m:\n return i\n return -1\n \n \nT = \"abacaabaccabacabaabb\"\nP = \"abacab\"\n\nfind_brute(T,P)","sub_path":"dynamic/LCS/brute_pattern_match.py","file_name":"brute_pattern_match.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"589130925","text":"from pytest import mark\r\nimport sqlalchemy as sa\r\nfrom sqlalchemy_utils.types import url\r\n\r\nfrom tests import TestCase\r\n\r\n\r\n@mark.skipif('url.furl is None')\r\nclass TestURLType(TestCase):\r\n def create_models(self):\r\n class User(self.Base):\r\n __tablename__ = 'user'\r\n id = sa.Column(sa.Integer, primary_key=True)\r\n website = sa.Column(url.URLType)\r\n\r\n def __repr__(self):\r\n return 'User(%r)' % self.id\r\n\r\n self.User = User\r\n\r\n def test_color_parameter_processing(self):\r\n user = self.User(\r\n website=url.furl(u'www.example.com')\r\n )\r\n\r\n self.session.add(user)\r\n self.session.commit()\r\n\r\n user = self.session.query(self.User).first()\r\n assert isinstance(user.website, url.furl)\r\n\r\n def test_scalar_attributes_get_coerced_to_objects(self):\r\n user = self.User(website=u'www.example.com')\r\n\r\n assert isinstance(user.website, url.furl)\r\n","sub_path":"python/flask-mail-labs/.venv/lib/python2.7/site-packages/tests/types/test_url.py","file_name":"test_url.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"329102215","text":"from sqlalchemy import desc\nfrom 
sqlalchemy.sql import and_, select\n\nfrom tno.db import DBBase\nfrom datetime import datetime\n\nclass DesSkybotJobDao(DBBase):\n def __init__(self, pool=True):\n super(DesSkybotJobDao, self).__init__(pool)\n\n schema = self.get_base_schema()\n self.tablename = \"des_skybotjob\"\n self.tbl = self.get_table(self.tablename, schema)\n\n def get_tablename(self):\n return self.tablename\n\n def get_tbl(self):\n return self.tbl\n\n def get_by_id(self, id):\n\n stm = select(self.tbl.c).where(and_(self.tbl.c.id == int(id)))\n\n self.debug_query(stm, True)\n\n row = self.fetch_one_dict(stm)\n\n return row\n\n def get_by_status(self, status):\n \"\"\"Return the jobs with the given status.\n NOTE: the jobs are ordered by creation date in ascending order; this order matters for the pipeline.\n\n Arguments:\n status {int} -- Job status, as defined in the des/SkybotJob model\n\n Returns:\n [array] -- An array of jobs with the same structure as the des/SkybotJob model\n \"\"\"\n stm = (\n select(self.tbl.c)\n .where(and_(self.tbl.c.status == int(status)))\n .order_by(self.tbl.c.start)\n )\n\n rows = self.fetch_all_dict(stm)\n\n return rows\n\n def update_by_id(self, id, job):\n stm = (\n self.tbl.update()\n .where(self.tbl.c.id == int(id))\n .values(\n status=job[\"status\"],\n start=job[\"start\"],\n exposures=job[\"exposures\"],\n nights=job[\"nights\"], \n ccds=job[\"ccds\"], \n positions=job[\"positions\"],\n asteroids=job[\"asteroids\"],\n exposures_with_asteroid=job[\"exposures_with_asteroid\"],\n ccds_with_asteroid=job[\"ccds_with_asteroid\"],\n path=job[\"path\"],\n results=job[\"results\"],\n estimated_execution_time=job[\"estimated_execution_time\"]\n\n )\n )\n\n return self.execute(stm)\n\n def complete_job(self, id, job):\n stm = (\n self.tbl.update()\n .where(self.tbl.c.id == int(id))\n .values(\n status=job[\"status\"],\n # finish=job['finish'].strftime('%Y-%m-%d %H:%M:%S'),\n finish=job[\"finish\"],\n execution_time=job[\"execution_time\"],\n error=job[\"error\"],\n )\n )\n\n return self.execute(stm)\n","sub_path":"backend/des/dao/skybot_job.py","file_name":"skybot_job.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"269819803","text":"import torch\nimport numpy as np\n\nfrom typing import Dict, List, Optional\n\nfrom spacy.tokens import Token as SpacyToken\n\nfrom overrides import overrides\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.nn import util\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.data.fields import SequenceField\nfrom allennlp.data.fields.text_field import TokenList\n\n\nclass BidirectionalLanguageModelField(SequenceField[Dict[str, torch.Tensor]]):\n \"\"\"\n Taken from calypso:\n https://github.com/allenai/calypso/blob/ac934b6881787387581efaa8a646531278010652/calypso/allennlp_bridge.py#L51-L131\n\n Field for adding targets for a BidirectionalLM in a multi-task setting.\n \"\"\"\n def __init__(self,\n tokens: List[Token]) -> None:\n self.tokens = tokens\n self._indexed_tokens: Optional[Dict[str, TokenList]] = None\n self._directions = ['forward_targets', 'backward_targets']\n\n if not all([isinstance(x, (Token, SpacyToken)) for x in tokens]):\n raise ConfigurationError(\"TextFields must be passed Tokens. 
\"\n \"Found: {} with types {}.\".format(tokens, [type(x) for x in tokens]))\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n pass\n\n @overrides\n def index(self, vocab: Vocabulary):\n # self._indexed_tokens = {'forward_targets': array of next token id,\n # 'backward_targets': array of prev token id}\n\n tokens = [token.text for token in self.tokens]\n token_ids = [vocab.get_token_index(token, namespace='lm')\n for token in tokens]\n\n n_tokens = len(tokens)\n forward_targets = np.zeros(n_tokens, dtype='int32')\n forward_targets[0:-1] = token_ids[1:]\n\n backward_targets = np.zeros(n_tokens, dtype='int32')\n backward_targets[1:n_tokens] = token_ids[0:(n_tokens - 1)]\n\n self._indexed_tokens = {\n direction: target for direction, target in\n zip(self._directions, [forward_targets, backward_targets])\n }\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n # the padding length is the number of tokens in the sequence\n if self._indexed_tokens is None:\n raise ValueError(\"self._indexed_tokens is None.\")\n return {'num_lm_targets': len(self._indexed_tokens['forward_targets'])}\n\n @overrides\n def sequence_length(self) -> int:\n return len(self.tokens)\n\n @overrides\n def as_tensor(self,\n padding_lengths: Dict[str, int],\n cuda_device: int = -1) -> Dict[str, torch.Tensor]:\n\n desired_num_tokens = padding_lengths['num_lm_targets']\n if self._indexed_tokens is None:\n raise ValueError(\"self._indexed_tokens is None.\")\n unpadded_num_tokens = len(self._indexed_tokens['forward_targets'])\n\n # need to pad indexed_tokens to the specified length then return\n # as torch tensor on device\n tensors = {}\n\n for k in ['forward_targets', 'backward_targets']:\n padded_array = np.zeros(desired_num_tokens, dtype=np.int32)\n padded_array[:unpadded_num_tokens] = self._indexed_tokens[k]\n\n tensor = torch.LongTensor(padded_array)\n if cuda_device >= 0:\n tensor = tensor.cuda(cuda_device)\n tensors[k] = tensor\n\n return tensors\n\n @overrides\n def empty_field(self, vocab):\n text_field = BidirectionalLanguageModelField([])\n text_field.index(vocab)\n return text_field\n\n @overrides\n def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n # This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used\n # to index this field.\n return util.batch_tensor_dicts(tensor_list)\n","sub_path":"contexteval/data/fields/bidirectional_language_model_field.py","file_name":"bidirectional_language_model_field.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"37389030","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.utils.crypto import get_random_string\n\n# Create your views here.\ndef index(request):\n if \"count\" not in request.session:\n request.session[\"count\"] = 1\n else:\n request.session[\"count\"] += 1\n \n random_word = {\n \"word\": get_random_string(length=14),\n }\n\n return render(request, \"random_word_app/index.html\", random_word)\n\ndef reset(request):\n request.session[\"count\"] = 0\n return redirect(\"/random_word\")","sub_path":"Week3/RandomWordGeneratorAssignment/apps/random_word_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"373669869","text":"# 
Tank Game:\r\n# A game with tanks and stuff.\r\n#\r\n# by Erkalys & Florimond Manca\r\n#\r\n# Level editor\r\n\r\n# Imports\r\n\r\nfrom tkinter import *\r\nimport math\r\nimport sys\r\nimport os\r\njoin = os.path.join\r\n\r\n# chemin d'accès aux fichiers\r\n\r\npath = os.getcwd()\r\n\r\n# Variables globales\r\n\r\nedit_path = join(path, \"custom_levels\")\r\nint_matrix = [[0 for j in range(32)] for i in range(21)]\r\nIA = [] # liste correspondant aux IA blue, bluePlus, purple et purplePlus\r\npaths_matrix = []\r\nrect_matrix = [[None for j in range(32)] for i in range(21)]\r\ncolor_dict = {'white': ('#fff', 0), 'black': ('#000', 1), 'brown': ('#a60', 2),\r\n 'green': ('#0f0', 3), 'yellow': ('#ff0', 4), 'orange': ('#fc0', 5),\r\n 'blue': ('#00f', 6), 'darkblue': ('#00a', 7), 'red': ('#f00', 8), 'darkred': ('#c00', 9),\r\n 'purple': ('#f0f', 10), 'darkpurple': ('#a0a', 11), 'grey': ('#aaa', 12), 'darkgrey': ('#777', 13)}\r\ntype_dict = {'white': 'None', 'black': 'Wall', 'brown': 'Pit',\r\n 'green': 'Player', 'yellow': 'IA Yellow', 'orange': 'IA YellowPlus',\r\n 'blue': 'IA Blue', 'darkblue': 'IA BluePlus', 'red': 'IA Red', 'darkred': 'IA redPlus',\r\n 'purple': 'IA purple', 'darkpurple': 'IA PurplePlus', 'grey': \"Spawner\", 'darkgrey': \"SpawnerPlus\"}\r\n\r\n# UTILITAIRES\r\n\r\n\r\ndef convert(liste, split=False): # renvoie la chaine de caractère correspondant à la liste avec des espaces\r\n string = \"\"\r\n if not split:\r\n for i in range(len(liste)):\r\n string += str(liste[i]) + \" \"\r\n else: # split = True, la liste est une liste de tupple qu'il faut séparer\r\n for i in range((len(liste))):\r\n string += str(liste[i][0]) + \" \" + str(liste[i][1]) + \" \"\r\n return string + '\\n'\r\n\r\n\r\ndef create_level(matrice, paths):\r\n n = 1\r\n while os.path.exists(join(edit_path, \"custom_level\" + str(n) + \".txt\")):\r\n n += 1\r\n fname = \"custom_level\" + str(n) + \".txt\"\r\n fullname = join(edit_path, fname)\r\n file = open(fullname, 'w')\r\n for i in range(len(matrice)):\r\n file.write(convert(matrice[i]))\r\n if paths != []:\r\n for i in range(len(paths)):\r\n file.write(convert(paths[i], split=True))\r\n file.close()\r\n\r\n\r\ndef convert_and_sort(matrix): # liste de liste de way_points -> liste de x1 y1 x2 y2 ...\r\n sortie = []\r\n for i in range(len(matrix)):\r\n sortie.append(matrix[i])\r\n for j in range(len(matrix[i])):\r\n (a, b) = matrix[i][j].pos\r\n sortie[i][j] = (b, a)\r\n sortie.sort() # on trie les listes de la matrice\r\n for i in range(len(sortie)):\r\n for j in range(len(sortie[i])):\r\n (a, b) = sortie[i][j]\r\n sortie[i][j] = (b, a)\r\n return sortie\r\n\r\n\r\ndef export(zone_dessin, int_matrix, paths_matrix, IA):\r\n warn = Tk()\r\n warn.title(\"Warning\")\r\n\r\n texte = Label(warn, text=\"You won't be able to edit this level after its exportation\")\r\n texte.pack()\r\n\r\n back = Button(warn, text='Get back to work', command=warn.destroy)\r\n back.pack(side=LEFT)\r\n\r\n def fct(zone_dessin, int_matrix, paths_matrix, IA):\r\n create_level(int_matrix, convert_and_sort(paths_matrix))\r\n clear(zone_dessin, int_matrix, paths_matrix, IA)\r\n warn.destroy()\r\n\r\n exp = Button(warn, text='Continue', command=lambda: fct(\r\n zone_dessin, int_matrix, paths_matrix, IA))\r\n exp.pack(side=RIGHT)\r\n warn.mainloop()\r\n\r\n\r\ndef clear(zone_dessin, int_matrix, paths_matrix, IA): # réinitialise l'éditeur de niveau\r\n int_matrix = [[0 for j in range(32)] for i in range(21)]\r\n paths_matrix = []\r\n IA = []\r\n for i in range(21):\r\n for j in range(32):\r\n 
if i == 0 or i == 20 or j == 0 or j == 31: # on est sur les bords\r\n rect = rectangle(zone_dessin, i, j, 'black', color_dict)\r\n rect_matrix[i][j] = rect\r\n int_matrix[i][j] = color_dict['black'][1]\r\n else:\r\n rect = rectangle(zone_dessin, i, j, 'white', color_dict)\r\n rect_matrix[i][j] = rect\r\n int_matrix[i][j] = color_dict['white'][1]\r\n\r\n\r\ndef clear_paths(paths_matrix, zone_dessin, IA): # réinitialise uniquement les chemins liés aux IA\r\n paths_matrix = []\r\n for i in range(len(IA)):\r\n paths_matrix.append([])\r\n zone_dessin.delete((\"path\", str(i)))\r\n for x in IA:\r\n x.points_list = [way_point(zone_dessin, x.pos[1], x.pos[0], x.id)]\r\n\r\n# GESTION D'EVENEMENTS LIES A LA SOURIS\r\n\r\n\r\nclass mouse():\r\n ''' la classe qui gère toutes les actions du curseur dans l'interface Tkinter'''\r\n\r\n def __init__(self, fenetre, color='white'):\r\n self.color = color\r\n self.mode = 'normal'\r\n self.where = fenetre\r\n self.buttons = None\r\n self.selected = None # le rectangle de l'IA bleue sélectionnée\r\n\r\n # les fonctions d'état\r\n\r\n def mouse_normal(self, zone_dessin):\r\n # passe la souris en mode normal à l'appui sur le bouton 'Normal mode'\r\n self.mode = 'normal'\r\n self.selected = None\r\n zone_dessin.delete(\"paths\")\r\n\r\n def mouse_path_creation(self, zone_dessin):\r\n # passe la souris en mode de création de path pour IA bleue à\r\n # l'appui sur le bouton 'Path creation mode'\r\n self.mode = 'path_creation'\r\n for x in IA:\r\n pts = x.points_list\r\n for i in range(len(pts) - 1):\r\n pts[i].plot(zone_dessin)\r\n pts[i].bind(pts[i + 1], zone_dessin)\r\n\r\n def In_Outils(self, event):\r\n self.where = \"zone_outils\"\r\n\r\n def Out(self, event):\r\n self.where = None\r\n\r\n def In_Dessin(self, event):\r\n self.where = \"zone_dessin\"\r\n\r\n # les fonctions d'édition\r\n\r\n def mouse_color_click(self, event):\r\n # permet de donner à la souris la couleur qui a été choisie\r\n for button in self.buttons:\r\n if button.selected:\r\n button.highlight_button() # on enlève la précédente selection\r\n if (event.x // 25, event.y // 25) == button.pos:\r\n self.color = button.color\r\n self.mode = 'normal' # on repasse en mode normal\r\n button.highlight_button()\r\n\r\n def print_color(self, event, zone_dessin, int_matrix, paths_matrix, color_dict, IA):\r\n\r\n # permet en mode normal de déposer sur le canevas un rectangle de couleur self.color\r\n # permet en mode création de chemin de dessiner le chemin de la dernière IA sélectionnée\r\n j, i = event.x // 25, event.y // 25\r\n\r\n if 1 <= i <= 19 and 1 <= j <= 30: # on ne doit pas changer les murs extérieurs\r\n rect = rect_matrix[i][j]\r\n\r\n # Cas 1: on est en mode normal d'édition\r\n if self.mode == 'normal':\r\n if rect.color != self.color:\r\n # on a affaire à une IA necessitant un path\r\n if self.color == 'blue' or self.color == 'darkblue' or self.color == 'purple' or self.color == 'darkpurple':\r\n newIA = IA_rectangle(zone_dessin, i, j, self.color, zone_dessin, color_dict)\r\n rect_matrix[i][j] = newIA\r\n int_matrix[i][j] = color_dict[self.color][1]\r\n newIA.id = len(IA)\r\n IA.append(newIA)\r\n paths_matrix.append([])\r\n else:\r\n rect_matrix[i][j] = rectangle(zone_dessin, i, j, self.color, color_dict)\r\n int_matrix[i][j] = color_dict[self.color][1]\r\n\r\n # Cas 2: on est en mode création de chemin d'IA bleue\r\n if self.mode == 'path_creation':\r\n n = int_matrix[i][j]\r\n if n == 6 or n == 7 or n == 10 or n == 11: # on a affaire à une IA qui nécessite un path\r\n for x in IA:\r\n 
if x.selected:\r\n x.highlight_rect(zone_dessin) # on deselectionne toutes les IA\r\n cwia = rect_matrix[i][j]\r\n self.selected = cwia.id\r\n cwia.highlight_rect(zone_dessin)\r\n elif self.selected != None:\r\n cwia = IA[self.selected]\r\n pt = way_point(zone_dessin, i, j, cwia.id)\r\n pt.bind(cwia.points_list[-1], zone_dessin)\r\n pt.plot(zone_dessin)\r\n cwia.points_list.append(pt)\r\n paths_matrix[cwia.id] = cwia.points_list\r\n\r\n def mouse_click_left(self, event, zone_dessin, int_matrix, paths_matrix, color_dict, IA):\r\n if self.where == 'zone_dessin':\r\n self.print_color(event, zone_dessin, int_matrix, paths_matrix,\r\n color_dict, IA) # on effectue l'action de la zone de dessin\r\n elif self.where == 'zone_outils':\r\n self.mouse_color_click(event) # on effecture l'action de la zone des outils\r\n\r\n def mouse_click_right(self, event, zone_dessin, int_matrix, rect_matrix, paths_matrix, color_dict, number):\r\n # N'est activée que dans la zone de dessin (contrairement au clic gauche)\r\n if self.where == \"zone_dessin\":\r\n\r\n if self.mode == 'normal': # En mode normal, permet d'effacer le rectangle sur lequel pointe la souris\r\n j, i = event.x // 25, event.y // 25\r\n if 1 <= i <= 19 and 1 <= j <= 30: # on ne doit pas changer les murs extérieurs\r\n rect = rect_matrix[i][j]\r\n if self.mode == 'normal':\r\n rect_matrix[i][j] = rectangle(zone_dessin, i, j, \"white\", color_dict)\r\n int_matrix[i][j] = 0\r\n\r\n elif self.mode == 'path_creation': # En mode path_creation, permet d'enlever un point du chemin\r\n if self.selected != None:\r\n cwia = IA[self.selected]\r\n pts = cwia.points_list\r\n if (len(pts)) > 1:\r\n pts.pop()\r\n cwia.points_list = pts\r\n paths_matrix[cwia.id] = pts\r\n zone_dessin.delete((\"paths\", str(cwia.id)))\r\n for i in range(len(pts) - 1):\r\n pts[i].plot(zone_dessin)\r\n pts[i].bind(pts[i + 1], zone_dessin)\r\n\r\n\r\n# CLASSES DIVERSES\r\n\r\nclass rectangle():\r\n '''regroupe un rectangle du canevas, sa couleur et sa position'''\r\n\r\n def __init__(self, can, i, j, color, color_dict):\r\n rect = can.create_rectangle(25 * j, 25 * i, 25 * (j + 1), 25 * (i + 1),\r\n width=1, fill=color_dict[color][0], outline=\"black\")\r\n self.rect = rect\r\n self.color = color\r\n self.pos = (j, i)\r\n self.master = can\r\n self.selected = False\r\n\r\n\r\nclass IA_rectangle:\r\n ''' un rectangle d'une IA bleue. 
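A standalone sketch of the two small conventions the handlers above depend on, assuming it runs inside this module (convert() is the helper defined near the top of the file):

CELL = 25  # canvas cells are 25x25 px, as in rectangle() and the click handlers

def pixel_to_cell(x, y):
    # mirrors "j, i = event.x // 25, event.y // 25" in print_color()
    return y // CELL, x // CELL

assert pixel_to_cell(137, 60) == (2, 5)  # a click at (137, 60) lands in row 2, col 5

# Exported level format: one space-separated line per map row,
# one "x y x y ..." line per IA path.
assert convert([0, 1, 12, 0]) == "0 1 12 0 \n"
assert convert([(3, 4), (3, 9)], split=True) == "3 4 3 9 \n"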
Contient, en plus d'un rectangle classique, le parcours de l'IA'''\r\n\r\n    def __init__(self, can, i, j, color, zone_dessin, color_dict):\r\n        rect = can.create_rectangle(25 * j, 25 * i, 25 * (j + 1), 25 * (i + 1),\r\n                                    width=1, fill=color_dict[color][0], outline=\"black\")\r\n        self.rect = rect\r\n        self.color = color\r\n        self.pos = (j, i)\r\n        self.master = can\r\n        self.id = -1\r\n        self.selected = False\r\n        # liste d'objets de classe way_point formant le chemin suivi\r\n        self.points_list = [way_point(zone_dessin, i, j, self.id)]\r\n\r\n    def highlight_rect(self, zone_dessin):\r\n        can = self.master\r\n        j, i = self.pos\r\n        color = self.color\r\n        if not self.selected:\r\n            self.rect = can.create_rectangle(\r\n                25 * j, 25 * i, 25 * (j + 1), 25 * (i + 1), width=1, fill=color_dict[color][0], outline=\"#fe2\")\r\n            self.points_list[0] = way_point(zone_dessin, i, j, self.id)\r\n        else:\r\n            self.rect = can.create_rectangle(\r\n                25 * j, 25 * i, 25 * (j + 1), 25 * (i + 1), width=1, fill=color_dict[color][0], outline='black')\r\n            self.points_list[0] = way_point(zone_dessin, i, j, self.id)\r\n        self.selected = not self.selected\r\n\r\n\r\nclass way_point():\r\n\r\n    def __init__(self, can, i, j, n):\r\n        rect = can.create_rectangle(25 * j + 5, 25 * i + 5, 25 * (j + 1) - 5, 25 * (i + 1) - 5,\r\n                                    width=1, fill=\"#aaa\", outline=\"#000\", tag=(\"path\", str(n)))\r\n        self.id = n\r\n        self.rect = rect\r\n        self.color = \"#aaa\"\r\n        self.pos = (j, i)\r\n\r\n    def bind(self, wp, zone_dessin):  # trace une ligne entre deux waypoints\r\n        (j1, i1) = self.pos\r\n        (j2, i2) = wp.pos\r\n        x1, y1, x2, y2 = 25 * j1 + 12, 25 * i1 + 12, 25 * j2 + 12, 25 * i2 + 12\r\n        if abs(x1 - x2) > abs(y1 - y2):\r\n            zone_dessin.create_line(x1, y1, x2, y1, x2, y2, fill=\"#aaa\",\r\n                                    width=1, tag=(\"path\", str(self.id)))\r\n        else:\r\n            zone_dessin.create_line(x1, y1, x1, y2, x2, y2, fill=\"#aaa\",\r\n                                    width=1, tag=(\"path\", str(self.id)))\r\n\r\n    def plot(self, zone_dessin):\r\n        (j, i) = self.pos\r\n        zone_dessin.create_rectangle(25 * j + 8, 25 * i + 8, 25 *\r\n                                     (j + 1) - 8, 25 * (i + 1) - 8, tag=(\"path\", str(self.id)))\r\n\r\n\r\nclass color_button:\r\n    # classe des boutons-outil\r\n\r\n    def __init__(self, can, x1, y1, x2, y2, color, color_dict, type_dict):\r\n        rect, text = self.create_button(can, x1, y1, x2, y2, color, color_dict, type_dict)\r\n        self.rectangle = rect\r\n        self.pos = (x1 // 25, y1 // 25)\r\n        self.master = can\r\n        self.coords = (x1, y1, x2, y2)\r\n        self.text = text\r\n        self.color = color\r\n        self.selected = False\r\n\r\n    def create_button(self, can, x1, y1, x2, y2, color, color_dict, type_dict):\r\n        rect = can.create_rectangle(x1, y1, x2, y2, width=1, fill=color_dict[\r\n            color][0], outline='black')\r\n        text = can.create_text(x2 + 4, (y1 + y2) / 2, text=type_dict[color], anchor=W)\r\n        return rect, text\r\n\r\n    def highlight_button(self):\r\n        can = self.master\r\n        x1, y1, x2, y2 = self.coords\r\n        color = self.color\r\n        if not self.selected:\r\n            self.rectangle = can.create_rectangle(\r\n                x1, y1, x2, y2, width=1, fill=color_dict[color][0], outline='#ee2')\r\n        else:\r\n            self.rectangle = can.create_rectangle(\r\n                x1, y1, x2, y2, width=1, fill=color_dict[color][0], outline='black')\r\n        self.selected = not self.selected\r\n\r\n\r\n
# LA FONCTION PRINCIPALE\r\n\r\ndef level_editor():\r\n\r\n    fenetre = Tk()\r\n    fenetre.title(\"Tank Game Level Editor\")\r\n\r\n    # la souris\r\n    souris = mouse(fenetre)\r\n    fenetre.bind(\r\n        \"<Button-1>\",\r\n        lambda event: souris.mouse_click_left(event,\r\n                                              zone_dessin, int_matrix, paths_matrix, color_dict, IA))\r\n    fenetre.bind(\r\n        \"<Button-3>\",\r\n        lambda event: souris.mouse_click_right(event,\r\n                                               zone_dessin, int_matrix, rect_matrix, paths_matrix, color_dict, IA))\r\n    bout = Button(fenetre, text='Exit', command=lambda: fenetre.destroy())\r\n    bout.pack()\r\n\r\n    # la fenetre de boutons d'action\r\n    fenetre_divers = LabelFrame(fenetre, text=\"Options\")\r\n    fenetre_divers.pack(side=RIGHT)\r\n    b_normal = Button(fenetre_divers, text=\"Normal mode\",\r\n                      command=lambda: souris.mouse_normal(zone_dessin))\r\n    b_normal.pack()\r\n    b_path = Button(fenetre_divers, text=\"Path creation mode\",\r\n                    command=lambda: souris.mouse_path_creation(zone_dessin))\r\n    b_path.pack()\r\n    exporter = Button(fenetre_divers, text=\"Export\", command=lambda: export(\r\n        zone_dessin, int_matrix, paths_matrix, IA))\r\n    exporter.pack()\r\n    clear_all = Button(fenetre_divers, text=\"Clear All\", command=lambda: clear(\r\n        zone_dessin, int_matrix, paths_matrix, IA))\r\n    clear_all.pack()\r\n    clear_p = Button(fenetre_divers, text=\"Clear Paths\",\r\n                     command=lambda: clear_paths(paths_matrix, zone_dessin, IA))\r\n    clear_p.pack()\r\n\r\n    # la fenetre de boutons-outil\r\n    fenetre_outils = LabelFrame(fenetre, text=\"Outils\")\r\n    fenetre_outils.bind(\"<Button-1>\", souris.mouse_color_click)\r\n    fenetre_outils.bind(\"<Enter>\", souris.In_Outils)\r\n    fenetre_outils.bind(\"<Leave>\", souris.Out)\r\n    fenetre_outils.pack(side=LEFT)\r\n    zone_outils = Canvas(fenetre_outils, width=120, height=525)\r\n    zone_outils.pack()\r\n\r\n    # la fenetre de dessin\r\n    fenetre_dessin = LabelFrame(fenetre, text=\"Map\")\r\n    fenetre_dessin.bind(\"<Enter>\", souris.In_Dessin)\r\n    fenetre_dessin.bind(\"<Leave>\", souris.Out)\r\n    fenetre_dessin.pack(side=RIGHT)\r\n    zone_dessin = Canvas(fenetre_dessin, width=800, height=525)\r\n    zone_dessin.pack()\r\n\r\n    # création des boutons-outil dans le canevas zone_outils\r\n\r\n    buttons = []\r\n    for color in color_dict:\r\n        n = color_dict[color][1]\r\n        button = color_button(zone_outils, 4, 25 * (n + 1), 4 + 25,\r\n                              25 * (n + 2), color, color_dict, type_dict)\r\n        buttons.append(button)\r\n    souris.buttons = buttons\r\n\r\n    # creation des rectangles et initialisation de la matrice\r\n\r\n    for i in range(21):\r\n        for j in range(32):\r\n            if i == 0 or i == 20 or j == 0 or j == 31:  # on est sur les bords\r\n                rect = rectangle(zone_dessin, i, j, 'black', color_dict)\r\n                rect_matrix[i][j] = rect\r\n                int_matrix[i][j] = color_dict['black'][1]\r\n            else:\r\n                rect = rectangle(zone_dessin, i, j, 'white', color_dict)\r\n                rect_matrix[i][j] = rect\r\n                int_matrix[i][j] = color_dict['white'][1]\r\n\r\n    # boucle principale (la fenetre est detruite par le bouton Exit ou la croix)\r\n    fenetre.mainloop()\r\n\r\nif __name__ == \"__main__\":\r\n    level_editor()\r\n","sub_path":"src/leveleditor.py","file_name":"leveleditor.py","file_ext":"py","file_size_in_byte":17983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"260816931","text":"from json import dump as dump_json, load as load_json\nfrom pickle import dumps as pickle_dumps, loads as pickle_loads\nfrom random import choice as random_choice\n\nfrom django.conf import settings as django_settings\nfrom redis.sentinel import Sentinel\nfrom redis.exceptions import TimeoutError\nfrom redis.sentinel import MasterNotFoundError\n\nfrom .utils import executing_test_case, executing_travis\n\nimport logging\n_logger = logging.getLogger(__name__)\nd = logging.getLogger(__name__).debug\n\n\ndef RedisSentinelCache(*args, **kwargs):\n    \"\"\"\n    A factory for the redis client.\n\n    Returns dummy cache with hardcoded dict as the storage when 
executing inside travis\n \"\"\"\n if executing_travis() or kwargs.get('dummy', False):\n return _RedisSentinelCacheDummy(*args, **kwargs)\n else:\n return _RedisSentinelCache(*args, **kwargs)\n\n\nclass _RedisSentinelCache():\n\n def __init__(self, db=0, master_only=False, settings=django_settings):\n \"\"\"\n db: database index to read/write to. available indexes 0-15.\n master_only: always use master for read operations, for those times when you know you are going to\n read the same key again from cache very soon.\n settings: override redis setttings in settings.py. easier to use class from outside context of django (i.e. cron)\n \"\"\"\n if not isinstance(settings, dict):\n if not hasattr(settings, 'REDIS_SENTINEL'):\n raise Exception('Missing configuration from settings.py: REDIS_SENTINEL')\n settings = settings.REDIS_SENTINEL\n\n if not settings.get('HOSTS', None):\n raise Exception('Missing configuration from settings for REDIS_SENTINEL: HOSTS')\n if not settings.get('SERVICE', None):\n raise Exception('Missing configuration from settings for REDIS_SENTINEL: SERVICE')\n if not settings.get('TEST_DB', None):\n raise Exception('Missing configuration from settings for REDIS_SENTINEL: TEST_DB')\n if len(settings['HOSTS']) < 3:\n raise Exception('Invalid configuration in settings for REDIS_SENTINEL: HOSTS minimum number of hosts is 3')\n\n if executing_test_case():\n db = settings['TEST_DB']\n elif db == settings['TEST_DB']:\n raise Exception('Invalid db: db index %d is reserved for test suite execution.' % db)\n\n self._sentinel = Sentinel(settings['HOSTS'], password=settings['PASSWORD'], socket_timeout=settings.get('SOCKET_TIMEOUT', 0.1), db=db)\n self._service_name = settings['SERVICE']\n self._DEBUG = settings.get('DEBUG', False)\n self._read_from_master_only = master_only\n self._node_count = self._node_count()\n\n def set(self, key, value, **kwargs):\n if self._DEBUG:\n d('cache: set()...')\n pickled_data = pickle_dumps(value)\n master = self._get_master()\n\n try:\n return master.set(key, pickled_data, **kwargs)\n except (TimeoutError, MasterNotFoundError):\n if self._DEBUG:\n d('cache: master timed out or not found. no write instances available')\n # no master available\n return\n\n if self._DEBUG:\n test = master.get(key)\n if test:\n d('cache: set() successful')\n else:\n d('cache: set() unsuccessful, could not get saved data?')\n\n def get_or_set(self, key, value, **kwargs):\n \"\"\"\n Atomic set of value only if key did not exist yet.\n\n Returns True if set was successful, None if set failed (= value existed)\n\n https://redis.io/commands/setnx Not recommended any longer for distributed locks...\n https://redis.io/topics/distlock However this is also just a proposal and no official\n implementation exists yet\n \"\"\"\n return self.set(key, value, nx=True, **kwargs)\n\n def get(self, key, **kwargs):\n \"\"\"\n Randomly select slave or master for reading. Fallback to master anyway in case of errors.\n\n Use of master can be forced by using the master_only flag in the constructor.\n \"\"\"\n\n # todo allow reading from cache when master is down? 
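A sketch of the lock-style usage that get_or_set() above enables (illustrative only; it assumes settings.REDIS_SENTINEL is configured and a sentinel cluster is reachable, and the key name and db index are hypothetical):

cache = RedisSentinelCache(db=1, master_only=True)

# get_or_set() is SET NX underneath: only the first caller gets a truthy
# result, so it can gate work that must not run concurrently. The extra
# 'ex' kwarg is passed straight through to redis-py's set() as an expiry.
if cache.get_or_set('cron.lock', 'owner-1', ex=60):
    try:
        pass  # ... do the exclusive work ...
    finally:
        cache.delete('cron.lock')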
could possibly serve stale data.\n # see redis.conf setting: slave-serve-stale-data\n if self._DEBUG:\n d('cache: get()...')\n\n if self._read_from_master_only:\n return self._get_from_master(key, **kwargs)\n else:\n if self._slave_chosen():\n try:\n return self._get_from_slave(key, **kwargs)\n except TimeoutError:\n pass\n # lady luck chose master, or read from slave had an error\n return self._get_from_master(key, **kwargs)\n\n def delete(self, *keys):\n if self._DEBUG:\n d('cache: delete()...')\n\n master = self._get_master()\n\n try:\n master.delete(*keys)\n except (TimeoutError, MasterNotFoundError):\n if self._DEBUG:\n d('cache: master timed out or not found. no write instances available. raising error')\n # no master available\n return\n\n if self._DEBUG:\n test = master.get(keys[0])\n if not test:\n d('cache: delete() successful')\n else:\n d('cache: delete() unsuccessful, could not delete data?')\n\n def _get_from_slave(self, key, **kwargs):\n node = self._get_slave()\n try:\n res = node.get(key, **kwargs)\n except TimeoutError:\n if self._DEBUG:\n d('cache: slave.get() timed out, trying from master instead. fail-over in process?')\n # fail-over propbably happened, and the old slave is now a master\n # (in case there was only one slave). try master instead\n raise\n else:\n return pickle_loads(res) if res is not None else None\n\n def _get_from_master(self, key, **kwargs):\n master = self._get_master()\n try:\n res = master.get(key, **kwargs)\n except (TimeoutError, MasterNotFoundError):\n if self._DEBUG:\n d('cache: master timed out also. no read instances available. returning None')\n # uh oh, no master available either. either all redis instances have hit the bucket,\n # or there is a fail-over in process, and a new master will be in line in a moment\n return None\n return pickle_loads(res) if res is not None else None\n\n def get_master(self):\n \"\"\"\n Expose the master node to permit any operation in redis-py\n \"\"\"\n return self._get_master()\n\n def _slave_chosen(self):\n return random_choice(range(0, self._node_count)) != 0\n\n def _get_master(self):\n if self._DEBUG:\n d('cache: getting master')\n return self._sentinel.master_for(self._service_name, socket_timeout=0.1)\n\n def _get_slave(self):\n if self._DEBUG:\n d('cache: getting slave')\n return self._sentinel.slave_for(self._service_name, socket_timeout=0.1)\n\n def _node_count(self):\n return len(self._sentinel.discover_slaves(self._service_name)) + 1 # +1 is master\n\n\nclass _RedisSentinelCacheDummy():\n\n \"\"\"\n A dummy redis client that writes to a file on disk.\n \"\"\"\n\n _storage_path = '/tmp/redis_dummy_storage'\n\n def __init__(self, *args, **kwargs):\n # d('Note: using dummy cache')\n pass\n\n def set(self, key, value, **kwargs):\n storage = self._get_storage()\n storage[key] = value\n self._save_storage(storage)\n\n def get(self, key, **kwargs):\n return self._get_storage().get(key, None)\n\n def get_or_set(self, key, value, **kwargs):\n if self.get(key):\n return False\n else:\n self.set(key, value,)\n return True\n\n def delete(self, key, **kwargs):\n storage = self._get_storage()\n storage.pop(key, False)\n self._save_storage(storage)\n\n def get_master(self):\n return self\n\n def flushdb(self):\n self._save_storage({})\n return True\n\n def _get_storage(self):\n try:\n with open(self._storage_path, 'r') as f:\n return load_json(f)\n except Exception as e:\n _logger.error('Could not open dummy cache file for reading at %s: %s' % (self._storage_path, str(e)))\n return {}\n\n def 
_save_storage(self, storage):\n try:\n with open(self._storage_path, 'w') as f:\n dump_json(storage, f)\n except Exception as e:\n _logger.error('Could not open dummy cache file for writing at %s: %s' % (self._storage_path, str(e)))\n","sub_path":"src/metax_api/utils/redis.py","file_name":"redis.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572082797","text":"import json\nimport random\n\nimport requests\nfrom django.db import transaction, OperationalError\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nimport datetime\nimport time\n# Create your views here.\nfrom rest_framework.views import APIView\nfrom rest_framework.authtoken.models import Token\nfrom accounts.models import PhoneConfirm, User\nfrom accounts.serializers import SMSSignupPhoneCheckSerializer, SMSSignupPhoneConfirmSerializer\nfrom core.sms.utils import SMSV2Manager, MMSV1Manager\nfrom core.tools import get_client_ip\nfrom django.conf import settings\nfrom logic.models import DateTimeLotteryResult\nfrom logs.models import MMSSendLog\nfrom projects.models import Project\nfrom products.models import Product, Reward\nfrom respondent.models import RespondentPhoneConfirm, Respondent\nfrom respondent.serializers import SMSRespondentPhoneCheckSerializer, RespondentCreateSerializer, SMSRespondentPhoneConfirmSerializer\n\n\nclass SMSViewSet(viewsets.GenericViewSet):\n \"\"\"\n sms 전송시 공통으로 사용하는 viewset\n \"\"\"\n permission_classes = [AllowAny]\n\n def get_serializer_class(self):\n if self.action == 'send':\n serializer = SMSSignupPhoneCheckSerializer\n elif self.action == 'confirm':\n serializer = SMSSignupPhoneConfirmSerializer\n elif self.action == 'respondent_send':\n serializer = SMSRespondentPhoneCheckSerializer\n elif self.action == 'respondent_confirm':\n serializer = SMSRespondentPhoneConfirmSerializer\n else:\n serializer = super(SMSViewSet, self).get_serializer_class()\n return serializer\n\n @action(methods=['post'], detail=False)\n def send(self, request, *args, **kwargs):\n \"\"\"\n 회원가입시 인증번호 발송하는 api입니다.\n api: api/v1/sms/send\n method: POST\n data: {'phone'}\n \"\"\"\n data = request.data\n serializer = self.get_serializer(data=data)\n if serializer.is_valid(raise_exception=True):\n phone = serializer.validated_data['phone']\n sms_manager = SMSV2Manager()\n sms_manager.set_content()\n sms_manager.create_instance(phone=phone, kinds=PhoneConfirm.SIGN_UP)\n\n if not sms_manager.send_sms(phone=phone):\n return Response(\"Failed send sms\", status=status.HTTP_410_GONE)\n\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n @action(methods=['post'], detail=False)\n def confirm(self, request, *args, **kwargs):\n \"\"\"\n 회원가입시 인증번호 인증 api입니다. 
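An illustrative client call for the signup flow documented in these two actions; the endpoint paths and payload keys come from the docstrings, while the host, phone number, and confirm key are made-up values:

import requests

base = 'https://example.com/api/v1/sms'  # hypothetical host
requests.post(base + '/send', json={'phone': '01012345678'})
r = requests.post(base + '/confirm',
                  json={'phone': '01012345678', 'confirm_key': '123456'})
r.json()  # -> {'phone': '01012345678'} on success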
인증시 다음페이지(비밀번호설정)에서 사용할 phone을 리턴합니다.\n api: api/v1/sms/confirm\n method: POST\n data: {'phone', 'confirm_key'}\n \"\"\"\n data = request.data\n serializer = self.get_serializer(data=data)\n if not serializer.is_valid(raise_exception=True):\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'phone': serializer.validated_data['phone']}, status=status.HTTP_200_OK)\n\n @action(methods=['post'], detail=False)\n def respondent_send(self, request, *args, **kwargs):\n \"\"\"\n 설문자 인증번호 발송시 사용하는 핸드폰인증입니다.\n api: api/v1/sms/respondent_send/\n method: POST\n data: {'phone'}\n \"\"\"\n data = request.data\n serializer = self.get_serializer(data=data)\n if serializer.is_valid(raise_exception=True):\n phone = serializer.validated_data['phone']\n sms_manager = SMSV2Manager()\n sms_manager.set_respondent_content()\n sms_manager.create_respondent_send_instance(phone=phone)\n\n if not sms_manager.send_sms(phone=phone):\n return Response(\"Failed send sms\", status=status.HTTP_410_GONE)\n\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n @transaction.atomic\n @action(methods=['post'], detail=False)\n def respondent_confirm(self, request, *args, **kwargs):\n \"\"\"\n 설문자 인증번호 인증 api입니다. 인증시 서버에서 5-10초후 reward MMS를 발송합니다.\n api: api/v1/sms/respondent_confirm\n method: POST\n 전화번호, 인증번호 와 url에서 파싱한 project_key와 validator를 담아서 보내주어야 합니다.\n data: {'phone', 'confirm_key', 'project_key', 'validator'}\n \"\"\"\n data = request.data\n serializer = self.get_serializer(data=data)\n if not serializer.is_valid(raise_exception=True):\n return Response(status=status.HTTP_400_BAD_REQUEST)\n if not Project.objects.filter(project_hash_key=data.get('project_key')).exists():\n return Response(status=status.HTTP_404_NOT_FOUND)\n self.data = serializer.validated_data\n self._set_project()\n self._create_respondent()\n\n # 여기까지가 유저 당첨확인 및 생성\n item_name = ''\n if self.is_win:\n self._set_random_reward()\n\n phone = self.data.get('phone')\n brand = self.reward.product.item.brand.name\n item_name = self.reward.product.item.name\n item_url = self.reward.reward_img.url\n due_date = self.reward.due_date\n\n if type(item_url) is tuple:\n item_url = ''.join(item_url)\n\n if type(item_name) is tuple:\n item_name = ''.join(item_name)\n\n mms_manager = MMSV1Manager()\n mms_manager.set_content(brand, item_name, due_date)\n success, code = mms_manager.send_mms(phone=phone, image_url=item_url)\n if not success: # TODO : status 로 클라에서 재전송버튼 활성화? 
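A hedged sketch of how the MMSSendLog rows written on failure above could drive a manual re-send; this helper is not in the original codebase, and the field names are taken from the MMSSendLog.objects.create(...) call:

from core.sms.utils import MMSV1Manager
from logs.models import MMSSendLog

def resend_failed_mms():
    # Each row mirrors a failed send recorded by respondent_confirm().
    for log in MMSSendLog.objects.all():
        mms = MMSV1Manager()
        mms.set_content(log.brand, log.item_name, log.due_date)
        success, code = mms.send_mms(phone=log.phone, image_url=log.item_url)
        if success:
            log.delete()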
아니면 슬랙알림만?\n MMSSendLog.objects.create(code=code, phone=phone, item_name=item_name, item_url=item_url,\n due_date=due_date, brand=brand)\n\n # TODO: 당첨자 안나온 상품 있으면 한번에 보내기\n self.reward.winner_id = self.respondent.id\n self.reward.save()\n item_name = self.reward.product.item.short_name\n\n return Response({'id': self.project.id,\n 'is_win': self.is_win,\n 'item_name': item_name}, status=status.HTTP_200_OK)\n\n def _set_random_reward(self): # TODO: 에러날경우 패스 혹은 문의하기로\n reward_queryset = Reward.objects.filter(winner_id__isnull=True) \\\n .select_related('product', 'product__item', 'product__project', 'product__item__brand')\n remain_rewards = reward_queryset.filter(product__project=self.project)\n remain_rewards_id = list(remain_rewards.values_list('id', flat=True))\n remain_rewards_price = list(remain_rewards.values_list('product__item__price', flat=True))\n reward_weight = list(map(lambda x: round(1 / x * (sum(remain_rewards_price) / len(remain_rewards_price)))\n , remain_rewards_price))\n random_reward_id_by_weight = random.choices(remain_rewards_id, weights=reward_weight)[0]\n self.reward = reward_queryset.get(id=random_reward_id_by_weight)\n\n def _set_project(self):\n project_queryset = Project.objects.filter(project_hash_key=self.data.get('project_key'))\\\n .prefetch_related('respondents', 'respondents__phone_confirm', 'products', 'products__rewards') # 2021.07.07 [d-o-d.io 리뉴얼 ]추가 ####\n\n self.project = project_queryset.get(project_hash_key=self.data.get('project_key'))\n self.phone_confirm = RespondentPhoneConfirm.objects.filter(phone=self.data.get('phone'),\n confirm_key=self.data.get('confirm_key'),\n is_confirmed=True).first()\n\n def _create_respondent(self):\n self.is_win = self._am_i_winner()\n data = {'project': self.project.id,\n 'phone_confirm': self.phone_confirm.id,\n 'is_win': self.is_win}\n\n serializer = RespondentCreateSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n self.respondent = serializer.save()\n\n def _am_i_winner(self):\n # 프로젝트 생성자는 무조건 꽝!\n if self.phone_confirm.phone == self.project.owner.phone:\n return False\n # 2021.07.07 [d-o-d.io 리뉴얼 ]추가 ####\n if self.project.products.filter(rewards__isnull=True).exists():\n return False\n # 2021.07.07 [d-o-d.io 리뉴얼 ]추가 ####\n self.lucky_times = self.project.select_logics.last().lottery_times.filter(is_used=False)\n now = datetime.datetime.now()\n self.valid_lucky_times = self.lucky_times.filter(lucky_time__lte=now)\n if not self.valid_lucky_times.exists():\n # 당첨 안된 경우\n return False\n else:\n try:\n with transaction.atomic(using='default'):\n vlt = DateTimeLotteryResult.objects.select_for_update(nowait=True)\\\n .filter(logic__project=self.project).filter(is_used=False, lucky_time__lte=now).first()\n vlt.is_used = True\n vlt.save()\n val = True\n except OperationalError:\n val = False\n except DateTimeLotteryResult.DoesNotExist:\n val = False\n return val\n\n\nclass SendMMSAPIView(APIView):\n \"\"\"\n 당첨자 3초 후 문자전송을 위해 만듬 (20210622)\n\n [DEPRECATED] 발송실패가 많음. 
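The inverse-price weighting used by _set_random_reward() above, shown standalone: each weight is 1/price rescaled by the mean price, so cheaper rewards get proportionally higher odds (prices and ids here are examples):

import random

prices = [1000, 2000, 4000]
mean = sum(prices) / len(prices)
weights = [round(1 / p * mean) for p in prices]  # -> [2, 1, 1] for these prices
reward_ids = ['a', 'b', 'c']
print(random.choices(reward_ids, weights=weights)[0])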
(20210625)\n \"\"\"\n permission_classes = [AllowAny]\n\n def post(self, request, *args, **kwargs):\n\n ip = get_client_ip(request)\n if ip not in settings.ALLOWED_HOSTS:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n data = request.data\n phone = data.get('phone')\n brand = data.get('brand')\n item_name = data.get('item_name')\n item_url = data.get('item_url')\n due_date = data.get('due_date')\n time.sleep(3) # wait 3 seconds\n mms_manager = MMSV1Manager()\n mms_manager.set_content(brand, item_name, due_date)\n success, code = mms_manager.send_mms(phone=phone, image_url=item_url)\n if not success:\n MMSSendLog.objects.create(code=code, phone=phone, item_name=item_name, item_url=item_url, due_date=due_date)\n # MMSSendLog.objects.create(code=code, phone=phone, item_name=item_name, item_url=item_url, due_date=due_date)\n return Response(status=status.HTTP_200_OK)\n\n","sub_path":"DOD-main-server/dod/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185543488","text":"\"\"\"\nPerform file organizing and metadata tasks for GeoTIFF files from MicaSense RedEdge.\nBy: Emily Sturdivant, esturdivant@usgs.gov, https://github.com/esturdivant-usgs\nRequires Python 3.2+\n\nThe user inputs the following:\n- input image directory (imgdir_in): directory where the raw files are stored and sorted by flight number\n- output image directory (imgdir_out): directory to be created and populated with the processed files\n- minimum and maximum altitude for files to process\n- mission info (survey ID, camera ID, values for metadata)\n\nNote:\n- We expect flight folder names to start with 'f' and end with a number. The number in the folder name will be used as the flight ID.\n\n\nThis script does the following:\n 1. For each flight folder copy files to a flight folder in the out dir\n 1.a. Get flight IDs (integer) from the folder names in 'imgdir_in'.\n 1.b. Copy TIF files in each flight folder to new flight folder in 'imgdir_in' using the naming scheme fXX, ignoring intermediate folders (e.g. 0000SET, 000)\n 2. Change photo metadata: rename and assign metadata values\n 2.a. Rename files with survey ID, flight ID, sensor ID, datetime, and original filename.\n 2.b. Add flight ID to UserComment tag in file header.\n 3. Add standard metadata values.\n 3.a. Add the standard WHCMSC Exif tags, with specific values input by the user.\n 3.b. Copy the CreateDate value to the GPSDate and GPSTime tags.\n 4. Copy files within the input altitude range to a separate directory tree.\n\"\"\"\n#%%\nimport pandas as pd\nimport os\nimport subprocess\nimport shutil\nimport glob\n\n#%% Set source dir\n# Only used flights 11-32, i.e. photos collected on 8/7 and 8/8. RedEdge surveying on 8/6 (flights 2–10) had many gaps.\nimgdir_in = r\"/Volumes/stor/Projects/2019009FA_PlumIsland/RAW/photos/mica_images\"\nimgdir_out = r\"/Volumes/stor/Projects/2019009FA_PlumIsland/PROCESSED/final_photos/micasense\"\n\n# If calibration files have already been separated out, they will need to be processed too.\ncal_separated = True\n\n# Altitude filtering values\nalt_min = 73\nalt_max = 88\n\n# Mission info\nfan = '2019-009-FA' # input('Field activity number (e.g. \"2017-010-FA\"): ')\ncam_id = 'm01'\n# flight_id = 'f06'\n# doi =\n\n# WHSC EXIF population\nsurveyid = fan.replace(\"-\",\"\")\nsite = \"Plum Island, MA\"\ncredit = \"U.S. 
Geological Survey\"\ncomment = \"\"\"One band of a multispectral image from a low-altitude aerial survey in {1}. Captured with a MicaSense RedEdge at a target altitude of 80 m. WHCMSC field activity number {0} (https://cmgds.marine.usgs.gov/fan_info.php?fa={0}).\"\"\".format(fan, site) # used for caption, ImageDescription, Caption-Abstract\nkeywords = \"{}; Massachusetts; {}; UAS; nadir; multispectral; USGS\".format(site, fan)\nartist = \"WHCMSC AIM Group\"\ncontact = \"WHSC_data_contact@usgs.gov\"\n\n#%% Functions\ndef write_WHSC_exiftags(imgdir, credit, comment, keywords, artist, contact, run=True, recursive=True):\n # Tags that will be identical for all images in the folder\n tagvalues = {}\n tagvalues['imgdir'] = imgdir\n tagvalues['credit'] = credit\n tagvalues['artist'] = artist\n tagvalues['contact'] = contact\n tagvalues['comment'] = comment\n tagvalues['keywords'] = keywords\n tagvalues['copyright'] = \"Public Domain. Please credit {credit}\".format(**tagvalues)\n # Write to EXIF\n substance = \"\"\"-Artist=\"{artist} \" -Credit=\"{credit} \" -Contact=\"{contact} \" -comment=\"{comment} \" -sep \"; \" -keywords=\"{keywords} \" -Caption=\"{comment} \" -Copyright=\"{copyright} \" -CopyrightNotice=\"{copyright} \" -Caption-Abstract=\"{comment} \" -ImageDescription=\"{comment} \" \"\"\".format(**tagvalues)\n tagvalues['substance'] = substance\n if run:\n if recursive:\n cmd = \"\"\"exiftool {substance} -overwrite_original -r {imgdir}\"\"\".format(**tagvalues)\n else:\n cmd = \"\"\"exiftool {substance} -overwrite_original {imgdir}\"\"\".format(**tagvalues)\n subprocess.check_call(cmd, shell=True)\n print(\"Updated Exif headers in directory: {}\".format(imgdir))\n return\n else:\n return(substance)\n\ndef filter_by_exif(imgdir, keep_dir, min, max, tag='GPSAltitude'):\n # Filter image files by indicated EXIF tag. 
Copy files that match the criteria to a keep folder.\n    # Print altitudes to CSV\n    exif_csv = imgdir+\"_{}.csv\".format(tag)\n    cmd1 = \"\"\"exiftool -csv -{} -n -r {} > {}\"\"\".format(tag, imgdir, os.path.join(imgdir, exif_csv))\n    subprocess.check_call(cmd1, shell=True)\n    # Move photos with values between min and max (inclusive) to keep folder\n    # Convert CSV To DF\n    df = pd.read_csv(exif_csv)\n    # for each entry, if the value is between the min and max arguments (inclusive), copy file to the keep folder\n    keepct = 0  # keep counter\n    rct = 0  # remove counter\n    for index, row in df.iterrows():\n        fpath = row.SourceFile\n        fname = os.path.basename(row.SourceFile)\n        alt = row.loc[tag]\n        if alt < min or alt > max:\n            rct += 1\n        else:\n            try:\n                shutil.copy2(fpath, os.path.join(keep_dir, fname))\n                keepct += 1\n            except:\n                pass\n    # Print counts\n    print(\"Files matching criteria: {} out of {}\".format(keepct, keepct+rct))\n    # Check altitudes - print to CSV and DF\n    exif_csv = keep_dir+\"_keepers.csv\"\n    cmd1 = \"\"\"exiftool -csv -{} -n {} > {}\"\"\".format(tag, keep_dir, exif_csv)\n    subprocess.check_call(cmd1, shell=True)\n    df = pd.read_csv(exif_csv)\n    #\n    return(df)\n\n#%% For each flight folder: get flight number, copy files to out dir, rename files\n# Copy files to new flight folders, ignoring intermediate folders\nprint(\"\\n***** Creating new folder structure and copying files *****\")\nfdirs = glob.glob(os.path.join(imgdir_in, 'f*[0-9]'))\nfor fltdir in fdirs:\n    # Convert folder name to flight ID\n    fnum = int(''.join(filter(str.isdigit, os.path.basename(fltdir))))\n    flight_id = 'f{0:02d}'.format(fnum)\n    # Create folder for renamed photos\n    print(\"Creating folder and copying files for flight {} ({})\".format(fnum, flight_id))\n    fltdir_out = os.path.join(imgdir_out, flight_id)\n    try: shutil.rmtree(fltdir_out)\n    except FileNotFoundError: pass\n    os.makedirs(fltdir_out, exist_ok=True)\n    # List TIF files and copy to processing directory, ignoring intermediate folders\n    imglist = glob.glob(os.path.join(fltdir, '**/*.tif'), recursive=True)\n    for img in imglist:\n        shutil.copy2(img, os.path.join(fltdir_out, os.path.basename(img)))\n    # Check that there are the expected number of files in the directory.\n    if len(imglist) < len(os.listdir(fltdir_out)):\n        print(\"WARNING: There are extra files in the flight directory.\")\n    elif len(imglist) > len(os.listdir(fltdir_out)):\n        print(\"WARNING: There are too few files in flight directory.\")\n
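A sketch of calling the altitude filter above on a single flight folder, mirroring the commented usage at the bottom of this script; the flight folder name 'f12' is hypothetical and ExifTool must be on PATH:

keep_dir = os.path.join(imgdir_out, 'keep_alt{}to{}'.format(alt_min, alt_max), 'f12')
os.makedirs(keep_dir, exist_ok=True)
df_keepers = filter_by_exif(os.path.join(imgdir_out, 'f12'), keep_dir,
                            alt_min, alt_max, tag='GPSAltitude')
print(df_keepers.head())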
\n#%% Change photo metadata: Rename, Assign Exif tag values,\nprint(\"\\n***** Running ExifTool for each flight folder to rename files and add flight number. *****\")\nfor fltdir_out in glob.glob(os.path.join(imgdir_out, 'f*[0-9]')):\n    flight_id = os.path.basename(fltdir_out)\n    num_renamed = len(glob.glob(os.path.join(fltdir_out, '{}*'.format(surveyid))))\n    if num_renamed > 0:\n        print(\"ALERT: There are already {} files in the {} folder that may have been renamed. We'll skip this folder.\".format(num_renamed, flight_id))\n        continue\n    fnum = int(''.join(filter(str.isdigit, flight_id)))\n    # Rename and save flight ID in UserComment, uses ExifTool\n    cmd1 = \"\"\"exiftool -d {}_{}{}_%Y%m%dT%H%M%SZ_%%f.%%e \"-filename<CreateDate\" \"-UserComment=Flight {}\" -overwrite_original {}\"\"\".format(surveyid, flight_id, cam_id, fnum, fltdir_out)\n    subprocess.check_call(cmd1, shell=True)\n\n# Check the renamed files - print UserComment values to CSV\n# exif_csv = imgdir_out + \"_UserComment.csv\"\n# cmd1 = \"\"\"exiftool -csv -UserComment -r {} > {}\"\"\".format(imgdir_out, exif_csv)\n# subprocess.check_call(cmd1, shell=True)\n# df = pd.read_csv(exif_csv)\n# df\n\n#%% Same process, but all in one for-loop (older version):\n# #%% For each flight folder: get flight number, copy files to out dir, rename files\n# # List folders\n# # In each folder, rename files\n# fdirs = glob.glob(os.path.join(imgdir_in, 'f*'), recursive=True)\n# for fltdir in fdirs:\n#     # Convert folder name to flight ID\n#     fnum = int(''.join(filter(str.isdigit, os.path.basename(fltdir))))\n#     flight_id = 'f{0:02d}'.format(fnum)\n#     print(\"\\n***** Processing files from flight {} ({}) *****\".format(fnum, flight_id))\n#     # Create folder for renamed photos\n#     print(\"...Creating new flight folder...\")\n#     fltdir_out = os.path.join(imgdir_out, flight_id)\n#     try: shutil.rmtree(fltdir_out)\n#     except FileNotFoundError: pass\n#     os.makedirs(fltdir_out, exist_ok=True)\n#     # List TIF files and copy to processing directory, ignoring intermediate folders\n#     imglist = glob.glob(os.path.join(fltdir, '**/*.tif'), recursive=True)\n#     for img in imglist:\n#         shutil.copy2(img, os.path.join(fltdir_out, os.path.basename(img)))\n#     # Check that there are the expected number of files in the directory.\n#     if len(imglist) < len(os.listdir(fltdir_out)):\n#         print(\"WARNING: There are extra files in the flight directory.\")\n#     elif len(imglist) > len(os.listdir(fltdir_out)):\n#         print(\"WARNING: There are too few files in flight directory.\")\n#     # Rename and save flight ID in UserComment, uses ExifTool\n#     cmd1 = \"\"\"exiftool -d {}_{}{}_%Y%m%dT%H%M%SZ_%%f.%%e \"-filename<CreateDate\" \"-UserComment=Flight {}\" -overwrite_original {}\"\"\".format(surveyid, flight_id, cam_id, fnum, fltdir_out)\n#     subprocess.check_call(cmd1, shell=True)\n#     if len(imglist) > len(os.listdir(fltdir_out)):\n#         print(\"WARNING: There are too few files in flight directory.\")\n#     #---- But not below here:\n#     # Execute altitude filter\n#     print(\"Copying files matching altitude criteria into keep folder...\")\n#     keep_dir = os.path.join(imgdir_out, 'keep_alt{}to{}'.format(alt_min, alt_max), flight_id)\n#     os.makedirs(keep_dir, exist_ok=True)  # Create keep_dir tree if it doesn't already exist.\n#     # Copy files within altitude bounds to keep folder\n#     keeper_values = filter_by_exif(fltdir_out, keep_dir, alt_min, alt_max, tag='GPSAltitude')\n#     print(\"Number of files in final directory: {}\".format(len(os.listdir(keep_dir))))\n","sub_path":"image_RenameTagFilter.py","file_name":"image_RenameTagFilter.py","file_ext":"py","file_size_in_byte":13452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
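For reference, the rename pattern above produces names like the following. This is a standalone mock, not code from the script: the field order follows the header docstring (survey ID, flight ID, sensor ID, datetime, original filename), and the flight, datetime, and frame values are invented.

fname = '{}_{}{}_{}_{}.{}'.format('2019009FA', 'f12', 'm01',
                                  '20190807T143210Z', 'IMG_0001', 'tif')
print(fname)  # 2019009FA_f12m01_20190807T143210Z_IMG_0001.tif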
{"seq_id":"499835514","text":"end = True\r\nwhile end:\r\n    print('Do you want to type the number(yes/no):')\r\n    c = input()\r\n    if c == 'yes':\r\n        print('Please, type the number:')\r\n        a = int(input())\r\n        if a < 0:\r\n            print('Negative number')\r\n        elif a == 0:\r\n            print('number is zero')\r\n        elif a % 2 == 0:\r\n            print('Even number')\r\n        elif a % 2 == 1:\r\n            print('odd number')\r\n    elif c == 'no':\r\n        print('OK,see you later')\r\n        end = False\r\n    else:\r\n        print('Please, type \"yes\" or \"no\"')","sub_path":"home_1_2.py","file_name":"home_1_2.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"70681900","text":"import ROOT\nimport argparse\nimport ConfigParser\nimport json\nfrom array import array\n\n# Get config file name\nparser = argparse.ArgumentParser(description='Specify config and overwrite values if desired.')\nparser.add_argument(\"--config\",help = \"Specify configuration file to use\")\nparser.add_argument(\"--tag\",help=\"Tag to add to all directory names to separate this production\")\nargs = parser.parse_args()\n\n# Read configuration\ndefaults = { \"widths\": \"[0.07,0.10,0.15]\",\n             \"startPoints\": 200,\n             \"stopDensePoints\": 2000,\n             \"stopMediumPoints\": 3000,\n             \"stopPoints\" : 7500,\n             \"binnings\": \"[0]\"}\n\nconfigReader = ConfigParser.RawConfigParser(defaults)\nconfigReader.read(args.config)\n \nsection = \"IO\"\ninputBkgFile = configReader.get(section, \"inputFileName\")\ninputBkgHistName = configReader.get(section,\"binTemplateHist\")\noutputFileFormat = configReader.get(section, \"outputFileFormat\")\n \nsection = \"MassesAndWidths\"\nwidths = json.loads(configReader.get(section,\"widths\"))\nnWidths = len(widths)\nstartPoints = configReader.getint(section,\"startPoints\")\nstopDensePoints = configReader.getint(section,\"stopDensePoints\")\nstopMediumPoints = configReader.getint(section,\"stopMediumPoints\")\nstopPoints = configReader.getint(section,\"stopPoints\")\n\nsection = \"Binnings\"\nbinnings = json.loads(configReader.get(section,\"binnings\"))\nnBinnings = len(binnings)\n\n# Get histogram to use as template\ninfile = ROOT.TFile(inputBkgFile,\"READ\")\nnominalBkg = infile.Get(inputBkgHistName)\nnominalBkg.SetDirectory(0)\ninfile.Close()\n\n# Generate list of masses from given info\nmasses = []\nfor mass in range(startPoints, stopDensePoints, 50) : masses.append(float(mass))\nfor mass in range(stopDensePoints, stopMediumPoints, 100) : masses.append(float(mass))\nfor mass in range(stopMediumPoints, stopPoints, 200) : masses.append(float(mass))\n\n# Generate signals\nfor mass in masses :\n    for width in widths :\n    \n        widthForName = int(width*100)\n        onesigma = mass*width\n        filename = outputFileFormat.format(int(mass),widthForName)\n\n        # Generate BW\n        GenericBW = ROOT.TF1(\"signal\",\"TMath::BreitWigner(x,{0},{1})\".format(mass,onesigma),0,13000)\n        outfile = ROOT.TFile(filename,\"RECREATE\")\n\n        for binning in binnings :\n\n            # Convert to histogram\n            \n            low = nominalBkg.GetBinLowEdge(1)\n            high = nominalBkg.GetBinLowEdge(nominalBkg.GetNbinsX()) + nominalBkg.GetBinWidth(nominalBkg.GetNbinsX())\n\n            if binning==0 :\n                SignalHist = ROOT.TH1D(nominalBkg)\n                SignalHist.SetName(\"nominalBins\")\n                SignalHist.SetTitle(\"nominalBins\")\n            elif binning < 0 and binning > -1.5 :\n                newBins = []\n                for j in range(0, nominalBkg.GetNbinsX()+1) :\n                    newBins.append(nominalBkg.GetBinLowEdge(j))\n                    newBins.append(nominalBkg.GetBinLowEdge(j)+nominalBkg.GetBinWidth(j)/2.0)\n                newBins.append(nominalBkg.GetBinLowEdge(nominalBkg.GetNbinsX())+nominalBkg.GetBinWidth(nominalBkg.GetNbinsX()))\n                # variable-width TH1D takes the number of bins (edges - 1) and a C-style double array\n                SignalHist = ROOT.TH1D(\"halfNominalBins\",\"halfNominalBins\",len(newBins) - 1,array('d', newBins))\n            elif binning < -1 and binning > -99 :\n                SignalHist = ROOT.TH1D(nominalBkg)\n                SignalHist.Rebin(int(-1*binning))\n                SignalHist.SetName(\"{0}TimesNominalBins\".format(int(-1*binning)))\n                SignalHist.SetTitle(\"{0}TimesNominalBins\".format(int(-1*binning)))\n            elif (binning < -99) :\n                newBins = []\n                for j in range(0, nominalBkg.GetNbinsX()) :\n                    newBins.append(nominalBkg.GetBinLowEdge(j+1)+50)\n                newBins.append(nominalBkg.GetBinLowEdge(nominalBkg.GetNbinsX())+nominalBkg.GetBinWidth(nominalBkg.GetNbinsX())+50)\n                SignalHist = ROOT.TH1D(\"shifted50NominalBins\",\"shifted50NominalBins\",nominalBkg.GetNbinsX(),array('d', newBins))\n            else :\n                nBins = int((high - 
low)/ float(binning))\n SignalHist = ROOT.TH1D(\"binsCloseTo{0}\".format(binning),\"binsCloseTo{0}\".format(binning),nBins,low,high)\n\n # Make sure histograms are clear and free, regardless\n SignalHist.Reset()\n SignalHist.SetDirectory(0)\n\n for bin in range(0, SignalHist.GetNbinsX()+2) :\n a = SignalHist.GetBinLowEdge(bin)\n b = SignalHist.GetBinLowEdge(bin)+SignalHist.GetBinWidth(bin)\n content = GenericBW.Integral(a,b)\n SignalHist.SetBinContent(bin,content)\n SignalHist.SetBinError(bin,0.)\n\n intNow = SignalHist.Integral()\n\n thisPercentage = 0\n thisInterval = 0\n remember1 = 1\n remember2 = 1\n smallestInterval = 1E6\n rememberThisPercentage = 0\n for bin1 in range(0,SignalHist.GetNbinsX()+2) :\n for bin2 in range(bin1,SignalHist.GetNbinsX()+2) :\n thisPercentage = (SignalHist.Integral(bin1,bin2))/intNow;\n thisInterval = SignalHist.GetBinLowEdge(bin2) + SignalHist.GetBinWidth(bin2) - SignalHist.GetBinLowEdge(bin1)\n if (thisPercentage >= 0.95) :\n if (thisInterval < smallestInterval) :\n remember1=bin1\n remember2=bin2\n smallestInterval=thisInterval\n rememberThisPercentage=thisPercentage\n break\n \n for bin in range(0, SignalHist.GetNbinsX()+2) :\n if (bin < remember1) or (bin > remember2) :\n SignalHist.SetBinContent(bin,0.)\n SignalHist.SetBinError(bin,0.)\n intNow = SignalHist.Integral()\n SignalHist.Scale(1.0/intNow)\n\n outfile.cd()\n SignalHist.Write()\n\n outfile.Close()\n\n","sub_path":"scripts/createBWSignals.py","file_name":"createBWSignals.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178672817","text":"from unittest.mock import ANY\n\nfrom freezegun import freeze_time\nfrom rest_framework import serializers, status\nfrom rest_framework.reverse import reverse\n\nfrom ..factories.analytics import AnalyticsDataFactory, AnalyticsFactory\nfrom ..models.analytics import AnalyticsData\n\n\ndef test_analytics_data_list(api_client, django_assert_max_num_queries):\n AnalyticsDataFactory.create_batch(5)\n with django_assert_max_num_queries(35):\n r = api_client.get(reverse('analyticsdata-list'))\n assert r.status_code == status.HTTP_200_OK\n assert len(r.data) == 4\n\n\ndef test_analytics_data_filter_by_analytics(api_client, django_assert_max_num_queries):\n AnalyticsDataFactory.create_batch(3)\n analytics = AnalyticsFactory()\n AnalyticsDataFactory.create_batch(2, analytics=analytics)\n with django_assert_max_num_queries(10):\n r = api_client.get(reverse('analyticsdata-list') + f'?analytics={analytics.pk}')\n assert r.status_code == status.HTTP_200_OK\n assert len(r.data) == 4\n\n\ndef test_analytics_data_filter_by_invalid_date(api_client, django_assert_max_num_queries):\n analytics = AnalyticsFactory()\n AnalyticsDataFactory.create_batch(2, analytics=analytics)\n with django_assert_max_num_queries(10):\n r = api_client.get(reverse('analyticsdata-list') + '?from=2021/07/03')\n assert r.status_code == status.HTTP_400_BAD_REQUEST\n assert 'Invalid date format, date must be in \"YYYY-MM-DD' in r.data['detail']\n\n\ndef test_analytics_data_post(api_client, staff_user):\n api_client.force_authenticate(staff_user)\n analytics = AnalyticsFactory()\n AnalyticsDataFactory.create()\n\n with freeze_time() as frozen_time:\n r = api_client.post(reverse('analyticsdata-list'), data={\n 'analytics': analytics.pk,\n 'date': '2021-07-13T20:00:10Z',\n 'value': 300\n }, format='json')\n\n assert r.status_code == status.HTTP_201_CREATED\n assert 
AnalyticsData.objects.get(pk=r.data['pk']).value == 300\n assert r.data == {\n 'pk': ANY,\n 'created_date': serializers.DateTimeField().to_representation(frozen_time()),\n 'modified_date': serializers.DateTimeField().to_representation(frozen_time()),\n 'date': '2021-07-13T20:00:10Z',\n 'value': 300,\n 'analytics': analytics.pk\n }\n\n\ndef test_analytics_data_patch(api_client, staff_user):\n api_client.force_authenticate(staff_user)\n analytics_data = AnalyticsDataFactory()\n\n with freeze_time():\n r = api_client.patch(\n reverse('analyticsdata-detail', (analytics_data.pk,)),\n data={\n 'value': 430,\n },\n )\n\n assert r.status_code == status.HTTP_200_OK\n assert AnalyticsData.objects.get(pk=str(analytics_data.pk)).value == 430\n\n\ndef test_analytics_data_delete(api_client, staff_user):\n api_client.force_authenticate(staff_user)\n data = AnalyticsDataFactory()\n r = api_client.delete(reverse('analyticsdata-detail', (data.pk,)))\n\n assert r.status_code == status.HTTP_204_NO_CONTENT\n assert r.data is None\n assert AnalyticsData.objects.filter(pk=str(data.pk)).first() is None\n\n\ndef test_analytics_data_anon_post(api_client):\n r = api_client.post(reverse('analyticsdata-list'), data={'value': 100}, format='json')\n\n assert r.status_code == status.HTTP_401_UNAUTHORIZED\n","sub_path":"v1/analytics/tests/analytics_data.py","file_name":"analytics_data.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115913736","text":"import zipfile\n\nimport io\n\nfrom django.db.models import Q, F, Subquery, query\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom inventit.models import *\nimport json\nfrom decimal import *\nimport csv\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.contrib import messages\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom inventit.EmailBackEnd import EmailBackEnd\nfrom django.db import connection\nimport pandas as pd\n\n\nimport datetime as dt\nimport pandas as pd\nimport os\nfrom django.core.files.storage import FileSystemStorage\nfrom django.shortcuts import redirect\nfrom toggle.settings import allowed_users\n\nfrom django.db import transaction\nfrom django.core.serializers import serialize\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models import F\n\n# Global Variables\nAuth_Temp = 'Auth_Templates/'\nMain_Pages = 'inventit/'\naddition = 'inventit/addition/'\n\n\ndef Signin(request):\n return render(request, Auth_Temp + \"Sign_In.html\")\n\ndef Signup(request):\n return render(request, Auth_Temp + \"sign-up.html\")\n\ndef doLogin(request):\n if request.method!=\"POST\":\n return HttpResponse(\"
Method Not Allowed
    \")\n else:\n user=EmailBackEnd.authenticate(request,username=request.POST.get(\"username\"),password=request.POST.get(\"password\"))\n if user!=None:\n login(request,user)\n\n if Profile.objects.filter(user=request.user).exists():\n if request.user.is_superuser:\n return HttpResponseRedirect('/summary')\n else:\n return HttpResponseRedirect('/capture1')\n elif request.user.is_superuser:\n return HttpResponseRedirect('/summary')\n else:\n return HttpResponseRedirect('/error_temp')\n else:\n messages.error(request,\"Invalid Login Details\")\n return HttpResponseRedirect(\"/\")\n\ndef signup_admin(request):\n username=request.POST.get(\"username\")\n email=request.POST.get(\"email\")\n password=request.POST.get(\"password\")\n\n try:\n if username in allowed_users:\n user=User.objects.create_user(username=username,password=password,email=email, is_staff=True,is_active=True,is_superuser=True)\n user.is_superuser=True\n user.save()\n else:\n user=User.objects.create_user(username=username,password=password,email=email)\n user.save()\n messages.success(request,\"Your Account Was Successfully Created, Please Login to proceed. Thank you!\")\n return redirect(Signin) \n except:\n messages.error(request,\"Failed to Create Admin\")\n return redirect(Signup) \n\ndef error_temp(request):\n return render(request,addition + \"error_login.html\")\n\n\n@login_required()\ndef capture1(request):\n if Profile.objects.filter(user=request.user).exists():\n\n template = Main_Pages+\"capture.html\"\n\n\n assets=Inventory.objects.all()\n # cursor = connection.cursor()\n # query = \"select * from inventit_inventory;\"\n # cursor.execute(query)\n # result = cursor.fetchall()\n # for i in result:\n # assets = i[2]\n profile = Profile.objects.get(user=request.user)\n\n # my allowed categories\n categories = Category.objects.filter(count_1=profile.team)\n bins = Category.objects.filter(count_1=profile.team)\n\n count_header = CountHeader.objects.filter(is_active=True)\n\n count_lines = CountLines.objects.filter(\n Q(count_header=count_header.first()) & Q(category__in=categories)\n )\n \n # count_lines1 = CountLines.objects.filter( Q(category=1) )\n\n # from django.db.models import Count, Max\n\n # unique_fields = ['inventory', 'category']\n\n # duplicates = (\n # CountLines.objects.values(*unique_fields)\n # .order_by()\n # .annotate(max_id=Max('id'), count_id=Count('id'))\n # .filter(count_id__gt=1)\n # )\n\n # for duplicate in duplicates:\n # (\n # CountLines.objects\n # .filter(**{x: duplicate[x] for x in unique_fields})\n # .exclude(id=duplicate['max_id'])\n # .delete()\n # )\n\n\n context = {\"categories\": categories,\"bins\": bins,\"assets\": assets, \"count_headers\": count_header, \"lines\": count_lines, \"count\": 1, \"team\": profile.team.name}\n\n return render(request, template, context)\n\n else:\n return HttpResponseRedirect('/error_counts')\n\n\n\n@login_required()\ndef capture2(request):\n if Profile.objects.filter(user=request.user).exists():\n\n template = Main_Pages+\"capture.html\"\n\n assets=Inventory.objects.all()\n profile = Profile.objects.get(user=request.user)\n\n # my allowed categories\n categories = Category.objects.filter(count_2=profile.team)\n bins = Category.objects.filter(count_2=profile.team)\n\n count_header = CountHeader.objects.filter(is_active=True)\n\n count_lines = CountLines.objects.filter(\n Q(count_header=count_header.first()) & Q(category__in=categories)\n )\n \n \n\n context = {\"bins\":bins,\"assets\":assets, \"categories\": categories,\"count_headers\": 
count_header, \"lines\": count_lines, \"count\": 2, \"team\": profile.team.name}\n\n return render(request, template, context)\n else:\n return HttpResponseRedirect('/error_counts')\n\n\n@login_required()\ndef capture3(request):\n if Profile.objects.filter(user=request.user).exists():\n\n\n\n template = Main_Pages+\"capture.html\"\n\n profile = Profile.objects.get(user=request.user)\n\n # my allowed categories\n categories = Category.objects.filter(count_3=profile.team)\n\n count_header = CountHeader.objects.filter(is_active=True)\n\n count_lines = CountLines.objects.filter(\n Q(count_header=count_header.first()) & Q(category__in=categories) & Q(category__count_3__exact=profile.team)\n )\n\n\n context = {\"categories\": categories,\"count_headers\": count_header, \"lines\": count_lines, \"count\": 3, \"team\": profile.team.name}\n\n return render(request, template, context)\n\n else:\n return HttpResponseRedirect('/error_counts') \n\n\n\n@login_required()\ndef summary(request):\n template = Main_Pages+\"summary.html\"\n profile = Profile.objects.all()\n\n counts_sign_off = Team.objects.all().values_list('name', 'count_1_sign_off', 'count_2_sign_off', 'count_3_sign_off')\n\n count_header = CountHeader.objects.filter(is_active=True)\n \n inventory = Inventory.objects.all() \\\n .annotate(count_variance=F('count_summary') - F('count_theoretical')) \\\n .annotate(cost_variance=F('count_variance') * F('cost'))\n \n \n\n \n cost_variance_sum = inventory.aggregate(Sum('cost_variance'))['cost_variance__sum']\n\n summary_count1 = (\n CountLines.objects.filter(count_header=count_header.first())\n .filter(count_1__gte=0)\n .count()\n )\n summary_count2 = (\n CountLines.objects.filter(count_header=count_header.first())\n .filter(count_2__gte=0)\n .count()\n )\n summary_count3 = (\n CountLines.objects.filter(count_header=count_header.first())\n .filter(count_3__gte=0)\n .count()\n )\n total = CountLines.objects.filter(count_header=count_header.first()).count()\n\n \n \n summary_count1Percentage = 0\n summary_count2Percentage = 0\n summary_count3Percentage = 0\n\n if summary_count1:\n summary_count1Percentage = round((float(summary_count1) / float(total)) * 100)\n\n if summary_count2:\n summary_count2Percentage = round((float(summary_count2) / float(total)) * 100)\n if summary_count3:\n summary_count3Percentage = round((float(summary_count3) / float(total)) * 100)\n\n counts = {\n \"summary_count1\": summary_count1,\n \"summary_count2\": summary_count2,\n \"summary_count3\": summary_count3,\n \"summary_count1Percentage\": summary_count1Percentage,\n \"summary_count2Percentage\": summary_count2Percentage,\n \"summary_count3Percentage\": summary_count3Percentage,\n \"total\": total,\n \"counts_sign_off\": counts_sign_off\n }\n\n \n #balanced item = 0\n count_lineswithzero = Inventory.objects.filter( Q(count_summary=F('count_theoretical')) & Q(count_summary=0) & Q(count_theoretical=0) ).count()\n\n totalwithzero = Inventory.objects.all().count()\n if totalwithzero:\n percwithzero = ( count_lineswithzero / totalwithzero ) * 100\n else:\n percwithzero = 0\n \n\n \n\n #balanced item != 0\n count_lineswithoutzero = Inventory.objects.filter( Q(count_summary=F('count_theoretical')) & ~Q(count_summary=0) & ~Q(count_theoretical=0) ).count()\n totalwithoutzero = Inventory.objects.all().count()\n if totalwithoutzero:\n percwithoutzero = ( count_lineswithoutzero / totalwithoutzero ) * 100\n else:\n percwithoutzero = 0\n\n #Confirmed\n query = \"select * from inventit_Inventory where Confimed=1 and 
(count_summary-count_theoretical) <> 0;\"\n conresults = Inventory.objects.raw(query)\n Confirmedcount = len(conresults)\n\n # Confirmedcount = Inventory.objects.filter( Q(Confimed__exact=1)).count()\n totalc = Inventory.objects.all().count()\n if totalc:\n percc = ( Confirmedcount / totalc ) * 100\n else:\n percc = 0\n\n\n #unconfirmed\n query = \"select * from inventit_Inventory where Confimed=0 and (count_summary-count_theoretical) <> 0;\"\n uncresults = Inventory.objects.raw(query)\n unconfirmedcount = len(uncresults)\n\n\n # unconfirmedcount = Inventory.objects.filter( Q(Confimed__exact=0) ).count()\n totalunc = Inventory.objects.all().count()\n if totalunc:\n percunc = ( unconfirmedcount / totalunc ) * 100\n else:\n percunc = 0\n\n\n cursor = connection.cursor()\n query = '''\\\n Select (count_summary-count_theoretical) as variance , SUM((count_summary-count_theoretical) * cost) \n FROM inventit_inventory\n WHERE (Confimed=0 and variance <> 0)\n '''\n cursor.execute(query)\n result = cursor.fetchall()\n for i in result:\n UnConfirmed_Variance = i[1]\n\n query = '''\\\n Select (count_summary-count_theoretical) as variance , SUM((count_summary-count_theoretical) * cost) \n FROM inventit_inventory\n WHERE (Confimed=1 and variance <> 0)\n '''\n cursor.execute(query)\n result = cursor.fetchall()\n for i in result:\n Confirmed_Variance = i[1]\n\n from django.core.paginator import Paginator\n\n\n\n\n ctg = Team.objects.all()\n\n context = {\n \"count_headers\": count_header, \"Teams\":ctg,\n \"counts\": counts, \"counts_sign_off\": counts_sign_off,\"profile\":profile,\n \"cost_variance_sum\": cost_variance_sum,\n \"county\":unconfirmedcount , \"percent\":percunc, \"Confirmedcount\":Confirmedcount, \"percunc\":percc,\n \"count_lineswithzero\":count_lineswithzero, \"percwithzero\":percwithzero,\n \"count_lineswithoutzero\":count_lineswithoutzero, \"percwithoutzero\":percwithoutzero,\"Confirmed_Variance\":Confirmed_Variance,\"UnConfirmed_Variance\":UnConfirmed_Variance\n }\n\n if request.method == 'POST':\n uname = request.session.get('uname')\n check = request.POST['check'] # post value either true or false\n titles = request.POST['checko']# post value i.e. 
name of title\n for title in Inventory.objects.filter(id=titles):\n # return HttpResponse(title.title)\n title.Confimed = check.title() # title() capitalizes first letter of string\n title.save()\n return HttpResponse('data is saved')\n\n return render(request, template, context)\n\n@login_required()\ndef save_count_summary(request):\n if request.method == \"POST\":\n pk = request.POST.get(\"pk\")\n value = request.POST.get(\"value\")\n\n inventory = Inventory.objects.get(pk=pk)\n inventory.count_summary = value\n inventory.save()\n\n response_data = {\"result\": \"Saved!!!\"}\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\",\n )\n\n@login_required()\ndef save_data(request):\n if request.method == \"POST\":\n pk = request.POST.get(\"pk\")\n count = request.POST.get(\"count\")\n count_type = request.POST.get(\"count_type\")\n\n response_data = {}\n\n if count_type == \"1\":\n line = CountLines.objects.filter(pk=pk)\n\n if line.values()[0].get(\"count_2\") == Decimal(count):\n line.update(count_1=count, count_3=count)\n else:\n line.update(count_1=count)\n\n elif count_type == \"2\":\n line = CountLines.objects.filter(pk=pk)\n\n # import pdb; pdb.set_trace();\n if line.values()[0].get(\"count_1\") == Decimal(count):\n line.update(count_2=count, count_3=count)\n else:\n line.update(count_2=count)\n\n else:\n line = CountLines.objects.filter(pk=pk)\n line.update(count_3=count)\n\n # Update the Inventory count summary\n count_lines = CountLines.objects.filter(\n inventory__item_code=line.first().inventory.item_code\n )\n\n sum = 0\n for count_line in count_lines:\n if count_line.count_3:\n sum += count_line.count_3\n\n inventory = Inventory.objects.filter(\n item_code=line.first().inventory.item_code\n ).first()\n\n if inventory:\n inventory.count_summary = sum\n inventory.save()\n\n response_data[\"result\"] = \"Saved!!!\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\",\n )\n\n\n\n@csrf_exempt\n@login_required()\ndef sign_off(request, count):\n profile = Profile.objects.get(user=request.user)\n count_header = CountHeader.objects.filter(is_active=True)\n team = profile.team\n assets=CountLines.objects.all()\n if count == 1:\n categories = Category.objects.filter(count_1=team)\n count_lines = CountLines.objects.filter(\n Q(count_header=count_header.first()) & Q(category__in=categories)\n )\n for b in count_lines:\n if b.count_1 is None:\n \n b.count_1 = 0\n b.save()\n \n elif b.count_1 == b.count_2:\n b.count_3 = b.count_1\n b.save()\n \n \n #count_lines.filter(count_1__isnull=True).update(count_1=0)\n team.count_1_sign_off = True\n elif count == 2:\n categories = Category.objects.filter(count_2=team)\n count_lines = CountLines.objects.filter(\n Q(count_header=count_header.first()) & Q(category__in=categories)\n )\n for b in count_lines:\n if b.count_2 is None:\n \n b.count_2 = 0\n b.save()\n \n elif b.count_2 == b.count_1:\n b.count_3 = b.count_1\n b.save()\n\n #count_lines.filter(count_2__isnull=True).update(count_2=0)\n team.count_2_sign_off = True\n\n else:\n categories = Category.objects.filter(count_3=team)\n count_lines = CountLines.objects.filter(\n Q(count_header=count_header.first()) & Q(category__in=categories)\n )\n 
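# Note: unlike counts 1 and 2 above, count 3 has no per-line reconciliation loop -- any line never counted a third time is simply zero-filled in one bulk update before sign-off.\n        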
count_lines.filter(count_3__isnull=True).update(count_3=0)\n team.count_3_sign_off = True\n\n team.save()\n\n return HttpResponse(status=200)\n\n@login_required()\ndef export(request):\n output = io.StringIO()\n writer = csv.writer(output, dialect=\"excel\")\n\n inventory = export_inventory()\n countlines = export_countlines()\n\n response = HttpResponse(content_type=\"application/zip\")\n response[\"Content-Disposition\"] = \"attachment; filename=csv.zip\"\n\n z = zipfile.ZipFile(response, \"w\")\n z.writestr(\"inventory.csv\", inventory.getvalue())\n z.writestr(\"countlines.csv\", countlines.getvalue())\n\n return response\n\n\ndef export_inventory():\n output = io.StringIO()\n writer = csv.writer(output)\n writer.writerow([\"Confimed\",\"Item Code\", \"Theoretical\", \"Count Summary\"])\n\n for inventory in Inventory.objects.all():\n writer.writerow(\n [inventory.Confimed,inventory.item_code, inventory.count_theoretical, inventory.count_summary]\n )\n\n return output\n\ndef export_countlines():\n output = io.StringIO()\n writer = csv.writer(output)\n writer.writerow([\"Item Code\", \"Count 1\", \"Count 2\", \"Count 3\", \"Theoretical\"])\n\n count_header = CountHeader.objects.filter(is_active=True)\n\n for countLine in CountLines.objects.filter(count_header=count_header.first()):\n writer.writerow(\n [\n countLine.inventory.item_code,\n countLine.count_1,\n countLine.count_2,\n countLine.count_3,\n countLine.inventory.count_theoretical,\n ]\n )\n\n return output\n\n\ndef add_countline_save1(request):\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n if is_ajax:\n data = json.load(request)\n ctldata = data.get('payload')\n\n bin_id=ctldata[\"category\"]\n category=Category.objects.get(id=bin_id)\n inventory_id=ctldata[\"inventory\"]\n # header_id = request.POST.get(\"time_log_count_header\")\n query = \"select * from inventit_CountHeader where is_active=True;\"\n results = CountHeader.objects.raw(query)\n for result in results:\n header_id = CountHeader.objects.get(description = result).id\n headerie = CountHeader.objects.get(id=header_id)\n if not Inventory.objects.filter(item_code=inventory_id).exists():\n messages.error(request, 'Invenotry Item Does Not Exist!!')\n context= [{'messages':'Invenotry Item Does Not Exist!!'}]\n else:\n inventor=Inventory.objects.get(item_code=inventory_id).id\n inventory=Inventory.objects.get(id=inventor)\n if not CountLines.objects.filter(category=category,inventory=inventory).exists():\n count_model=CountLines(count_header=headerie,category=category,inventory=inventory)\n count_model.save()\n messages.success(request, 'CountLine was successfully added !')\n context= [{'messages':'CountLine was successfully added !'}]\n else:\n messages.error(request, 'CountLine already exist!!!')\n context= [{'messages':'CountLine already exist!!!'}]\n return JsonResponse({'context':context})\n\n\n\ndef time_save(request):\n if request.method == 'POST': \n time_log_team=request.POST.get(\"time_log_team\")\n time_log_count_num=request.POST.get(\"time_log_count_num\")\n time_log_count_header=request.POST.get(\"time_log_count_header\")\n time_log_time=request.POST.get(\"time_log_time\")\n time_log_bin=request.POST.get(\"time_log_bin\")\n nguva=Time_Log(time_log_team=time_log_team, time_log_count_num=time_log_count_num, time_log_count_header=time_log_count_header, time_log_time=time_log_time ,time_log_bin=time_log_bin)\n nguva.save()\n return redirect(Home) \n\n\ndef Home(request):\n return render(request,Main_Pages+\"capture.html\")\n\ndef 
upload(request):\n Category.objects.all().delete()\n CountHeader.objects.all().delete()\n CountLines.objects.all().delete()\n Inventory.objects.all().delete()\n print('Deleted all data in tables') \n try:\n if request.method == 'POST' and request.FILES['myfile']:\n \n myfile = request.FILES['myfile'] \n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n excel_file = uploaded_file_url\n print(excel_file) \n \n #for Inventory\n empexceldata = pd.read_excel (\".\"+excel_file, sheet_name='Final Inventory')\n print (empexceldata)\n print(type(empexceldata))\n dbframe = empexceldata\n for dbframe in dbframe.itertuples():\n \n obj = Inventory.objects.create(Confimed=dbframe.Confimed,item_code=dbframe.item_code, count_theoretical=dbframe.count_theoretical,\n count_summary=dbframe.count_summary, cost=dbframe.cost, description=dbframe.description)\n \n print(type(obj))\n obj.save()\n\n #for bins\n mpexceldata = pd.read_excel (\".\"+excel_file, sheet_name='Final_Bins')\n print (mpexceldata)\n print(type(mpexceldata))\n dbfram = mpexceldata\n for dbfram in dbfram.itertuples():\n obje = Category.objects.create(name=dbfram.name)\n \n print(type(obje))\n obje.save()\n\n #for header\n mpexceldata = pd.read_excel (\".\"+excel_file, sheet_name='Final Count_header')\n print (mpexceldata)\n print(type(mpexceldata))\n dbfra = mpexceldata\n for dbfra in dbfra.itertuples():\n obji = CountHeader.objects.create(description=dbfra.description)\n \n print(type(obji))\n obji.save()\n \n #for countlines\n empexceldata = pd.read_excel (\".\"+excel_file, sheet_name='Final CountLines')\n print (empexceldata)\n print(type(empexceldata))\n dbfr = empexceldata\n \n for dbfr in dbfr.itertuples():\n count_heade = CountHeader.objects.get(description=dbfr.count_header).id\n count_header=CountHeader.objects.get(id=count_heade)\n categor = Category.objects.get(name=dbfr.category).id\n category=Category.objects.get(id=categor)\n inventor=Inventory.objects.get(item_code=dbfr.item_code).id\n inventory=Inventory.objects.get(id=inventor)\n print(count_header,category,inventory)\n objo=CountLines(count_header=count_header,category=category,inventory=inventory)\n objo.save()\n\n return redirect(Home) \n\n except Exception as identifier: \n print(identifier)\n \n return redirect(Home) \n\n\n\ndef error_counts(request):\n return render(request,addition + \"error_login.html\")\n\ndef finishie(request):\n return render(request,addition + \"finished.html\")\n\ndef error_network(request):\n return render(request,addition + \"error_network.html\")\n\n\n\nfrom django.core import serializers\n\ndef poptable(request):\n if request.method == 'POST':\n pk = request.POST.get(\"pk\")\n a = CountLines.objects.filter(inventory__item_code=pk)\n tmpJson = serializers.serialize(\"json\",a)\n tmpObj = json.loads(tmpJson)\n dataa = json.dumps(tmpObj)\n # return JsonResponse(dataa, safe=False)\n return render(request, Main_Pages+\"summary.html\", {\"dataa\": dataa,\"pk\":a})\n # return redirect(summary,{'Countlinees':a})\n\n\n\ndef confirm_summary(request):\n if request.method == \"POST\":\n check = request.POST['check'] # post value either true or false\n titles = request.POST['checko']# post value i.e. 
name of title\n for title in Inventory.objects.filter(id=titles):\n # return HttpResponse(title.title)\n title.Confimed = check.title() # title() capitalizes first letter of string\n title.save()\n response_data = {\"result\": \"Saved!!!\"}\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\",\n )\n\n#==============================================================POP UP SUMMARY ==================================================================\ndef Binloc(request):\n # request.is_ajax() is deprecated since django 3.1\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n\n if is_ajax:\n data = json.load(request)\n todo = data.get('payload')\n todos = list(CountLines.objects.filter(inventory__item_code = todo['task']).values())\n a = CountLines.objects.filter(inventory__item_code = todo['task'])\n # for i in a:\n context = [{\n 'category':i.category.name,\n 'count_1':i.count_1,\n 'count_2':i.count_2,\n 'count_3':i.count_3,\n } for i in a]\n # context = json.dumps(context,cls=DjangoJSONEncoder)\n return JsonResponse({'context':context})\n\n#=========================================================== SUMMARY lINES ITEMS =======================================================================\n\ndef SummaryTab(request):\n # request.is_ajax() is deprecated since django 3.1\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n\n if is_ajax:\n data = json.load(request)\n tabbs = data.get('payload')\n \n list_of_p_dict2 = data_perfection(tabbs)\n\n\n return JsonResponse({'context':list_of_p_dict2})\n\n\n#=================================================PERFECT SEARCH ==============================================================================================\ndef SearchFilter(request):\n # request.is_ajax() is deprecated since django 3.1\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n\n if is_ajax:\n data = json.load(request)\n tabbs = data.get('payload')\n\n\n search_data = tabbs['sdata'] \n\n invdrop = tabbs['invdropdata']\n\n if invdrop == 'Unconfirmed':\n confirm = 0\n elif invdrop == 'Confirmed':\n confirm = 1\n else:\n confirm = 1\n\n\n context = data_perfection(tabbs)\n\n if tabbs['invdropdata'] != \"Show All\":\n context = [data for data in context if data['Confimed']== confirm]\n else:\n context = context\n\n # Get all the teams fed\n a = Team.objects.all()\n # Creating empty list to add teams fed\n selected_teams = []\n for i in a:\n #Adding teams available \n selected_teams.append(i.name)\n\n # Filtered Team\n chosen_team = tabbs['teamdropdata']\n c_team = [d for d in selected_teams if d.lower() == chosen_team.lower()]\n #Finding the index of filtered team\n if len(c_team) > 0:\n list_of_alpha = Alpha_allocate(selected_teams,c_team)\n contex2 = []\n for i in range(len(list_of_alpha)):\n selected_letters = list_of_alpha[i]\n \n cont = [data for data in context if data['item_code'][0].lower() == selected_letters.lower()]\n\n for g in cont :\n contex2.append(g)\n\n context = [data for data in contex2 if search_data.lower() in data['item_code'].lower()] \n else:\n context = [data for data in context if search_data.lower() in data['item_code'].lower()] \n\n\n\n return JsonResponse({'context':context})\n\n\n\n#=============================================================== Filter By Inventory and 
Team================================================================================\n\ndef InvFilter(request):\n # request.is_ajax() is deprecated since django 3.1\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n\n if is_ajax:\n data = json.load(request)\n tabbs = data.get('payload')\n\n\n \n\n invdrop = tabbs['invdropdata']\n\n if invdrop == 'Unconfirmed':\n confirm = 0\n elif invdrop == 'Confirmed':\n confirm = 1\n else:\n confirm = 1\n\n\n context = data_perfection(tabbs)\n\n if tabbs['invdropdata'] != \"Show All\":\n context = [data for data in context if data['Confimed']== confirm]\n\n # Get all the teams fed\n a = Team.objects.all()\n # Creating empty list to add teams fed\n selected_teams = []\n for i in a:\n #Adding teams available \n selected_teams.append(i.name)\n\n # Filtered Team\n chosen_team = tabbs['teamdata']\n c_team = [d for d in selected_teams if d.lower() == chosen_team.lower()]\n #Finding the index of filtered team\n if len(c_team) > 0:\n list_of_alpha = Alpha_allocate(selected_teams,c_team)\n contex2 = []\n for i in range(len(list_of_alpha)):\n selected_letters = list_of_alpha[i]\n \n cont = [data for data in context if data['item_code'][0].lower() == selected_letters.lower()]\n\n for g in cont :\n contex2.append(g)\n\n else:\n contex2 = context\n\n\n\n\n\n return JsonResponse({'context':contex2})\n\n\n\n#================================================================= Confirm =========================================================================================\n\ndef Confirmed(request):\n # request.is_ajax() is deprecated since django 3.1\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n\n if is_ajax:\n data = json.load(request)\n tabbs = data.get('payload')\n\n context = data_perfection(tabbs)\n\n context = [data for data in context if data['Confimed'] == 0]\n\n\n # Get all the teams fed\n a = Team.objects.all()\n # Creating empty list to add teams fed\n selected_teams = []\n for i in a:\n #Adding teams available \n selected_teams.append(i.name)\n\n # Filtered Team\n chosen_team = tabbs['teamdropdata']\n c_team = [d for d in selected_teams if d.lower() == chosen_team.lower()]\n #Finding the index of filtered team\n if len(c_team) > 0:\n list_of_alpha = Alpha_allocate(selected_teams,c_team)\n\n contex2 = []\n\n for i in range(len(list_of_alpha)):\n selected_letters = list_of_alpha[i]\n cont = [data for data in context if data['item_code'][0].lower() == selected_letters.lower()]\n for i in cont: \n dd = Inventory.objects.filter(Q(item_code=i['item_code']))\n for po in dd:\n po.Confimed = 1\n po.save()\n\n\n\n\n\n return JsonResponse({'context':contex2})\n\n#================================================================= UnConfirm Page =========================================================================================\n\n\ndef UnconfirmedPage(request):\n if Profile.objects.filter(user=request.user).exists():\n\n\n\n template = addition + \"CountUnconfirmed.html\"\n\n profile = Profile.objects.get(user=request.user)\n\n # my allowed categories\n categories = Team.objects.filter(name=profile.team)\n \n\n count_header = CountHeader.objects.filter(is_active=True)\n\n sqlquery = sql_statements()[0]\n \n\n cursor = connection.cursor()\n cursor.execute(sqlquery)\n row = cursor.fetchall()\n\n \n\n contex = [{\n 'pk':i[0],\n 'Confimed':i[1],\n 'item_code':i[2],\n 'count_theoretical':i[3],\n 'count_summary':i[4] ,\n 'count_variance':i[5],\n 'cost_variance':i[6],\n } for i in row ]\n contex = [data for data in contex 
if data['count_summary']==None] \n\n\n # Get all the teams fed\n a = Team.objects.all()\n # Creating empty list to add teams fed\n selected_teams = []\n for i in a:\n #Adding teams available \n selected_teams.append(i.name)\n\n # Filtered Team\n chosen_team = profile.team.name\n c_team = [d for d in selected_teams if d == chosen_team ]\n #Finding the index of filtered team\n if len(c_team) > 0:\n list_of_alpha = Alpha_allocate(selected_teams,c_team)\n\n contex2 = []\n for i in range(len(list_of_alpha)):\n selected_letters = list_of_alpha[i]\n\n cont = [data for data in contex if data['item_code'][0].lower() == selected_letters.lower()]\n\n for g in cont :\n contex2.append(g)\n\n\n context = {\"inventory\":contex2,\"categories\": categories,\"count_headers\": count_header, \"count\": \"Unconfirmed\", \"team\": profile.team.name}\n\n return render(request, template, context)\n\n else:\n return HttpResponseRedirect('/error_login') \n\n#================================================================= Nullify =========================================================================================\n\n\ndef Nullify(request):\n # request.is_ajax() is deprecated since django 3.1\n is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'\n\n if is_ajax:\n data = json.load(request)\n tabbs = data.get('payload')\n\n context = data_perfection(tabbs)\n\n context = [data for data in context if data['Confimed'] == 0]\n\n # Get all the teams fed\n a = Team.objects.all()\n # Creating empty list to add teams fed\n selected_teams = []\n for i in a:\n #Adding teams available \n selected_teams.append(i.name)\n\n # Filtered Team\n chosen_team = tabbs['teamfil']\n c_team = [d for d in selected_teams if d.lower() == chosen_team.lower()]\n #Finding the index of filtered team\n\n if len(c_team) > 0:\n if len(c_team) > 0:\n list_of_alpha = Alpha_allocate(selected_teams,c_team)\n\n contex2 = []\n for i in range(len(list_of_alpha)):\n selected_letters = list_of_alpha[i]\n cont = [data for data in context if data['item_code'][0].lower() == selected_letters.lower()]\n for i in cont: \n dd = Inventory.objects.filter(Q(item_code=i['item_code']))\n for po in dd:\n po.count_summary = None\n po.save()\n\n\n\n return JsonResponse({'context':context})\n\n#================================================================= Confirm =========================================================================================\n\ndef sql_statements():\n \n sqlquery1 = '''\\\n SELECT a.id as id , a.Confimed as Confimed, a.item_code as item_code,a.count_theoretical as count_theoretical, a.count_summary as count_summary, \n (a.count_summary - a.count_theoretical) as count_variance, ((a.count_summary - a.count_theoretical) * a.cost) as cost_variance\n FROM inventit_inventory a\n '''\n \n sqlquery2 = '''\\\n SELECT a.id as id , a.Confimed as Confimed, a.item_code as item_code,a.count_theoretical as count_theoretical, a.count_summary as count_summary, \n (a.count_summary - a.count_theoretical) as count_variance, ((a.count_summary - a.count_theoretical) * a.cost) as cost_variance,b.count_1,b.count_2,b.count_3\n FROM inventit_inventory a\n INNER JOIN inventit_countlines b\n ON (a.id = b.inventory_id) \n '''\n sql_statement = [sqlquery1,sqlquery2]\n return sql_statement\n\n\ndef data_perfection(tabbs):\n\n if tabbs['tabbs'] == 'ALL':\n sqlquery = sql_statements()[0]\n elif tabbs['tabbs'] == 'C_failed_NV':\n sqlquery = sql_statements()[1]\n elif tabbs['tabbs'] == 'C_failed_V': \n sqlquery = sql_statements()[1]\n elif tabbs['tabbs'] 
== 'C_Succ_NV':\n sqlquery = sql_statements()[1]\n elif tabbs['tabbs'] == 'C_Succ_V':\n sqlquery = sql_statements()[1]\n\n cursor = connection.cursor()\n cursor.execute(sqlquery)\n row = cursor.fetchall()\n\n\n if tabbs['tabbs'] != 'ALL':\n context = [{\n 'pk':i[0],\n 'Confimed':i[1],\n 'item_code':i[2],\n 'count_theoretical':i[3],\n 'count_summary':i[4],\n 'count_variance':i[5],\n 'cost_variance':i[6],\n 'count_1':i[7],\n 'count_2':i[8],\n 'count_3':i[9],\n } for i in row]\n else:\n context = [{\n 'pk':i[0],\n 'Confimed':i[1],\n 'item_code':i[2],\n 'count_theoretical':i[3],\n 'count_summary':i[4],\n 'count_variance':i[5],\n 'cost_variance':i[6],\n } for i in row]\n\n _name = False\n contex2 = []\n list_of_p_name = []\n list_of_p_dict = []\n list_of_p_dict2 = []\n list_of_p_dict3 = []\n infection = []\n successs = None\n if tabbs['tabbs'] == 'ALL':\n context = [data for data in context]\n successs = None\n elif tabbs['tabbs'] == 'C_failed_NV':\n context = [data for data in context if data['count_variance'] == 0]\n successs = False\n elif tabbs['tabbs'] == 'C_failed_V': \n context = [data for data in context if data['count_variance'] != 0]\n successs = False\n elif tabbs['tabbs'] == 'C_Succ_NV':\n context = [data for data in context if data['count_variance'] == 0]\n successs = True\n elif tabbs['tabbs'] == 'C_Succ_V':\n context = [data for data in context if data['count_variance'] != 0]\n successs = True\n \n\n for i in range(len(context)):\n name_o_item = context[i]['item_code']\n c_name = [data for data in context if data['item_code'] == name_o_item]\n if len(c_name) > 0:\n for s in c_name:\n if successs == True:\n decis = (s['count_1'] == s['count_2'] == s['count_3'])\n if decis == True:\n p_name = s['item_code']\n if len(list_of_p_name) > 0:\n if p_name in list_of_p_name:\n pass\n else:\n list_of_p_name.append(p_name)\n else:\n list_of_p_name.append(p_name)\n else:\n if s['item_code'] not in infection:\n infection.append(s['item_code'])\n elif successs == False:\n decis = (s['count_1'] != s['count_3'] or s['count_1'] != s['count_2'] or s['count_2'] != s['count_3'])\n if decis == True:\n p_name = s['item_code']\n if len(list_of_p_name) > 0:\n if p_name in list_of_p_name:\n pass\n else:\n list_of_p_name.append(p_name)\n else:\n list_of_p_name.append(p_name)\n else:\n if s['item_code'] not in infection:\n infection.append(s['item_code'])\n else:\n list_of_p_dict2 = context\n if successs != None:\n for i in list_of_p_name:\n list_of_p_dict = [data for data in context if data['item_code'] == i]\n list_of_p_dict2.append(list_of_p_dict[0])\n\n for p in range(len(infection)):\n s = [data for data in list_of_p_dict2 if data['item_code'] == infection[p]]\n if len(s) > 0:\n list_of_p_dict2.remove(s[0])\n\n return list_of_p_dict2\n\n\ndef Alpha_allocate(selected_teams,c_team):\n t_indx = selected_teams.index(c_team[0])\n # Array of first letters to be filtered\n arry = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0']\n # Length of arry \n arry_len = len(arry)\n # Length of selected teams fed\n team_len = len(selected_teams)\n # Finding how many times teams can be allocated the arry\n team_times = arry_len/team_len\n g = int(t_indx)\n # creating empty list to store first letters per team\n list_of_alpha = []\n team_times = int(team_times)\n for i in range(int(team_times)):\n list_of_alpha.append(arry[g])\n g += team_times\n\n return 
list_of_alpha\n","sub_path":"inventit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":40754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360302472","text":"import blurplefier\n\nwith open(\"images/input.png\", \"rb\") as test_file:\n test_file_bytes = test_file.read()\n\nextension, standard_output = blurplefier.convert_image(\n test_file_bytes, blurplefier.Methods.CLASSIC\n)\n\nwith open(f\"images/output_standard.{extension}\", \"wb\") as output_file:\n output_file.write(standard_output)\n\nextension, filtered_output = blurplefier.convert_image(\n test_file_bytes, blurplefier.Methods.FILTER\n)\n\nwith open(f\"images/output_filter.{extension}\", \"wb\") as output_file:\n output_file.write(filtered_output)\n\nextension, neo_output = blurplefier.convert_image(\n test_file_bytes,\n blurplefier.Methods.FILTER,\n modifier=blurplefier.Modifiers.NEW_LIGHT,\n)\n\nwith open(f\"images/output_neofilter.{extension}\", \"wb\") as output_file:\n output_file.write(neo_output)\n","sub_path":"tests/basic_test.py","file_name":"basic_test.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639578518","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport sys\nimport random as rnd\nfrom datetime import datetime\n\nOnHand = {}\nFolded = {}\nDeck = []\nBehavior = {}\ncurrent = \"\"\nactions = 0\n\nPlayer = 'Player'\nppl = (Player, 'Opponent 1', 'Opponent 2', 'Opponent 3')\nsuitdict = {\"S\":0, \"H\":1, \"D\":2, \"C\":3}\nletters = [\"A\"]+[str(i) for i in range(2,11)]+[\"J\", \"Q\", \"K\"]\nheader = (\"Spade\", \"Heart\", \"Diamond\", \"Club\")\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\ndef init():\n\tglobal OnHand\n\tglobal Deck\n\tglobal Folded\n\tglobal Behavior\n\tglobal current\n\tglobal actions\n\n\tfor x in ppl:\n\t\tOnHand[x] = [False]*52\n\t\tFolded[x] = 0\n\t\tBehavior[x] = []\n\tgot = {recvr:0 for recvr in ppl}\n\tfor i in range(52):\n\t\trecvr = ppl[rnd.randint(0, 3)]\n\t\twhile got[recvr] == 13:\n\t\t\trecvr = ppl[rnd.randint(0, 3)]\n\t\tOnHand[recvr][i] = True\n\t\tgot[recvr] += 1\n\t\t# Spade 7\n\t\tif i == 6:\n\t\t\tcurrent = recvr\n\t\t\tprint(\"[Game]\", recvr, \"is to serve.\")\n\tDeck = [False]*52\n\tactions = 0\n\ndef parse_input(msg):\n\tif msg[0] not in \"SsHhDdCc\":\n\t\treturn (-1, \"[Alert] Unrecognized suid \\\"{}\\\".\".format(msg[0])) \n\tsuit = suitdict[msg[0].upper()]\n\tif len(msg)<=1 or not msg[1:].strip():\n\t\treturn (-1, \"[Alert] Unable to comprehend number.\") \n\tmsg = msg[1:].strip()\n\tif msg[0] in \"AaJjQqKk\":\n\t\tnum = letters.index(msg[0].upper())\n\telse:\n\t\ttry:\n\t\t\tnum = int(msg)-1\n\t\texcept ValueError:\n\t\t\treturn (-1, \"[Alert] Unable to comprehend number.\") \n\treturn (suit*13+num, None)\n\ndef can_put(hand, deck):\n\tif not deck[6]:\n\t\treturn \t[i==6 and hand[6] for i in range(52)]\t\n\tacc = [False]*52\n\tfor s in range(4):\n\t\tfoundL = False\n\t\tfoundR = False\n\t\tfor i in range(8):\n\t\t\tif not foundL and (i==6 or deck[s*13+i+1]):\n\t\t\t\tacc[s*13+i] = hand[s*13+i]\n\t\t\t\tfoundL = True\n\t\t\tif not foundR and deck[s*13+11-i]:\n\t\t\t\tacc[s*13+12-i] = hand[s*13+12-i]\n\t\t\t\tfoundR = True\n\treturn acc\n\ndef print_set(sset, end='\\n', highlight=''):\n\tif 
highlight:\n\t\tacc = can_put(sset, Deck)\n\tfor i in range(4):\n\t\tprint(header[i].ljust(10, ' '), end='')\n\t\tfor j in range(0,13):\n\t\t\thead = \"\"\n\t\t\tbody = \"\"\n\t\t\ttail = \"\"\n\t\t\tif highlight and acc[i*13+j]:\n\t\t\t\thead = highlight\n\t\t\t\ttail = color.END\n\t\t\tbody = (letters[j] if sset[i*13+j] else '-').ljust(3, ' ')\n\t\t\tprint(head+body+tail,end='')\n\t\tprint(\"\")\n\tprint(\"\", end=end)\n\n\nModel = np.zeros(1)\nModel_fold = np.zeros(1)\nBData = np.load('behavior_data.npy')\n\ndef s_AI(hand, deck):\n\tcan = can_put(hand, deck)\n\tfold = not any(can)\n\tvec = np.asarray([float(x) for x in hand]+[float(x) for x in deck])\n\n\tcan_vec = np.asarray([float(x) for x in can])\n\tif fold:\n\t\tprobf = vec@Model_fold\n\t\tres = np.argmax(probf*can_vec)\n\telse:\n\t\tprob = vec@Model\n\t\tres = np.argmax(prob*can_vec)\n\t# print(\"[DEBUG]\", res)\n\tif can[res]:\n\t\t# print(\"[DEBUG]smartboi\")\n\t\treturn res\n\treturn hand.index(True) if fold else can.index(True)\n\n\ndef processBehavior(winn, weight):\n\tglobal BData\n\tglobal Model_fold\n\tglobal Model\n\tif winn is not None:\n\t\tBData = np.concatenate((BData, [[weight]+x for x in Behavior[winn]]), axis=0)\n\t# train model\n\tdata1 = []\n\tdata2 = []\n\ty1 = []\n\ty2 = []\n\tfor x in BData:\n\t\tloop = 5\n\t\t# score fold card own[52] deck[52]\n\t\tscore, fold, card = x[0:3]\n\t\tvec = x[3:]\n\t\tif score > 0:\n\t\t\tloop -= score//10\n\t\tloop = max(0, loop)\n\t\tfor i in range(loop):\t\t\n\t\t\tif fold == 1:\n\t\t\t\tdata1.append(vec)\n\t\t\t\ty1.append([float(t==card) for t in range(52)])\n\t\t\telse:\n\t\t\t\tdata2.append(vec)\n\t\t\t\ty2.append([float(t==card) for t in range(52)])\n\n\tModel_fold, res, rnk, sing = np.linalg.lstsq(data1, y1, rcond=None)\n\tModel, res, rnk, sing = np.linalg.lstsq(data2, y2, rcond=None)\n\nrnd.seed(datetime.now())\nwhile True:\n\tinit()\n\tprocessBehavior(None, 0)\n\t# main loop\n\twhile actions < 52:\n\t\t\t# print(current)\n\t\t\tpcard = -1\n\t\t\tpfold = not any(can_put(OnHand[current], Deck))\n\t\t\tif current == ppl[0]:\n\t\t\t\tprint(\"[Game] Deck:\")\n\t\t\t\tprint_set(Deck)\n\t\t\t\tprint(\"[Game] Your cards:\")\n\t\t\t\tprint_set(OnHand[current], highlight=color.YELLOW)\n\t\t\t\twhile True:\n\t\t\t\t\tpinput = input(\"[Game] PLEASE ENTER [SHDC][A-K] or [*]: \")\n\t\t\t\t\tif '*' in pinput:\n\t\t\t\t\t\t# auto complete\n\t\t\t\t\t\tpcard = s_AI(OnHand[current], Deck)\n\t\t\t\t\t\tbreak\n\t\t\t\t\tpcard, err = parse_input(pinput.lstrip())\n\t\t\t\t\tif err:\n\t\t\t\t\t\tprint(err)\n\t\t\t\t\t\tcontinue\t\t\t\t\t\n\t\t\t\t\tif not OnHand[current][pcard]:\n\t\t\t\t\t\tprint(\"[Alert] You don't have this card!\", pcard)\n\t\t\t\t\telif not pfold and (pcard%13 > 6 and not Deck[pcard-1] or pcard%13 < 6 and not Deck[pcard+1]):\n\t\t\t\t\t\tprint(\"[Alert] You can't put this card now!\", pcard)\n\t\t\t\t\telif OnHand[current][6] and pcard != 6:\n\t\t\t\t\t\tprint(\"[Alert] Please put Spade 7 first.\")\n\t\t\t\t\telse: \n\t\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tpcard = s_AI(OnHand[current], Deck)\n\n\t\t\tBehavior[current].append([pfold, pcard]+OnHand[current][:]+Deck[:])\n\t\t\t\n\t\t\tOnHand[current][pcard] = False\n\t\t\tDeck[pcard] = not pfold\n\t\t\tif pfold:\n\t\t\t\tFolded[current] += (pcard%13+1)\n\t\t\tactions += 1\n\n\t\t\tprint(current, \"FOLDED\" if pfold else \"PUT {} {}.\".format(header[pcard//13], letters[pcard%13]))\n\n\t\t\tcurrent = ppl[(ppl.index(current)+1)%4]\n\tprint(\"[Game] Game Over.\")\n\tprint(\"[Game] Penalty:\")\n\tprint(ppl[0].ljust(15, ' 
')+ppl[1].ljust(15, ' ')+ppl[2].ljust(15, ' ')+ppl[3].ljust(15, ' '))\n\tprint(str(Folded[ppl[0]]).ljust(15, ' ')+str(Folded[ppl[1]]).ljust(15, ' ')+\\\n\t\tstr(Folded[ppl[2]]).ljust(15, ' ')+str(Folded[ppl[3]]).ljust(15, ' '))\n\twinner = min(Folded, key=Folded.get)\n\tprocessBehavior(Player, Folded[Player]-Folded[winner])\n\tif winner != Player:\t\n\t\tprocessBehavior(winner, Folded[winner])\n\tif input(\"[Game] Another game ? [Y/N]\")[0].upper() != \"Y\":\n\t\tbreak \n\nnp.save('behavior_data.npy', BData)\nprint(\"[Data] Behavior data is saved to behavior_data.npy. N = {}\".format(len(BData)))\n\n\n# print([int(x) for x in Behavior['Player'][0]])\n","sub_path":"Sevens.py","file_name":"Sevens.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"295101233","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save # very commonly used\nfrom django.dispatch import receiver # goes with post_save and other signals\nfrom rest_framework.authtoken.models import Token\n\n\nclass Location(models.Model):\n city_choice = models.CharField(max_length=20)\n\n def __str__(self):\n return self.city_choice\n\n\nclass Category(models.Model):\n new_category = models.CharField(max_length=30)\n choose_main = models.ForeignKey('self', null=True, blank=True, related_name=\"sub_choice\")\n\n def __str__(self):\n return self.new_category\n\n class Meta:\n verbose_name_plural = \"Categories\"\n\n\nclass Listing(models.Model):\n title = models.CharField(max_length=40)\n description = models.TextField()\n price = models.DecimalField(max_digits=6, decimal_places=2)\n photo = models.ImageField(upload_to='listing_photos', null=True, blank=True, verbose_name='Upload a photo')\n city = models.ForeignKey(Location)\n seller = models.ForeignKey(User)\n created = models.DateTimeField(auto_now_add=True)\n pick_category = models.ForeignKey(Category)\n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = ['-created']\n\n @property\n def photo_url(self):\n if self.photo:\n return self.photo.url\n return 'http://sport.ngmnexpo.com/cliparts/2015/02/1344144.jpg'\n\n\nclass TraderProfile(models.Model):\n user = models.OneToOneField('auth.User')\n preferred_location = models.ForeignKey(Location, null=True, blank=True)\n primary_category = models.ForeignKey(Category, null=True, blank=True)\n email_address = models.EmailField(max_length=45, null=True, blank=True)\n logo = models.ImageField(upload_to='logo_images', null=True, blank=True, verbose_name='Upload a logo')\n\n @property\n def logo_url(self):\n if self.logo:\n return self.logo.url\n return 'http://static.tumblr.com/e7snt83/tU6m7t07k/pirate_patch2.jpg'\n\n\n@receiver(post_save, sender='auth.User')\ndef create_user_profile(**kwargs):\n created = kwargs.get('created')\n instance = kwargs.get('instance')\n\n if created:\n TraderProfile.objects.create(user=instance) # hooks profile to user\n\n@receiver(post_save, sender='auth.User')\ndef create_token(**kwargs): # a shortcut pass in\n created = kwargs.get(\"created\") # boilerplate\n instance = kwargs.get(\"instance\") # boilerplate\n if created:\n Token.objects.create(user=instance) # yep. 
standard.\n","sub_path":"main_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"502364453","text":"import http.server as SimpleHTTPServer\nimport xmlrpc.server as SimpleXMLRPCServer\nimport robot.setMotorPower as RobotMotorPower\n\ndef registerRobotXmlRpcMethods(server):\n \n # Register standard XML-RPC methods.\n server.register_introspection_functions()\n \n # Register the motor power command function.\n RobotMotorPower.init()\n server.register_function(RobotMotorPower.set,'setRobotMotorPower')\n\n\n \n \n# We define a custom server request handler, capable of both handling GET and XML-RPC requests.\nclass RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler, SimpleHTTPServer.SimpleHTTPRequestHandler):\n rpc_paths = ('/RobotControlService',)\n\n def do_GET(self):\n SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self) \n \n \n \n \n# Start running the server ... \nif __name__ == \"__main__\":\n \n # Create our XML-RPC server.using out custom request handler that is also able to serve web pages over GET.\n port = 8080\n server = SimpleXMLRPCServer.SimpleXMLRPCServer((\"0.0.0.0\", port), RequestHandler)\n \n # Register the XML-RPC methods ...\n registerRobotXmlRpcMethods(server)\n \n # Start to server.\n server.serve_forever()\n","sub_path":"Blockly_picar-c/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"182576442","text":"import os\r\nimport re\r\nfrom KO_web_parse_multi_process_v2 import get_ko_data\r\nfrom KO_web_parse_multi_process_v2 import write_file\r\nimport time\r\nimport multiprocessing\r\nimport random\r\nfrom multiprocessing import Pool\r\nimport time\r\nimport shutil\r\n\r\ndef check_and_backup():\r\n if os.path.exists(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\"):\r\n curent_time = time.time()\r\n ko_info_end_creat_time = os.stat(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\").st_ctime\r\n # print(ko_info_end_creat_time)\r\n time_gap = curent_time - ko_info_end_creat_time\r\n ModifiedTime=time.localtime(os.stat(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\").st_mtime) #文件访问时间 \r\n y=time.strftime('%Y', ModifiedTime) \r\n m=time.strftime('%m', ModifiedTime) \r\n d=time.strftime('%d', ModifiedTime) \r\n H=time.strftime('%H', ModifiedTime) \r\n M=time.strftime('%M', ModifiedTime)\r\n\r\n if time_gap>24*60*60*1.5:\r\n backup_name = \"/home/fdong/auto_protein_analysis/ko_info/ko_info_end_backups/KO_INFO_END_{0}{1}{2}{3}{4}.txt\".format(y,m,d,H,M)\r\n shutil.copy(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\",backup_name)\r\n print(\"KO_INFO_END.TXT已经备份,正在更新......\")\r\n os.system(\"python3 /home/fdong/auto_protein_analysis/ko_info/KO_num_class.py\")\r\n os.remove(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO.txt\")\r\n os.remove(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\")\r\n\r\n\r\n\r\n\r\ndef clean_ko_info(ko_info_end,ko_middle_temp,ko_scraped,num_temp):\r\n if not ko_middle_temp:\r\n ko_scraped= []\r\n num_temp=0\r\n return (ko_scraped, num_temp)\r\n for line in ko_middle_temp:\r\n # print(line)\r\n if re.match(r\"^K\\d{5}\",line[:6]) and len(re.findall(r\"\\t\",line)) == 3: # 剔除异常行\r\n match_temp = re.match(r\"^K\\d{5}\",line[:6]).group()\r\n if match_temp not in ko_scraped: #去除重复值\r\n ko_info_end.write(line)\r\n num_temp 
+= 1 # 已经爬取的数量\r\n ko_scraped.append(match_temp)\r\n # ko_middle_temp.close()\r\n # ko_info_end.close()\r\n return (ko_scraped,num_temp)\r\n\r\n\r\ndef get_omit_ko_list(ko_scraped,ko_all_num,num_temp):\r\n omit_ko_list = list()\r\n if (ko_all_num - num_temp)!=0:\r\n ko_all = open(\"/home/fdong/auto_protein_analysis/ko_info/KO_terms.txt\", 'r')\r\n for ko in ko_all:\r\n if ko.strip() not in ko_scraped: #去除已经爬取的\r\n omit_ko_list.append(ko.strip())\r\n # ko_all.close()\r\n return omit_ko_list\r\n\r\n\r\n\r\ndef add_omit_main():\r\n ko_all = open(\"/home/fdong/auto_protein_analysis/ko_info/KO_terms.txt\", 'r')\r\n ko_all_num = len(ko_all.readlines())\r\n ko_all.close()\r\n\r\n num_temp = 0\r\n ko_scraped = list()\r\n kegg_ko_omit = list()\r\n #if os.path.exists(\"KO_INFO_END.txt\"):\r\n # os.remove(\"KO_INFO_END.txt\")\r\n\r\n while (ko_all_num - num_temp):\r\n ko_info_end = open(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\", \"a+\") # 所有ko 的所有信息__终版\r\n try:\r\n ko_middle_temp = open(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO.txt\", \"r+\") # 多线程爬取的文件,中间含有遗漏\r\n except:\r\n ko_middle_temp = None\r\n # print(ko_all_num,\"\\t\",num_temp)\r\n (ko_scraped, num_temp) = clean_ko_info(ko_info_end, ko_middle_temp, ko_scraped, num_temp)\r\n omit_ko_list = get_omit_ko_list(ko_scraped,ko_all_num,num_temp)\r\n # print(\"num_temp:\\t\",num_temp)\r\n\r\n print(\"omit ko numbers:\", len(omit_ko_list))\r\n if len(omit_ko_list) <= 50:\r\n print(omit_ko_list)\r\n # if len(omit_ko_list)==2 and \"K06060\" in omit_ko_list and \"K04225\" in omit_ko_list:\r\n # break\r\n # print(\"omit_ko_list:\\t\",omit_ko_list)\r\n # print(\"kegg_ko_omit:\\t\",kegg_ko_omit)\r\n print(\"*\"*30)\r\n if omit_ko_list == kegg_ko_omit:\r\n break\r\n else:\r\n kegg_ko_omit = omit_ko_list\r\n\r\n if len(omit_ko_list) != 0:\r\n pool = multiprocessing.Pool(processes=100)\r\n # file_data = list()\r\n for line in omit_ko_list:\r\n ko_list_signle = line.strip()\r\n # file_data.append(pool.apply_async(get_ko_data, (ko_list_signle,),callback=write_file))\r\n pool.apply_async(get_ko_data, (ko_list_signle,),callback=write_file)\r\n time.sleep(random.uniform(0,0.3))\r\n pool.close()\r\n pool.join()\r\n pool.terminate()\r\n # for line in file_data:\r\n # ko_middle_temp.write(line.get())\r\n ko_middle_temp.close()\r\n ko_info_end.close()\r\n\r\nif __name__ == '__main__':\r\n start = time.time() # 计时开始\r\n #os.remove(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO_END.txt\")\r\n#os.remove(\"/home/fdong/auto_protein_analysis/ko_info/KO_INFO.txt\")\r\n\r\n check_and_backup()\r\n add_omit_main()\r\n\r\n end = time.time()\r\n m, s = divmod(end - start, 60) # 转换时间的方法\r\n h, m = divmod(m, 60)\r\n print('KO在线爬取任务完成,共耗时%02d小时%02d分%02d秒' % (h, m, s))\r\n","sub_path":"ko_info/add_ko_omit_v2.py","file_name":"add_ko_omit_v2.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74838846","text":"\"\"\"Tests for GP and SP classes\"\"\"\nimport math\nimport unittest\nimport numpy as np\nfrom gpkit import (Model, Monomial, settings, VectorVariable, Variable,\n SignomialsEnabled, ArrayVariable)\nfrom gpkit.geometric_program import GeometricProgram\nfrom gpkit.small_classes import CootMatrix\nfrom gpkit.feasibility import feasibility_model\n\nNDIGS = {\"cvxopt\": 5, \"mosek\": 7, \"mosek_cli\": 5}\n# name: decimal places of accuracy\n\n\nclass TestGP(unittest.TestCase):\n \"\"\"\n Test GeometricPrograms.\n This TestCase gets run once for each 
installed solver.\n \"\"\"\n name = \"TestGP_\"\n # solver and ndig get set in loop at bottom this file, a bit hacky\n solver = None\n ndig = None\n\n def test_trivial_gp(self):\n \"\"\"\n Create and solve a trivial GP:\n minimize x + 2y\n subject to xy >= 1\n\n The global optimum is (x, y) = (sqrt(2), 1/sqrt(2)).\n \"\"\"\n x = Monomial('x')\n y = Monomial('y')\n prob = Model(cost=(x + 2*y),\n constraints=[x*y >= 1])\n sol = prob.solve(solver=self.solver, verbosity=0)\n self.assertEqual(type(prob.latex()), str)\n self.assertEqual(type(prob._repr_latex_()), str)\n self.assertAlmostEqual(sol(\"x\"), math.sqrt(2.), self.ndig)\n self.assertAlmostEqual(sol(\"y\"), 1/math.sqrt(2.), self.ndig)\n self.assertAlmostEqual(sol(\"x\") + 2*sol(\"y\"),\n 2*math.sqrt(2),\n self.ndig)\n self.assertAlmostEqual(sol[\"cost\"], 2*math.sqrt(2), self.ndig)\n\n def test_simple_united_gp(self):\n R = Variable('R', units=\"nautical_miles\")\n a0 = Variable('a0', 340.29, 'm/s')\n theta = Variable(r'\\theta', 0.7598)\n t = Variable('t', 10, 'hr')\n T_loiter = Variable('T_{loiter}', 1, 'hr')\n T_reserve = Variable('T_{reserve}', 45, 'min')\n M = VectorVariable(2, 'M')\n\n if R.units:\n prob = Model(1/R,\n [t >= sum(R/a0/M/theta**0.5) + T_loiter + T_reserve,\n M <= 0.76])\n sol = prob.solve(verbosity=0)\n self.assertAlmostEqual(sol[\"cost\"], 0.0005532, self.ndig)\n\n def test_trivial_vector_gp(self):\n \"\"\"\n Create and solve a trivial GP with VectorVariables\n \"\"\"\n x = VectorVariable(2, 'x')\n y = VectorVariable(2, 'y')\n prob = Model(cost=(sum(x) + 2*sum(y)),\n constraints=[x*y >= 1])\n sol = prob.solve(solver=self.solver, verbosity=0)\n self.assertEqual(sol('x').shape, (2,))\n self.assertEqual(sol('y').shape, (2,))\n for x, y in zip(sol('x'), sol('y')):\n self.assertAlmostEqual(x, math.sqrt(2.), self.ndig)\n self.assertAlmostEqual(y, 1/math.sqrt(2.), self.ndig)\n self.assertAlmostEqual(sol[\"cost\"]/(4*math.sqrt(2)), 1., self.ndig)\n\n def test_zero_lower_unbounded(self):\n x = Variable('x', value=4)\n y = Variable('y', value=0)\n z = Variable('z')\n t1 = Variable('t1')\n t2 = Variable('t2')\n\n prob = Model(z, [z >= x + t1,\n t1 >= t2,\n t2 >= y])\n sol = prob.solve(verbosity=0)\n\n def test_mdd_example(self):\n Cl = Variable(\"Cl\", 0.5, \"-\", \"Lift Coefficient\")\n Mdd = Variable(\"Mdd\", \"-\", \"Drag Divergence Mach Number\")\n m1 = Model(1/Mdd, [1 >= 5*Mdd + 0.5, Mdd >= 0.00001])\n m2 = Model(1/Mdd, [1 >= 5*Mdd + 0.5])\n m3 = Model(1/Mdd, [1 >= 5*Mdd + Cl, Mdd >= 0.00001])\n sol1 = m1.solve(solver=self.solver, verbosity=0)\n sol2 = m2.solve(solver=self.solver, verbosity=0)\n sol3 = m3.solve(solver=self.solver, verbosity=0)\n gp1, gp2, gp3 = [m.program for m in [m1, m2, m3]]\n self.assertEqual(gp1.A, CootMatrix(row=[0, 1, 2],\n col=[0, 0, 0],\n data=[-1, 1, -1]))\n self.assertEqual(gp2.A, CootMatrix(row=[0, 1],\n col=[0, 0],\n data=[-1, 1]))\n # order of variables within a posynomial is not stable\n # (though monomial order is)\n equiv1 = gp3.A == CootMatrix(row=[0, 2, 3, 2],\n col=[0, 0, 0, 0],\n data=[-1, 1, -1, 0])\n equiv2 = gp3.A == CootMatrix(row=[0, 1, 3, 2],\n col=[0, 0, 0, 0],\n data=[-1, 1, -1, 0])\n self.assertTrue(equiv1 or equiv2)\n self.assertAlmostEqual(sol1(Mdd), sol2(Mdd))\n self.assertAlmostEqual(sol1(Mdd), sol3(Mdd))\n self.assertAlmostEqual(sol2(Mdd), sol3(Mdd))\n\n def test_additive_constants(self):\n x = Variable('x')\n m = Model(1/x, [1 >= 5*x + 0.5, 1 >= 10*x])\n m.solve(verbosity=0)\n gp = m.program\n self.assertEqual(gp.cs[1], gp.cs[2])\n self.assertEqual(gp.A.data[1], 
gp.A.data[2])\n\n def test_zeroing(self):\n L = Variable(\"L\")\n k = Variable(\"k\", 0)\n with SignomialsEnabled():\n constr = [L-5*k <= 10]\n sol = Model(1/L, constr).solve(verbosity=0, solver=self.solver)\n self.assertAlmostEqual(sol(L), 10, self.ndig)\n self.assertAlmostEqual(sol[\"cost\"], 0.1, self.ndig)\n\n def test_singular(self):\n \"\"\"\n Create and solve GP with a singular A matrix\n \"\"\"\n if self.solver == 'cvxopt':\n # cvxopt can't solve this problem\n # (see https://github.com/cvxopt/cvxopt/issues/36)\n return\n x = Variable('x')\n y = Variable('y')\n m = Model(y*x, [y*x >= 12])\n sol = m.solve(solver=self.solver, verbosity=0)\n self.assertAlmostEqual(sol[\"cost\"], 12, self.ndig)\n\n def test_constants_in_objective_1(self):\n '''Issue 296'''\n x1 = Variable('x1')\n x2 = Variable('x2')\n m = Model(1.+ x1 + x2, [x1 >= 1., x2 >= 1.])\n sol = m.solve(solver=self.solver, verbosity=0)\n self.assertAlmostEqual(sol[\"cost\"], 3, self.ndig)\n\n def test_constants_in_objective_2(self):\n '''Issue 296'''\n x1 = Variable('x1')\n x2 = Variable('x2')\n m = Model(x1**2 + 100 + 3*x2, [x1 >= 10., x2 >= 15.])\n sol = m.solve(solver=self.solver, verbosity=0)\n self.assertAlmostEqual(sol[\"cost\"]/245., 1, self.ndig)\n\n def test_feasibility_gp_(self):\n x = Variable('x')\n m = Model(x, [x**2 >= 1, x <= 0.5])\n self.assertRaises(RuntimeWarning, m.solve, verbosity=0)\n fm = feasibility_model(m, \"max\")\n sol1 = fm.solve(verbosity=0)\n fm = feasibility_model(m, \"product\")\n sol2 = fm.solve(verbosity=0)\n self.assertTrue(sol1[\"cost\"] >= 1)\n self.assertTrue(sol2[\"cost\"] >= 1)\n\n def test_terminating_constant_(self):\n x = Variable('x')\n y = Variable('y', value=0.5)\n prob = Model(1/x, [x + y <= 4])\n sol = prob.solve(verbosity=0)\n self.assertAlmostEqual(sol[\"cost\"], 1/3.5, self.ndig)\n\n def test_check_result(self):\n \"\"\"issue 361\"\"\"\n N = 5\n L = 5.\n dx = L/(N-1)\n EI = Variable(\"EI\",10)\n p = VectorVariable(N, \"p\")\n p = p.sub(p, 100*np.ones(N))\n V = VectorVariable(N, \"V\")\n M = VectorVariable(N, \"M\")\n th = VectorVariable(N, \"th\")\n w = VectorVariable(N, \"w\")\n eps = 1E-6\n substitutions = {var: eps for var in [V[-1], M[-1], th[0], w[0]]}\n objective = w[-1]\n constraints = [EI*V.left[1:N] >= EI*V[1:N] + 0.5*dx*p.left[1:N] + 0.5*dx*p[1:N],\n EI*M.left[1:N] >= EI*M[1:N] + 0.5*dx*V.left[1:N] + 0.5*dx*V[1:N],\n EI*th.right[0:N-1] >= EI*th[0:N-1] + 0.5*dx*M.right[0:N-1] + 0.5*dx*M[0:N-1],\n EI*w.right[0:N-1] >= EI*w[0:N-1] + 0.5*dx*th.right[0:N-1] + 0.5*dx*th[0:N-1]]\n m = Model(objective, constraints, substitutions)\n sol = m.solve(verbosity=0)\n\n def test_exps_is_tuple(self):\n \"\"\"issue 407\"\"\"\n x = Variable('x')\n m = Model(x, [x >= 1])\n m.solve(verbosity=0)\n self.assertEqual(type(m.program.cost.exps), tuple)\n\nclass TestSP(unittest.TestCase):\n \"\"\"test case for SP class -- gets run for each installed solver\"\"\"\n name = \"TestSP_\"\n solver = None\n ndig = None\n\n def test_trivial_sp(self):\n x = Variable('x')\n y = Variable('y')\n with SignomialsEnabled():\n m = Model(x, [x >= 1-y, y <= 0.1])\n sol = m.localsolve(verbosity=0, solver=self.solver)\n self.assertAlmostEqual(sol[\"variables\"][\"x\"], 0.9, self.ndig)\n with SignomialsEnabled():\n m = Model(x, [x+y >= 1, y <= 0.1])\n sol = m.localsolve(verbosity=0, solver=self.solver)\n self.assertAlmostEqual(sol[\"variables\"][\"x\"], 0.9, self.ndig)\n\n def test_relaxation(self):\n x = Variable(\"x\")\n y = Variable(\"y\")\n with SignomialsEnabled():\n constraints = [y + x >= 2, y <= x]\n 
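# y + x >= 2 is a signomial constraint with no GP-compatible form, which is why this model is solved with localsolve() below rather than solve().\n            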
objective = x\n m = Model(objective, constraints)\n m.localsolve(verbosity=0)\n\n # issue #257\n\n A = VectorVariable(2, \"A\")\n B = ArrayVariable([2, 2], \"B\")\n C = VectorVariable(2, \"C\")\n with SignomialsEnabled():\n constraints = [A <= B.dot(C),\n B <= 1,\n C <= 1]\n obj = 1/A[0] + 1/A[1]\n m = Model(obj, constraints)\n m.localsolve(verbosity=0)\n\n def test_issue180(self):\n L = Variable(\"L\")\n Lmax = Variable(\"L_{max}\", 10)\n W = Variable(\"W\")\n Wmax = Variable(\"W_{max}\", 10)\n A = Variable(\"A\", 10)\n Obj = Variable(\"Obj\")\n a_val = 0.01\n a = Variable(\"a\", a_val)\n with SignomialsEnabled():\n eqns = [L <= Lmax,\n W <= Wmax,\n L*W >= A,\n Obj >= a*(2*L + 2*W) + (1-a)*(12 * W**-1 * L**-3)]\n m = Model(Obj, eqns)\n spsol = m.solve(verbosity=0, solver=self.solver)\n # now solve as GP\n eqns[-1] = (Obj >= a_val*(2*L + 2*W) + (1-a_val)*(12 * W**-1 * L**-3))\n m = Model(Obj, eqns)\n gpsol = m.solve(verbosity=0, solver=self.solver)\n self.assertAlmostEqual(spsol['cost'], gpsol['cost'])\n\n def test_trivial_sp2(self):\n x = Variable(\"x\")\n y = Variable(\"y\")\n\n # converging from above\n with SignomialsEnabled():\n constraints = [y + x >= 2, y >= x]\n objective = y\n x0 = 1\n y0 = 2\n m = Model(objective, constraints)\n sol1 = m.localsolve(x0={x: x0, y: y0}, verbosity=0, solver=self.solver)\n\n # converging from right\n with SignomialsEnabled():\n constraints = [y + x >= 2, y <= x]\n objective = x\n x0 = 2\n y0 = 1\n m = Model(objective, constraints)\n sol2 = m.localsolve(x0={x: x0, y: y0}, verbosity=0, solver=self.solver)\n\n self.assertAlmostEqual(sol1[\"variables\"][\"x\"],\n sol2[\"variables\"][\"x\"], self.ndig)\n self.assertAlmostEqual(sol1[\"variables\"][\"y\"],\n sol2[\"variables\"][\"x\"], self.ndig)\n\n def test_sp_initial_guess_sub(self):\n x = Variable(\"x\")\n y = Variable(\"y\")\n x0 = 1\n y0 = 2\n with SignomialsEnabled():\n constraints = [y + x >= 2, y <= x]\n objective = x\n m = Model(objective, constraints)\n try:\n sol = m.localsolve(x0={x: x0, y: y0}, verbosity=0,\n solver=self.solver)\n except TypeError:\n self.fail(\"Call to local solve with only variables failed\")\n self.assertAlmostEqual(sol(x), 1, self.ndig)\n self.assertAlmostEqual(sol[\"cost\"], 1, self.ndig)\n\n try:\n sol = m.localsolve(x0={\"x\": x0, \"y\": y0}, verbosity=0,\n solver=self.solver)\n except TypeError:\n self.fail(\"Call to local solve with only variable strings failed\")\n self.assertAlmostEqual(sol(\"x\"), 1, self.ndig)\n self.assertAlmostEqual(sol[\"cost\"], 1, self.ndig)\n\n try:\n sol = m.localsolve(x0={\"x\": x0, y: y0}, verbosity=0,\n solver=self.solver)\n except TypeError:\n self.fail(\"Call to local solve with a mix of variable strings \"\n \"and variables failed\")\n self.assertAlmostEqual(sol[\"cost\"], 1, self.ndig)\n\n def test_small_signomial(self):\n x = Variable('x')\n z = Variable('z')\n local_ndig = 4\n nonzero_adder = 0.1 # TODO: support reaching zero, issue #348\n with SignomialsEnabled():\n J = 0.01*(x - 1)**2 + nonzero_adder\n m = Model(z, [z >= J])\n sol = m.localsolve(verbosity=0)\n self.assertAlmostEqual(sol['cost'], nonzero_adder, local_ndig)\n self.assertAlmostEqual(sol('x'), 0.987, 3)\n\n def test_signomials_not_allowed_in_objective(self):\n with SignomialsEnabled():\n x = Variable('x')\n y = Variable('y')\n J = 0.01*((x - 1)**2 + (y - 1)**2) + (x*y - 1)**2\n m = Model(J)\n with self.assertRaises(TypeError):\n sol = m.localsolve(verbosity=0)\n\n def test_partial_sub_signomial(self):\n \"\"\"Test SP partial x0 initialization\"\"\"\n x = 
Variable('x')\n y = Variable('y')\n with SignomialsEnabled():\n m = Model(x, [x + y >= 1, y <= 0.5])\n m.localsolve(x0={x: 0.5}, verbosity=0)\n self.assertEqual(m.program.gps[0].constraints[0].exp[x], -1./3)\n\n\nTEST_CASES = [TestGP, TestSP]\n\nTESTS = []\nfor testcase in TEST_CASES:\n for solver in settings[\"installed_solvers\"]:\n if solver:\n test = type(testcase.__name__+\"_\"+solver,\n (testcase,), {})\n setattr(test, \"solver\", solver)\n setattr(test, \"ndig\", NDIGS[solver])\n TESTS.append(test)\n\nif __name__ == \"__main__\":\n from gpkit.tests.helpers import run_tests\n run_tests(TESTS)\n","sub_path":"gpkit/tests/t_model.py","file_name":"t_model.py","file_ext":"py","file_size_in_byte":14190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287393829","text":"from globals import *\r\nfrom geant_classes import *\r\n\r\nslack = 1.0*nm\r\ndiameter = 10.6*mm # detector_geometry.hole_diameter\r\nfoil_thickness = 5000.0*nm\r\nfoil_metal_thickness = 200.0*nm\r\nfoil_material = \"ParyleneN\"\r\n\r\nclass FoilGeometry(G4LogicalVolume):\r\n def __init__(self):\r\n # A round disk of very thin parylene foil, coated with Aluminum\r\n foil_solid = G4Tubs(\"foil\",\r\n 0, diameter/2,\r\n foil_thickness/2,\r\n 0, 2*pi)\r\n G4LogicalVolume.__init__(self, foil_solid, G4Material(\"Al\"), \"foil\")\r\n\r\n # Parylene Core\r\n core_solid = G4Tubs(\"foil_core\",\r\n 0, diameter/2 - slack,\r\n (foil_thickness - foil_metal_thickness)/2,\r\n 0, 2*pi)\r\n core_lvolume = G4LogicalVolume(core_solid, G4Material(foil_material), \"foil_core\")\r\n G4PVPlacement(0, [0, 0, 0],\r\n core_lvolume,\r\n \"foil_core\",\r\n self)","sub_path":"python geometry/foil_geometry.py","file_name":"foil_geometry.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"260360420","text":"\"\"\"Basic compute graph elements\"\"\"\nimport abc\nfrom collections import OrderedDict\nimport dataclasses as dc\nimport itertools\nimport json\nimport logging\nimport networkx as nx\nimport os\nimport pdb\nfrom pathlib import Path\nimport typing as ty\nimport pickle as pk\nfrom copy import deepcopy, copy\nfrom time import sleep\n\nimport cloudpickle as cp\nfrom filelock import FileLock\nimport shutil\nfrom tempfile import mkdtemp\n\nfrom . import state\nfrom . import auxiliary as aux\nfrom .specs import File, BaseSpec, RuntimeSpec, Result, SpecInfo, LazyField\nfrom .helpers import (\n make_klass,\n create_checksum,\n print_help,\n load_result,\n gather_runtime_info,\n save_result,\n ensure_list,\n record_error,\n get_inputs,\n)\nfrom ..utils.messenger import send_message, make_message, gen_uuid, now, AuditFlag\n\nlogger = logging.getLogger(\"pydra\")\n\ndevelop = True\n\n\nclass TaskBase:\n _api_version: str = \"0.0.1\" # Should generally not be touched by subclasses\n _version: str # Version of tool being wrapped\n _task_version: ty.Optional[\n str\n ] = None # Task writers encouraged to define and increment when implementation changes sufficiently\n _input_sets = None # Dictionaries of predefined input settings\n\n audit_flags: AuditFlag = AuditFlag.NONE # What to audit. 
See audit flags for details\n\n _can_resume = False # Does the task allow resuming from previous state\n _redirect_x = False # Whether an X session should be created/directed\n\n _runtime_requirements = RuntimeSpec()\n _runtime_hints = None\n\n _cache_dir = None # Working directory in which to operate\n _references = None # List of references for a task\n\n # dj: do we need it??\n input_spec = BaseSpec\n output_spec = BaseSpec\n\n # TODO: write state should be removed\n def __init__(\n self,\n name,\n inputs: ty.Union[ty.Text, File, ty.Dict, None] = None,\n audit_flags: AuditFlag = AuditFlag.NONE,\n messengers=None,\n messenger_args=None,\n cache_dir=None,\n cache_locations=None,\n ):\n \"\"\"A base structure for nodes in the computational graph (i.e. both\n ``Node`` and ``Workflow``).\n\n Parameters\n ----------\n\n name : str\n Unique name of this node\n inputs : dictionary (input name, input value or list of values)\n States this node's input names\n \"\"\"\n self.name = name\n if not self.input_spec:\n raise Exception(\"No input_spec in class: %s\" % self.__class__.__name__)\n klass = make_klass(self.input_spec)\n self.inputs = klass(\n **{\n f.name: (None if f.default is dc.MISSING else f.default)\n for f in dc.fields(klass)\n }\n )\n self.input_names = [\n field.name\n for field in dc.fields(klass)\n if field.name not in [\"_func\", \"_graph\"]\n ]\n self.state = None\n self._output = {}\n self._result = {}\n # flag that says if node finished all jobs\n self._done = False\n if self._input_sets is None:\n self._input_sets = {}\n if inputs:\n if isinstance(inputs, dict):\n inputs = {k: v for k, v in inputs.items() if k in self.input_names}\n elif Path(inputs).is_file():\n inputs = json.loads(Path(inputs).read_text())\n elif isinstance(inputs, str):\n if self._input_sets is None or inputs not in self._input_sets:\n raise ValueError(\"Unknown input set {!r}\".format(inputs))\n inputs = self._input_sets[inputs]\n self.inputs = dc.replace(self.inputs, **inputs)\n self.state_inputs = inputs\n self.audit_flags = audit_flags\n self.messengers = ensure_list(messengers)\n self.messenger_args = messenger_args\n self.cache_dir = cache_dir\n self.cache_locations = cache_locations\n\n # dictionary of results from tasks\n self.results_dict = {}\n self.plugin = None\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"input_spec\"] = pk.dumps(state[\"input_spec\"])\n state[\"output_spec\"] = pk.dumps(state[\"output_spec\"])\n state[\"inputs\"] = dc.asdict(state[\"inputs\"])\n return state\n\n def __setstate__(self, state):\n state[\"input_spec\"] = pk.loads(state[\"input_spec\"])\n state[\"output_spec\"] = pk.loads(state[\"output_spec\"])\n state[\"inputs\"] = make_klass(state[\"input_spec\"])(**state[\"inputs\"])\n self.__dict__.update(state)\n\n def __getattr__(self, name):\n if name == \"lzout\": # lazy output\n return LazyField(self, \"output\")\n return self.__getattribute__(name)\n\n def help(self, returnhelp=False):\n \"\"\" Prints class help\n \"\"\"\n help_obj = print_help(self)\n if returnhelp:\n return help_obj\n\n @property\n def version(self):\n return self._version\n\n # TODO: not sure what was the idea for the method (not used)\n # def save_set(self, name, inputs, force=False):\n # if name in self._input_sets and not force:\n # raise KeyError(\"Key {} already saved. 
Use force=True to override.\")\n # self._input_sets[name] = inputs\n\n @property\n def checksum(self):\n return create_checksum(self.__class__.__name__, self.inputs)\n\n def set_state(self, splitter, combiner=None):\n if splitter is not None:\n self.state = state.State(\n name=self.name, splitter=splitter, combiner=combiner\n )\n else:\n self.state = None\n return self.state\n\n @property\n def output_names(self):\n return [f.name for f in dc.fields(make_klass(self.output_spec))]\n\n def audit(self, message, flags=None):\n if develop:\n with open(\n Path(os.path.dirname(__file__)) / \"..\" / \"schema/context.jsonld\", \"rt\"\n ) as fp:\n context = json.load(fp)\n else:\n context = {\n \"@context\": \"https://raw.githubusercontent.com/nipype/pydra/master/pydra/schema/context.jsonld\"\n }\n if self.audit_flags & flags:\n if self.messenger_args:\n send_message(\n make_message(message, context=context),\n messengers=self.messengers,\n **self.messenger_args,\n )\n else:\n send_message(\n make_message(message, context=context), messengers=self.messengers\n )\n\n @property\n def can_resume(self):\n \"\"\"Task can reuse partial results after interruption\n \"\"\"\n return self._can_resume\n\n @abc.abstractmethod\n def _run_task(self):\n pass\n\n @property\n def cache_dir(self):\n return self._cache_dir\n\n @cache_dir.setter\n def cache_dir(self, location):\n if location is not None:\n self._cache_dir = Path(location).resolve()\n self._cache_dir.mkdir(parents=False, exist_ok=True)\n else:\n self._cache_dir = mkdtemp()\n self._cache_dir = Path(self._cache_dir)\n\n @property\n def cache_locations(self):\n return self._cache_locations + ensure_list(self._cache_dir)\n\n @cache_locations.setter\n def cache_locations(self, locations):\n if locations is not None:\n self._cache_locations = [Path(loc) for loc in ensure_list(locations)]\n else:\n self._cache_locations = []\n\n @property\n def output_dir(self):\n return self._cache_dir / self.checksum\n\n def audit_check(self, flag):\n return self.audit_flags & flag\n\n def __call__(self, **kwargs):\n return self.run(**kwargs)\n\n def run(self, **kwargs):\n self.inputs = dc.replace(self.inputs, **kwargs)\n checksum = self.checksum\n lockfile = self.cache_dir / (checksum + \".lock\")\n \"\"\"\n Concurrent execution scenarios\n\n 1. prior cache exists -> return result\n 2. other process running -> wait\n a. finishes (with or without exception) -> return result\n b. gets killed -> restart\n 3. no cache or other process -> start\n 4. 
two or more concurrent new processes get to start\n \"\"\"\n # TODO add signal handler for processes killed after lock acquisition\n with FileLock(lockfile):\n # Let only one equivalent process run\n # Eagerly retrieve cached\n if self.results_dict: # should be skipped if run called without submitter\n result = self.result()\n if result is not None:\n return result\n odir = self.output_dir\n if not self.can_resume and odir.exists():\n shutil.rmtree(odir)\n cwd = os.getcwd()\n odir.mkdir(parents=False, exist_ok=True if self.can_resume else False)\n # start recording provenance, but don't send till directory is created\n # in case message directory is inside task output directory\n if self.audit_check(AuditFlag.PROV):\n aid = \"uid:{}\".format(gen_uuid())\n start_message = {\"@id\": aid, \"@type\": \"task\", \"startedAtTime\": now()}\n os.chdir(odir)\n if self.audit_check(AuditFlag.PROV):\n self.audit(start_message, AuditFlag.PROV)\n # audit inputs\n # check_runtime(self._runtime_requirements)\n # isolate inputs if files\n # cwd = os.getcwd()\n if self.audit_check(AuditFlag.RESOURCE):\n from ..utils.profiler import ResourceMonitor\n\n resource_monitor = ResourceMonitor(os.getpid(), logdir=odir)\n result = Result(output=None, runtime=None, errored=False)\n try:\n if self.audit_check(AuditFlag.RESOURCE):\n resource_monitor.start()\n if self.audit_check(AuditFlag.PROV):\n mid = \"uid:{}\".format(gen_uuid())\n self.audit(\n {\n \"@id\": mid,\n \"@type\": \"monitor\",\n \"startedAtTime\": now(),\n \"wasStartedBy\": aid,\n },\n AuditFlag.PROV,\n )\n self._run_task()\n result.output = self._collect_outputs()\n except Exception as e:\n record_error(self.output_dir, e)\n result.errored = True\n raise\n finally:\n if self.audit_check(AuditFlag.RESOURCE):\n resource_monitor.stop()\n result.runtime = gather_runtime_info(resource_monitor.fname)\n if self.audit_check(AuditFlag.PROV):\n self.audit(\n {\"@id\": mid, \"endedAtTime\": now(), \"wasEndedBy\": aid},\n AuditFlag.PROV,\n )\n # audit resources/runtime information\n eid = \"uid:{}\".format(gen_uuid())\n entity = dc.asdict(result.runtime)\n entity.update(\n **{\n \"@id\": eid,\n \"@type\": \"runtime\",\n \"prov:wasGeneratedBy\": aid,\n }\n )\n self.audit(entity, AuditFlag.PROV)\n self.audit(\n {\n \"@type\": \"prov:Generation\",\n \"entity_generated\": eid,\n \"hadActivity\": mid,\n },\n AuditFlag.PROV,\n )\n save_result(odir, result)\n with open(odir / \"_node.pklz\", \"wb\") as fp:\n cp.dump(self, fp)\n os.chdir(cwd)\n if self.audit_check(AuditFlag.PROV):\n # audit outputs\n self.audit(\n {\"@id\": aid, \"endedAtTime\": now(), \"errored\": result.errored},\n AuditFlag.PROV,\n )\n return result\n\n # TODO: Decide if the following two functions should be separated\n @abc.abstractmethod\n def _list_outputs(self):\n pass\n\n def _collect_outputs(self):\n run_output = ensure_list(self._list_outputs())\n output_klass = make_klass(self.output_spec)\n output = output_klass(**{f.name: None for f in dc.fields(output_klass)})\n return dc.replace(output, **dict(zip(self.output_names, run_output)))\n\n # TODO: should change state!\n def split(self, splitter, **kwargs):\n if kwargs:\n self.inputs = dc.replace(self.inputs, **kwargs)\n # dj:??, check if I need it\n self.state_inputs = kwargs\n splitter = aux.change_splitter(splitter, self.name)\n if self.state:\n raise Exception(\"splitter has been already set\")\n else:\n self.set_state(splitter)\n return self\n\n def combine(self, combiner):\n if not self.state:\n self.split(splitter=None)\n if not self.state:\n 
self.fut_combiner = combiner\n            return self\n            # raise Exception(\"splitter has to be set first\")\n        elif self.state.combiner:\n            raise Exception(\"combiner has been already set\")\n        self.combiner = combiner\n        self.set_state(splitter=self.state.splitter, combiner=self.combiner)\n        return self\n\n    # TODO: was used in submitter (if not needed should be removed)\n    # def checking_input_el(self, ind):\n    #     \"\"\"checking if all inputs are available (for specific state element)\"\"\"\n    #     try:\n    #         self.get_input_el(ind)\n    #         return True\n    #     except:  # TODO specify\n    #         return False\n\n    def get_input_el(self, ind):\n        \"\"\"collecting all inputs required to run the node (for specific state element)\"\"\"\n        if ind is not None:\n            # TODO: doesn't work properly for more complicated workflows\n            state_dict = self.state.states_val[ind]\n            input_ind = self.state.inputs_ind[ind]\n            inputs_dict = {}\n            for inp in set(self.input_names):\n                inputs_dict[inp] = getattr(self.inputs, inp)[\n                    input_ind[f\"{self.name}.{inp}\"]\n                ]\n            return state_dict, inputs_dict\n        else:\n            inputs_dict = {inp: getattr(self.inputs, inp) for inp in self.input_names}\n            return None, inputs_dict\n\n    def to_job(self, ind):\n        \"\"\"Run the interface on one element generated from node_state.\"\"\"\n        # logger.debug(\"Run interface el, name={}, ind={}\".format(self.name, ind))\n        el = deepcopy(self)\n        el.state = None\n        _, inputs_dict = self.get_input_el(ind)\n        el.inputs = dc.replace(el.inputs, **inputs_dict)\n        return el\n\n    # checking if all outputs are saved\n    @property\n    def done(self):\n        if self.results_dict:\n            return all([future.done() for _, (future, _) in self.results_dict.items()])\n\n    def _combined_output(self):\n        combined_results = []\n        for (gr, ind_l) in self.state.final_groups_mapping.items():\n            combined_results.append([])\n            for ind in ind_l:\n                result = load_result(self.results_dict[ind][1], self.cache_locations)\n                combined_results[gr].append(result)\n        return combined_results\n\n    def result(self, state_index=None):\n        \"\"\"Collect the task's result(s) from the cache.\n\n        :param state_index: index of a single state element to return\n        :return: a Result, or a (possibly combined) list of Results if the task has a state\n        \"\"\"\n        # TODO: check if result is available in load_result and\n        # return a future if not\n        if self.state:\n            if state_index is None:\n                # if state_index=None, collecting all results\n                if self.state.combiner:\n                    return self._combined_output()\n                else:\n                    results = []\n                    for (ii, val) in enumerate(self.state.states_val):\n                        result = load_result(\n                            self.results_dict[ii][1], self.cache_locations\n                        )\n                        results.append(result)\n                    return results\n            else:  # state_index is not None\n                if self.state.combiner:\n                    return self._combined_output()[state_index]\n                result = load_result(\n                    self.results_dict[state_index][1], self.cache_locations\n                )\n                return result\n        else:\n            if state_index is not None:\n                raise ValueError(\"Task does not have a state\")\n            if self.results_dict:\n                checksum = self.results_dict[None][1]\n            else:\n                checksum = self.checksum\n            result = load_result(checksum, self.cache_locations)\n            return result\n\n\nclass Workflow(TaskBase):\n    def __init__(\n        self,\n        name,\n        input_spec: ty.Union[ty.List[ty.Text], BaseSpec, None] = None,\n        output_spec: ty.Optional[BaseSpec] = None,\n        audit_flags: AuditFlag = AuditFlag.NONE,\n        messengers=None,\n        messenger_args=None,\n        cache_dir=None,\n        cache_locations=None,\n        **kwargs,\n    ):\n        if input_spec:\n            if isinstance(input_spec, BaseSpec):\n                self.input_spec = input_spec\n            else:\n                self.input_spec = SpecInfo(\n                    name=\"Inputs\",\n                    fields=[(name, ty.Any) for name in input_spec]\n                    + [(\"_graph\", ty.Any)],\n                    bases=(BaseSpec,),\n                )\n        if output_spec is None:\n            output_spec = SpecInfo(\n                
name=\"Output\", fields=[(\"out\", ty.Any)], bases=(BaseSpec,)\n )\n self.output_spec = output_spec\n\n super(Workflow, self).__init__(\n name=name,\n inputs=kwargs,\n cache_dir=cache_dir,\n cache_locations=cache_locations,\n audit_flags=audit_flags,\n messengers=messengers,\n messenger_args=messenger_args,\n )\n\n self.graph = nx.DiGraph()\n self.name2obj = {}\n\n # store output connections\n self._connections = None\n self.node_names = []\n\n def __getattr__(self, name):\n if name == \"lzin\":\n return LazyField(self, \"input\")\n if name == \"lzout\":\n return super().__getattr__(name)\n if name in self.name2obj:\n return self.name2obj[name]\n return self.__getattribute__(name)\n\n @property\n def nodes(self):\n return self._nodes\n\n @property\n def graph_sorted(self):\n return list(nx.topological_sort(self.graph))\n\n def add(self, task):\n if not is_task(task):\n raise ValueError(\"Unknown workflow element: {!r}\".format(task))\n self.graph.add_nodes_from([task])\n self.name2obj[task.name] = task\n self._last_added = task\n other_states = {}\n for field in dc.fields(task.inputs):\n val = getattr(task.inputs, field.name)\n if isinstance(val, LazyField):\n # adding an edge to the graph if task id expecting output from a different task\n if val.name != self.name:\n self.graph.add_edge(\n getattr(self, val.name),\n task,\n from_field=val.field,\n to_field=field.name,\n )\n if val.name in self.node_names and getattr(self, val.name).state:\n # adding a state from the previous task to other_states\n other_states[val.name] = (getattr(self, val.name).state, field.name)\n # if task has connections state has to be recalculated\n if other_states:\n if hasattr(task, \"fut_combiner\"):\n task.state = state.State(\n task.name, other_states=other_states, combiner=task.fut_combiner\n )\n else:\n task.state = state.State(task.name, other_states=other_states)\n self.node_names.append(task.name)\n self.inputs._graph = self.graph_sorted\n return self\n\n def _run_task(self):\n for task in self.graph_sorted:\n # depend on prior tasks that have state\n task.inputs.retrieve_values(self)\n if task.state and not hasattr(task.state, \"states_ind\"):\n task.state.prepare_states(inputs=task.inputs)\n if task.state and not hasattr(task.state, \"inputs_ind\"):\n task.state.prepare_inputs()\n if self.plugin is None:\n task.run()\n else:\n from .submitter import Submitter\n\n with Submitter(self.plugin) as sub:\n sub.run(task)\n while not task.done:\n sleep(1)\n\n def set_output(self, connections):\n self._connections = connections\n fields = [(name, ty.Any) for name, _ in connections]\n self.output_spec = SpecInfo(name=\"Output\", fields=fields, bases=(BaseSpec,))\n\n def _list_outputs(self):\n output = []\n for name, val in self._connections:\n if not isinstance(val, LazyField):\n raise ValueError(\"all connections must be lazy\")\n output.append(val.get_value(self))\n return output\n\n\n# TODO: task has also call\ndef is_function(obj):\n return hasattr(obj, \"__call__\")\n\n\ndef is_task(obj):\n return hasattr(obj, \"_run_task\")\n\n\ndef is_workflow(obj):\n return isinstance(obj, Workflow)\n","sub_path":"pydra/engine/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":22006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"467423749","text":"import sys\nimport argparse\n\n\ndef echo_upper(text):\n return text.upper()\n\n\ndef echo_lower(text):\n return text.lower()\n\n\ndef echo_title(text):\n return text.title()\n\n\ndef parse_args(args):\n 
parser = argparse.ArgumentParser(\n        description='Perform transformation on input text.')\n    parser.add_argument('-u', '--upper',\n                        help='convert text to uppercase', action='store_true')\n    parser.add_argument('-l', '--lower',\n                        help='convert text to lowercase', action='store_true')\n    parser.add_argument('-t', '--title',\n                        help='convert text to titlecase', action='store_true')\n    parser.add_argument('text', help='text to be manipulated')\n\n    return parser.parse_args(args)\n\n\ndef main():\n    # parse_args returns a Namespace; read the flags and the text from it\n    args = parse_args(sys.argv[1:])\n    text = args.text\n\n    if args.upper:\n        print(echo_upper(text))\n    if args.lower:\n        print(echo_lower(text))\n    if args.title:\n        print(echo_title(text))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"76964690","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 10 09:19:05 2019\n\n@author: gpichard\n\"\"\"\n\nimport os\nfile_path = os.path.realpath(__file__)\ndir_path = os.path.dirname(file_path)\nif os.getcwd() != dir_path:\n    print(\"Need to relocate current working directory\")\n    os.chdir(dir_path)\n\nimport pandas as pd\npd.options.display.float_format = '{:.3f}'.format\nimport numpy as np\nimport datetime\nimport multiprocessing\nimport linearmodels\n\n# =============================================================================\n# # Loading the monthly panel, transforming some variables and filtering some outliers\n# =============================================================================\nMonthlyVariables_db = pd.read_csv('Monthly/MonthlyVariables_db.csv', index_col = [0, 1], header = 0, parse_dates = True)\nMonthlyVariables_db.info()\nfor col in list(['Date.' + str(i) for i in np.arange(1, 9)]):\n    MonthlyVariables_db[col] = pd.to_datetime(MonthlyVariables_db[col], infer_datetime_format = True)\ndel col\nMonthlyVariables_db.reset_index(inplace = True)\nMonthlyVariables_db.set_index(['RIC','YearMonth'], inplace = True)\n\n#MonthlyAvailable_db = MonthlyVariables_db.dropna()\nMonthlyAvailable_db = MonthlyVariables_db\n# Whenever, at a given time and for a given company, the number of shares held by ETFs is equal to or larger than the number of shares outstanding, the observation is dropped\nMonthlyAvailable_db.loc[MonthlyAvailable_db.PctSharesHeldETF >= 1, 'PctSharesHeldETF'] = None\n# Dropping observations with stock price less than USD 1\nMonthlyAvailable_db.loc[MonthlyAvailable_db.Close < 1, 'Close'] = None\n\n\n# Transformations and lags needed for regressions\nMonthlyAvailable_db = MonthlyAvailable_db.assign(InvClose = pd.Series(1/MonthlyAvailable_db['Close']))\nMonthlyAvailable_db = MonthlyAvailable_db.assign(BookToMarketRatio = pd.Series(1/MonthlyAvailable_db['PriceToBVPerShare']))\n\n# Replace infinite values with NaN in order to drop them afterwards\nMonthlyAvailable_db.replace([np.inf, -np.inf], np.nan, inplace = True)\n\n\n## Summary statistics\nKeptVariables_summary = list(['Volatility', 'PctSharesHeldETF', 'BookToMarketRatio', 'CompanyMarketCap_millions', 'InvClose', 'PctBidAskSpread', 'AmihudRatio', 'RetPast12to1M', 'RetPast12to7M', 'GrossProfitability'])\nKeptVariables_headers = list(['Volatility', 'ETF Ownership', 'Book-to-market', 'Market cap. ($ Mln.)', '1/Price', 'Rel. 
Bid-Ask spread', 'Amihud ratio', 'Past 12-to-1-month return', 'Past 12-to-7-month return', 'Gross profitability'])\nSumStats = MonthlyAvailable_db[KeptVariables_summary].dropna().describe().transpose()\nSumStats['count'] = SumStats['count'].astype(int)\nSumStats.set_index(pd.Series(KeptVariables_headers), drop = True, inplace = True)\n# Export\n#SumStats.to_latex('../SummaryStats/SummaryTable.tex', header = ['N (obs.)', 'Mean', 'St. dev.', 'Min.', '25%', 'Median', '75%', 'Max.'])\n#SumStats.to_latex()\n# Correlation matrix\nCorrMat = MonthlyAvailable_db[KeptVariables_summary].corr()\nmask = np.tril(np.ones_like(CorrMat, dtype=np.bool), k=0)\nCorrMat.where(mask, other = '', inplace = True)\nNumberedHeader = [\"({})\".format(x) for x in np.arange(1, len(KeptVariables_summary) + 1)]\nCorrMat.set_index([pd.Series(KeptVariables_headers), pd.Series(NumberedHeader)], inplace = True, drop = True)\n\n# Export\n#CorrMat.to_latex('../SummaryStats/CorrTable.tex', index_names = True, header = NumberedHeader[:(len(NumberedHeader))])\n\n## Dynamic panel\n# Shift variables in time : independent variables and volatility lags\nMonthlyAvailable_1lag_db = MonthlyAvailable_db.groupby(level = 0)[['PctSharesHeldETF', 'CompanyMarketCap', 'InvClose', 'AmihudRatio', 'PctBidAskSpread', 'BookToMarketRatio', 'RetPast12to1M', 'RetPast12to7M','GrossProfitability', 'Volatility', 'PctSharesHeldOtherMutual', 'PctSharesHeldPension', 'PctSharesHeldHedge']].shift(1)\nMonthlyAvailable_1lag_db.columns = [s + \"_1lag\" for s in list(MonthlyAvailable_1lag_db.columns)]\nMonthlyAvailable_2lag_db = MonthlyAvailable_db.groupby(level = 0)['Volatility'].shift(2)\nMonthlyAvailable_2lag_db.name = MonthlyAvailable_2lag_db.name + \"_2lag\"\nMonthlyAvailable_3lag_db = MonthlyAvailable_db.groupby(level = 0)['Volatility'].shift(3)\nMonthlyAvailable_3lag_db.name = MonthlyAvailable_3lag_db.name + \"_3lag\"\nMonthlyAvailable_4lag_db = MonthlyAvailable_db.groupby(level = 0)['Volatility'].shift(4)\nMonthlyAvailable_4lag_db.name = MonthlyAvailable_4lag_db.name + \"_4lag\"\nMonthlyAvailable_db = pd.concat([MonthlyAvailable_db, MonthlyAvailable_1lag_db, MonthlyAvailable_2lag_db, MonthlyAvailable_3lag_db, MonthlyAvailable_4lag_db], axis = 1)\n#MonthlyAvailable_db = MonthlyAvailable_db.dropna()\nMonthlyAvailable_db.info()\n\n## Standardizing dependent and ownership variables\n## Censoring the sample at 99.99th percentile for statistics\nMonthlyAvailable_std_db = MonthlyAvailable_db[['Volatility', 'PctSharesHeldETF', 'PctSharesHeldOtherMutual', 'PctSharesHeldPension', 'PctSharesHeldHedge']]\ncolInit = list(MonthlyAvailable_std_db.columns)\nfor col in colInit:\n mean = MonthlyAvailable_std_db[col].dropna().loc[MonthlyAvailable_std_db[col].dropna() < np.percentile(MonthlyAvailable_std_db[col].dropna(), 99.99)].mean()\n std = MonthlyAvailable_std_db[col].dropna().loc[MonthlyAvailable_std_db[col].dropna() < np.percentile(MonthlyAvailable_std_db[col].dropna(), 99.99)].std()\n # replacing the top 0.01% of values with NaN\n MonthlyAvailable_std_db[col].loc[MonthlyAvailable_std_db[col] >= np.percentile(MonthlyAvailable_std_db[col].dropna(), 99.99)] = None\n MonthlyAvailable_std_db[col] = (MonthlyAvailable_std_db[col] - mean)/std\n\ndel colInit, col, mean, std\n\n# Assign the non-normalized controls\nMonthlyAvailable_std_db = MonthlyAvailable_std_db.assign(InvClose = MonthlyAvailable_db.InvClose, AmihudRatio = MonthlyAvailable_db.AmihudRatio, RetPast12to1M = MonthlyAvailable_db.RetPast12to1M, RetPast12to7M = MonthlyAvailable_db.RetPast12to7M, CompanyMarketCap = 
MonthlyAvailable_db.CompanyMarketCap, BookToMarketRatio = MonthlyAvailable_db.BookToMarketRatio, PctBidAskSpread = MonthlyAvailable_db.PctBidAskSpread, GrossProfitability = MonthlyAvailable_db.GrossProfitability)\n\nMonthlyAvailable_std_1lag_db = MonthlyAvailable_std_db.groupby(level = 0)[['PctSharesHeldETF', 'CompanyMarketCap', 'InvClose', 'AmihudRatio', 'PctBidAskSpread', 'BookToMarketRatio', 'RetPast12to1M', 'RetPast12to7M','GrossProfitability', 'Volatility', 'PctSharesHeldOtherMutual', 'PctSharesHeldPension', 'PctSharesHeldHedge']].shift(1)\nMonthlyAvailable_std_1lag_db.columns = [s + \"_1lag\" for s in list(MonthlyAvailable_std_1lag_db.columns)]\nMonthlyAvailable_std_2lag_db = MonthlyAvailable_std_db.groupby(level = 0)['Volatility'].shift(2)\nMonthlyAvailable_std_2lag_db.name = MonthlyAvailable_std_2lag_db.name + \"_2lag\"\nMonthlyAvailable_std_3lag_db = MonthlyAvailable_std_db.groupby(level = 0)['Volatility'].shift(3)\nMonthlyAvailable_std_3lag_db.name = MonthlyAvailable_std_3lag_db.name + \"_3lag\"\nMonthlyAvailable_std_4lag_db = MonthlyAvailable_std_db.groupby(level = 0)['Volatility'].shift(4)\nMonthlyAvailable_std_4lag_db.name = MonthlyAvailable_std_4lag_db.name + \"_4lag\"\nMonthlyAvailable_std_db = pd.concat([MonthlyAvailable_std_db, MonthlyAvailable_std_1lag_db, MonthlyAvailable_std_2lag_db, MonthlyAvailable_std_3lag_db, MonthlyAvailable_std_4lag_db], axis = 1)\n\n## First differences\nMonthlyAvailable_diff_db = MonthlyAvailable_db.groupby(level=0).diff()\n#MonthlyAvailable_diff_db.to_csv('Monthly/MonthlyAvailable_diff.csv', header = True, index = True)\n\n## Loading quarterly variance ratios panel\nQuarterlyVarianceRatio_panel = pd.read_csv('Quarterly/Stocks_5to1dVarianceRatio.csv', header = 0, parse_dates = True)\nQuarterlyVarianceRatio_panel.Date = pd.to_datetime(QuarterlyVarianceRatio_panel.Date, infer_datetime_format = True)\nQuarterlyVarianceRatio_db = pd.melt(QuarterlyVarianceRatio_panel, id_vars = ['Date'], var_name = 'RIC', value_name = 'VR')\nQuarterlyVarianceRatio_db.set_index(['RIC','Date'], inplace = True)\nQuarterlyVarianceRatio_db = QuarterlyVarianceRatio_db.assign(absVR = abs(QuarterlyVarianceRatio_db.VR - 1))\n\n# Creating quarterly dataset with the monthly regressors, using end of quarter data or, if missing, the last available values\nlevel_values = MonthlyAvailable_db.index.get_level_values\nQuarterlyAvailable_db = MonthlyAvailable_db.groupby([level_values(0)]+[pd.Grouper(freq='Q', level = -1)]).last()\n# Lags have to be constructed again at the quarterly frequency (they are still monthly)\nQuarterlyAvailable_db.drop(columns=QuarterlyAvailable_db.filter(regex=(\"lag\")).columns, inplace = True)\nQuarterlyAvailable_db = pd.concat([QuarterlyAvailable_db, QuarterlyVarianceRatio_db], axis = 1)\n\nQuarterlyAvailable_1lag_db = QuarterlyAvailable_db.groupby(level = 0)[['PctSharesHeldETF', 'CompanyMarketCap', 'InvClose', 'AmihudRatio', 'PctBidAskSpread', 'BookToMarketRatio', 'RetPast12to1M', 'RetPast12to7M','GrossProfitability', 'Volatility', 'PctSharesHeldOtherMutual', 'PctSharesHeldPension', 'PctSharesHeldHedge', 'VR', 'absVR']].shift(1)\nQuarterlyAvailable_1lag_db.columns = [s + \"_1lag\" for s in list(QuarterlyAvailable_1lag_db.columns)]\nQuarterlyAvailable_2lag_db = QuarterlyAvailable_db.groupby(level = 0)['Volatility'].shift(2)\nQuarterlyAvailable_2lag_db.name = QuarterlyAvailable_2lag_db.name + \"_2lag\"\nQuarterlyAvailable_3lag_db = QuarterlyAvailable_db.groupby(level = 0)['Volatility'].shift(3)\nQuarterlyAvailable_3lag_db.name = 
QuarterlyAvailable_3lag_db.name + \"_3lag\"\nQuarterlyAvailable_4lag_db = QuarterlyAvailable_db.groupby(level = 0)['Volatility'].shift(4)\nQuarterlyAvailable_4lag_db.name = QuarterlyAvailable_4lag_db.name + \"_4lag\"\nQuarterlyAvailable_db = pd.concat([QuarterlyAvailable_db, QuarterlyAvailable_1lag_db, QuarterlyAvailable_2lag_db, QuarterlyAvailable_3lag_db, QuarterlyAvailable_4lag_db], axis = 1)\n\n\n# =============================================================================\n# ## Regression estimates\n# =============================================================================\n# Model 1 : Volatility\n# Full US sample, lagged controls\n# Column \"Baseline\"\nmod1_All_Volatility = linearmodels.PanelOLS.from_formula('Volatility ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + BookToMarketRatio_1lag + RetPast12to1M_1lag + EntityEffects + TimeEffects', MonthlyAvailable_db)\nmod1_fit_All_Volatility = mod1_All_Volatility.fit(cov_type = \"kernel\")\nprint(mod1_fit_All_Volatility)\nf = open('../Regression_Results/US/mod1_fit_All_Volatility.tex', 'w')\nf.write(mod1_fit_All_Volatility.summary.as_latex())\nf.close()\n\n## Full US sample with a constant term, lagged controls and 4 volatility lags\n#mod1_All_Volatility_withLags = linearmodels.PanelOLS.from_formula('Volatility ~ 1 + PctSharesHeldETF + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects + Volatility_1lag + Volatility_2lag + Volatility_3lag + Volatility_4lag', MonthlyAvailable_db)\n#mod1_fit = mod1_All_Volatility_withLags.fit(cov_type = \"kernel\")\n#print(mod1_fit)\n#f = open('../Regression_Results/US/mod1_All_Volatility_withLags.tex', 'w')\n#f.write(mod1_fit.summary.as_latex())\n#f.close()\n\n# With additional liquidity proxies and volatility lags\n# Column \"Controls + Vol. lags\"\nmod1_All_Volatility_withLags_Controls = linearmodels.PanelOLS.from_formula('Volatility ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to1M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects + Volatility_1lag + Volatility_2lag + Volatility_3lag + Volatility_4lag', MonthlyAvailable_db)\nmod1_fit_All_Volatility_withLags_Controls = mod1_All_Volatility_withLags_Controls.fit(cov_type = \"kernel\")\nprint(mod1_fit_All_Volatility_withLags_Controls)\nf = open('../Regression_Results/US/mod1_contemporaneousControls_fit.tex', 'w')\nf.write(mod1_fit_All_Volatility_withLags_Controls.summary.as_latex())\nf.close()\n\n### Attempt to do a dynamic panel estimation through GMM. Needs further research.\n##mod1_GMM_All_Volatility_withlags = linearmodels.LinearFactorModelGMM()\n#MonthlyAvailable_db.describe().to_csv('../SummaryStats/All_Volatility_withLags.csv', header = True, index = True)\n## Summary statistics : there are a few extreme values that may bias the sample.\n#MonthlyAvailable_db.describe().to_csv('../SummaryStats/All_Volatility_withLags.csv', header = True, index = True)\n\n\n# Including other fund ownership controls\n# Column \"Inst. 
o\\'ship controls\"\nmod1_All_Volatility_withLags_withFundcontrols = linearmodels.PanelOLS.from_formula('Volatility ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to1M_1lag + GrossProfitability_1lag + PctSharesHeldOtherMutual_1lag + PctSharesHeldPension_1lag + PctSharesHeldHedge_1lag + EntityEffects + TimeEffects + Volatility_1lag + Volatility_2lag + Volatility_3lag + Volatility_4lag', MonthlyAvailable_db)\nmod1_fit_All_Volatility_withLags_withFundcontrols = mod1_All_Volatility_withLags_withFundcontrols.fit(cov_type = \"kernel\")\nprint(mod1_fit_All_Volatility_withLags_withFundcontrols)\nf = open('../Regression_Results/US/mod1_fit_All_Volatility_withLags_withFundcontrols.tex', 'w')\nf.write(mod1_fit_All_Volatility_withLags_withFundcontrols.summary.as_latex())\nf.close()\n\n# Standardization could (and should?) be performed over the relevant sample of values only\n\n# Estimation on standardized, winsorized variables\n# Column \"Standardized\"\nmod1_All_Volatility_std_withLags_withFundcontrols = linearmodels.PanelOLS.from_formula('Volatility ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to1M_1lag + GrossProfitability_1lag + PctSharesHeldOtherMutual_1lag + PctSharesHeldPension_1lag + PctSharesHeldHedge_1lag + EntityEffects + TimeEffects + Volatility_1lag + Volatility_2lag + Volatility_3lag + Volatility_4lag', MonthlyAvailable_std_db)\nmod1_fit_All_Volatility_std_withLags_withFundcontrols = mod1_All_Volatility_std_withLags_withFundcontrols.fit(cov_type = \"kernel\")\nprint(mod1_fit_All_Volatility_std_withLags_withFundcontrols)\nf = open('../Regression_Results/US/mod1_fit_All_Volatility_std_withLags_withFundcontrols.tex', 'w')\nf.write(mod1_fit_All_Volatility_std_withLags_withFundcontrols.summary.as_latex())\nf.close()\n\n## First differences, no controls except other fund holdings and volatility lags\n## The full regressors matrix is not full rank, thus we do not include all usual controls \n#mod1_All_diffVolatility_withLags_withFundcontrols = linearmodels.PanelOLS.from_formula('Volatility_std ~ 0 + PctSharesHeldETF_std + PctSharesHeldOtherMutual_std + PctSharesHeldPension_std + PctSharesHeldHedge_std + AmihudRatio_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects + Volatility_1lag', MonthlyAvailable_diff_db)\n#mod1_fit_All_diffVolatility_withLags_withFundcontrols = mod1_All_diffVolatility_withLags_withFundcontrols.fit(cov_type = \"kernel\")\n#print(mod1_fit_All_diffVolatility_withLags_withFundcontrols)\n#f = open('../Regression_Results/US/mod1_fit_All_diffVolatility_std_withLags_withFundcontrols.tex', 'w')\n#f.write(mod1_fit_All_diffVolatility_withLags_withFundcontrols.summary.as_latex())\n#f.close()\n#\n## Variation : there is a FirstDifferenceOLS function in the \"linearmodels\" package\n## Using the standardized variables\n#mod1_All_FirstDiffOLS_withLags_withFundcontrols = linearmodels.FirstDifferenceOLS.from_formula('Volatility ~ 0 + PctSharesHeldETF_std + CompanyMarketCap_1lag + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + PctSharesHeldOtherMutual_std + PctSharesHeldPension_std + PctSharesHeldHedge_std + TimeEffects + Volatility_1lag + Volatility_2lag + Volatility_3lag + Volatility_4lag', 
MonthlyAvailable_stdMod1_db)\n#mod1_fit_All_FirstDiffOLS_withLags_withFundcontrols = mod1_All_FirstDiffOLS_withLags_withFundcontrols.fit(cov_type = \"kernel\")\n#print(mod1_fit_All_FirstDiffOLS_withLags_withFundcontrols)\n\n# Models comparison for the paper body\nmod1_comp = linearmodels.panel.compare({'Baseline':mod1_fit_All_Volatility, 'Controls + Vol. lags': mod1_fit_All_Volatility_withLags_Controls, 'Inst. o\\'ship controls':mod1_fit_All_Volatility_withLags_withFundcontrols , 'Standardized': mod1_fit_All_Volatility_std_withLags_withFundcontrols})\nprint(mod1_comp)\nf = open('../Regression_Results/US/mod1_comp.tex', 'w')\nf.write(mod1_comp.summary.as_latex())\nf.close()\n\n# Model 2 : Liquidity\n# Bid-ask spread on close prices (not equal to the High-Low spread metric from Israeli 2017) - static model\nmod2_All_Liquidity_BidAsk = linearmodels.PanelOLS.from_formula('PctBidAskSpread ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + BookToMarketRatio_1lag + Volatility_1lag + EntityEffects + TimeEffects', MonthlyAvailable_db)\nmod2_fit_All_Liquidity_BidAsk = mod2_All_Liquidity_BidAsk.fit(cov_type = \"kernel\")\nprint(mod2_fit_All_Liquidity_BidAsk)\nf = open('../Regression_Results/US/mod2_fit_All_Liquidity_BidAsk.tex', 'w')\nf.write(mod2_fit_All_Liquidity_BidAsk.summary.as_latex())\nf.close()\n\nmod2_All_Liquidity_BidAsk_withFundcontrols = linearmodels.PanelOLS.from_formula('PctBidAskSpread ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + BookToMarketRatio_1lag + Volatility_1lag + EntityEffects + TimeEffects + PctSharesHeldOtherMutual_1lag + PctSharesHeldPension_1lag + PctSharesHeldHedge_1lag', MonthlyAvailable_db)\nmod2_fit_All_Liquidity_BidAsk_withFundcontrols = mod2_All_Liquidity_BidAsk_withFundcontrols.fit(cov_type = \"kernel\")\nprint(mod2_fit_All_Liquidity_BidAsk_withFundcontrols)\nf = open('../Regression_Results/US/mod2_fit_All_Liquidity_BidAsk_withFundcontrols.tex', 'w')\nf.write(mod2_fit_All_Liquidity_BidAsk_withFundcontrols.summary.as_latex())\nf.close()\n\n# Amihud ratio decomposition\nmod2_All_Liquidity_Amihud = linearmodels.PanelOLS.from_formula('AmihudNumerator ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + BookToMarketRatio_1lag + AmihudDenominator + EntityEffects + TimeEffects', MonthlyAvailable_db)\nmod2_fit_All_Liquidity_Amihud = mod2_All_Liquidity_Amihud.fit(cov_type = \"kernel\")\nprint(mod2_fit_All_Liquidity_Amihud)\nf = open('../Regression_Results/US/mod2_fit_All_Liquidity_Amihud.tex', 'w')\nf.write(mod2_fit_All_Liquidity_Amihud.summary.as_latex())\nf.close()\n\nmod2_All_Liquidity_Amihud_withFundcontrols = linearmodels.PanelOLS.from_formula('AmihudNumerator ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + BookToMarketRatio_1lag + AmihudDenominator + EntityEffects + TimeEffects + PctSharesHeldOtherMutual_1lag ', MonthlyAvailable_db)\nmod2_fit_All_Liquidity_Amihud_withFundcontrols = mod2_All_Liquidity_Amihud_withFundcontrols.fit(cov_type = \"kernel\")\nprint(mod2_fit_All_Liquidity_Amihud_withFundcontrols)\nf = open('../Regression_Results/US/mod2_fit_All_Liquidity_Amihud_withFundcontrols.tex', 'w')\nf.write(mod2_fit_All_Liquidity_Amihud_withFundcontrols.summary.as_latex())\nf.close()\n# Only the other mutual fund holdings are included to control for institutional ownership, otherwise the matrix of regressors does not have full column rank. 
Other mutual funds are the most available values and economically the more sizeable of the three categories at hand.\n\n# Models comparison for the paper body\nmod2_comp = linearmodels.panel.compare({'Bid-Ask':mod2_fit_All_Liquidity_BidAsk, 'Bid-Ask w/inst. o\\'ship':mod2_fit_All_Liquidity_BidAsk_withFundcontrols, 'Amihud':mod2_fit_All_Liquidity_Amihud, 'Amihud w/inst. o\\'ship':mod2_fit_All_Liquidity_Amihud_withFundcontrols})\nprint(mod2_comp)\nf = open('../Regression_Results/US/mod2_comp.tex', 'w')\nf.write(mod2_comp.summary.as_latex())\nf.close()\n# Model 3 : Efficiency\nmod3_All_VR = linearmodels.PanelOLS.from_formula('VR ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects', QuarterlyAvailable_db)\nmod3_fit_All_VR = mod3_All_VR.fit(cov_type = \"kernel\")\nprint(mod3_fit_All_VR)\nf = open('../Regression_Results/US/mod3_fit_All_VR.tex', 'w')\nf.write(mod3_fit_All_VR.summary.as_latex())\nf.close()\n\nmod3_All_VR_withFundcontrols = linearmodels.PanelOLS.from_formula('VR ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects + PctSharesHeldOtherMutual_1lag + PctSharesHeldPension_1lag + PctSharesHeldHedge_1lag', QuarterlyAvailable_db)\nmod3_fit_All_VR_withFundcontrols = mod3_All_VR_withFundcontrols.fit(cov_type = \"kernel\")\nprint(mod3_fit_All_VR_withFundcontrols)\nf = open('../Regression_Results/US/mod3_fit_All_VR_withFundcontrols.tex', 'w')\nf.write(mod3_fit_All_VR_withFundcontrols.summary.as_latex())\nf.close()\n\nmod3_All_absVR = linearmodels.PanelOLS.from_formula('absVR ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects', QuarterlyAvailable_db)\nmod3_fit_All_absVR = mod3_All_absVR.fit(cov_type = \"kernel\")\nprint(mod3_fit_All_absVR)\nf = open('../Regression_Results/US/mod3_fit_All_absVR.tex', 'w')\nf.write(mod3_fit_All_absVR.summary.as_latex())\nf.close()\n\nmod3_All_absVR_withFundcontrols = linearmodels.PanelOLS.from_formula('absVR ~ 1 + PctSharesHeldETF_1lag + np.log(CompanyMarketCap_1lag) + InvClose_1lag + AmihudRatio_1lag + PctBidAskSpread_1lag + BookToMarketRatio_1lag + RetPast12to7M_1lag + GrossProfitability_1lag + EntityEffects + TimeEffects + PctSharesHeldOtherMutual_1lag + PctSharesHeldPension_1lag + PctSharesHeldHedge_1lag', QuarterlyAvailable_db)\nmod3_fit_All_absVR_withFundcontrols = mod3_All_absVR_withFundcontrols.fit(cov_type = \"kernel\")\nprint(mod3_fit_All_absVR_withFundcontrols)\nf = open('../Regression_Results/US/mod3_fit_All_absVR_withFundcontrols.tex', 'w')\nf.write(mod3_fit_All_absVR_withFundcontrols.summary.as_latex())\nf.close()\n\n# Models comparison for the paper body\nmod3_comp = linearmodels.panel.compare({'VR':mod3_fit_All_VR, 'VR w/inst. o\\'ship':mod3_fit_All_VR_withFundcontrols, 'Abs. VR':mod3_fit_All_absVR, 'Abs. VR w/inst. 
o\\'ship':mod3_fit_All_absVR_withFundcontrols})\nprint(mod3_comp)\nf = open('../Regression_Results/US/mod3_comp.tex', 'w')\nf.write(mod3_comp.summary.as_latex())\nf.close()","sub_path":"Eikon/PanelAnalysis_monthly.py","file_name":"PanelAnalysis_monthly.py","file_ext":"py","file_size_in_byte":22675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"161672192","text":"import requests\nfrom bs4 import BeautifulSoup\nimport bs4\nimport re\n\n# Fetch the university-ranking web page content from the network\ndef getHTMLText(url):\n    try:\n        r = requests.get(url, timeout = 30)\n        r.raise_for_status()\n        r.encoding = r.apparent_encoding\n        return r.text\n    except:\n        return 'Fail'\n\n# Extract the information from the page content into a suitable data structure\ndef fillUnivList(ulist, html):\n    soup = BeautifulSoup(html, 'html.parser')\n    ulist = soup.find_all(string = re.compile('大学'))[6:21]  # '大学' means 'university'; the pattern matches entries in the Chinese page\n    d = dict()\n    count = 1\n    for name in ulist:\n        if name not in d:\n            d[name] = count\n            count+=1\n    lst = list()\n    for name, rank in d.items():\n        lst.append((rank, name))\n\n    lst.sort()\n    print('rank\\t univ')\n    for rank, name in lst:\n        print(rank, name)\n    #for tr in soup.find('tbody').children: # locate the tbody tag\n        #if isinstance(tr, bs4.element.Tag): # find each university's tr tag inside tbody, filtering out non-tag items\n            #print(soup.find_all(string = re.compile('大学'))[:20])\n            #print(tr.prettify())\n            #tds = tr('td') # read the td tags inside the tr and store the needed ones in a list\n            #ulist.append( [tds[0].string, tds[1].string, tds[2].string] )\n'''\n# Display and output the results from the data structure\ndef printUnivList(ulist, num):\n    tplt = '{0:^10}\\t{1:{3}^10}\\t{2:^10}' # {3}: fill using the third argument of the format call\n    print(tplt.format('rank', 'university', 'location', chr(12288)))\n    for i in range (num):\n        u = ulist[i]\n        print(tplt.format(u[0], u[1], u[2], chr(12288)))\n'''\nif __name__ == '__main__':\n    unifo = []\n    url = 'http://www.zuihaodaxue.com/ARWU2018.html'\n    html = getHTMLText(url)\n    fillUnivList(unifo, html)\n    #printUnivList(unifo, 20) #20 univs\n","sub_path":"worldUniRank_junkV1.py","file_name":"worldUniRank_junkV1.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111605690","text":"import numpy as np\nimport keras\nfrom keras.layers import Conv2D\nfrom keras.models import Sequential\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\n\ndef read_labels(filename, items):  # read the image labels, i.e. the digits to be learned\n    file_labels = open(filename, 'rb')\n    file_labels.seek(8)  # the label file has an 8-byte header; skip it\n    data = file_labels.read(items)\n    y = np.zeros(items)\n    for i in range(items):\n        y[i] = data[i]\n    file_labels.close()\n    return y\n\n\ny_train = read_labels('./train-labels-idx1-ubyte', 60000)  # read the 60000 training labels\ny_test = read_labels('./t10k-labels-idx1-ubyte', 10000)  # read the 10000 test labels\n\n\ndef read_images(filename, items):  # read the images\n    file_image = open(filename, 'rb')\n    file_image.seek(16)\n\n    data = file_image.read(items * 28 * 28)\n\n    X = np.zeros(items * 28 * 28)\n    for i in range(items * 28 * 28):\n        X[i] = data[i] / 255\n    file_image.close()\n    return X.reshape(-1, 28, 28, 1)  # note this last line: reshape into the input shape the convolutional network expects\n\n\nX_train = read_images('train-images-idx3-ubyte', 60000)  # read the 60000 training images\nX_test = read_images('./t10k-images-idx3-ubyte', 10000)  # read the 10000 test images\n\ny_train = keras.utils.to_categorical(y_train, 10)  # one-hot encoding\ny_test = keras.utils.to_categorical(y_test, 10)  # one-hot encoding\n\n# Training and validation\n\nmodel = Sequential()\nmodel.add(\n    Conv2D(32, kernel_size=(3, 3), activation='relu',\n           input_shape=(28, 28, 1)))  # 32-filter convolution\nmodel.add(MaxPooling2D(pool_size=(2, 2)))  # 2x2 max pooling\nmodel.add(Conv2D(64, (3, 3), activation='relu'))  # 64-filter convolution\nmodel.add(MaxPooling2D(pool_size=(2, 2)))  # 2x2 max pooling\nmodel.add(Flatten())  # flatten layer bridging the convolutional and fully connected parts\nmodel.add(Dense(128, activation='relu'))  # fully connected layer\nmodel.add(Dropout(0.5))  # dropout layer\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(\n    loss=keras.losses.categorical_crossentropy,\n    optimizer=keras.optimizers.Adadelta(),\n    metrics=['accuracy'])  # compile\n\nmodel.fit(\n    X_train,\n    y_train,\n    batch_size=128,\n    epochs=10,\n    verbose=1,\n    validation_data=(X_test, y_test))  # train\nscore = model.evaluate(X_test, y_test, verbose=0)  # evaluate\nprint('Loss:', score[0])\nprint('Accuracy:', score[1])\n","sub_path":"keras_cnn.py","file_name":"keras_cnn.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461998957","text":"def letterCasePermutation(self, S: str) -> List[str]:\n    \"\"\"\n    Given a string S,\n    we can transform every letter individually\n    to be lowercase or uppercase to create another string.\n\n    Return a list of all possible strings we could create.\n\n    Examples:\n    Input: S = \"a1b2\"\n    Output: [\"a1b2\", \"a1B2\", \"A1b2\", \"A1B2\"]\n\n    Input: S = \"3z4\"\n    Output: [\"3z4\", \"3Z4\"]\n\n    Input: S = \"12345\"\n    Output: [\"12345\"]\n\n    Note:\n    S will be a string with length between 1 and 12.\n    S will consist only of letters or digits.\n    \"\"\"\n    perms = [\"\"]\n    for c in S:\n        if c.isalpha():\n            perms = [p + c.lower() for p in perms] + [p + c.upper() for p in perms]\n        else:\n            perms = [p + c for p in perms]\n\n    return perms\n","sub_path":"algo/permutation/letterCase.py","file_name":"letterCase.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242204207","text":"import collections\nfrom collections import Iterable\n\nimport cv2\nimport numpy\nfrom object_detection.utils.visualization_utils import draw_bounding_box_on_image_array, \\\n    STANDARD_COLORS\n\n\nclass DataLabel(object):\n    def __init__(self):\n        self.data = None\n        self.labels = None\n\n\ndef draw_boxes_on_image(\n        image, boxes, classes, scores, category_index, min_scores_thresh,\n        line_thickness=4):\n\n    if not isinstance(min_scores_thresh, Iterable):\n        min_scores_thresh_per_class = {\n            class_data['name']: min_scores_thresh\n            for class_data in category_index.values()\n        }\n    elif isinstance(min_scores_thresh, dict):\n        min_scores_thresh_per_class = {\n            class_data['name']: min_scores_thresh['others']\n            for class_data in category_index.values()\n        }\n        min_scores_thresh.pop('others')\n        min_scores_thresh_per_class.update(min_scores_thresh)\n    else:\n        min_scores_thresh_per_class = {\n            class_data['name']: 0.5\n            for class_data in category_index.values()\n        }\n\n    drawn_boxes = []\n    for i in range(boxes.shape[0]):\n        class_name = category_index[classes[i]]['name']\n\n        if scores[i] < min_scores_thresh_per_class[class_name]:\n            continue\n\n        drawn_boxes.append((class_name, boxes[i]))\n\n    for class_name, box in drawn_boxes:\n        height, width = image.shape[:2]\n        ymin, xmin, ymax, xmax = box\n        pt1 = pt1_x, pt1_y = (int(xmin*width), int(ymin*height))\n        pt2 = pt2_x, pt2_y = (int(xmax*width), int(ymax*height))\n        color = list(numpy.random.random(size=3) * 256)\n        cv2.rectangle(image, pt1, pt2, color, thickness=2)\n\n        font_face = cv2.FONT_HERSHEY_PLAIN\n        thickness = 1\n        baseline = 0\n        ((text_width, text_height), _) = cv2.getTextSize(class_name, font_face, thickness, baseline)\n\n        #cv2.rectangle(image, pt1, (pt1_x + text_width, 
pt1_y + text_height), (255, 255, 255), -1)\n cv2.putText(\n image, class_name, (pt1_x + 2*thickness, pt1_y + text_height + 2*thickness),\n fontFace=font_face, fontScale=1, color=(0, 0, 0), thickness=thickness\n )\n\n return image\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"564708067","text":"#\n# Determine messages and constants used in the file-system-protection module\n#\n# Determine the size of the encrypt/decrypt data_block\nDATA_BLOCK_SIZE = 32768\n\n# Determine the value returned by the function\nERROR_CODE = -1\nSUCCESS_CODE = 0\n\nACCESS_DENIED_CODE = 5\n\n# Define operating system platform\nUNKNOWN_PLATFORM = -1\nWINDOWS_PLATFORM = 0\nLINUX_PLATFORM = 1\n\nFILE_NOT_FOUND_CODE = -2\nDIR_NOT_FOUND_CODE = -3\n\nFILE_EXIST_MSG = 'File exist.'\nDIR_EXIST_MSG = 'Directory exist.'\n\nFILE_TYPE = 0\nDIR_TYPE = 1\nREGISTRY_TYPE = 2\n\nTYPE_ENCRYPT_FILE = '.enc'\n\n# Determine the messages encrypt/decrypt file and directory\nPASSWORD_INCORRECT_MSG = 'Error. Password incorrect'\nENCRYPT_FILE_SUCCESS_MSG = 'Done encrypt file.'\nENCRYPT_FILE_ERROR_MSG = '#Error in process encrypt file.'\nDECRYPT_FILE_SUCCESS_MSG = 'Done decrypt file.'\nDECRYPT_FILE_ERROR_MSG = '#Error in process decrypt file.'\nENCRYPT_DIR_SUCCESS_MSG = 'Done encrypt directory.'\nDECRYPT_DIR_SUCCESS_MSG = 'Done decrypt directory.'\n\nCONFIRM_DEL = 0\nSKIP_CODE = 1\nOVERRIDE_CODE = 2\nPASSWORD_INCORRECT_CODE = -4\n\nCONFIRM_DEL_MSG = 'Confirm override.'\nSKIP_OVERRIDE_MSG = 'Skip override.'\n\n\nDEFAULT_PASSWORD = 'bkcs'\n\n# Determine the error value / messages returned by the database query function\nCREATE_DB_ERROR = -4\nINSERT_RECORD_ERROR = -5\nUPDATE_RECORD_ERROR = -6\nDELETE_RECORD_ERROR = -7\n\nCREATE_DB_ERROR_MSG = 'There was an error creating the database.'\nCREATE_DB_SUCCESS_MSG = 'Create success database: '\n\nQUERY_TABLE_DB_ERROR_MSG = 'The error connect to database.'\n\nADD_FILE_MSG = 'The new file add to folder.'\nCHANGE_FILE_MSG = 'File is changed.'\nDELETE_FILE_MSG = 'File is deleted.'\nNOT_CHANGE_FILE_MSG = 'File isn\\'t changed.'\n\n\nFILE_CHECK_TAG = 'file_check'\nFOLDER_CHECK_TAG = 'folder_check'\nREGISTRY_CHECK_TAG = 'windows_registry'\nCHECK_LIST_TAG = 'check_list'\n\n# New sys_check add to database is value 0\nSYS_CHECK_OBJECT_NEW = 0\nSYS_CHECK_OBJECT_OLD = 1\n\nSYS_CHECK_OBJECT_IGNORE = 1\n\nSYS_CHECK_OBJECT_XML_FILE = 'xml'\nSYS_CHECK_OBJECT_CSV_FILE = 'csv'\n\n# Registry\nVALUE_CHANGE = 'Registry value change.'\nVALUE_ADD = 'Registry value add.'\nVALUE_DEL = 'Registry value deleted.'\nKEY_ADD = 'Registry key add.'\nKEY_DEL = 'Registry key deleted.'\n\nPATH_DIR_EVENT_LOG = r\"C:\\Event_Logs\"\n\n\nADD_FILE_ACTION_MSG = 'Create File'\nCHANGE_FILE_ACTION_MSG = 'Modify File'\nDELETE_FILE_ACTION_MSG = 'Delete File'\nDELETE_DIR_ACTION_MSG = 'Delete Dir'\nCHANGE_FILE_ACL = \"Modify ACL\"\nRENAME_FILE_ACTION_MSG = 'Rename'\nMOVE_FILE_ACTION_MSG = 'Move'\nRESTORE_FILE_ACTION_MSG = 'Restore'\nRECYCLE_FILE_ACTION_MSG = 'Recycle'\n\n\n# ----------------------------------- Handle Audit Linux -----------------------------------#\nAUDIT_RULE_LINUX_PATH = \"/etc/audit/rules.d/audit.rules\"\nPATH_AUDIT_LOG = \"/var/log/audit/audit.log\"\n","sub_path":"codes/program_msg.py","file_name":"program_msg.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"541238840","text":"import 
sys,time,os,random,cPickle, math\nimport traceback\nimport pygame, thread\nfrom pygame.locals import *\n \n \n \n \n \n# Skeleton class \nclass Skeleton:\n\n\n \n    # Initialise pygame, and load the fonts\n    def init_pygame(self, w, h): \n        pygame.init()\n        default_font_name = pygame.font.match_font('bitstreamverasansmono', 'verdana', 'sans')\n        if not default_font_name: \n            default_font_name = pygame.font.get_default_font() # fall back to pygame's bundled default font\n        self.default_font = pygame.font.Font(default_font_name, 36)\n        self.screen = pygame.display.set_mode((w,h)) \n        #store screen size\n        self.w = self.screen.get_width()\n        self.h = self.screen.get_height()\n        self.centre_x = self.w/2\n        self.centre_y = self.h/2\n \n \n \n    #initialise any surfaces that are required\n    def init_surfaces(self): \n        self.draw_buf = pygame.Surface(self.screen.get_size()) \n        self.draw_buf.fill((255,255,255))\n \n \n\n \n    # init routine, sets up the engine\n    def __init__(self, size=(800,600), draw_fn=None, tick_fn=None, event_fn=None, quit_fn=None): \n        # screen size\n        self.init_pygame(size[0],size[1])\n        self.init_surfaces()\n        self.fps = 60 \n        self.clock = pygame.time.Clock() \n        self.last_frame_time = time.clock()\n        self.draw_fn = draw_fn\n        self.tick_fn = tick_fn\n        self.quit_fn = quit_fn\n        self.handle_event = event_fn\n        self.looping = True\n \n \n    # handles shutdown\n    def quit(self):\n        if self.quit_fn:\n            self.quit_fn()\n        self.looping = False\n \n    # wrapper for drawing text on screen\n    def text(self, buffer, text, color, position): \n        textImage = self.default_font.render(text, True, color)\n        buffer.blit(textImage, position)\n \n    def transparent_text(self, buffer, text, color, position, alpha=255): \n        textImage = self.default_font.render(text, False, color)\n        textImage.set_alpha(alpha)\n        buffer.blit(textImage, position)\n \n \n    def screen_text(self, text, position, color=(0,0,0)):\n        self.text(self.screen, text, color, position)\n\n \n    # this is the redraw code. Add drawing code between the \"LOCK\" and \"END LOCK\" sections\n    def flip(self):\n        self.screen.blit(self.draw_buf, (0,0)) \n \n        if self.draw_fn!=None:\n            self.draw_fn(self.screen)\n        pygame.display.flip()\n \n \n    # Get the array containing the up or down states for each key. For example, key_state[K_UP] is true if the up arrow key is pressed, [K_q] if Q is pressed, etc.\n    def check_key_state(self):\n        self.key_state = pygame.key.get_pressed()\n \n \n \n    #frame loop. Called on every frame. all calculation should be carried out here \n    def tick(self): \n        self.clock.tick(self.fps) \n        self.handle_events() \n        delta_t = time.clock() - self.last_frame_time \n        self.last_frame_time = time.clock() \n        self.check_key_state() \n\n        if self.tick_fn!=None:\n            self.tick_fn(delta_t)\n        self.flip()\n\n \n \n    #returns last mouse position\n    def get_mouse(self):\n        return self.mouse_pos\n \n    #Event handlers. These are called as events arrive\n    def keydown(self,event):\n        return\n \n    def keyup(self,event):\n        return\n \n    def mouseup(self,event):\n        return\n \n    def mousedown(self,event):\n        return\n \n    def mousemove(self,event):\n        (self.mouse_pos) = event.pos\n        return\n \n \n \n    #event handling code. 
Calls the relevant handlers\n    def handle_events(self):\n        for event in pygame.event.get(): \n            if event.type==KEYDOWN:\n \n                if event.key==K_ESCAPE:\n                    self.quit()\n                else:\n                    self.keydown(event)\n \n            if event.type==KEYUP:\n                self.keyup(event)\n \n            if event.type == QUIT:\n                self.quit()\n \n            elif event.type == MOUSEBUTTONUP:\n                self.mouseup(event)\n            elif event.type == MOUSEBUTTONDOWN:\n                self.mousedown(event)\n            elif event.type == MOUSEMOTION: \n                self.mousemove(event)\n            if self.handle_event:\n                self.handle_event(event)\n\n    #main loop. Just runs tick until the program exits \n    def main_loop(self):\n        while self.looping:\n            self.tick()\n        pygame.quit()\n \n \n\n\n \n\n\n\n ","sub_path":"skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"530926602","text":"\"\"\"\nOperator for scraping articles from every organisation.\n\"\"\"\n\nimport json\nimport os\n\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nimport scrapy.signals\n\nfrom reach.sentry import report_exception\nfrom reach.scraper.wsf_scraping import feed_storage\nfrom reach.scraper.wsf_scraping.spiders.who_iris_spider import WhoIrisSpider\nfrom reach.scraper.wsf_scraping.spiders.nice_spider import NiceSpider\nfrom reach.scraper.wsf_scraping.spiders.gov_spider import GovSpider\nfrom reach.scraper.wsf_scraping.spiders.msf_spider import MsfSpider\nfrom reach.scraper.wsf_scraping.spiders.unicef_spider import UnicefSpider\nfrom reach.scraper.wsf_scraping.spiders.parliament_spider import ParliamentSpider\nfrom reach.scraper.wsf_scraping.spiders.acme_spider import AcmeSpider\nimport reach.scraper.wsf_scraping.settings\n\n\nSPIDERS = {\n    'who_iris': WhoIrisSpider,\n    'nice': NiceSpider,\n    'gov_uk': GovSpider,\n    'msf': MsfSpider,\n    'unicef': UnicefSpider,\n    'parliament': ParliamentSpider,\n    'acme': AcmeSpider,\n}\n\n\nclass SpiderOperator(BaseOperator):\n    \"\"\"\n    Scrapes documents from the given organisation into a bucket in S3.\n\n    Args:\n        organisation: The organisation to pull documents from.\n    \"\"\"\n\n    template_fields = ('dst_s3_dir',)\n\n    @apply_defaults\n    def __init__(self, organisation, dst_s3_dir,\n                 item_years, item_max, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.organisation = organisation\n        self.dst_s3_dir = dst_s3_dir\n\n        self.item_count = 0\n        self.scraper_errors = []\n        self.item_max = item_max\n        self.item_years = item_years\n\n    def on_item_scraped(self, item, response):\n        \"\"\" Increments our count of items for reporting/future metrics. 
\"\"\"\n self.item_count += 1\n\n def on_item_error(self, item, response, failure):\n \"\"\"\n Records Scrapy item_error signals; these fire automatically if\n exceptions occur while saving items in the pipeline.\n \"\"\"\n self.scraper_errors.append(\n ('item_error', item, response, failure)\n )\n\n def on_manifest_storage_error(self, exception):\n \"\"\"\n Records our feed storage's error signal, as would occur when\n exceptions occur while saving the manifest to S3.\n \"\"\"\n self.scraper_errors.append(\n ('manifest_storage_error', exception)\n )\n\n @report_exception\n def execute(self, context):\n # Initialise settings for a limited scraping\n os.environ.setdefault(\n 'SCRAPY_SETTINGS_MODULE',\n 'reach.scraper.wsf_scraping.settings'\n )\n\n if not self.dst_s3_dir.startswith('s3://'):\n raise ValueError('Invalid S3 url: %s' % self.dst_s3_dir)\n\n # This monkey-patching only works because Airflow shells out to\n # a new Python interpreter for every task it runs. It thus *must*\n # remain inside execute(), so other code paths don't touch it.\n reach.scraper.wsf_scraping.settings.MAX_ARTICLE = self.item_max\n reach.scraper.wsf_scraping.settings.WHO_IRIS_YEARS = \\\n self.item_years\n\n reach.scraper.wsf_scraping.settings.FEED_URI = \\\n 'manifest' + self.dst_s3_dir\n\n settings = get_project_settings()\n self.log.info(\n \"scrapy settings: %s\",\n json.dumps(\n {k: v for k, v in settings.items()\n if isinstance(v, (str, int, float, bool))}\n )\n )\n\n process = CrawlerProcess(settings, install_root_handler=False)\n spider = SPIDERS[self.organisation]\n crawler = process.create_crawler(spider)\n\n self.item_count = None\n self.scraper_errors = []\n crawler.signals.connect(\n self.on_item_error,\n signal=scrapy.signals.item_error)\n crawler.signals.connect(\n self.on_manifest_storage_error,\n signal=feed_storage.manifest_storage_error)\n\n process.crawl(crawler) # starts reactor\n process.start() # waits for reactor to finish\n\n if self.scraper_errors:\n scraper_errors = self.scraper_errors # put into local for sentry\n self.log.error(\n 'SpiderOperator: scrapy signaled %d errors:',\n len(scraper_errors)\n )\n for tup in self.scraper_errors:\n self.log.error('DummySpiderOperator: %r', tup)\n raise Exception(\n \"%d errors occurred during scrape\" %\n len(scraper_errors)\n )\n","sub_path":"reach/airflow/tasks/spider_operator.py","file_name":"spider_operator.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"16612176","text":"#!/usr/bin/python\n\n#Author: Duncan Campbell\n#Written: January 21, 2015\n#Yale University\n#Description: make a combined NYU-MPA dr7 catalogue using the MPA as the base\n\n###packages###\nfrom __future__ import print_function\nimport numpy as np\nimport h5py\nimport custom_utilities as cu\nimport matplotlib.pyplot as plt\nimport sys\n\ndef main():\n \n ###make sure to change these when running in a new enviorment!###\n #location of data directories\n filepath_2 = cu.get_output_path() + 'processed_data/mpa_dr7/'\n filepath_1 = cu.get_output_path() + 'processed_data/NYU_VAGC/'\n #################################################################\n\n catalogue_2 = 'mpa_dr7_unique'\n catalogue_1 = 'nyu_vagc_dr7'\n \n f_1 = h5py.File(filepath_1+catalogue_1+'.hdf5', 'r')\n dset_1 = f_1.get(catalogue_1)\n dset_1 = np.array(dset_1)\n print(dset_1.dtype.names)\n\n f_2 = h5py.File(filepath_2+catalogue_2+'.hdf5', 'r')\n dset_2 = f_2.get(catalogue_2)\n dset_2 = np.array(dset_2)\n 
print(dset_2.dtype.names)\n \n #open matching files. these are created with the 'match_to_NYU_VAGC.py' script. \n filename = 'matches_into_mpa_vagc.npy'\n match_1 = np.load(filepath_1+'matches/'+filename)\n filename = 'mpa_vagc_matched_to_nyu.npy'\n match_2 = np.load(filepath_1+'matches/'+filename)\n \n \n N_col_1 = len(dset_1.dtype.descr)\n N_col_2 = len(dset_2.dtype.descr[25:]) #number of columns used from NYU VAGC\n \n names_1 = dset_1.dtype.names\n names_2 = dset_2.dtype.names[25:]\n \n dtype = np.dtype(dset_1.dtype.descr + dset_2.dtype.descr[25:])\n \n combined_data = np.recarray((len(dset_1),),dtype=dtype)\n combined_data.fill(-99)\n \n for name in names_1:\n print(name)\n combined_data[name] = dset_1[name]\n \n for name in names_2:\n print(name)\n combined_data[name][match_1] = dset_2[name][match_2]\n \n filename = 'nyu_mpa_vagc_dr7'\n savepath = filepath_1\n f = h5py.File(savepath+filename+'.hdf5', 'w')\n dset = f.create_dataset(filename, data=combined_data)\n\n\nif __name__ == '__main__':\n main()","sub_path":"combine_w_mpa_vagc.py","file_name":"combine_w_mpa_vagc.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96718307","text":"import logging\n\nfrom functools import partial\n\nfrom mozilla_version.gecko import FennecVersion\n\nfrom mozapkpublisher.common.apk.history import get_expected_combos, craft_combos_pretty_names\nfrom mozapkpublisher.common.exceptions import BadApk, BadSetOfApks, NotMultiLocaleApk\nfrom mozapkpublisher.common.utils import filter_out_identical_values\n\nlogger = logging.getLogger(__name__)\n\n\n# x86* must have the highest version code. See bug 1338477 for more context.\n_ARCHITECTURE_ORDER_REGARDING_VERSION_CODE = ('armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64')\n\n\nclass ExpectedPackageNamesCheck:\n def __init__(self, expected_product_types):\n self.expected_product_types = expected_product_types\n\n def validate(self, apks_metadata_per_paths):\n types = set([metadata['package_name'] for metadata in apks_metadata_per_paths.values()])\n\n if not types == set(self.expected_product_types):\n raise BadSetOfApks('Expected product types {}, found {}'.format(self.expected_product_types, types))\n logger.info('Expected product types {} found'.format(self.expected_product_types))\n\n\nclass AnyPackageNamesCheck:\n def validate(self, _):\n pass\n\n\ndef cross_check_apks(apks_metadata_per_paths, package_names_check, skip_checks_fennec, skip_check_multiple_locales,\n skip_check_same_locales, skip_check_ordered_version_codes):\n logger.info(\"Checking APKs' metadata and content...\")\n package_names_check.validate(apks_metadata_per_paths)\n\n if not skip_checks_fennec:\n singular_apk_metadata = list(apks_metadata_per_paths.values())[0]\n _check_version_matches_package_name(\n singular_apk_metadata['firefox_version'], singular_apk_metadata['package_name']\n )\n\n _check_all_apks_have_the_same_firefox_version(apks_metadata_per_paths)\n _check_all_apks_have_the_same_build_id(apks_metadata_per_paths)\n _check_all_architectures_and_api_levels_are_present(apks_metadata_per_paths)\n\n if not skip_check_multiple_locales:\n _check_all_apks_are_multi_locales(apks_metadata_per_paths)\n\n if not skip_check_same_locales:\n _check_all_apks_have_the_same_locales(apks_metadata_per_paths)\n\n if not skip_check_ordered_version_codes:\n _check_apks_version_codes_are_correctly_ordered(apks_metadata_per_paths)\n\n logger.info('APKs are sane!')\n\n\ndef 
_check_piece_of_metadata_is_unique(key, pretty_key, apks_metadata_per_paths):\n    all_items = [metadata[key] for metadata in apks_metadata_per_paths.values()]\n    unique_items = filter_out_identical_values(all_items)\n\n    if not unique_items:\n        raise BadSetOfApks('No {} found'.format(key))\n    if len(unique_items) > 1:\n        raise BadSetOfApks(\"APKs don't have the same {}. Found: {}\".format(pretty_key, unique_items))\n\n    logger.info('All APKs have the same {}: {}'.format(pretty_key, unique_items[0]))\n\n\n_check_all_apks_have_the_same_firefox_version = partial(_check_piece_of_metadata_is_unique, 'firefox_version', 'Firefox version')\n_check_all_apks_have_the_same_build_id = partial(_check_piece_of_metadata_is_unique, 'firefox_build_id', 'Firefox BuildID')\n_check_all_apks_have_the_same_locales = partial(_check_piece_of_metadata_is_unique, 'locales', 'locales')\n\n\ndef _check_version_matches_package_name(version, package_name):\n    sanitized_version = FennecVersion.parse(version)\n\n    if (\n        (package_name == 'org.mozilla.firefox' and sanitized_version.is_release) or\n        # Due to project Dawn, Nightly is now using the Aurora package name. See bug 1357351.\n        (package_name == 'org.mozilla.fennec_aurora' and sanitized_version.is_nightly) or\n        (\n            # XXX Betas aren't following the regular XX.0bY format. Instead they follow XX.0\n            # (which looks like release). Therefore, we can't use sanitized_version.is_beta\n            package_name == 'org.mozilla.firefox_beta'\n            and sanitized_version.is_release\n            and sanitized_version.minor_number == 0\n            # We ensure the patch_number is undefined. Calling sanitized_version.patch_number\n            # directly raises an (expected) AttributeError\n            and getattr(sanitized_version, 'patch_number', None) is None\n        )\n    ):\n        logger.info('Firefox version \"{}\" matches package name \"{}\"'.format(version, package_name))\n\n    else:\n        raise BadApk('Wrong version number \"{}\" for package name \"{}\"'.format(version, package_name))\n\n\ndef _check_apks_version_codes_are_correctly_ordered(apks_metadata_per_paths):\n    architectures_per_version_code = {\n        metadata['version_code']: metadata['architecture']\n        for metadata in apks_metadata_per_paths.values()\n    }\n\n    if len(architectures_per_version_code) != len(apks_metadata_per_paths):\n        raise BadSetOfApks('Some APKs are sharing the same version code! APKs metadata: {}'.format(\n            apks_metadata_per_paths\n        ))\n\n    sorted_architectures_per_version_code = tuple([\n        architectures_per_version_code[version_code]\n        for version_code in sorted(architectures_per_version_code.keys())\n    ])\n\n    previous_index = -1\n    for architecture in sorted_architectures_per_version_code:\n        index = _ARCHITECTURE_ORDER_REGARDING_VERSION_CODE.index(architecture)\n        if index <= previous_index:\n            raise BadSetOfApks(\n                'APKs version codes are not correctly ordered. Expected order: {}. Order found: {}. APKs metadata: {}'.format(\n                    _ARCHITECTURE_ORDER_REGARDING_VERSION_CODE, sorted_architectures_per_version_code, apks_metadata_per_paths\n                )\n            )\n        previous_index = index\n\n    logger.info('APKs version codes are correctly ordered: {}'.format(architectures_per_version_code))\n\n\ndef _check_all_apks_are_multi_locales(apks_metadata_per_paths):\n    for path, metadata in apks_metadata_per_paths.items():\n        locales = metadata['locales']\n\n        if not isinstance(locales, tuple):\n            raise BadApk('Locale list is not a tuple. 
\"{}\" has: {}'.format(path, locales))\n\n number_of_locales = len(locales)\n\n if number_of_locales <= 1:\n raise NotMultiLocaleApk(path, locales)\n\n logger.info('\"{}\" is multilocale.'.format(path))\n\n\ndef _check_all_architectures_and_api_levels_are_present(apks_metadata_per_paths):\n single_metadata = list(apks_metadata_per_paths.values())[0]\n firefox_version = single_metadata['firefox_version']\n\n expected_combos = get_expected_combos(firefox_version)\n\n current_combos = set([\n (metadata['architecture'], metadata['api_level'])\n for metadata in apks_metadata_per_paths.values()\n ])\n\n missing_combos = expected_combos - current_combos\n if missing_combos:\n raise BadSetOfApks('One or several APKs are missing for Firefox {}: {}'.format(\n firefox_version, craft_combos_pretty_names(missing_combos)\n ))\n\n extra_combos = current_combos - expected_combos\n if extra_combos:\n raise BadSetOfApks('One or several APKs are not allowed for Firefox {}: {}. \\\nPlease make sure mozapkpublisher has allowed them to be uploaded.'.format(\n firefox_version, craft_combos_pretty_names(extra_combos)\n ))\n\n logger.info('Every expected APK was found!')\n","sub_path":"mozapkpublisher/common/apk/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"338673481","text":"import speech_recognition as sr\nimport pyaudio\nimport os\n\nr = sr.Recognizer()\n\nprint(\"Speck Now\")\ndef speek():\n with sr.Microphone() as source:\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print(text)\n\n except sr.UnknownValueError:\n print(\"sorry, Unknown Value Error\")\n return\n except sr.RequestError:\n print(\"sorry, Request Error\")\n return\n\nspeek()","sub_path":"speach_to_text.py","file_name":"speach_to_text.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522090686","text":"# -*- coding: UTF-8 -*-\nimport datetime\nimport logging as lg\n\n\nclass InputDataUser:\n mois = {\n 1: \"Janvier\", 2: \"Février\", 3: \"Mars\", 4: \"Avril\", 5: \"Mai\", 6: \"Juin\", 7: \"Juillet\", 8: \"Août\",\n 9: \"Septembre\", 10: \"Octobre\", 11: \"Novembre\", 12: \"Décembre\"}\n jourToday: int\n\n def __init__(self, depenses=\"0.00\", description=\"\", revenumois=\"0.00\", revenueuro=\"0.00\", categories=\"\"):\n self.moisToday = datetime.date.today().month\n self.jourToday = datetime.date.today().day\n self.anneeToday = datetime.date.today().year\n # Prénom et Nom\n self.user = {\"nom\": None, \"prenom\": None}\n self.depenseEuro = depenses\n self.depenseDescription = description\n self.depenseMois = self.mois[self.moisToday]\n self.revenuMois = revenumois\n self.revenuEuro = revenueuro\n self.categories = categories\n self.revenuesMoisUsers = ()\n self.depensesMoisUsers = ()\n self.dataForDB = {\n 'Users': \"\",\n 'Dates': (str(self.moisToday) + \"/\" + str(self.jourToday) + \"/\" + str(self.anneeToday)),\n 'Mois': \"\", 'Descriptions': \"\", 'Depenses': \"\", 'Revenus': \"\", 'Categories': \"\"\n }\n\n # Users\n @property\n def get_users(self):\n if (self.user[\"nom\"] is None) or (self.user[\"prenom\"] is None):\n lg.warning(\"il n'y a pas nom et prénom\")\n else:\n print(self.user[\"nom\"], self.user[\"prenom\"])\n\n def set_users(self, nom, prenom):\n self.user[\"nom\"] = nom\n self.user[\"prenom\"] = prenom\n\n # Catégorie\n def get_categories(self):\n return self.categories\n\n 
def set_categories(self, categorie):\n        self.categories = categorie\n\n    # Expenses\n    def get_depenses(self):\n        return self.depenseMois, (self.depenseDescription, self.depenseEuro)\n\n    def set_depenses(self, description, depenses):\n        self.depenseDescription = description\n        self.depenseEuro = depenses\n        return self.depenseMois, (self.depenseDescription, self.depenseEuro)\n\n    # Revenues\n    def get_revenues(self):\n        return self.revenuEuro, self.revenuMois\n\n    def set_revenues(self, revenueuro):\n        self.revenuEuro = revenueuro\n\n    def get_revenues_users_mois(self):\n        return self.revenuesMoisUsers\n\n    def set_revenues_users_mois(self, mois, revenue):\n        self.revenuesMoisUsers = (mois, revenue)\n\n    def get_depenses_users_mois(self):\n        return self.user, self.get_categories(), self.get_depenses()\n\n    def set_depenses_users_mois(self, user, categorie, mois, description, depense):\n        self.depensesMoisUsers = (user, categorie, mois, description, depense)\n\n    @property\n    def add_data_for_db(self):\n\n        self.dataForDB['Users'] = self.user\n        self.dataForDB['Descriptions'] = self.depenseDescription\n        self.dataForDB['Depenses'] = self.depenseEuro\n        self.dataForDB['Mois'] = self.depenseMois\n        self.dataForDB['Revenus'] = self.revenuEuro\n        self.dataForDB['Categories'] = self.categories\n\n        return self.dataForDB\n\n\ndef main():\n    teste = InputDataUser()\n    teste.set_users(\"Nom Teste\", \"Prénom Teste\")\n    teste.set_categories(\"Catégorie Teste\")\n    teste.set_depenses(\"Description Teste\", \"22.50\")\n    teste.get_users\n    print(teste.get_depenses())\n    print(teste.get_depenses_users_mois())\n\n    print(\"\\n\", teste.add_data_for_db)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Package/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"649099607","text":"#!/usr/bin/python3\n\nimport helper_functions as hf\nimport RPi.GPIO as GPIO\nimport can\nimport time\nimport os\nimport queue\nfrom threading import Thread\nfrom datetime import datetime\nfrom gps3 import gps3\n\n# initial Raspi setup for CAN Shield interfacing\nled = 22\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(led,GPIO.OUT)\nGPIO.output(led,True)\n\n# configure GPS\ngpsd_socket = gps3.GPSDSocket()\ndata_stream = gps3.DataStream()\ngpsd_socket.connect()\ngpsd_socket.watch()\n\n# Bring up can0 interface at 500kbps\nos.system(\"sudo /sbin/ip link set can0 up type can bitrate 500000\")\ntime.sleep(0.1)\n\ntry:\n    bus = can.interface.Bus(channel='can0', bustype='socketcan_native')\nexcept OSError:\n    print('Cannot find PiCAN board.')\n    GPIO.output(led,False)\n    exit()\n\ntemperature = 0\nrpm = 0\nspeed = 0\nthrottle = 0\ndistance = 0\ndistance_total = 0\nrpi_speed_epoch_time = 0\ntime2 = 0\ntime1 = 0\nvspeed2 = 0\nvspeed1 = 0\ncurr_lat = 0\nprev_lat = 0\ncurr_lon = 0\nprev_lon = 0\nfirst_time12 = True\nlogged_data = ''\nlogged_data_can_msg = ''\n\ncount = 0\nsp_count = 0\ntime_spent_at_stop = 0.0\ntime_start_at_stop = 0.0\n#file_count = 0\noutfile = 0\noutfile_name = 0\n############################################################\nfile_open = False\n############################################################\nfile_name = ''\n\nDEPOT_BEGIN = True\nSTARTED_FROM_DEPOT = False # what is the purpose of this flag?\nSTARTED_FROM_ROUTE = True\nRETURN_TO_DEPOT = False\nCIRCULATOR = True\nFIRST_TIME_START = True\nNEW_DATA_START_LOC = False\nNEW_DATA_STOP_LOC = False\n\n# Main control starts\ntry:\n    while True:\n        
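# note: each query below is a standard OBD-II mode-0x01 PID request, data = [byte count, mode, PID, padding]; hf.PID_REQUEST/hf.PID_REPLY are presumably the usual 0x7DF/0x7E8 arbitration IDs, and the reply echoes the requested PID back at data[2]\n        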
GPIO.output(led,True)\n        # Send Engine RPM request\n        msg = can.Message(arbitration_id=hf.PID_REQUEST,data=[0x02,0x01,hf.ENGINE_RPM,0x00,0x00,0x00,0x00,0x00],extended_id=False)\n        bus.send(msg)\n        rpm_timeStamptx = datetime.now().strftime('%H:%M:%S.%f')\n        #print('sent RPM msg')\n        #time.sleep(0.01)\n        # waiting for RPM\n        #print('waiting for rpm')\n        rpm_not_rx = True\n        while rpm_not_rx:\n            #print('waiting for rpm')\n            message = bus.recv()\n            logged_data_can_msg = str(message) + '\\n'\n            #print(str(message))\n            if message.arbitration_id == hf.PID_REPLY and message.data[2] == hf.ENGINE_RPM:\n                rpm_timeStamp = datetime.now().strftime('%H:%M:%S.%f')\n                rpm = round(((message.data[3]*256) + message.data[4])/4)\n                rpm_not_rx = False\n                #print('rpm received')\n\n        # Send Vehicle speed request\n        #time.sleep(0.01)\n        msg = can.Message(arbitration_id=hf.PID_REQUEST,data=[0x02,0x01,hf.VEHICLE_SPEED,0x00,0x00,0x00,0x00,0x00],extended_id=False)\n        bus.send(msg)\n        speed_timeStamptx = datetime.now().strftime('%H:%M:%S.%f')\n        #print('sent Speed msg')\n        #time.sleep(0.01)\n        # waiting for Speed\n        #print('waiting for speed')\n        speed_not_rx = True\n        while speed_not_rx:\n            #print('waiting for speed')\n            message = bus.recv()\n            logged_data_can_msg += str(message) + '\\n'\n            #print(str(message))\n            if message.arbitration_id == hf.PID_REPLY and message.data[2] == hf.VEHICLE_SPEED:\n                rpi_time = time.time()\n                speed_timeStamp = datetime.now().strftime('%H:%M:%S.%f')\n                speed = message.data[3]\n                vspeed2 = speed\n                time2 = message.timestamp\n                speed_not_rx = False\n                #print('speed received')\n\n        # Send Throttle position request\n        #time.sleep(0.01)\n        msg = can.Message(arbitration_id=hf.PID_REQUEST,data=[0x02,0x01,hf.THROTTLE,0x00,0x00,0x00,0x00,0x00],extended_id=False)\n        bus.send(msg)\n        throttle_timeStamptx = datetime.now().strftime('%H:%M:%S.%f')\n        #print('sent throttle msg')\n        #time.sleep(0.01)\n        # waiting for throttle\n        #print('waiting for throttle')\n        throttle_not_rx = True\n        while throttle_not_rx:\n            #print('waiting for throttle')\n            message = bus.recv()\n            logged_data_can_msg += str(message) + '\\n'\n            #print(str(message))\n            if message.arbitration_id == hf.PID_REPLY and message.data[2] == hf.THROTTLE:\n                throttle_timeStamp = datetime.now().strftime('%H:%M:%S.%f')\n                throttle = round((message.data[3]*100)/255)\n                throttle_not_rx = False\n                time.sleep(0.01)\n                #print('throttle received')\n\n        # End transmission\n        GPIO.output(led,False)\n\n        #time.sleep(0.2)\n\n        logged_data = rpm_timeStamptx + ', ' + rpm_timeStamp + ', {0:d}, '.format(rpm) + speed_timeStamptx + ', ' + speed_timeStamp + ', {0:f}, '.format(time2) + '{0:f}, '.format(rpi_time) + '{0:d}, '.format(speed) + throttle_timeStamptx + ', ' + throttle_timeStamp + ', {0:d}, '.format(throttle)\n\n        # calculate distance\n        if first_time12:\n            time1 = time2\n            vspeed1 = vspeed2\n            first_time12 = False\n        # convert speed from km/h to m/s in locals so vspeed1/vspeed2 stay in km/h\n        # (converting vspeed1 in place double-converted it on every pass after the first)\n        v1 = vspeed1 * 5 / 18\n        v2 = vspeed2 * 5 / 18\n        # trapezoidal integration of speed over the sample interval\n        distance += (v2 + v1)*(time2 - time1)/2\n        distance_total += (v2 + v1)*(time2 - time1)/2\n        vspeed1 = vspeed2\n        time1 = time2\n\n        #print('exited CAN Rx LOOP')\n\n        # read GPS data\n        for new_data in gpsd_socket:\n            #print('received GPS')\n            if new_data:\n                #print('received valid GPS data')\n                data_stream.unpack(new_data)\n                curr_lat = data_stream.TPV['lat']\n                if curr_lat == 'n/a':\n                    curr_lat = 0\n                curr_lon = data_stream.TPV['lon']\n                if curr_lon == 'n/a':\n                    curr_lon = 0\n                curr_lat_temp = curr_lat\n                curr_lon_temp = curr_lon\n                if prev_lat == curr_lat and prev_lon == curr_lon:\n                    curr_lat_temp = 0\n                    
curr_lon_temp = 0\n                else:\n                    prev_lat = curr_lat\n                    prev_lon = curr_lon\n                logged_data += str(curr_lat_temp) + ', ' + str(curr_lon_temp) + ', ' + str(data_stream.TPV['time']) + ', ' + datetime.now().strftime('%H:%M:%S.%f')\n                break\n            else:\n                continue\n\n        logged_data += ', {0:d}, {1:f}, {2:f}'.format(count,distance_total,distance)\n\n        if not file_open:\n            file_name = 'Documents/logs/log_DOJ_stopnwait2_' + str(datetime.now()) + '.csv'\n            file_name_can_msg = 'Documents/logs/log_DOJ_stopnwait2_can_msg_' + str(datetime.now()) + '.csv'\n            outfile = open(file_name,'w+')\n            outfile_can_msg = open(file_name_can_msg, 'w+')\n            print('Logging data timestamps...')\n            file_open = True\n        if file_open:\n            print(logged_data,file = outfile)\n            print(logged_data_can_msg,file = outfile_can_msg)\n\n        count += 1\n        #print(logged_data)\n\nexcept KeyboardInterrupt:\n    #Catch keyboard interrupt\n    GPIO.output(led,False)\n    #if file_open:\n    #outfile.close()\n    # close CAN interface\n    os.system(\"sudo /sbin/ip link set can0 down\")\n    print('\\n\\rKeyboard interrupt')\n","sub_path":"Documents/smart_car_old_scripts/python_test_scripts/main_control_py3_test_stopnwait2.py","file_name":"main_control_py3_test_stopnwait2.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"405911443","text":"import urllib, urllib2, json\nfrom pygeocoder import Geocoder\n\n\ndef decode_address_to_coordinates(address):\n    params = {\n        'address': address,\n        'sensor': 'false',\n    }\n    url = 'http://maps.google.com/maps/api/geocode/json?' + urllib.urlencode(params)\n    response = urllib2.urlopen(url)\n    result = json.load(response)\n    try:\n        return result['results'][0]['geometry']['location']\n    except:\n        return None\n\n\ndef complete_result(address):\n    params = {\n        'address': address,\n        'sensor': 'false',\n    }\n    url = 'http://maps.google.com/maps/api/geocode/json?' 
+ urllib.urlencode(params)\n response = urllib2.urlopen(url)\n result = json.load(response)\n try:\n return result['results'][0]\n except:\n return None\n\n\ndef get_zipcode(address):\n try:\n result = Geocoder.geocode(address)\n isValid = result.valid_address\n print(isValid, 'if valid or not')\n if isValid:\n print(address, ' is valid address.')\n else:\n print(address, ' is invalid address')\n\n print('postal code is ', result[0].postal_code)\n return result[0].postal_code\n except:\n return None\n\n\ndef get_address_using_coordinates(latitude, longitude):\n try:\n result = Geocoder.reverse_geocode(latitude, longitude)\n return result\n except:\n return None\n\n# print decode_address_to_coordinates(\"Toronto\")\n# print decode_address_to_coordinates(\"Birmingham, AL\")\n#print decode_result('Toronto')\n#print decode_result('Birmingham, AL')\n#print get_zipcode(\"451-499 24th Street North, Birmingham, AL 35203, USA\")\n#print get_zipcode(\"Birmingham, AL\")\n#address_la = decode_address_to_coordinates(\"Birmingham, AL\")\n#print address_la\n\n#result = get_address_using_coordinates(address_la['lat'],address_la['lng'])\n#print result[0]\n#print result[1]\n#print result[2]\n#print result[3]\n#print result[4]\n#print result[5]\n#print result[6]\n#print result[7]\n#print result.country__short_name\n#print result.postal_code\n#print result.street_number\n#print result.route\n#print result.administrative_area_level_1\n#print result.city\n#print result.state\n#print result.state__short_name\n#print result.country\n#print result.formatted_address\n#print result.count\n#print result.country__short_name\n#print get_address_using_coordinates(address_co[0]['])\n","sub_path":"processWords/python_scripts/address_coordinates.py","file_name":"address_coordinates.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128476428","text":"import io\nimport os\nfrom distutils.file_util import copy_file\nfrom setuptools import setup, find_packages\n\n\nbase_gh_url = 'https://github.com/sowemail/'\ndir_path = os.path.abspath(os.path.dirname(__file__))\nreadme_path = os.path.join(dir_path, 'README.rst')\nversion_path = os.path.join(dir_path, 'VERSION.txt')\n\nwith io.open(readme_path, encoding='utf-8') as readme_file:\n readme = readme_file.read()\nwith io.open(version_path, encoding='utf-8') as version_file:\n version = version_file.read().strip()\n\ncopy_file(version_path,\n os.path.join(dir_path, 'sowemail', 'VERSION.txt'),\n verbose=False)\n\n\ndef parse_requirements(filename):\n \"\"\"load requirements from a pip requirements file\"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and\n (not line.startswith(\"#\") and not line.startswith('-'))]\n\n\nsetup(\n name='sowemail',\n version=version,\n author='Fourat ZOUARI',\n author_email='opensource@sowemail.com',\n url='{}sowemail-python'.format(base_gh_url),\n download_url='{}sowemail-python/tarball/{}'.format(base_gh_url, version),\n packages=['sowemail'],\n include_package_data=True,\n install_requires=parse_requirements('install-requirements.pip'),\n tests_require=parse_requirements('test-requirements.pip'),\n license='MIT',\n description='SoWeMail library for Python 3',\n long_description_content_type='text/x-rst',\n long_description=readme,\n keywords=[\n 'API',\n 'SOWEMAIL'],\n python_requires='>=3.6.*',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 
'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"603741428","text":"from django.conf.urls import patterns, url\nfrom web import views\n\nurlpatterns = patterns(\n '',\n url(r'groups/home/', views.groups_home),\n url(r'groups/ping/', views.groups_ping),\n url(r'groups/events/', views.groups_events),\n url(r'login/', views.homepage),\n url(r'^$', views.landingpage),\n)\n","sub_path":"partyup/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465701161","text":"import axelrod as axl\nimport typing\nimport sys, inspect\nimport importlib\nimport logging\nimport matplotlib.pyplot as plt\n\ndef do_axelrod_tournament(players: list, output_file_name: str,\n noise: float = 0, noise_bias: bool = False,\n prob_end: float = 0, seed: int = 0) -> None:\n \"\"\"\n Do an Axelrod tournament with the given arguments and create the boxplot, payoff matrix, and winplot of the tournament\n\n Arguments:\n players: list of Player playing in the Axelrod tournament. Necessary argument\n output_file_name: the output name for the boxplot, payoff matrix, and the winplot for the current Axelrod tournament\n noise: percentage of noise (mistake, flipping a decision) happening in each player's decision\n noise_bias: if True, only allows mistake that changes cooperation to defection\n prob_end: The probability of ending a match between a pair of players after each game\n seed: random seed for the current tournament (for reproducibility)\n \"\"\"\n axl.seed(seed)\n tournament = axl.Tournament(players, noise=noise, noise_bias=noise_bias, prob_end=prob_end) # Create a tournament\n results = tournament.play() # Play the tournament\n plot = axl.Plot(results)\n plot.boxplot()\n plt.savefig('boxplot_' + output_file_name, format='png')\n plot.winplot()\n plt.savefig('winplot_' + output_file_name, format='png')\n plot.payoff()\n plt.savefig('payoff_' + output_file_name, format='png')\n plt.close('all')\n\ndef do_case_tournament(players: list, output_file_name: str,\n turns: int = axl.DEFAULT_TURNS, prob_end: float = None,\n maximum_round: int = 20, noise: float = 0,\n noise_bias: bool = False, replace_amount: int = 1,\n game: axl.Game = None, seed: int = 0) -> None:\n \"\"\"\n Do a Case simulation with the given arguments and create the population graph of the simulation\n\n Arguments:\n players: list of Player playing in the Case simulation\n output_file_name: the output name for the population graph for the current simulation\n turns: the maximum number of turns for each Match between a pair of players\n prob_end: The probability of ending a match between a pair of players after each game\n noise: percentage of noise (mistake, flipping a decision) happening in each player's decision\n noise_bias: if True, only allows mistake that changes cooperation to defection\n replace_amount: the amount of players replaced after each simulation round\n game: iterated prisoner's dilemma game parameter for the simulation\n seed: random seed for the current tournament (for reproducibility)\n \"\"\"\n axl.seed(seed)\n tournament = axl.CaseProcess(players, turns=turns, maximum_round=maximum_round,\n replace_amount=replace_amount, noise=noise, noise_bias=noise_bias, game=game,\n 
prob_end=prob_end)\n tournament.play()\n tournament.populations_plot()\n plt.savefig(output_file_name, format='png', bbox_inches='tight')\n plt.close()\n\n# The list of agents playing in the Axelrod's first tournament, except Graaskamp\naxl_first_players = [\n axl.TitForTat(),\n axl.TidemanAndChieruzzi(),\n axl.Nydegger(),\n axl.Grofman(),\n axl.Shubik(),\n axl.SteinAndRapoport(),\n axl.Grudger(),\n axl.Davis(),\n axl.RevisedDowning(),\n axl.Feld(),\n axl.Joss(),\n axl.Tullock(),\n axl.UnnamedStrategy(),\n axl.Random()\n]\n\n# The list of agents playing in the Axelrod's second tournament. Incomplete\naxl_second_players = [\n axl.Black(),\n axl.Borufsen(),\n axl.Cave(),\n axl.Champion(),\n axl.Colbert(),\n axl.Eatherley(),\n axl.Getzler(),\n axl.Gladstein(),\n axl.GoByMajority(),\n axl.GraaskampKatzen(),\n axl.Harrington(),\n axl.Kluepfel(),\n axl.Leyvraz(),\n axl.Mikkelson(),\n axl.MoreGrofman(),\n axl.MoreTidemanAndChieruzzi(),\n axl.RichardHufford(),\n axl.Tester(),\n axl.Tranquilizer(),\n axl.Weiner(),\n axl.White(),\n axl.WmAdams(),\n axl.Yamachi()\n]\n\n# The list of agents playing in the Stewart-Plotkin's tournament.\nsteward_plotkin_players = [\n axl.ZDExtort2(),\n axl.HardGoByMajority(),\n axl.HardTitForTat(),\n axl.HardTitFor2Tats(),\n axl.GTFT(),\n axl.ZDGTFT2(),\n axl.Calculator(),\n axl.Prober(),\n axl.Prober2(),\n axl.Prober3(),\n axl.HardProber(),\n axl.NaiveProber()\n]\n\n# The list of agents playing in the Case's simulation\ncase_players = [\n axl.Cooperator(),\n axl.Defector(),\n axl.TitForTat(),\n axl.Grudger(),\n axl.Detective(),\n axl.TitFor2Tats(),\n axl.WinStayLoseShift(),\n axl.Random()\n]\n\n# The list of agents who won or survived the most based on the previous tournaments and simulations\nbest_players = [\n axl.SteinAndRapoport(),\n axl.Grudger(),\n axl.RevisedDowning(),\n axl.TitForTat(),\n axl.Davis(),\n axl.Nydegger(),\n axl.Cave(),\n axl.Tranquilizer(),\n axl.White(),\n axl.Eatherley(),\n axl.Champion(),\n axl.Harrington(),\n axl.Defector(),\n axl.Random(),\n axl.WinStayLoseShift(),\n axl.GTFT(),\n axl.ZDGTFT2(),\n axl.TitFor2Tats(),\n axl.NaiveProber(),\n axl.HardProber()\n]\n\n# Seed list. 
Contains twenty different seeds\nseeds = [1, 771923, 1143728, 291358764, 901236547, 4750670, 511161, 603276, 83281327, 34293471, 918273645, 135792468, 243165978, 9100021, 43238133, 0, 19192831, 5665363, 2231145, 123456]\n\nplayers = []\nplayer_name = ''\nfor i in range(5):\n if i == 0:\n players = axl_first_players\n player_name = 'axelrod_first'\n elif i == 1:\n players = axl_first_players + axl_second_players\n player_name = 'axelrod_second'\n elif i == 2:\n players = case_players\n player_name = 'case'\n elif i == 3:\n players = steward_plotkin_players + case_players\n player_name = 'steward_plotkin'\n else:\n players = best_players\n player_name = 'best'\n for seed in seeds:\n do_axelrod_tournament(players, 'tournament_axelrod_players_' + player_name + '_param_turns_200_prob_end_0.005_seed_' + str(seed) + '.png', seed=seed, prob_end=0.005)\n do_axelrod_tournament(players, 'tournament_axelrod_players_' + player_name + '_param_turns_200_prob_end_0.005_noise_0.05_seed_' + str(seed) + '.png', seed=seed, prob_end=0.005, noise=0.05)\n do_axelrod_tournament(players, 'tournament_axelrod_players_' + player_name + '_param_turns_200_prob_end_0.005_noise_0.05_biasnoise_seed_' + str(seed) + '.png', seed=seed, prob_end=0.005, noise=0.05, noise_bias=True)\n do_axelrod_tournament(players, 'tournament_axelrod_players_' + player_name + '_param_turns_200_prob_end_0.005_noise_0.2_seed_' + str(seed) + '.png', seed=seed, noise=0.2, prob_end=0.005)\n do_axelrod_tournament(players, 'tournament_axelrod_players_' + player_name + '_param_turns_200_prob_end_0.005_noise_0.2_biasnoise_seed_' + str(seed) + '.png', seed=seed, noise=0.2, prob_end=0.005, noise_bias=True)\n do_case_tournament(players, 'tournament_case_players_' + player_name + '_param_turns_200_seed_' + str(seed) + '.png', turns=200, maximum_round=50, seed=seed, prob_end=0.005)\n do_case_tournament(players, 'tournament_case_players_' + player_name + '_param_turns_200_round_50_noise_0.05_seed_' + str(seed) + '.png', turns=200, maximum_round=50, noise=0.05, seed=seed, prob_end=0.005)\n do_case_tournament(players, 'tournament_case_players_' + player_name + '_param_turns_200_round_50_noise_0.2_seed_' + str(seed) + '.png', turns=200, maximum_round=50, noise=0.2, seed=seed, prob_end=0.005)\n do_case_tournament(players, 'tournament_case_players_' + player_name + '_param_turns_200_round_50_noise_0.05_biasnoise_seed_' + str(seed) + '.png', turns=200, maximum_round=50, noise=0.05, noise_bias=True, seed=seed, prob_end=0.005)\n do_case_tournament(players, 'tournament_case_players_' + player_name + '_param_turns_200_round_50_noise_0.2_biasnoise_seed_' + str(seed) + '.png', turns=200, maximum_round=50, noise=0.2, noise_bias=True, seed=seed, prob_end=0.005)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84552861","text":"from django.db import models\nfrom django.urls import reverse\nimport uuid\n\n# Create your models here.\n\n\nclass Genre(models.Model):\n '''\n model representing book genre\n '''\n name=models.CharField(max_length=100,help_text='Enter a book genre')\n\n def __str__(self):\n '''\n string representing model object\n '''\n return self.name\n\n\nclass Book(models.Model):\n '''\n model representing a book (not a specific copy)\n '''\n\n title=models.CharField(max_length=100)\n author=models.ForeignKey('Author',on_delete=models.SET_NULL,null=True) #assuming that each book will have only one author\n 
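# on_delete=models.SET_NULL keeps the Book row when its Author is deleted; this is why the field needs null=True\n    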
summary=models.TextField(max_length=1000,help_text='Enter a brief description of the book')\n isbn=models.CharField('ISBN',max_length=13,help_text='Enter ISBN number')\n\n genre=models.ManyToManyField(Genre,help_text='Select an appropriate genre for this book')\n\n\n def display_genre(self):\n '''\n Create a string for the genre. This is required to display genre in admin\n\n '''\n return ', '.join(genre.name for genre in self.genre.all()[:3])\n\n display_genre.short_description='Genre'\n\n\n\n def __str__(self):\n '''\n string representing model object\n '''\n return self.title\n\n def get_absolute_url(self):\n '''\n returns url to access a detail record for this book\n '''\n return reverse('book-detail',args=[str(self.id)])\n\n\nclass BookInstance(models.Model):\n '''\n model representing a specific copy of a book that can be borrowed from the library\n '''\n id=models.UUIDField(primary_key=True,default=uuid.uuid4, help_text='Unique ID for this particular book across the whole library')\n book=models.ForeignKey(Book,on_delete=models.SET_NULL,null=True)\n imprint=models.CharField(max_length=200)\n language=models.ForeignKey('Language',on_delete=models.SET_NULL,null=True)\n due_back=models.DateField(null=True,blank=True)\n\n\n LOAN_STATUS=(('m','Maintenance'),('o','On loan'),('a','Available'),('r','Reserved'),)\n\n status=models.CharField(max_length=1,choices=LOAN_STATUS,blank=True,default='m',help_text='Book availability',)\n\n class Meta:\n ordering=['due_back']\n\n def __str__(self):\n '''\n string representing model object\n '''\n return f'{self.id} ({self.book.title})'\n\n\n\nclass Author(models.Model):\n '''\n model representing an author\n '''\n\n first_name=models.CharField(max_length=100)\n last_name=models.CharField(max_length=100)\n date_of_birth=models.DateField(null=True,blank=True)\n date_of_death=models.DateField('Died',null=True,blank=True)\n\n class Meta:\n ordering=['last_name','first_name']\n\n def get_absolute_url(self):\n '''\n returns the url to access a particular author instance\n '''\n return reverse('author-detail',args=[str(self.id)])\n\n\n def __str__(self):\n '''\n string representing model object\n '''\n return f'{self.last_name}, {self.first_name}'\n\n\n\nclass Language(models.Model):\n '''\n model representing a language in which a book might be available\n '''\n name=models.CharField(max_length=100,help_text='Enter script language')\n\n def __str__(self):\n '''\n string representing model object\n '''\n return self.name\n","sub_path":"locallibrary/catalog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"431986040","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# import libraries\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport keras\nfrom keras import layers\nfrom keras import models\nfrom keras.layers import Dropout\nfrom keras.optimizers import SGD\nfrom keras.regularizers import l1\nfrom keras import metrics\n\n\n# In[3]:\n\n\n# load 'feat.npy'\nfeat = np.load('feat.npy',allow_pickle=True)\n\n\n# In[4]:\n\n\n# load 'path.npy'\npath = np.load('path.npy', allow_pickle=True)\n\n\n# In[5]:\n\n\n# load 'train.csv'\ntrain = pd.read_csv('train.csv')\n\n\n# In[6]:\n\n\n# load 'test.csv'\ntest = pd.read_csv('test.csv')\n\n\n# In[7]:\n\n\n# 
create dictionary using path values as keys and feat values as values\ndictionary = {}\nfor key,val in zip(path,feat):\n dictionary[key] = val\n\n\n# In[8]:\n\n\n# match train set with dictionary's items\ny_big_train = []\nx_big_train = []\nfor i in train.values:\n if i[0] in dictionary.keys():\n y_big_train.append(i[1])\n x_big_train.append(dictionary[i[0]])\ny_big_train = np.asarray(y_big_train)\nx_big_train = np.asarray(x_big_train)\n\n\n# In[9]:\n\n\n# match test set with dictionary's items\nx_test = []\nfor i in test.values:\n if i[0] in dictionary.keys():\n x_test.append(dictionary[i[0]])\nx_test = np.asarray(x_test)\n\n\n# In[10]:\n\n\n# applying the StandardScaler from sklearn to preprocess the train - test data sets\nscaler = StandardScaler()\n\nfor i in range(94824):\n x_big_train[i] = scaler.fit_transform(x_big_train[i])\n \nfor i in range(11005):\n x_test[i] = scaler.fit_transform(x_test[i]) \n\n\n# In[11]:\n\n\n# checking the highest length of the first dimension of the train data\ncounter=0\nfor i in range(x_big_train.shape[0]):\n if x_big_train[i].shape[0] > counter:\n counter = x_big_train[i].shape[0]\n\n#Create a zero matrix for the input of train data\ntrain_data = np.zeros((94824,99,13))\n#Concatenating the values from the train data to the zero matrix\nfor i in range(94824):\n for j in range(x_big_train[i].shape[0]):\n for z in range(x_big_train[i].shape[1]):\n train_data[i][j][z] = train_data[i][j][z] + x_big_train[i][j][z]\n\n\n# In[12]:\n\n\n# checking the highest length of the first dimension of the test data\ncounter=0\nfor i in range(x_test.shape[0]):\n if x_test[i].shape[0] > counter:\n counter = x_test[i].shape[0] \n\n#Create a zero matrix for the input of spoken data\ntest_data = np.zeros((11005,99,13))\n#Concatenating the values from the spoken data to the zero matrix\nfor i in range(11005):\n for j in range(x_test[i].shape[0]):\n for z in range(x_test[i].shape[1]):\n test_data[i][j][z] = test_data[i][j][z] + x_test[i][j][z]\n\n\n# In[13]:\n\n\n# train - validation sets split\nx_train, x_val, y_train, y_val = train_test_split(train_data, y_big_train, test_size=0.2, random_state=666, shuffle=True)\n\n\n# In[14]:\n\n\n# transform the labels for the processing of the model\nlb = LabelEncoder()\nbig_train_labels = lb.fit_transform(y_big_train)\ntrain_labels = lb.fit_transform(y_train)\nval_labels = lb.fit_transform(y_val)\n\n\n# In[15]:\n\n\n# transform the above labels to binary in order to fit them in the network\nbig_train_labels = tf.keras.utils.to_categorical(big_train_labels, num_classes=None, dtype='float32')\ntrain_labels = tf.keras.utils.to_categorical(train_labels, num_classes=None, dtype='float32')\nval_labels = tf.keras.utils.to_categorical(val_labels, num_classes=None, dtype='float32')\n\n\n# In[16]:\n\n\n# apply the 1-D Convolutional Neural Network\nn_timesteps = x_train.shape[1]\nn_features = x_train.shape[2] \nn_outputs = 35\n\nmodel = models.Sequential()\nmodel.add(layers.Conv1D(filters=100, kernel_size=10, activation='relu', input_shape=(n_timesteps,n_features)))\nmodel.add(layers.MaxPooling1D())\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Conv1D(100, 10, activation='relu'))\nmodel.add(layers.MaxPooling1D())\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Conv1D(100, 10, activation='relu'))\nmodel.add(layers.MaxPooling1D())\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Flatten())\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dropout(0.25))\nmodel.add(layers.Dense(n_outputs, 
activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\n# In[17]:\n\n\n# fit using train set\nmodel.fit(x_train, train_labels, epochs=20)\n\n\n# In[18]:\n\n\n# evaluate using validation set\naccuracy = model.evaluate(x_val, val_labels, batch_size=32)\nprint(model.metrics_names)\nprint(accuracy)\n\n\n# In[19]:\n\n\n# predict using test set\ntest = model.predict(test_data)\ntest = np.argmax(np.round(test),axis=1)\ntest = lb.inverse_transform(test)\n\n\n# In[20]:\n\n\n# add the words' column to test set - create csv file\nnew = pd.read_csv('test.csv', delimiter=',')\nnew['word'] = test\n\n\n# In[21]:\n\n\n# save the csv file\nnew = new.to_csv(r'C:\\Users\\Gebruiker\\Downloads\\ml_challenge\\result.csv', index=False)\n\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220010223","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/jgorset/Code/python/fandjango/fandjango/middleware.py\n# Compiled at: 2015-12-28 07:16:58\nfrom django.conf import settings\nfrom django.http import QueryDict, HttpResponseRedirect\nfrom datetime import timedelta\nfrom urlparse import parse_qs\nfrom django.core.exceptions import ImproperlyConfigured\nfrom fandjango.views import authorize_application, authorization_denied\nfrom fandjango.models import Facebook, User, OAuthToken\nfrom fandjango.settings import FACEBOOK_APPLICATION_SECRET_KEY, FACEBOOK_APPLICATION_ID, FANDJANGO_CACHE_SIGNED_REQUEST, DISABLED_PATHS, ENABLED_PATHS\nfrom fandjango.utils import is_disabled_path, is_enabled_path, get_full_path, authorization_denied_view, get_post_authorization_redirect_url\nfrom facepy import SignedRequest, GraphAPI\ntry:\n from django.utils.timezone import now\nexcept ImportError:\n from datetime import datetime\n\n def now():\n return datetime.now()\n\n\nfrom dateutil.tz import tzlocal\n\nclass BaseMiddleware:\n\n def is_valid_path(self, request):\n if ENABLED_PATHS and DISABLED_PATHS:\n raise ImproperlyConfigured('You may configure either FANDJANGO_ENABLED_PATHS or FANDJANGO_DISABLED_PATHS, but not both.')\n if DISABLED_PATHS and is_disabled_path(request.path):\n return False\n if ENABLED_PATHS and not is_enabled_path(request.path):\n return False\n return True\n\n def is_access_denied(self, request):\n return 'error' in request.GET and request.GET['error'] == 'access_denied'\n\n\nclass FacebookMiddleware(BaseMiddleware):\n \"\"\"Middleware for Facebook canvas applications.\"\"\"\n\n def process_request(self, request):\n \"\"\"Process the signed request.\"\"\"\n if hasattr(request, 'facebook') and request.facebook:\n return\n request.facebook = False\n if not self.is_valid_path(request):\n return\n if self.is_access_denied(request):\n return authorization_denied_view(request)\n if 'signed_request' not in request.REQUEST and 'signed_request' not in request.COOKIES:\n return\n if request.method == 'POST' and 'signed_request' in request.POST:\n request.POST = QueryDict('')\n request.method = 'GET'\n request.facebook = Facebook()\n try:\n request.facebook.signed_request = SignedRequest(signed_request=request.REQUEST.get('signed_request') or request.COOKIES.get('signed_request'), application_secret_key=FACEBOOK_APPLICATION_SECRET_KEY)\n except SignedRequest.Error:\n request.facebook = 
False\n\n        if request.facebook and request.facebook.signed_request.user.has_authorized_application and not request.facebook.signed_request.user.oauth_token.has_expired:\n            try:\n                user = User.objects.get(facebook_id=request.facebook.signed_request.user.id)\n            except User.DoesNotExist:\n                oauth_token = OAuthToken.objects.create(token=request.facebook.signed_request.user.oauth_token.token, issued_at=request.facebook.signed_request.user.oauth_token.issued_at.replace(tzinfo=tzlocal()), expires_at=request.facebook.signed_request.user.oauth_token.expires_at.replace(tzinfo=tzlocal()))\n                user = User.objects.create(facebook_id=request.facebook.signed_request.user.id, oauth_token=oauth_token)\n                user.synchronize()\n            else:\n                user.last_seen_at = now()\n                if 'signed_request' in request.REQUEST:\n                    user.authorized = True\n                    if request.facebook.signed_request.user.oauth_token:\n                        user.oauth_token.token = request.facebook.signed_request.user.oauth_token.token\n                        user.oauth_token.issued_at = request.facebook.signed_request.user.oauth_token.issued_at.replace(tzinfo=tzlocal())\n                        user.oauth_token.expires_at = request.facebook.signed_request.user.oauth_token.expires_at.replace(tzinfo=tzlocal())\n                        user.oauth_token.save()\n                user.save()\n\n            if not user.oauth_token.extended:\n                try:\n                    user.oauth_token.extend()\n                except:\n                    pass\n\n            request.facebook.user = user\n\n    def process_response(self, request, response):\n        \"\"\"\n        Set compact P3P policies and save signed request to cookie.\n\n        P3P is a W3C standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most\n        browsers it is considered by IE before accepting third-party cookies (i.e. cookies set by\n        documents in iframes). If they are not set correctly, IE will not set these cookies.\n        \"\"\"\n        response['P3P'] = 'CP=\"IDC CURa ADMa OUR IND PHY ONL COM STA\"'\n        if FANDJANGO_CACHE_SIGNED_REQUEST:\n            if hasattr(request, 'facebook') and request.facebook and request.facebook.signed_request:\n                response.set_cookie('signed_request', request.facebook.signed_request.generate())\n        else:\n            response.delete_cookie('signed_request')\n        return response\n\n\nclass FacebookWebMiddleware(BaseMiddleware):\n    \"\"\"Middleware for Facebook auth on websites.\"\"\"\n\n    def process_request(self, request):\n        \"\"\"Process the web-based auth request.\"\"\"\n        if hasattr(request, 'facebook') and request.facebook:\n            return\n        else:\n            request.facebook = False\n            if not self.is_valid_path(request):\n                return\n            if self.is_access_denied(request):\n                return authorization_denied_view(request)\n            request.facebook = Facebook()\n            oauth_token = False\n            if 'oauth_token' in request.COOKIES:\n                try:\n                    oauth_token = OAuthToken.objects.get(token=request.COOKIES['oauth_token'])\n                except OAuthToken.DoesNotExist:\n                    request.facebook = False\n                    return\n\n            else:\n                if 'code' in request.GET:\n                    try:\n                        graph = GraphAPI()\n                        response = graph.get('oauth/access_token', client_id=FACEBOOK_APPLICATION_ID, redirect_uri=get_post_authorization_redirect_url(request, canvas=False), client_secret=FACEBOOK_APPLICATION_SECRET_KEY, code=request.GET['code'])\n                        components = parse_qs(response)\n                        oauth_token, new_oauth_token = OAuthToken.objects.get_or_create(token=components['access_token'][0], issued_at=now(), expires_at=now() + timedelta(seconds=int(components['expires'][0])))\n                    except GraphAPI.OAuthError:\n                        pass\n\n            if not oauth_token or oauth_token.expired:\n                request.facebook = False\n                return\n            try:\n                user = oauth_token.user\n                if not user.authorized:\n                    request.facebook = False\n                    return\n                user.last_seen_at = now()\n                user.save()\n            except 
User.DoesNotExist:\n                graph = GraphAPI(oauth_token.token)\n                profile = graph.get('me')\n                try:\n                    user = User.objects.get(facebook_id=profile.get('id'))\n                    if not user.authorized:\n                        if new_oauth_token:\n                            user.last_seen_at = now()\n                            user.authorized = True\n                        else:\n                            request.facebook = False\n                            return\n                except User.DoesNotExist:\n                    user = User.objects.create(facebook_id=profile.get('id'), oauth_token=oauth_token)\n\n                user.synchronize(profile)\n                old_oauth_token = None\n                if user.oauth_token != oauth_token:\n                    old_oauth_token = user.oauth_token\n                    user.oauth_token = oauth_token\n                    user.save()\n                if old_oauth_token:\n                    old_oauth_token.delete()\n\n            if not user.oauth_token.extended:\n                try:\n                    user.oauth_token.extend()\n                except:\n                    pass\n\n            request.facebook.user = user\n            request.facebook.oauth_token = oauth_token\n            return\n\n    def process_response(self, request, response):\n        \"\"\"\n        Set compact P3P policies and save auth token to cookie.\n\n        P3P is a W3C standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most\n        browsers it is considered by IE before accepting third-party cookies (i.e. cookies set by\n        documents in iframes). If they are not set correctly, IE will not set these cookies.\n        \"\"\"\n        if hasattr(request, 'facebook') and request.facebook and request.facebook.oauth_token:\n            if 'code' in request.REQUEST:\n                path = get_full_path(request, remove_querystrings=['code', 'web_canvas'])\n                response = HttpResponseRedirect(path)\n            response.set_cookie('oauth_token', request.facebook.oauth_token.token)\n        else:\n            response.delete_cookie('oauth_token')\n        response['P3P'] = 'CP=\"IDC CURa ADMa OUR IND PHY ONL COM STA\"'\n        return response","sub_path":"pycfiles/fandjango-4.2.1.tar/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":9578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"28576869","text":"from django.conf.urls import patterns, include, url\n\nfrom . import views\n\nurlpatterns = patterns('blog',\n    url(r'^$', views.index, name='index'),\n    url(r'^post/(?P<slug>.+?)/$', views.post_detail, name='post_detail'),\n    url(r'^tag/(?P<tag>.+?)/$', views.show_tag, name='show_tag'),\n    url(r'^category/(?P<category>.+?)/$', views.show_category, name='show_category'),\n    url(r'^archive/$', views.show_archive, name='show_archive'),\n    url(r'^create/$', views.create_or_update_post, name='create_post'),\n    url(r'^update/(?P<post_id>\\d+)/$', views.create_or_update_post, name='update_post'),\n)\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"620917174","text":"# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.3'\n#       jupytext_version: 1.6.0\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# %% [markdown]\n# # Using numerical and categorical variables together\n#\n# In this notebook, we will present typical ways to deal with **categorical\n# variables**, namely **ordinal encoding** and **one-hot encoding**.\n#\n# We will load the entire adult census dataset.\n\n# %%\nimport pandas as pd\n\ndf = pd.read_csv(\"../datasets/adult-census.csv\")\n\ntarget_name = \"class\"\ntarget = df[target_name]\n\ndata = df.drop(columns=[target_name, \"fnlwgt\"])\n\n# %% [markdown]\n# We recall that the `\"education-num\"` and `\"education\"` columns contain the same\n# information. 
\n# %% [markdown]\n# Besides, we will list beforehand the categories for each categorical column\n# to avoid issues with rare categories when evaluating the model during\n# cross-validation.\n\n# %%\ncategories = [data[column].unique()\n              for column in categorical_columns]\n
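\n# %% [markdown]\n# As an illustrative aside, we can count how many distinct categories each\n# column has; columns with many rarely used categories are the ones for which\n# listing the categories ahead of time matters most:\n\n# %%\nfor column, column_categories in zip(categorical_columns, categories):\n    print(column, len(column_categories))\n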
\n# %% [markdown]\n# ## Dispatch some columns to a specific processor\n#\n# In the previous sections, we saw that we need to treat data differently\n# depending on their nature (i.e. numerical or categorical).\n#\n# Scikit-learn provides a `ColumnTransformer` class which will send specific\n# columns to a specific transformer, making it easy to fit a single predictive\n# model on a dataset that combines both kinds of variables together\n# (heterogeneously typed tabular data).\n#\n# We can first define the columns depending on their data type:\n#\n# * **one-hot encoding** will be applied to categorical columns. Besides, we\n#   will use the option `drop=\"if_binary\"` to drop one of the columns, since the\n#   two resulting columns would carry the same information;\n# * **numerical scaling** will be applied to the numerical features, which will\n#   be standardized.\n#\n# We can now create our `ColumnTransformer` by specifying a list of triplets\n# (preprocessor name, transformer, columns). First, let's start by creating\n# a transformer for the numerical and the categorical parts, as we did in the\n# previous notebooks.\n\n# %%\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\n\ncategorical_preprocessor = OneHotEncoder(categories=categories,\n                                         drop=\"if_binary\")\nnumerical_preprocessor = StandardScaler()\n\n# %% [markdown]\n# Now, we can associate each of these preprocessors with their respective\n# columns.\n\n# %%\nfrom sklearn.compose import ColumnTransformer\n\npreprocessor = ColumnTransformer([\n    ('one-hot-encoder', categorical_preprocessor, categorical_columns),\n    ('standard-scaler', numerical_preprocessor, numerical_columns)])\n\n# %% [markdown]\n# Finally, we can define a pipeline to stack this \"preprocessor\" with our\n# classifier (logistic regression).\n\n# %%\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\nmodel = make_pipeline(preprocessor, LogisticRegression(max_iter=500))\n
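\n# %% [markdown]\n# As a small aside: `make_pipeline` names each step after its lower-cased\n# class name, so the preprocessor and the classifier remain accessible by\n# name if we need to inspect them later:\n\n# %%\nmodel.named_steps[\"columntransformer\"]\n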
\n# %% [markdown]\n# Starting from `scikit-learn 0.23`, the notebooks can display an interactive\n# view of the pipelines.\n\n# %%\nfrom sklearn import set_config\nset_config(display='diagram')\nmodel\n\n# %% [markdown]\n# The final model is more complex than the previous models but still follows\n# the same API:\n#\n# - the `fit` method is called to preprocess the data then train the\n#   classifier;\n# - the `predict` method can make predictions on new data;\n# - the `score` method is used to predict on the test data and compare the\n#   predictions to the expected test labels to compute the accuracy.\n#\n# Let's start by splitting our data into train and test sets.\n\n# %%\nfrom sklearn.model_selection import train_test_split\n\ndata_train, data_test, target_train, target_test = train_test_split(\n    data, target, random_state=42)\n\n# %% [markdown]\n# Now, we can train the model on the train set.\n\n# %%\n_ = model.fit(data_train, target_train)\n\n# %% [markdown]\n# Then, we can feed the raw dataset directly to the pipeline. Indeed, we don't\n# need to do any processing ourselves: all the preprocessing is handled when\n# calling `predict`. We will give an example by predicting on the first\n# five samples from the test set.\n\n# %%\ndata_test.head()\n\n# %%\nmodel.predict(data_test)[:5]\n\n# %%\ntarget_test[:5]\n\n# %% [markdown]\n# We can get the accuracy score by directly calling the `score` method. We will\n# compute the score on the entire test set.\n\n# %%\nmodel.score(data_test, target_test)\n\n# %% [markdown]\n# ## Evaluation of the model with cross-validation\n#\n# This model can also be cross-validated as we previously did (instead of using\n# a single train-test split):\n\n# %%\nfrom sklearn.model_selection import cross_val_score\n\nscores = cross_val_score(model, data, target, cv=5)\nscores\n\n# %%\nprint(f\"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}\")\n\n# %% [markdown]\n# The compound model has a higher predictive accuracy than the two models that\n# used numerical and categorical variables in isolation.\n\n# %% [markdown]\n# ## Fitting a more powerful model\n#\n# **Linear models** are very nice because they are usually very cheap to train,\n# **small** to deploy, **fast** to predict and give a **good baseline**.\n#\n# However, it is often useful to check whether more complex models such as an\n# ensemble of decision trees can lead to higher predictive performance.\n#\n# In the following cell we try a scalable implementation of the **Gradient\n# Boosting Machine** algorithm. For this class of models, we know that contrary\n# to linear models, it is **useless to scale the numerical features** and\n# furthermore it is both safe and significantly more computationally efficient\n# to use an arbitrary **integer encoding for the categorical variables** even\n# if the ordering is arbitrary. Therefore we adapt the preprocessing pipeline\n# as follows:\n\n# %%\nfrom sklearn.experimental import enable_hist_gradient_boosting\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.preprocessing import OrdinalEncoder\n\ncategorical_preprocessor = OrdinalEncoder(categories=categories)\n\npreprocessor = ColumnTransformer([\n    ('categorical', categorical_preprocessor, categorical_columns)],\n    remainder=\"passthrough\")\n\nmodel = make_pipeline(preprocessor, HistGradientBoostingClassifier())\n\n# %% [markdown]\n# Now that we have created our model, we can check its performance.\n\n# %%\n# %%time\n_ = model.fit(data_train, target_train)\n\n# %%\nmodel.score(data_test, target_test)\n\n# %% [markdown]\n# We can observe that we get significantly higher accuracies with the Gradient\n# Boosting model. This is often what we observe whenever the dataset has a\n# large number of samples and a limited number of informative features (e.g. fewer\n# than 1000) with a mix of numerical and categorical variables.\n#\n# This explains why Gradient Boosted Machines are very popular among\n# data science practitioners who work with tabular data.\n\n# %% [markdown]\n# In this notebook we have:\n#\n# * used a `ColumnTransformer` to apply different preprocessing for\n#   categorical and numerical variables;\n# * used a pipeline to chain the `ColumnTransformer` preprocessing and\n#   logistic regression fitting;\n# * seen that **gradient boosting methods** can outperform the basic linear\n#   approach.\n","sub_path":"python_scripts/03_categorical_pipeline_column_transformer.py","file_name":"03_categorical_pipeline_column_transformer.py","file_ext":"py","file_size_in_byte":8043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"217006493","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n    Graphics processing module\n\n\"\"\"\nfrom colorama import init, Fore, Back\ninit(autoreset=True)\n\nMEMORY = {\n    'num': None,\n    'end': None,\n    'url': None,\n    'fps': None,\n    'res': None,\n    'size': None,\n    'percents': None,\n    'filename': None\n}\nTERM: int = 79 # terminal window width\n\n\ndef set_parameters(parameters: dict = None):\n    \"\"\"\n    Interface to the MEMORY dict\n    \"\"\"\n    global MEMORY\n\n    if not parameters:\n        return\n\n    total_digits = max([2, len(str(parameters['end']))])\n    MEMORY = {\n        'num': str(parameters['num']).rjust(total_digits, '0'),\n        'end': str(parameters['end']).rjust(total_digits, '0'),\n        'url': parameters['url'],\n        'size': str(round(parameters['size']/1024/1024, 2)).rjust(6) + 'mb ',\n        'percents': parameters['percents'],\n        'filename': parameters['filename']\n    }\n\n\ndef unable_to_reach(attempt: int):\n    \"\"\"\n    Call status if an attempt to download has failed\n    \"\"\"\n    statbar = attempt * '.' 
+ (10 - attempt) * ' '\n status(prefix='\\r' + Fore.LIGHTRED_EX, stbar=statbar, perc=0, fsize=0)\n\n\ndef unaviable():\n \"\"\"\n Call status if all attempts to download has failed\n \"\"\"\n statbar = '[ FAIL ]'\n status(prefix='\\r' + Fore.RED, stbar=statbar, name=MEMORY.get('url', ''), perc=0, postfix='\\n')\n\n\ndef call_status_for_already_exist():\n \"\"\"\n Call status when file already exist\n \"\"\"\n statbar = Fore.BLACK + Back.GREEN + ' EXISTS ' + Fore.RESET + Back.RESET\n status(stbar=statbar, perc=100, postfix='\\n')\n\n\ndef call_status_for_progres(percents: int):\n \"\"\"\n Call status when we've got new percent of file\n \"\"\"\n MEMORY['percents'] = percents\n\n ready = int(percents / 10)\n remain = 10 - ready\n\n ready_bar = Fore.YELLOW + Back.YELLOW + ready * 'X' + Back.RESET\n remain_bar = Fore.YELLOW + remain * '_'\n full_bar = ready_bar + remain_bar\n\n status(prefix='\\r' + Fore.YELLOW, stbar=full_bar, perc=percents)\n\n\ndef call_status_for_complete():\n \"\"\"\n Call status when file downloading is complete\n \"\"\"\n status(prefix='\\r' + Fore.GREEN, stbar='[COMPLETE]', perc=100, postfix='\\n')\n\n\ndef status(prefix='', num='', end='', fsize=0, perc=0, name='', stbar='', postfix=''):\n \"\"\"\n Print current status\n \"\"\"\n num = num or MEMORY['num']\n end = end or MEMORY['end']\n fsize = fsize or MEMORY['size']\n name = name or MEMORY['filename']\n perc = perc or MEMORY['percents']\n perc = ' ' + str(perc).rjust(3) + '% '\n name = name or MEMORY['filename']\n textual_status = f'[{num} of {end}]{stbar}{perc}{fsize}'\n fsize = len(end) * 2 + 30\n pos = TERM - fsize - 1\n print(prefix + textual_status + name[:pos], end=postfix)\n","sub_path":"yt_graphics.py","file_name":"yt_graphics.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534465652","text":"# pylint: disable=no-self-use\nfrom unittest.mock import Mock\n\nfrom PartSeg._roi_analysis.advanced_window import MeasurementSettings, MultipleInput, Properties\nfrom PartSeg._roi_analysis.advanced_window import QInputDialog as advanced_module_input\nfrom PartSeg._roi_analysis.advanced_window import QMessageBox as advanced_message_box\nfrom PartSegCore.analysis import AnalysisAlgorithmSelection\n\n\nclass TestProperties:\n def test_synchronize_voxel_size(self, qtbot, part_settings):\n widget = Properties(part_settings)\n qtbot.addWidget(widget)\n widget.lock_spacing.setChecked(True)\n widget.update_spacing()\n value = widget.spacing[1].value()\n with qtbot.waitSignal(widget.spacing[2].valueChanged, timeout=10**4):\n widget.spacing[2].setValue(value - 20)\n assert widget.spacing[1].value() == value - 20\n\n def test_pipeline_profile_show_info(\n self, qtbot, part_settings, border_rim_profile, lower_threshold_profile, sample_pipeline\n ):\n part_settings.roi_profiles[border_rim_profile.name] = border_rim_profile\n part_settings.roi_pipelines[sample_pipeline.name] = sample_pipeline\n widget = Properties(part_settings)\n widget.show()\n qtbot.addWidget(widget)\n widget.update_profile_list()\n assert widget.profile_list.count() == 1\n part_settings.roi_profiles[lower_threshold_profile.name] = lower_threshold_profile\n widget.update_profile_list()\n assert widget.profile_list.count() == 2\n assert widget.pipeline_list.count() == 1\n assert widget.info_label.toPlainText() == \"\"\n with qtbot.waitSignal(widget.profile_list.currentItemChanged, timeout=10**4):\n widget.profile_list.setCurrentRow(1)\n profile = 
part_settings.roi_profiles[widget.profile_list.item(1).text()]\n assert widget.info_label.toPlainText() == profile.pretty_print(AnalysisAlgorithmSelection)\n widget.pipeline_list.setCurrentRow(0)\n assert widget.info_label.toPlainText() == sample_pipeline.pretty_print(AnalysisAlgorithmSelection)\n widget.hide()\n\n def test_delete_profile(self, qtbot, part_settings, border_rim_profile, lower_threshold_profile):\n part_settings.roi_profiles[border_rim_profile.name] = border_rim_profile\n part_settings.roi_profiles[lower_threshold_profile.name] = lower_threshold_profile\n widget = Properties(part_settings)\n widget.show()\n qtbot.addWidget(widget)\n widget.update_profile_list()\n assert widget.profile_list.count() == 2\n with qtbot.waitSignal(widget.profile_list.currentItemChanged, timeout=10**4):\n widget.profile_list.setCurrentRow(0)\n assert widget.delete_btn.isEnabled()\n with qtbot.waitSignal(widget.delete_btn.clicked):\n widget.delete_btn.click()\n assert len(part_settings.roi_profiles) == 1\n assert lower_threshold_profile.name in part_settings.roi_profiles\n widget.hide()\n\n def test_rename_profile(\n self, qtbot, part_settings, border_rim_profile, lower_threshold_profile, sample_pipeline, monkeypatch\n ):\n part_settings.roi_profiles[border_rim_profile.name] = border_rim_profile\n part_settings.roi_pipelines[sample_pipeline.name] = sample_pipeline\n part_settings.roi_profiles[lower_threshold_profile.name] = lower_threshold_profile\n widget = Properties(part_settings)\n widget.show()\n qtbot.addWidget(widget)\n widget.update_profile_list()\n assert widget.profile_list.count() == 2\n monkeypatch.setattr(advanced_module_input, \"getText\", check_text(border_rim_profile.name, \"rim\"))\n widget.profile_list.setCurrentRow(0)\n widget.rename_profile()\n assert widget.profile_list.item(1).text() == \"rim\"\n assert set(part_settings.roi_profiles.keys()) == {\"rim\", lower_threshold_profile.name}\n monkeypatch.setattr(advanced_module_input, \"getText\", check_text(sample_pipeline.name, \"rim\"))\n widget.pipeline_list.setCurrentRow(0)\n widget.rename_profile()\n assert widget.pipeline_list.item(0).text() == \"rim\"\n assert set(part_settings.roi_pipelines.keys()) == {\"rim\"}\n monkeypatch.setattr(advanced_module_input, \"getText\", check_text(\"rim\", lower_threshold_profile.name))\n\n called_mock = [0]\n\n def mock_waring(*_):\n monkeypatch.setattr(widget, \"rename_profile\", _empty)\n called_mock[0] = 1\n return advanced_message_box.No\n\n monkeypatch.setattr(advanced_message_box, \"warning\", mock_waring)\n widget.profile_list.setCurrentRow(1)\n widget.rename_profile()\n assert called_mock[0] == 1\n assert widget.profile_list.item(1).text() == \"rim\"\n assert set(part_settings.roi_profiles.keys()) == {\"rim\", lower_threshold_profile.name}\n widget.hide()\n\n def test_multiple_files_visibility(self, qtbot, part_settings):\n widget = Properties(part_settings)\n qtbot.addWidget(widget)\n assert not part_settings.get(\"multiple_files_widget\")\n assert not widget.multiple_files_chk.isChecked()\n with qtbot.waitSignal(widget.multiple_files_chk.stateChanged):\n widget.multiple_files_chk.setChecked(True)\n assert part_settings.get(\"multiple_files_widget\")\n part_settings.set(\"multiple_files_widget\", False)\n assert not widget.multiple_files_chk.isChecked()\n\n\nclass TestMeasurementSettings:\n def test_create(self, qtbot, part_settings):\n widget = MeasurementSettings(part_settings)\n qtbot.addWidget(widget)\n\n def test_base_steep(self, qtbot, part_settings):\n widget = 
MeasurementSettings(part_settings)\n qtbot.addWidget(widget)\n widget.show()\n widget.profile_options.setCurrentRow(0)\n assert widget.profile_options.item(0).text() == \"Volume\"\n assert widget.profile_options.item(1).text() == \"Diameter\"\n assert widget.profile_options_chosen.count() == 0\n widget.choose_option()\n assert widget.profile_options_chosen.count() == 1\n widget.choose_option()\n assert widget.profile_options_chosen.count() == 1\n widget.profile_options.setCurrentRow(1)\n assert widget.profile_options_chosen.count() == 1\n widget.choose_option()\n assert widget.profile_options_chosen.count() == 2\n widget.profile_options.setCurrentRow(0)\n widget.proportion_action()\n assert widget.profile_options_chosen.count() == 2\n widget.profile_options.setCurrentRow(1)\n widget.proportion_action()\n assert widget.profile_options_chosen.count() == 3\n assert widget.profile_options_chosen.item(2).text() == \"ROI Volume/ROI Diameter\"\n\n widget.profile_options_chosen.setCurrentRow(0)\n assert widget.profile_options_chosen.item(0).text() == \"ROI Volume\"\n widget.remove_element()\n assert widget.profile_options_chosen.count() == 2\n assert widget.profile_options_chosen.item(0).text() == \"ROI Diameter\"\n\n assert not widget.save_butt.isEnabled()\n with qtbot.waitSignal(widget.profile_name.textChanged):\n widget.profile_name.setText(\"test\")\n assert widget.save_butt.isEnabled()\n\n assert len(part_settings.measurement_profiles) == 2\n with qtbot.waitSignal(widget.save_butt.clicked):\n widget.save_butt.click()\n assert len(part_settings.measurement_profiles) == 3\n\n with qtbot.waitSignal(widget.profile_name.textChanged):\n widget.profile_name.setText(\"\")\n assert not widget.save_butt.isEnabled()\n\n widget.reset_action()\n assert widget.profile_options_chosen.count() == 0\n widget.hide()\n\n\ndef test_multiple_input(qtbot, monkeypatch):\n mock = Mock()\n monkeypatch.setattr(advanced_message_box, \"warning\", mock)\n widget = MultipleInput(\n text=\"sample text\",\n help_text=\"help\",\n objects_list=[\n (\"A\", str),\n (\"B\", int, 5),\n (\"C\", float, 5.0),\n (\"D\", int),\n (\"E\", float),\n ],\n )\n qtbot.addWidget(widget)\n assert widget.result is None\n widget.accept_response()\n assert widget.result is None\n mock.assert_called_once()\n mock.reset_mock()\n widget.object_dict[\"A\"][1].setText(\"test\")\n widget.accept_response()\n mock.assert_not_called()\n assert widget.result == {\"A\": \"test\", \"B\": 5, \"C\": 5.0, \"D\": 0, \"E\": 0.0}\n\n\ndef check_text(expected, to_return):\n def _check(*_, text=None, **_kwargs):\n assert text == expected\n return to_return, True\n\n return _check\n\n\ndef _empty():\n \"\"\"\n Empty function for monkeypatching to prevent recursive call\n in `test_rename_profile` test\n \"\"\"\n","sub_path":"package/tests/test_PartSeg/roi_analysis/test_advanced_window.py","file_name":"test_advanced_window.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535459255","text":"\nimport commands\nfrom numpy import *\nfrom timings import *\nfrom strCheck import *\nfrom matplotlib import *\nfrom pylab import *\nimport sys\nfrom scipy.misc import *\n\nstatus,output= commands.getstatusoutput(\"ls Activities/*_*m.txt\")\nif len(sys.argv) > 1 and sys.argv[1] == \"50\":\n\tstatus,output= commands.getstatusoutput(\"ls Activities/*_50*m.txt\")\n\nfnames = output.split('\\n')\nfn_dates = []\nfor fn in fnames:\n\tfn_dates.append(getDate(fn))\n\t\n# sort activities based 
on dates\t\nfn_dates, fnames = (list(t) for t in zip(*sorted(zip(fn_dates, fnames))))\n\ndates = []\nstrks_len = []\navg_pace = []\nstrks_min = []\n\n\nfor fn in fnames:\n#\tprint fn\n\tf = open(fn)\n\tdate = getDate(fn)\t\t\n\theader = f.readline()\n\tstroke_index = header.split(',').index('Total Strokes')\n\tmeters = 0. \n\tseconds = 0.\n\tstrokes = 0.\n\tfor line in f:\n\t\t# we want the interval 3 not the single splits 3.1, 3.2, etc\n\t\tif (checkIsInterval(line) == False):\n\t\t\tcontinue\n\t\tstyle = getStyle(line)\n\t\tif not style == 0 : \n\t\t\tcontinue\n#\t\tprint line\n\t\tline = strIntCorrection(line)\n\t\ttokens = line.split(',')\n\t\tmeters = meters + float(tokens[3])\n\t\tseconds = seconds + getSeconds(tokens[4])\n\t\tstrokes = strokes + float(tokens[stroke_index])\n\n\tf.close()\n\n\tif meters > 0 :\n\t\tdates.append(date)\n\t\t# SwimSmooth plot counts both arms, so I double the strokes \n\t\tstrks_min.append(2*strokes*60/seconds)\n\t\tavg_pace.append(seconds*100/meters)\n\t\n\nn=len(strks_min)\nstrks_min = strks_min[n-10:] \navg_pace = avg_pace[n-10:] \n\nformatter = FuncFormatter(toMMSS)\n\nfig, ax1 = plt.subplots()\nax1.xaxis.set_major_formatter(formatter)\nplot(avg_pace, strks_min, \"o--\")\n\nimg = imread(\"swimsmoothpic.png\")\nplt.imshow(img,zorder=0,extent=[130, 60, 30,100], aspect='auto')\nxlim(130,60)\nylim(30,100)\nshow()\n\t\n\n\n\n\n\n\n\n","sub_path":"swimsmooth.py","file_name":"swimsmooth.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334589931","text":"import glob\r\n\r\n\r\n# If run from the current folder, this example outputs:\r\n# Searching for *ule.py, I found:\r\n# path_of_that_module.py\r\n# path_of_this_module.py\r\n\r\n# If run from the parent folder, it finds no such matches,\r\n# since the glob_search does not specify an absolute path\r\n\r\n\r\nglob_search = '*ule.py'\r\n\r\nlist_of_path_names = glob.glob(glob_search)\r\n\r\nprint(f'Searching for {glob_search}, I found:')\r\n\r\nfor path in list_of_path_names:\r\n print(path)\r\n","sub_path":"paths/glob_example.py","file_name":"glob_example.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12158534","text":"from __future__ import unicode_literals\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.gis.geos import *\nfrom django.contrib.gis.measure import D\nfrom mezzanine import template\nfrom mezzanine.generic.forms import RatingForm\nfrom directory.models import Rating, CustomAddress, Main\n\nregister = template.Library()\n\n\n@register.inclusion_tag('includes/closest_objects.html', takes_context=True)\ndef closest(context):\n \"\"\"\n Template tag for rendering closest 5 main objects\n \"\"\"\n if 'page' in context and hasattr(context['page'], 'main'):\n city = context['page'].main.address.all()[0]\n else:\n return {}\n pnt = city.location\n qs = CustomAddress.objects.filter(location__distance_lte=(pnt, D(km=2000))).\\\n distance(pnt).order_by('distance').values_list('id', flat=True)[:5]\n closest_objects = Main.objects.filter(address__id__in=list(qs)).distinct()\n return {'closest_objects': closest_objects}\n\n\n@register.inclusion_tag('includes/highest_rated.html', takes_context=True)\ndef highest_rated(context):\n \"\"\"\n Template tag for rendering highest rated 5 main objects\n \"\"\"\n if 'selected_city' in context:\n city = 
context['selected_city']\n else:\n return {}\n highest_rated = Main.objects.filter(address__city=city).distinct()\n highest_rated_count = highest_rated.count()\n highest_rated = highest_rated.order_by('rating')[:highest_rated_count]\n return {'highest_rated': highest_rated}\n\n\n@register.inclusion_tag(\"includes/rating.html\", takes_context=True)\ndef rating_custom(context, obj):\n \"\"\"\n Override default Mezzanine's rating tag\n \"\"\"\n ct = ContentType.objects.get_for_model(obj)\n context[\"rating_counter\"] = []\n for val in range(1, 6):\n count = Rating.objects.filter(\n content_type=ct, object_pk=obj.pk, value=val).count()\n context[\"rating_counter\"].append(count)\n context[\"rating_object\"] = context[\"rating_obj\"] = obj\n context[\"rating_form\"] = RatingForm(context[\"request\"], obj)\n comments = Rating.objects.filter(content_type=ct, object_pk=obj.pk)\\\n .values_list('comment', 'value')\n # add comments to context\n context['comments'] = [\n {'comment': item[0], 'value': item[1]} for item in comments]\n ratings = context[\"request\"].COOKIES.get(\"mezzanine-rating\", \"\")\n rating_string = \"%s.%s\" % (obj._meta, obj.pk)\n context[\"rated\"] = (rating_string in ratings)\n rating_name = obj.get_ratingfield_name()\n for f in (\"average\", \"count\", \"sum\"):\n context[\"rating_\" + f] = getattr(obj, \"%s_%s\" % (rating_name, f))\n return context\n","sub_path":"directory/templatetags/directory_tags.py","file_name":"directory_tags.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654399905","text":"\"\"\"\r\nland cover zonal statistics for chicago\r\n\r\nkufre\r\n\"\"\"\r\n\r\nimport rasterio as rio\r\nimport geopandas as gpd\r\nimport pandas as pd\r\nfrom rasterstats import zonal_stats\r\n\r\nraster = 'cookcountylandcover2010\\landcover_2010_cookcounty.img'\r\n\r\n# chicago tracts from chicago open data\r\nwith rio.open(raster) as src:\r\n chicago = gpd.read_file('https://data.cityofchicago.org/api/geospatial/5jrd-6zik?method=export&format=GeoJSON').to_crs(src.crs)\r\n\r\n# reading dbf file for raster categories\r\ndbf = gpd.read_file('cookcountylandcover2010\\landcover_2010_cookcounty.img.vat.dbf')\r\n\r\n# cleaning up categories\r\nclasses = [str(x).lower().replace('/','_').replace(' ','_') for x in dbf['Class']]\r\n\r\n# creating dictionary to be used in zonal_stats()\r\nland_cover = dict(zip(dbf['Value'].tolist(), classes))\r\n\r\nzonal = zonal_stats(chicago,raster,categorical = True,nodata = 0, category_map = land_cover)\r\n\r\n# blank DataFrame to be filled in with values from zonal_stats()\r\nzeros = [0] * len(chicago)\r\n\r\nresults = pd.DataFrame(dict((v,zeros) for k,v in land_cover.items()))\r\n\r\nresults.insert(0,'geoid10', chicago['geoid10'])\r\n\r\n# inserting values\r\nfor i in range(len(zonal)):\r\n row = zonal[i]\r\n for j in list(row.keys()):\r\n results.at[i,j] = row.get(j) * 4\r\n\r\nchicago = chicago.merge(results, how = 'left', on = 'geoid10')\r\n\r\n# calculating canopy percentage for each tract\r\nchicago['pct_canopy'] = chicago['tree_canopy'] / chicago.area * 100\r\n\r\n# save results\r\nresults.to_csv('chicago_land_cover.csv')\r\n\r\nchicago.to_file('chicago.gpkg', layer ='land_cover', driver=\"GPKG\")\r\n","sub_path":"chicago_trees/chicago_zonal.py","file_name":"chicago_zonal.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38731252","text":"import 
unittest\nimport random\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tests.test_base import MNIST_MOBINENET_V2, BlockingPredictionTester, AsyncPredictionTester\n\n\nclass MnistMobilenetV2(unittest.TestCase):\n (_, _), (x_test, _) = tf.keras.datasets.mnist.load_data()\n x_test = np.reshape(x_test, (10000, 28, 28, 1))\n\n def test_session(self):\n tester = BlockingPredictionTester(MNIST_MOBINENET_V2)\n\n for i in range(0, 10):\n idx = random.randrange(0, 9999, 1)\n tester.assert_equals(np.array(self.x_test[idx:idx + 1], dtype=np.uint8))\n\n tester.close()\n\n def test_async_session(self):\n tester = AsyncPredictionTester(MNIST_MOBINENET_V2)\n\n for i in range(0, 10):\n idx = random.randrange(0, 9999, 1)\n tester.assert_equals(np.array(self.x_test[idx:idx + 1], dtype=np.uint8))\n\n tester.close()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/furiosa-sdk-runtime/tests/test_mnist_mobinenet_v2.py","file_name":"test_mnist_mobinenet_v2.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38339145","text":"import numpy as np\n\nfrom megskull.network import Network\nfrom megskull.opr.all import (\n Conv2D, Pooling2D, FullyConnected, Softmax,\n\t CrossEntropyLoss, Dropout, ElementwiseAffine, Concat \n\t\t)\nfrom megskull.opr.helper.elemwise_trans import ReLU, Identity\nfrom megskull.graph.query import GroupNode\nfrom megskull.opr.netsrc import DataProvider\nimport megskull.opr.helper.param_init as pinit\nfrom megskull.opr.helper.param_init import AutoGaussianParamInitializer as G\nfrom megskull.opr.helper.param_init import ConstantParamInitializer as C\nfrom megskull.opr.regularizer import BatchNormalization as BN\nimport megskull.opr.arith as arith\n\nglobal idx\nidx = 0\n\n\"\"\"\ndef conv_bn(inp, ker_shape, stride, padding, out_chl, isrelu):\n\tglobal idx\n\tidx += 1\n\tl1 = Conv2D(\n\t\t\"conv{}\".format(idx), inp, kernel_shape = ker_shape, stride = stride, padding = padding,\n\t\toutput_nr_channel = out_chl,\n\t\tW = G(mean = 0, std = ((1 + int(isrelu)) / (ker_shape**2 * inp.partial_shape[1]))**0.5),\n\t\tnonlinearity = {True:ReLU(), False:Identity()}[isrelu]\n\t\t)\n\tl2 = BN(\"bn{}\".format(idx), l1, eps = 1e-9)\n\tl2 = ElementwiseAffine(\"bnaff{}\".format(idx), l2, shared_in_channels = False, k = C(1), b = C(0))\n\treturn l2\n\"\"\"\n\ndef bn_relu_conv(inp, ker_shape, stride, padding, out_chl, has_relu, has_bn, has_conv = True):\n\tglobal idx\n\tidx += 1\n\tif has_bn:\n\t\tl1 = BN(\"bn{}\".format(idx), inp, eps = 1e-9)\n\t\tl1 = ElementwiseAffine(\"bnaff{}\".format(idx), l1, shared_in_channels = False, k = C(1), b = C(0))\n\telse:\n\t\tl1 = inp\n\t\n\tif has_relu:\n\t\tl2 = arith.ReLU(l1)\n\telse:\n\t\tl2 = l1\n\t\n\tif not has_conv:\n\t\treturn l2\n\n\tl3 = Conv2D(\n\t\t\"conv{}\".format(idx), l2, kernel_shape = ker_shape, stride = stride, padding = padding,\n\t\toutput_nr_channel = out_chl,\n\t\tnonlinearity = Identity()\n\t\t)\n\t\n\treturn l3\n\ndef dense_block(inp, k, l):\n\tlay = inp\n\tfor i in range(l):\n\t\tcur_lay = bn_relu_conv(lay, 3, 1, 1, k, True, True)\n\t\tlay = Concat([lay, cur_lay], axis = 1)\n\treturn lay\n\ndef transition(inp, i):\n\tl1 = bn_relu_conv(inp, 1, 1, 0, inp.partial_shape[1], True, True, i != 2)\n\tglobal idx\n\tidx += 1\n\tif i != 2:\n\t\tl2 = Pooling2D(\n\t\t\t\"Pooling{}\".format(idx), l1, window = 2, mode = \"AVERAGE\"\n\t\t\t)\n\telse:\n\t\tl2 = Pooling2D(\n\t\t\t\"Pooling{}\".format(idx), l1, window = 8, stride = 8, mode = 
\"AVERAGE\"\n\t\t\t)\n\treturn l2\n\n\n\"\"\"\ndef res_layer(inp, chl):\n\tpre = inp\n\tinp = conv_bn(inp, 3, 1, 1, chl, True)\n\tinp = conv_bn(inp, 3, 1, 1, chl, False)\n\tinp = arith.ReLU(inp + pre)\n\treturn inp\n\ndef res_block(inp, chl, n):\n\tstride = 2\n\tif chl == 16:\n\t\tstride = 1\n\tpre = inp\n\tinp = conv_bn(inp, 3, stride, 1, chl, True)\n\tinp = conv_bn(inp, 3, 1, 1, chl, False)\n\tinp = inp + conv_bn(pre, 1, stride, 0, chl, False)\n\tinp = arith.ReLU(inp)\n\t\n\tfor i in range(n - 1):\n\t\tinp = res_layer(inp, chl)\n\t\n\treturn inp\n\"\"\"\n\ndef make_network(minibatch_size = 64):\n\tpatch_size = 32\n\tinp = DataProvider(\"data\", shape = (minibatch_size, 3, patch_size, patch_size))\n\tlabel = DataProvider(\"label\", shape = (minibatch_size, ))\n\n\tlay = bn_relu_conv(inp, 3, 1, 1, 16, False, False)\n\n\tk, l = 12, (40 - 4) // 3\n\tfor i in range(3):\n\t\tlay = transition(dense_block(lay, k, l), i)\n\t\n\t#global average pooling\n\tprint(lay.partial_shape)\n\tfeature = lay.mean(axis = 2).mean(axis = 2)\n\t#feature = Pooling2D(\"glbpoling\", lay, window = 8, stride = 8, mode = \"AVERAGE\")\n\tpred = Softmax(\"pred\", FullyConnected(\n\t\t\"fc0\", feature, output_dim = 10,\n\t\tnonlinearity = Identity()\n\t\t))\n\t\n\tnetwork = Network(outputs = [pred])\n\tnetwork.loss_var = CrossEntropyLoss(pred, label)\n\treturn network\n\nif __name__ == '__main__':\n\tmake_network()\n","sub_path":"SE/d40/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"483822749","text":"what = input(\"Please enter the name of a txt file and directory if not in Work:\")\nthefile = open(what, \"r\")\nstring = thefile.read()\nthefile.close()\nletters = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nwcount = 0\nlcount = 1\naword = False\nfor i in range(len(string)):\n if string[i] in (letters) and aword == False:\n aword = True\n if aword:\n if string[i] not in (letters):\n aword = False\n wcount +=1\n if i == len(string)-1 and string[i] in (letters):\n wcount +=1\n if string[i] == \"\\n\":\n lcount+=1\nprint (\"In the file \" +what+\", there are\", wcount,\"words and\",lcount,\"lines.\")\n","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28954184","text":"numbers = [10,22,33,44,55,66,77,88,90]\n\ndef cacl_avg(values):\n sum = 0\n for count in values:\n sum += count\n avg = sum / len(values)\n\n return avg\n\ndef main():\n res = cacl_avg(numbers)\n print('{:.3f}'.format(res))\n\n\nif __name__ == '__main__':\n main()","sub_path":"datetime/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88071310","text":"from django.http import HttpResponse\nfrom django.template import loader\nfrom bs4 import BeautifulSoup as bs4\nfrom playground import urls\n\n# Basically the render function but prettifies the html file\n# Because the programmer has severe OCD when it comes to indents.\n# Also append a few things (like the urls) into the context.\ndef prender(request, template_name, context={}, content_type='text/html', status=200, using=None):\n context['urls'] = []\n for url in urls.urlpatterns:\n if url.name[:2] != '__':\n context['urls'].append({'name':url.name.replace(\"_\", \" \").title(), 
'pattern':url.pattern})\n    content = loader.render_to_string(template_name, context, request, using) # Get HTML code from file\n    no_new_line = content.split('\\n')\n    no_new_list = ''\n    for line in no_new_line:\n        no_new_list = no_new_list+line\n    #document = html.fromstring(no_new_list) # Get HTML string\n    soup = bs4(no_new_list, features=\"lxml\")\n    pretty_doc = soup.prettify()\n    nowhite_doc = '' # Empty string\n\n    # Remove lines that just have whitespace\n    for line in pretty_doc.split('\\n'):\n        if line.rstrip(): # If the line is not just whitespace\n            nowhite_doc = nowhite_doc+line+'\\n'\n\n    # Append to the top of the document also\n    return HttpResponse(nowhite_doc, content_type, status) # Spit out the string\n","sub_path":"internals/rendering.py","file_name":"rendering.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88283892","text":"#!/usr/bin/env python3\n# The FLASK_APP environment variable is used to specify how to load the application.\n# flask is aware of static and templates folder\n\nfrom flask import Flask, render_template, request, url_for, flash, redirect\nimport sqlite3\nfrom werkzeug.exceptions import abort\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'your secret key'\n\ndef get_db_connection():\n    conn = sqlite3.connect('database.db')\n    conn.row_factory = sqlite3.Row\n    return conn\n\ndef get_post(post_id):\n    conn = get_db_connection()\n    post = conn.execute('SELECT * FROM posts WHERE id = ?',(post_id,)).fetchone()\n    conn.close()\n    if post is None:\n        abort(404)\n    return post\n\n\n# flask creates the URL map for this\n@app.route('/')\ndef indexroot():\n    conn = get_db_connection()\n    posts = conn.execute('SELECT * FROM posts').fetchall()\n    conn.close()\n    return render_template('index.html',mess=\"Kubernetes\", posts=posts)\n    \n@app.route('/index.html')\ndef index():\n    conn = get_db_connection()\n    posts = conn.execute('SELECT * FROM posts').fetchall()\n    conn.close()\n    return render_template('index.html',mess=\"Kubernetes\", posts=posts)\n\n@app.route('/<int:post_id>')\ndef post(post_id):\n    post = get_post(post_id)\n    return render_template('post.html', post=post)\n\n\n@app.route('/create', methods=('GET', 'POST'))\ndef create():\n    if request.method == 'POST':\n        title = request.form['title']\n        content = request.form['content']\n\n        if not title:\n            flash('Title is required!')\n        else:\n            conn = get_db_connection()\n            conn.execute('INSERT INTO posts (title, content) VALUES (?, ?)',\n                         (title, content))\n            conn.commit()\n            conn.close()\n            return redirect(url_for('index'))\n\n    return render_template('create.html')\n\n\n@app.route('/<int:id>/edit', methods=('GET', 'POST'))\ndef edit(id):\n    post = get_post(id)\n\n    if request.method == 'POST':\n        title = request.form['title']\n        content = request.form['content']\n\n        if not title:\n            flash('Title is required!')\n        else:\n            conn = get_db_connection()\n            conn.execute('UPDATE posts SET title = ?, content = ?'\n                         ' WHERE id = ?',\n                         (title, content, id))\n            conn.commit()\n            conn.close()\n            return redirect(url_for('index'))\n\n    return render_template('edit.html', post=post)\n\n\n\n@app.route('/<int:id>/delete', methods=('POST',))\ndef delete(id):\n    post = get_post(id)\n    conn = get_db_connection()\n    conn.execute('DELETE FROM posts WHERE id = ?', (id,))\n    conn.commit()\n    conn.close()\n    flash('\"{}\" was successfully deleted!'.format(post['title']))\n    return redirect(url_for('index'))\n\n@app.route('/about.html')\ndef aboutus():\n    return 
render_template('about.html')\n\n@app.route('/dblinks.html')\ndef dblinks():\n    return render_template('dblinks.html')\n\n@app.route('/monitor.html')\ndef monitor():\n    return render_template('monitor.html')\n\n@app.route('/work.html')\ndef work():\n    return render_template('work.html')","sub_path":"flask-blog/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"552910687","text":"class Skok:\n    def __init__(self, linija):\n        dijelovi = [dio.split(',') for dio in linija.strip().split('->')]\n        self.iz, self.za = tuple(dijelovi[0])\n        self.u, self.s, self.pa = tuple(dijelovi[1])\n\n    def odgovara(self, stanje, znak):\n        return self.iz == stanje and self.za == znak\n\nclass Stroj:\n    def __init__(self, linije):\n        self.prazan = linije[2]\n        self.stanje = linije[3]\n        self.ciljna = linije[4].split(',')\n        self.skokovi = [Skok(l) for l in linije[5:]]\n        self.traka = []\n        self.mjesto = 0\n\n    def postavi(self, traka):\n        self.traka = list(traka) + [self.prazan]\n\n    def pokreni(self):\n        koraci = 0\n\n        while True:\n            znak = self.traka[self.mjesto]\n            skokovi = filter(lambda s: s.odgovara(self.stanje, znak), self.skokovi)\n            if len(skokovi) < 1: break\n\n            skok = skokovi[0]\n            self.stanje = skok.u\n            self.traka[self.mjesto] = skok.s\n            koraci += 1\n\n            if skok.pa == 'R':\n                self.mjesto = self.mjesto + 1\n                self.traka = self.traka + [self.prazan]\n            else:\n                self.mjesto = self.mjesto - 1\n\n        return (koraci, 1 if self.stanje in self.ciljna else 0)\n\ndef utrlab3(definicijaTS, sadrzajTrake):\n    stroj = Stroj(definicijaTS.strip('%').split('%'))\n    stroj.postavi(sadrzajTrake)\n    rezultat = stroj.pokreni()\n    return \"%d%%%d\" % rezultat\n","sub_path":"labosi/lab-3/2010-11/by_unknown/UUTR_2010-11_lab-3.py","file_name":"UUTR_2010-11_lab-3.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"93185107","text":"#!/usr/bin/python3\n\nMINTIME = 100000000000000\nMINTIME = 1\n\nimport fileinput\nfrom math import gcd\nfrom collections import defaultdict\n\ndef kgv(a, b):\n\n    return a * b // gcd(a, b)\n\ndef solve_next_bus(start, step, pos, id):\n\n    i = 0\n\n    while (start +i*step + pos) % id != 0:\n    \n        i += 1\n\n    new_step = kgv(step, id)\n\n    return start +i*step, new_step, i+1\n\ndef main():\n    \n    # part 1\n\n    input_ = fileinput.input()\n\n    arrival = int(input_.readline())\n\n    buses = { pos: int(id) for pos,id in enumerate(input_.readline().strip().split(\",\")) if id.isnumeric() }\n\n    departures = { id: id * (arrival // id + (1 if arrival % id else 0)) for id in buses.values() }\n\n    busid, departure = min(departures.items(), key = lambda x: x[1])\n\n    print(busid * (departure - arrival))\n\n    # part 2\n\n    timestamp = 0\n    step = 1\n    iterations = 0\n\n    for pos, id in buses.items():\n        print(timestamp, step)\n        timestamp, step, i = solve_next_bus(timestamp, step, pos, id)\n        iterations += i\n\n    print(timestamp, iterations)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"13/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"459615920","text":"# -*- coding: utf-8 -*-\n# Example 23: one_hot experiment\n\nimport tensorflow as tf\n\n# Standard one-hot encoded labels\nlabels0 = [\n    [0, 0, 1],\n    [0, 1, 0]\n]\n\n# Non-standard (soft) one-hot labels\nlabels1 = [\n    [0.4, 0.1, 0.5],\n    [0.3, 0.6, 0.1]\n]\n\n# Output values (logits)\nlogits = [\n    [2, 0.5, 6],\n    [0.1, 0, 3]\n]\nresult0 = 
tf.nn.softmax_cross_entropy_with_logits(labels=labels0, logits=logits)\nresult1 = tf.nn.softmax_cross_entropy_with_logits(labels=labels1, logits=logits)\n\nwith tf.Session() as sess:\n print(\"result0: \", sess.run(result0))\n print(\"result1: \", sess.run(result1))\n","sub_path":"tf06/example23.py","file_name":"example23.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"456584028","text":"from functools import reduce\n\n\nclass MrComplex(object):\n @staticmethod\n def update(game_info):\n defense_tasks = []\n reserves = []\n\n # Check for threats and available ships\n for planet in game_info.my_planets.values():\n incoming_fleets = [fleet for fleet in game_info.enemy_fleets.values() if fleet.dest.id == planet.id]\n incoming_threats = []\n turn_processed = 0\n avail_ships = planet.num_ships\n excess_ships = 0\n\n for fleet in incoming_fleets:\n new_threat = True\n for threat in incoming_threats:\n if threat.turns_remaining == fleet.turns_remaining:\n new_threat = False\n threat.num_ships += fleet.num_ships\n break\n if new_threat:\n incoming_threats.append(PlanetThreat(round(fleet.turns_remaining), fleet.num_ships))\n\n incoming_threats = sorted(incoming_threats, key=lambda incoming: incoming.turns_remaining)\n\n for threat in incoming_threats:\n def check_for_reinforcements(accum, fleet):\n if fleet.dest.id == planet.id and turn_processed < fleet.turns_remaining <= threat.turns_remaining:\n return accum + fleet.num_ships\n else:\n return accum\n\n reinforcements = reduce(check_for_reinforcements, game_info.my_fleets.values(), 0)\n\n if turn_processed == 0:\n reinforcements += round(threat.turns_remaining * planet.growth_rate)\n if reinforcements >= threat.num_ships:\n excess_ships = reinforcements - threat.num_ships\n elif reinforcements + planet.num_ships >= threat.num_ships:\n avail_ships = planet.num_ships - (threat.num_ships - reinforcements)\n excess_ships = avail_ships\n else:\n shortfall = threat.num_ships - (planet.num_ships + reinforcements)\n defense_tasks.append(DefenseTask(planet, threat.turns_remaining, shortfall))\n else:\n produced_ships = round((threat.turns_remaining - turn_processed) * planet.growth_rate)\n total_defense = produced_ships + excess_ships + reinforcements\n shortfall = threat.num_ships - total_defense\n\n if shortfall > 0:\n defense_tasks.append(DefenseTask(planet, threat.turns_remaining, shortfall))\n avail_ships = 0\n excess_ships = 0\n else:\n excess_ships = total_defense - threat.num_ships\n if avail_ships > excess_ships:\n avail_ships = excess_ships\n\n turn_processed = threat.turns_remaining\n\n if avail_ships > 0:\n reserves.append(ReserveForce(planet, avail_ships))\n\n # Send available ships to defend planets under attack\n defense_tasks = sorted(defense_tasks, key=lambda def_task: def_task.turns_remaining)\n for task in defense_tasks:\n defense_forces = []\n ships_required = task.num_ships\n\n for reserve in reserves:\n if reserve.planet.id == task.planet.id:\n continue\n distance = round(reserve.planet.distance_to(task.planet))\n if distance <= task.turns_remaining and reserve.num_ships > 0 and ships_required > 0:\n if ships_required >= reserve.num_ships:\n ships_required -= reserve.num_ships\n defense_forces.append(reserve)\n else:\n defense_forces.append(ReserveForce(reserve.planet, ships_required))\n ships_required = 0\n\n if ships_required == 0:\n for force in defense_forces:\n game_info.planet_order(force.planet, task.planet, force.num_ships)\n 
used_reserve = next(reserve for reserve in reserves if reserve.planet.id == force.planet.id)\n used_reserve.num_ships -= task.num_ships\n task.num_ships = 0\n\n # Attack with any remaining available ships\n reserves = [reserve for reserve in reserves if reserve.num_ships > 0]\n reserves = sorted(reserves, key=lambda res: res.num_ships, reverse=True)\n for reserve in reserves:\n for planet in game_info.not_my_planets.values():\n def get_inbound_ships(accum, fleet):\n if fleet.dest.id == planet.id:\n return accum + fleet.num_ships\n else:\n return accum\n my_ships = reduce(get_inbound_ships, game_info.my_fleets.values(), 0)\n enemy_ships = reduce(get_inbound_ships, game_info.enemy_fleets.values(), 0)\n if planet.owner_id == 0:\n neutral_ships = planet.num_ships\n ship_balance = my_ships - (neutral_ships + enemy_ships)\n if ship_balance > 0: # Already have enough ships en route to conquer\n continue\n ship_balance += reserve.num_ships\n if ship_balance > 0:\n game_info.planet_order(reserve.planet, planet, ship_balance)\n reserve.num_ships -= ship_balance\n elif planet.owner_id == 2:\n enemy_ships += round(reserve.planet.distance_to(planet) * planet.growth_rate)\n ship_balance = my_ships - enemy_ships\n if ship_balance > 0:\n continue\n ship_balance += reserve.num_ships\n if ship_balance > 0:\n game_info.planet_order(reserve.planet, planet, ship_balance)\n reserve.num_ships -= ship_balance\n\n\nclass DefenseTask(object):\n def __init__(self, planet, turns_remaining, num_ships):\n self.planet = planet\n self.turns_remaining = turns_remaining\n self.num_ships = num_ships\n\n\nclass ReserveForce(object):\n def __init__(self, planet, num_ships):\n self.planet = planet\n self.num_ships = num_ships\n\n\nclass PlanetThreat(object):\n def __init__(self, turns_remaining, num_ships):\n self.turns_remaining = turns_remaining\n self.num_ships = num_ships\n\n","sub_path":"11_spike/bots/MrComplex.py","file_name":"MrComplex.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"426096794","text":"from chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\n\nchatbot = ChatBot(\n 'করোনা বট',\n storage_adapter='chatterbot.storage.SQLStorageAdapter',\n logic_adapters=[\n 'chatterbot.logic.MathematicalEvaluation',\n 'chatterbot.logic.TimeLogicAdapter',\n 'chatterbot.logic.BestMatch',\n {\n 'import_path': 'chatterbot.logic.BestMatch',\n 'default_response': 'আমি দুঃখিত, কিন্তু আমি বুঝতে পারছিনা। আমি এখনো শিখছি।',\n 'maximum_similarity_threshold': 0.70\n }\n ],\n database_uri='sqlite:///database.sqlite3'\n)\n\n\ntrainer = ListTrainer(chatbot)\n\ntraining_data_quesans = open('ques_ans.txt').read().splitlines()\ntraining_data_personal = open('personal_ques.txt').read().splitlines()\n\ntraining_data = training_data_quesans + training_data_personal\n\ntrainer.train(training_data)\n\n","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326993173","text":"from pyasn1.type import univ, namedtype, tag, constraint\r\n\r\nfrom .common import ConditionalTime, Credentials, InvokeId, IntUnsignedLong,\\\r\n SpaceLinkDataUnit, Time\r\nfrom .pdu import SleScheduleStatusReportInvocation, SleStopInvocation,\\\r\n SleAcknowledgement, SleScheduleStatusReportReturn\r\nfrom .raf_structure import RafParameterName, RequestedFrameQuality, AntennaId,\\\r\n CarrierLockStatus, DiagnosticRafGet, 
DiagnosticRafStart, FrameQuality,\\\r\n FrameSyncLockStatus, LockStatus, Notification, RafGetParameter,\\\r\n RafProductionStatus, SymbolLockStatus\r\nfrom .bind import SleBindInvocation, SleBindReturn, SlePeerAbort,\\\r\n SleUnbindInvocation, SleUnbindReturn\r\n\r\n\r\n# Incoming PDUs\r\n\r\nclass RafStartInvocation(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('invokerCredentials', Credentials()),\r\n namedtype.NamedType('invokeId', InvokeId()),\r\n namedtype.NamedType('startTime', ConditionalTime()),\r\n namedtype.NamedType('stopTime', ConditionalTime()),\r\n namedtype.NamedType('requestedFrameQuality', RequestedFrameQuality())\r\n )\r\n\r\n\r\nclass RafGetParameterInvocation(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('invokerCredentials', Credentials()),\r\n namedtype.NamedType('invokeId', InvokeId()),\r\n namedtype.NamedType('rafParameter', RafParameterName())\r\n )\r\n\r\n\r\nclass RafUserToProviderPdu(univ.Choice):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('rafBindInvocation', SleBindInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 100))),\r\n namedtype.NamedType('rafBindReturn', SleBindReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 101))),\r\n namedtype.NamedType(\r\n 'rafUnbindInvocation', SleUnbindInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 102))),\r\n namedtype.NamedType('rafUnbindReturn', SleUnbindReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 103))),\r\n namedtype.NamedType('rafStartInvocation', RafStartInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 0))),\r\n namedtype.NamedType('rafStopInvocation', SleStopInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 2))),\r\n namedtype.NamedType(\r\n 'rafScheduleStatusReportInvocation',\r\n SleScheduleStatusReportInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 4))),\r\n namedtype.NamedType(\r\n 'rafGetParameterInvocation', RafGetParameterInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 6))),\r\n namedtype.NamedType('rafPeerAbortInvocation', SlePeerAbort().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatSimple, 104)))\r\n )\r\n\r\n\r\n# Outgoing PDUs\r\n\r\nclass RafStartReturn(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('performerCredentials', Credentials()),\r\n namedtype.NamedType('invokeId', InvokeId()),\r\n namedtype.NamedType('result', univ.Choice(\r\n componentType=namedtype.NamedTypes(\r\n namedtype.NamedType(\r\n 'positiveResult', univ.Null().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatSimple, 0))),\r\n namedtype.NamedType(\r\n 'negativeResult', DiagnosticRafStart().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 1)))\r\n )))\r\n )\r\n\r\n\r\nclass RafTransferDataInvocation(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('invokerCredentials', Credentials()),\r\n namedtype.NamedType('earthReceiveTime', Time()),\r\n namedtype.NamedType('antennaId', AntennaId()),\r\n namedtype.NamedType('dataLinkContinuity', univ.Integer().subtype(\r\n subtypeSpec=constraint.ValueRangeConstraint(-1, 
16777215))),\r\n namedtype.NamedType('deliveredFrameQuality', FrameQuality()),\r\n namedtype.NamedType('privateAnnotation', univ.Choice(\r\n componentType=namedtype.NamedTypes(\r\n namedtype.NamedType('null', univ.Null().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatSimple, 0))),\r\n namedtype.NamedType('notNull', univ.OctetString().subtype(\r\n subtypeSpec=constraint.ValueSizeConstraint(\r\n 1, 128)).subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatSimple, 1)))\r\n ))),\r\n namedtype.NamedType('data', SpaceLinkDataUnit())\r\n )\r\n\r\n\r\nclass RafSyncNotifyInvocation(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('invokerCredentials', Credentials()),\r\n namedtype.NamedType('notification', Notification())\r\n )\r\n\r\n\r\nclass FrameOrNotification(univ.Choice):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType(\r\n 'annotatedFrame', RafTransferDataInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 0))),\r\n namedtype.NamedType(\r\n 'syncNotification', RafSyncNotifyInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 1)))\r\n )\r\n\r\n\r\nclass RafTransferBuffer(univ.SequenceOf):\r\n componentType = FrameOrNotification()\r\n\r\n\r\nclass RafStatusReportInvocation(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('invokerCredentials', Credentials()),\r\n namedtype.NamedType('errorFreeFrameNumber', IntUnsignedLong()),\r\n namedtype.NamedType('deliveredFrameNumber', IntUnsignedLong()),\r\n namedtype.NamedType('frameSyncLockStatus', FrameSyncLockStatus()),\r\n namedtype.NamedType('symbolSyncLockStatus', SymbolLockStatus()),\r\n namedtype.NamedType('subcarrierLockStatus', LockStatus()),\r\n namedtype.NamedType('carrierLockStatus', CarrierLockStatus()),\r\n namedtype.NamedType('productionStatus', RafProductionStatus())\r\n )\r\n\r\n\r\nclass RafGetParameterReturn(univ.Sequence):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('performerCredentials', Credentials()),\r\n namedtype.NamedType('invokeId', InvokeId()),\r\n namedtype.NamedType('result', univ.Choice(\r\n componentType=namedtype.NamedTypes(\r\n namedtype.NamedType(\r\n 'positiveResult', RafGetParameter().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext,\r\n tag.tagFormatConstructed, 0))),\r\n namedtype.NamedType(\r\n 'negativeResult', DiagnosticRafGet().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 1)))\r\n )))\r\n )\r\n\r\n\r\nclass RafProviderToUserPdu(univ.Choice):\r\n componentType = namedtype.NamedTypes(\r\n namedtype.NamedType('rafBindInvocation', SleBindInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 100))),\r\n namedtype.NamedType('rafBindReturn', SleBindReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 101))),\r\n namedtype.NamedType(\r\n 'rafUnbindInvocation', SleUnbindInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 102))),\r\n namedtype.NamedType('rafUnbindReturn', SleUnbindReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 103))),\r\n namedtype.NamedType('rafStartReturn', RafStartReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 1))),\r\n namedtype.NamedType('rafStopReturn', SleAcknowledgement().subtype(\r\n 
implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 3))),\r\n namedtype.NamedType('rafTransferBuffer', RafTransferBuffer().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 8))),\r\n namedtype.NamedType(\r\n 'rafScheduleStatusReportReturn',\r\n SleScheduleStatusReportReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 5))),\r\n namedtype.NamedType(\r\n 'rafStatusReportInvocation', RafStatusReportInvocation().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 9))),\r\n namedtype.NamedType(\r\n 'rafGetParameterReturn', RafGetParameterReturn().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatConstructed, 7))),\r\n namedtype.NamedType('rafPeerAbortInvocation', SlePeerAbort().subtype(\r\n implicitTag=tag.Tag(\r\n tag.tagClassContext, tag.tagFormatSimple, 104)))\r\n )\r\n","sub_path":"slecommon/datatypes/raf_pdu.py","file_name":"raf_pdu.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4633909","text":"#!/usr/bin/env python\n# Licensed to Cloudera, Inc. under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. Cloudera, Inc. licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom crequest.middleware import CrequestMiddleware\n\nfrom desktop.conf import ENABLE_ORGANIZATIONS\n\n\ndef default_organization():\n from useradmin.models import Organization\n default_organization, created = Organization.objects.get_or_create(name='default', domain='default')\n return default_organization\n\n\ndef get_user_request_organization():\n request = CrequestMiddleware.get_request()\n return request.user.organization if request and hasattr(request, 'user') and request.user.is_authenticated() else default_organization()\n\n\ndef _fitered_queryset(queryset, by_owner=False):\n request = CrequestMiddleware.get_request()\n\n # Avoid infinite recursion on very first retrieval of the user\n if ENABLE_ORGANIZATIONS.get() and \\\n request and hasattr(request, 'user') and hasattr(request.user, '_wrapped') and type(request.user._wrapped) is not object and \\\n request.user.is_authenticated():\n if by_owner:\n filters = {'owner__organization': request.user.organization}\n else:\n filters = {'organization': request.user.organization}\n\n queryset = queryset.filter(**filters)\n\n return queryset\n","sub_path":"apps/useradmin/src/useradmin/organization.py","file_name":"organization.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"409710148","text":"from unittest import TestCase\n\nfrom libmt94x.transfer_failed_codes import InvalidTransferFailedCodeError\nfrom libmt94x.transfer_failed_codes import TransferFailed\nfrom libmt94x.transfer_failed_codes import 
TransferFailedMisc\nfrom libmt94x.transfer_failed_codes import TransferFailedSEPA\n\n\nclass TransferFailedTests(TestCase):\n def setUp(self):\n self.sepa = TransferFailedSEPA.get_instance()\n self.misc = TransferFailedMisc.get_instance()\n self.any = TransferFailed.get_instance()\n\n # SEPA codes tests\n\n def test_sepa_code_ok(self):\n reason = self.sepa.resolve_code('AC01')\n self.assertEquals(reason, 'Rekeningnummer incorrect')\n\n rv = self.sepa.code_is_valid('AC01')\n self.assertEquals(rv, True)\n\n def test_sepa_code_invalid(self):\n # This code belongs to the Misc category\n with self.assertRaises(InvalidTransferFailedCodeError):\n self.sepa.resolve_code('AC03')\n\n rv = self.sepa.code_is_valid('AC03')\n self.assertEquals(rv, False)\n\n # Misc codes tests\n\n def test_misc_code_ok(self):\n reason = self.misc.resolve_code('AC03')\n self.assertEquals(reason, 'Ongeldig rekeningnummer crediteur')\n\n rv = self.misc.code_is_valid('AC03')\n self.assertEquals(rv, True)\n\n def test_misc_code_invalid(self):\n # This code belongs to the SEPA category\n with self.assertRaises(InvalidTransferFailedCodeError):\n self.misc.resolve_code('AC01')\n\n rv = self.misc.code_is_valid('AC01')\n self.assertEquals(rv, False)\n\n # Any/All codes tests\n\n def test_any_sepa_code_ok(self):\n # Can resolve a SEPA code\n reason = self.any.resolve_code('AC01')\n self.assertEquals(reason, 'Rekeningnummer incorrect')\n\n rv = self.any.code_is_valid('AC01')\n self.assertEquals(rv, True)\n\n def test_any_misc_code_ok(self):\n # Can resolve a Misc code\n reason = self.any.resolve_code('AC03')\n self.assertEquals(reason, 'Ongeldig rekeningnummer crediteur')\n\n rv = self.any.code_is_valid('AC03')\n self.assertEquals(rv, True)\n\n def test_any_code_invalid(self):\n # This code is not defined for either SEPA nor Misc\n with self.assertRaises(InvalidTransferFailedCodeError):\n self.any.resolve_code('AC00')\n\n rv = self.any.code_is_valid('AC00')\n self.assertEquals(rv, False)\n","sub_path":"tests/test_transfer_failed_codes.py","file_name":"test_transfer_failed_codes.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"581448322","text":"import numpy as np\nimport pandas as pd\n\ntrain = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\ntest = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\npredictors = [\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Embarked\"]\nmark=\"Survived\"\n\n\ndef harmonize_data(titanic):\n \n titanic[\"Age\"] = titanic[\"Age\"].fillna(titanic[\"Age\"].median())\n titanic[\"Age\"].median()\n \n \n \n titanic.loc[titanic[\"Sex\"] == \"male\", \"Sex\"] = 0\n titanic.loc[titanic[\"Sex\"] == \"female\", \"Sex\"] = 1\n \n titanic[\"Embarked\"] = titanic[\"Embarked\"].fillna(\"S\")\n\n titanic.loc[titanic[\"Embarked\"] == \"S\", \"Embarked\"] = 0\n titanic.loc[titanic[\"Embarked\"] == \"C\", \"Embarked\"] = 1\n titanic.loc[titanic[\"Embarked\"] == \"Q\", \"Embarked\"] = 2\n\n titanic[\"Fare\"] = titanic[\"Fare\"].fillna(titanic[\"Fare\"].median())\n\n return titanic\n\n\ndef create_submission(clf, train, test, predictors, filename):\n\n clf.fit(train[predictors], train[\"Survived\"])\n predictions = clf.predict(test[predictors])\n\n submission = pd.DataFrame({\n \"PassengerId\": test[\"PassengerId\"],\n \"Survived\": predictions\n })\n \n submission.to_csv(filename, index=False)\n\n\nfrom sklearn.model_selection import cross_val_score\n\ndef 
validation_scores(clf, train_data):\n scores = cross_val_score(\n clf,\n train_data[predictors],\n train_data[mark],\n cv=3\n )\n return scores.mean()\n\n\ntrain_data = harmonize_data(train)\ntest_data = harmonize_data(test)\n\n\ndef compare_metods(classifiers, train_data):\n names, scores = [], []\n for name, clf in classifiers:\n names.append(name)\n scores.append(validation_scores(clf, train_data))\n return pd.DataFrame(scores, index=names, columns=['Scores'])\n\n\n\nfrom sklearn.ensemble import GradientBoostingClassifier\n\nclassifiers = [\n (\"Gradient Boosting\", GradientBoostingClassifier(max_depth=4)),\n]\n\nres = compare_metods(classifiers, train_data)\nres\n\nprint(res)\n","sub_path":"script/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"76572322","text":"import json\nimport base64\nimport numpy as np\nimport sys\nimport time\nimport os\nimport cv2\nfrom PIL import Image\nimport io\nimport boto3\nimport urllib\n\n\n# construct the argument parse and parse the arguments\nconfthres = 0.5\nnmsthres = 0.1\ns3_resource = boto3.resource('s3')\ndynamodb = boto3.client('dynamodb')\nTABLE_NAME = 'IMAGE_URL'\nbasic_url = 'https://s3-image-storing-bucket-fit5225.s3.amazonaws.com/'\n\n\n\ndef load_model(configpath, weightspath):\n # load our YOLO object detector trained on COCO dataset (80 classes)\n print(\"[INFO] loading YOLO from disk...\")\n net = cv2.dnn.readNetFromDarknet(configpath, weightspath)\n return net\n \ndef do_prediction(image, net, LABELS):\n (H, W) = image.shape[:2]\n # determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # construct a blob from the input image and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes and\n # associated probabilities\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layerOutputs = net.forward(ln)\n # print(layerOutputs)\n end = time.time()\n\n # show timing information on YOLO\n print(\"[INFO] YOLO took {:.6f} seconds\".format(end - start))\n\n # initialize our lists of detected bounding boxes, confidences, and\n # class IDs, respectively\n boxes = []\n confidences = []\n classIDs = []\n\n # loop over each of the layer outputs\n for output in layerOutputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability) of\n # the current object detection\n scores = detection[5:]\n # print(scores)\n classID = np.argmax(scores)\n # print(classID)\n confidence = scores[classID]\n\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > confthres:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), 
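validation_scores above is a thin wrapper over sklearn's cross_val_score; here is a self-contained sanity check of the same call on synthetic data, so it runs without the Kaggle ../input/ CSVs the script assumes.

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score

# synthetic stand-in for the harmonized Titanic frame (7 predictors)
X, y = make_classification(n_samples=300, n_features=7, random_state=0)
clf = GradientBoostingClassifier(max_depth=4)
print(cross_val_score(clf, X, y, cv=3).mean())  # mean accuracy over 3 folds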
int(height)])\n\n            confidences.append(float(confidence))\n            classIDs.append(classID)\n\n    # apply non-maxima suppression to suppress weak, overlapping bounding boxes\n    idxs = cv2.dnn.NMSBoxes(boxes, confidences, confthres,\n                            nmsthres)\n\n    # TODO Prepare the output as required to the assignment specification(DONE)\n    # ensure at least one detection exists\n    if len(idxs) > 0:\n        # loop over the indexes we are keeping\n        # create an array of the results\n        result = []\n        #loop over the objects and put them in an array and return the result\n        for i in idxs.flatten():\n            result.append(LABELS[classIDs[i]])\n        return result\n    \n\n    \n\n\n\nbucket = 'library-bucket-fit5225'\nkey1 = 'coco.names'\nkey2 = 'yolov3-tiny.cfg'\nkey3 = 'yolov3-tiny.weights'\n\n\n\ns3_bucket = s3_resource.Bucket(bucket)\ns3_obj1 = s3_bucket.Object(key=key1)\ns3_obj2 = s3_bucket.Object(key=key2)\ns3_obj3 = s3_bucket.Object(key=key3)\n\n\n\n\n\n\ndef lambda_handler(event, context):\n    \n    print('Lambda handler started')\n    bucket2 = event['Records'][0]['s3']['bucket']['name']\n    print(bucket2)\n    key = event['Records'][0]['s3']['object']['key']\n    keyImage = urllib.parse.unquote_plus(key,encoding = 'utf-8')\n    print(keyImage)\n    s3_bucket_2 = s3_resource.Bucket(bucket2)\n    s3_obj5 = s3_bucket_2.Object(key=key)\n    \n    # read image which is uploaded to the bucket\n    \n    \n    # read yolo files from bucket \n    #boto to get file from bucket\n    #make this into an array\n    labels = s3_obj1.get().get('Body').read()\n    labels = str(labels)\n    labels = labels[2:]\n    labels = labels.replace('\\\"','')\n    #print(labels)\n    labels = labels.split(\"\\\\n\")\n    #print(labels)\n    cfg = s3_obj2.get().get('Body').read()\n    weights = s3_obj3.get().get('Body').read()\n\n\n    img_data = s3_obj5.get().get('Body').read()\n\n    #image from the s3 bucket\n    img = Image.open(io.BytesIO(img_data))\n    npimg = np.array(img)\n    image = npimg.copy()\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    nets = load_model(cfg, weights)\n    result = do_prediction(image, nets, labels)\n    tags = 'abc'\n    if result is None:\n        # no detections: store an empty tag list instead of crashing on list(None)\n        tags = []\n    else: \n        \n        if len(result) > 1 :\n            tags = []\n            for i in result:\n                if i not in tags:\n                    tags.append(i)\n            tags = list(tags) \n        else:\n            tags = list(result)\n        \n    url = basic_url+keyImage\n    print(url)\n    print(tags)\n    data = {}\n    data['url_list'] = {'S' : url}\n    data['tags'] = {'SS' : tags}\n    dynamodb.put_item(TableName=TABLE_NAME, Item=data)\n    print('Inserted into db')\n\n    \n","sub_path":"object_detection_lambda_final.py","file_name":"object_detection_lambda_final.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"304916741","text":"from collections import namedtuple\nfrom itertools import cycle\nfrom typing import List\n\nimport numpy as np\nimport plotly.colors\nfrom astropy import units as u\nfrom astropy.coordinates import CartesianRepresentation\nfrom plotly.graph_objects import Figure\n\nfrom poliastro.plotting.util import BODY_COLORS, generate_label\nfrom poliastro.util import norm\n\n\nclass Trajectory(namedtuple(\"Trajectory\", [\"trajectory\", \"state\", \"label\", \"color\"])):\n    pass\n\n\nclass BaseOrbitPlotter:\n    \"\"\"\n    Parent Class for the 2D and 3D OrbitPlotter Classes based on Plotly.\n    \"\"\"\n\n    def __init__(self, figure=None):\n        self._figure = figure or Figure()\n        self._layout = None\n\n        self._trajectories = []  # type: List[Trajectory]\n\n        self._attractor = None\n        self._attractor_radius = np.inf * u.km\n\n        self._color_cycle = cycle(plotly.colors.DEFAULT_PLOTLY_COLORS)\n\n    
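do_prediction above relies on cv2.dnn.NMSBoxes to drop overlapping detections; the toy call below shows its inputs ([x, y, w, h] boxes plus confidences) and output (indices of the survivors), assuming OpenCV is installed. The return shape varies between OpenCV versions, hence the defensive flatten.

import cv2

boxes = [[10, 10, 100, 100], [12, 12, 100, 100], [300, 300, 50, 50]]
confidences = [0.9, 0.6, 0.8]
# same thresholds as the script: confidence 0.5, NMS overlap 0.1
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.1)
keep = idxs.flatten() if hasattr(idxs, 'flatten') else idxs
print([boxes[i] for i in keep])  # the two overlapping boxes collapse to one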
@property\n def trajectories(self):\n return self._trajectories\n\n def _set_attractor(self, attractor):\n if self._attractor is None:\n self._attractor = attractor\n elif attractor is not self._attractor:\n raise NotImplementedError(\n f\"Attractor has already been set to {self._attractor.name}.\"\n )\n\n def set_attractor(self, attractor):\n \"\"\"Sets plotting attractor.\n\n Parameters\n ----------\n attractor : ~poliastro.bodies.Body\n Central body.\n\n \"\"\"\n self._set_attractor(attractor)\n\n def _redraw_attractor(self):\n # Select a sensible value for the radius: realistic for low orbits,\n # visible for high and very high orbits\n min_radius = min(\n [\n trajectory.represent_as(CartesianRepresentation).norm().min() * 0.15\n for trajectory, _, _, _ in self._trajectories\n ]\n or [0 * u.m]\n )\n radius = max(self._attractor.R.to(u.km), min_radius.to(u.km))\n # TODO: Remove previously plotted sphere?\n self._plot_sphere(\n radius,\n BODY_COLORS.get(self._attractor.name, \"#999999\"),\n self._attractor.name,\n )\n\n self._attractor_radius = radius\n\n def _plot_point(self, radius, color, name, center=None):\n raise NotImplementedError\n\n def _plot_sphere(self, radius, color, name, center=None):\n raise NotImplementedError\n\n def plot_trajectory(self, trajectory, *, label=None, color=None):\n \"\"\"Plots a precomputed trajectory.\n\n An attractor must be set first.\n\n Parameters\n ----------\n trajectory : ~astropy.coordinates.CartesianRepresentation\n Trajectory to plot.\n label : string, optional\n color : string, optional\n\n \"\"\"\n if self._attractor is None:\n raise ValueError(\n \"An attractor must be set up first, please use \"\n \"set_attractor(Major_Body) or plot(orbit).\"\n )\n else:\n if color is None:\n color = next(self._color_cycle)\n\n trace = self._plot_trajectory(trajectory, str(label), color, False)\n\n self._trajectories.append(\n Trajectory(trajectory, None, label, trace.line.color)\n )\n\n if not self._figure._in_batch_mode:\n return self.show()\n\n def _plot_trajectory(self, trajectory, label, color, dashed):\n raise NotImplementedError\n\n def plot(self, orbit, *, label=None, color=None):\n \"\"\"Plots state and osculating orbit in their plane.\n\n Parameters\n ----------\n orbit : ~poliastro.twobody.orbit.Orbit\n Orbit to plot.\n label : string, optional\n Label of the orbit.\n color : string, optional\n Color of the line and the position.\n\n \"\"\"\n if color is None:\n color = next(self._color_cycle)\n\n self._set_attractor(orbit.attractor)\n\n label = generate_label(orbit, label)\n trajectory = orbit.sample()\n\n trace = self._plot_trajectory(trajectory, label, color, True)\n\n self._trajectories.append(\n Trajectory(trajectory, orbit.r, label, trace.line.color)\n )\n\n # Redraw the attractor now to compute the attractor radius\n self._redraw_attractor()\n\n # Plot required 2D/3D shape in the position of the body\n radius = min(\n self._attractor_radius * 0.5, (norm(orbit.r) - orbit.attractor.R) * 0.5\n ) # Arbitrary thresholds\n self._plot_point(radius, color, label, center=orbit.r)\n\n if not self._figure._in_batch_mode:\n return self.show()\n\n def _prepare_plot(self):\n if self._attractor is not None:\n self._redraw_attractor()\n\n self._figure.layout.update(self._layout)\n\n def show(self):\n \"\"\"Shows the plot in the Notebook.\n\n Updates the layout and returns the underlying figure.\n\n \"\"\"\n self._prepare_plot()\n return 
self._figure\n","sub_path":"src/poliastro/plotting/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163026122","text":"#!/usr/bin/env python3\n# coding=utf-8\n\"\"\"Implement Hyman's (flawed) concurrent lock acquisition algorithm.\n\nThis module implements Hyman's concurrent lock acquisition algorithm, as laid\nout in his 1966 letter \"Comments on a Problem in Concurrent Programming\nControl.\"\n\nThe original problem is as follows: given a group of N computers working\ntogether on some task, how can we ensure that only one works on a critical\nportion of the task at a time? Some requirements are as follows:\n\n#. If two computers attempt to enter the critical section at the same time, the\n   solution can *not* be to choose a winner based on some static property of\n   the computers, such as their IDs\n#. Assumptions may not be made about the relative speeds of the computers. The\n   computers may change speed over time.\n#. If a computer leaves the cluster while not in the critical section, the\n   remaining computers must be able to continue working on the task.\n#. Theoretically infinite waits are permissible, so long as they're\n   statistically unlikely.\n\nIn addition, Hyman notes that his algorithm works when there are only two\ncomputers. The original code is poorly formatted and, to my eyes, illegible.\nFurthermore, Knuth noted that \"there are 15 syntactic ALGOL errors in 12 lines\nof program!\" The California State University, Stanislaus, has provided a\n`translation`_. Here is a slightly cleaned-up rendition of that translation,\nwhich I will use as the basis of my program:\n\n    // Each of two processes, P0 and P1 executes the code below, P0 executes\n    // Protocol(0,1), and P1 executes Protocol(1,0).\n\n    int turn = 0;\n    int flag[2] = false;\n    void Protocol (int me, int you) {\n        do {\n            flag[me] = true;\n            while (turn != me) {\n                while (flag[you]) {\n                    /* do nothing */;\n                }\n                turn = me;\n            }\n            CriticalSection(me);\n            flag[me] = false;\n            RemainderSection(me);\n        } while (true);\n    }\n\nFor an example of how this procedure can fail, consider the following\nscenario:\n\n#. Worker 1 raises flag 1.\n#. Worker 1 notes that it is not their turn to do work.\n#. Worker 1 notes that flag 0 is down.\n#. Worker 0 raises his flag.\n#. Worker 0 notes that it is his turn to do work, and proceeds into the\n   critical section.\n#. Worker 1 declares that it is his turn to do work, and proceeds into the\n   critical section.\n\n.. _translation:\n    https://www.cs.csustan.edu/~john/Classes/Previous_Semesters/CS3750_OperatingSys_I/2009_04_Fall/hymanProb.html\n\"\"\"\nfrom threading import Thread\nfrom typing import Callable, List\n\nWORKERS = 2\n\nturn: int = 0\nflags: List[bool] = [False, False]\n\n\ndef main() -> None:\n    \"\"\"Spawn several threads.\"\"\"\n    threads = [Thread(target=do_work, args=(i,)) for i in range(WORKERS)]\n    for thread in threads:\n        thread.start()\n    for thread in threads:\n        thread.join()\n\n\ndef do_work(worker_id: int) -> None:\n    \"\"\"Do bogus work.\n\n    Do work, acquire the lock, do more work, release the lock, and do more\n    work. (No work is actually done.)\n\n    :param worker_id: A unique identifier for this thread. 
Used in lock\n management logic.\n \"\"\"\n lock_and_call(worker_id, print, f'Worker {worker_id} doing critical work.')\n\n\ndef lock_and_call(worker_id: int, function: Callable, *args, **kwargs) -> None:\n \"\"\"Acquire the global lock, call the given function, and release the lock.\n\n :param worker_id: A unique identifier for this thread. Used in lock\n management logic.\n :param function: A function to call once the lock is acquired.\n :param args: Passed to ``function``.\n :param kwargs: Passed to ``function``.\n \"\"\"\n global turn # pylint:disable=global-statement,invalid-name\n global flags # pylint:disable=global-statement,invalid-name\n\n other_worker_id = 1 - worker_id\n flags[worker_id] = True\n while turn != worker_id:\n while flags[other_worker_id]:\n pass\n turn = worker_id\n print(f'Worker {worker_id} has acquired the lock.')\n function(*args, **kwargs)\n print(f'Worker {worker_id} is releasing the lock.')\n flags[worker_id] = False\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/hyman-concurrent/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284820681","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('common', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now)),\n ('modified_on', models.DateTimeField(auto_now=True, default=django.utils.timezone.now)),\n ('first_name', models.CharField(max_length=255)),\n ('last_name', models.CharField(max_length=255)),\n ('email', models.EmailField(unique=True, max_length=255)),\n ('phone', models.CharField(max_length=255)),\n ('address', models.ForeignKey(blank=True, null=True, to='common.Address', default=None)),\n ('comments', models.ManyToManyField(blank=True, to='common.Comment')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Vendor',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now)),\n ('modified_on', models.DateTimeField(auto_now=True, default=django.utils.timezone.now)),\n ('name', models.CharField(unique=True, max_length=255)),\n ('comments', models.ManyToManyField(blank=True, to='common.Comment')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='contact',\n name='represents',\n field=models.ForeignKey(to='vendors.Vendor'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='contact',\n name='user',\n field=models.OneToOneField(blank=True, null=True, to=settings.AUTH_USER_MODEL, default=None),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='contact',\n unique_together=set([('represents', 'first_name', 'last_name')]),\n ),\n 
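The docstring above traces the interleaving that breaks Hyman's protocol. For contrast, here is a sketch of Peterson's algorithm, the textbook repair: the turn is conceded unconditionally after raising the flag, which closes the window the scenario above exploits. This is my illustration, not part of the module, and under CPython the GIL already serializes most of it, so treat it as didactic.

from threading import Thread

turn = 0
flags = [False, False]
count = [0]

def peterson_worker(me):
    global turn
    other = 1 - me
    for _ in range(200):
        flags[me] = True
        turn = other                     # concede the turn *after* flagging
        while flags[other] and turn == other:
            pass                         # busy-wait
        count[0] += 1                    # critical section
        flags[me] = False

threads = [Thread(target=peterson_worker, args=(i,)) for i in (0, 1)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(count[0])  # 400: every increment survived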
]\n","sub_path":"apps/vendors/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397517503","text":"import threading\nimport socket\n\nclass Tello:\n ANY_ADDR = '0.0.0.0'\n\n DEVICE_COMMAND_ADDR = '192.168.10.1'\n DEVICE_COMMAND_SEND_PORT = 8889\n DEVICE_COMMAND_RECV_PORT = 9000\n\n DEVICE_STATE_RECV_PORT = 8890\n\n DEVICE_VIDEO_RECV_PORT = 9000\n\n def __init__(self):\n self.deviceCommandSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.deviceCommandSocket.bind((Tello.ANY_ADDR, Tello.DEVICE_COMMAND_RECV_PORT))\n\n self.deviceStateSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.deviceStateSocket.bind((Tello.ANY_ADDR, Tello.DEVICE_STATE_RECV_PORT))\n\n self.__recvThreadQuit = False\n self.__recvThread = threading.Thread(target=self.__recv)\n self.__recvThread.start()\n\n self.__stateThreadQuit = False\n self.__stateThread = threading.Thread(target=self.__state)\n self.__stateThread.start()\n\n def __del__(self):\n self.deviceCommandSocket.close()\n\n def __recv(self):\n while not self.__recvThreadQuit:\n data, addr = self.deviceCommandSocket.recvfrom(128)\n if len(data) > 0:\n print(\"Last command received '%s' from %s\" % (data, addr))\n else:\n print(\"Status received from %s\" % addr)\n print(\"Done receiving\")\n\n def __state(self):\n while not self.__stateThreadQuit:\n data, addr = self.deviceStateSocket.recvfrom(512)\n if len(data) > 0:\n print(\"From %s:\" % addr[0])\n print(\" %s\" % data)\n else:\n print(\"Status received from %s\" % addr)\n print(\"Done state\")\n\n def testCmd(self):\n self.deviceCommandSocket.sendto('command', (Tello.DEVICE_COMMAND_ADDR, Tello.DEVICE_COMMAND_SEND_PORT))\n\n def release(self):\n print(\"Thread joining...\")\n\n self.__recvThreadQuit = True\n self.__recvThread.join()\n\n self.__stateThreadQuit = True\n self.__stateThread.join()\n\n print(\"Thread joined\")\n\nif __name__ == \"__main__\":\n tello = Tello()\n tello.testCmd()\n tello.release()\n","sub_path":"tello.py","file_name":"tello.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198007305","text":"#!/usr/bin/python\n\nimport os, codecs, re, argparse, sys\nfrom io import StringIO\nimport unicodecsv as csv\nfrom lxml import etree\n\nfrom test_anystyle_parser import *\n\n# THE FOLLOWING THREE \"for\" STATMENTS SKETCH OUT HOW THE SCRIPT MIGHT RUN: \n#\n# 1) PROVIDED WITH A SET OF EXPLICIT FOLDER/FILE COMBINATIONS; \n# 2) PROVIDED WITH A FOLDER NAME, SO AS TO PROCESS ALL THE FILES \n# IN THE FOLDER; \n# 3) TO PROCESS JUST ONE FILE, EXPLICITLY NAMED AS A TEST.\n#\n\n#for file_name in ['csv/1984-1993_11_scopus.csv', 'csv/1984-1993_5_scopus.csv', 'csv/1984-1993_6_scopus.csv', 'csv/1994-1996 scopus.csv', 'csv/1997-1998.csv']:\n\n#for temp in os.listdir('some_folder/'):\n# file_name = 'some_folder/' + temp\n\n#R/S: DEFINE ARGUMENT\n\ncsv.field_size_limit(sys.maxsize)\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filename\", help=\"The Name of the File to be Processed\", nargs='+')\nargs = parser.parse_args()\n\n\nfor file_name in args.filename:\n \n # OPEN THE OUTPUT FILE, TO WHICH WE WILL RIGHT OUR RESULTS\n\n file_handle = open('for_open_refine_' + file_name.split('/')[-1], 'w')\n w = csv.writer(file_handle, encoding='utf-8')\n\n # READ THE INPUT CSV INTO data, A LIST CONTAINING ALL THE CSV ROWS\n\n data = 
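The Tello class above passes 'command' to sendto as a str; on Python 3 that call requires bytes. A minimal standalone probe of the same endpoint looks like the sketch below — only useful with a drone on the network, and the addresses are simply the ones hard-coded in the class.

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 9000))          # same local port the class uses
sock.settimeout(5.0)
sock.sendto(b'command', ('192.168.10.1', 8889))  # bytes, not str
try:
    data, addr = sock.recvfrom(128)
    print('reply:', data)
except socket.timeout:
    print('no Tello reachable')
finally:
    sock.close()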
[]\n for d in csv.reader(open(file_name, 'r'), encoding='utf-8'):\n data.append(d)\n\n # WRITE THE CSV HEADER ROW; NOTE THAT WE'RE ADDING HEADINGS FOR\n # OUT NEW COLUMNS.\n\n header_row = data[0]\n w.writerow(['row_type', 'source_file', 'scopus_EID', 'reference_n'] + header_row)\n\n # PROCESS THE REMAINING ROWS IN THE CSV, ONE-BY-BY, STARTING AT ROW 2 (\"[1:]\").\n\n for row in data[1:]:\n\n # PICK THE IMPORTANT BITS FROM THE ROW\n\n authors_sequence = row[0]\n #R/S HANDLE THE JR/SR PROBLEM\n authors_sequence = re.sub(r'(\\,)(\\s[JS]r\\.?)', r'\\2', authors_sequence)\n #R/S HANDLE THE \"et al\" PROBLEM\n authors_sequence = re.sub(r'\\s?[Ee][Tt]\\.?\\s?[Aa][Ll]\\.?','', authors_sequence)\n row[0]=authors_sequence\n title = row[1]\n year = row[2]\n journal = row[3]\n #R/S CLEAN UP JOURNAL TITLES\n journal = re.sub(r'[\\,\\\\\\/\\.]','',journal)\n journal = re.sub(r'^[iI]n\\s','',journal)\n row[3]=journal\n references_sequence = row[24]\n scopus_EID = row[41]\n\n # FIX THE TRUNCATION PROBLEM\n\n if len(row[24]) > 32767:\n row[24] = row[24][:32765]\n\n # WRITE THE ORIGINAL ROW TO THE OUTPUT, ADDING OUR NEW COLUMNS.\n\n new_row = ['scopus result', file_name.replace('csv/', ''), scopus_EID, ''] + row\n w.writerow(new_row)\n \n # PICK APART THE REFERENCES\n\n if references_sequence.strip() > '':\n\n # REFERENCES ARE SEPARATED BY SEMICOLON. SPLIT ON SEMICOLON,\n # THEN PROCESS EACH OF THE INDIVIDUAL REFERENCES. \n # reference_n IS THE SEQUENCE NUMBER OF THE REFERENCE\n # (STARTING WITH 0), r IS THE ACTUAL TEXT OF THE REFERENCE.\n \n for reference_n, r in enumerate(references_sequence.split(';')):\n\n if r.strip() > '':\n\n # CALL THE RUBY ANYSTYLE PARSER, THEN PARSE THE \n # RESULTS INTO SEPARATE FIELDS.\n #\n # THIS IS QUITE SLOW. THE ANYSTYLE, WHICH IS, AS\n # FAR AS WE'RE CONCERNED, A BLACKBOX, SEEMS TO \n # EXPEND QUITE A BIT OF PROCESSING PARSING REFERENCES.\n # R/S FIX THE JR/SR PROBLEM\n r = re.sub(r'(\\,)(\\s[JS]r\\.?)', r'\\2', r)\n # R/S FIX THE \"et al\" PROBLEM\n r = re.sub(r'\\,\\s?[Ee][Tt]\\.?\\s?[Aa][Ll]\\.?','', r)\n author, title, year, journal, volume, from_page, to_page = call_anystyle_parser(r)\n \n # ANYSTYLE PARSER OFTEN (10% OF THE TIME?) 
LEAVES\n                    # BITS OF THE ARTICLE OR JOURNAL IN WITH THE YEAR.\n                    # THIS CODE CLEANS THAT UP.\n\n                    if re.sub('[0-9]', '', year.strip()) == '':\n                        # IT'S OKAY, SO WE DON'T HAVE TO DO ANYTHING\n                        pass\n                    else:\n\n                        # SPLIT THE YEAR BY GROUPS OF NUMBERS\n\n                        year_parts = re.split('([0-9]+)', year.strip())\n\n                        # WHICH ONE OF THE PARTS IS REALLY THE YEAR?\n\n                        year_pos = -1\n                        for a in range(0, len(year_parts)):\n                            if len(year_parts[a]) == 4 and re.sub('[0-9]', '', year_parts[a]) == '':\n                                year_pos = a\n                                break\n\n                        # IF WE FOUND THE ACTUAL YEAR (\"year_pos != -1\"),\n                        # THEN PUT THE PARTS WHERE THEY REALLY BELONG\n\n                        if year_pos != -1:\n                            if year_pos > 0:\n                                title = title + ' ' + ''.join(year_parts[:year_pos])\n                            year = year_parts[year_pos]\n                            for a in range(year_pos + 1, len(year_parts)):\n                                if re.sub('[0-9]', '', year_parts[a]) == '':\n                                    volume = volume + year_parts[a]\n                                else:\n                                    journal = journal + year_parts[a]\n\n                    # FINALLY, WE'RE DONE, AND CAN WRITE A \"reference\"\n                    # ROW TO THE OUTPUT FILE.\n                    \n                    #R/S CLEAN UP JOURNAL TITLES\n                    journal = re.sub(r'^[iI]n\\s','',journal)\n                    new_row = ['reference', file_name.replace('csv/', ''), scopus_EID, reference_n] + [author, title, year, journal, volume, '', '', from_page, to_page, '', '', '', '', '', '', '', '', '', '', '', '', '', '', r]\n                    w.writerow(new_row)\n\n    file_handle.close()\n\n\n    \n","sub_path":"make_openrefine_input_2.py","file_name":"make_openrefine_input_2.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89911502","text":"import pandas as pd\r\nimport requests\r\nimport openpyxl\r\n\r\n# Read the data\r\nDataPath = './Hotel_reviews_NLP/Tripadvisor Review Part3.xlsx'\r\nLocations = pd.read_excel(DataPath, Headname=None, usecols=[2, 3])\r\nData = pd.read_excel(DataPath, Headname=None)\r\nLocList = []\r\nCityList = []\r\n\r\n# Save different Addresses of hotels\r\nfor i in range(0, Locations.shape[0]):\r\n    if Locations.ix[i, 0] not in LocList:\r\n        LocList.append(Locations.ix[i, 0])\r\n        CityList.append(Locations.ix[i, 1])\r\nKey = ',+CA&key=AIzaSyBin-hGUWXjPnlT8q_ETK17zRYFbLsuw-4'\r\nGeocoding = 'https://maps.googleapis.com/maps/api/geocode/xml?address='\r\nLatitude = []\r\nlongitude = []\r\n\r\nprint(LocList.index(Locations.ix[10000, 0]))\r\n\r\n# Get Geocoding list\r\nfor i in range(0, len(LocList)):\r\n    # for i in range(0, 1):\r\n    url = Geocoding + LocList[i] + ' ' + CityList[i] + Key\r\n    data = requests.get(url).text\r\n    print(i)\r\n    Latitude.append(data[data.find('<lat>') + 5:data.find('</lat>')])\r\n    longitude.append(data[data.find('<lng>') + 5:data.find('</lng>')])\r\nprint(Latitude, longitude)\r\n\r\n# Transform all addresses to Geocoding\r\nData['Latitude'] = None\r\nData['Longitude'] = None\r\nfor i in range(0, Locations.shape[0]):\r\n    index = LocList.index(Locations.ix[i, 0])\r\n    Data.ix[i, 15] = Latitude[index]\r\n    Data.ix[i, 16] = longitude[index]\r\nData.to_excel('./Hotel_reviews_NLP/Tripadvisor Review Part3-transformed.xlsx')\r\n","sub_path":"Step2.py","file_name":"Step2.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451566282","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 22 15:53:40 2016\r\n\"\"\"\r\nimport shutil\r\nimport os, datetime\r\n\r\ninPath = \"C:/1save/taipei_parking\" # source folder path\r\n\r\noutPath = \"C:/1save/test\" # destination folder path\r\n\r\nb4List = os.listdir(inPath)\r\n\r\naftList = []\r\n\r\ndef insertColon(string = str, index = int):\r\n    \"\"\"\r\n    
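Step2.py slices lat/lng out of the geocoder response with str.find, which silently returns -1 on a miss. A sturdier sketch parses the XML instead; the snippet below inlines a response fragment in the Google Geocoding XML shape so it runs offline.

import xml.etree.ElementTree as ET

sample = ('<GeocodeResponse><result><geometry><location>'
          '<lat>37.4224764</lat><lng>-122.0842499</lng>'
          '</location></geometry></result></GeocodeResponse>')

root = ET.fromstring(sample)
location = root.find('./result/geometry/location')
print(location.find('lat').text, location.find('lng').text)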
Insert a colon at a given position in the original string.\r\n    The first argument is the target string.\r\n    The second argument is the position to insert at.\r\n    \"\"\"\r\n    return string[:index] + ':' + string[index:]\r\n\r\n\r\nfor name in b4List:\r\n    timeIn = name[:-4].replace(\"_\", \"T\")\r\n    timeIn = insertColon(timeIn, -4)\r\n    timeIn = insertColon(timeIn, -2)\r\n    obj = datetime.datetime.strptime(timeIn,'%Y-%m-%dT%H:%M:%S')\r\n    obj = obj + datetime.timedelta(hours = -1)\r\n    outName = outPath + \"/\" + obj.strftime(\"%Y-%m-%d_%H%M%S\") + \".csv\"\r\n    shutil.copy(inPath + \"/\" + name, outName)\r\n    #d0 = d0.strftime(\"%Y-%m-%d_%H%M%S\")\r\n    aftList.append(obj) # obj is the filename timestamp converted to a datetime\r\n","sub_path":"minus1hour.py","file_name":"minus1hour.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160054008","text":"\"\"\"\n-*- coding: utf-8 -*-\n@Name : pos_tagging-processor.py\n@Time : 2021/3/16 0:46\n@Author : 软工1701 李澳 U201716958\n@Desc : Preprocessing utilities for datasets such as PTB, CTB and the People's Daily corpus; for now the PTB helpers serve as the representative set\n\"\"\"\n\n\n# Generate a vocabulary file sorted by descending word frequency\ndef generate_ptb_vocab(src_train_data, vocab_file):\n    \"\"\"\n\n    Args:\n        src_train_data: PTB training data used as input\n        vocab_file: the vocabulary file to write\n\n    Returns:\n        null\n\n    References:\n        https://blog.csdn.net/qq_23031939/article/details/79759344\n        https://chehongshu.blog.csdn.net/article/details/85288590?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control&dist_request_id=&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control\n\n    Build Time:\n        2021/3/16 1:30\n\n    \"\"\"\n\n    import codecs\n    import collections\n    from operator import itemgetter\n\n    # Count how often each word occurs\n    counter = collections.Counter()\n    with codecs.open(src_train_data, \"r\", \"utf-8\") as f:\n        for line in f:\n            for word in line.strip().split():\n                counter[word] += 1\n\n    # Sort the words by frequency\n    # Counter inherits from dict, so dict methods work; it maps each element (key) to its count (value)\n    # itemgetter(1) sorts on the count; reverse=True gives descending order\n    sorted_word_to_cnt = sorted(counter.items(), key=itemgetter(1), reverse=True)\n\n    # Convert to a list of word strings\n    sorted_words_list = [x[0] for x in sorted_word_to_cnt]\n\n    # The sentence terminator \"<eos>\" (in effect a line terminator) will be appended at every line break later, so add it to the vocabulary up front\n    # In the PTB dataset the input already replaces low-frequency words with <unk>, which was counted as an ordinary \"word\" by the steps above,\n    # so there is no need to add a separate <unk> entry to the word list\n    sorted_words_list = [\"<eos>\"] + sorted_words_list\n\n    # Write the descending word list into the vocab_file vocabulary file\n    with codecs.open(vocab_file, 'w', 'utf-8') as file_output:\n        for word in sorted_words_list:\n            file_output.write(word + '\\n')\n\n\n# Convert the training, test and validation files of the dataset into word-id form\ndef vocab_transform_index(vocab, src_train_data, src_test_data, src_valid_data, train_index_data, test_index_data,\n                          valid_index_data):\n    \"\"\"\n\n    Args:\n        vocab: the prepared descending-frequency vocabulary\n        src_train_data: training data from the PTB dataset\n        src_test_data: test data from the PTB dataset\n        src_valid_data: validation data from the PTB dataset\n        train_index_data: the id-encoded training data\n        test_index_data: the id-encoded test data\n        valid_index_data: the id-encoded validation data\n\n    Returns:\n        null\n\n    References:\n        https://blog.csdn.net/qq_23031939/article/details/79759344\n        https://chehongshu.blog.csdn.net/article/details/85288590?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control&dist_request_id=&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-4.control\n\n    Build Time:\n        2021/3/18 0:08\n\n    \"\"\"\n    import codecs\n    with codecs.open(vocab, 'r', 'utf-8') as vocab_f:\n        # Read the descending word list vocab_list\n        vocab_list = [single_vocab.strip() for single_vocab in vocab_f.readlines()]\n        # Build a dict mapping each word to its line number\n        word_index_dict = {k: v for (k, v) in zip(vocab_list, range(len(vocab_list)))}\n\n    # get_word_index_from_dict looks up a word's index in the dictionary\n    def 
get_word_index_from_dict(word_content):\n        return word_index_dict[word_content] if word_content in word_index_dict else word_index_dict['<unk>']\n\n    # src_to_index converts a single dataset file into word ids\n    def src_to_index(src_data, index_data):\n        # f_in and f_out point at the raw-format dataset file and the id-encoded output file\n        f_in = codecs.open(src_data, 'r', 'utf-8')\n        f_out = codecs.open(index_data, 'w', 'utf-8')\n        for line in f_in:\n            # the words that occur in each line/sentence (plus the sentence terminator \"<eos>\")\n            words = line.strip().split() + [\"<eos>\"]\n            # map the words to their corresponding indices\n            index_line = ' '.join([str(get_word_index_from_dict(word)) for word in words]) + '\\n'\n            # write each converted line to the corresponding file\n            f_out.write(index_line)\n        f_in.close()\n        f_out.close()\n\n    # Encode the training, test and validation data in turn\n    src_to_index(src_train_data, train_index_data)\n    src_to_index(src_test_data, test_index_data)\n    src_to_index(src_valid_data, valid_index_data)\n","sub_path":"postag/pos_tagging/utils/dataset/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"315553644","text":"import sys\nN, M = map(int, sys.stdin.readline().split())\ntimes = []\nfor i in range(N):\n    times.extend(list(map(int, sys.stdin.readline().split())))\n\ns = 1\ne = max(times)*M\nwhile s <= e:\n    num = (s+e)//2\n    total = 0\n    for time in list(times):\n        total += num // time\n    if total >= M:\n        result = num\n        e = num-1\n    else:\n        s = num+1\nprint(result)","sub_path":"김민채/0808/BOJ_3079.py","file_name":"BOJ_3079.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"50414519","text":"'''\nProblem 1.4\nWrite a function for sinc(x) = (sin x) / x\nMake sure that your function handles x = 0 correctly.\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as mpl\n\ndef plot_sinc(x, y):\n    mpl.figure()\n    mpl.plot(x, y, 'r-')\n    mpl.title(\"Sinc Function\")\n    mpl.xlabel(\"x\")\n    mpl.ylabel(\"sinc(x)\")\n    mpl.show()\n\ndef my_sinc(x):\n    if x == 0.0:\n        return 1\n    else:\n        return np.sin(x)/x\n\n# Let's calculate a few values and plot it\nsamples = 200\nx = np.linspace(-30.0, 30.0, samples)\ny = np.empty_like(x)\nfor i in range(samples):\n    y[i] = my_sinc(x[i])\nplot_sinc(x,y)\n\n","sub_path":"Lecture3/CompPhysPy/ch01/sinc.py","file_name":"sinc.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"431811376","text":"# Prompt: https://leetcode.com/problems/matrix-diagonal-sum/\n# Runtime: 88 ms, faster than 55.68% of Python online submissions for Matrix Diagonal Sum.\n# Memory Usage: 13.6 MB, less than 44.32% of Python online submissions for Matrix Diagonal Sum.\n\n\nclass Solution(object):\n    def diagonalSum(self, mat):\n        \"\"\"\n        :type mat: List[List[int]]\n        :rtype: int\n        \"\"\"\n        sm = 0\n        length = len(mat)\n        for i in range(length):\n            sm += mat[i][i]\n            # avoid adding middle twice\n            if i != length-i-1:\n                sm += mat[length-i-1][i]\n        \n        return sm\n","sub_path":"0. Easy/1572. 
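my_sinc above special-cases x = 0 by hand. NumPy ships a *normalized* sinc, np.sinc(t) = sin(pi t)/(pi t), so the unnormalized version the exercise asks for is np.sinc(x / np.pi); a quick consistency check:

import numpy as np

x = np.append(np.linspace(-30.0, 30.0, 200), 0.0)  # include x == 0 explicitly
mine = np.array([1.0 if v == 0.0 else np.sin(v) / v for v in x])
print(np.allclose(mine, np.sinc(x / np.pi)))  # True, x == 0 included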
Matrix Diagonal Sum/matrix_diagonal_sum.py","file_name":"matrix_diagonal_sum.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189834718","text":"### Give a target amount n and a list of distinct coin values, return a list of the least amount of coins needed ###\nimport unittest\n\n\ndef change_coin(target_amount, coins):\n    \"\"\"input a target amount 11 and coin_list [1, 5], return [5, 5, 1]\"\"\"\n    coin_list = []\n    if target_amount == 0:\n        return coin_list\n    else:\n        coins.sort(reverse=True)\n        if target_amount >= coins[0]:\n            target_amount -= coins[0]\n            coin_list.append(coins[0])\n            coin_list.extend(change_coin(target_amount, coins))\n        elif target_amount < coins[0]:\n            coin_list.extend(change_coin(target_amount, coins[1:]))\n    return coin_list\n\n\nclass Test(unittest.TestCase):\n    \"\"\"Test cases\"\"\"\n\n    '''test change coin'''\n    def test_change_coin(self):\n        self.assertEqual(change_coin(15, [1, 2, 5]), [5, 5, 5])\n        self.assertEqual(change_coin(19, [1, 3, 5]), [5, 5, 5, 3, 1])\n        self.assertEqual(change_coin(5, [1, 2]), [2, 2, 1])\n\n\n'''run the script'''\nif __name__ == '__main__':\n    unittest.main()\n\n","sub_path":"Recursion/CoinChange.py","file_name":"CoinChange.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"138960923","text":"# Cheetah-Shell/modules/dir.py\n# Copyright (C) 2018 Marco Tomas Rodriguez.\n# Licensed under https://opensource.org/licenses/MIT \nimport os\nimport modules.operativesystem as eos\n\n\nclass Dir:\n    def __init__(self):\n        self.directory = os.getcwd()\n\n    def setDir(self, directory):\n        i = 0\n        for letters in directory:\n            if letters == '/' or letters == '\\\\':\n                i = i + 1\n        if directory == '..':\n            if eos.getPlatform() == 'Windows':\n                split = str(self.directory).split('\\\\')\n                dir = \"\"\n                for routes in split[:-1]:\n                    dir = dir + routes + '\\\\'\n                self.directory = dir\n\n            else:\n                # TODO: finish support for Unix-based OSes\n                pass\n        elif i < 1:\n            if eos.getPlatform() == 'Windows':\n                if os.path.isdir(self.directory + '\\\\' + directory):\n                    self.directory = self.directory + '\\\\' + directory\n                else:\n                    print(\"Path doesn't exist\")\n            elif os.path.isdir(directory):\n                self.directory = directory\n            else:\n                return print(\"The directory does not exist\")\n\n    def getDir(self):\n        return self.directory","sub_path":"modules/dir.py","file_name":"dir.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369659098","text":"'''Optimized retrieval and in-memory storage of a small amount of information across many contacts.'''\n\nfrom process.logging import Logger as log\nfrom process.globals import config\nfrom database import db\n\nclass ContactCache(object):\n    def __init__(self, require_email=False, **kw):\n        self.columns = []\n        self.contacts = []\n        self.require_email = require_email\n\n    def isEmpty(self):\n        # lists have no .empty() method; truth-testing the list is the idiom\n        return not self.contacts\n\n    def fetch(self):\n        '''Load a batch of contacts into the cache'''\n        query = self.buildQuery()\n\n        self.contacts = []\n        result = db.get_db().execute(query)\n        for row in result:\n            name_components = []\n            keys = ['first_name', 'middle_name', 'last_name', 'organization_name']\n\n            for key in keys:\n                if key in row and row[key]:\n                    name_components.append(row[key])\n\n            #TODO: consider some flatter structure:\n            #self.contacts.append([\n            #\trow['id'],\n            #\t\" 
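change_coin above is greedy, which is optimal for canonical coin systems like those in the tests but not in general: for target 6 with coins [1, 3, 4] it returns [4, 1, 1] while [3, 3] is shorter. A small dynamic-programming sketch (my addition, not part of the module) that handles such systems:

def change_coin_dp(target_amount, coins):
    # best[a] holds a fewest-coins list for amount a ([] means unreachable)
    best = [[] for _ in range(target_amount + 1)]
    for amount in range(1, target_amount + 1):
        options = [best[amount - c] + [c]
                   for c in coins
                   if c <= amount and (amount == c or best[amount - c])]
        best[amount] = min(options, key=len) if options else []
    return sorted(best[target_amount], reverse=True)

print(change_coin_dp(6, [1, 3, 4]))   # [3, 3]
print(change_coin_dp(19, [1, 3, 5]))  # [5, 5, 5, 3, 1], matching the tests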
\".join(name_components),\n            #\trow['email'],\n            #])\n            self.contacts.append({\n                'id': row['id'],\n                'name': \" \".join(name_components),\n                'email': row['email'],\n            })\n\n    def buildQuery(self):\n        query = db.Query()\n        query.columns.extend([\n            \"contact.id\",\n            \"contact.first_name\",\n            \"contact.middle_name\",\n            \"contact.last_name\",\n            \"email.email\",\n            \"address.street_address\",\n            \"address.city\",\n            \"address.postal_code\",\n            \"state.abbreviation\",\n            \"country.iso_code\",\n        ])\n        email_clause = \"civicrm_email email ON contact.id = email.contact_id\"\n        if self.require_email:\n            email_clause += \" AND email.email IS NOT NULL\"\n        query.tables = [\n            \"civicrm_contact contact\",\n            email_clause,\n            \"civicrm_address address ON contact.id = address.contact_id\",\n            \"civicrm_country country ON address.country_id = country.id\",\n            \"civicrm_state_province state ON address.state_province_id = state.id\",\n        ]\n        query.group_by = [\n            \"contact.id\",\n        ]\n        query.order_by = [\n            \"contact.id\",\n        ]\n        return query\n\nclass PagedGroup(ContactCache):\n    pagesize = config.contact_cache_size\n\n    def __init__(self, **kw):\n        super(PagedGroup, self).__init__(**kw)\n        self.offset = 0\n\n    def buildQuery(self):\n        query = super(PagedGroup, self).buildQuery()\n        log.info(\"Limiting batch contact retrieval to {num} records.\".format(num=self.pagesize))\n        query.limit = self.pagesize\n        query.offset = self.offset\n        return query\n\n    def next(self):\n        #TODO:\n        #query.offset += self.pagesize\n        #self.fetch()\n        raise Exception(\"unimplemented\")\n\nclass TaggedGroup(PagedGroup):\n    \"\"\"Select contacts based on included and excluded tags.\"\"\"\n\n    def __init__(self, tag, excludetag=None, **kw):\n        super(TaggedGroup, self).__init__(**kw)\n        self.tag = tag\n        self.excludetag = excludetag\n\n    def buildQuery(self):\n        query = super(TaggedGroup, self).buildQuery()\n        query.tables.extend([\n            \"civicrm_entity_tag entity_tag ON entity_tag.entity_id = contact.id AND entity_tag.tag_id = %(tag_id)s AND entity_tag.entity_table = 'civicrm_contact'\",\n        ])\n        query.params.update({\n            'tag_id': self.tag.id\n        })\n\n        if self.excludetag:\n            query.tables.extend([\n                \"civicrm_entity_tag entity_tag_not ON entity_tag_not.entity_id = contact.id AND entity_tag_not.tag_id = %(excludetag_id)s AND entity_tag_not.entity_table = 'civicrm_contact'\",\n            ])\n            query.where.extend([\n                \"entity_tag_not.id IS NULL\"\n            ])\n            query.params.update({\n                'excludetag_id': self.excludetag.id\n            })\n\n        return query\n","sub_path":"dedupe/contact_cache.py","file_name":"contact_cache.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"153665581","text":"\"\"\"\nSuggestion 88: use multiprocessing to overcome the drawbacks of the GIL\n\nFor communication between processes, prefer Pipe and Queue over synchronization primitives such as Lock, Event, Condition and Semaphore.\nWhen a Queue is used for inter-process communication, the transferred objects must be picklable, otherwise put raises a PicklingError.\nmultiprocessing.Pipe([duplex]): duplex defaults to True, meaning a bidirectional pipe, otherwise it is one-way. It returns\na pair of Connection objects (conn1, conn2) representing the two ends of the pipe. It is not process-safe and only fits\ncommunication between two processes, but it performs better.\n\"\"\"\n\nfrom multiprocessing import Process, Pipe, Queue\nimport time\n\n\ndef reader_pipe(pipe):\n    # this side uses the output_p end\n    output_p, input_p = pipe\n    input_p.close()  # close the unused end; data is received on the other end\n    while True:\n        try:\n            msg = output_p.recv()  # noqa\n        except EOFError:\n            break\n\n\ndef writer_pipe(count, input_p):\n    # this side uses the input_p end\n    for i in range(0, count):\n        input_p.send(i)\n\n\ndef reader_queue(queue):\n    while True:\n        msg = queue.get()\n        if (msg == 'DONE'):\n            break\n\n\ndef writer_queue(count, queue):\n    for ii in range(0, count):\n        queue.put(ii)\n    
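PagedGroup.next() above is left as a TODO; the natural completion — my assumption, mirroring the commented-out lines in the stub — advances the offset by one page and refetches:

class IterablePagedGroup(PagedGroup):
    # hypothetical completion of the next() stub above
    def next(self):
        self.offset += self.pagesize
        self.fetch()
        return bool(self.contacts)

# usage sketch:
# group = IterablePagedGroup()
# group.fetch()
# while group.contacts:
#     handle(group.contacts)        # handle() is a placeholder
#     if not group.next():
#         break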
queue.put('DONE')\n\n\n# if __name__ == '__main__':\n#     print('testing for pipe:')\n#     for count in [10**3, 10**4, 10**5]:\n#         output_p, input_p = Pipe()\n#         reader_p = Process(target=reader_pipe, args=((output_p, input_p), ))  # pass a copy of the pipe to the child\n#         reader_p.start()\n\n#         _start = time.time()\n#         output_p.close()  # in this process close one end of the pipe and send on the other\n#         writer_pipe(count, input_p)\n#         input_p.close()\n#         reader_p.join()\n#         print('Sending %s numbers to Pipe() took %s seconds' % (count, (time.time() - _start)))\n\n#     print('testing for queue:')\n#     for count in [10**3, 10**4, 10**5]:\n#         queue = Queue()\n#         reader_p = Process(target=reader_queue, args=((queue), ))\n#         reader_p.daemon = True\n#         reader_p.start()\n\n#         _start = time.time()\n#         writer_queue(count, queue)\n#         reader_p.join()\n#         print('Sending %s numbers to Queue() took %s seconds' % (count, time.time() - _start))\n\"\"\"\nAvoid sharing resources whenever possible. When it cannot be avoided, memory can be shared through multiprocessing.Value and multiprocessing.Array\nor multiprocessing.sharedctypes, and data/state can also be shared through the server-process manager Manager().\nShared memory is faster and more efficient, but Manager() is easier to use and supports both local and remote sharing.\n\"\"\"\nimport time\nfrom multiprocessing import Process, Value\n\n\ndef func(val):\n    for i in range(10):\n        time.sleep(0.1)\n        with val.get_lock():  # must take the lock to synchronize\n            val.value += 1\n\n\nif __name__ == '__main__':\n    v = Value('i', 0)\n    processList = [Process(target=func, args=(v, )) for i in range(10)]\n    for p in processList:\n        p.start()\n    for p in processList:\n        p.join()\n    print(v.value)\n","sub_path":"suggestion88.py","file_name":"suggestion88.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572912394","text":"def main():\n    n = int(input(\"Enter the value of n: \"))\n    fatorial = 1\n    i = 2\n    while i <= n:\n        fatorial = fatorial*i\n        i = i + 1\n\n    print(\"The factorial of \" + str(n) + \" is: \" + str(fatorial))\n\n\nmain()","sub_path":"fatorial-simples/python/syngon/fatorial-simples.py","file_name":"fatorial-simples.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"562729474","text":"# -*- coding: utf-8 -*-\n\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport flask\nimport pandas as pd\nimport plotly.graph_objs as go\nimport utils\nimport files_config\nimport plotly.express as px\nimport gensim\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom dash.exceptions import PreventUpdate\n\nnltk.data.path.append(files_config.nltk_data_path_local)\nnltk.data.path.append(files_config.nltk_data_path_container)\n\nserver = flask.Flask(__name__)\napp = dash.Dash(__name__, server=server)\n\n# vocab with dimensionality reduction and most similar already calculated\ndf = pd.read_pickle(files_config.viz_df_filepath)\n# need to index on the vocab but also need to pass plotly a column\n# name to label the scatterplot, so will duplicate the vocab field\n# until can find a better way\ndf['index'] = df['vocab']\ndf = df.set_index('index')\n\n# Word2vec model\nmodel_word = gensim.models.Word2Vec.load(files_config.word_model_filepath)\n\ntfidf = gensim.models.TfidfModel.load(files_config.tfidf_filepath)\ndictionary = gensim.corpora.Dictionary([list(model_word.wv.vocab.keys())])\n\ndf_subjects = pd.read_pickle(files_config.search_lookup_filepath)\n\nlayout_children = [\n    html.H2('FOI search/similarity'),\n    html.Div(\n        \"Based on a word2vec model, corpus is request bodies and subject lines from 
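The Pipe benchmark above is commented out; here is a compact, runnable condensation of the same close-the-unused-end pattern, using the same Process/Pipe API.

from multiprocessing import Process, Pipe

def reader(pipe):
    output_p, input_p = pipe
    input_p.close()                 # the reader only receives
    total = 0
    while True:
        try:
            total += output_p.recv()
        except EOFError:            # raised once the writer closes its end
            break
    print('sum:', total)

if __name__ == '__main__':
    output_p, input_p = Pipe()
    p = Process(target=reader, args=((output_p, input_p),))
    p.start()
    output_p.close()                # the parent keeps only the write end
    for i in range(1000):
        input_p.send(i)
    input_p.close()
    p.join()                        # child prints sum: 499500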
Hackney's disclosure log\"\n ),\n dcc.Graph(id=\"graph\", style={\"width\": \"75%\", \"display\": \"inline-block\"}),\n html.H5('Most similar to:'),\n dcc.Dropdown(\n id='word-dropdown',\n placeholder='Search words in model vocabulary...',\n options=[{'value': i, 'label': i} for i in df.index],\n multi=False,\n ),\n html.Div(id='most-similar'),\n html.Br(),\n html.Hr(),\n html.H5('What do you want to know?'),\n html.Div(\n 'Returns suggestions from the disclosure log. Based on cosine similarity of vectors of submitted text vs requests. These sentence/document vectors are a TF-IDF weighted average of the vectors of the constituent words.'\n ),\n html.Br(),\n dcc.Textarea(\n id='search-textarea',\n placeholder='Your request...',\n rows=50,\n style={'width': '50%'},\n ),\n html.Br(),\n html.Button('Submit', id='search_log_button'),\n html.Br(),\n html.Div(id='results-list'),\n html.Br(),\n html.Br(),\n html.Hr(),\n]\n\napp.layout = html.Div(children=layout_children)\n\n\n@app.callback(\n Output('results-list', 'children'),\n [Input('search-textarea', 'value'), Input('search_log_button', 'n_clicks')],\n)\ndef update_search_results(query, n_clicks):\n if not n_clicks:\n return html.Div()\n ctx = dash.callback_context\n input_id = ctx.triggered[0]['prop_id'].split('.')[0]\n if not (ctx.triggered and input_id == 'search_log_button'):\n raise PreventUpdate\n else:\n df_results = utils.search_log(\n query, 5, model_word, df_subjects, dictionary, tfidf\n )\n rows = []\n for i in range(len(df_results)):\n row = []\n for col in df_results.columns:\n value = df_results.iloc[i][col]\n if col == 'url':\n cell = html.Td(html.A(href=value, target=\"_blank\", children=value))\n else:\n cell = html.Td(children=value)\n row.append(cell)\n rows.append(html.Tr(row))\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in df_results.columns])]\n + rows\n )\n\n\n@app.callback(Output('most-similar', 'children'), [Input('word-dropdown', 'value')])\ndef update_most_similar(chosen_word):\n if chosen_word:\n similar = df.loc[chosen_word]['most_similar']\n return html.Table([html.Tr(html.Td(' '.join(map(str, i)))) for i in similar])\n\n\n@app.callback(Output(\"graph\", \"figure\"), [Input('word-dropdown', 'value')])\ndef make_figure(chosen_word):\n fig = px.scatter(data_frame=df, x='x', y='y', text='vocab')\n return fig\n\n\nif __name__ == '__main__':\n # app.run_server(debug=True)\n app.run_server(debug=False, port=8080)\n","sub_path":"foi_semantic_search/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526159735","text":"import numpy as np\nimport re\n\ninputfile = \"input11\"\nexample = '''Monkey 0:\n Starting items: 79, 98\n Operation: new = old * 19\n Test: divisible by 23\n If true: throw to monkey 2\n If false: throw to monkey 3\n\nMonkey 1:\n Starting items: 54, 65, 75, 74\n Operation: new = old + 6\n Test: divisible by 19\n If true: throw to monkey 2\n If false: throw to monkey 0\n\nMonkey 2:\n Starting items: 79, 60, 97\n Operation: new = old * old\n Test: divisible by 13\n If true: throw to monkey 1\n If false: throw to monkey 3\n\nMonkey 3:\n Starting items: 74\n Operation: new = old + 3\n Test: divisible by 17\n If true: throw to monkey 0\n If false: throw to monkey 1'''\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nALPHABET = alphabet.capitalize()\n\nneighbours = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n\ndef textgrid_to_numbers(lines):\n return np.array([[int(x) 
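The dashboard text above describes document vectors as a TF-IDF weighted average of the constituent word vectors; utils.search_log itself isn't shown, so the sketch below is my reading of that description, on a toy corpus (gensim 4 API, hence vector_size).

import numpy as np
from gensim.corpora import Dictionary
from gensim.models import TfidfModel, Word2Vec

corpus = [['housing', 'repairs', 'budget'], ['parking', 'fines', 'budget']]
w2v = Word2Vec(corpus, vector_size=16, min_count=1, seed=0)
dictionary = Dictionary(corpus)
tfidf = TfidfModel(dictionary=dictionary)

def doc_vector(tokens):
    bow = dictionary.doc2bow(tokens)
    weights = dict(tfidf[bow])
    pairs = [(w2v.wv[dictionary[i]], weights.get(i, 0.0)) for i, _ in bow]
    total = sum(w for _, w in pairs)
    if not total:
        return np.zeros(w2v.vector_size)
    return sum(v * w for v, w in pairs) / total

print(doc_vector(['housing', 'budget']).shape)  # (16,)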
for x in line] for line in lines], dtype=int)\n\ndef traverse(root):\n sum = 0\n for entry in root.keys():\n if entry == '..':\n continue\n if isinstance(root[entry], dict):\n sum += traverse(root[entry])\n else:\n sum += root[entry]\n global totsum\n totsum.append(sum)\n return sum\n\ndef append_if_exists(dictionary: dict, key, val):\n if key not in dictionary.keys():\n dictionary[key] = []\n\n dictionary[key].append(val)\n\ndef test(level, divisor):\n return level % divisor == 0\n\ndef main():\n file_contents = ''\n with open(inputfile) as infile:\n file_contents = infile.read()\n \n lines = file_contents.splitlines() \n # lines = example.splitlines()\n\n monkeys = []\n\n print(f\"Read {len(lines)} lines\")\n\n common_divisor = 1\n\n current_monkey = {}\n monkey_number = -1\n i = 0\n while i < len(lines):\n line = lines[i]\n assert line[:6] == \"Monkey\"\n monkey_number = int(line.split(' ')[-1][:-1]) # remove \":\"\n assert len(monkeys) == monkey_number\n current_monkey['items'] = [int(item) for item in (lines[i+1].split(': ')[1]).split(', ')]\n op_text = lines[i+2].split('new = ')[1]\n if 'old +' in op_text:\n plus = int(op_text.split(' ')[-1])\n def plus_fun(plus_):\n return lambda level: level + plus_\n current_monkey['operation'] = plus_fun(plus)\n elif 'old * old' in op_text:\n current_monkey['operation'] = lambda level: level * level\n elif 'old * ' in op_text:\n mult = int(op_text.split(' ')[-1])\n def mult_fun(mult_):\n return lambda level: level * mult_\n current_monkey['operation'] = mult_fun(mult)\n else:\n raise Exception('parsing error in op')\n\n current_monkey['test_div'] = int(lines[i + 3].split(' ')[-1])\n common_divisor *= current_monkey['test_div']\n current_monkey['test_res_true'] = int(lines[i + 4].split(' ')[-1])\n current_monkey['test_res_false'] = int(lines[i + 5].split(' ')[-1])\n current_monkey['activity'] = 0\n\n monkeys.append(current_monkey)\n current_monkey = {}\n\n i += 7\n\n # play game\n for i in range(10000):\n print(f'round {i}')\n for j, monkey in enumerate(monkeys):\n # print(monkey)\n monkey['activity'] += len(monkey['items'])\n for item in monkey['items']:\n op_level = monkey['operation'](item)\n new_level = op_level % common_divisor\n # print(f'op_level: {op_level}, new: {new_level}')\n if test(new_level, monkey['test_div']):\n monkeys[monkey['test_res_true']]['items'].append(new_level)\n else:\n # print(f'monkey {j} throws {new_level}')\n monkeys[monkey['test_res_false']]['items'].append(new_level)\n monkey['items'] = []\n\n print(sorted([monkey['activity'] for monkey in monkeys]))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407541677","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom scrapy.http import Request\nimport urlparse\nimport datetime\n\nfrom ArticleSpider.items import JobboleArticleItem\nfrom ArticleSpider.utils.common import get_md5\n\nclass JobboleSpider(scrapy.Spider):\n name = 'jobbole'\n allowed_domains = ['blog.jobbole.com']\n start_urls = ['http://blog.jobbole.com/all-posts/']\n\n # Scrapy will make request to start_urls with parse() as call back.\n\n # extract detailed page urls from the overview page, yield a RequestForUrl + parse_detail for each page;\n # after visit the urls in current overview page, yield a RequestForNextPage + parse;\n def parse(self, response):\n # Use CSS selector instead of Xpath selector (they have the 
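The day11 solver above keeps worry levels bounded with new_level = op_level % common_divisor; reducing modulo the product of every monkey's test divisor preserves each individual divisibility test, which a tiny check confirms:

divisors = [23, 19, 13, 17]              # the example monkeys' test divisors
common = 1
for d in divisors:
    common *= d                           # 96577

level = 123456789 ** 2                    # a worry level that would explode
reduced = level % common
print(all((level % d == 0) == (reduced % d == 0) for d in divisors))  # True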
same effect) to select detailed urls list\n        # select all nodes: id=archive / class=post && floated-thumb / class=post-thumb / tag=a\n        detailNodes = response.css(\"#archive .post.floated-thumb .post-thumb a\")\n        for detailNode in detailNodes:\n            # print(detailedUrl)\n            # Asynchronous request (Scrapy helps to handle it) for the detailed page, and then call \"parse_detail\"\n            # \"Request\" is in scrapy.http; \"parse.urljoin\" is in urllib, to generate a complete url (sometimes the detailed url is a relative url).\n            # (Assume the coverPicture for each article can only be seen on the overview page (not the detailed page), then we need to get the picture and pass it into parse_detail, using \"meta\")\n            imgSrc = detailNode.css(\"img::attr(src)\").extract_first()\n            detailUrl = detailNode.css(\"::attr(href)\").extract_first()\n            yield Request(urlparse.urljoin(response.url, detailUrl), meta={\"coverImg\": imgSrc}, callback=self.parse_detail)\n\n        # Find next overview page, make request and call self (parse()) again.\n        # .extract_first(defaultValue) returns defaultValue if the extracted list is empty, otherwise the first element\n        nextUrl = response.css(\".navigation .next.page-numbers::attr(href)\").extract_first(\"\")\n        if nextUrl:\n            yield Request(nextUrl, callback=self.parse)\n        pass\n\n    # parse information for a detailed page\n    def parse_detail(self, response):\n        # A DOM element id is globally unique;\n        # the source scrapy fetches is the html before any js has run (equivalent to viewing the page source in a browser);\n        # an xpath copied from the browser dev tools, however, is based on the html after js has run;\n        # so xpaths copied from the browser such as /div[1]/div[2]... are sometimes unreliable;\n        # expressions that carry an id, like \"*[@id=\"...\"]\", or a real content class, like \"*[@class=\"header\"]/...\", are more dependable\n        # re_selector = response.xpath('//*[@id=\"post-114442\"]/div[1]/h1')\n\n        # response.xpath(...) returns a SelectorList;\n        # here element [0] is a Selector:\n        # (Selector or SelectorList) .extract() returns (data or dataList)\n\n        coverImg = response.meta.get(\"coverImg\", \"\")\n        title = response.css(\".entry-header h1::text\").extract_first()\n        date = response.xpath('//*[@class=\"entry-meta\"]/p[1]/text()')[0].extract().replace('·', '').strip()\n\n        # < span data - post - id = \"114442\"class =\" btn-bluet-bigger href-style vote-post-up register-user-only \" > < i class =\"fa fa-thumbs-o-up\" > < / i > < h10 id=\"114442votetotal\" > 1 < / h10 > 赞 < / span >\n        thumbUp = int(response.xpath(\"//span[contains(@class, 'vote-post-up')]/h10/text()\").extract()[0])\n\n        favSpan = response.xpath(\"//span[contains(@class, 'bookmark-btn')]/text()\").extract()[0]\n        favMatch = re.match(\".*?(\\\\d+).*\", favSpan)\n        if favMatch:\n            favNum = int(favMatch.group(1))\n        else:\n            favNum = 0\n\n        comment = response.xpath(\"//a[@href='#article-comment']/span/text()\").extract()[0]\n        commentMatch = re.match('.*?(\\\\d+).*', comment)\n        if commentMatch:\n            commentNum = int(commentMatch.group(1))\n        else:\n            commentNum = 0\n\n        tagList = response.xpath(\"//p[@class='entry-meta-hide-on-mobile']/a/text()\").extract()\n        # Delete the \"k 评论\" (comment-count) tag\n        tagList = [element for element in tagList if not (element.strip().endswith(\"评论\"))]\n        tags = \",\".join(tagList)\n\n        contentList = response.css(\"div.entry *::text\").extract()\n        content = \" \".join(contentList)\n        content = content.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n        content = \" \".join(content.split())\n\n        item = JobboleArticleItem()\n        item[\"title\"] = title\n        try:\n            create_date = datetime.datetime.strptime(date, \"%Y/%m/%d\").date()\n        except Exception as e:\n            create_date = datetime.datetime.now().date()\n        item[\"date\"] = create_date\n        item[\"url\"] = response.url\n        # 
item[\"urlObjId\"]\n # item[url] would be processed as a list in \"pipelines\"\n item[\"coverImgUrl\"] = [coverImg]\n item[\"thumbUp\"] = thumbUp\n item[\"favNum\"] = favNum\n item[\"commentNum\"] = commentNum\n item[\"tags\"] = tags\n item[\"content\"] = content\n item[\"urlObjId\"] = get_md5(response.url)\n\n #yield send item to pipeline if \"settings\" enable pipleline\n yield item\n\n pass\n","sub_path":"ArticleSpider/spiders/jobbole.py","file_name":"jobbole.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312272337","text":"from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"NWL\"\n addresses_name = \"2021-04-16T11:45:50.143865/North West Leicestershire Democracy_Club__06May2021 (2).tsv\"\n stations_name = \"2021-04-16T11:45:50.143865/North West Leicestershire Democracy_Club__06May2021 (2).tsv\"\n elections = [\"2021-05-06\"]\n csv_delimiter = \"\\t\"\n\n def address_record_to_dict(self, record):\n uprn = record.property_urn.strip().lstrip(\"0\")\n\n if uprn in [\n \"200003503741\", # OLD FARMHOUSE, NOTTINGHAM ROAD, STAUNTON HAROLD, ASHBY-DE-LA-ZOUCH\n \"100030573013\", # THE OLD SLAUGHTER HOUSE, PARK LANE FARM, PARK LANE, CASTLE DONINGTON, DERBY\n ]:\n return None\n\n if record.addressline6 in [\n \"LE65 2RF\",\n \"DE74 2DE\",\n \"DE11 8HB\",\n \"LE67 6HP\",\n \"LE12 9TB\",\n \"LE67 5DJ\",\n \"DE12 6DW\",\n ]:\n return None\n\n return super().address_record_to_dict(record)\n\n def station_record_to_dict(self, record):\n\n # Hall Lane Methodist Church Hall Lane Whitwick LE67 5FD\n if record.polling_place_id == \"4013\":\n record = record._replace(polling_place_postcode=\"\")\n\n return super().station_record_to_dict(record)\n","sub_path":"polling_stations/apps/data_importers/management/commands/import_north_west_leicestershire.py","file_name":"import_north_west_leicestershire.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415850249","text":"import numpy as np\nimport pickle\nimport sys\nfrom WGS84toENU import *\n\n__all__ = [ \"getCameraParams\" ]\n\n\ndef getCameraParams():\n cam = [{}, {}]\n for i in [0, 1]:\n cam[i]['R_to_c_from_i'] = np.array([[-1, 0, 0],\n [0, 0, -1],\n [0, -1, 0]])\n\n if i == 0:\n cam[i]['rot_x'] = deg2rad(-0.8) # better cam 1\n cam[i]['rot_y'] = deg2rad(-0.5)\n cam[i]['rot_z'] = deg2rad(-0.005)\n cam[i]['t_x'] = -0.5\n cam[i]['t_y'] = 1.1\n cam[i]['t_z'] = 0.0\n elif i == 1:\n cam[i]['rot_x'] = deg2rad(-0.61) # better cam 2\n cam[i]['rot_y'] = deg2rad(0.2)\n cam[i]['rot_z'] = deg2rad(0.0)\n cam[i]['t_x'] = 0.5\n cam[i]['t_y'] = 1.1\n cam[i]['t_z'] = 0.0\n\n cam[i]['fx'] = 2221.8\n cam[i]['fy'] = 2233.7\n cam[i]['cu'] = 623.7\n cam[i]['cv'] = 445.7\n cam[i]['KK'] = np.array([[cam[i]['fx'], 0.0, cam[i]['cu']],\n [0.0, cam[i]['fy'], cam[i]['cv']],\n [0.0, 0.0, 1.0]])\n cam[i]['f'] = (cam[i]['fx'] + cam[i]['fy']) / 2\n\n return cam\n\nif __name__ == '__main__':\n cam= getCameraParams()\n filename = 'cam_params.pickle'\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n pickle.dump(cam, open(filename, 'wb'))\n\n","sub_path":"src/caffe/py_lane_label_reader/CameraParams.py","file_name":"CameraParams.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334599463","text":"from 
magma import *\nfrom mantle import *\n\n__all__ = ['Input', 'Output']\n\ndef Input(N, inputs):\n\n print('Building input ports')\n\n A = In(Bits(N))()\n n = len(inputs)\n if n == 1:\n O = inputs[0]\n elif n == 2:\n mux = Mux(2, N)\n mux(inputs[0], inputs[1], A[0])\n O = mux.O\n elif n == 4:\n mux = Mux(4, N)\n mux(inputs[0], inputs[1], inputs[2], inputs[3], A[0:2])\n O = mux.O\n elif n == 8:\n mux = Mux(8, N)\n mux(inputs[0], inputs[1], inputs[2], inputs[3],\n inputs[4], inputs[5], inputs[6], inputs[7], A[0:3])\n O = mux.O\n\n return AnonymousCircuit(\"A\", A, \"O\", O)\n\n\n#\n# Generate a register with the same number of bits as the output\n#\ndef OutputOne(input, output, we):\n reg = Register(8, has_ce=True)\n reg(input, ce=we)\n wire(reg, output)\n\ndef Output(N, outputs):\n\n print('Building output ports')\n I = In(Bits(N))()\n A = In(Bits(N))()\n we = In(Bit)()\n\n if len(outputs) == 1:\n OutputOne(I, outputs[0], we)\n else:\n for output in outputs:\n en = Decode(i, N)(A)\n en = And2()(en, we)\n OutputOne(I, output, en)\n\n return AnonymousCircuit(\"A\", A, \"I\", I, \"WE\", we )\n\n","sub_path":"pico40/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378139414","text":"def compare(s1, s2):\n if not s1 and not s2:\n return True\n if not s1 or not s2:\n return False\n\n r1 = []\n i = 0\n while i < len(s1):\n if s1[i] == '-':\n r1.pop()\n i += 2\n else:\n r1.append(s1[i])\n i += 1\n\n r1 = ''.join(r1)\n print(r1)\n\n r2 = []\n i = 0\n while i < len(s2):\n if s2[i] == '-':\n r2.pop()\n i += 2\n else:\n r2.append(s2[i])\n i += 1\n\n r2 = ''.join(r2)\n print(r2)\n\n if r1 == r2:\n return True\n else:\n return False\n\n\ndef compare_2(s1, s2):\n if not s1 and not s2:\n return True\n if not s1 or not s2:\n return False\n\n i = j = 0\n l1 = []\n while i < len(s1):\n if s1[i] == '-':\n l1.append('-1')\n i += 2\n else:\n l1.append(s1[i])\n i += 1\n l2 = []\n while j < len(s2):\n if s2[j] == '-':\n l2.append('-1')\n j += 2\n else:\n l2.append(s2[j])\n j += 1\n\n print(l1, l2)\n\n minus1_count1 = 0\n i = len(l1) - 1\n minus1_count2 = 0\n j = len(l2) - 1\n while i >= 0 and j >= 0:\n while l1[i] == '-1':\n minus1_count1 += 1\n i -= 1\n while l2[j] == '-1':\n minus1_count2 += 1\n j -= 1\n\n i -= minus1_count1\n minus1_count1 = 0\n j -= minus1_count2\n minus1_count2 = 0\n\n print(i, j)\n print(l1[i], l2[j])\n if l1[i] == l2[j]:\n i -= 1\n j -= 1\n continue\n else:\n return False\n\n if minus1_count1 != minus1_count2:\n return False\n\n return True\n\n\nstring1 = '1-12-13-14-156789'\nstring2 = '567891234-1-1-1-1'\n\nprint(compare_2(string1, string2))\nprint(compare(string1, string2))\n","sub_path":"Google/输入数字比较相同.py","file_name":"输入数字比较相同.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"518897031","text":"#!/usr/bin/env python\nimport random \nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport argparse\n\ndcoef = {'v1': \"0.174323\", 'v2': \"0.1918\", 'v3': \"0.1928\", 'v4': \"0.2155\", 'v5': \"0.2893\", 'v6': \"0.3308\", 'v7': \"0.347\", 'v8': \"0.4115\", 'v9':\"0.5688\", 'v10': \"1.0\"}\nD_list = [\"v1\", \"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v9\", \"v10\"]\n\nd_uc = 26.6226e-8\n\ndef buildMOF(x, y, z):\n mof, clist, coord = [], [], []\n if len(z) == 0:\n for i in x:\n for j in y:\n clist.append([i,j])\n elif len(z) != 0:\n 
for i in x:\n for j in y:\n for k in z:\n clist.append([i, j, k])\n for i in range(len(clist)):\n slist = ','.join([str(n) for n in clist[i]])\n coord.append(slist)\n# random.seed(10) \n for i in range(len(coord)):\n mof.append(random.choice(D_list))\n \n dcoord = dict(zip(coord,mof))\n coord_split = [coord[i].split(',') for i in range(len(coord))]\n coef_list =[]\n for i in mof:\n coef_list.append(dcoef[i])\n \n return dcoord, mof, coord, coef_list\n\n\ndef find2Dneigh(smof):\n neigh_list = []\n smof_x = int(smof[0])\n smof_y = int(smof[1])\n mol1 = [smof_x, smof_y-1]\n mol2 = [smof_x+1, smof_y-1]\n mol3 = [smof_x+1, smof_y]\n mol4 = [smof_x+1, smof_y+1]\n mol5 = [smof_x, smof_y+1]\n list1 = [mol1, mol2, mol3, mol4, mol5]\n \n for i in range(len(list1)):\n if (list1[i][1] != B) and (list1[i][0] != A) and (list1[i][1] != -1):\n neigh_list.append(list1[i]) \n \n return neigh_list\n\n\ndef forwardN(smof):\n n = 3\n f = int((1/3)*100 + n)\n v = int((1/3)*100 + n/2)\n b = 100 - f - v\n sub_list = []\n smof_x = int(smof[0])\n smof_y = int(smof[1])\n pk = random.randint(1,100)\n if smof_x == 0 and smof_y != 0:\n if pk <= 30:\n x = 0\n y = [1] \n else:\n x = 1\n y = [0, 1]\n elif smof_x != 0 and smof_y == 0:\n if pk <= 30:\n x = 0\n y = [1]\n else: \n x = 1\n y = [0, 1]\n elif smof_x == 0 and smof_y == 0:\n if pk <= 30:\n x = 0\n y = [1]\n else: \n x = 1\n y = [0, 1]\n else: \n if pk <= b:\n x = -1\n y = [-1, 0, 1]\n elif pk > b and pk <= b + v:\n x = 0\n y = [-1, 1]\n else:\n x = 1\n y = [-1, 0, 1]\n \n for i in y: \n nei_mof1 = (smof_x + x, smof_y + i)\n if nei_mof1[0] >= 0 and nei_mof1[1] >=0 and nei_mof1[0] < A and nei_mof1[1] < A:\n sub_list.append(nei_mof1)\n \n return sub_list\n\n\ndef compNeigh(sub_list, dcoord):\n neigh_D, str_list, mof_f = [], [], []\n D_list = []\n local_d = {}\n\n for i in range(len(sub_list)):\n str_list_i = ','.join([str(n) for n in sub_list[i]])\n str_list.append(str_list_i) # '1,1'\n a = dcoord[str_list_i] # 6.0\n\n neigh_D.append(str(dcoord[str_list_i])) #'4'\n\n D_list.append(str(a))\n\n # find the next mof to walk to:\n d_t = dict(zip(str_list, D_list))\n dict2 = {}\n names = set(d_t.values())\n d = {}\n for n in names:\n d[n] = [k for k in d_t.keys() if d_t[k] == n ]\n pool = []\n \n if 'v10' in d:\n a = random.randint(1,100)\n if a <= 90:\n temp_f = random.choice(d.get('v10'))\n mof_f = list(map(int, temp_f.split(',')))\n else:\n new_d = {key:val for key, val in d.items() if key != 'v10'}\n for i in new_d:\n f = int(len(new_d[i]))*int(i[1])\n for j in range(f):\n pool.append(str(new_d[i])[1:-1].replace(\"'\", \"\"))\n temp = random.choice(random.choice(pool).split(', '))\n mof_f = list(map(int, temp.split(',')))\n\n else:\n for i in d:\n f = int(len(d[i]))*int(i[1])\n for j in range(f):\n pool.append(str(d[i])[1:-1].replace(\"'\", \"\"))\n \n tp, tp2 = [], []\n for i in pool:\n tp.append(list(i.split(', ')))\n for i in tp:\n for j in i:\n tp2.append(j)\n temp = random.choice(tp2)\n mof_f = list(map(int, temp.split(',')))\n \n \n return neigh_D, str_list, mof_f\n\n\ndef caldist(mof1, mof2):\n dist = math.sqrt(((mof2[0]-mof1[0])**2) + ((mof2[1]-mof1[1])**2)) \n #for 3D walk\n# dist = math.sqrt(((mof2[0]-mof1[0])**2) + ((mof2[1]-mof1[1])**2)) + ((mof2[2]-mof1[2])**2))\n return dist\n\n\ndef randwalk(smof, N, dcoord):\n path = [smof]\n dist = 0\n temp = 0\n time1 = 0\n time1_list, time2_list = [], []\n dt_list, dx_list = [], []\n time2 = 0\n time = 0\n sub_list = forwardN(smof)\n for i in range(N):\n neigh_D, str_list, mof_f = compNeigh(sub_list, dcoord)\n 
cod_smof = ','.join(map(str,smof))\n cod_mof_f = ','.join(map(str,mof_f))\n D_smof = dcoef[dcoord[cod_smof]]\n D_mof_f = dcoef[dcoord[cod_mof_f]]\n dx = caldist(mof_f, smof)\n if dcoord[cod_smof] != 'v10':\n d_time = (dx**2)/(4*float(D_smof))\n time1_list.append((dx**2)/(4*float(D_smof)))\n time1 += (dx**2)/(4*float(D_smof))\n elif dcoord[cod_mof_f] == 'v10':\n d_time = (dx)/(2*float(D_mof_f))\n time2_list.append((dx)/(2*float(D_mof_f)))\n time2 += (dx)/(2*float(D_mof_f))\n elif dcoord[cod_smof] == 'v10':\n d_time = (dx)/(2*float(D_smof))\n time2_list.append((dx)/(2*float(D_smof)))\n time2 += (dx)/(2*float(D_smof))\n\n temp += d_time\n dt_list.append(round(temp, 3))\n\n \n dist += caldist(mof_f, smof)\n dx_list.append(round(dist, 3))\n \n sub_list = forwardN(mof_f)\n \n if smof in sub_list:\n sub_list.remove(smof)\n else:\n pass\n smof = mof_f\n path.append(smof)\n time = time1 + time2\n if len(sub_list) == 0 or (mof_f[0] == A-1):\n break\n \n return path, dist, time, dt_list, dx_list\n\n\ndef cmap(path, coef_list):\n w = A \n h = B \n d = 100\n relist = np.array([float(i) for i in coef_list]).reshape(A,B)\n \n color_map = plt.imshow(relist)\n color_map.set_cmap(\"Blues_r\")\n \n plt.savefig('temp1.png')\n \n for i in path:\n x = i[0]\n y = i[1]\n relist[y][x] = 0 \n \n color_map = plt.imshow(relist)\n color_map.set_cmap(\"Blues_r\")\n\n plt.savefig('temp2.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate 2D walk plot and data')\n parser.add_argument('-a', nargs=1, help='Input lattice dimension', required=True)\n args = parser.parse_args()\n\n if vars(args)['a'][0] != 'NULL':\n A = int(vars(args)['a'][0])\n B = A\n smof = [0, int(A/2)]\n N = A*B*10\n tot_dist = A * d_uc\n C = 0\n x = [ i for i in range(A)]\n y = [ i for i in range(B)]\n z = [ i for i in range(C)]\n dcoord, mof, coord, coef_list = buildMOF(x, y, z)\n sub_list = forwardN(smof)\n neigh_D, str_list, mof_f = compNeigh(sub_list, dcoord)\n path, dist, time, dt_list, dx_list = randwalk(smof, N, dcoord)\n cmap(path, coef_list) \n\n","sub_path":"RandomWalk/2D_random.py","file_name":"2D_random.py","file_ext":"py","file_size_in_byte":7363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123598838","text":"import argparse\nimport shutil\nimport sys\nimport os\n\nfrom interface import *\nfrom error import *\nfrom utils import *\n\nparser = argparse.ArgumentParser(description='Main Interface')\nparser.add_argument('--skip', help='number of images skipped', default=1, required=False)\nparser.add_argument('--noErr', help='skip error metric computation', default=False, required=False, action='store_true')\nparser.add_argument('--noDiff', help='skip difference image computation', default=False, required=False, action='store_true')\nargs = parser.parse_args()\n\nDATA_ROOT_PATH \t\t= \"/home/Workspace/data/\"\nMETA_DATA_FILENAME \t= \"cyclic_meta.csv\"\nERROR_FILENAME \t\t= \"cyclic_error.csv\"\nINSTALL_PATH \t\t= \"/home/Workspace/install\"\nSKIP_NUM\t\t\t= 1\n\ndef skipFrames(path, skipNum):\n\t\n\tfileList = os.listdir(path)\n\timgList = filterImages(fileList)\n\timgList = sorted(imgList)\n\n\tkeepNum = skipNum + 1\n\n\tfor counter, fileName in enumerate(imgList):\n\t\t\n\t\tif((counter % keepNum) != 0):\n\t\t\tos.remove(os.path.join(path, fileName))\n\ndef parseArgs():\n\t\n\tglobal SKIP_NUM\n\tSKIP_NUM = int(args.skip)\n\t\n\tif(SKIP_NUM != 0 and SKIP_NUM != 1 and SKIP_NUM != 3 and SKIP_NUM != 7):\n\t\tprint(\"Invalid number of images to be 
skipped (\" + str(SKIP_NUM) + \")\")\n\t\tsys.exit(1)\n\t\t\n\nif __name__ == \"__main__\":\n\t\n\tparseArgs()\n\t\n\t# Assume all images are in root directory\n\t\n\torigPath \t= os.path.join(DATA_ROOT_PATH, \"orig\")\n\ttempPath \t= os.path.join(DATA_ROOT_PATH, \"temp\")\n\toutPath \t= os.path.join(DATA_ROOT_PATH, \"out\")\n\tdiffPath \t= os.path.join(DATA_ROOT_PATH, \"diff\")\n\t\n\t# Reset directory if anything was done\n\t\n\tif(os.path.exists(os.path.join(DATA_ROOT_PATH, META_DATA_FILENAME))):\n\t\tos.remove(os.path.join(DATA_ROOT_PATH, META_DATA_FILENAME))\n\t\t\n\tif(os.path.exists(os.path.join(DATA_ROOT_PATH, ERROR_FILENAME))):\n\t\tos.remove(os.path.join(DATA_ROOT_PATH, ERROR_FILENAME))\n\t\n\tif(os.path.exists(origPath)):\n\t\tmoveAllFiles(origPath, DATA_ROOT_PATH)\n\t\tshutil.rmtree(origPath)\n\t\n\tif(os.path.exists(tempPath)):\n\t\tshutil.rmtree(tempPath)\n\t\t\n\tif(os.path.exists(outPath)):\n\t\tshutil.rmtree(outPath)\n\t\t\n\tif(os.path.exists(diffPath)):\n\t\tshutil.rmtree(diffPath)\n\t\n\t# Create directory structure\n\t\n\tos.makedirs(origPath)\n\tos.makedirs(tempPath)\n\tos.makedirs(outPath)\n\t\n\tmoveAllFiles(DATA_ROOT_PATH, origPath)\n\tcopyAllFiles(origPath, tempPath)\n\tskipFrames(tempPath, SKIP_NUM)\n\t\n\t# Do the interpolation\n\t\n\tprint(\"Interpolating images ...\")\n\t\n\tmeta_data = []\n\t\n\tif(SKIP_NUM == 0 or SKIP_NUM == 1):\n\t\tmeta_data = interpolate(tempPath, outPath, meta_data, INSTALL_PATH)\n\telif(SKIP_NUM == 3):\n\t\tmeta_data = interpolate(tempPath, outPath, meta_data, INSTALL_PATH)\n\t\tdeleteAllFiles(tempPath)\n\t\tmoveAllFiles(outPath, tempPath)\n\t\tmeta_data = interpolate(tempPath, outPath, meta_data, INSTALL_PATH)\n\telif(SKIP_NUM == 7):\n\t\tmeta_data = interpolate(tempPath, outPath, meta_data, INSTALL_PATH)\n\t\tdeleteAllFiles(tempPath)\n\t\tmoveAllFiles(outPath, tempPath)\n\t\tmeta_data = interpolate(tempPath, outPath, meta_data, INSTALL_PATH)\n\t\tdeleteAllFiles(tempPath)\n\t\tmoveAllFiles(outPath, tempPath)\n\t\tmeta_data = interpolate(tempPath, outPath, meta_data, INSTALL_PATH)\n\t\n\tshutil.rmtree(tempPath)\n\t\n\t# Write out metadata\n\t\n\tmeta_data_file = open(os.path.join(DATA_ROOT_PATH, META_DATA_FILENAME), \"w\")\n\tmeta_data_file.write(\"Avg time per img, Total time, width, height, interp img num\\n\")\n\tmeta_data_file.write(str(meta_data[0]) + \",\" + str(meta_data[1]) + \",\" + str(meta_data[2]) + \",\" + str(meta_data[3]) + \",\" + str(meta_data[4]) + \"\\n\")\n\tmeta_data_file.close()\n\t\n\t# Compute Error\n\t\n\tif(SKIP_NUM != 0):\n\t\t\n\t\tif(not args.noDiff):\n\t\t\tprint(\"Calculating Diff Imgs ...\")\n\t\t\tcomputeDiffImgs(origPath, outPath, diffPath)\n\t\t\t\n\t\tif(not args.noErr):\n\t\t\tprint(\"Calculating Errors ...\")\n\t\t\t\n\t\t\terror_data = []\n\t\t\terror_data = computeErrorMetrics(origPath, outPath, error_data)\n\t\n\t\t\t# Write err to file\n\t\n\t\t\terror_data_file = open(os.path.join(DATA_ROOT_PATH, ERROR_FILENAME), \"w\")\n\t\t\terror_data_file.write(\"Index, MSE, SSIM, MS_SSIM, PSNR, VIFP, UQI\\n\")\n\t\t\n\t\t\tcounter = 0\n\t\t\twhile(counter < len(error_data)):\n\t\t\t\terror_data_file.write(str(error_data[counter+0]) + \",\" + str(error_data[counter+1]) + \",\" + str(error_data[counter+2]) + \",\" + str(error_data[counter+3]) + \",\" + str(error_data[counter+4]) + \",\" + str(error_data[counter+5]) + \",\" + str(error_data[counter+6]) + \"\\n\")\n\t\t\t\n\t\t\t\tcounter = counter + 
7;\n\t\t\n\t\t\terror_data_file.close()\n\t\t\n\tprint(\"Done.\")\n\t\n\n","sub_path":"docker/cyclic/Workspace/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"386382406","text":"newString = ''\nuserString = input(\"Please enter a string: \")\ncount = 1\nuserString = userString.lower()\n\nfor char in userString:\n if count % 2 == 0:\n newString += char.upper()\n count += 1\n else:\n newString += char\n count += 1\nprint(newString)\n\ndef splitStr(string):\n return [ch for ch in string]\n\nnewStringSplit = splitStr(newString)\nnewStringLen = len(newStringSplit)\nnewStringFinalChar = newStringLen - 1\n\nif newStringSplit[0] == \"(\" and newStringSplit[newStringFinalChar] == \")\":\n print(\"Balanced!\")\nelse:\n print(\"Not Balanced!\")\n","sub_path":"00_parentheses.py","file_name":"00_parentheses.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"180189768","text":"from qubo_helper import Qubo\nfrom tsp_problem import TSPProblem \nfrom vrp_problem import VRPProblem\nfrom itertools import product\nimport DWaveSolvers\nimport networkx as nx\nimport numpy as np\nfrom vrp_solvers import *\n\nif __name__ == '__main__':\n\n # Some graph\n n = 20\n \"\"\"paths = np.random.random_integers(1, 10, (n, n))\n\n for u in range(n):\n paths[u][u] = 0\n\n for u in range(n):\n for (i, j) in product(range(n), range(n)):\n paths[i][j] = min(paths[i][j], paths[i][u] + paths[u][j])\"\"\"\n\n paths = np.zeros((n, n))\n for (i, j) in product(range(n), range(n)):\n paths[i][j] = abs(i - j)\n\n # Problem parameters\n sources = [5, 15]\n #sources = [0, 3, 15, 50, 77, 38, 89]\n costs = paths\n time_costs = costs\n #capacities = [n, n, n, n, n, n, n, n, n, n]\n capacities = [n, n]\n #dests = [1, 2, 16, 19, 8, 25, 55, 33, 31, 88, 97, 24, 10, 61, 48, 11, 92, 54, 38, 65]\n dests = [2, 4, 10, 19]\n weights = [1 for _ in range(0, n)]\n\n time_windows = dict()\n time_windows[2] = 10\n time_windows[4] = 15\n time_windows[19] = 10\n time_windows[10] = 20\n\n only_one_const = 100.\n order_const = 1.\n capacity_const = 0.\n time_const = 0.\n\n problem = VRPProblem(sources, costs, time_costs, capacities, dests, weights, time_windows)\n\n solver = FullQuboSolver(problem)\n #solver = AveragePartitionSolver(problem)\n\n result = solver.solve(only_one_const, order_const, capacity_const, time_const,\n solver_type = 'qbsolv', num_reads = 100)\n print(result.solution)\n #result.description()\n","sub_path":"vrp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"109113142","text":"import os\r\nimport re\r\n\r\ndef fileslist():\r\n kartoteka = os.listdir('.')\r\n return kartoteka\r\n\r\ndef readfiles(kartoteka):\r\n texts = []\r\n for file in kartoteka:\r\n with open(file, encoding=\"utf-8\") as f:\r\n text = f.read()\r\n texts.append(text)\r\n return texts\r\n\r\ndef maketable(text_list):\r\n for text in text_list:\r\n data1 = re.search('meta content=\"(.*)\" name=\"docid\"', text)\r\n if data1:\r\n docid = data1.group(1)\r\n data2 = re.search('(.*)', text)\r\n if data2:\r\n title = data2.group(1)\r\n data3 = re.search('meta content=\"(.*)\" name=\"author\"', text)\r\n if data3:\r\n author = data3.group(1)\r\n data4 = re.search('meta content=\"(.*)\" name=\"created\"', text)\r\n if data4:\r\n 
created = data4.group(1)\r\n data5 = re.search('meta content=\"(.*)\" name=\"topic\"', text)\r\n if data5:\r\n topic = data5.group(1)\r\n data6 = re.search('meta content=\"(.*)\" name=\"tagging\"', text)\r\n if data6:\r\n tagging = data6.group(1)\r\n print(docid + \",\" + title + \",\" + author + \",\" + created + \",\" + topic + \",\" + tagging)\r\n \r\ndef searchabb(text_list):\r\n freqdict = {}\r\n fullist = []\r\n for text in text_list:\r\n answer = re.findall(\"[/s]([А-ЯЁA-Z]).+^[а-яёa-z][/s]\", text)\r\n for item in answer:\r\n fullist.append(item)\r\n for element in fullist:\r\n if element in freqdict:\r\n freqdict[element] = freqdict[element] + 1\r\n else:\r\n freqdict[element] = 1\r\n for key, value in freqdict.items():\r\n print(key)\r\n print(\"\\t\")\r\n print(value)\r\n \r\n\r\n \r\ndef main():\r\n maketable(readfiles(fileslist()))\r\n searchabb(readfiles(fileslist()))\r\n\r\nmain()\r\n\r\n","sub_path":"prog-examen.py","file_name":"prog-examen.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"229786083","text":"number = \"9,223,372,036,854,775,807\"\ncleanedNumber = ''\n\n# for loop assigns each variable char to the next number in the string\nfor char in number:\n if char in '0123456789':\n cleanedNumber = cleanedNumber + char\n\nnewNumber = int(cleanedNumber)\nprint(\"The number is {}\".format(newNumber))\n\n# for loop goes through the sequence for each string in the list\nfor state in [\"not pinin'\", \"no more\", \"a stiff\", \"breft of life\"]:\n print(\"This parrot is \" + state)\n # print(\"This parrot is {}\".format(state))\n\n# the range is between 0 and 100 and it steps by every 5 numbers\nfor i in range(0, 100, 5):\n print(\"i is {}\".format(i))\n\n# shows the times table that children learn in elementary school\n# uses nested for loops to show this\nfor i in range(1, 13):\n for j in range(1, 13):\n print(\"{1} times {0} is {2}\".format(i, j, i * j), end='\\t')\n print(' ')\n","sub_path":"loopPractice/forloops2.py","file_name":"forloops2.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"375783613","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 30 15:18:29 2018\nALDS1-4c\n@author: maezawa\n\"\"\"\ndef can_load(w, k, p):\n n = len(w)\n m = 0\n tk = 0\n i = 0\n while tk < k:\n if m + w[i] <= p:\n m += w[i]\n i += 1\n if i >= n:\n return n+1\n else:\n m = 0\n tk += 1\n return i\n\n\nn, k = list(map(int, input().split()))\nw = []\ntr = [0 for _ in range(k)]\n\nfor i in range(n):\n w.append(int(input()))\n\nmaxw = max(w)\n# =============================================================================\n# for p in range(maxw, maxw*n):\n# if can_load(w, k, p) == n:\n# print(p)\n# break\n# =============================================================================\nright = maxw*n\nleft = maxw\nwhile left 6 (= 2 * 3)\n\t\"\"\"\n\tto_return = 1\n\tfor nb in list_numbers:\n\t\tto_return *= nb\n\treturn to_return\n\t\n\n\n\n\n","sub_path":"exh/model/vars.py","file_name":"vars.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376451393","text":"#! 
/usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport psycopg2\nfrom flask import Flask, jsonify, request, abort\nfrom preprocess import MyVocabularyProcessor\n\n# Parameters\n# ==================================================\n\n# Eval Parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_string(\"vocab_filepath\", \"runs/1541748108/checkpoints/vocab\",\n \"Load training time vocabulary (Default: None)\")\ntf.flags.DEFINE_string(\"model\", \"runs/1541748108/checkpoints/model-33000\",\n \"Load trained model checkpoint (Default: None)\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\nFLAGS = tf.flags.FLAGS\n\n# Build vocabulary\nvocab_processor = MyVocabularyProcessor(30, min_frequency=0)\nvocab_processor = vocab_processor.restore(FLAGS.vocab_filepath)\n\n\ndef char2vec(arr):\n return np.asarray(list(vocab_processor.transform(arr)))\n\n\ndef get_test_data_set(text):\n return char2vec(np.full([len(abbr_vec_arr)], text))\n\n\ndef batch_iter(data, batch_size):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.asarray(data)\n # print(data)\n # print(data.shape)\n data_size = len(data)\n num_batches_per_epoch = int(len(data) / batch_size) + 1\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n\n\ndef prepare_abbr_arr():\n cur = conn.cursor()\n cur.execute(\"SELECT trim(machine_name), trim(formal_name) FROM game_alias WHERE status = 'confirmed'\")\n conn.commit()\n abbr = []\n formal = []\n for t in cur:\n abbr.append(t[0])\n formal.append(t[1])\n return np.asarray(abbr), np.asarray(formal), char2vec(abbr)\n\n\ndef predict(x1_arr):\n batches = batch_iter(list(zip(abbr_vec_arr, x1_arr, np.zeros([len(abbr_vec_arr)]))), 2 * FLAGS.batch_size)\n all_predictions = []\n all_d = []\n for db in batches:\n x1_dev_b, x2_dev_b, y_dev_b = zip(*db)\n batch_predictions, batch_acc, batch_sim = sess.run([predictions, accuracy, sim],\n {input_x1: x1_dev_b, input_x2: x2_dev_b,\n input_y: y_dev_b, dropout_keep_prob: 1.0})\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n all_d = np.concatenate([all_d, batch_sim])\n\n keys = np.nonzero(all_d)\n print(keys)\n print(list(zip(abbr_arr[keys], formal_arr[keys], all_predictions[keys])))\n # print(all_predictions[keys])\n mink = np.argmin(all_predictions[keys])\n print(abbr_arr[keys][mink])\n print(all_predictions[keys][mink])\n # return 'ok'\n return formal_arr[keys][mink]\n\n\n# Initial global variables\n\napp = Flask(__name__)\nconn = psycopg2.connect(\"dbname=feedback_nlp user=postgres password=postgres host=data-platform-sh-01.nvidia.com\")\nabbr_arr, formal_arr, abbr_vec_arr = prepare_abbr_arr()\n\ngraph = tf.Graph()\nwith graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(FLAGS.model))\n sess.run(tf.initialize_all_variables())\n saver.restore(sess, FLAGS.model)\n\n # Get the placeholders from the graph by name\n input_x1 = 
graph.get_operation_by_name(\"input_x1\").outputs[0]\n input_x2 = graph.get_operation_by_name(\"input_x2\").outputs[0]\n input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/distance\").outputs[0]\n\n accuracy = graph.get_operation_by_name(\"accuracy/accuracy\").outputs[0]\n\n sim = graph.get_operation_by_name(\"accuracy/temp_sim\").outputs[0]\n\n\n@app.route('/sim/debug', methods=['POST'])\ndef sim_debug():\n data = request.get_json(force=True)\n if 'text' in data:\n _text = data['text']\n return jsonify(predict(get_test_data_set(_text)))\n else:\n return abort(406)\n\n\ndef main():\n app.run(host=\"0.0.0.0\", debug=True)\n\n\nif __name__ == '__main__':\n main()\n conn.close()\n","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"478171663","text":"import OpenGL.GL as gl, colours\nfrom math import cos, sin, pi\n\ndef points(*points, colour = colours.white):\n gl.glBegin(gl.GL_POINTS)\n for point in points:\n gl.glVertex2f(*point)\n gl.glEnd()\n\ndef line(point1, point2, colour = colours.white):\n \"Draw a line from point1 of screen coordinates to point2\"\n gl.glBegin(gl.GL_LINE_STRIP)\n for point in (point1, point2):\n gl.glVertex2f(*point)\n gl.glEnd()\n \ndef lines(point_list, loop = False, fill = False, colour = colours.white):\n \"Draw a line from point to end\"\n if fill:\n gl.glBegin(gl.GL_POLYGON)\n elif loop:\n gl.glBegin(gl.GL_LINE_LOOP)\n else:\n gl.glBegin(gl.GL_LINE_STRIP)\n\n for point in point_list:\n gl.glVertex2f(*point)\n gl.glEnd()\n\ndef circle(center, radius, fill = False, num_segments = False, colour = colours.white, ):\n if not num_segments:\n num_segments = radius * 10\n \n # walk around a circle \n point_list = [ ( int(radius*cos(theta/num_segments)+center[0]), int(radius*sin(theta/num_segments)+center[1]) )\n for theta in range(int(2*pi * (num_segments))) ]\n\n lines(point_list, fill = fill, loop = True, colour = colour)","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"200217000","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 10 09:28:41 2018\n\n@author: Dean\n\"\"\"\n\nclass Node:\n def __init__(self,value=None):\n self.value = value\n self.left = None\n self.right = None\n \ndef preOrderRecur(tree):\n if not tree:\n return\n print(tree.value,end=\" \")\n preOrderRecur(tree.left)\n preOrderRecur(tree.right)\n return\n\ndef inOrderRecur(tree):\n if not tree:\n return\n inOrderRecur(tree.left)\n print(tree.value,end=\" \")\n inOrderRecur(tree.right)\n return\n\ndef posOrderRecur(tree):\n if not tree:\n return\n posOrderRecur(tree.left)\n posOrderRecur(tree.right)\n print(tree.value,end=\" \")\n return\n\n\ndef createByPreAndIn(preArr,InArr):\n if not preArr or not InArr:\n return None\n rootValue = preArr[0]\n mid = 0\n for i in InArr:\n if i == rootValue:\n break\n mid += 1\n Llen = len(InArr[:mid])\n Rlen = len(InArr[mid+1:])\n root = Node(rootValue)\n root.left = createByPreAndIn(preArr[1:Llen+1],InArr[:mid])\n root.right = createByPreAndIn(preArr[Llen+1:],InArr[mid+1:])\n return root\n\ndef createByInAndPos(InArr,posArr):\n if not InArr or not posArr:\n return None\n mid = 0\n for i in 
InArr:\n        if i == posArr[-1]:\n            break\n        mid += 1\n    root = Node(posArr[-1])\n    Llen = mid\n    root.left = createByInAndPos(InArr[:mid],posArr[:Llen])\n    root.right = createByInAndPos(InArr[mid+1:],posArr[Llen:-1])\n    return root\n\n# note: only a binary tree without any degree-1 (single-child) nodes can be reconstructed from its pre-order and post-order sequences\ndef createByPreAndPos(preArr,posArr):\n    if not preArr and not posArr:\n        return None\n    root = Node(preArr[0])\n    k = 0\n    leftRoot = None # root node of the left subtree\n    if len(preArr) > 1: # careful: this needs special handling, otherwise it raises an error\n        leftRoot = preArr[1]\n    for i in posArr:\n        if i == leftRoot:\n            break\n        k += 1\n    Llen = 0\n    if leftRoot is not None: # likewise, this needs special handling\n        Llen = k + 1 # number of nodes in the left subtree\n    root.left = createByPreAndPos(preArr[1:Llen+1],posArr[:Llen])\n    root.right = createByPreAndPos(preArr[Llen+1:],posArr[Llen:-1])\n    return root\n    \n    \n\nif __name__ == \"__main__\":\n    preOrder = [1,2,4,5,3,6,7]\n    InOrder = [4,2,5,1,6,3,7]\n    posOrder = [4,5,2,6,7,3,1]\n    tree = createByPreAndIn(preOrder,InOrder)\n    tree2 = createByInAndPos(InOrder,posOrder)\n    tree3 = createByPreAndPos(preOrder,posOrder)\n    preOrderRecur(tree)\n    print()\n    inOrderRecur(tree)\n    print()\n    posOrderRecur(tree)\n    print()\n    preOrderRecur(tree2)\n    print()\n    inOrderRecur(tree2)\n    print()\n    posOrderRecur(tree2)\n    print()\n    preOrderRecur(tree3)\n    print()\n    inOrderRecur(tree3)\n    print()\n    posOrderRecur(tree3)\n    print() \n    \n    \n","sub_path":"算法题/程序员面试指南/python/树/先中后序数组两两组合重构二叉树.py","file_name":"先中后序数组两两组合重构二叉树.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"586573634","text":"# Copyright (c) Ye Liu. All rights reserved.\n\nfrom collections import OrderedDict\n\nimport torch.distributed as dist\nfrom torch._utils import (_flatten_dense_tensors, _take_tensors,\n                          _unflatten_dense_tensors)\nfrom torch.nn.utils import clip_grad\n\nfrom .base import HOOKS, Hook\n\n\ndef _allreduce_coalesced(tensors, world_size, bucket_size_mb):\n    if bucket_size_mb > 0:\n        bucket_size_bytes = bucket_size_mb * 1024 * 1024\n        buckets = _take_tensors(tensors, bucket_size_bytes)\n    else:\n        buckets = OrderedDict()\n        for tensor in tensors:\n            tp = tensor.type()\n            if tp not in buckets:\n                buckets[tp] = []\n            buckets[tp].append(tensor)\n        buckets = buckets.values()\n\n    for bucket in buckets:\n        flat_tensors = _flatten_dense_tensors(bucket)\n        dist.all_reduce(flat_tensors)\n        flat_tensors.div_(world_size)\n        for tensor, synced in zip(\n                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):\n            tensor.copy_(synced)\n\n\ndef _allreduce_grads(params, coalesce, bucket_size_mb):\n    grads = [\n        param.grad.data for param in params\n        if param.requires_grad and param.grad is not None\n    ]\n    world_size = dist.get_world_size()\n    if coalesce:\n        _allreduce_coalesced(grads, world_size, bucket_size_mb)\n    else:\n        for tensor in grads:\n            dist.all_reduce(tensor.div_(world_size))\n\n\n@HOOKS.register()\nclass OptimizerHook(Hook):\n\n    def __init__(self, interval=1):\n        super(OptimizerHook, self).__init__()\n        self._interval = interval\n\n    def _avg_grads(self, engine):\n        step_size = engine.iter_in_epoch - self._last_update_iter + 1\n        for param in engine.model.parameters():\n            if param.requires_grad and param.grad is not None:\n                param.grad.data.div_(step_size)\n        self._last_update_iter = engine.iter_in_epoch + 1\n\n    def _clip_grads(self, params, cfg):\n        # materialize the filter so len() works (a Python 3 filter object has no len)\n        params_with_grad = list(filter(\n            lambda p: p.requires_grad and p.grad is not None, params))\n        if len(params_with_grad) > 0:\n            clip_grad.clip_grad_norm_(params_with_grad, **cfg)\n\n    def before_train_epoch(self, engine):\n        self._last_update_iter = 0\n        
engine.optimizer.zero_grad()\n\n def after_train_iter(self, engine):\n loss_type = engine.cur_stage.get('loss', 'loss')\n engine.losses[loss_type].backward()\n\n if self.every_n_iters_in_epoch(\n engine, self._interval) or self.last_iter_in_epoch(engine):\n self._avg_grads(engine)\n grad_clip = engine.cur_stage.get('grad_clip', None)\n if grad_clip is not None:\n self._clip_grads(engine.model.parameters(), grad_clip)\n engine.optimizer.step()\n engine.optimizer.zero_grad()\n\n def after_train_epoch(self, engine):\n engine.optimizer.zero_grad()\n\n\n@HOOKS.register()\nclass DistOptimizerHook(OptimizerHook):\n\n def __init__(self, interval=1, coalesce=True, bucket_size_mb=-1):\n super(DistOptimizerHook, self).__init__()\n self._interval = interval\n self._coalesce = coalesce\n self._bucket_size_mb = bucket_size_mb\n\n def _avg_grads(self, engine):\n super(DistOptimizerHook, self)._avg_grads(engine)\n _allreduce_grads(engine.model.parameters(), self._coalesce,\n self._bucket_size_mb)\n","sub_path":"nncore/engine/hooks/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529442855","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport shutil\nfrom jinja2 import Environment, FileSystemLoader\n\nTEMPLATE_FILETYPES = [\n \"md\"\n]\n\ndef write_file(dest, content):\n \"\"\"Write given content to the specified destination file\"\"\"\n with open(dest, 'w') as file:\n file.write(content)\n\nif len(sys.argv) < 3:\n print(\"Not enough arguments\")\n sys.exit(1)\n\nsearch_path = os.path.abspath(sys.argv[1])\noutput_path = os.path.abspath(sys.argv[2])\n\nenv = Environment(\n loader=FileSystemLoader(search_path)\n)\n\nfor template in env.list_templates():\n # extract file name\n filename = template.split(\"/\")[-1]\n # remove file from path to get relative path to specified file\n base_dir = os.path.abspath(os.path.join(search_path, template.replace(filename, \"\")))\n # create target path\n target_dir = base_dir.replace(search_path, output_path)\n target = os.path.abspath(os.path.join(target_dir, filename))\n\n # create target folder if not existent\n if not os.path.isdir(target_dir):\n os.makedirs(target_dir)\n\n # skip if file is not a plain text file\n if template.split(\".\")[-1] not in TEMPLATE_FILETYPES:\n shutil.copyfile(os.path.join(base_dir, filename), target)\n else:\n template_obj = env.get_template(template)\n \n def get_uml(path):\n \"\"\"Get content of specified file\"\"\"\n with open(os.path.join(base_dir, path), \"r\") as file:\n return file.read()\n\n # render template based on given file path\n rendered_result = template_obj.render(get_uml=get_uml) \n\n write_file(target, rendered_result)\n","sub_path":"templating.py","file_name":"templating.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350758906","text":"import sqlalchemy\nfrom dnascissors.config import cfg\nfrom dnascissors.model import Base\nfrom dnascissors.model import Target\nimport requests\nimport json\n\n\ndef get_gene_id(gene_symbol, species='homo_sapiens'):\n species = species.lower().replace(' ', '_')\n url = (\"https://rest.ensembl.org/xrefs/symbol/{}/{}?\").format(species, gene_symbol)\n r = requests.get(url, headers={\"Content-Type\": \"application/json\"})\n gene = json.loads(r.text)\n if gene:\n return gene[0]['id']\n\n\ndef main():\n engine = 
sqlalchemy.create_engine(cfg['DATABASE_URI'])\n Base.metadata.bind = engine\n DBSession = sqlalchemy.orm.sessionmaker(bind=engine)\n dbsession = DBSession()\n targets = dbsession.query(Target).all()\n print('Update gene ids on target in {} to be Ensembl ones starting with ENSG...'.format(cfg['DATABASE_URI']))\n for target in targets:\n old_gene_id = target.gene_id\n new_gene_id = get_gene_id(target.gene_id, target.genome.species)\n if not old_gene_id == new_gene_id:\n new_gene_id = get_gene_id(target.name, target.genome.species)\n if not new_gene_id and target.name.startswith('LEPR'):\n new_gene_id = get_gene_id('LEPR', target.genome.species)\n\n if new_gene_id:\n if target.description:\n if len(target.description) >= 984:\n target.description = \"{}... [submitted_gene_id: {}]\".format(target.description[:984], old_gene_id)\n else:\n target.description = \"{} [submitted_gene_id: {}]\".format(target.description, old_gene_id)\n else:\n target.description = \"[submitted_gene_id: {}]\".format(old_gene_id)\n target.gene_id = new_gene_id\n dbsession.commit()\n print(\">>>UPDATED: {}\\t{}\\t{}\\t{}\".format(target.name, old_gene_id, new_gene_id, target.description))\n else:\n print(\"!!! NO ID: {}\\t{}\\t{}\".format(target.name, target.gene_id, target.description))\n else:\n print(\"UP-TO-DATE: {}\\t{}\\t{}\".format(target.name, target.gene_id, target.description))\n print('done')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/scripts/update_target_gene_ids.py","file_name":"update_target_gene_ids.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531182429","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql\n\nfrom sqlalchemy.sql import func\n\nfrom models import Company, WikipediaDb, XingCompanyDb, session\n\n\nclass GoogleSpiderPipeline(object):\n \"\"\"\n Pipeline that processes info from google spider.\n For more info: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n \"\"\"\n collection_name = 'google_items'\n\n def process_item(self, item, spider):\n if not item['update']:\n new_company = Company(name=item['company_name'], website=item['url'], website_long=item['url_long'])\n session.add(new_company)\n else:\n session.query(Company).\\\n filter((Company.name == item['company_name']) & ((Company.website == None) | (Company.website == 'NA'))).\\\n update({'website': item['url'], 'website_long': item['url_long']})\n\n def close_spider(self, spider):\n session.commit()\n\n\nclass WikipediaSpiderPipeline(object):\n \"\"\"\n Pipeline that processes info from wikipedia spider.\n For more info: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n \"\"\"\n collection_name = 'wikipedia_items'\n\n def process_item(self, item, spider):\n update = item['update']\n company_name = item['company_name']\n company_website = item['company_website']\n item = dict(summary_wikipedia_w=item['summary'], categories_wikipedia_w=item['categories'],\n revenue_wikipedia_w=item['revenue'], revenue_currency_wiki_w=item['currency'],\n branch_wikipedia_w=item['branche'], wiki_url_w=item['url'], headquarters_wiki_w=item['sitz'],\n employees_wikipedia_w=item['mitarbeiter'], company_website_w=item['wiki_company_website'],\n last_update_w=func.now())\n\n company = session.query(Company).filter_by(name=company_name, 
website=company_website).first()\n if not update:\n new_entry = WikipediaDb(company_name_w=company_name, timestamp_w=func.now(), wc_id=company.id, **item)\n session.add(new_entry)\n else:\n session.query(WikipediaDb).filter(WikipediaDb.company_name_w == company_name).\\\n update(item, synchronize_session=False)\n\n company.last_update = func.now()\n company.wiki_evaluation = func.now()\n company.wikipedia_url = item['wiki_url_w']\n\n def close_spider(self, spider):\n session.commit()\n\n\nclass XingSpiderPipeline(object):\n \"\"\"\n Pipeline that processes info from xing spider.\n For more info: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n \"\"\"\n collection_name = 'xing_items'\n\n def process_item(self, item, spider):\n update = item['update']\n company_name = item['company_name']\n\n item = dict(street_xing=item.get('street'), city_xing=item.get('city'), description_xing=item.get('about_us'),\n zipcode_xing=item.get('postal_code'), country_xing=item.get('country'), tel_xing=item.get('phone'),\n fax_xing=item.get('fax'), company_email_xing=item.get('email'), industry_xing=item.get('industry'),\n established_in_xing=item.get('established'), products_xing=item.get('products'),\n employees_size_xing=item.get('employees_number'), company_website_x=item.get('url'),\n last_update_x=func.now(), employees_group_xing_x=item.get('registered_employees_number'))\n\n company = session.query(Company).filter_by(name=company_name)\n if not update:\n company = company.first()\n new_entry = XingCompanyDb(company_name_x=company_name, timestamp_x=func.now(), xc_id=company.id, **item)\n session.add(new_entry)\n else:\n company = company.filter(Company.xing_page != 'NA', Company.xing_page is not None).first()\n session.query(XingCompanyDb).filter(XingCompanyDb.xc_id == company.id).\\\n update(item, synchronize_session=False)\n\n company.last_update = func.now()\n company.xing_page_update = func.now()\n company.xing_page = item['company_website_x']\n\n def close_spider(self, spider):\n session.commit()\n","sub_path":"mx_crm/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381090783","text":"__author__ = 'shenli'\nfrom Logic.ChangLunLogic import *\nimport DataFeeder.DBFeeder as dbFeeder\nfrom Data.DataModel.TradePoints import trade_points\nimport UtilTools.DateUtils as DateUtils\n\ndef runModel(stockId,startDate,endDate,freq,tradeDate):\n trade_points.delete().where(trade_points.stockid == stockId).execute()\n sds = dbFeeder.getHistDataArray(stockId,startDate,endDate,freq)\n startIndex = getTradeStartIndex(sds,tradeDate)\n sds = sds[startIndex:]\n for i in range(50,len(sds)):\n changLun = ChangLunLogic()\n changLun.initData(stockId,startDate,endDate,freq,sds[0:i])\n changLun.run()\n changLun.isReadyTrade()\n changLun = ChangLunLogic()\n changLun.initData(stockId,startDate,endDate,freq,sds)\n changLun.run()\n changLun.loadhistBuyPoints()\n changLun.saveResult()\n #changLun.showResult()\n\ndef getStartIndex(histData):\n maxData = max(histData,key=lambda x:x.getHigh())\n minData = min(histData,key=lambda x:x.getLow())\n maxIndex = histData.index(maxData)\n minIndex = histData.index(minData)\n return max(0, min(maxIndex,minIndex) - 44)\n\ndef getTradeStartIndex(histData,tradeDate):\n tradeDate = DateUtils.convertDateStrToDatetime(tradeDate)\n for i in range(0,len(histData)):\n if histData[i].getDateTime() < tradeDate:\n continue\n return i\n\nif __name__ == 
'__main__':\n stockId = '000001'\n startDate = '2013-09-13'\n endDate = '2014-09-13'\n freq = '30'\n tradeDate = '2013-09-13'\n runModel(stockId,startDate,endDate,freq,tradeDate)\n","sub_path":"TradeModel/ChangLunModel.py","file_name":"ChangLunModel.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321086340","text":"\n# coding=utf-8\nfrom __future__ import absolute_import\n\nimport octoprint.plugin\n\nclass UICustomizerPlugin(octoprint.plugin.StartupPlugin,\n octoprint.plugin.SettingsPlugin,\n octoprint.plugin.AssetPlugin,\n octoprint.plugin.TemplatePlugin):\n\n def on_after_startup(self):\n self._logger.info(\"UI Customizer is initialized.\")\n\n def get_assets(self):\n return dict(\n js=[\"js/uicustomizer.js\",\"js/Sortable.min.js\"],\n css=[\"css/uicustomizer.css\",\"css/bootstrap-responsive.css\"],\n less=[\"less/uicustomizer.less\"]\n\n )\n\n # default settings\n def get_settings_defaults(self):\n return {\n \"rows\" : [\n {\n \"#sidebar_plugin_firmware_check_wrapper\": True,\n \"#files_wrapper\": True,\n \"#connection_wrapper\": True\n },\n {\n \"div.UICmainTabs\": True\n },\n {\n \"#UICWebCamWidget\": True,\n \"#state_wrapper\": True,\n \"#sidebar_plugin_action_command_notification_wrapper\": True\n }\n ],\n \"widths\" : [3,6,3],\n \"fluidLayout\" : True,\n \"fixedHeader\" : True,\n \"fixedFooter\" : True,\n \"hideGraphBackground\" : True,\n \"responsiveMode\": True,\n \"navbarplugintempfix\": False,\n \"addWebCamZoom\" : False\n }\n\n def get_template_configs(self):\n return [\n dict(type=\"settings\", custom_bindings=False)\n ]\n\n def get_update_information(self):\n # Define the configuration for your plugin to use with the Software Update\n # Plugin here. 
See https://docs.octoprint.org/en/master/bundledplugins/softwareupdate.html\n # for details.\n return dict(\n uicustomizer=dict(\n displayName=self._plugin_name,\n displayVersion=self._plugin_version,\n\n # version check: github repository\n type=\"github_release\",\n user=\"LazeMSS\",\n repo=\"OctoPrint-UICustomizer\",\n current=self._plugin_version,\n\n # update method: pip\n pip=\"https://github.com/LazeMSS/OctoPrint-UICustomizer/archive/{target_version}.zip\"\n )\n )\n\n\n__plugin_name__ = \"UI Customizer\"\n__plugin_pythoncompat__ = \">=2.7,<4\"\n\ndef __plugin_load__():\n global __plugin_implementation__\n __plugin_implementation__ = UICustomizerPlugin()\n\n global __plugin_hooks__\n __plugin_hooks__ = {\n \"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information\n }","sub_path":"octoprint_uicustomizer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256841361","text":"import wx\n\n\ndata = [(\"Jessica Alba\", \"Pomona\", \"1981\"), \n (\"Sigourney Weaver\", \"New York\", \"1949\"),\n (\"Angelina Jolie\", \"Los Angeles\", \"1975\"),\n (\"Natalie Portman\", \"Jerusalem\", \"1981\"),\n (\"Rachel Weiss\", \"London\", \"1971\"), \n (\"Scarlett Johansson\", \"New York\", \"1984\")]\n\nclass Example(wx.Frame):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.selected_list = []\n\n self.initUI()\n\n def initUI(self):\n hbox = wx.BoxSizer(orient=wx.HORIZONTAL)\n panel = wx.Panel(parent=self)\n\n self.list = wx.ListCtrl(parent=panel, id=wx.ID_ANY, style=wx.LC_REPORT)\n self.list.InsertColumn(col=0, heading=\"name\", width=140)\n self.list.InsertColumn(col=1, heading=\"place\", width=130)\n self.list.InsertColumn(col=2, heading=\"year\", format=wx.LIST_FORMAT_LEFT, width=90)\n\n idx = 0\n\n for i in data:\n index = self.list.InsertItem(index=idx, label=i[0])\n self.list.SetItem(index=index, column=1, label=i[1])\n self.list.SetItem(index=index, column=2, label=i[2])\n idx += 1\n\n self.list.Bind(event=wx.EVT_LIST_ITEM_SELECTED, handler=self.selectedItem)\n self.list.Bind(event=wx.EVT_LIST_ITEM_DESELECTED, handler=self.deselectedItem)\n\n\n hbox.Add(window=self.list, proportion=1, flag=wx.EXPAND)\n panel.SetSizer(sizer=hbox)\n\n self.SetTitle(\"Actresses\")\n self.Center()\n\n def selectedItem(self, event):\n selected_item = event.GetIndex()\n print(self.list.GetItem(selected_item, col=1).GetText())\n self.selected_list.append(selected_item)\n\n # print(self.selected_list)\n\n def deselectedItem(self, event):\n deselected_item = event.GetIndex()\n self.selected_list.remove(deselected_item)\n\n # print(self.selected_list)\n\n\n # def deselectedItem(self, event):\n # selected_item = event.GetText()\n # self.selected_list.remove(selected_item)\n # print(self.selected_list)\n\n\ndef main():\n app = wx.App()\n ex = Example(None)\n ex.Show()\n app.MainLoop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python3/ZetCode/wxPython/7 - Advanced widgets/7.4 - actresses TEST.py","file_name":"7.4 - actresses TEST.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"126732262","text":"\nfrom network import MotionNet\n\nTEST=True\n\n#The following parameters must have the same value in 'training' and 'test' modes.\nnum_layer=1\nhidden_unit = 1000\ntime_step = 90\nbatch_Frame = 1\nsave_period = 
300\nuse_gpu=True\nuse_cudnn=True\nModel=2 # Only 1, 2, and 3 are possible and only works when TEST = True.\n'''Execution'''\nif TEST:\n    MotionNet(TEST=TEST , Model=1 ,save_period=save_period, num_layer=num_layer , hidden_unit=hidden_unit , time_step = time_step , batch_Frame= batch_Frame , use_gpu=use_gpu , use_cudnn=use_cudnn , graphviz=False)\nelse:\n    #batch learning\n    MotionNet(epoch=300 , batch_size=68 , save_period=save_period, optimizer='adam', learning_rate=0.01 ,Dropout=0.2 , use_gpu=use_gpu , use_cudnn=use_cudnn ,\n              TEST=TEST , num_layer=num_layer , hidden_unit=hidden_unit , time_step = time_step , batch_Frame = batch_Frame , graphviz=False )\n\n","sub_path":"DeepHumanPrediction/Code/Master_Thesis/Human Motion Analysis/Human Motion Pattern Classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"230774988","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('students', '0003_auto_20150324_2059'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Exam',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(max_length=256, verbose_name='\\u041d\\u0430\\u0437\\u0432\\u0430')),\n                ('date', models.DateTimeField(null=True, verbose_name='\\u0414\\u0430\\u0442\\u0430 \\u0456 \\u0447\\u0430\\u0441 \\u043f\\u0440\\u043e\\u0432\\u0435\\u0434\\u0435\\u043d\\u043d\\u044f')),\n                ('teacher', models.CharField(max_length=256, null=True, blank=True)),\n                ('groups', models.ForeignKey(verbose_name=b'\\xd0\\x93\\xd1\\x80\\xd1\\x83\\xd0\\xbf\\xd0\\xb0', blank=True, to='students.Group', null=True)),\n            ],\n            options={\n                'verbose_name': '\\u0415\\u043a\\u0437\\u0430\\u043c\\u0435\\u043d',\n                'verbose_name_plural': '\\u0415\\u043a\\u0437\\u0430\\u043c\\u0435\\u043d\\u0438',\n            },\n            bases=(models.Model,),\n        ),\n    ]\n","sub_path":"students/migrations/0004_exam.py","file_name":"0004_exam.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"204198287","text":"# -*- coding: cp936 -*-\n\"\"\"\nProblem: given that 2016-01-01 is a Friday, write a function\nprintCalendar(month) that prints the calendar of the month passed\nin, following the sample format below. Also write code to test that\nthe function is correct.\nFor example, the calendar for February is printed as:\n 日 一 二 三 四 五 六\n 1 2 3 4 5 6\n 7 8 9 10 11 12 13\n 14 15 16 17 18 19 20\n 21 22 23 24 25 26 27\n 28 29\n\"\"\"\n\nfirst_weekday = 5 # the first day of the year is a Friday (weekday 5)\ndays = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # days in each month\ndef printCalendar(month):\n    # work out which weekday the first day of the month falls on\n    t=days[month-1]\n    s=0\n    for i in range(0,month-1,1) :\n        s=s+days[i]\n    k=(s-2)%7\n    p=[[],[],[],[],[],[]]\n    j=(t-(7-k))//7\n    b=t-(7-k)-7*j\n\n    # assign each day of the month to its weekday slot\n    d=1\n    for i in range(k,7,1):\n        p[0].append(d)\n        d=d+1 \n    for q in range(1,j+1,1):\n        for i in range(0,7,1):\n            p[q].append(d)\n            d=d+1 \n    for i in range(0,b,1):\n        p[j+1].append(d)\n        d=d+1\n    for i in range(b,7,1):\n        p[j+1].append('\\t')\n    \n\n    \n\n\n    # print the header row (日 一 二 三 四 五 六)\n    print('日\\t一\\t二\\t三\\t四\\t五\\t六')\n\n    # print the month calendar\n    print('\\t'*k,end='')\n    for i in range(0,7-k,1):\n        print('%d\\t' %p[0][i],end=\"\")\n    print('\\t') \n    for j in range(1,j+1,1):\n        for i in range(0,7,1):\n            print('%d\\t' %p[j][i],end='')\n        print('\\t') \n    for i in range(0,b,1):\n        print('%d\\t' %p[j+1][i],end='')\n    print('\\t'*(6-b))\n    \n# first correctness test case: print the February calendar\nmonth = 
2\nprintCalendar(month)\n\n\"\"\"\n# 函数是否正确第二个测试用例:输出全年各月份月历\nmonthNames = ['一月', '二月', '三月', '四月', '五月', '六月', '七月', '八月', '九月', '十月', '十一月', '十二月']\nfor month in range(1, 12+1):\n print (' ', monthNames[month-1])\n printCalendar(month)\n\"\"\"\n","sub_path":"work_frame.py","file_name":"work_frame.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615570279","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.db import transaction\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import Group\nfrom django.views.generic import CreateView, ListView, View\nfrom django.db import transaction\nfrom django.dispatch import receiver\n# from django.forms import CheckboxSelectMultiple\n# from multiselectfield import MultiSelectField\nfrom .models import User, Vacancies\nfrom summary.models import Summary, Workexperience\nfrom category.models import Uzbekiston_provinces, Uzbekiston_region\nfrom category.models import Citizenship, Specialization\nfrom jobs.models import Vacanciess\n\n\n\n\n\n# Change OneToOne form \nclass UserForm(forms.ModelForm):\n last_name = forms.CharField(required=False, label=\"Familya\", widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'autofocus': 'autofocus', 'placeholder': 'Фамилия'}\n ))\n first_name = forms.CharField(required=False, label=\"Ism\", widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Имя'}\n )) \n email = forms.CharField(required=False, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Электронная почта'}\n ))\n username = forms.CharField(required=False, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Логин'}\n ))\n # password = forms.CharField(required=False, widget=forms.TextInput(\n # attrs = {'class':'form-control', 'placeholder': 'Parol'}\n # ))\n\n\n class Meta:\n model = User\n fields = ('last_name', 'first_name', 'email', 'username')\n\n\n\nclass VacanciesForm(forms.ModelForm):\n phone_numer = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Номер телефона'}\n ))\n name_company = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Названия компании'}\n ))\n\n class Meta:\n model = Vacancies\n fields = ('phone_numer', 'name_company', 'provinces', 'region')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['provinces'].widget.attrs.update({'class': 'form-control rounded-1', 'placeholder': 'Login'}) \n self.fields['region'].widget.attrs.update({'class': 'form-control rounded-1', 'placeholder': 'Login'}) \n\n\n\n\n\n\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\nclass UserSignUpForm(UserCreationForm):\n\n first_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'autofocus': 'autofocus', 'placeholder': 'Имя'}\n )) \n email = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'autocomplete':'username', 'placeholder': 'Электронная почта'}\n ))\n username = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 
'autocomplete':'username', 'placeholder': 'Логин'}\n ))\n password1 = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'type': 'password', 'class':'form-control rounded-1', 'autocomplete':'current-password', 'placeholder': 'Пароль'}\n ))\n password2 = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'type': 'password','class':'form-control rounded-1', 'autocomplete':'current-password', 'placeholder': 'Повтор пароля'}\n ))\n \n \n class Meta(UserCreationForm.Meta):\n model = User \n\n @transaction.atomic\n def save(self, commit=True):\n user = super().save(commit=False)\n user.is_active = True\n user.first_name = self.cleaned_data.get('first_name')\n user.email = self.cleaned_data.get('email')\n user.save()\n return user\n\n # def __init__(self, *args, **kwargs):\n # super().__init__(*args, **kwargs)\n # self.fields['username'].widget.attrs.update({'class': 'form-control', 'placeholder': 'Login'}) \n # self.fields['password1'].widget.attrs.update({'class': 'form-control', 'placeholder': 'Parol'}) \n # self.fields['password2'].widget.attrs.update({'class': 'form-control', 'placeholder': 'Parolni qaytadan kiriting'}) \n\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n\n\n\n\nYJS = (\n ('юридического лица', 'юридического лица'),\n ('физическое лицо', 'физическое лицо'),\n ('другие', 'другие'),\n )\n\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n\nclass UservacanciesSignUpForm(UserCreationForm):\n\n last_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'autofocus': 'autofocus', 'placeholder': 'Фамилия'}\n ))\n first_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Имя'}\n )) \n email = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Электронная почта'}\n ))\n name_company = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Названия компании'}\n ))\n choose = forms.ChoiceField(choices=YJS)\n phone_numer = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': '+9'}\n ))\n username = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Логин'}\n ))\n password1 = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'type': 'password','class':'form-control rounded-1', 'placeholder': 'Пароль'}\n ))\n password2 = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'type': 'password','class':'form-control rounded-1', 'placeholder': 'Повтор пароля'}\n ))\n provinces = forms.ModelChoiceField(queryset=Uzbekiston_provinces.objects.all(), label=\"Viloyatlar\", required=True)\n region = forms.ModelChoiceField(queryset=Uzbekiston_region.objects.all(), label=\"Tumanlar\", required=True)\n\n class Meta(UserCreationForm.Meta):\n model = User \n\n @transaction.atomic\n def save(self, commit=True):\n user = super().save(commit=False)\n user.is_active = True\n\n if commit:\n user.save()\n group = Group.objects.get(name=\"vacancies\")\n user.groups.add(group)\n\n user.first_name = self.cleaned_data.get('first_name')\n user.last_name = 
self.cleaned_data.get('last_name')\n user.email = self.cleaned_data.get('email')\n user.username = self.cleaned_data.get('username')\n user.password1 = self.cleaned_data.get('password1')\n user.password2 = self.cleaned_data.get('password2')\n user.save()\n useradminpage = Vacancies.objects.create(user=user)\n useradminpage.phone_numer=self.cleaned_data.get('phone_numer') \n useradminpage.name_company=self.cleaned_data.get('name_company') \n useradminpage.provinces=self.cleaned_data.get('provinces') \n useradminpage.region=self.cleaned_data.get('region') \n useradminpage.choose=self.cleaned_data.get('choose') \n useradminpage.save()\n return user\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['provinces'].widget.attrs.update({'class': 'form-control rounded-1'}) \n self.fields['region'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['choose'].widget.attrs.update({'class': 'form-control rounded-1'})\n\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n# ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓\n\n\n\n\n\n\n\n\n\n\n\n\nSEX = (\n ('Мужской', 'Мужской'),\n ('Женский', 'Женский'),\n )\n\nWORKEX = (\n (\"Есть опыт работы\", \"Есть опыт работы\"),\n (\"Нет опыта работы\", \"Нет опыта работы\"),\n )\nMONEY = (\n ('USD', \"USD\"),\n ('RUB', \"RUB\"),\n (\"UZS\", \"UZS\"),\n )\n\nclass SummaryForm(forms.ModelForm):\n last_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'Фамилия'}\n ))\n first_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'autofocus': 'autofocus', 'placeholder': 'Имя'}\n ))\n phone_name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': '+9 '}\n ))\n data_time = forms.DateField(required=True, widget=forms.DateInput(\n attrs = {'type': 'date', 'class':'form-control rounded-1'}\n ))\n\n sex = forms.ChoiceField(widget=forms.RadioSelect, choices=SEX)\n citizenship = forms.ModelMultipleChoiceField(queryset=Citizenship.objects.all(), widget=forms.CheckboxSelectMultiple(), required=False)\n workexper = forms.ChoiceField(widget=forms.RadioSelect, choices=WORKEX)\n \n moneys = forms.ChoiceField(required=False, choices=MONEY)\n speciali = forms.ModelMultipleChoiceField(queryset=Specialization.objects.all(), widget=forms.CheckboxSelectMultiple(), required=False)\n\n thesalary = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': ''}\n ))\n carobjec = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': ''}\n ))\n key_skills = forms.CharField(required=False, widget=forms.Textarea(\n attrs = {'class':'form-control col-md-12', 'type':'text','style':'height: 100px', 'placeholder': \"\"}\n ))\n\n class Meta:\n model = Summary\n fields = ('last_name', 'first_name', 'phone_name', 'provinces', 'region', 'data_time', 'sex', 'citizenship', 'workexper', 'thesalary', 'carobjec', 'key_skills', 'moneys', 'speciali', 'profess', 'thchopr', 'image1', 'image2', 'image3', 'image4', 'image5', 'image6')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['provinces'].widget.attrs.update({'class': 'form-control rounded-1'}) \n self.fields['region'].widget.attrs.update({'class': 'form-control rounded-1'})\n 
self.fields['moneys'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['profess'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['thchopr'].widget.attrs.update({'class': 'form-control rounded-1'})\n \n self.fields['image1'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['image2'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['image3'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['image4'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['image5'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['image6'].widget.attrs.update({'class': 'form-control rounded-1'})\n\n\n\n\nclass WorkexperienceForm(forms.ModelForm):\n begwork = forms.DateField(required=False, widget=forms.DateInput(attrs={'type': 'date', 'class': 'form-control rounded-1 formset-field'}))\n finishw = forms.DateField(required=False, widget=forms.DateInput(attrs={'type': 'date', 'class': 'form-control rounded-1 formset-field'}))\n organiza = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control rounded-1 formset-field'}))\n position = forms.CharField(required=False, widget=forms.TextInput(attrs={'class': 'form-control rounded-1 formset-field'}))\n workplaceres = forms.CharField(required=False, widget=forms.Textarea(attrs={'class': 'form-control rounded-1 formset-field', 'style':'height: 50px'}))\n\n class Meta:\n model = Workexperience\n\n fields = [\n 'begwork',\n 'finishw',\n 'organiza',\n 'position',\n 'workplaceres',\n ]\n\n # widgets = {\n # 'begwork': forms.DateInput(attrs={'type': 'date', 'class': 'formset-field'}),\n # 'finishw': forms.DateInput(attrs={'type': 'date', 'class': 'formset-field'}),\n # 'organiza': forms.TextInput(attrs={'class': 'formset-field'}),\n # 'position': forms.TextInput(attrs={'class': 'formset-field'}),\n # 'workplaceres': forms.Textarea(attrs={'class': 'formset-field'}),\n # }\n\n\n\n\n\n\n\n\n\n\n\n\n\nCHOOSEM = (\n ('до налогов', \"до налогов\"),\n (\"в руках\", \"в руках\"),\n )\n\nEMPLOYMENT = (\n (\"Полная занятость\", \"Полная занятость\"),\n (\"Неполная занятость\", \"Неполная занятость\"),\n (\"Проектная работа или разовое задание\", \"Проектная работа или разовое задание\"),\n (\"Волонтерство\", \"Волонтерство\"),\n (\"Практика\", \"Практика\"),\n )\n\nWORKHO = (\n (\"Полный день\", \"Полный день\"),\n (\"Сменная работа\", \"Сменная работа\"),\n (\"По таблицах\", \"По таблицах\"),\n (\"Долгая работа\", \"Долгая работа\"),\n )\n\nclass VacanciessForm(forms.ModelForm):\n job_title = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'autofocus': 'autofocus', 'placeholder': ''}\n ))\n job_code = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': ''}\n ))\n thesalaryfrom = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'от'}\n ))\n thesalaryto = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': 'до'}\n ))\n choosemo = forms.ChoiceField(widget=forms.RadioSelect, choices=CHOOSEM)\n geolocation = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': ''}\n ))\n emptype = forms.ChoiceField(widget=forms.RadioSelect, choices=EMPLOYMENT)\n worhou = forms.ChoiceField(widget=forms.RadioSelect, 
choices=WORKHO)\n name = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': ''}\n ))\n phone = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', 'placeholder': '+9'}\n ))\n comment = forms.CharField(required=True, widget=forms.TextInput(\n attrs = {'class':'form-control rounded-1', \"placeholder\": \"\"}\n ))\n\n class Meta:\n model = Vacanciess\n fields = (\n 'job_title', 'job_code', 'profess', 'thchopr', 'description', 'moneys',\n 'thesalaryfrom', 'thesalaryto', 'choosemo', 'provinces', 'region', 'geolocation', 'emptype',\n 'worhou', 'name', 'phone', 'comment'\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['profess'].widget.attrs.update({'class': 'form-control rounded-1'}) \n self.fields['thchopr'].widget.attrs.update({'class': 'form-control rounded-1'}) \n self.fields['moneys'].widget.attrs.update({'class': 'form-control rounded-1'})\n self.fields['provinces'].widget.attrs.update({'class': 'form-control rounded-1'}) \n self.fields['region'].widget.attrs.update({'class': 'form-control rounded-1'}) \n \n","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":17659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504111481","text":"###########################################################################\n# \n# Copyright 2019 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###########################################################################\n\nimport base64\nimport jsonpickle\nfrom time import sleep\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.utils import encoding\n\nfrom starthinker.util.project import project\nfrom starthinker.util.auth import get_profile\nfrom starthinker.util.auth.wrapper import CredentialsUserWrapper\nfrom starthinker.util.auth.storage import _credentials_storage_service\nfrom starthinker_ui.account.models import Account\n\nLEGACY_BUCKET_PREFIX = settings.UI_PROJECT.split(':', 1)[-1] # remove domain: part\n\ndef legacy_credentials_path(identifier):\n return '%s:ui/%s.json' % (LEGACY_BUCKET_PREFIX + \"-starthinker-users\", identifier)\n\ndef legacy_auth_decode(value):\n return jsonpickle.decode(base64.b64decode(encoding.smart_bytes(value)))\n\ndef legacy_credentails_get(cloud_path):\n service = _credentials_storage_service()\n bucket, filename = cloud_path.split(':',1)\n data = service.objects().get_media(bucket=bucket, object=filename).execute()\n return legacy_auth_decode(data)\n\n\nclass Command(BaseCommand):\n help = 'Migrate credentials to new format.'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--test',\n action='store_true',\n dest='test',\n default=False,\n help='Test conversion.',\n )\n\n def handle(self, *args, **kwargs):\n \n # loop through accounts\n for account in Account.objects.all():\n 
print('CONVERTING', account.email)\n\n try:\n # load legacy credentials\n credentials = legacy_credentails_get(legacy_credentials_path(account.identifier))\n\n # convert to new format\n new_credentials = CredentialsUserWrapper(credentials)\n\n # save new credentials\n account.set_credentials(new_credentials)\n\n if kwargs['test']:\n project.initialize(_user=account.get_credentials_path())\n profile = get_profile()\n print(profile)\n exit()\n\n except Exception as e:\n print(str(e))\n","sub_path":"starthinker_ui/account/management/commands/credentials_migrate.py","file_name":"credentials_migrate.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15883018","text":"#!/usr/bin/python3\n# user Github API to list 10 commits from specific user in specific repo\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n # user Github API to list 10 commits from specific user in specific repo\n try:\n repo = sys.argv[1]\n owner = sys.argv[2]\n url = 'https://api.github.com/repos/{}/{}/commits'.format(owner, repo)\n r = requests.get(url)\n r_json = r.json()\n length = len(r_json)\n if length > 10:\n length = 10\n for i in range(length):\n print(\"{}: {}\".format(r_json[i].get('sha'),\n r_json[i].get('commit')\n .get('author').get('name')))\n except:\n print(\"None\")\n","sub_path":"0x11-python-network_1/100-github_commits.py","file_name":"100-github_commits.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160929882","text":"#!/usr/bin/env python\n#\n# SPDX-License-Identifier: Apache-2.0\n# Copyright Contributors to the OpenTimelineIO project\n\nimport opentimelineio as otio\nimport opentimelineio.test_utils as otio_test_utils\n\nimport unittest\n\n\nclass OpenTimeTypeSerializerTest(unittest.TestCase):\n\n def test_serialize_time(self):\n rt = otio.opentime.RationalTime(15, 24)\n encoded = otio.adapters.otio_json.write_to_string(rt)\n decoded = otio.adapters.otio_json.read_from_string(encoded)\n self.assertEqual(rt, decoded)\n\n rt_dur = otio.opentime.RationalTime(10, 20)\n tr = otio.opentime.TimeRange(rt, rt_dur)\n encoded = otio.adapters.otio_json.write_to_string(tr)\n decoded = otio.adapters.otio_json.read_from_string(encoded)\n self.assertEqual(tr, decoded)\n\n tt = otio.opentime.TimeTransform(rt, scale=1.5)\n encoded = otio.adapters.otio_json.write_to_string(tt)\n decoded = otio.adapters.otio_json.read_from_string(encoded)\n self.assertEqual(tt, decoded)\n\n\nclass SerializableObjTest(unittest.TestCase, otio_test_utils.OTIOAssertions):\n def test_cons(self):\n so = otio.core.SerializableObjectWithMetadata()\n so.metadata['foo'] = 'bar'\n self.assertEqual(so.metadata['foo'], 'bar')\n\n def test_update(self):\n so = otio.core.SerializableObjectWithMetadata()\n so.metadata.update({\"foo\": \"bar\"})\n self.assertEqual(so.metadata[\"foo\"], \"bar\")\n so_2 = otio.core.SerializableObjectWithMetadata()\n so_2.metadata[\"foo\"] = \"not bar\"\n so.metadata.update(so_2.metadata)\n self.assertEqual(so.metadata[\"foo\"], \"not bar\")\n\n def test_copy_lib(self):\n so = otio.core.SerializableObjectWithMetadata()\n so.metadata[\"meta_data\"] = {\"foo\": \"bar\"}\n\n import copy\n\n # shallow copy is an error\n with self.assertRaises(ValueError):\n so_cp = copy.copy(so)\n\n # deep copy\n so_cp = copy.deepcopy(so)\n self.assertIsOTIOEquivalentTo(so, so_cp)\n\n so_cp.metadata[\"foo\"] = \"bar\"\n 
self.assertNotEqual(so, so_cp)\n\n def test_copy_subclass(self):\n @otio.core.register_type\n class Foo(otio.core.SerializableObjectWithMetadata):\n _serializable_label = \"Foof.1\"\n\n foo = Foo()\n foo.metadata[\"meta_data\"] = {\"foo\": \"bar\"}\n\n import copy\n\n with self.assertRaises(ValueError):\n foo_copy = copy.copy(foo)\n\n foo_copy = copy.deepcopy(foo)\n\n self.assertEqual(Foo, type(foo_copy))\n\n def test_schema_versioning(self):\n @otio.core.register_type\n class FakeThing(otio.core.SerializableObject):\n _serializable_label = \"Stuff.1\"\n foo_two = otio.core.serializable_field(\"foo_2\", doc=\"test\")\n ft = FakeThing()\n\n self.assertEqual(ft.schema_name(), \"Stuff\")\n self.assertEqual(ft.schema_version(), 1)\n\n with self.assertRaises(otio.exceptions.UnsupportedSchemaError):\n otio.core.instance_from_schema(\n \"Stuff\",\n 2,\n {\"foo\": \"bar\"}\n )\n\n ft = otio.core.instance_from_schema(\"Stuff\", 1, {\"foo\": \"bar\"})\n self.assertEqual(ft._dynamic_fields['foo'], \"bar\")\n\n @otio.core.register_type\n class FakeThing(otio.core.SerializableObject):\n _serializable_label = \"NewStuff.4\"\n foo_two = otio.core.serializable_field(\"foo_2\")\n\n @otio.core.upgrade_function_for(FakeThing, 2)\n def upgrade_one_to_two(_data_dict):\n return {\"foo_2\": _data_dict[\"foo\"]}\n\n @otio.core.upgrade_function_for(FakeThing, 3)\n def upgrade_one_to_two_three(_data_dict):\n return {\"foo_3\": _data_dict[\"foo_2\"]}\n\n ft = otio.core.instance_from_schema(\"NewStuff\", 1, {\"foo\": \"bar\"})\n self.assertEqual(ft._dynamic_fields['foo_3'], \"bar\")\n\n ft = otio.core.instance_from_schema(\"NewStuff\", 3, {\"foo_2\": \"bar\"})\n self.assertEqual(ft._dynamic_fields['foo_3'], \"bar\")\n\n ft = otio.core.instance_from_schema(\"NewStuff\", 4, {\"foo_3\": \"bar\"})\n self.assertEqual(ft._dynamic_fields['foo_3'], \"bar\")\n\n def test_equality(self):\n o1 = otio.core.SerializableObject()\n o2 = otio.core.SerializableObject()\n self.assertTrue(o1 is not o2)\n self.assertTrue(o1.is_equivalent_to(o2))\n self.assertIsOTIOEquivalentTo(o1, o2)\n\n def test_equivalence_symmetry(self):\n def test_equivalence(A, B, msg):\n self.assertTrue(A.is_equivalent_to(B), \"{}: A ~= B\".format(msg))\n self.assertTrue(B.is_equivalent_to(A), \"{}: B ~= A\".format(msg))\n\n def test_difference(A, B, msg):\n self.assertFalse(A.is_equivalent_to(B), \"{}: A ~= B\".format(msg))\n self.assertFalse(B.is_equivalent_to(A), \"{}: B ~= A\".format(msg))\n\n A = otio.core.Composable()\n B = otio.core.Composable()\n test_equivalence(A, B, \"blank objects\")\n\n A.metadata[\"key\"] = {\"a\": 0}\n test_difference(A, B, \"A has different metadata\")\n\n B.metadata[\"key\"] = {\"a\": 0}\n test_equivalence(A, B, \"add metadata to B\")\n\n A.metadata[\"key\"][\"sub-key\"] = 1\n test_difference(A, B, \"Add dict within A with specific metadata\")\n\n def test_truthiness(self):\n o = otio.core.SerializableObject()\n self.assertTrue(o)\n\n def test_instancing_without_instancing_support(self):\n o = otio.core.SerializableObjectWithMetadata()\n c = otio.core.SerializableObjectWithMetadata()\n o.metadata[\"child1\"] = c\n o.metadata[\"child2\"] = c\n self.assertTrue(o.metadata[\"child1\"] is o.metadata[\"child2\"])\n\n oCopy = o.clone()\n # Note: If we ever enable INSTANCING_SUPPORT in the C++ code,\n # then this will (and should) fail\n self.assertTrue(oCopy.metadata[\"child1\"] is not oCopy.metadata[\"child2\"])\n\n def test_cycle_detection(self):\n o = otio.core.SerializableObjectWithMetadata()\n o.metadata[\"myself\"] = o\n\n # Note: 
If we ever enable INSTANCING_SUPPORT in the C++ code,\n # then modify the code below to be:\n # oCopy = o.clone()\n # self.assertTrue(oCopy is oCopy.metadata[\"myself\"])\n with self.assertRaises(ValueError):\n o.clone()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_serializable_object.py","file_name":"test_serializable_object.py","file_ext":"py","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"624683307","text":"import os\nimport sys\nfrom io import BytesIO\nimport numpy as np\nfrom PIL import Image\nimport cv2\n\nimport tensorflow as tf\nimport time\n\n\nworkspace = '/home/admin/workspace/models/workspace/seg/'\n\nmodel_dir = workspace + '035.512.saliency255/' # 'deconv.075.aug/'\nres_dir = model_dir + 'results/'\nif not os.path.exists(res_dir):\n os.makedirs(res_dir)\n\ndata_dir = workspace + 'test_images/'\nlistpath = data_dir + 'list.txt'\n\nFROZEN_GRAPH_NAME = model_dir + 'deploy/deploy_graph.pb'\nINPUT_SIZE = 512\nPADDING = True\nUSE_HEATMAP = True\n\nclass SgmtModel(object):\n \"\"\"Class to load deeplab model and run inference.\"\"\"\n\n INPUT_TENSOR_NAME = 'image:0'\n OUTPUT_TENSOR_NAME = 'heatmap:0'\n\n def __init__(self):\n \"\"\"Creates and loads pretrained deeplab model.\"\"\"\n self.graph = tf.Graph()\n graph_def = None\n with tf.gfile.GFile(FROZEN_GRAPH_NAME, \"rb\") as f:\n print(FROZEN_GRAPH_NAME)\n graph_def = tf.GraphDef().FromString(f.read())\n\n if graph_def is None:\n raise RuntimeError('Cannot find inference graph in tar archive.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)\n\n self.sess = tf.Session(graph=self.graph, config=tf.ConfigProto(gpu_options=gpu_options))\n\n def run(self, image):\n \"\"\"Runs inference on a single image.\n\n Args:\n image: A PIL.Image object, raw input image.\n\n Returns:\n resized_image: RGB image resized from original input image.\n seg_map: Segmentation map of `resized_image`.\n \"\"\"\n width, height = image.size\n resize_ratio = 1.0 * INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n\n batch_size = 10\n the_input = [np.asarray(resized_image)]\n the_input = np.repeat(the_input, batch_size, 0)\n\n heatmap = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: the_input})\n\n t1 = time.time()\n heatmap = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: the_input})\n t2 = time.time()\n total = (t2 - t1) * 1000\n\n heatmap = heatmap[0, :, :, :]\n\n return heatmap, total\n\n\ndef infer_one(image, use_heatmap=True):\n # image preprocessing\n# img = Image.open(image_path)\n\n# padding = True\n width, height = image.size\n\n if PADDING:\n # padding\n large_one = max(width, height)\n \n scale = float(INPUT_SIZE) / float(large_one)\n \n new_width = 0\n new_height = 0\n if width >= height:\n new_width = INPUT_SIZE\n new_height = int(height * scale)\n else:\n new_height = INPUT_SIZE\n new_width = int(width * scale)\n \n image = image.resize((new_width, new_height), Image.ANTIALIAS)\n \n delta_w = INPUT_SIZE - new_width\n delta_h = INPUT_SIZE - new_height\n top, bottom = delta_h / 2, delta_h - delta_h / 2\n left, right = delta_w / 2, delta_w - delta_w / 2\n color = [127, 127, 127]\n img_array = np.array(image)\n img_array = cv2.copyMakeBorder(img_array, 
top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=color)\n \n image = Image.fromarray(np.uint8(img_array))\n\n else:\n # resize\n image = image.resize((INPUT_SIZE, INPUT_SIZE), resample = Image.BILINEAR)\n img_array = np.array(image)\n\n # run model\n model = SgmtModel()\n heatmap, running_time = model.run(image)\n print(type(heatmap))\n heatmap = np.float32(heatmap) / 255.0\n if not use_heatmap:\n heatmap = np.where(heatmap > 0.5, 1, 0)\n\n # post processing\n if PADDING:\n # padding\n embed_array = img_array\n embed_array = np.multiply(img_array, heatmap) \n \n # get results\n embed_crop = embed_array[delta_h / 2 : new_height + delta_h / 2, delta_w / 2 : new_width + delta_w / 2]\n embed_crop = Image.fromarray(np.uint8(embed_crop))\n # embed_crop.save('data/embed_tf.png')\n \n heatmap_array = np.squeeze(heatmap)\n heatmap_array = heatmap_array[delta_h / 2 : new_height + delta_h / 2, delta_w / 2 : new_width + delta_w / 2] * 255\n heatmap_crop = Image.fromarray(np.uint8(heatmap_array))\n # heatmap_crop.save('data/heatmap_tf.png')\n\n else:\n # resize\n embed_array = img_array\n embed_array = np.multiply(img_array, heatmap)\n embed_crop = Image.fromarray(np.uint8(embed_array))\n# embed_crop = embed_crop.resize((width, height), resample = Image.BILINEAR)\n\n heatmap_array = np.squeeze(heatmap)\n heatmap_array = heatmap_array * 255\n heatmap_crop = Image.fromarray(np.uint8(heatmap_array))\n# heatmap_crop = heatmap_crop.resize((width, height), resample = Image.BILINEAR)\n\n return embed_crop, heatmap_crop, running_time\n\n\n# now start inferring\n# with open(listpath) as f:\n# lines = f.readlines()\n# lines = [x.strip('\\n') for x in lines] \n# print(lines)\n\n# for filename in lines:\n# filename_root = os.path.splitext(filename)[0]\n#\n# image = Image.open(data_dir + filename)\n# embed_crop, heatmap_crop, running_time = infer_one(image, USE_HEATMAP)\n# \n# image.save(res_dir + filename)\n# embed_crop.save(res_dir + filename_root + '.embed_tf.png')\n# heatmap_crop.save(res_dir + filename_root + '.heatmap_tf.png')\n# print('Time consumed on ', filename, ': ', running_time, ' ms.')\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(\"inference \")\n exit(1)\n image = Image.open(sys.argv[1])\n embed_crop, heapmap_crop, running_time = infer_one(image, USE_HEATMAP)\n \n heapmap_crop.save(sys.argv[2])\n \n\n","sub_path":"workspace/seg/infer_utils/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548388023","text":"name = input('Enter your name: ')\nage = input('Enter your age: ')\n\nyear = 2018 - int(age) + 100\nmessage = name + ' will be 100 years old in ' + str(year) + '\\n'\nprint(message)\n\nnum = input('Enter a number: ')\nmessage = message * int(num)\n\nprint(message)","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65809664","text":"from utils import TwitterStreamer\n\n# Twitter Bot Spotter\n# ******************FOLLOW THESE STEPS******************\n\n# *****BEFORE STARTING MAKE SURE YOU HAVE FILLED YOUR API KEYS INTO constants.py*****\n\n# STEP 1\n# Run \"step_1.py\" and stop it after 1 minute\n\n# STEP 2\n# In \"step_2.json\" replace the ',' at the beginning with '[' , then add ']' at the very end\n\n# STEP 3\n# Run \"step_3.py\"\n\n# STEP 4\n# Find your new Plotly chart in the \"step_4\" directory\n\n# 
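A hedged sketch (not part of the original steps): STEP 2 can be automated.\n# The snippet assumes step_2.json begins with a ',' and holds comma-separated\n# records, exactly as described above.\ndef fix_step_2(path='step_2.json'):\n    # wrap the comma-separated records in brackets to form a JSON array\n    with open(path) as f:\n        raw = f.read().strip()\n    with open(path, 'w') as f:\n        f.write('[' + raw.lstrip(',') + ']')\n\n# 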
******************DELETE step_2.json BEFORE TRYING AGAIN******************\n\n\nif __name__ == '__main__':\n # try changing the keyword\n keyword = ['clinton']\n fetched_tweets_filename = \"step_2.json\"\n twitter_streamer = TwitterStreamer()\n twitter_streamer.stream_tweets(fetched_tweets_filename, keyword)\n\n","sub_path":"step_1.py","file_name":"step_1.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"514982489","text":"import graphene\nfrom db.solutions import solutions as list_of_solutions\n\nfrom flask import (\n Flask,\n request,\n jsonify,\n render_template,\n redirect,\n make_response,\n session\n)\n\nfrom flask_graphql import GraphQLView\n\nfrom graphene_sqlalchemy import (\n SQLAlchemyObjectType,\n SQLAlchemyConnectionField\n)\n\nfrom app import app, db\n\nfrom core import (\n security,\n helpers,\n middleware\n)\n\nfrom core.models import (\n Owner,\n Paste,\n User,\n Audit\n)\n\nfrom version import VERSION\n\n# SQLAlchemy Types\nclass UserObject(SQLAlchemyObjectType):\n class Meta:\n model = User\n interfaces = (graphene.relay.Node, )\n\nclass PasteObject(SQLAlchemyObjectType):\n p_id = graphene.String(source='id')\n class Meta:\n model = Paste\n interfaces = (graphene.relay.Node, )\n\nclass OwnerObject(SQLAlchemyObjectType):\n class Meta:\n model = Owner\n interfaces = (graphene.relay.Node, )\n\nclass CreatePaste(graphene.Mutation):\n title = graphene.String()\n content = graphene.String()\n public = graphene.Boolean()\n paste = graphene.Field(lambda:PasteObject)\n burn = graphene.Boolean()\n\n class Arguments:\n title = graphene.String()\n content = graphene.String()\n public = graphene.Boolean(required=False, default_value=True)\n burn = graphene.Boolean(required=False, default_value=False)\n\n def mutate(self, info, title, content, public, burn):\n owner = Owner.query.filter_by(name='DVGAUser').first()\n\n paste_obj = Paste.create_paste(\n title=title,\n content=content, public=public, burn=burn,\n owner_id=owner.id, owner=owner, ip_addr=request.remote_addr,\n user_agent=request.headers.get('User-Agent', '')\n )\n\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n\n return CreatePaste(paste=paste_obj)\n\nclass DeletePaste(graphene.Mutation):\n ok = graphene.Boolean()\n\n class Arguments:\n title = graphene.String()\n\n def mutate(self, info, title):\n Paste.query.filter_by(title=title).delete()\n db.session.commit()\n\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n\n return DeletePaste(ok=True)\n\nclass UploadPaste(graphene.Mutation):\n content = graphene.String()\n filename = graphene.String()\n\n class Arguments:\n content = graphene.String(required=True)\n filename = graphene.String(required=True)\n\n result = graphene.String()\n\n def mutate(self, info, filename, content):\n result = helpers.save_file(filename, content)\n owner = Owner.query.filter_by(name='DVGAUser').first()\n\n Paste.create_paste(\n title='Imported Paste from File - {}'.format(helpers.generate_uuid()),\n content=content, public=False, burn=False,\n owner_id=owner.id, owner=owner, ip_addr=request.remote_addr,\n user_agent=request.headers.get('User-Agent', '')\n )\n\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n\n return UploadPaste(result=result)\n\nclass ImportPaste(graphene.Mutation):\n result = graphene.String()\n\n class Arguments:\n host = graphene.String(required=True)\n port = graphene.Int(required=False)\n path 
= graphene.String(required=True)\n scheme = graphene.String(required=True)\n\n def mutate(self, info, host='pastebin.com', port=443, path='/', scheme=\"http\"):\n url = security.strip_dangerous_characters(f\"{scheme}://{host}:{port}{path}\")\n cmd = helpers.run_cmd(f'curl --insecure {url}')\n\n owner = Owner.query.filter_by(name='DVGAUser').first()\n Paste.create_paste(\n title='Imported Paste from URL - {}'.format(helpers.generate_uuid()),\n content=cmd, public=False, burn=False,\n owner_id=owner.id, owner=owner, ip_addr=request.remote_addr,\n user_agent=request.headers.get('User-Agent', '')\n )\n\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n\n return ImportPaste(result=cmd)\n\nclass Mutations(graphene.ObjectType):\n create_paste = CreatePaste.Field()\n delete_paste = DeletePaste.Field()\n upload_paste = UploadPaste.Field()\n import_paste = ImportPaste.Field()\n\nclass Query(graphene.ObjectType):\n node = graphene.relay.Node.Field()\n pastes = graphene.List(PasteObject, public=graphene.Boolean())\n paste = graphene.Field(PasteObject, p_id=graphene.String())\n system_update = graphene.String()\n system_diagnostics = graphene.String(username=graphene.String(), password=graphene.String(), cmd=graphene.String())\n system_health = graphene.String()\n read_and_burn = graphene.Field(PasteObject, p_id=graphene.Int())\n\n def resolve_pastes(self, info, public=False):\n query = PasteObject.get_query(info)\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n return query.filter_by(public=public, burn=False).order_by(Paste.id.desc())\n\n def resolve_paste(self, info, p_id):\n query = PasteObject.get_query(info)\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n return query.filter_by(id=p_id, burn=False).first()\n\n def resolve_system_update(self, info):\n security.simulate_load()\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n return 'no updates available'\n\n def resolve_system_diagnostics(self, info, username, password, cmd='whoami'):\n q = User.query.filter_by(username='admin').first()\n real_passw = q.password\n res, msg = security.check_creds(username, password, real_passw)\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n if res:\n output = f'{cmd}: command not found'\n if security.allowed_cmds(cmd):\n output = helpers.run_cmd(cmd)\n return output\n return msg\n\n def resolve_read_and_burn(self, info, p_id):\n result = Paste.query.filter_by(id=p_id, burn=True).first()\n Paste.query.filter_by(id=p_id, burn=True).delete()\n db.session.commit()\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n return result\n\n def resolve_system_health(self, info):\n Audit.create_audit_entry(gqloperation=helpers.get_opname(info.operation))\n return 'System Load: {}'.format(\n helpers.run_cmd(\"uptime | awk '{print $10, $11, $12}'\")\n )\n\n\n@app.route('/')\ndef index():\n resp = make_response(render_template('index.html'))\n resp.set_cookie(\"env\", \"Z3JhcGhpcWw6ZGlzYWJsZQ==\")\n return resp\n\n@app.route('/about')\ndef about():\n return render_template(\"about.html\")\n\n@app.route('/solutions')\ndef solutions():\n return render_template(\"solutions.html\", solutions=list_of_solutions)\n\n@app.route('/create_paste')\ndef create_paste():\n return render_template(\"paste.html\", page=\"create_paste\")\n\n@app.route('/import_paste')\ndef import_paste():\n return render_template(\"paste.html\", 
page=\"import_paste\")\n\n@app.route('/upload_paste')\ndef upload_paste():\n return render_template(\"paste.html\", page=\"upload_paste\")\n\n@app.route('/my_pastes')\ndef my_paste():\n return render_template(\"paste.html\", page=\"my_pastes\")\n\n@app.route('/public_pastes')\ndef public_paste():\n return render_template(\"paste.html\", page=\"public_pastes\")\n\n@app.route('/audit')\ndef audit():\n audit = Audit.query.all()\n return render_template(\"audit.html\", audit=audit)\n\n\n@app.route('/start_over')\ndef start_over():\n msg = \"Restored to default state.\"\n res = helpers.initialize()\n\n if 'done' not in res:\n msg=\"Could not restore to default state.\"\n\n return render_template('index.html', msg=msg)\n\n@app.route('/difficulty/')\ndef difficulty(level):\n if level in ('easy', 'hard'):\n message = f'Changed difficulty level to {level.capitalize()}'\n else:\n message = 'Level must be Beginner or Expert.'\n level = 'easy'\n\n helpers.set_mode(level)\n\n return render_template('index.html', msg = message)\n\n\n@app.context_processor\ndef get_version():\n return dict(version=VERSION)\n\n@app.before_request\ndef set_difficulty():\n mode_header = request.headers.get('X-DVGA-MODE', None)\n if mode_header:\n if mode_header == 'Expert':\n helpers.set_mode('hard')\n else:\n helpers.set_mode('easy')\n else:\n if session.get('difficulty') == None:\n helpers.set_mode('easy')\n\nschema = graphene.Schema(query=Query, mutation = Mutations)\n\ngql_middlew = [\n middleware.CostProtectionMiddleware(),\n middleware.DepthProtectionMiddleware(),\n middleware.IntrospectionMiddleware(),\n middleware.processMiddleware()\n]\n\nigql_middlew = [\n middleware.IGQLProtectionMiddleware()\n]\n\napp.add_url_rule('/graphql', view_func=GraphQLView.as_view(\n 'graphql',\n schema=schema,\n middleware=gql_middlew,\n batch=True\n))\n\napp.add_url_rule('/graphiql', view_func=GraphQLView.as_view(\n 'graphiql',\n schema = schema,\n graphiql = True,\n middleware = igql_middlew,\n batch=True\n))\n\n\n","sub_path":"Damn-Vulnerable-GraphQL-Application-master-SigSci-Agent/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394209635","text":"from tqdm import tqdm\nimport torch\nfrom modelfile import Model, Config\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom data_utils import load_data, Dataset\nimport os\n\n\nglove_vec_size = 300\nhidden_size = 100\n\ndata_dir = '../../data'\n\nlr = 0.002\t\t\t\t\t# learning rate\nDELTA = 0.0000000001\t\t# to avoid 0 in log\nweight_decay = 0.005\t\t# L2 regularization term\n\nmax_plen = 128\t\t# Max passage length\nmax_qlen = 16\t\t# Max query length\nnum_para = 10\t\t# Number of passages\n\nn_epochs = 100\t\t# Number of epochs\n\n\ndef train(n_epochs):\n\t\n\tmodel.train()\n\tfor epoch in tqdm(range(n_epochs)):\n\n\t\ttrain_loss = 0\n\t\t\n\t\tfor query_id, e_q, e_d, qseq_len, seq_len, labels in tqdm(training_generator):\n\t\t\t\n\t\t\tinputs = {\n\t\t\t\t\t'e_q' : e_q.to(device),\n\t\t\t\t\t'e_d' : e_d.to(device),\n\t\t\t\t\t'qseq_len' : qseq_len,\n\t\t\t\t\t'seq_len' : seq_len.to(device)\n\t\t\t}\n\n\t\t\toptimizer.zero_grad()\n\t\t\ts_d_norm = model(**inputs)\t\n\t\t\t# shape of s_d_norm - [batch_size, num_para]\n\n\t\t\tbatch_size = labels.shape[0]\n\t\t\t\n\t\t\tlabels = labels.to(device)\n\n\t\t\t# computing loss\n\t\t\tl1 = labels * torch.log(s_d_norm + DELTA)\n\t\t\tl2 = (1 - labels) * torch.log(1 - s_d_norm + 
DELTA)\n\t\t\tl_de = torch.add(l1, l2)\n\t\t\tloss = -torch.mean(l_de)\n\n\n\t\t\ttrain_loss += loss * batch_size\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\tavg_loss = train_loss/len(train_data)\n\n\t\tprint('training loss = ', avg_loss.item(), ' epoch = ', epoch+1)\n\n\t\ttorch.save({\n\t\t\t'model_state_dict': model.state_dict(),\n\t\t\t'optimizer_state_dict': optimizer.state_dict(),\n\t\t}, 'checkpoints/saved_model_'+str(epoch)+'.pth')\n\n\ttorch.save({\n\t\t\t'model_state_dict': model.state_dict(),\n\t\t\t'optimizer_state_dict': optimizer.state_dict(),\n\t\t}, 'checkpoints/saved_model.pth')\n\n\ntrain_data = load_data(data_dir + '/msmarco/train_v2.1.json')\n\nprint('Done loading Training data.')\n\n\nparams = {'batch_size': 128,\n          'shuffle': True,\n          'num_workers': 20,\n          'pin_memory': True}\n\n\ntraining_set = Dataset(train_data, max_plen, max_qlen, glove_vec_size, data_dir)\ntraining_generator = DataLoader(training_set, **params)\n\n\ndevice = torch.device('cpu')\n\ncuda = torch.cuda.is_available()\nif cuda:\n\tdevice = torch.device('cuda')\n\n\nconfig = Config(glove_vec_size, hidden_size, max_plen, max_qlen, num_para, device)\n\nmodel = Model(config)\nif cuda:\n\tmodel = model.to(device)\n\noptimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\nif not os.path.exists('checkpoints'):\n\tos.makedirs('checkpoints')\n\ntrain(n_epochs)","sub_path":"passage_ranking_extractive_summarization/passage_ranking/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
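A hedged aside on the train.py record above: the hand-rolled loss is plain binary cross-entropy, so PyTorch's built-in functional form computes the same value up to the DELTA epsilon added for log-safety. A minimal, self-contained check (all names below are stand-ins, not from the original script):

import torch
import torch.nn.functional as F

scores = torch.rand(4, 10)                     # stand-in for s_d_norm
labels = torch.randint(0, 2, (4, 10)).float()  # stand-in for the 0/1 labels

manual = -torch.mean(labels * torch.log(scores + 1e-10)
                     + (1 - labels) * torch.log(1 - scores + 1e-10))
builtin = F.binary_cross_entropy(scores, labels)
print(manual.item(), builtin.item())           # agree to within ~1e-6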
+{"seq_id":"128029222","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport operator\nimport re\n\ndef usage():\n    return '''\nThis script extracts common German surnames from phone book data collected in 2002. To use, first\ndownload, as .html files, the following three wiki pages (each batch contains 1000 names):\n\nhttp://wiki-de.genealogy.net/Die_1000_h%C3%A4ufigsten_Familiennamen_in_Deutschland\nhttp://wiki-de.genealogy.net/Die_2000_h%C3%A4ufigsten_Familiennamen_in_Deutschland\nhttp://wiki-de.genealogy.net/Die_3000_h%C3%A4ufigsten_Familiennamen_in_Deutschland\n\nPut those into a single directory and point it to this script:\n\n%s genealogy_de_html_dir ../data/german_surnames.txt\n\ngerman_surnames.txt will include one line per name, ordered by rank.\n...\n    ''' % sys.argv[0]\n\nEXTRACT_RANK_AND_TOKEN_RE = re.compile(r'>([\s\d.]*)]*\(Familienname\)[^>]*>([\w\s]+)')\n\nHAS_UMLAUTS_RE = re.compile(r'[äöüß]')\n\ndef parse_wiki_tokens(html_doc_str):\n    results = []\n    num_tokens = 0\n\n    for rank_and_token_match in EXTRACT_RANK_AND_TOKEN_RE.finditer(html_doc_str):\n        num_tokens += 1\n        rank_match = rank_and_token_match.group(1)\n        if rank_match:\n            rank = int(rank_match.replace('.', ''))\n        else:\n            rank = num_tokens\n        token = rank_and_token_match.group(2)\n        new_records = [(normalized, rank) for normalized in resolve_umlauts(token)]\n        results.extend(new_records)\n\n    # Each batch has 1000 entries.\n    assert 1000 == num_tokens\n    return results\n\ndef resolve_umlauts(token):\n    normalized = token.lower().replace(' ', '')\n    if not HAS_UMLAUTS_RE.search(normalized):\n        return [normalized]\n    else:\n        normalized_ae = (normalized\n                         .replace('ä', 'ae')\n                         .replace('ö', 'oe')\n                         .replace('ü', 'ue')\n                         .replace('ß', 'ss'))\n        normalized_a = (normalized\n                        .replace('ä', 'a')\n                        .replace('ö', 'o')\n                        .replace('ü', 'u')\n                        .replace('ß', 's'))\n        return [normalized, normalized_a, normalized_ae]\n\n\ndef main(genealogy_de_html_root, output_filename):\n    token_rank = [] # list of pairs\n    for filename in os.listdir(genealogy_de_html_root):\n        path = os.path.join(genealogy_de_html_root, filename)\n        with open(path, 'r', encoding='utf8') as f:\n            token_rank.extend(parse_wiki_tokens(f.read()))\n    token_rank.sort(key=operator.itemgetter(1))\n    with open(output_filename, 'w', encoding='utf8') as f:\n        for surname, _ in token_rank:\n            f.write('{}\\n'.format(surname))\n\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        print(usage())\n    else:\n        main(*sys.argv[1:])\n        sys.exit(0)\n","sub_path":"data-scripts/count_genealogy_de.py","file_name":"count_genealogy_de.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"559260402","text":"import logging\nfrom sanic import Blueprint\nfrom sanic.response import json\nfrom sanic.exceptions import InvalidUsage, NotFound\nfrom tao.models.product import Product, Label, Order\nfrom tao.settings import PRODUCTS_LINE_LIMIT\nfrom tao.utils import jsonify\nfrom bson.objectid import ObjectId\n\n\nproduct_bp = Blueprint('product')\n\n\n@product_bp.get('/api/v1/products')\nasync def get_products(request):\n    \"\"\"Fetch the list of published products.\"\"\"\n    results = [record async for record in Product.find({'flag': True})]\n    return json(jsonify({'products': results, 'total': len(results)}))\n\n\n@product_bp.get('/api/v1/all_products')\nasync def get_all_products(request):\n    \"\"\"Fetch the list of all products.\"\"\"\n    products = [record async for record in Product.find({})]\n    for i in products:\n        label = await Label.find_one({'_id': ObjectId(i.get('label', None))})\n        i.update({'label': label['name']})\n    return json(jsonify({'products': products, 'total': await Product.count_documents({})}))\n\n\n@product_bp.post('/api/v1/order/')\nasync def post_order(request, user_id):\n    \"\"\"Submit a product order.\"\"\"\n    if not request.json:\n        raise InvalidUsage('not json request!')\n    product = request.json.get('product')\n    num = request.json.get('num')\n    order = await Order.create(product, user_id, num)\n    return json(jsonify({'id': order.inserted_id}))\n\n\n@product_bp.get('/api/v1/user_orders')\nasync def get_user_orders(request):\n    \"\"\"Fetch all orders for one user.\"\"\"\n    user_id = request.json.get('id')\n    orders = [record async for record in Order.find({'id': ObjectId(user_id)})]\n    return json(jsonify({'products': orders, 'total': await Product.count_documents({})}))\n\n\n@product_bp.get('/api/v1/all_orders')\nasync def get_all_orders(request):\n    \"\"\"Fetch all orders across users.\"\"\"\n    orders = [record async for record in Order.find({})]\n    return json(jsonify({'products': orders, 'total': await Product.count_documents({})}))\n\n@product_bp.put('/api/v1/loadProduct')\nasync def load_product(request):\n    \"\"\"Put a product on, or take it off, the shelf.\"\"\"\n    if not request.json:\n        raise InvalidUsage('not json request!')\n    flag = request.json.get('flag')\n    name = request.json.get('name')\n    logging.info(flag)\n    result = await Product.update_one({'name': name}, {'$set': {'flag': flag}})\n    logging.info(result)\n    return json(jsonify({'_id': flag}))\n\n\n@product_bp.put('/api/v1/product')\nasync def update_product(request):\n    \"\"\"Update a product.\"\"\"\n    if not request.json:\n        raise InvalidUsage('not json request!')\n    name = request.json.get('name')\n    stock = request.json.get('stock')\n    price = request.json.get('price')\n    label = request.json.get('label')\n    await Product.update_one(\n        {'name': name},\n        {'$set':\n            {\n                'name': name,\n                'stock': stock,\n                'price': price,\n                'label': label\n            }\n        }\n    )\n    return json(jsonify({'name': name}))\n\n\n@product_bp.delete('/api/v1/product')\nasync def delete_product(request):\n    if not request.json:\n        raise InvalidUsage('not json request!')\n    name = request.json.get('name')\n    await Product.delete_one({'name': name})\n    return json(jsonify({'name': name}))\n\n\n@product_bp.post('/api/v1/product')\nasync def post_product(request):\n    \"\"\"Create a product.\"\"\"\n    if not request.json:\n        raise InvalidUsage('not json request!')\n    name = request.json.get('name')\n    stock = request.json.get('stock')\n    price = request.json.get('price')\n    label = request.json.get('label')\n    product = await Product.create(name, stock, price, label)\n    return json(jsonify({'id': product.inserted_id}))\n\n\n@product_bp.get('/api/v1/labels')\nasync def get_label(request):\n    \"\"\"Fetch the product categories.\"\"\"\n    labels = [record async for record in Label.find({})]\n    return json(jsonify({'labels': labels}))\n\n\n@product_bp.post('/api/v1/label')\nasync def add_label(request):\n    \"\"\"Add a product category.\"\"\"\n    name = request.json.get('name')\n    # await Label.delete_many({'name': name})\n    result = await Label.find_one({'name': name})\n    logging.info(result)\n    logging.info(not result)\n    if not result:\n        await Label.create(name)\n        return json(jsonify({'result': 'success'}))\n    raise InvalidUsage('分类已存在')\n","sub_path":"src/tao/web/api/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
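A hedged usage sketch for the Sanic product API above; the host, port, and payload values are illustrative assumptions, not taken from the original code:

import asyncio
import httpx

async def demo():
    # create one product, then list the published ones
    async with httpx.AsyncClient(base_url='http://127.0.0.1:8000') as client:
        r = await client.post('/api/v1/product', json={
            'name': 'tea', 'stock': 10, 'price': 3.5, 'label': 'drinks'})
        print(r.json())
        r = await client.get('/api/v1/products')
        print(r.json())

asyncio.run(demo())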
+{"seq_id":"310964626","text":"\n\"\"\"\nThis is a main script to reproduce results in the paper 'Predicting United States policy outcomes\nwith Random Forests', by Shawn McGuire and Charles Delahunt.\n \nIt runs a single setup (i.e. a hyperparameter and feature set combo). It will loop through each Policy
It will loop through each Policy\nDomain that is specified (or just once, if all Policy Domains combined is specified). For each\nPolicy Domain, it will train a Random Forest (or xgBoost) model, and optionally a Logistic model\nand a Neural Net model, on the cases within that Policy Domain. For each model, it repeats the\ntraining for the specified number of runs to collect accuracy statistics. Many plots and console\nprintouts are optional. Results are saved to .csv file. \n \nCopyright (c) 2020 Charles B. Delahunt. delahunt@uw.edu\nMIT License\n\n\"\"\" \n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nfrom sklearn import metrics\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import datasets\n\n# import support functions:\nfrom support_functions import plotFeatureStatsFunction, generateSetupStringFunction, \\\ngenerateTrainTestSplitFunction, parseDataframeFunction, calculateAccuracyStatsOnTestSetFunction, \\\ncalculatePrintAndSaveModelStatsOverAllRunsFunction\n\n\"\"\"\nNote re plot window location:\n1. To print plots in separate windows, run the following on the command line:\n%matplotlib qt\n2. To revert back to plots appearing in console:\n%matplotlib inline\n\"\"\"\n#%%\n\n\"\"\"\nUSER ENTRIES:\nThese are divided into entries you might modify, and entries (created for various experiments) that\nyou are unlikely to modify. So in most cases only the first set of entries will be relevant.\n\"\"\"\n\n''' 1. Parameters that might be modified: '''\nnumRuns = 3 # train this many models, to get accuracy statistics\nsaveResultsStub = 'resultsOneSetup_27july2020' # becomes part of filename\n\nstartYearForTest = 2005 # 2005 -> random train/test split from all data.\n# 1997 -> test set is all samples >= 1997 (retrodiction)\nrunLogisticRegressionFlag = True # True -> run logistic regression (as in Gilen's model) in\n# addition to Random Forest. Random Forest is always run.\noneModelPerPolicyDomainFlag = False # To select most salient IGs for each policy domain, use True.\n#--------------------------------------------\n# Interest group features:\nuseIGNAFlag = False # True -> use netIGA as feature. False -> do not use.\n# Some of the next few conditions have logical dependencies, eg they are mutually exclusive. 
Such\n# cases are called out before training begins.\nuseIndividualInterestGroupsFlag = True # Must be True for the next three flags to be relevant.\nuseAllInterestGroupsFlag = False\nuseRfChosenInterestGroupsFlag = True # True -> use RF-chosen, False -> use\n# logistic-chosen.\nuseShortInterestGroupListFlag = False # True -> use short list, False AND useAllIGsList == False ->\n# use medium list.\n# A list of Interest Groups and their column numbers are given at the end of this script.\n#-------------------------------------\n# policy domain feature:\nusePolicyDomainsFlag = False # True -> use policy domains as features.\n# NOTE: this script does not allow for using Policy Areas as features (this is\n# an artifact, and could be changed).\n#---------------------------------------------\n# Economic class features:\nuse90PercentFlag = True\nuse50PercentFlag = False\nuse10PercentFlag = False\nuse90Minus10PercentFlag = False\n#----------------------------------------------------\n# Output and plotting flags:\nprintDifferenceInRfChosenVsLogChosenFeaturesFlag = True # Table 4 in paper. NOTE: To reproduce\n# these results, use ONLY IGs as features, ie set both usePolicyDomainsFlag and\n# use90PercentFlag = False.\nshowFeaturePlotsFlag = True # Happens once per setup. This prints to console (and text file) RF\n# feature importance, IG-outcome correlations, and IG at-bats, and shows a bar plot of RF feature\n# importance. To see the most salient IGs for each policy domain, set this = True and\n# oneModelPerPolicyDomainFlag = True.\nnumFeaturesToPlot = 44 # 44 -> all IGs and also P90.\nshowAccuracyPlotsFlag = True # Plots accuracy results of each run separately. So if numRuns > 1,\n# set = False to prevent figure overload.\n\n# Outputs less likely to be used:\nprintToConsoleFlag = True # Only used in plotResultsFunction( )\nprintLogisticBetasFlag = True # To examine which features logistic regression weights.\nshowFeatureImportanceVsAtBatsScatterplotFlag = False # Print scatterplots of importance score vs\n# at-bats. This is voided if 'printLogisticBetasFlag' == False.\n\nprintTestCasesPerRunFlag = True\nprintExtraSetupDetailsFlag = True\nplotAccuraciesByThresholdsFlag = False # ! LOTS OF FIGURES (numRuns x numSetups x numModelTypes)\n# ---------- END OF ENTRIES MOST LIKELY TO BE MODIFIED -----------------------\n#%%\n''' 2. Parameters that can likely be left alone: '''\n\nlogisticPenalty = 'l2' # 'none', 'l1', 'l2'. 'l2' is the default. l1 and l2 return similar top\n# features and scores. 'none' returns wacky feature rankings. So sticking with 'l2' is good.\n\nuseBalancedModelFlag = True # whether to use balanced mode when training.\n\n# Choose whether to use Permutation or Gini impurity for RF feature selection:\nusePermutationFeatureRankingFlag = False # False -> use Gini impurity for choosing RF features.\n# True -> use permutation method for choosing RF features, and substitute these IGs in for the\n# usual RF-chosen SHORT list (ie only a short list is defined for permutation features). Gini\n# features work best.\nuseJointInsteadOfRFIGListsFlag = False # A one-off, can be ignored. Using combined (union of\n# rf-chosen and log-chosen) features gives worse results than RF-chosen features.\n\ntrainFractionDefault = 0.67\nignoreGunCasesFlag = False\nignoreMiscCasesFlag = False # these are cases that do not fit into the five main policy domains\n# (ie 'Miscellaneous').\nxgBoostFlag = False # True -> use xgBoost instead of RF. 
False -> standard RF.\nrunNeuralNetFlag = False # True -> train a NN in addition to a RF.\n\n# Flags to focus on cases where rich and poor disagree:\ndisagreementThreshold = 0.05 # To distinguish when 90 and 10 really disagree.\nrestrictTrainToHighDisagreementFlag = False # True -> only train on cases where |90-10| > 0.05.\nrestrictTestToHighDisagreementFlag = False # True -> only test on cases where |90-10| > 0.05.\n\n# Flags about how thresholds (for test set use) are picked on training sets:\nuseSimpleMaxForThreshFlag = True # False -> use a Bayesian-flavored version. See def\n# calculateAccuracyStatsOnTestSetFunction( )\nuncertaintyValue = 0.02 # used when 'useSimpleMaxForThreshFlag' = False.\n\nweightFeatureOutcomeCorrelationsByAtBatsOnly = True # True -> feature correlation with outcomes is\n# normalized by number of at-bats, not number of total cases.\nnumInLinspace = 101 # For roc and accuracy curves.\n#-------------------------------------------------------------------\n# Specialized model parameters (for NN, RF, and xgBoost):\n# RF AND xgboost parameters:\nmaxDepth = 4 # In general try 4. Try 13 when using all interest\n# groups on all data (ie one model for combined domains).\nnumTrees = 200\n# XG Boost parameters:\nnumRoundsBoosting = 50 # Number of rounds of boosting.\netaForBoosting = 1\n# NeuralNet params:\nhiddenLayers = (50, 50) # Try (50,50) for all cases.\n\n#---------------------------------------------------------------\n''' 3. Interest group subsets, that were determined to be most salient by\ntraining models and doing feature selection on individual Policy Domains.\nNote: Larger gaps between entries in a dict value denote different tiers of\nimportance (eg most important group vs 2nd most important group). '''\n\n# 1. RF-chosen via Gini impurity:\n# Medium subset:\nrfChosenInterestGroupsToKeepList = \\\n{'ECYN':[36, 38, 51, 33], 'FPYN':[29, 20, 52, 24, 46, 14, 33, 13],\n 'SWYN':[12, 49, 42, 47, 53, 40, 31],'RLYN':[45, 26, 22, 40, 39, 51],\n 'GNYN':[44], 'MISC':[25, 13, 24, 18, 46, 23, 26]}\n# Smaller subset:\nrfChosenInterestGroupsToKeepListShort = \\\n{'ECYN':[36, 38], 'FPYN':[29, 20, 52, 24], 'SWYN':[12, 49],'RLYN':[26, 45, 22],\n 'GNYN':[44], 'MISC':[25]}\n# 2. RF-chosen via Permutation method. The permutation method was unstable, giving different feature\n# sets depending on whether P90 was included as a feature. There are two choices, one of which is\n# commented out.\n# Either using only IGs (no P90):\nrfPermutationChosenIGsToKeepListShort = \\\n{'ECYN':[37, 36, 33],'FPYN':[29, 20], 'SWYN':[12, 26, 47],'RLYN':[45],\n 'GNYN':[44], 'MISC':[23, 25, 46]}\n# Or using IGs and P90:\n# rfPermutationChosenIGsToKeepListShort = \\\n#{'ECYN':[37, 25, 19, 47], 'FPYN':[29, 20, 13], 'SWYN':[12, 49 ,26], 'RLYN':[45], 'GNYN':[],\n# 'MISC':[23, 46]}\n# 3. Logistic-chosen:\n# Medium subset:\nlogisticChosenInterestGroupsToKeepList = \\\n{'ECYN':[47, 37, 31, 32], 'FPYN':[37, 46, 29, 52, 14, 24], 'SWYN':[49, 23, 13, 54, 15, 36, 40],\n 'RLYN':[45, 31, 16, 34, 42, 26], 'GNYN':[44], 'MISC':[23, 35, 36, 17, 51, 45,44, 22]}\n# Smaller subset:\nlogisticChosenInterestGroupsToKeepListShort = \\\n{'ECYN':[47, 37],'FPYN':[37, 46, 29, 52], 'SWYN':[49, 23, 13], 'RLYN':[45, 31, 16], 'GNYN':[44],\n 'MISC':[23, 35]}\n# 4. 
Combined RF- and logistic-chosen IGs (gives significantly worse results than RF-chosen):\n# Medium subset:\njointInterestGroupsToKeepList = \\\n{'ECYN':[36, 38, 51, 33, 47, 37, 31, 32], 'FPYN':[29, 20, 52, 24, 46, 14, 33, 13, 37, 24],\n 'SWYN':[12, 49, 42, 47, 53, 40, 31, 23, 13, 54, 15, 36],\n 'RLYN':[45, 26, 22, 40, 39, 51, 31, 16, 34, 42], 'GNYN':[44],\n 'MISC':[25, 13, 24, 18, 46, 23, 26, 35, 36, 17, 51, 45, 44, 22]}\n# Short subset:\njointInterestGroupsToKeepListShort = \\\n{'ECYN':[36, 38, 47, 37],'FPYN':[29, 20, 52, 24, 37, 46], 'SWYN':[12, 49, 23, 13],\n 'RLYN':[26, 45, 22, 45, 31, 16], 'GNYN':[44], 'MISC':[25, 23]}\n \ndataFolder = 'data/'\n\n\"\"\" END USER ENTRIES \"\"\"\n\n#%%\n\"\"\" MAIN: \"\"\"\n\n''' First some preparation:'''\n\n# Load the main dataset:\ndataFull = pd.read_csv(dataFolder + 'gilens_data_sm_copy.csv')\ndataFull = dataFull.iloc[0:1836] # All rows after 1835 are nan.\n\npolicyDomainList = ['ECYN', 'FPYN', 'SWYN', 'RLYN', 'GNYN', 'MISC'] # MISC is defined a couple\n# lines down. It contains all cases not in the 5 defined policy groups.\nallInterestGroupIndices = np.arange(12, 55) # Used if selecting a subset of IGs.\n\n# Add a column to the dataframe to mark cases in MISC:\ndataFull['MISC'] = np.round(-1*(dataFull.GNYN + dataFull.FPYN + dataFull.SWYN\n + dataFull.RLYN + dataFull.ECYN - 1))\n# Make a dataframe for saving results:\ncolumnNames = ['modelType', 'balanced', 'policyDomain', 'startYearForTest', 'use90', 'use50',\n 'use10', 'use90Minus10', 'useIGNA', 'useIndividualIGs', 'useAllIGs',\n 'useRfChosenMediumIGs', 'useRfChosenShortIGs', 'useLogChosenMediumIGs',\n 'useLogChosenShortIGs', 'usePolicyDomains', 'useOnly90Minus10Disagreements',\n 'trainFraction', 'disagreementThreshold', 'maxDepth', 'hiddenLayers',\n 'IGSubsetUsed', 'numRuns', 'rawAccTestMeans', 'rawAccTestMedians', 'rawAccTestStds',\n 'balAccTestMeans', 'balAccTestMedians', 'balAccStds']\nresultsDataFrame = pd.DataFrame(columns=columnNames)\n\n# Complete the names of the files to save:\nbalStr = 'UNbalanced'\nif useBalancedModelFlag:\n balStr = 'balanced'\ncombinedStr = 'combinedDomains'\nif oneModelPerPolicyDomainFlag:\n combinedStr = 'oneModelPerDomain'\nyearStr = str(startYearForTest)\n\nif useJointInsteadOfRFIGListsFlag:\n rfChosenInterestGroupsToKeepList = jointInterestGroupsToKeepList\n rfChosenInterestGroupsToKeepListShort = jointInterestGroupsToKeepListShort\nif usePermutationFeatureRankingFlag:\n rfChosenInterestGroupsToKeepListShort = \\\n rfPermutationChosenIGsToKeepListShort # only SHORT list is modified\n\nsaveDataframeResultsFilename = \\\n'resultsDataframe_' + combinedStr + '_' + balStr + '_' + yearStr + '_' \\\n+ saveResultsStub + '.csv' # save results to dataframe\nsaveStringResultsFilename = \\\n'resultsAsStrings_' + combinedStr + '_' + balStr + '_' + yearStr + '_' \\\n+ saveResultsStub + '.txt' # save results as textfile (same as console output)\n\nfid = open(saveStringResultsFilename, 'a')\n\n# correct a dependent flag if necessary:\nif oneModelPerPolicyDomainFlag:\n usePolicyDomainsList = [False]\n\n# Initialize arrays to hold model results from each setup. 
The second index should match or exceed\n# the number of setups being run:\nrfRawAccArray = np.zeros([numRuns, 4, len(policyDomainList)])\nrfBalAccArray = np.zeros([numRuns, 4, len(policyDomainList)])\nrfAucArray = np.zeros([numRuns, 4, len(policyDomainList)])\nlogRawAccArray = np.zeros([numRuns, 4, len(policyDomainList)])\nlogBalAccArray = np.zeros([numRuns, 4, len(policyDomainList)])\nlogAucArray = np.zeros([numRuns, 4, len(policyDomainList)])\n\n# Some setup conditions cause duplication due to logical dependencies. List some conditions to\n# trigger skipping such setups:\ncondition1 = useAllInterestGroupsFlag and (useRfChosenInterestGroupsFlag\n or useShortInterestGroupListFlag)\n# Using all interest groups is mutually exclusive with using only subsets.\ncondition2 = oneModelPerPolicyDomainFlag and usePolicyDomainsFlag # These are mutually exclusive\n# conditions.\ncondition3 = not useIndividualInterestGroupsFlag \\\nand (useAllInterestGroupsFlag or useRfChosenInterestGroupsFlag\n or useShortInterestGroupListFlag) # The three conditions in parentheses can be active only if\n# 'useIndividualInterestGroupsFlag' == True.\nif condition1:\n print('Note: Use either all interest groups or just subsets as features, but not both.')\nif condition2:\n print('Note: Running one model per policy domain makes Policy Domains a meaningless' \\\n + ' feature, though resulting models are still valid.')\nif condition3:\n print(\" Note: 'useIndividualInterestGroupsFlag' must be True for other interest group flags\"\n + \" to be meaningful. \")\n\n#%%\n# Define some variables:\n\n# 1. use the specified interest group list:\nif useRfChosenInterestGroupsFlag:\n if useShortInterestGroupListFlag:\n interestGroupsToKeepList = rfChosenInterestGroupsToKeepListShort\n else:\n interestGroupsToKeepList = rfChosenInterestGroupsToKeepList\nelse:\n if useShortInterestGroupListFlag:\n interestGroupsToKeepList = logisticChosenInterestGroupsToKeepListShort\n else:\n interestGroupsToKeepList = logisticChosenInterestGroupsToKeepList\n\nif useBalancedModelFlag:\n balanceStr = 'balanced'\nelse:\n balanceStr = None\n\nif startYearForTest < 2003: # Case: retrodiction\n trainFraction = 1 # Use all pre-cutoff date cases for training\nelse:\n trainFraction = trainFractionDefault # < 1, to divide the full dataset into Train/Test.\n\n# 2. 
Book-keeping: combine all these interest groups for use in one big model:\na = list(interestGroupsToKeepList.values())\nb = []\nfor i in a:\n for j in i:\n b.append(j)\ninterestGroupsToKeepGroupedByDomain = b\ncombinedInterestGroupsToKeep = np.unique(b)\nif useAllInterestGroupsFlag:\n combinedInterestGroupsToKeep = allInterestGroupIndices\nif not useIndividualInterestGroupsFlag:\n combinedInterestGroupsToKeep = []\n#%%\n# store setup params for future use:\nsetupParams = \\\n {'numRuns':numRuns, 'startYearForTest':startYearForTest, 'trainFraction':trainFraction,\n 'oneModelPerPolicyDomainFlag':oneModelPerPolicyDomainFlag, 'ignoreGunCasesFlag':ignoreGunCasesFlag,\n 'ignoreMiscCasesFlag':ignoreMiscCasesFlag, 'xgBoostFlag':xgBoostFlag, 'usePolicyDomainsFlag':\n usePolicyDomainsFlag, 'use90PercentFlag':use90PercentFlag, 'use50PercentFlag':use50PercentFlag,\n 'use10PercentFlag':use10PercentFlag, 'use90Minus10PercentFlag':use90Minus10PercentFlag,\n 'useIndividualInterestGroupsFlag':useIndividualInterestGroupsFlag, 'useAllInterestGroupsFlag':\n useAllInterestGroupsFlag, 'useIGNAFlag':useIGNAFlag, 'restrictTrainToHighDisagreementFlag':\n restrictTrainToHighDisagreementFlag, 'restrictTestToHighDisagreementFlag':\n restrictTestToHighDisagreementFlag, 'maxDepth':maxDepth, 'numTrees':numTrees, 'numRoundsBoosting':\n numRoundsBoosting, 'etaForBoosting':etaForBoosting, 'hiddenLayers':hiddenLayers,\n 'weightFeatureOutcomeCorrelationsByAtBatsOnly':weightFeatureOutcomeCorrelationsByAtBatsOnly,\n 'useBalancedModelFlag':useBalancedModelFlag, 'useSimpleMaxForThreshFlag':useSimpleMaxForThreshFlag,\n 'uncertaintyValue':uncertaintyValue, 'useRfChosenInterestGroupsFlag':useRfChosenInterestGroupsFlag,\n 'useShortInterestGroupListFlag':useShortInterestGroupListFlag, 'disagreementThreshold':\n disagreementThreshold, 'policyDomainList':policyDomainList, 'showAccuracyPlotsFlag':\n showAccuracyPlotsFlag, 'printToConsoleFlag':printToConsoleFlag, 'plotAccuraciesByThresholdsFlag':\n plotAccuraciesByThresholdsFlag, 'useOnly90Minus10Disagreements':restrictTestToHighDisagreementFlag,\n 'allInterestGroupIndices': allInterestGroupIndices, 'interestGroupsToKeepGroupedByDomain':\n interestGroupsToKeepGroupedByDomain, 'combinedInterestGroupsToKeep':combinedInterestGroupsToKeep}\n\n#%%\n# print out a summary of setup details for the model about to run:\nsetupStr = generateSetupStringFunction(setupParams)\nprint()\nprint(setupStr)\nfid.write('' + '\\n')\nfid.write(setupStr + '\\n')\n\n#%%\n# Prep for looping through models (either one model for combined policy domains\n# or one model per policy domain):\nif oneModelPerPolicyDomainFlag:\n numToLoop = len(policyDomainList) # To prevent repeated plots\nelse:\n numToLoop = 1 # If one model only for all domains\n\n# Initialize arrays to store results from all runs and all models:\na = np.zeros((numRuns, numToLoop))\nrandomForestBalAcc = np.copy(a)\nrandomForestRawAcc = np.copy(a)\nrandomForestAuc = np.copy(a)\nrandomForestSens = np.copy(a)\nrandomForestSpec = np.copy(a)\nrandomForestPrecision = np.copy(a)\nrandomForestRecall = np.copy(a)\nrandomForestF1 = np.copy(a)\nxgBoostBalAcc = np.copy(a)\nxgBoostRawAcc = np.copy(a)\nxgBoostAuc = np.copy(a)\nxgBoostSens = np.copy(a)\nxgBoostSpec = np.copy(a)\nxgBoostPrecision = np.copy(a)\nxgBoostRecall = np.copy(a)\nxgBoostF1 = np.copy(a)\nxgBoostF1Raw = np.copy(a)\nlogisticBalAcc = np.copy(a)\nlogisticRawAcc = np.copy(a)\nlogisticAuc = np.copy(a)\nlogisticSens = np.copy(a)\nlogisticSpec = np.copy(a)\nlogisticPrecision = np.copy(a)\nlogisticRecall 
= np.copy(a)\nlogisticF1 = np.copy(a)\nnNetBalAcc = np.copy(a)\nnNetRawAcc = np.copy(a)\nnNetAuc = np.copy(a)\nnNetSens = np.copy(a)\nnNetSpec = np.copy(a)\nnNetPrecision = np.copy(a)\nnNetRecall = np.copy(a)\nnNetF1 = np.copy(a)\nacceptTestResultsMatrix = np.ones(a.shape, dtype=bool) # To mark invalid train/test splits\n\n#%%\n# If this is the first Setup, generate train/test splits for each run-ind pair. For each run, the\n# same split will get used for all model types.\ndataTrainBooleanArray = \\\n[[None for j in range(numToLoop)] for i in range(numRuns)] # Makes an array numRuns x numToLoop.\ndataTestBooleanArray = \\\n[[None for j in range(numToLoop)] for i in range(numRuns)]\n\nfor ind in range(numToLoop):\n    for run in range(numRuns):\n        if oneModelPerPolicyDomainFlag:\n            policyDomainsToKeep = policyDomainList[ind]\n        else:\n            policyDomainsToKeep = policyDomainList\n        setupParams['policyDomainsToKeep'] = policyDomainsToKeep # Temporary, to make the\n        # domain-specific train/test splits here. We reassign this later.\n        dataTrainBoolean, dataTestBoolean = \\\n            generateTrainTestSplitFunction(dataFull, setupParams)\n        dataTrainBooleanArray[run][ind] = dataTrainBoolean\n        dataTestBooleanArray[run][ind] = dataTestBoolean
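`generateTrainTestSplitFunction` is called above but not defined in this excerpt. As a rough sketch only — the `YEAR` column name is a guess, and the real function also restricts cases to `params['policyDomainsToKeep']` and, optionally, to high-disagreement cases — a splitter consistent with the surrounding logic could look like:

```python
import numpy as np

def generate_train_test_split_sketch(df, params):
    # Hypothetical stand-in for generateTrainTestSplitFunction (not shown here).
    if params['startYearForTest'] < 2003:
        # Retrodiction: train on every pre-cutoff case and test on the rest,
        # matching trainFraction = 1 in the retrodiction branch above.
        train = df['YEAR'].values < params['startYearForTest']  # 'YEAR' is an assumed column name
    else:
        # Otherwise draw a random trainFraction split of the full dataset.
        train = np.random.rand(len(df)) < params['trainFraction']
    return train, ~train
```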
\n\n#%%\n''' Preparation is done. Now run the model(s):'''\n\nfor ind in range(numToLoop):\n    # 'ind' indexes the policy domains we are running over. This is *not* the Setup (which is the\n    # selection of all parameters, including features). Either ind = 0 (if one model covers the\n    # combined policy domains), or ind = 0 (economic), 1 (foreign), 2 (social welfare), 3 (religion),\n    # 4 (guns), or 5 (misc). Note that for each 'ind', we may train multiple models (certainly\n    # RF or xgBoost, and maybe Logistic and NN).\n\n    # Initialize some matrices for this model:\n    featureScoresAllRuns = np.zeros([100, numRuns]) # Overshoot number of features (since currently\n    # unknown) when initializing.\n    featureOutcomeCorrelationsAllRuns = np.zeros([100, numRuns])\n    lregCoeffs = np.zeros([100, numRuns]) # To hold logistic beta coeffs.\n    numAtBatsAllRuns = np.zeros([100, numRuns])\n    numTrainCasesAllRuns = np.zeros([numRuns, 1])\n    numTestCasesAllRuns = np.zeros([numRuns, 1])\n    acceptTestResultsAllRuns = np.ones([numRuns, 1], dtype=bool) # This will mark whether a run had\n    # both pass and fail cases in the test set. Make a flag for extracting the cases in this\n    # domain. Do this here since it applies to all runs:\n    if oneModelPerPolicyDomainFlag:\n        policyDomainsToKeep = policyDomainList[ind] # Case: one policy domain.\n    else:\n        policyDomainsToKeep = str(policyDomainList) # Only for print to console.\n#        policyDomainsToKeep = policyDomainsToKeep.replace('[','')\n#        policyDomainsToKeep = policyDomainsToKeep.replace(']','')\n#        policyDomainsToKeep = policyDomainsToKeep.replace(\"'\",'')\n\n    # Define the set of interest groups to use as features:\n    if useAllInterestGroupsFlag:\n        interestGroupsToKeep = allInterestGroupIndices\n    else: # Use a strict subset of interest groups:\n        if oneModelPerPolicyDomainFlag:\n            interestGroupsToKeep = interestGroupsToKeepList[policyDomainsToKeep]\n        else: # Case: only one model, so combine all relevant interest groups:\n            interestGroupsToKeep = combinedInterestGroupsToKeep\n    if not useIndividualInterestGroupsFlag:\n        interestGroupsToKeep = []\n    setupParams['policyDomainsToKeep'] = policyDomainsToKeep\n    setupParams['interestGroupsToKeep'] = interestGroupsToKeep\n\n    #%% Do many runs and save results to the above arrays:\n    for run in range(numRuns):\n        #%%\n        \"\"\" Load and parse data\"\"\"\n\n        # load the relevant train/test split:\n        dataTrainBoolean = dataTrainBooleanArray[run][ind]\n        dataTestBoolean = dataTestBooleanArray[run][ind]\n#\n        # Create a dataframe with the correct columns:\n        data = parseDataframeFunction(dataFull, setupParams)\n\n        # Create feature and label arrays:\n        dataTrain = data.loc[dataTrainBoolean]\n        dataTest = data.loc[dataTestBoolean]\n\n        train_features = dataTrain.drop('Binary Outcome', axis=1).values\n        train_labels = dataTrain['Binary Outcome'].values\n        test_features = dataTest.drop('Binary Outcome', axis=1).values\n        test_labels = dataTest['Binary Outcome'].values\n\n        # misc book-keeping:\n        temp = dataTrain.drop('Binary Outcome', axis=1) # Used as an intermediate var here; has to\n        # be a dataframe\n        featureNames = list(temp.head(0))\n        numFeaturesToPlot = min(numFeaturesToPlot, len(featureNames))\n        # Reduce this param value if the original value exceeds the total number of features.\n\n        # Check num of cases of each type:\n        numPosTestCases = np.sum(test_labels == 1)\n        numNegTestCases = np.sum(test_labels == 0)\n        numNegTrainCases = np.sum(train_labels == 0)\n        numPosTrainCases = np.sum(train_labels == 1)\n        numTestCasesAllRuns[run] = numPosTestCases + numNegTestCases\n        acceptTestResultsAllRuns[run] = numPosTestCases > 0 and numNegTestCases > 0 # False if\n        # there are 0 pos or 0 neg test cases.\n\n        # Print warnings re bad train/test splits:\n        if numPosTestCases == 0:\n            print(policyDomainsToKeep + ', run ' + str(run)\n                  + ' Test has 0 pos cases. Excluding this run.')\n            fid.write(policyDomainsToKeep + ', run ' + str(run)\n                      + ' Test has 0 pos cases. Excluding this run.' + '\\n')\n        if numNegTestCases == 0:\n            print(policyDomainsToKeep + ', run ' + str(run)\n                  + ' Test has 0 neg cases. Excluding this run.')\n            fid.write(policyDomainsToKeep + ', run ' + str(run)\n                      + ' Test has 0 neg cases. Excluding this run.' + '\\n')\n\n        # Record number of at-bats in test set, per interest group:\n        for i in range(len(featureNames)):\n            if 'pred' in featureNames[i]:\n                numAtBatsAllRuns[i, run] = \\\n                np.sum(np.abs(dataTest[featureNames[i]].values - 0.5) > 0.1) # For voter\n                # preferences, consider 0.4 to 0.6 as 'neutral'.\n            else:\n                numAtBatsAllRuns[i, run] = np.sum(dataTest[featureNames[i]].values != 0)
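The at-bats bookkeeping just above feeds several of the later feature plots, so it is worth seeing in isolation. A toy illustration of the two counting rules (all values and names here are made up, not taken from the Gilens data):

```python
import numpy as np

ig_stances = np.array([0, 2, -1, 0, 1])            # one interest-group column; 0 = no position taken
pred90 = np.array([0.50, 0.80, 0.45, 0.20, 0.55])  # a 'pred'-style preference column in [0, 1]

# An interest group has an at-bat whenever it takes any nonzero stance:
ig_at_bats = np.sum(ig_stances != 0)               # -> 3

# For predicted-preference columns, 0.4 to 0.6 counts as neutral, so an
# at-bat requires the preference to sit outside that band:
pred_at_bats = np.sum(np.abs(pred90 - 0.5) > 0.1)  # -> 2

print(ig_at_bats, pred_at_bats)
```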
\n\n        #%%\n        ''' Always train a random forest model '''\n\n        ''' 1. standard random forest: '''\n        if not xgBoostFlag:\n            forestType = 'Random forest'\n            modelStr = 'randomForest' # Different capitalization is an artifact.\n\n            # Instantiate model and train\n            clf = RandomForestClassifier(n_estimators=numTrees, max_depth=maxDepth,\n                                         class_weight=balanceStr)\n\n            # Train the model for use on test set\n            clf.fit(train_features, train_labels)\n            # Run test set through model:\n            testProbScores = clf.predict_proba(test_features) # This returns two columns of\n            # probabilities\n            trainProbScores = clf.predict_proba(train_features) # For setting thresholds to apply\n            # to test set\n\n            # Record feature importances:\n            if usePermutationFeatureRankingFlag:\n                # CAUTION: This method is very noisy:\n                # 1. We'll need the original model balanced acc:\n                # a. Either use variable threshold:\n                fullModelAccStats = calculateAccuracyStatsOnTestSetFunction \\\n                    (trainProbScores, testProbScores, train_labels, test_labels, setupParams, modelStr,\n                     fid)\n                # b. Or fixed 0.5 threshold:\n                testYhat = clf.predict(test_features)\n                tp = np.sum(np.logical_and(testYhat == 1, test_labels == 1))\n                fn = np.sum(np.logical_and(testYhat == 0, test_labels == 1))\n                tn = np.sum(np.logical_and(testYhat == 0, test_labels == 0))\n                fp = np.sum(np.logical_and(testYhat == 1, test_labels == 0))\n                fullTestBalAcc = 0.5*(tp/(tp+fn) + tn/(tn + fp))\n\n                # 2. Now mix up the values of each feature in turn, then calculate the loss in\n                # accuracy:\n                for i in range(len(featureNames)):\n                    X = test_features.copy()\n                    np.random.shuffle(X[:, i])\n                    # goes with (a):\n                    thisPermutedSetProbScores = clf.predict_proba(X)\n                    thisAccStats = \\\n                        calculateAccuracyStatsOnTestSetFunction(trainProbScores,\n                                                                thisPermutedSetProbScores, train_labels,\n                                                                test_labels, setupParams, modelStr,\n                                                                fid)\n                    featureScoresAllRuns[i, run] = \\\n                        (fullModelAccStats['balancedAccTest'] - thisAccStats['balancedAccTest']) \\\n                        / fullModelAccStats['balancedAccTest']\n\n            else: # Just use the built-in gini impurity\n                featureScoresAllRuns[range(len(featureNames)), run] = clf.feature_importances_\n\n            # Eliminate surplus rows:\n            featureScoresAllRuns = featureScoresAllRuns[range(len(featureNames)), :]\n\n        ''' 2. xgboost: '''\n        if xgBoostFlag:\n            forestType = 'xgBoost'\n            modelStr = 'xgBoost'\n\n            datasets.dump_svmlight_file(train_features, train_labels, 'xgbTrain.txt')\n            xgbTrain = xgb.DMatrix('xgbTrain.txt', silent=True)\n            datasets.dump_svmlight_file(test_features, test_labels, 'xgbTest.txt')\n            xgbTest = xgb.DMatrix('xgbTest.txt', silent=True)\n\n            # define cost:\n            if useBalancedModelFlag:\n                scale_pos_weight = numNegTrainCases/numPosTrainCases\n            else:\n                scale_pos_weight = 1\n\n            xgbParam = {'max_depth':maxDepth, 'eta':etaForBoosting, 'objective':'binary:logistic',\n                        'booster':'gblinear', 'scale_pos_weight': scale_pos_weight}\n            bst = xgb.train(xgbParam, xgbTrain, numRoundsBoosting)\n            # make predictions:\n            xgbTestOutput = bst.predict(xgbTest)\n            testProbScores = np.zeros((len(xgbTestOutput), 2))\n            testProbScores[:, 1] = xgbTestOutput\n            testProbScores[:, 0] = 1 - xgbTestOutput\n            # predictions on training set, to find a threshold:\n            xgbTrainOutput = bst.predict(xgbTrain)\n            trainProbScores = np.zeros((len(xgbTrainOutput), 2))\n            trainProbScores[:, 1] = xgbTrainOutput\n            trainProbScores[:, 0] = 1 - xgbTrainOutput\n            # End of xgBoost-specific section.
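The permutation branch earlier in this block shuffles one feature column at a time and scores the relative drop in balanced accuracy. scikit-learn ships the same idea as `sklearn.inspection.permutation_importance` (available from 0.22 on); a minimal sketch on made-up data, not the Gilens cases:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance

X, y = make_classification(n_samples=300, n_features=8, random_state=0)
clf = RandomForestClassifier(n_estimators=200, max_depth=4, random_state=0).fit(X, y)

# Shuffle each feature n_repeats times and record the mean drop in score;
# scoring='balanced_accuracy' mirrors the script's balanced-accuracy criterion.
result = permutation_importance(clf, X, y, scoring='balanced_accuracy',
                                n_repeats=10, random_state=0)
print(result.importances_mean)
```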
\n\n        # Calculate test set accuracy: ('aS' = accuracy stats). This function includes possible roc,\n        # balAcc, sens, spec plots.\n        aS = calculateAccuracyStatsOnTestSetFunction(trainProbScores, testProbScores, train_labels,\n                                                     test_labels, setupParams, modelStr, fid)\n\n        # Save to numRuns array:\n        if xgBoostFlag:\n            xgBoostBalAcc[run, ind] = aS['balancedAccTest']\n            xgBoostAuc[run, ind] = aS['aucScore']\n            xgBoostSens[run, ind] = aS['sensTest']\n            xgBoostSpec[run, ind] = aS['specTest']\n            xgBoostPrecision[run, ind] = aS['precisionTest']\n            xgBoostRecall[run, ind] = aS['sensTest']\n            xgBoostF1[run, ind] = 2*aS['sensTest']*aS['specTest'] \\\n                / (aS['sensTest'] + aS['specTest'])\n            xgBoostRawAcc[run, ind] = aS['rawAccTest']\n        else:\n            randomForestBalAcc[run, ind] = aS['balancedAccTest']\n            randomForestAuc[run, ind] = aS['aucScore']\n            randomForestSens[run, ind] = aS['sensTest']\n            randomForestSpec[run, ind] = aS['specTest']\n            randomForestPrecision[run, ind] = aS['precisionTest']\n            randomForestRecall[run, ind] = aS['sensTest']\n            randomForestF1[run, ind] = 2*aS['sensTest']*aS['specTest'] \\\n                / (aS['sensTest'] + aS['specTest'])\n            randomForestRawAcc[run, ind] = aS['rawAccTest']\n\n        #%%\n        ''' Train a Neural Net '''\n\n        if runNeuralNetFlag:\n            neuralNet = MLPClassifier(hiddenLayers, activation='relu', max_iter=5000,\n                                      early_stopping=True)\n            neuralNet = neuralNet.fit(train_features, train_labels)\n            testProbScoresNN = neuralNet.predict_proba(test_features)\n            trainProbScoresNN = neuralNet.predict_proba(train_features) # For setting thresholds to\n            # apply to test set\n\n            # Calculate test set accuracy: ('aS' = accuracy stats). Includes possible roc, balAcc,\n            # sens, spec plots.\n            aS = calculateAccuracyStatsOnTestSetFunction(trainProbScoresNN, testProbScoresNN,\n                                                         train_labels, test_labels, setupParams,\n                                                         'neuralNet', fid)\n\n            # save to numRuns array:\n            nNetBalAcc[run, ind] = aS['balancedAccTest']\n            nNetRawAcc[run, ind] = aS['rawAccTest']\n            nNetAuc[run, ind] = aS['aucScore']\n            nNetSens[run, ind] = aS['sensTest']\n            nNetSpec[run, ind] = aS['specTest']\n            nNetPrecision[run, ind] = aS['precisionTest']\n            nNetRecall[run, ind] = aS['sensTest']\n            nNetF1[run, ind] = 2*aS['sensTest']*aS['specTest'] \\\n                / (aS['sensTest'] + aS['specTest'])\n\n        #%%\n        ''' Train a logistic regression model (ie Gilens method) '''\n        if runLogisticRegressionFlag:\n            # Instantiate and train model:\n            if logisticPenalty == 'none':\n                lreg = LogisticRegression(class_weight=balanceStr, penalty=logisticPenalty,\n                                          solver='lbfgs') # C = 1, fit_intercept = True\n            else:\n                lreg = LogisticRegression(class_weight=balanceStr, penalty=logisticPenalty)\n\n            # Create a feature scaler and fit it to the training data:\n            scaler = StandardScaler()\n            trainFeaturesScaled = scaler.fit_transform(train_features)\n            testFeaturesScaled = scaler.transform(test_features)\n\n            lreg = lreg.fit(trainFeaturesScaled, train_labels)\n\n            # Run test set through model:\n            testProbScoresLreg = lreg.predict_proba(testFeaturesScaled)\n            trainProbScoresLreg = lreg.predict_proba(trainFeaturesScaled)\n\n            # Calculate test set accuracy: ('aS' = accuracy stats). 
Includes possible roc, balAcc,\n # sens, spec plots.\n aS = calculateAccuracyStatsOnTestSetFunction(trainProbScoresLreg, testProbScoresLreg,\n train_labels, test_labels, setupParams,\n 'logistic', fid)\n\n # Save to numRuns array:\n logisticBalAcc[run, ind] = aS['balancedAccTest']\n logisticRawAcc[run, ind] = aS['rawAccTest']\n logisticAuc[run, ind] = aS['aucScore']\n logisticSens[run, ind] = aS['sensTest']\n logisticSpec[run, ind] = aS['specTest']\n logisticPrecision[run, ind] = aS['precisionTest']\n logisticRecall[run, ind] = aS['sensTest']\n logisticF1[run, ind] = 2*aS['sensTest']*aS['specTest'] \\\n / (aS['sensTest'] + aS['specTest'])\n\n # Save betas:\n betas = lreg.coef_[0]\n lregCoeffs[range(len(betas)), run] = betas.transpose()\n\n #%%\n # See whether each interest group's correlation with the test set outcomes is positive or\n # negative. Note: this calculation will not be meaningful for policy domain features.\n for i in range(len(featureNames)):\n temp = np.copy(test_labels)\n temp[np.where(temp == 0)[0]] = -1 # Change labels to -1 and 1 to allow inner product\n # with preferences.\n pref = test_features[:, i] # This feature's preferences.\n # For pred90 etc, set the middle region (0.4 to 0.6) to 0 and scale to [-2, 2] to match\n # interest groups.\n if 'pred' in featureNames[i]:\n pref = (pref - 0.5)*4\n pref = np.maximum(pref, -2) # Only needed if multiplier > 4.\n pref = np.minimum(pref, 2)\n pref[np.where(np.logical_and(pref > -0.4, pref < 0.4))[0]] = 0\n numberAtBats = np.sum(pref != 0) # For weighting correlation score by at-bats only\n # rather than all cases\n if weightFeatureOutcomeCorrelationsByAtBatsOnly:\n denom = 2*numberAtBats\n else:\n denom = 2*len(test_labels)\n featureOutcomeCorrelationsAllRuns[i, run] = sum(np.multiply(pref, temp)) / denom\n\n ''' End of loop through numRuns '''\n\n # Save the RF accuracies to the 3-D array: (the second dimension is a carry-over (deadwood here)\n # from the script that loops over many models + feature sets).\n rfRawAccArray[:, 0, ind] = (randomForestRawAcc[:, ind]).flatten()\n rfBalAccArray[:, 0, ind] = (randomForestBalAcc[:, ind]).flatten()\n rfAucArray[:, 0, ind] = (randomForestAuc[:, ind]).flatten()\n # save the logistic accuracies similarly:\n logRawAccArray[:, 0, ind] = (logisticRawAcc[:, ind]).flatten()\n logBalAccArray[:, 0, ind] = (logisticBalAcc[:, ind]).flatten()\n logAucArray[:, 0, ind] = (logisticAuc[:, ind]).flatten()\n\n # Print number of test cases to console:\n testCasesMean = np.round(np.mean(numTestCasesAllRuns), 1)\n testCasesStd = np.round(np.std(numTestCasesAllRuns), 1)\n if printTestCasesPerRunFlag:\n print(str(testCasesMean) + ' +/-' + str(testCasesStd) + ' test cases per run')\n fid.write(str(testCasesMean) + ' +/-' + str(testCasesStd) + ' test cases per run' + '\\n')\n # 3. Feature importance and feature-outcome correlation plots:\n if showFeaturePlotsFlag and not xgBoostFlag: # Because we do not have xgBoost feature scores.\n # Cut off the surplus placeholders in featureScores:\n featureScoresAllRuns = featureScoresAllRuns[range(len(featureNames)), :]\n plotFeatureStatsFunction(featureScoresAllRuns, featureOutcomeCorrelationsAllRuns,\n numAtBatsAllRuns, featureNames, str(policyDomainsToKeep),\n numFeaturesToPlot, testCasesMean, fid)\n\n # 4. 
Print out logistic betas, mean and std:\n lregCoeffs = lregCoeffs[range(len(featureNames))]\n if runLogisticRegressionFlag and printLogisticBetasFlag:\n betaMeans = np.round(np.mean(lregCoeffs, axis=1), 3)\n betaStds = np.round(np.std(lregCoeffs, axis=1), 3)\n betaFoms = np.round(np.divide(np.abs(betaMeans), betaStds), 3) # Use abs(mean)/std as FoM.\n # remove nans:\n betaFoms[np.where(np.isnan(betaFoms))[0]] = 0\n sortedBetaFomIndices = np.flip(np.argsort(betaFoms))\n sortedBetaFoms = betaFoms[sortedBetaFomIndices]\n print('')\n print('logistic regression betas for ' + str(policyDomainsToKeep) + ':')\n for i in range(numFeaturesToPlot):\n print(str(sortedBetaFoms[i]) + '= mu/sigma, ' + str(betaMeans[sortedBetaFomIndices[i]]) \\\n + ' +/- ' + str(betaStds[sortedBetaFomIndices[i]]) + ' '\n + featureNames[sortedBetaFomIndices[i]])\n fid.write('' + '\\n')\n fid.write('logistic regression betas for ' + str(policyDomainsToKeep) + ':' + '\\n')\n for i in range(numFeaturesToPlot):\n fid.write(str(sortedBetaFoms[i]) + '= mu/sigma, ' \\\n + str(betaMeans[sortedBetaFomIndices[i]]) + ' +/- ' \\\n + str(betaStds[sortedBetaFomIndices[i]]) + ' ' \\\n + featureNames[sortedBetaFomIndices[i]] + '\\n')\n\n # Scatterplot logistic mean feature score and also RF feature score vs number of at-bats for\n # each IG:\n # We need numAtBats:\n atBatMean = np.mean(numAtBatsAllRuns, axis=1)\n atBatMean = atBatMean[:len(betaMeans)]\n # scale the importance scores so they are comparable:\n scaledRfFeatureMeans = np.mean(featureScoresAllRuns, axis=1)\n scaledRfFeatureMeans = scaledRfFeatureMeans / np.max(scaledRfFeatureMeans)\n scaledBetaMeans = np.abs(betaMeans) / np.max(np.abs(betaMeans))\n\n if startYearForTest >= 2003 and not xgBoostFlag: # Since we have no xgBoost feature scores.\n # Calculate linear fits, to plot these:\n r = scaledRfFeatureMeans.reshape(-1, 1) # To make shape [n, 1] instead of [n,]\n lo = scaledBetaMeans.reshape(-1, 1)\n ab = atBatMean.reshape(-1, 1)\n # use only IGs with >0 at-bats:\n nonZeroAtBatInds = np.where(ab > 1)[0]\n r = r[nonZeroAtBatInds]\n lo = lo[nonZeroAtBatInds]\n ab = ab[nonZeroAtBatInds]\n\n lin = LinearRegression()\n lin = lin.fit(ab, r)\n mRF = lin.coef_[0]\n bRF = lin.intercept_[0]\n\n # calculate R^2 scores\n r2RF = metrics.r2_score(r, bRF + mRF*ab)\n r2RF = np.round(r2RF, 2)\n\n lin = lin.fit(ab, lo)\n mLog = lin.coef_[0]\n bLog = lin.intercept_[0]\n # calculate R^2 scores\n r2Log = metrics.r2_score(lo, bLog + mLog*ab)\n r2Log = np.round(r2Log, 2)\n\n if showFeatureImportanceVsAtBatsScatterplotFlag:\n plt.figure()\n plt.plot(atBatMean, scaledBetaMeans, 'r.', markersize=12, label='Logistic')\n plt.plot(atBatMean, scaledRfFeatureMeans, 'b.', markersize=12, label='RF')\n plt.legend(loc='lower right', prop={'size':12, 'weight':'bold'})\n plt.xlabel('# at-bats', fontsize=14, fontweight='bold')\n plt.ylabel('importance score', fontsize=14, fontweight='bold')\n plt.title(str(policyDomainsToKeep) +' feature importances, ' + '. 
RF R^2 = ' \\\n + str(r2RF) + ', logistic R^2 = ' + str(r2Log),\n fontsize=14, fontweight='bold')\n plt.xticks(size=12, weight='bold')\n plt.yticks(size=12, weight='bold')\n plt.xlim([0, testCasesMean])\n plt.ylim([0, 1.05])\n plt.grid(b=False)\n # Plot linear fits:\n xAxisVals = [0, int(np.max(ab))]\n plt.plot(xAxisVals, bLog + mLog*xAxisVals, 'r:')\n plt.plot(xAxisVals, bRF + mRF*xAxisVals, 'b:')\n plt.show()\n\n # Populate acceptableTestResultsMatrix for this policy domain:\n acceptTestResultsMatrix[:, ind] = acceptTestResultsAllRuns.transpose()\n\n''' End of loop through policy domains '''\n\n#%%\n''' print out statistics of results to console: '''\n\n# For each model type, calc stats and put them into a string:\n# Note that *BalAcc, *Auc etc have one column per model. If oneModelPerPolicyDomainFlag == True,\n# then col 0 to 4 = ECYN, FPYN, SWYN, RLYN, GNYN.\n\n# 1. Random Forest XOR xgBoost:\nif not xgBoostFlag:\n accResultDict = {'rawAcc':randomForestRawAcc, 'balAcc':randomForestBalAcc,\n 'AUC': randomForestAuc, 'prec': randomForestPrecision,\n 'recall': randomForestRecall, 'f1': randomForestF1}\nelse:\n accResultDict = {'rawAcc':xgBoostRawAcc, 'balAcc':xgBoostBalAcc, 'AUC': xgBoostAuc,\n 'prec': xgBoostPrecision, 'recall': xgBoostRecall, 'f1': xgBoostF1}\n\nif xgBoostFlag:\n typeStr = 'xgBoost'\nelse:\n typeStr = 'random forest'\nif useBalancedModelFlag:\n typeStr = typeStr + '_balanced'\nelse:\n typeStr = typeStr + '_UNbalanced'\n\nresultsDataFrame = \\\ncalculatePrintAndSaveModelStatsOverAllRunsFunction(accResultDict, acceptTestResultsMatrix, typeStr,\n setupParams, resultsDataFrame, fid)\n\n# 2. logistic:\nif runLogisticRegressionFlag:\n accResultDict = {'rawAcc':logisticRawAcc, 'balAcc':logisticBalAcc, 'AUC': logisticAuc, 'prec':\n logisticPrecision, 'recall': logisticRecall, 'f1': logisticF1}\n\n typeStr = 'logistic'\n if useBalancedModelFlag:\n typeStr = typeStr + '_balanced'\n else:\n typeStr = typeStr + '_UNbalanced'\n\n resultsDataFrame = \\\n calculatePrintAndSaveModelStatsOverAllRunsFunction(accResultDict, acceptTestResultsMatrix,\n typeStr, setupParams, resultsDataFrame, fid)\n # Also print out difference of RF and logistic:\n rfRa = randomForestRawAcc\n rfBa = randomForestBalAcc\n rfAuc = randomForestAuc\n if xgBoostFlag:\n rfRa = xgBoostRawAcc\n rfBa = xgBoostBalAcc\n rfAuc = xgBoostAuc\n accResultDict = {'rawAcc': rfRa - logisticRawAcc, 'balAcc': rfBa - logisticBalAcc,\n 'AUC': rfAuc - logisticAuc}\n typeStr = 'RF minus logistic'\n resultsDataFrame = \\\n calculatePrintAndSaveModelStatsOverAllRunsFunction(accResultDict, acceptTestResultsMatrix,\n typeStr, setupParams, resultsDataFrame, fid)\n# 3. 
Neural net:\nif runNeuralNetFlag:\n    accResultDict = {'rawAcc':nNetRawAcc, 'balAcc':nNetBalAcc, 'AUC': nNetAuc,\n                     'prec': nNetPrecision, 'recall': nNetRecall, 'f1': nNetF1}\n\n    typeStr = 'neuralNet'\n    if useBalancedModelFlag:\n        typeStr = typeStr + '_balanced'\n    else:\n        typeStr = typeStr + '_UNbalanced'\n\n    resultsDataFrame = \\\n    calculatePrintAndSaveModelStatsOverAllRunsFunction(accResultDict, acceptTestResultsMatrix,\n                                                       typeStr, setupParams, resultsDataFrame, fid)\n\nresultsDataFrame.to_csv(saveDataframeResultsFilename)\n\nfid.close()\n\n#%%\n'''\nInterest groups column number key:\n\nAARP = 12\nAFL CIO = 13\nAirlines = 14\nAmerican Bankers Association = 15\nAmerican Council of Life Insurance = 16\nAmerican Farm Bureau Federation = 17\nAmerican Federation of State County and Municipal Employees = 18\nAmerican Hospital Association = 19\nAmerican Israel Public Affairs Committee = 20\nAmerican Legion = 21\nAmerican Medical Association = 22\nAssociation of Trial Lawyers = 23\nAutomobile companies = 24\nChamber of Commerce = 25\nChristian Coalition = 26\nComputer software and hardware = 27\nCredit Union National Association = 28\nDefense contractors = 29\nElectric companies = 30\nHealth Insurance Association = 31\nIndependent Insurance Agents of America = 32\nInternational Brotherhood of Teamsters = 33\nMotion Picture Association of America = 34\nNational Association of Broadcasters = 35\nNational Association of Home Builders = 36\nNational Association of Manufacturers = 37\nNational Association of Realtors = 38\nNational Beer Wholesalers Association = 39\nNational Education Association = 40\nNational Federation of Independent Business = 41\nNational Governors Association = 42\nNational Restaurant Association = 43\nNational Rifle Association = 44\nNational Right to Life Committee = 45\nOil Companies = 46\nPharmaceutical Research Manufacturers = 47\nRecording Industry Association = 48\nSecurities and investment companies = 49\nTelephone companies = 50\nTobacco companies = 51\nUnited Auto Workers union = 52\nUniversities = 53\nVeterans of Foreign Wars of the US = 54\n '''\n'''\nMIT license:\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify, merge, publish, distribute,\nsublicense, and/or sell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or\nsubstantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\nNOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT\nOF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''","sub_path":"run_models_main.py","file_name":"run_models_main.py","file_ext":"py","file_size_in_byte":47100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"441845467","text":"import psycopg2\r\nimport json\r\n\r\n\r\nclass ArgumentsManager:\r\n\r\n @staticmethod\r\n def get_str(sys_args, arg_id):\r\n \"\"\"\r\n\r\n :param sys_args: Array of string. I would REALLY appreciate if you use the sys.argv here. Really.\r\n :param arg_id: String. Example \"-linkid\".\r\n :return:\r\n \"\"\"\r\n\r\n # basic check\r\n if (sys_args is None) or (arg_id is None):\r\n return None\r\n\r\n if (arg_id in sys_args) and (sys_args.index(arg_id) < (len(sys_args) - 1)):\r\n return sys_args[sys_args.index(arg_id) + 1]\r\n else:\r\n return None\r\n\r\n @staticmethod\r\n def get_int(sys_args, arg_id):\r\n \"\"\"\r\n\r\n :param sys_args:\r\n :param arg_id:\r\n :return:\r\n \"\"\"\r\n\r\n arg_value = ArgumentsManager.get_str(sys_args, arg_id)\r\n if arg_value is not None:\r\n try:\r\n return int(ArgumentsManager.get_str(sys_args, arg_id))\r\n except ValueError:\r\n print(\"Argument '{0}' is not an integer ({1}).\".format(arg_id, arg_value))\r\n return None\r\n else:\r\n return None\r\n\r\n @staticmethod\r\n def get_flt(sys_args, arg_id):\r\n \"\"\"\r\n\r\n :param sys_args:\r\n :param arg_id:\r\n :return:\r\n \"\"\"\r\n\r\n arg_value = ArgumentsManager.get_str(sys_args, arg_id)\r\n if arg_value is not None:\r\n try:\r\n return float(ArgumentsManager.get_str(sys_args, arg_id))\r\n except ValueError:\r\n print(\"Argument '{0}' is not an float ({1}).\".format(arg_id, arg_value))\r\n return None\r\n else:\r\n return None\r\n\r\n def __init__(self):\r\n return\r\n\r\n\r\nclass DatabaseManager:\r\n\r\n @staticmethod\r\n def open_database_connection(dbc_json_file_path):\r\n \"\"\"\r\n\r\n :param dbc_json_file_path:\r\n :return:\r\n \"\"\"\r\n\r\n # read database connection file\r\n with open(dbc_json_file_path) as dbc_json_file:\r\n dbc_json_data = json.load(dbc_json_file)\r\n\r\n db_conn = psycopg2.connect(\"dbname={0} user={1} password={2} host={3} port={4}\".format(\r\n dbc_json_data['database'], dbc_json_data['username'], dbc_json_data['password'], dbc_json_data['host'],\r\n dbc_json_data['port']))\r\n\r\n print(\"Connecting to '{0}' as '{1}'.\".format(dbc_json_data['database'], dbc_json_data['username']))\r\n\r\n return db_conn\r\n\r\n def __init__(self):\r\n return\r\n","sub_path":"backend/server_crons/def_lib.py","file_name":"def_lib.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553856553","text":"# Copyright (c) 2020 University of Chicago\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom 
oslo_config import cfg\n\nfrom blazar.manager import exceptions as manager_exceptions\nfrom blazar.utils.openstack import base\nfrom oslo_log import log as logging\nfrom zunclient import client as zun_client\nfrom zunclient import exceptions as zun_exception\n\n\nzun_opts = [\n cfg.StrOpt(\n 'zun_api_version',\n default='1',\n help='Zun API version'),\n cfg.StrOpt(\n 'zun_api_microversion',\n default='1.22',\n help='Zun API microversion'),\n cfg.StrOpt(\n 'endpoint_override',\n help='Zun endpoint URL to use')\n]\n\nCONF = cfg.CONF\nCONF.register_opts(zun_opts, group='zun')\n\nLOG = logging.getLogger(__name__)\n\n\nclass BlazarZunClient(object):\n \"\"\"Client class for Zun service.\"\"\"\n\n def __init__(self, **kwargs):\n client_kwargs = base.client_kwargs(**kwargs)\n client_kwargs.setdefault('os_zun_api_version',\n CONF.zun.zun_api_microversion)\n self.zun = zun_client.Client(\n CONF.zun.zun_api_version, **client_kwargs)\n\n def __getattr__(self, attr):\n return getattr(self.zun, attr)\n\n\nclass ZunClientWrapper(object):\n @property\n def zun(self):\n zun = BlazarZunClient(endpoint_override=CONF.zun.endpoint_override)\n return zun\n\n\nclass ZunInventory(BlazarZunClient):\n def get_host_details(self, host):\n \"\"\"Get Zun capabilities of a single host\n\n :param host: UUID or name of zun compute node\n :return: Dict of capabilities or raise HostNotFound\n \"\"\"\n try:\n host = self.zun.hosts.get(host)\n except (zun_exception.NotFound, zun_exception.BadRequest):\n host_ids = []\n for h in self.zun.hosts.list():\n if h.hostname == host:\n host_ids.append(h.uuid)\n if len(host_ids) == 0:\n raise manager_exceptions.HostNotFound(host=host)\n elif len(host_ids) > 1:\n raise manager_exceptions.MultipleHostsFound(host=host)\n else:\n host = self.zun.hosts.get(host_ids[0])\n\n return {'id': host.uuid,\n 'name': host.hostname,\n 'containers': self.zun.containers.list(host=host.hostname)\n }\n","sub_path":"blazar/utils/openstack/zun.py","file_name":"zun.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583201794","text":"# Copyright (c) 2021 Christian Corsica. 
All Rights Reserved.\n\nimport os\nimport subprocess\nfrom piper.core.dcc import DCC\n\n\nclass Maya(DCC):\n\n    def __init__(self):\n        super(Maya, self).__init__()\n        self.registry_path = 'SOFTWARE\\\\Autodesk\\\\Maya'\n        self.registry_exclude = 'Capabilities'\n        self.registry_install_key = 'MAYA_INSTALL_LOCATION'\n        self.registry_install_path = 'SOFTWARE\\\\Autodesk\\\\Maya\\\\{}\\\\Setup\\\\InstallPath'.format(self.version_replace)\n        self.relative_python_path = ['bin', 'mayapy.exe']\n        self.relative_batch_path = ['bin', 'mayabatch.exe']\n\n    def runPythonBatch(self, version, command):\n        batch_path = self.getBatchPath(version)\n        full_command = '{0} -noAutoloadPlugins -command \"python(\"\"{1}\"\")\"'.format(batch_path, command)\n        subprocess.call(full_command)\n\n    def runPythonBatches(self, command):\n        for version in self.getVersions():\n            self.runPythonBatch(version, command)\n\n    def onBeforeInstalling(self):\n        os.environ['MAYA_SKIP_USERSETUP_PY'] = '1'\n        os.environ['PYMEL_SKIP_MEL_INIT'] = '1'\n","sub_path":"piper/core/maya_dcc.py","file_name":"maya_dcc.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"176494243","text":"#!/usr/bin/env python3\n\n# Some tools for reporting time of functions and the like.\n#\nimport datetime\nimport time\nfrom functools import wraps\n\n\ndef timethis(func):\n    ''' Report execution time of function; function result not maintained.'''\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        start = time.time()\n        result = func(*args, **kwargs)\n        end = time.time()\n        return end-start\n    return wrapper\n\n\ndef logthis(func, filename=None, mode='a'):\n    ''' Write function results to output filename; results returned.'''\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        logname = filename  # avoid rebinding the closure variable (UnboundLocalError)\n        if not logname:\n            logname = str(datetime.date.today()) + '-' + func.__name__\n        result = func(*args, **kwargs)\n        with open(logname, mode) as f:\n            print(result, file=f)\n        return result\n    return wrapper","sub_path":"03/logtools.py","file_name":"logtools.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
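A short usage sketch for the two decorators in logtools.py above (assuming the module is importable as `logtools`; note the deliberate design choice that `timethis` returns the elapsed time and discards the wrapped function's own result):

```python
from logtools import timethis  # assumes logtools.py is on the import path

@timethis
def busy(n):
    '''Burn a little CPU so there is something to time.'''
    return sum(i * i for i in range(n))

# Because the wrapper returns end - start, the call yields elapsed seconds,
# not the sum computed inside busy():
elapsed = busy(1_000_000)
print('busy() took {:.3f}s'.format(elapsed))
```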
{"seq_id":"481236660","text":"#======================================================================\n# ====================================\n# README file for Skew Correction component\n# ====================================\n\n# Filename : ocrd-anyBaseOCR-deskew.py\n\n# Author: Syed Saqib Bukhari, Mohammad Mohsin Reza, Md. Ajraf Rakib\n# Responsible: Syed Saqib Bukhari, Mohammad Mohsin Reza, Md. Ajraf Rakib\n# Contact Email: Saqib.Bukhari@dfki.de, Mohammad_mohsin.reza@dfki.de, Md_ajraf.rakib@dfki.de\n# Note: \n# 1) this work has been done in DFKI, Kaiserslautern, Germany.\n# 2) The parameter values are read from the ocrd-anyBaseOCR-parameter.json file. The values can be changed in that file.\n# 3) The command line IO usage is based on \"OCR-D\" project guidelines (https://ocr-d.github.io/). A sample image file (samples/becker_quaestio_1586_00013.tif) and mets.xml (work_dir/mets.xml) are provided. The sequence of operations is: binarization, deskewing, cropping and dewarping (or can also be: binarization, dewarping, deskewing, and cropping; depends upon use-case).\n\n# *********** Method Behaviour ********************\n# This function takes a document image as input and does the skew correction of that document.\n# *********** Method Behaviour ********************\n\n# *********** LICENSE ********************\n# License: ocropus-nlbin.py (from https://github.com/tmbdev/ocropy/) contains both functionalities: binarization and skew correction. \n# This method (ocrd-anyBaseOCR-deskew.py) only contains the skew correction functionality of ocropus-nlbin.py. \n# It still has the same licenses as ocropus-nlbin, i.e. Apache 2.0 (the ocropy license details are pasted below).\n# This file is dependent on the ocrolib library, which comes from https://github.com/tmbdev/ocropy/. \n\n#Copyright 2014 Thomas M. Breuel\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#*********** LICENSE ********************\n#======================================================================\n#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom pylab import *\nfrom numpy.ctypeslib import ndpointer\nimport argparse,os,os.path,glob,sys\nfrom scipy.ndimage import filters,interpolation,morphology,measurements\nfrom scipy import stats\nimport multiprocessing\nimport ocrolib\nimport json\nfrom xml.dom import minidom\n\nparser = argparse.ArgumentParser(\"\"\"\nImage deskewing using non-linear processing.\n \n python ocrd-anyBaseOCR-deskew.py -m (mets input file path) -I (input-file-grp name) -O (output-file-grp name) -w (Working directory)\n\nThis is a compute-intensive deskew method that works on degraded and historical book pages.\n\"\"\")\n\nparser.add_argument('-p','--parameter',type=str,help=\"Parameter file location\")\nparser.add_argument('-e','--escale',type=float,help='scale for estimating a mask over the text region, default: %(default)s')\nparser.add_argument('-t','--threshold',type=float,help='threshold, determines lightness, default: %(default)s')\nparser.add_argument('-b','--bignore',type=float,help='ignore this much of the border for threshold estimation, default: %(default)s')\nparser.add_argument('-ms','--maxskew',type=float,help='skew angle estimation parameters (degrees), default: %(default)s')\nparser.add_argument('--skewsteps',type=int,help='steps for skew angle estimation (per degree), default: %(default)s')\nparser.add_argument('--debug',type=float,help='display intermediate results, default: %(default)s')\nparser.add_argument('--lo',type=float,help='percentile for black estimation, default: %(default)s')\nparser.add_argument('--hi',type=float,help='percentile for white estimation, default: %(default)s')\nparser.add_argument('-Q','--parallel',type=int)\nparser.add_argument('-O','--Output',default=None,help=\"output directory\")\nparser.add_argument('-w','--work',type=str,help=\"Working directory location\", default=\".\")\nparser.add_argument('-I','--Input',default=None,help=\"Input directory\")\nparser.add_argument('-m','--mets',default=None,help=\"METs input file\")\nparser.add_argument('-o','--OutputMets',default=None,help=\"METs output file\")\nparser.add_argument('-g','--group',default=None,help=\"METs image group id\")\n\nargs = parser.parse_args()
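The lines that follow immediately overwrite these argparse values from a JSON parameter file, so the JSON file, not the command line, controls the tuning knobs. A minimal self-contained sketch of that precedence pattern (a literal JSON string stands in for the real parameter file):

```python
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument('-t', '--threshold', type=float, default=0.5)
args = parser.parse_args([])  # empty list simulates 'no CLI flags given'

param = json.loads('{"deskew": {"threshold": 0.2}}')  # stand-in for the JSON file
args.threshold = param['deskew']['threshold']
print(args.threshold)  # -> 0.2, the JSON value wins
```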
directory\")\nparser.add_argument('-m','--mets',default=None,help=\"METs input file\")\nparser.add_argument('-o','--OutputMets',default=None,help=\"METs output file\")\nparser.add_argument('-g','--group',default=None,help=\"METs image group id\")\n\nargs = parser.parse_args()\n\n## Read parameter values from json file\nif args.parameter:\n if not os.path.exists(args.parameter):\n print(\"Error : Parameter file does not exists.\")\n sys.exit(0)\n else:\n param = json.load(open(args.parameter))\nelse:\n if not os.path.exists('ocrd-anyBaseOCR-parameter.json'):\n print(\"Error : Parameter file does not exists.\")\n sys.exit(0)\n else:\n param = json.load(open('ocrd-anyBaseOCR-parameter.json'))\n\nargs.bignore = param[\"anyBaseOCR\"][\"deskew\"][\"bignore\"]\nargs.escale = param[\"anyBaseOCR\"][\"deskew\"][\"escale\"]\nargs.threshold = param[\"anyBaseOCR\"][\"deskew\"][\"threshold\"]\nargs.lo = param[\"anyBaseOCR\"][\"deskew\"][\"lo\"]\nargs.hi = param[\"anyBaseOCR\"][\"deskew\"][\"hi\"]\nargs.maxskew = param[\"anyBaseOCR\"][\"deskew\"][\"maxskew\"]\nargs.skewsteps = param[\"anyBaseOCR\"][\"deskew\"][\"skewsteps\"]\nargs.debug = param[\"anyBaseOCR\"][\"deskew\"][\"debug\"]\nargs.parallel = param[\"anyBaseOCR\"][\"deskew\"][\"parallel\"]\n### End to read parameters\n\ndef parseXML(fpath):\n input_files=[]\n xmldoc = minidom.parse(fpath)\n nodes = xmldoc.getElementsByTagName('mets:fileGrp')\n for attr in nodes:\n if attr.attributes['USE'].value==args.Input:\n childNodes = attr.getElementsByTagName('mets:FLocat')\n for f in childNodes:\n input_files.append(f.attributes['xlink:href'].value)\n return input_files\n\ndef write_to_xml(fpath):\n xmldoc = minidom.parse(args.mets)\n subRoot = xmldoc.createElement('mets:fileGrp')\n subRoot.setAttribute('USE', args.Output)\n\n for f in fpath:\n #basefile = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0]\n basefile = ocrolib.allsplitext(os.path.basename(f))[0]\n child = xmldoc.createElement('mets:file')\n child.setAttribute('ID', 'DESKEW_'+basefile)\n child.setAttribute('GROUPID', 'P_' + basefile)\n child.setAttribute('MIMETYPE', \"image/png\")\n\n subChild = xmldoc.createElement('mets:FLocat')\n subChild.setAttribute('LOCTYPE', \"URL\")\n subChild.setAttribute('xlink:href', f)\n\n #xmldoc.getElementsByTagName('mets:file')[0].appendChild(subChild);\n subRoot.appendChild(child)\n child.appendChild(subChild)\n\n #subRoot.appendChild(child)\n xmldoc.getElementsByTagName('mets:fileSec')[0].appendChild(subRoot);\n\n if not args.OutputMets:\n metsFileSave = open(os.path.join(args.work, os.path.basename(args.mets)), \"w\")\n else:\n metsFileSave = open(os.path.join(args.work, args.OutputMets if args.OutputMets.endswith(\".xml\") else args.OutputMets+'.xml'), \"w\")\n metsFileSave.write(xmldoc.toxml()) \n\n#args.files = ocrolib.glob_all(args.files)\n\ndef print_info(*objs):\n print(\"INFO: \", *objs, file=sys.stdout)\n\ndef estimate_skew_angle(image,angles):\n estimates = []\n for a in angles:\n v = mean(interpolation.rotate(image,a,order=0,mode='constant'),axis=1)\n v = var(v)\n estimates.append((v,a))\n if args.debug>0:\n plot([y for x,y in estimates],[x for x,y in estimates])\n ginput(1,args.debug)\n _,a = max(estimates)\n return a\n\ndef deskew(fpath, job):\n base,_ = ocrolib.allsplitext(fpath)\n basefile = ocrolib.allsplitext(os.path.basename(fpath))[0]\n\n if args.parallel<2: print_info(\"=== %s %-3d\" % (fpath, job))\n raw = ocrolib.read_image_gray(fpath)\n\n flat = raw\n # estimate skew angle and rotate\n if args.maxskew>0:\n if 
args.parallel<2: print_info(\"estimating skew angle\")\n d0,d1 = flat.shape\n o0,o1 = int(args.bignore*d0),int(args.bignore*d1)\n flat = amax(flat)-flat\n flat -= amin(flat)\n est = flat[o0:d0-o0,o1:d1-o1]\n ma = args.maxskew\n ms = int(2*args.maxskew*args.skewsteps)\n angle = estimate_skew_angle(est,linspace(-ma,ma,ms+1))\n flat = interpolation.rotate(flat,angle,mode='constant',reshape=0)\n flat = amax(flat)-flat\n else:\n angle = 0\n\n # estimate low and high thresholds\n if args.parallel<2: print_info(\"estimating thresholds\")\n d0,d1 = flat.shape\n o0,o1 = int(args.bignore*d0),int(args.bignore*d1)\n est = flat[o0:d0-o0,o1:d1-o1]\n if args.escale>0:\n # by default, we use only regions that contain\n # significant variance; this makes the percentile\n # based low and high estimates more reliable\n e = args.escale\n v = est-filters.gaussian_filter(est,e*20.0)\n v = filters.gaussian_filter(v**2,e*20.0)**0.5\n v = (v>0.3*amax(v))\n v = morphology.binary_dilation(v,structure=ones((int(e*50),1)))\n v = morphology.binary_dilation(v,structure=ones((1,int(e*50))))\n if args.debug>0: imshow(v); ginput(1,args.debug)\n est = est[v]\n lo = stats.scoreatpercentile(est.ravel(),args.lo)\n hi = stats.scoreatpercentile(est.ravel(),args.hi)\n # rescale the image to get the gray scale image\n if args.parallel<2: print_info(\"rescaling\")\n flat -= lo\n flat /= (hi-lo)\n flat = clip(flat,0,1)\n if args.debug>0: imshow(flat,vmin=0,vmax=1); ginput(1,args.debug)\n bin = 1*(flat>args.threshold)\n\n # output the normalized grayscale and the thresholded images\n print_info(\"%s lo-hi (%.2f %.2f) angle %4.1f\" % (basefile, lo, hi, angle))\n if args.parallel<2: print_info(\"writing\")\n ocrolib.write_image_binary(base+\".ds.png\",bin)\n return base+\".ds.png\"\n\n# mendatory parameter check\nif not args.mets or not args.Input or not args.Output or not args.work:\n parser.print_help()\n print(\"Example: python ocrd-anyBaseOCR-deskew.py -m (mets input file path) -I (input-file-grp name) -O (output-file-grp name) -w (Working directory)\")\n sys.exit(0)\n\nif args.work:\n if not os.path.exists(args.work):\n os.mkdir(args.work)\n\nfiles = parseXML(args.mets)\nfname=[]\nfor i, f in enumerate(files):\n fname.append(deskew(str(f),i+1))\nwrite_to_xml(fname)\n","sub_path":"ocrd-anyBaseOCR-deskew.py","file_name":"ocrd-anyBaseOCR-deskew.py","file_ext":"py","file_size_in_byte":10461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"139914334","text":"\"\"\" Parallel Jaccard/Tanimoto coefficient module \"\"\"\nimport csv\nimport math\nimport time\nimport re\nimport multiprocessing as mp\n\n\ndef open_file():\n \"\"\"Opens a tsv file and returns a list with the data.\"\"\"\n chemicals_list = []\n with open(\"ZINC_chemicals.tsv\") as file:\n reader = csv.reader(file, delimiter=\"\\t\", quotechar='\"')\n for row in reader:\n row_aux = (row[1], row[3])\n chemicals_list.append(row_aux)\n return chemicals_list\n\n\ndef analyze_string(chemical_compound):\n \"\"\"Analyzes the string of chemical compound's elements \\\n and returns the analyzed data in a dictionary.\"\"\"\n chemical_compound = \"\".join(re.findall(\"[a-zA-Z@]+\", chemical_compound))\n analyzed_string = {}\n for character in chemical_compound:\n if character in analyzed_string:\n new_value = analyzed_string.get(character) + 1\n analyzed_string.update({character: new_value})\n else:\n analyzed_string.update({character: 1})\n if '@' in analyzed_string:\n analyzed_string.update({'@': 1})\n return analyzed_string\n\n\ndef 
get_number_common_elements(chemical_a, chemical_b):\n \"\"\"Returns the number of common elements between chemical \\\n compound a and chemical compound b.\"\"\"\n number_elements = 0\n for key_a in chemical_a:\n if key_a in chemical_b:\n number_elements += min(chemical_a.get(key_a), chemical_b.get(key_a))\n return number_elements\n\n\ndef get_number_chemical_elements(chemical_compound):\n \"\"\"Returns the number of elements in a chemical compound.\"\"\"\n number_elements = 0\n for key in chemical_compound:\n number_elements += chemical_compound.get(key)\n return number_elements\n\n\ndef get_jac_tan_coefficient(chemical_a, chemical_b):\n \"\"\"Returns the coefficient of Jaccard/Tanimoto between two chemical compounds.\"\"\"\n letters_a = analyze_string(chemical_a)\n letters_b = analyze_string(chemical_b)\n elements_a = get_number_chemical_elements(letters_a)\n elements_b = get_number_chemical_elements(letters_b)\n common_elements = get_number_common_elements(letters_a, letters_b)\n return round((common_elements / (elements_a + elements_b - common_elements)), 2)\n\n\ndef get_pivots(chemicals_length, number_processors):\n \"\"\"Calculates the pivots to divide the chemicals between the threads.\"\"\"\n pivots_list = []\n pivots_list.append(0)\n for i in range(number_processors - 1, 0, -1):\n pivots_list.append(\n int(round(chemicals_length - (math.sqrt(i / number_processors) * chemicals_length))))\n pivots_list.append(chemicals_length - 1)\n return pivots_list\n\n\ndef fill_compared_list(chemicals_list, pivot_min, pivot_max, queues_list):\n \"\"\"Fills a list with the comparison of the chemical compounds between two pivots in the list.\"\"\"\n compared_chemicals_list = []\n for i in range(pivot_min, pivot_max):\n for j in range(i + 1, len(chemicals_list)):\n coef = get_jac_tan_coefficient(chemicals_list[i][1], chemicals_list[j][1])\n row = chemicals_list[i][0] + \"\\t\" + chemicals_list[j][0] + \"\\t\" + str(coef)\n compared_chemicals_list.append(row)\n queues_list.put(compared_chemicals_list)\n\n\ndef start_join_all(compared_chemicals_list, processes_list, queues_list):\n \"\"\"Starts and joins all the threads in the list.\"\"\"\n for process in processes_list:\n process.start()\n for i in range(len(processes_list)):\n compared_chemicals_list[i] = queues_list[i].get()\n processes_list[i].join()\n\n\ndef write_file(number_threads, compared_chemicals_list, total_time):\n \"\"\"Writes a tsv file with the compared chemicals.\"\"\"\n with open(\"chem_sim_total_Python_Processes.tsv\", \"w\") as record_file:\n record_file.write(\"Chem_ID_1\\tChem_ID_2\\tTanimoto_similarity\\n\")\n for i in range(number_threads):\n print(len(compared_chemicals_list[i]))\n for chemical in compared_chemicals_list[i]:\n record_file.write(chemical + \"\\n\")\n record_file.write(\"Total time = \" + str(total_time) + \" [s]\\n\")\n\n\ndef print_to_console(number_threads, compared_chemicals_list, total_time):\n \"\"\"Prints in console the compared chemicals.\"\"\"\n counter = 0\n for i in range(number_threads):\n for chemical in compared_chemicals_list[i]:\n counter += 1\n print(chemical)\n print(\"Total Elements = \" + str(counter))\n print(\"Total time = \" + str(total_time) + \" [s]\")\n\n\nif __name__ == \"__main__\":\n NUMBER_THREADS = mp.cpu_count()\n CHEMICALS_LIST = open_file()\n PIVOTS_LIST = get_pivots(len(CHEMICALS_LIST), NUMBER_THREADS)\n\n START_TIME = time.time()\n compared_chemicals = []\n queues = []\n processes = []\n for index in range(NUMBER_THREADS):\n compared_chemicals.append([])\n 
queues.append(mp.Queue())\n processes.append(mp.Process(target=fill_compared_list, args=(\n CHEMICALS_LIST, PIVOTS_LIST[index],\n PIVOTS_LIST[index + 1],\n queues[index], )))\n start_join_all(compared_chemicals, processes, queues)\n\n END_TIME = time.time()\n TOTAL_TIME = END_TIME - START_TIME\n write_file(NUMBER_THREADS, compared_chemicals, TOTAL_TIME)\n #print_to_console(NUMBER_THREADS, compared_chemicals, TOTAL_TIME)\n","sub_path":"jaccard_tanimoto_process.py","file_name":"jaccard_tanimoto_process.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"275984330","text":"import timeit\nimport random\n\n\ndef one_two():\n return random.randrange(1,3)\n\n# Basic Test\n'''\nlength = 60000\ntreshold = 0.05\ncount = Counter(one_two_three() for _ in range(length)) # past here student's function\n\nprint(1 in count, '1 is present')\nprint(2 in count, '2 is present')\nprint(3 in count, '3 is present')\nprint(abs(count[1] / length - 1 / 3) < treshold, '1 count is nearly equal 1/3')\nprint(abs(count[2] / length - 1 / 3) < treshold, '2 count is nearly equal 1/3')\nprint(abs(count[3] / length - 1 / 3) < treshold, '3 count is nearly equal 1/3')\n'''\n\nccquiel_setup = '''\nfrom __main__ import ccquiel_day30\n'''\n\ndef ccquiel_day30():\n t = one_two() + one_two()\n if t == 2: return 1\n if t == 4: return 2\n if t == 3:\n if one_two() == 1: return 3\n else: return ccquiel_day30()\n\nTEST_CODE_ccquiel = '''\nresult = ccquiel_day30()\n'''\n\ncharlie_ang_setup = '''\nfrom __main__ import charlie_ang_day30\n'''\n\ndef charlie_ang_day30():\n while True:\n if one_two() == 1:\n return one_two()\n elif one_two() == 1:\n return 3\n\nTEST_CODE_charlie_ang = '''\nresult = charlie_ang_day30()\n'''\n\ndiana_henninger_setup = '''\nfrom __main__ import diana_henninger_day30\n'''\n\ndef diana_henninger_day30():\n if one_two()==1:\n if one_two()==1: return 1\n else: return 2\n else:\n if one_two()== 1: return 3\n else: return diana_henninger_day30()\n\nTEST_CODE_diana_henninger = '''\nresult = diana_henninger_day30()\n'''\n\nJens_setup = '''\nfrom __main__ import Jens_day30\n'''\n\ndef Jens_day30():\n while True:\n r_list = [i for i in range(1, 4) if one_two() == 1]\n if len(r_list) == 1:\n return r_list[0]\n elif len(r_list) == 2:\n return r_list[0] if one_two() == 1 else r_list[1]\n\nTEST_CODE_Jens = '''\nresult = Jens_day30()\n'''\n\nKurt_Hinderer_setup = '''\nfrom __main__ import Kurt_Hinderer_day30\n'''\n\ndef Kurt_Hinderer_day30():\n repeat = False\n while not repeat:\n if one_two() == 1:\n if one_two() == 1:\n return 1\n else:\n return 2\n else:\n if one_two() == 1:\n return 3\n\nTEST_CODE_Kurt_Hinderer = '''\nresult = Kurt_Hinderer_day30()\n'''\n\nOleksandra_Chmel_setup = '''\nfrom __main__ import Oleksandra_Chmel_day30\n'''\n\ndef Oleksandra_Chmel_day30():\n one = one_two()\n two = one_two()\n if one == 1 and two == 1:\n return 1\n if one == 2 and two == 2:\n return 2\n if one == 2 and two == 1:\n return 3\n return Oleksandra_Chmel_day30()\n\nTEST_CODE_Oleksandra_Chmel = '''\nresult = Oleksandra_Chmel_day30()\n'''\n\nTushar_Jain_setup = '''\nfrom __main__ import Tushar_Jain_day30\n'''\n\ndef Tushar_Jain_day30():\n while True:\n if one_two() == 1:\n return one_two()\n elif one_two() == 1:\n return 3\n\nTEST_CODE_Tushar_Jain = '''\nresult = Tushar_Jain_day30()\n'''\n\nYang_setup = '''\nfrom __main__ import Yang_day30\n'''\n\ndef Yang_day30(): \n a,b = one_two(),one_two()+1\n if a*b==4: \n return 1 \n elif a*b<4: \n 
return a*b\n else: \n return Yang_day30() \n\nTEST_CODE_Yang = '''\nresult = Yang_day30()\n'''\n\nprint(\"Time for ccquiel test code: \" + str(timeit.timeit(stmt=TEST_CODE_ccquiel, setup=ccquiel_setup, number=100000)) + \" seconds\")\nprint(\"Time for charlie_ang test code: \" + str(timeit.timeit(stmt=TEST_CODE_charlie_ang, setup=charlie_ang_setup, number=100000)) + \" seconds\")\nprint(\"Time for diana_henninger test code: \" + str(timeit.timeit(stmt=TEST_CODE_diana_henninger, setup=diana_henninger_setup, number=100000)) + \" seconds\")\nprint(\"Time for Jens test code: \" + str(timeit.timeit(stmt=TEST_CODE_Jens, setup=Jens_setup, number=100000)) + \" seconds\")\nprint(\"Time for Kurt_Hinderer test code: \" + str(timeit.timeit(stmt=TEST_CODE_Kurt_Hinderer, setup=Kurt_Hinderer_setup, number=100000)) + \" seconds\")\nprint(\"Time for Oleksandra_Chmel test code: \" + str(timeit.timeit(stmt=TEST_CODE_Oleksandra_Chmel, setup=Oleksandra_Chmel_setup, number=100000)) + \" seconds\")\nprint(\"Time for Tushar_Jain test code: \" + str(timeit.timeit(stmt=TEST_CODE_Tushar_Jain, setup=Tushar_Jain_setup, number=100000)) + \" seconds\")\nprint(\"Time for Yang test code: \" + str(timeit.timeit(stmt=TEST_CODE_Yang, setup=Yang_setup, number=100000)) + \" seconds\")\n","sub_path":"day30/day30.py","file_name":"day30.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"312839932","text":"# coding=utf-8\n\nfrom pyexcel_xls import get_data\nimport sqlite3\n\nraw = get_data('2_教室代码.xls')\ndel raw[0]\n\nconn = sqlite3.connect('schedule.db')\ncur = conn.cursor()\n\ncur.execute('CREATE TABLE Classrooms(Id text, Name text, seats int)')\ncur.executemany('INSERT INTO Classrooms VALUES(?, ?, ?)', raw)\nprint(cur.execute('select id from Classrooms').fetchall())\n\nconn.commit()\nconn.close()\n","sub_path":"gen2.py","file_name":"gen2.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394128462","text":"import time\n\ndef FCcheck(new_x,new_y,un_visited):\n # print('x,y',new_x,new_y)\n length = len(un_visited)\n for i in range(0,length):\n node = un_visited[i]\n new_domain = []\n for y in node.domain:\n x = node.index\n # print('a',x,y,new_x,new_y)\n if x == new_x or y == new_y or (abs(x - new_x) == abs(y - new_y)):\n # print(\"a\")\n continue\n else:\n # print('b')\n new_domain.append(y)\n if len(new_domain) == 0:\n return \"DWO\"\n node.domain = new_domain\n # print(un_visited[1].domain,un_visited[2].domain,un_visited[3].domain)\n return True\n\ndef FC(node,N,is_visited,un_visited):\n\n # print('id,domain,solution',node.index,node.domain,node.solution)\n new_x = node.index\n domain = node.domain\n for i in domain:\n new_y = i\n constraintsOK = True\n for k in range(len(is_visited)):\n x = is_visited[k].solution[0]\n y = is_visited[k].solution[1]\n # print(\"bbb\",x,y,new_x,new_y)\n if x == new_x or y == new_y or (abs(x - new_x) == abs(y - new_y)):\n constraintsOK = False\n break\n if constraintsOK == False:\n continue\n save_domain = {}\n for k in range(len(un_visited)):\n save_tmp = [x for x in un_visited[k].domain]\n save_domain[un_visited[k].index] = save_tmp\n\n DWOoccurred = False\n if FCcheck(new_x,new_y,un_visited) == \"DWO\":\n DWOoccurred = True\n # print(\"new:\",new_x,new_y,constraintsOK,DWOoccurred)\n # print(un_visited[1].domain,un_visited[2].domain,un_visited[3].domain)\n if DWOoccurred == False:\n node.solution = 
(new_x,new_y)\n            is_visited.append(node)\n            if len(is_visited) >= N:\n                return node\n            # MRV heuristic: choose the unassigned column with the fewest remaining values\n            min_node_index = 0\n            min_domain_length = len(un_visited[0].domain)\n            for k in range(len(un_visited)):\n                if len(un_visited[k].domain) < min_domain_length:\n                    min_domain_length = len(un_visited[k].domain)\n                    min_node_index = k\n\n            newNode = un_visited.pop(min_node_index)\n            newNode.parent = node\n            dstNode = FC(newNode,N,is_visited,un_visited)\n            if dstNode is not None:\n                return dstNode\n            is_visited.pop(-1)\n            un_visited.append(newNode)\n        for k in range(len(un_visited)):\n            index = un_visited[k].index\n            un_visited[k].domain = [x for x in save_domain[index]]\n\n    # un_visited.insert(0,node1)\n    return None\n\n\ndef start_FC(N):\n\n    un_visited = []\n    for i in range(N):\n        newNode = Queen(i,0,N,None)\n        un_visited.append(newNode)\n\n    for i in range(N):\n        start_node = Queen(0,i,N,None)\n        is_visited = []\n        # is_visited.append(start_node)\n        un_visited.pop(0)\n        dst_node = FC(start_node,N,is_visited,un_visited)\n        if dst_node is not None:\n            return dst_node\n\n    return None\n\nclass Queen():\n    def __init__(self,x,y,N,parent):\n        self.domain = [x for x in range(N)]\n        self.index = x\n        self.solution = (x,y)\n        self.parent = parent\n\ndef main():\n    N = int(input('Please input N = '))\n    start = time.perf_counter()\n    dst_node = start_FC(N)\n\n    if dst_node is None:\n        print(\"No solution\")\n        return\n    else:\n        solution = []\n        while dst_node:\n            solution.append(dst_node.solution)\n            dst_node = dst_node.parent\n        solution = list(reversed(solution))\n        print(\"Solution: \",solution)\n\n    end = time.perf_counter()\n    print(\"Forwardchecking run time : \",float(end - start),' s')\n\n    queen_map = []\n    for i in range(N):\n        tmp = ['.'] * N\n        queen_map.append(tmp)\n    for i in range(len(solution)):\n        queen_map[solution[i][0]][solution[i][1]] = 'X'\n\n    for i in range(N):\n        print(' ',' '.join(queen_map[i]))\n\nif __name__ == '__main__':\n    main()","sub_path":"FC_Tree_MRV.py","file_name":"FC_Tree_MRV.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"628834104","text":"def maior(x, y):\n    if x>y:\n        return x\n    else:\n        return y\n\ndef menor(x, y):\n    if x>y:\n        return y\n    else:\n        return x\n\ndef listamaior(lista):\n    if len(lista)==0:\n        return None\n    else:\n        n= lista[0]\n        for aux in lista:\n            if aux>n:\n                n= aux\n    return n\n\ndef listamenor(lista):\n    if len(lista)==0:\n        return None\n    else:\n        n= lista[0]\n        for aux in lista:\n            if auxi:\n                cont= cont+1\n    return cont\n\ndef lista12(lista1, lista2):\n    n1= lista1[0]\n    n2= lista2[0]\n    for aux in lista1:\n        if aux> n1:\n            n1= aux\n    for ind in lista2:\n        if ind> n2:\n            n2= ind\n    if n1> n2:\n        return lista1\n    else:\n        return lista2\n\ndef listacomp(lista1, lista2):\n    n1= lista1[0]\n    n2= lista2[0]\n    cont= 0\n    for aux in lista1:\n        if aux> n1:\n            n1= aux\n    for ind in lista2:\n        if ind> n2:\n            n2= ind\n\n    if n1> n2:\n        cont= cont+3\n    elif n1==n2:\n        cont= cont+1\n    return cont\n\ndef impoupar(x):\n    if (x%2)==0:\n        x= True\n    else:\n        x= False\n    return x\n\ndef listaparouimpar(lista):\n    listapar= []\n    listaimpar= []\n    for aux in lista:\n        if aux%2==0:\n            listapar.append(aux)\n        else:\n            listaimpar.append(aux)\n    listapar.sort()\n    listaimpar.sort()\n    return listapar, listaimpar\n\n    \n","sub_path":"Pythons/trabalho sala.py","file_name":"trabalho sala.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"178068860","text":"\nfrom keras.utils import plot_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, InputLayer, Bidirectional, 
TimeDistributed, Embedding, Activation\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.sequence import pad_sequences\nfrom sklearn.model_selection import train_test_split\nfrom keras import backend as K\nimport numpy as np\nfrom nltk.corpus import brown\nfrom keras.callbacks import EarlyStopping\nearly_stopping = EarlyStopping(patience=3)\nimport matplotlib.pyplot as plt\n\n\n\ndef to_categorical(sequences, categories):\n cat_sequences = []\n for s in sequences:\n cats = []\n for item in s:\n cats.append(np.zeros(categories))\n cats[-1][item] = 1.0\n cat_sequences.append(cats)\n return np.array(cat_sequences)\n\n\ndef logits_to_tokens(sequences, index):\n token_sequences = []\n for categorical_sequence in sequences:\n token_sequence = []\n for categorical in categorical_sequence:\n token_sequence.append(index[np.argmax(categorical)])\n\n token_sequences.append(token_sequence)\n\n return token_sequences\n\n\ndef ignore_class_accuracy(to_ignore=0):\n def ignore_accuracy(y_true, y_pred):\n y_true_class = K.argmax(y_true, axis=-1)\n y_pred_class = K.argmax(y_pred, axis=-1)\n\n ignore_mask = K.cast(K.not_equal(y_pred_class, to_ignore), 'int32')\n matches = K.cast(K.equal(y_true_class, y_pred_class), 'int32') * ignore_mask\n accuracy = K.sum(matches) / K.maximum(K.sum(ignore_mask), 1)\n return accuracy\n\n return ignore_accuracy\n\n\nif __name__ == '__main__':\n\n tagged_sentences = brown.tagged_sents(tagset='universal')\n sentences, sentence_tags = [], []\n for tagged_sentence in tagged_sentences:\n sentence, tags = zip(*tagged_sentence)\n sentences.append(np.array(sentence))\n sentence_tags.append(np.array(tags))\n\n (train_sentences,\n test_sentences,\n train_tags,\n test_tags) = train_test_split(sentences, sentence_tags, test_size=0.2)\n\n words, tags = set([]), set([])\n\n for s in train_sentences:\n for w in s:\n words.add(w.lower())\n\n for ts in train_tags:\n for t in ts:\n tags.add(t)\n\n word2index = {w: i + 2 for i, w in enumerate(list(words))}\n word2index['-PAD-'] = 0 # The special value used for padding\n word2index['-OOV-'] = 1 # The special value used for OOVs\n\n tag2index = {t: i + 1 for i, t in enumerate(list(tags))}\n tag2index['-PAD-'] = 0 # The special value used to padding\n\n train_sentences_X, test_sentences_X, train_tags_y, test_tags_y = [], [], [], []\n\n for s in train_sentences:\n s_int = []\n for w in s:\n try:\n s_int.append(word2index[w.lower()])\n except KeyError:\n s_int.append(word2index['-OOV-'])\n\n train_sentences_X.append(s_int)\n\n for s in test_sentences:\n s_int = []\n for w in s:\n try:\n s_int.append(word2index[w.lower()])\n except KeyError:\n s_int.append(word2index['-OOV-'])\n\n test_sentences_X.append(s_int)\n\n for s in train_tags:\n train_tags_y.append([tag2index[t] for t in s])\n\n for s in test_tags:\n test_tags_y.append([tag2index[t] for t in s])\n\n MAX_LENGTH = len(max(train_sentences_X, key=len))\n\n train_sentences_X = pad_sequences(train_sentences_X, maxlen=MAX_LENGTH, padding='post')\n test_sentences_X = pad_sequences(test_sentences_X, maxlen=MAX_LENGTH, padding='post')\n train_tags_y = pad_sequences(train_tags_y, maxlen=MAX_LENGTH, padding='post')\n test_tags_y = pad_sequences(test_tags_y, maxlen=MAX_LENGTH, padding='post')\n\n cat_train_tags_y = to_categorical(train_tags_y, len(tag2index))\n model = Sequential()\n model.add(InputLayer(input_shape=(MAX_LENGTH,)))\n model.add(Embedding(len(word2index), 128))\n model.add(Dense(len(tag2index)))\n model.add(Dense(len(tag2index)))\n model.add(Activation('softmax'))\n\n 
model.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.001),\n metrics=['accuracy', ignore_class_accuracy(0)])\n\n model.summary()\n\n hist = model.fit(train_sentences_X, to_categorical(train_tags_y, len(tag2index)), batch_size=2048, epochs=3, validation_split=0.2,callbacks=[early_stopping])\n\n score = model.evaluate(test_sentences_X, to_categorical(test_tags_y, len(tag2index)), verbose=0)\n print(model.metrics_names)\n\n print('model loss: {} acc: {} ignore_accuracy : {}'.format(score[0], score[1],score[2]))\n plot_model(model, to_file='tmp/dnn_model_structure.png', show_shapes=True)\n\n fig, loss_ax = plt.subplots()\n\n acc_ax = loss_ax.twinx()\n\n loss_ax.plot(hist.history['loss'], 'y', label='train loss')\n loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\n\n acc_ax.plot(hist.history['acc'], 'b', label='train acc')\n acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')\n\n loss_ax.set_xlabel('epoch')\n loss_ax.set_ylabel('loss')\n acc_ax.set_ylabel('accuray')\n\n loss_ax.legend(loc='upper left')\n acc_ax.legend(loc='lower left')\n plt.savefig(\"tmp/dnn_tagger_hist.png\",dpi=300)\n\n","sub_path":"dnn_tagger.py","file_name":"dnn_tagger.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66412202","text":"import subprocess\r\nimport os\r\nimport sys\r\nimport shutil\r\n\r\nstudioMode = (\"--studio\" in sys.argv) or (\"-s\" in sys.argv) # Uninstall from studio directory\r\ncleanup = (\"--cleanup\" in sys.argv) or (\"-c\" in sys.argv) # Cleanup files\r\n\r\nimport installUtil\r\ninstallUtil.setStudio(studioMode)\r\n\r\nif installUtil.verifyAndGoto():\r\n\tprint(\"Found install directory\")\r\n\r\nwFiles = installUtil.getWrapperFiles(_allFiles=True)\r\nfor i, fileName in enumerate(installUtil.getInstallFiles(_allFiles=True)):\r\n\twFileName = wFiles[i]\r\n\tif os.path.isfile(wFileName):\r\n\t\tif os.path.isfile(fileName):\r\n\t\t\tsubprocess.run(\"attrib -r \"+fileName)\r\n\t\t\tos.remove(fileName)\r\n\t\tos.rename(wFileName, fileName)\r\n\t\tprint(\"Deleted compiled wrapper executable \"+fileName+\" and renamed real executable to original\")\r\n\telse:\r\n\t\tprint(\"Skipping \"+fileName+\" because \"+wFileName+\" doesn't exist\")\r\n\r\ninstallerPaths = [\"command-window.cmd\", \"Icons\", \"Ahk2Exe\", \"Wrapper.ahk\", \"help.pyw\", \"install.py\", \"config.py\", \"__pycache__\", \"installUtil.py\", \"uninstall.py\"]\r\nif cleanup:\r\n\tif installUtil.isInVersionFolder():\r\n\t\tdoCleanup = input(\"Are you sure you want to cleanup *all* installed files? This includes your custom python files and the Wrapper.ahk file, so be sure to back them up if they contain important code! 
[Y/N]\")\r\n\t\tif doCleanup.lower() == \"y\":\r\n\t\t\tfor path in installUtil.getInstallFiles(_includeExt=False, _allFiles=True):\r\n\t\t\t\tif os.path.isfile(path+\".py\"):\r\n\t\t\t\t\tos.remove(path+\".py\")\r\n\t\t\tfor path in os.listdir(os.getcwd()):\r\n\t\t\t\tif path in installerPaths:\r\n\t\t\t\t\tif os.path.isdir(path):\r\n\t\t\t\t\t\tshutil.rmtree(path)\r\n\t\t\t\t\tif os.path.isfile(path):\r\n\t\t\t\t\t\tos.remove(path)\r\n\t\t\tprint(\"Cleaned up files!\")\r\n\t\t\tsys.exit()\r\n\t\telse:\r\n\t\t\tprint(\"Did not cleanup files: Canceled\")\r\n\telse:\r\n\t\tprint(\"Did not clean up files: Not in a valid install directory\")","sub_path":"Wrapper/uninstall.py","file_name":"uninstall.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366369695","text":"from django.core.urlresolvers import reverse\nfrom edx_proctoring.api import get_all_exams_for_course\nfrom student.models import CourseEnrollment\nfrom xmodule.modulestore.django import modulestore\nfrom enrollment.serializers import CourseEnrollmentSerializer\nfrom openedx.core.djangoapps.course_groups.models import CourseUserGroup\nfrom opaque_keys.edx.keys import CourseKey, UsageKey\n\n\nVERIFIED = 'verified'\n\n\ndef get_course_enrollments(user_id=None, **kwargs):\n \"\"\"\n Retrieve a list representing all aggregated data for a user's course enrollments.\n Construct a representation of all course enrollment data for a specific user.\n Args:\n user_id (str): The name of the user to retrieve course enrollment information for.\n Returns:\n A serializable list of dictionaries of all aggregated enrollment data for a user.\n \"\"\"\n qset = CourseEnrollment.objects.filter(is_active=True, **kwargs)\n if user_id is not None:\n qset = qset.filter(user__username=user_id)\n qset = qset.order_by('created')\n return CourseEnrollmentSerializer(qset).data # pylint: disable=no-member\n\n\ndef get_user_proctored_exams(username, request):\n enrollments = CourseEnrollment.objects.filter(is_active=True,\n user__username=username)\n system = request.data.get('system')\n result = {}\n for enrollment in enrollments:\n course = enrollment.course\n try:\n course_id = str(course.id)\n except AttributeError:\n continue\n\n cohorts = CourseUserGroup.objects.filter(\n course_id=enrollment.course_id,\n users__username=username,\n group_type=CourseUserGroup.COHORT,\n name=VERIFIED\n )\n\n if course_id not in result and cohorts.exists():\n proctoring_service = modulestore().get_course(CourseKey.from_string(course_id)).proctoring_service\n if system and system != proctoring_service:\n continue\n result[course_id] = {\n \"id\": course_id,\n \"name\": course.display_name,\n \"uri\": request.build_absolute_uri(\n reverse('course_structure_api:v0:detail',\n kwargs={'course_id': course_id})),\n \"image_url\": course.course_image_url,\n \"start\": course.start,\n \"end\": course.end,\n \"system\": proctoring_service,\n 'exams': []\n }\n exams = get_all_exams_for_course(course_id=course.id)\n for exam in exams:\n if exam['is_proctored']:\n item_id = UsageKey.from_string(exam['content_id'])\n item = modulestore().get_item(item_id)\n exam['visible_to_staff_only'] = item.visible_to_staff_only\n oldest = None\n due_dates = []\n for vertical in item.get_children():\n if vertical.due:\n due_dates.append(vertical.due)\n if due_dates:\n oldest = min(due_dates)\n exam['deadline'] = oldest\n exam['start'] = item.start\n result[course_id]['exams'].append(exam)\n result = {key: 
value for key, value in result.items() if\n len(value['exams']) > 0}\n return result\n\n","sub_path":"open_edx_api_extension/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"68529202","text":"#!/usr/bin/python3\nimport json, math, telepot\nfrom uptime import uptime\nfrom subprocess import call, check_output\n\n#enter your json path below with constant and bot information\nautht_path = '/home/auth.json'\n\ndef temp():\n res = check_output('vcgencmd measure_temp', shell=True)\n return(res.decode()).replace('temp=',' ').replace(\"'C\",\"°C🌡\")\n\ndef cpu():\n disk = check_output(\"top -bn1 | grep load | awk '{printf \\\"%.2f\\\", $(NF-2)}'\", shell=True)\n return(str(disk)).replace(\"b\",\"\").replace(\"'\",\"\")\n\ndef mem():\n mem = check_output(\"free -m | awk 'NR==2{printf \\\"%s/%sMB %.2f%%\\\", $3,$2,$3*100/$2 }'\", shell=True)\n return(str(mem)).replace(\"b\",\"\").replace(\"'\",\"\")\n\ndef disk():\n disk = check_output(\"df -h | awk '$NF==\\\"/\\\"{printf \\\"%.1f/%.1fGB %s\\\", $3,$2,$5}'\", shell=True)\n return(str(disk)).replace(\"b\",\"\").replace(\"'\",\"\")\n\ndef uptm():\n if math.floor(uptime() / 60) < 2:\n tm = (str(math.floor(uptime() / 60)) + ' minute')\n\n elif math.floor(uptime() / 60) >= 2 and math.floor(uptime() / 60) < 60:\n tm = (str(math.floor(uptime() / 60)) + ' minutes')\n\n elif math.floor(uptime() / 60) >= 60 and math.floor(uptime() / 60) < 120:\n tm = (str(math.floor(uptime() / 3600)) + ' hour')\n\n elif math.floor(uptime() / 60) >= 120 and math.floor(uptime() / 86400) < 1:\n tm = (str(math.floor(uptime() / 3600)) + ' hours')\n\n elif math.floor(uptime() / 86400) >= 1 and math.floor(uptime() / 86400) < 2:\n tm = (str(math.floor(uptime() / 86400)) + ' day')\n\n else:\n tm = (str(math.floor(uptime() / 86400)) + ' days')\n\n return(tm)\n\ndef porta():\n call('curl -4 ifconfig.co/port/22 > /home/pi/porta.txt', shell=True)\n with open('/home/pi/porta.txt') as file:\n ifconfig = json.load(file)\n ip = ifconfig['ip']\n\n if ifconfig['reachable'] == True:\n estado = '\\n*Firewall:* Disable ⛔'\n else:\n estado = '\\n*Firewall:* Enable ✅'\n\n call('rm /home/pi/porta.txt &', shell=True)\n\n return(ip, estado)\n\n\nip, estado = porta()\n\nwith open(autht_path) as file:\n secrets = json.load(file)\n\n bot = telepot.Bot(secrets['tg_tk_bot'])\n tg_destiny = secrets['tg_id_to']\n\nbot.sendMessage(tg_destiny, '*CPU Temp:*' + temp() + '*Uptime:* ' \\\n\t\t\t\t + uptm() + \"\\n*IP:* \" + ip + '\\n*CPU Load:* ' + cpu() + '\\n*Mem:* '\\\n + mem() + '\\n*Disk:* ' + disk() + estado, parse_mode='Markdown')","sub_path":"status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80500785","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom cluster import Cluster\n\n\ndef gjf_head(fname):\n gjf_head_list = ['%mem=1GB', '%nprocl=1', '%nprocs=4', '%%chk=%s.chk' % fname,\n '# pw91pw91/6-31g* iop(3/33=1,5/33=2) nosymm', '', fname, '', '0 1', '']\n return '\\n'.join(gjf_head_list)\n\n\ndef write_gjf(idx, title, cluster):\n if not os.path.exists(str(idx)):\n os.mkdir(str(idx))\n with open('%d/%s.gjf' % (idx, title), 'w') as fout:\n fout.write(gjf_head(title))\n fout.write(cluster.to_gjf())\n fout.write('\\n') # add a \\n just in case\n\ncenter = Cluster('center', fname='center.xsd')\nneighbors = Cluster('neighbors', 
fname='neighbor.xsd')\n\nfor i, mlcl in enumerate(neighbors.molecules):\n neighbor = Cluster('neighbor%d' % i, molecules=[mlcl])\n write_gjf(i+1, 'mol1', center)\n write_gjf(i+1, 'mol2', neighbor)\n write_gjf(i+1, '2mol', center + neighbor)\n with open('%d/H.inp' % (i+1), 'w') as hout:\n hout.write('%d\\n%d\\n' % (center.homo, neighbor.homo))\n with open('%d/L.inp' % (i+1), 'w') as lout:\n lout.write('%d\\n%d\\n' % (center.lumo, neighbor.lumo))\n\n# write a cluster file just for debugging\n(center + neighbors).to_xyz(True)\n","sub_path":"TransferIntergral/build_dimer.py","file_name":"build_dimer.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649995635","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 13 14:52:36 2020\n\n@author: tonedogga\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom pandas.plotting import scatter_matrix\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport seaborn as sns\n#print(\"matplotlib:\",mpl.__version__)\n\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n\nwwjamsxls=\"Ww_jams_scan_data_300520.xlsx\"\n\n\n\n# self.start_point=0\n \n \ndef add_dates_back(df,all_dates):\n return pd.concat((df,all_dates_df),axis=1) #,on='ww_scan_week',how='right')\n\n \n \n \n#column_list=list([\"coles_scan_week\",\"bb_total_units\",\"bb_promo_disc\",\"sd_total_units\",\"sd_promo_disc\",\"bm_total_units\",\"bm_promo_disc\"]) \n \ncol_dict=dict({0:\"WW_scan_week\",\n 1:\"BB_off_promo_sales\",\n 2:\"BB_on_promo_sales\",\n 3:\"SD_off_promo_sales\",\n 4:\"SD_on_promo_sales\",\n 5:\"BM_off_promo_sales\",\n 6:\"BM_on_promo_sales\"})\n \ndf=pd.read_excel(wwjamsxls,-1,skiprows=[0,1,2]).T.reset_index() #,header=[0,1,2]) #,skip_rows=0) #[column_list] #,names=column_list) #,sheet_name=\"AttacheBI_sales_trans\",use_cols=range(0,16),verbose=True) # -1 means all rows #print(df)\n#print(\"before\",df)\ndf = df.rename(col_dict,axis='index').T\n\ndf['WW_scan_week']=pd.to_datetime(df['WW_scan_week'],format=\"%d/%m/%y\")\n#df['coles_scan_week']=df[\"date\"] #.strftime(\"%Y-%m-%d\")\ndf.fillna(0.0,inplace=True)\ndf.drop_duplicates(keep='first', inplace=True)\n#df.replace(0.0, np.nan, inplace=True)\n#print(\"after\",df)\n\ndf=df.sort_values(by=['WW_scan_week'], ascending=True)\ndf=df.set_index('WW_scan_week') \ndf=df.astype(np.float32) #,inplace=True)\ndf['weekno']= np.arange(len(df))\nprint(\"final\",df,df.T)\n\ndf['BB_on_promo']=(df['BB_on_promo_sales']>0.0)\ndf['SD_on_promo']=(df['SD_on_promo_sales']>0.0)\ndf['BM_on_promo']=(df['BM_on_promo_sales']>0.0)\n\ndf['BB_total_sales']=df['BB_off_promo_sales']+df['BB_on_promo_sales']\ndf['SD_total_sales']=df['SD_off_promo_sales']+df['SD_on_promo_sales']\ndf['BM_total_sales']=df['BM_off_promo_sales']+df['BM_on_promo_sales']\n\n\n\ndf.replace(0.0, np.nan, inplace=True)\n\n#sns.lmplot(x='weekno',y='BB_total_sales',data=df,col='SD_on_promo',hue='BM_on_promo') #,fit_reg=True,robust=True,legend=True) \n#sns.lmplot(x='weekno',y='BB_total_sales',data=df,col='BM_on_promo',hue='SD_on_promo') #,fit_reg=True,robust=True,legend=True) \nsns.lmplot(x='weekno',y='BB_total_sales',data=df,col='SD_on_promo',hue='BB_on_promo') #,fit_reg=True,robust=True,legend=True) 
\nsns.lmplot(x='weekno',y='BB_total_sales',data=df,col='BM_on_promo',hue='BB_on_promo')\n\nsns.lmplot(x='weekno',y='BB_total_sales',data=df,col='BM_on_promo',hue='SD_on_promo') #,fit_reg=True,robust=True,legend=True) \nsns.lmplot(x='weekno',y='BB_total_sales',data=df,col='SD_on_promo',hue='BM_on_promo')\n\n","sub_path":"brand_index_WW_v2_00.py","file_name":"brand_index_WW_v2_00.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618654590","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n# importing a useful library -- feel free to add any others you find necessary\nimport hashlib\nimport string\n\n# this will work if you place this script in your writeup folder\nwordlist = open(\"../probable-v2-top1575.txt\", 'r')\nhashfile = open(\"../hashes\", 'r')\n\n# a string equal to 'abcdefghijklmnopqrstuvwxyz'.\nsalts = string.ascii_lowercase\n\nwords = []\nfor word in wordlist:\n words.append(word.rstrip())\n\nhashes = []\nfor h in hashfile:\n hashes.append(h.rstrip())\n\nfor salt in salts:\n for word in words:\n salted = str(salt + word).encode('utf-8')\n computed_hash = hashlib.sha512(salted).hexdigest()\n for h in hashes:\n if h == computed_hash:\n print(\"---------------------------\")\n print(\"Match found!\")\n print(\"Hash: \" + h)\n print(\"Salt: \" + salt)\n print(\"Password: \" + word)\n print(\"---------------------------\")\n","sub_path":"week/9/writeup/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491071646","text":"import taichi as ti\nimport os\nimport sys\nimport functools\n\nsys.path.append(os.path.join(ti.core.get_repo_dir(), 'tests', 'python'))\n\nfrom fuse_test_template import template_fuse_dense_x2y2z, \\\n template_fuse_reduction\n\n\n# Note: this is a short-term solution. 
In the long run we need to think about how to reuse pytest\ndef benchmark_async(func):\n @functools.wraps(func)\n def body():\n for arch in [ti.cpu, ti.cuda]:\n for async_mode in [True, False]:\n os.environ['TI_CURRENT_BENCHMARK'] = func.__name__\n ti.init(arch=arch, async_mode=async_mode)\n func()\n\n return body\n\n\n@benchmark_async\ndef fuse_dense_x2y2z():\n template_fuse_dense_x2y2z(size=100 * 1024**2,\n repeat=10,\n benchmark_repeat=10,\n benchmark=True)\n\n\n@benchmark_async\ndef fuse_reduction():\n template_fuse_reduction(size=100 * 1024**2,\n repeat=10,\n benchmark_repeat=10,\n benchmark=True)\n\n\n@benchmark_async\ndef fill_1d():\n a = ti.field(dtype=ti.f32, shape=100 * 1024**2)\n\n @ti.kernel\n def fill():\n for i in a:\n a[i] = 1.0\n\n ti.benchmark(fill, repeat=100)\n\n\n@benchmark_async\ndef fill_scalar():\n a = ti.field(dtype=ti.f32, shape=())\n\n @ti.kernel\n def fill():\n a[None] = 1.0\n\n ti.benchmark(fill, repeat=1000)\n\n\n@benchmark_async\ndef sparse_numpy():\n a = ti.field(dtype=ti.f32)\n b = ti.field(dtype=ti.f32)\n\n block_count = 64\n block_size = 32\n # a, b always share the same sparsity\n ti.root.pointer(ti.ij, block_count).dense(ti.ij, block_size).place(a, b)\n\n @ti.kernel\n def initialize():\n for i, j in ti.ndrange(block_count * block_size,\n block_count * block_size):\n if (i // block_size + j // block_size) % 4 == 0:\n a[i, j] = i + j\n\n @ti.kernel\n def saxpy(x: ti.template(), y: ti.template(), alpha: ti.f32):\n for i, j in x:\n y[i, j] = alpha * x[i, j] + y[i, j]\n\n def task():\n initialize()\n saxpy(a, b, 2)\n saxpy(b, a, 1.1)\n saxpy(b, a, 1.1)\n saxpy(a, b, 1.1)\n saxpy(a, b, 1.1)\n saxpy(a, b, 1.1)\n\n ti.benchmark(task, repeat=100)\n\n\nwith_autodiff = False # For some reason autodiff crashes with async.\nif with_autodiff:\n\n @benchmark_async\n def autodiff():\n\n n = 1024**2 * 10\n\n a = ti.field(dtype=ti.f32, shape=n, needs_grad=True)\n b = ti.field(dtype=ti.f32, shape=n)\n loss = ti.field(dtype=ti.f32, shape=(), needs_grad=True)\n\n @ti.kernel\n def compute_loss():\n for i in a:\n loss[None] += a[i]\n\n @ti.kernel\n def accumulate_grad():\n for i in a:\n b[i] += a.grad[i]\n\n def task():\n with ti.Tape(loss=loss):\n # The forward kernel of compute_loss should be completely eliminated (except for the last one)\n compute_loss()\n\n accumulate_grad()\n\n ti.benchmark(task, repeat=100)\n\n\n@benchmark_async\ndef stencil_reduction():\n a = ti.field(dtype=ti.f32)\n b = ti.field(dtype=ti.f32)\n total = ti.field(dtype=ti.f32, shape=())\n\n block_count = 1024\n block_size = 1024\n # a, b always share the same sparsity\n ti.root.pointer(ti.i, block_count).dense(ti.i, block_size).place(a, b)\n\n @ti.kernel\n def initialize():\n for i in range(block_size, (block_size - 1) * block_count):\n a[i] = i\n\n @ti.kernel\n def stencil():\n for i in a:\n b[i] = a[i - 1] + a[i] + a[i + 1]\n\n @ti.kernel\n def reduce():\n for i in a:\n total[None] += b[i]\n\n def task():\n for i in range(2):\n initialize()\n stencil()\n reduce()\n\n ti.benchmark(task, repeat=100)\n\n\n# TODO: add mpm_breakdown\n","sub_path":"benchmarks/async_cases.py","file_name":"async_cases.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"306227387","text":"#!/usr/bin/env python3\n# controller.py\n# author: Wrytus, Flosean\n#\n# purpose: The center controller to get the messages received from\n# nlu_server and responsible for controlling the devices\n\nimport os\nimport requests\nimport json\nimport 
time\nimport random\nimport subprocess\n#from device.DHT import Adafruit_DHT\n\ndef make_requests():\n req = {\n 'device': DEVICE_ID,\n 'event_type': 'request_cmd'\n }\n return requests.get('{}:{}'.format(SERVER_LOC, PORT), params=req)\n\ndef send_sensor():\n #temp, humidity = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, '18')\n out = subprocess.check_output(\"python2 /home/pi/Pistgut/RPi/device/DHT/examples/AdafruitDHT.py 22 18\", shell=True)\n temp, humidity = out.decode('utf8').strip()[1:-1].split(',')\n if float(temp) >= 30:\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_3_on.py\", shell=True)\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_1_off.py\", shell=True)\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_2_off.py\", shell=True)\n else:\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_3_off.py\", shell=True)\n\n print(temp, humidity)\n ack_status = {\n 'device_id': DEVICE_ID,\n 'event_type': 'sensor',\n 'temp': temp,\n 'humidity': humidity\n }\n ack = requests.post('{}:{}'.format(SERVER_LOC, PORT), data=ack_status)\n print('send sensor')\n\ndef switch(obj):\n cmd_type = obj['intent']\n\n ### Target: light\n if cmd_type == 'light':\n target_led = obj['command']['object']\n if target_led == 'all':\n if obj['command']['action'] == 'turn_on':\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_1_on.py\", shell=True)\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_2_on.py\", shell=True)\n elif obj['command']['action'] == 'turn_off':\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_1_off.py\", shell=True)\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/LED_2_off.py\", shell=True)\n else:\n if obj['command']['action'] == 'turn_on':\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/{}_on.py\".format(target_led), shell=True)\n elif obj['command']['action'] == 'turn_off':\n ret = subprocess.call(\"python3 /home/pi/Pistgut/RPi/device/LED/{}_off.py\".format(target_led), shell=True)\n\n ### Target: music\n elif cmd_type == 'music':\n music_list = os.listdir(MUSIC_PATH)\n song = obj['command']['song']\n global NOW_PLAYING\n if obj['command']['action'] == 'play':\n song_name = song['name']\n song_artist = song['artist']\n target = '周杰倫_七里香.mp3'\n for f in music_list:\n if song_name in f and song_artist in f:\n target = f\n print(target)\n\n args = []\n args.append('mpg321')\n args.append(os.path.join(MUSIC_PATH, target))\n print(args)\n\n if NOW_PLAYING is not None:\n NOW_PLAYING.kill()\n\n NOW_PLAYING = subprocess.Popen(args)\n print(NOW_PLAYING.pid)\n elif obj['command']['action'] == 'stop':\n NOW_PLAYING.kill()\n\ndef main():\n global CNT\n sensor_time_interval = 0\n while CNT > 0:\n print('timestamp: {}'.format(CNT))\n res = make_requests()\n print(res.content)\n if len(res.text) == 0:\n pass\n elif res.status_code == 200 and len(res.text) != 0:\n ack_status = {\n 'device_id': DEVICE_ID,\n 'event_type': 'ack',\n 'command_id': json.loads(res.text)['command_id'],\n 'status': 'success'\n }\n ack = requests.post('{}:{}'.format(SERVER_LOC, PORT), data=ack_status)\n switch(json.loads(res.text))\n else:\n ack_status = {\n 'event_type': 'error',\n 'details': 'requests_get_error'\n }\n ack = requests.post('{}:{}'.format(SERVER_LOC, PORT), data=ack_status)\n\n if sensor_time_interval % (SENSOR_TIME_LIMIT * 2) == 0:\n send_sensor()\n sensor_time_interval = 0\n sensor_time_interval += 1\n\n CNT -= 1\n 
time.sleep(0.5)\n\n\nif __name__ == '__main__':\n SERVER_LOC = 'http://52.197.239.243'\n PORT = '6789'\n DEVICE_ID = 'A'\n MUSIC_PATH = '/home/pi/Pistgut/RPi/device/Music'\n NOW_PLAYING = None\n CNT = 1e10\n SENSOR_TIME_LIMIT = 3\n\n main()\n","sub_path":"RPi/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"231820503","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/11/15 21:23\n# @Author : smyyan & ghoskno\n# @File : brybt.py\n# @Software: PyCharm\n\nimport time\nimport os\nimport re\nimport pickle\nfrom io import BytesIO\nimport platform\nfrom contextlib import ContextDecorator\nfrom PIL import Image\nimport requests\nfrom requests.cookies import RequestsCookieJar\nfrom bs4 import BeautifulSoup\n\nfrom decaptcha import DeCaptcha\n\n# ##################需要配置的变量###################\n_username = '用户名'\n_passwd = '密码'\n_transmission_user_pw = 'user:passwd' # transmission的用户名和密码,按照格式填入\n_windows_download_path = './torrent' # windows测试下载种子路径\n_linux_download_path = '' # linux服务器下载种子的路径\n_torrent_infos = './torrent.pkl' # 种子信息保存文件路径\nmax_torrent = 20 # 最大种子数\nsearch_time = 120 # 轮询种子时间,默认120秒\n# ##################################################\n_decaptcha_model = 'captcha_classifier.pkl' # 验证码识别模型\n_cookies_save_path = 'ByrbtCookies.pickle' # cookies保存路径\n\n# 判断平台\nosName = platform.system()\nif osName == 'Windows':\n osName = 'Windows'\nelif osName == 'Linux':\n osName = 'Linux'\nelse:\n raise Exception('not support this system : {}'.format(osName))\n\n# 常量\n_BASE_URL = 'https://bt.byr.cn/'\n_tag_map = {\n 'free': '免费',\n 'twoup': '2x上传',\n 'twoupfree': '免费&2x上传',\n 'halfdown': '50%下载',\n 'twouphalfdown': '50%下载&2x上传',\n 'thirtypercent': '30%下载',\n}\n_cat_map = {\n '电影': 'movie',\n '剧集': 'episode',\n '动漫': 'anime',\n '音乐': 'music',\n '综艺': 'show',\n '游戏': 'game',\n '软件': 'software',\n '资料': 'material',\n '体育': 'sport',\n '记录': 'documentary',\n}\n\n\n# 全局变量\ndownload_path = None\nbyrbt_cookies = None\n\nif osName == 'Windows':\n download_path = os.path.abspath(_windows_download_path)\nelif osName == 'Linux':\n download_path = os.path.abspath(_linux_download_path)\nelse:\n raise Exception('not support system! 
{}'.format(osName))\n\ndecaptcha = DeCaptcha()\ndecaptcha.load_model(_decaptcha_model)\n\nold_torrent = list()\nif os.path.exists(_torrent_infos):\n old_torrent = pickle.load(open(_torrent_infos, 'rb'))\n\n\ndef get_url(url):\n return _BASE_URL + url\n\n\ndef login():\n url = get_url('login.php')\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}\n\n session = requests.session()\n for i in range(5):\n login_content = session.get(url)\n login_soup = BeautifulSoup(login_content.text, 'lxml')\n\n img_url = _BASE_URL + login_soup.select('#nav_block > form > table > tr:nth-of-type(3) img')[0].attrs['src']\n img_file = Image.open(BytesIO(session.get(img_url).content))\n\n captcha_text = decaptcha.decode(img_file)\n\n login_res = session.post(get_url('takelogin.php'), headers=headers,\n data=dict(username=_username, password=_passwd, imagestring=captcha_text,\n imagehash=img_url.split('=')[-1]))\n if '最近消息' in login_res.text:\n cookies = {}\n for k, v in session.cookies.items():\n cookies[k] = v\n\n with open(_cookies_save_path, 'wb') as f:\n pickle.dump(cookies, f)\n return cookies\n\n time.sleep(1)\n\n raise Exception('Cat not get Cookies!')\n\n\ndef load_cookie():\n global byrbt_cookies\n if os.path.exists(_cookies_save_path):\n print('find {}, loading cookies'.format(_cookies_save_path))\n read_path = open(_cookies_save_path, 'rb')\n byrbt_cookies = pickle.load(read_path)\n else:\n print('not find {}, get cookies...'.format(_cookies_save_path))\n byrbt_cookies = login()\n\n return byrbt_cookies\n\n\ndef _get_tag(tag):\n try:\n if tag == '':\n return ''\n else:\n tag = tag.split('_')[0]\n\n return _tag_map[tag]\n except KeyError:\n return ''\n\n\ndef _get_torrent_info(table):\n assert isinstance(table, list)\n torrent_infos = list()\n for item in table:\n torrent_info = dict()\n tds = item.select('td')\n cat = tds[0].select('img')[0].attrs['title']\n main_td = tds[1].select('table > tr > td')[0]\n href = main_td.select('a')[0].attrs['href']\n seed_id = re.findall(r'id=(\\d+)&', href)[0]\n title = main_td.text\n title = title.split('\\n')\n if len(title) == 2:\n sub_title = title[1]\n title = title[0]\n else:\n sub_title = ''\n title = title[0]\n\n tags = set([font.attrs['class'][0] for font in main_td.select('b > font') if 'class' in font.attrs.keys()])\n if '' in tags:\n tags.remove('')\n\n is_seeding = len(main_td.select('img[src=\"pic/seeding.png\"]')) > 0\n is_finished = len(main_td.select('img[src=\"pic/finished.png\"]')) > 0\n\n is_hot = False\n if 'hot' in tags:\n is_hot = True\n tags.remove('hot')\n is_new = False\n if 'new' in tags:\n is_new = True\n tags.remove('new')\n is_recommended = False\n if 'recommended' in tags:\n is_recommended = True\n tags.remove('recommended')\n\n if 'class' in tds[1].select('table > tr')[0].attrs.keys():\n tag = _get_tag(tds[1].select('table > tr')[0].attrs['class'][0])\n else:\n tag = ''\n\n file_size = tds[6].text.split('\\n')\n\n seeding = int(tds[7].text) if tds[7].text.isdigit() else -1\n\n downloading = int(tds[8].text) if tds[8].text.isdigit() else -1\n\n finished = int(tds[9].text) if tds[9].text.isdigit() else -1\n\n torrent_info['cat'] = cat\n torrent_info['is_hot'] = is_hot\n torrent_info['tag'] = tag\n torrent_info['is_seeding'] = is_seeding\n torrent_info['is_finished'] = is_finished\n torrent_info['seed_id'] = seed_id\n torrent_info['title'] = title\n torrent_info['sub_title'] = sub_title\n torrent_info['seeding'] = seeding\n 
torrent_info['downloading'] = downloading\n torrent_info['finished'] = finished\n torrent_info['file_size'] = file_size\n torrent_info['is_new'] = is_new\n torrent_info['is_recommended'] = is_recommended\n torrent_infos.append(torrent_info)\n\n return torrent_infos\n\n\ndef get_torrent(torrent_infos, tags):\n free_infos = list()\n for torrent_info in torrent_infos:\n if torrent_info['tag'] in tags:\n free_infos.append(torrent_info)\n\n return free_infos\n\n\ndef get_ok_torrent(torrent_infos):\n ok_infos = list()\n if len(torrent_infos) >= 20: # 遇到free或者免费种子太多了,择优选取\n print('ok种子过多,怀疑free了。。。')\n for torrent_info in torrent_infos:\n if torrent_info['seed_id'] in old_torrent:\n continue\n if 'GB' not in torrent_info['file_size'][0]:\n continue\n if torrent_info['seeding'] <= 0 or torrent_info['downloading'] < 0:\n continue\n if torrent_info['seeding'] != 0 and float(torrent_info['downloading']) / float(\n torrent_info['seeding']) < 20:\n continue\n file_size = torrent_info['file_size'][0]\n file_size = file_size.replace('GB', '')\n file_size = float(file_size.strip())\n if file_size < 20.0:\n continue\n ok_infos.append(torrent_info)\n else:\n for torrent_info in torrent_infos:\n if torrent_info['seed_id'] in old_torrent:\n continue\n if 'GB' not in torrent_info['file_size'][0]:\n continue\n if torrent_info['seeding'] <= 0 or torrent_info['downloading'] < 0:\n continue\n if torrent_info['seeding'] != 0 and float(torrent_info['downloading']) / float(torrent_info['seeding']) < \\\n 0.6:\n continue\n\n ok_infos.append(torrent_info)\n\n return ok_infos\n\n\ndef download_torrent(op_str):\n cookie_jar = RequestsCookieJar()\n for k, v in byrbt_cookies.items():\n cookie_jar[k] = v\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}\n\n id_re = re.findall(r'dl (\\d+)', op_str, re.I)\n if len(id_re) == 0:\n print('no such torrent')\n return\n torrent_id = id_re[0]\n\n download_url = 'download.php?id={}'.format(torrent_id)\n download_url = get_url(download_url)\n try:\n torrent = requests.get(download_url, cookies=cookie_jar, headers=headers)\n torrent_file_name = str(\n torrent.headers['Content-Disposition'].split(';')[1].strip().split('=')[-1][1:-1].encode('ascii',\n 'ignore').decode(\n 'ascii')).replace(' ', '#')\n with open(os.path.join(download_path, torrent_file_name), 'wb') as f:\n f.write(torrent.content)\n\n except:\n print('login failed!')\n return False\n\n index = 20\n while index > 0:\n if os.path.exists(os.path.join(download_path, torrent_file_name)):\n if osName == 'Linux':\n torrent_file_path = os.path.join(download_path, torrent_file_name)\n cmd_str = \"transmission-remote -n '{}' -a {}\".format(_transmission_user_pw, torrent_file_path)\n ret_val = os.system(cmd_str)\n if ret_val != 0:\n print('script `{}` returns {}'.format(cmd_str, ret_val))\n print('下载失败')\n else:\n print('下载成功')\n else:\n print('下载成功')\n return\n else:\n time.sleep(0.5)\n index = index - 1\n print('下载失败')\n return\n\n\ndef execCmd(cmd):\n r = os.popen(cmd)\n text = r.read()\n r.close()\n return text\n\n\ndef op_help():\n return \"\"\"\n byrbt bot: a bot that handles basic usage of bt.byr.cn\n usage:\n 1. main - run main program\n\n 2. download - download and start torrent file\n i.e. dl $id\n $id - torrent id, acquired by `ls` or `se`\n\n 3. list torrent status - list the torrent files status, merely call `transmission-remote -l`\n i.e. tls\n\n 4. 
remove torrent - remove specific torrent job, merely call `transmission-remote -t $id -r`\n i.e. trm $torrent_id\n\n 5. refresh - refresh cookies\n 6. help - print this message\n 7. exit\n \"\"\"\n\n\ndef list_torrent():\n os.system('transmission-remote -n \"{}\" -l'.format(_transmission_user_pw))\n\n\ndef get_info(text):\n text = text.split('\\n')\n sum_to = text[-2]\n text = text[1:-2]\n text_s = list()\n for t in text:\n ts = t.split()\n torrent = dict()\n torrent['id'] = ts[0]\n torrent['done'] = ts[1]\n torrent['size'] = ts[2] + ts[3]\n if 'GB' not in torrent['size']:\n torrent['size'] = '1GB'\n torrent['name'] = ts[-1]\n text_s.append(torrent)\n sum_to = sum_to.split()\n sum_size = sum_to[1] + sum_to[2]\n if 'GB' in sum_size or 'TB' in sum_size:\n pass\n else:\n sum_size = '1GB'\n\n return text_s, sum_size\n\n\ndef remove_torrent(op_str):\n id_re = re.findall(r'trm (\\d+)', op_str, re.I)\n if len(id_re) == 0:\n print('no such torrent id')\n return\n id_str = id_re[0]\n id_str = str(id_str)\n\n text = execCmd('transmission-remote -n \"{}\" -l'.format(_transmission_user_pw))\n text_s, sum_size = get_info(text)\n flag = False\n for to_info in text_s:\n if to_info['id'] == id_str:\n res = execCmd('transmission-remote -n \"{}\" -t {} --remove-and-delete'.format(_transmission_user_pw, id_str))\n if \"success\" not in res:\n print('remove torrent fail:')\n for k, v in to_info.items():\n print('{} : {}'.format(k, v))\n if os.path.exists(os.path.join(download_path, to_info['name'])):\n cmd_str = 'rm -rf {}'.format(os.path.join(download_path, to_info['name']))\n ret_val = os.system(cmd_str)\n if ret_val != 0:\n print('script `{}` returns {}'.format(cmd_str, ret_val))\n print(\n 'remove torrent from transmission-daemon success, but cat not remove it from disk!')\n else:\n print('remove torrent from transmission-daemon success!')\n else:\n print('remove torrent from transmission-daemon success!')\n flag = True\n break\n\n if flag is False:\n print('cat find this torrent id in torrent list, please use cmd \"tls\" ')\n\n\nclass TorrentBot(ContextDecorator):\n def __init__(self):\n super(TorrentBot, self).__init__()\n self.torrent_url = get_url('torrents.php')\n self.cookie_jar = RequestsCookieJar()\n for k, v in byrbt_cookies.items():\n self.cookie_jar[k] = v\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}\n self.tags = ['免费', '免费&2x上传']\n\n def __enter__(self):\n print('启动byrbt_bot!')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n print('退出')\n print('保存数据')\n pickle.dump(old_torrent, open(_torrent_infos, 'wb'), protocol=2)\n\n def remove(self):\n text = execCmd('transmission-remote -n \"{}\" -l'.format(_transmission_user_pw))\n text_s, sum_size = get_info(text)\n if len(text_s) <= max_torrent:\n return\n torrent_len = len(text_s)\n while torrent_len > max_torrent:\n text_s.sort(key=lambda x: int(x['id'].strip(\"*\")), reverse=False)\n remove_torrent_info = text_s.pop(0)\n res = execCmd('transmission-remote -n \"{}\" -t {} --remove-and-delete'.format(_transmission_user_pw,\n remove_torrent_info['id']))\n if \"success\" not in res:\n print('remove torrent fail:')\n for k, v in remove_torrent_info.items():\n print('{} : {}'.format(k, v))\n\n if os.path.exists(os.path.join(download_path, remove_torrent_info['name'])):\n cmd_str = 'rm -rf {}'.format(os.path.join(download_path, remove_torrent_info['name']))\n ret_val = os.system(cmd_str)\n if ret_val != 0:\n print('script `{}` 
returns {}'.format(cmd_str, ret_val))\n print('remove torrent from transmission-daemon success, but cat not remove it from disk!')\n else:\n print('remove {} from transmission-daemon success!'.format(remove_torrent_info['name']))\n else:\n print('remove {} from transmission-daemon success!'.format(remove_torrent_info['name']))\n torrent_len = torrent_len - 1\n\n def download(self, torrent_id):\n global byrbt_cookies\n download_url = 'download.php?id={}'.format(torrent_id)\n download_url = get_url(download_url)\n torrent_file_name = None\n for i in range(5):\n try:\n torrent = requests.get(download_url, cookies=self.cookie_jar, headers=self.headers)\n torrent_file_name = str(\n torrent.headers['Content-Disposition'].split(';')[1].strip().split('=')[-1][1:-1].encode('ascii',\n 'ignore').decode(\n 'ascii')).replace(' ', '#')\n print(torrent_file_name)\n with open(os.path.join(download_path, torrent_file_name), 'wb') as f:\n f.write(torrent.content)\n break\n\n except:\n print('login failed')\n byrbt_cookies = load_cookie()\n self.__init__()\n continue\n\n index = 20\n while index > 0:\n if torrent_file_name is not None and os.path.exists(os.path.join(download_path, torrent_file_name)):\n if osName == 'Linux':\n torrent_file_path = os.path.join(download_path, torrent_file_name)\n cmd_str = 'transmission-remote -n \"{}\" -a {}'.format(_transmission_user_pw, torrent_file_path)\n ret_val = os.system(cmd_str)\n if ret_val != 0:\n print('script `{}` returns {}'.format(cmd_str, ret_val))\n return True\n else:\n print('添加种子: {}'.format(torrent_file_name))\n\n old_torrent.append(torrent_id)\n else:\n pass\n return True\n else:\n time.sleep(0.5)\n index = index - 1\n\n return True\n\n def start(self):\n global byrbt_cookies\n while True:\n print('扫描种子列表')\n try:\n torrents_soup = BeautifulSoup(\n requests.get(self.torrent_url, cookies=self.cookie_jar, headers=self.headers).content)\n torrent_table = torrents_soup.select('.torrents > form > tr')[1:]\n pass\n except:\n byrbt_cookies = load_cookie()\n self.__init__()\n continue\n torrent_infos = _get_torrent_info(torrent_table)\n\n free_infos = get_torrent(torrent_infos, self.tags)\n print('种子列表:')\n for i, info in enumerate(free_infos):\n print('{} : {} {} {}'.format(i, info['seed_id'], info['file_size'], info['title']))\n ok_torrent = get_ok_torrent(free_infos)\n print('可用种子:')\n for i, info in enumerate(ok_torrent):\n print('{} : {} {} {}'.format(i, info['seed_id'], info['file_size'], info['title']))\n for torrent in ok_torrent:\n if self.download(torrent['seed_id']) is False:\n print('{} download fail'.format(torrent['title']))\n continue\n self.remove()\n time.sleep(search_time)\n\n\ndef main():\n with TorrentBot() as byrbt_bot:\n byrbt_bot.start()\n\n\nif __name__ == '__main__':\n byrbt_cookies = load_cookie()\n\n print(op_help())\n while True:\n action_str = input()\n if action_str == 'refresh':\n print('refresh cookie by login!')\n byrbt_cookies = login()\n elif action_str == 'exit':\n break\n elif action_str == 'help':\n print(op_help())\n elif action_str == 'main':\n main()\n elif action_str.startswith('dl'):\n download_torrent(action_str)\n elif action_str.startswith('tls'):\n list_torrent()\n elif action_str.startswith('trm'):\n remove_torrent(action_str)\n else:\n print('invalid operation')\n print(op_help())\n","sub_path":"byrbt.py","file_name":"byrbt.py","file_ext":"py","file_size_in_byte":19555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146441259","text":"import logging\n\nfrom 
django.test import TestCase\n\nfrom translations.models import Translation\nfrom translations.models.factories import (\n ProjectFactory,\n LanguageFactory,\n)\nfrom translations.utils.sync_project import sync_project\n\n\nclass SyncProjectTestCase(TestCase):\n def setUp(self):\n logging.disable(logging.CRITICAL)\n\n english = LanguageFactory(name=\"English\", language_code=\"en\")\n swedish = LanguageFactory(name=\"Swedish\", language_code=\"sv\")\n\n self.project = ProjectFactory(\n name=\"Test Git Project\",\n repository_name=\"zetkin/translators-interface\",\n locale_files_path=\"backend/translations/utils/tests/mock_files/sync_project\",\n languages=(english, swedish),\n )\n\n def test_create_translations_from_git(self):\n sync_project(self.project)\n # Check 8 Swedish translations created\n swedish_translations = Translation.objects.filter(language__language_code=\"sv\")\n self.assertEqual(len(swedish_translations), 8)\n # Check 8 English translations created\n english_translations = Translation.objects.filter(language__language_code=\"en\")\n self.assertEqual(len(english_translations), 8)\n\n # Check a couple dotpath\n english_home_page_header_title = Translation.objects.get(\n language__language_code=\"en\",\n file_path=\"./home_page/en.yaml\",\n object_path=\"header.title\",\n )\n self.assertEqual(\n english_home_page_header_title.dotpath, \"home_page.header.title\"\n )\n self.assertEqual(english_home_page_header_title.text, \"Edit translations here\")\n\n swedish_home_page_header_title = Translation.objects.get(\n language__language_code=\"sv\",\n file_path=\"./home_page/sv.yaml\",\n object_path=\"header.title\",\n )\n self.assertEqual(\n swedish_home_page_header_title.dotpath, \"home_page.header.title\"\n )\n self.assertEqual(\n swedish_home_page_header_title.text, \"Redigera översättningar här\"\n )\n","sub_path":"backend/translations/utils/tests/test_sync_project.py","file_name":"test_sync_project.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"54032231","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 4 09:00:28 2016\n\n@author: matthieu\n\"\"\"\n\nimport re\n\nfileName = '/data/contracteo/extractContract/data/raw/CGG_CDI_GUILLOUET.txt'\n\nwith open(fileName,'r',encoding='utf-8') as f:\n strCt = f.read()\n f.close()\n\n\nstructCt = []\n\n#### white spaces, line breaks, tabulations\nwhiteSpace = re.compile('\\s')\nwhitePos = []\nfor wh in whiteSpace.finditer(strCt):\n whitePos.append([wh.span()[0],wh.span()[1]])\n \nfirstElt = whitePos[0][0]\nwhitePos1 = []\nii = 0\nfor iWh in range(0,len(whitePos)-1):\n if whitePos[iWh][1]!=whitePos[iWh+1][0]:\n whitePos1.append([firstElt, whitePos[iWh][1]])\n firstElt = whitePos[iWh+1][0]\n \nwhitePos1.append([firstElt, whitePos[-1][1]])\n\nif whitePos1[0][0]==0:\n firstElt = whitePos[0][1]\n iWhite = 1\nelse:\n firstElt = 0\n iWhite = 0\n\nfor iWh in range(iWhite,len(whitePos1)):\n word = strCt[firstElt:whitePos1[iWh][0]]\n structCt.append([firstElt, whitePos1[iWh][0], word])\n firstElt = whitePos1[iWh][1]\n\nif firstElt=word[0] and begin=word[0] and aposPos=len(structCt):\n word = structCt[iW]\n \n if word[2].isdigit() and not inDigit:\n inDigit = True\n beginDigit = iW\n iW += 1\n \n elif inDigit and not word[2].isdigit():\n begin = structCt[beginDigit][0]\n end = structCt[iW-1][1]\n newElt = [begin, end, strCt[begin:end], 'DIGIT']\n for iDelete in range(beginDigit,iW):\n structCt.pop(beginDigit)\n 
structCt.insert(beginDigit,newElt)\n inDigit = False\n iW -= (iW-beginDigit)-1\n \n else:\n iW += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316261833","text":"'''\nCreated on 10 de Jan de 2012\n\n@author: ricardoosorio\n'''\n\n# --------------------------------------------\n# created by Ricardo Osorio\n# update 11/10/2011\n# --------------------------------------------\nfrom libDB import *\nfrom libXML import *\nfrom xml.dom.minidom import parseString,Document\nfrom encoding import smart_str\n\nclass dbFenotipos(object):\n \"\"\"dbFenotipos Class. Used to manage fenotipos\"\"\"\n \n fen_name = None\n fen_w = None\n fen_n = None\n fen_country = None\n fen_wineregion = None\n fen_vineyard = None\n fen_samplingsite = None\n fen_grapevar = None\n fen_ScAAT1_1 = None\n fen_ScAAT1_2 = None\n fen_ScAAT2_1 = None\n fen_ScAAT2_2 = None\n fen_ScAAT3_1 = None\n fen_ScAAT3_2 = None\n fen_ScAAT4_1 = None\n fen_ScAAT4_2 = None\n fen_ScAAT5_1 = None\n fen_ScAAT5_2 = None\n fen_ScAAT6_1 = None\n fen_ScAAT6_2 = None\n fen_C4_1 = None\n fen_C4_2 = None\n fen_C5_1 = None\n fen_C5_2 = None\n fen_C11_1 = None\n fen_C11_2 = None\n fen_YPL009c_1 = None\n fen_YPL009c_2 = None\n fen_YOR267c_1 = None\n fen_YOR267c_2 = None\n\n def __init__(self, *args):\n \"\"\"dbfenotipos is able to create a new fenotipo.\n\n STANDARD OPTIONS TO CREATE\n\n name\n\n \"\"\"\n if len(args) == 1:\n if self.create(args[0]) :\n dg(2, \"Loaded.\")\n \n def create(self, _fen_name):\n if self.isFenotipo(_fen_name) :\n dg(2, \"create() | Cannot create fenotipo. 
Fenotipo \" + _fen_name + \" already exists.\")\n else:\n self.fen_name = _fen_name\n database = db()\n database.sql(\"INSERT INTO FENOTIPOS (COLNUM) VALUES ('\" + _fen_name + \"' )\")\n self.fetch(_fen_name)\n dg(2, \"Fenotipo \" + _fen_name + \" created.\")\n\n def getname(self):\n if self.fen_name == None:\n dg(3, \"getfenotipo | No fenotipo loaded.\")\n return \"\"\n else :\n return self.fen_name\n # -- menu Properties\n \n def isFenotipo(self, _fen_name):\n database = db()\n row = database.query(\"SELECT * FROM FENOTIPOS WHERE (COLNUM='\" + _fen_name + \"')\")\n dg(2, \"isFenotipo | ROW: \" + str(row))\n return row != []\n\n def similars(self,argument):\n database = db()\n arg = str(argument)\n \n query = \"SELECT * FROM FENOTIPOS WHERE (COLNUM LIKE '%\" + arg + \"%')\"\n\n allrows = database.query(query)\n if allrows == [] :\n dg(2, \"Fenotipo \" + arg + \" does not exist.\")\n return None\n else :\n return allrows\n\n \n def load(self,row):\n self.fen_name = row[1]\n self.fen_w = row[2]\n self.fen_n = row[3]\n self.fen_country = row[4]\n self.fen_wineregion = row[5]\n self.fen_vineyard = row[6]\n self.fen_samplingsite = row[7]\n self.fen_grapevar = row[8]\n self.fen_ScAAT1_1 = row[9]\n self.fen_ScAAT1_2 = row[10]\n self.fen_ScAAT2_1 = row[11]\n self.fen_ScAAT2_2 = row[12]\n self.fen_ScAAT3_1 = row[13]\n self.fen_ScAAT3_2 = row[14]\n self.fen_ScAAT4_1 = row[15]\n self.fen_ScAAT4_2 = row[16]\n self.fen_ScAAT5_1 = row[17]\n self.fen_ScAAT5_2 = row[18]\n self.fen_ScAAT6_1 = row[19]\n self.fen_ScAAT6_2 = row[20]\n self.fen_C4_1 = row[21]\n self.fen_C4_2 = row[22]\n self.fen_C5_1 = row[23]\n self.fen_C5_2 = row[24]\n self.fen_C11_1 = row[25]\n self.fen_C11_2 = row[26]\n self.fen_YPL009c_1 = row[27]\n self.fen_YPL009c_2 = row[28]\n self.fen_YOR267c_1 = row[29]\n self.fen_YOR267c_2 = row[30]\n \n \n def fetch(self, argument):\n database = db()\n arg = str(argument)\n \n query = \"SELECT * FROM FENOTIPOS WHERE (COLNUM='\" + arg + \"')\"\n\n allrows = database.query(query)\n if allrows == [] :\n dg(2, \"Fenotipo \" + arg + \" does not exist.\")\n return False\n else :\n row=allrows[0]\n self.fen_name = row[1]\n self.fen_w = row[2]\n self.fen_n = row[3]\n self.fen_country = row[4]\n self.fen_wineregion = row[5]\n self.fen_vineyard = row[6]\n self.fen_samplingsite = row[7]\n self.fen_grapevar = row[8]\n self.fen_ScAAT1_1 = row[9]\n self.fen_ScAAT1_2 = row[10]\n self.fen_ScAAT2_1 = row[11]\n self.fen_ScAAT2_2 = row[12]\n self.fen_ScAAT3_1 = row[13]\n self.fen_ScAAT3_2 = row[14]\n self.fen_ScAAT4_1 = row[15]\n self.fen_ScAAT4_2 = row[16]\n self.fen_ScAAT5_1 = row[17]\n self.fen_ScAAT5_2 = row[18]\n self.fen_ScAAT6_1 = row[19]\n self.fen_ScAAT6_2 = row[20]\n self.fen_C4_1 = row[21]\n self.fen_C4_2 = row[22]\n self.fen_C5_1 = row[23]\n self.fen_C5_2 = row[24]\n self.fen_C11_1 = row[25]\n self.fen_C11_2 = row[26]\n self.fen_YPL009c_1 = row[27]\n self.fen_YPL009c_2 = row[28]\n self.fen_YOR267c_1 = row[29]\n self.fen_YOR267c_2 = row[30]\n\n dg(2, \"Fenotipo \" + self.fen_name + \" loaded.\")\n return True\n\n\n def allcountries_repr(self):\n \n \n database = db()\n xml=dbXML(\"FENOTIPO\")\n xml.ident(\"COUNTRIES\")\n allrows = database.query(\"SELECT DISTINCT COUNTRY FROM FENOTIPOS\")\n for row in allrows:\n xml.add(\"COUNTRY\",str(row[0]))\n xml.unident()\n return str(xml)\n\n\n\n def public_repr(self):\n if (self.fen_name == None):\n xml=dbXML(\"FENOTIPO\",\"No Fenotipo Loaded\")\n return str(xml)\n else :\n \n xml=dbXML(\"FENOTIPO\")\n xml.add(\"COLNUM\",self.fen_name)\n 
xml.add(\"COUNTRY\",self.fen_country)\n xml.add(\"WINEREGION\",self.fen_wineregion)\n xml.add(\"VINEYARD\",self.fen_vineyard)\n xml.add(\"GRAPEVAR\",self.fen_grapevar)\n xml.add(\"SCAAT1-1\",self.fen_ScAAT1_1)\n xml.add(\"SCAAT1-2\",self.fen_ScAAT1_2)\n \n return str(xml)\n \n# -- Representation of dbUser itself in XML format\n# \n# \n# ...\n# \n \n def __repr__(self):\n if (self.fen_name == None):\n xml=dbXML(\"FENOTIPO\",\"No Fenotipo Loaded\")\n return str(xml)\n else :\n \n xml=dbXML(\"FENOTIPO\")\n xml.add(\"COLNUM\",self.fen_name)\n xml.add(\"W\",smart_str(self.fen_w))\n xml.add(\"N\",smart_str(self.fen_n))\n xml.add(\"COUNTRY\",self.fen_country)\n xml.add(\"WINEREGION\",self.fen_wineregion)\n xml.add(\"VINEYARD\",self.fen_vineyard)\n xml.add(\"SAMPLINGSITE\",self.fen_samplingsite)\n xml.add(\"GRAPEVAR\",self.fen_grapevar)\n xml.add(\"SCAAT1-1\",self.fen_ScAAT1_1)\n xml.add(\"SCAAT1-2\",self.fen_ScAAT1_2)\n xml.add(\"SCAAT2-1\",self.fen_ScAAT2_1)\n xml.add(\"SCAAT2-2\",self.fen_ScAAT2_2)\n xml.add(\"SCAAT3-1\",self.fen_ScAAT3_1)\n xml.add(\"SCAAT3-2\",self.fen_ScAAT3_2)\n xml.add(\"SCAAT4-1\",self.fen_ScAAT4_1)\n xml.add(\"SCAAT4-2\",self.fen_ScAAT4_2)\n xml.add(\"SCAAT5-1\",self.fen_ScAAT5_1)\n xml.add(\"SCAAT5-2\",self.fen_ScAAT5_2)\n xml.add(\"SCAAT6-1\",self.fen_ScAAT6_1)\n xml.add(\"SCAAT6-2\",self.fen_ScAAT6_2)\n xml.add(\"C4-1\",self.fen_C4_1)\n xml.add(\"C4-2\",self.fen_C4_2)\n xml.add(\"C5-1\",self.fen_C5_1)\n xml.add(\"C5-2\",self.fen_C5_2)\n xml.add(\"C11-1\",self.fen_C11_2)\n xml.add(\"C11-2\",self.fen_C11_1)\n xml.add(\"YPL009C-1\",self.fen_YPL009c_1)\n xml.add(\"YPL009C-2\",self.fen_YPL009c_2)\n xml.add(\"YOR267C-1\",self.fen_YOR267c_1)\n xml.add(\"YOR267C-2\",self.fen_YOR267c_2)\n \n return str(xml)\n\n# Attributes\n \n name = property(getname)\n public=property(public_repr)\n allcountries=property(allcountries_repr)\n\n# Classes\n \n isFenotipo = classmethod(isFenotipo) \n","sub_path":"src/webserver/libFenotipos.py","file_name":"libFenotipos.py","file_ext":"py","file_size_in_byte":8238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571492907","text":"from tkinter import *\r\nfrom playerHome import playerHome\r\nfrom playerVisiting import playerVisiting\r\nfrom soccerBall import soccerBall\r\nimport math\r\nimport random\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\ndef init(data):\r\n # load data.xyz as appropriate\r\n data.mode = \"startScreen\"\r\n data.playerHomeIndex = 2\r\n data.playerVisitingIndex = 0\r\n data.picture = PhotoImage(file = \"newSoccer pitch.png\")\r\n data.Bayern = PhotoImage(file = \"Bayern.png\")\r\n data.Bayern = data.Bayern.subsample(2,2)\r\n data.realMadrid = PhotoImage(file = \"realMadrid.png\")\r\n data.realMadrid = data.realMadrid.subsample(3,3)\r\n data.Barcelona = PhotoImage(file = \"Barcelona.png\")\r\n data.Barcelona = data.Barcelona.subsample(5,5)\r\n data.Juventus = PhotoImage(file = \"Juventus.png\")\r\n data.Juventus = data.Juventus.subsample(4,4)\r\n data.picture = data.picture.subsample(2,2)\r\n data.leftPostX = 187.5\r\n data.rightPostX = 312.5\r\n data.intLeftPostX = int(data.leftPostX)\r\n data.intRightPostX = int(data.rightPostX)\r\n data.theFixedPoint = random.randint(data.intLeftPostX + 10,data.intRightPostX - 10)\r\n data.leftBoarder = 80\r\n data.rightBoarder = 420\r\n data.lowerBoarder = 30\r\n data.upperBoarder = 470\r\n data.soccerBall = soccerBall(data.width/2,data.height/2) \r\n 
data.soccerMovingState = False\r\n #data.collided is True when the soccer ball has collided with a visiting player\r\n data.collided = False\r\n data.inStrategy = False\r\n data.inShootingProcess = False\r\n data.bouncedWall = False\r\n data.direction = 0\r\n data.bouncedDirection = 0\r\n data.collidedSpeed = 0\r\n data.ballSpeed = 1.5\r\n data.blueTeamScore = 0\r\n data.redTeamScore = 0\r\n data.playerVisitingSpeed = 0.2\r\n data.playerVisitingSupportingSpeed = 0.2\r\n data.startingPostionHome = [[200,300],[300,300],[250,450]]\r\n data.team1 = [playerHome(data.startingPostionHome[0][0],data.startingPostionHome[0][1]),playerHome(data.startingPostionHome[1][0],data.startingPostionHome[1][1]),playerHome(data.startingPostionHome[2][0],data.startingPostionHome[2][1])]\r\n\r\ndef mousePressed(event, data):\r\n if(data.mode == \"levelSelection\"):\r\n if(event.x > 200 and event.x < 400 and event.y > 300 and event.y < 350):\r\n data.playerVisitingSpeed = 0.05\r\n data.playerVisitingSupportingSpeed = 0.05\r\n data.mode = \"gameState\"\r\n data.team2 = [playerVisiting(200,200,data.playerVisitingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint),playerVisiting(300,200,data.playerVisitingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint),playerVisiting(250,50,data.playerVisitingSupportingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint)]\r\n \r\n if(event.x > 200 and event.x < 400 and event.y > 375 and event.y < 425):\r\n data.playerVisitingSpeed = 0.1\r\n data.playerVisitingSupportingSpeed = 0.1\r\n data.mode = \"gameState\"\r\n data.team2 = [playerVisiting(200,200,data.playerVisitingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint),playerVisiting(300,200,data.playerVisitingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint),playerVisiting(250,50,data.playerVisitingSupportingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint)]\r\n \r\n if(event.x > 200 and event.x < 400 and event.y > 450 and event.y < 500):\r\n data.playerVisitingSpeed = 0.5\r\n data.playerVisitingSupportingSpeed = 0.5\r\n data.mode = \"gameState\"\r\n data.team2 = [playerVisiting(200,200,data.playerVisitingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint),playerVisiting(300,200,data.playerVisitingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint),playerVisiting(250,50,data.playerVisitingSupportingSpeed,data.playerVisitingSupportingSpeed,data.theFixedPoint)]\r\n # use event.x and event.y\r\n pass\r\n\r\ndef keyPressed(event, data):\r\n # use event.char and event.keysym\r\n if(event.keysym == \"Up\"):\r\n data.team1[data.playerHomeIndex].move(\"up\")\r\n data.team1[data.playerHomeIndex].speed = 10\r\n if(event.keysym == \"Down\"):\r\n data.team1[data.playerHomeIndex].move(\"down\")\r\n data.team1[data.playerHomeIndex].speed = 10\r\n if(event.keysym == \"Left\"):\r\n data.team1[data.playerHomeIndex].move(\"left\")\r\n data.team1[data.playerHomeIndex].speed = 10\r\n if(event.keysym == \"Right\"):\r\n data.team1[data.playerHomeIndex].move(\"right\")\r\n data.team1[data.playerHomeIndex].speed = 10\r\n if(event.keysym == \"s\"):\r\n data.mode = \"levelSelection\"\r\n if(event.keysym == \"p\"):\r\n data.mode = \"startScreen\"\r\n pass\r\n \r\ndef smartAngleFunction(selfX,selfY,otherX, otherY):\r\n if(abs(otherX - selfX) < 0.5 and (otherY - selfY) < 0):\r\n return 90\r\n if(abs(otherX - selfX) < 0.5 and (otherY - selfY) > 0):\r\n return 270\r\n if((otherX - selfX) > 0 and abs(otherY - selfY) < 0.5):\r\n return 0\r\n if((otherX - selfX) < 0 and abs(otherY - selfY) < 
0.5):\r\n return 180\r\n theta = math.atan(abs(otherY - selfY) / abs(otherX - selfX))\r\n theta = math.degrees(theta)\r\n if((otherX - selfX) > 0 and (otherY - selfY) < 0):\r\n return theta\r\n elif((otherX - selfX) < 0 and (otherY - selfY) < 0):\r\n return 180 - theta\r\n elif((otherX - selfX) < 0 and (otherY - selfY) > 0):\r\n return 180 + theta\r\n elif((otherX - selfX) > 0 and (otherY - selfY) > 0):\r\n return 0 - theta\r\n \r\ndef visitingTeamRespond(data):\r\n pass\r\n \r\n #Switching players based on who is the closest\r\n newList = []\r\n i = 0\r\n while(i < len(data.team2)):\r\n newList = newList + [((data.team2[i].x - data.soccerBall.x)**(2) + \r\n (data.team2[i].y - data.soccerBall.y) ** (2))**(0.5)]\r\n i = i + 1\r\n data.playerVisitingIndex = newList.index(min(newList))\r\n \r\n #Check if the visiting player collides with the home player, if the visiting player collides with the home player, make the visiting player retreat back in the same direction from where it comes from by a speed, the time duration only lasts for 1ms, after a millisecond, the ball speed gets set to 0. \r\n \r\n \r\n for i in data.team1:\r\n for j in data.team2:\r\n if(j.collideWithOtherTeamCheck(i)[0] == True):\r\n j.bounce(i)\r\n \r\n #Check if the visiting player collides with the visiting player, if the visiting player collides with the visiting player, make the visiting player bounce back\r\n if(data.team2[0].collideWithSameTeamCheck(data.team2[1])[0] == True):\r\n data.team2[0].bounce(1)\r\n if(data.team2[1].collideWithSameTeamCheck(data.team2[2])[0] == True):\r\n data.team2[1].bounce(2)\r\n if(data.team2[2].collideWithSameTeamCheck(data.team2[0])[0] == True):\r\n data.team2[2].bounce(0)\r\n \r\n \r\n #Make the visiting team player gravitates toward shooting position\r\n newDirection = data.team2[data.playerVisitingIndex].ballChasingDirection(data.soccerBall)\r\n if(data.inShootingProcess == False):\r\n data.team2[data.playerVisitingIndex].move(newDirection,data.team2[data.playerVisitingIndex].speed)\r\n if(data.team2[data.playerVisitingIndex].checkInFirePosition(data.soccerBall)):\r\n data.inShootingProcess = True\r\n \r\n #If the ball moves to the designated position, make a hit \r\n if(data.inShootingProcess == True):\r\n toTheBallDirection = data.team2[data.playerVisitingIndex].ballShootingDirectionWhenReady(data.soccerBall)\r\n # print(\"toTheBallDirection\",toTheBallDirection)\r\n # print(\"data.team2[data.playerVisitingIndex]\",data.team2[data.playerVisitingIndex])\r\n # print(\"data.team2[data.playerVisitingIndex].speed\",data.team2[data.playerVisitingIndex].speed)\r\n \r\n data.team2[data.playerVisitingIndex].move(toTheBallDirection,data.team2[data.playerVisitingIndex].speed)\r\n if(abs(data.ballSpeed-0)<0.04):\r\n data.inShootingProcess = False\r\n data.ballSpeed = 0\r\n #If one of the team2 player collides with the ball, make the ball move accordingly to the direction it collides\r\n if(data.team2[data.playerVisitingIndex].collideWithSoccerBallCheck(data.soccerBall)[0]):\r\n data.soccerMovingState = True\r\n data.direction = data.team2[data.playerVisitingIndex].collideWithSoccerBallCheck(data.soccerBall)[1]\r\n \r\n#Check if the soccer ball has scored\r\ndef checkScored(data):\r\n if(data.soccerBall.y < data.lowerBoarder and data.soccerBall.x > data.leftPostX and data.soccerBall.x < data.rightPostX):\r\n data.blueTeamScore = data.blueTeamScore + 1\r\n data.soccerBall.y = data.width/2\r\n data.soccerBall.x = data.height/2\r\n data.ballSpeed= 1.5\r\n reset(data)\r\n if(data.soccerBall.y > 
data.upperBoarder and data.soccerBall.x > data.leftPostX and data.soccerBall.x < data.rightPostX):\r\n data.redTeamScore = data.redTeamScore + 1\r\n data.soccerBall.y = data.width/2\r\n data.soccerBall.x = data.height/2\r\n data.ballSpeed= 1.5\r\n reset(data)\r\n\r\n#Reset Everything to the starting position\r\ndef reset(data):\r\n for i in data.team1:\r\n i.reset()\r\n for i in data.team2:\r\n i.reset()\r\n\r\ndef strategicMoveHome(data):\r\n if(data.playerHomeIndex == 0):\r\n \r\n if(data.team1[0].y < (data.height / 2) and \r\n data.team1[0].y >= (data.height / 4) and \r\n data.team1[0].x < (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[1].makingMoveUpperFieldRightSide(data.team1[0])\r\n if(data.team1[1].makingMoveUpperFieldRightSide(data.team1[0]) == True):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.team1[0].y < (data.height / 2) and \r\n data.team1[0].y >= (data.height / 4) and \r\n data.team1[0].x > (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[1].makingMoveUpperFieldLeftSide(data.team1[0])\r\n if(data.team1[1].makingMoveUpperFieldLeftSide(data.team1[0]) == False):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.team1[0].y > (data.height / 2) and \r\n data.team1[0].y <= (data.height / 4 * 3) and \r\n data.team1[0].x < (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[1].makingMoveLowerFieldRightSide(data.team1[0])\r\n if(data.team1[1].makingMoveLowerFieldRightSide(data.team1[0]) == True):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.team1[0].y > (data.height / 2) and \r\n data.team1[0].y <= (data.height / 4 * 3) and \r\n data.team1[0].x > (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[1].makingMoveLowerFieldLeftSide(data.team1[0])\r\n if(data.team1[1].makingMoveLowerFieldLeftSide(data.team1[0]) == True):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.playerHomeIndex == 1):\r\n \r\n if(data.team1[1].y < (data.height / 2) and \r\n data.team1[1].y >= (data.height / 4) and \r\n data.team1[1].x < (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[0].makingMoveUpperFieldRightSide(data.team1[1])\r\n if(data.team1[0].makingMoveUpperFieldRightSide(data.team1[1]) == True):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.team1[1].y < (data.height / 2) and \r\n data.team1[1].y >= (data.height / 4) and \r\n data.team1[1].x > (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[0].makingMoveUpperFieldLeftSide(data.team1[1])\r\n if(data.team1[0].makingMoveUpperFieldLeftSide(data.team1[1]) == False):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.team1[1].y > (data.height / 2) and \r\n data.team1[1].y <= (data.height / 4 * 3) and \r\n data.team1[1].x < (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[0].makingMoveLowerFieldRightSide(data.team1[1])\r\n if(data.team1[0].makingMoveLowerFieldRightSide(data.team1[1]) == True):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\n if(data.team1[1].y > (data.height / 2) and \r\n data.team1[1].y <= (data.height / 4 * 3) and \r\n data.team1[1].x > (data.width) / 2):\r\n data.inStrategy = True\r\n data.team1[0].makingMoveLowerFieldLeftSide(data.team1[1])\r\n if(data.team1[0].makingMoveLowerFieldLeftSide(data.team1[1]) == True):\r\n data.inStrategy = False\r\n else:\r\n data.inStrategy = True\r\n \r\ndef strategicMoveVisiting(data):\r\n 
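#Visiting-team counterpart of strategicMoveHome; currently only the index-0 attacker triggers a supporting run by team2[1]\r\n 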
if(data.playerVisitingIndex == 0):\r\n if(data.team2[0].y > (data.height / 2) and\r\n data.team2[0].y <= (data.height / 4 * 3) and\r\n data.team2[0].x < (data.width / 2)):\r\n data.team2[1].makingMoveUpperFieldRightSide(data.team2[0])\r\n \r\n if(data.team2[0].y > (data.height / 2) and \r\n data.team2[0].y <= (data.height / 4 * 3) and \r\n data.team2[0].x > (data.width / 2)):\r\n data.team2[1].makingMoveUpperFieldLeftSide(data.team2[0])\r\n \r\n if(data.team2[0].y < (data.height / 2) and \r\n data.team2[0].y >= (data.height / 4) and\r\n data.team2[0].x < (data.width / 2)):\r\n data.team2[1].makingMoveLowerFieldRightSide(data.team2[0])\r\n \r\n \r\n \r\ndef timerFired(data):\r\n if(data.mode == \"gameState\"):\r\n if(data.blueTeamScore > 3):\r\n data.mode = \"blueTeamWinState\"\r\n if(data.redTeamScore > 3):\r\n data.mode = \"redTeamWinState\"\r\n # Check if the soccer has scored\r\n checkScored(data)\r\n \r\n #Make one of the visiting team player to chase the ball\r\n visitingTeamRespond(data)\r\n \r\n #Make the home team move strategically \r\n strategicMoveHome(data) \r\n \r\n #Make the oppposing team move strategically \r\n strategicMoveVisiting(data)\r\n \r\n #Normal moving state\r\n if(data.soccerMovingState == True):\r\n if(data.collided == False and data.bouncedWall == False):\r\n data.ballSpeed = data.ballSpeed - 0.01\r\n if(abs(data.ballSpeed-0)<0.02):\r\n data.soccerMovingState = False\r\n data.ballSpeed = 1.5\r\n data.soccerBall.reactToKick(data.direction,data.ballSpeed)\r\n \r\n #Check if the ball collides with the wall or not\r\n if(data.soccerBall.bouncingBoarder(data.direction,data.leftBoarder,data.rightBoarder,data.lowerBoarder,data.upperBoarder,data.bouncedDirection)[0] == True):\r\n data.bouncedWall = True\r\n if(data.bouncedWall == True) and (data.soccerMovingState == True):\r\n data.bouncedDirection = data.soccerBall.bouncingBoarder(data.direction,data.leftBoarder,data.rightBoarder,data.lowerBoarder,data.upperBoarder,data.bouncedDirection)[1]\r\n data.ballSpeed = data.ballSpeed - 0.01\r\n if(abs(data.ballSpeed - 0) < 0.02):\r\n data.soccerMovingState = False\r\n data.ballSpeed = 0\r\n data.bouncedWall = False\r\n data.soccerBall.reactToKick(data.bouncedDirection,data.ballSpeed)\r\n \r\n #Check if the ball collide with Visiting player\r\n # for i in data.team2:\r\n # if(i == data.playerHomeIndex):\r\n # continue\r\n # else:\r\n # if(data.soccerBall.collideWithVisitingTeamCheck(i) and data.soccerMovingState == True):\r\n # data.collided = True\r\n # data.collidedSpeed = -data.ballSpeed\r\n \r\n #Switching Control of the ball for home player\r\n # for i in range(len(data.team1)):\r\n # if(i == data.playerHomeIndex):\r\n # continue\r\n # else:\r\n # if(data.soccerBall.collideWithHomeTeamCheck(data.team1[i]) and data.soccerMovingState == True):\r\n # data.collided = True\r\n # data.collidedSpeed = -data.ballSpeed\r\n # data.playerHomeIndex = i\r\n \r\n #Switching Control of the ball for home player\r\n # if(data.inStrategy == False):\r\n newList = []\r\n i = 0\r\n while(i < len(data.team1)):\r\n newList = newList + [((data.team1[i].x - data.soccerBall.x)**(2) + \r\n (data.team1[i].y - data.soccerBall.y) ** (2))**(0.5)]\r\n i = i + 1\r\n data.playerHomeIndex = newList.index(min(newList))\r\n \r\n #When the control is swithced, try to make the other player accords with the player with the ball. \r\n \r\n #If the player with the ball is greater than half of the line and is in the left, make the accompanying player move in a slow speed up. 
Trying to go to the point where it is 30 degrees above the home player. \r\n strategicMoveHome(data)\r\n # strategicMoveVisiting(data)\r\n \r\n # if(data.team2[data.playerHomeIndex].y < (data.height / 4)):\r\n # data.team2[1].makingMoveInPenaltyArea()\r\n \r\n # if(data.playerHomeIndex == 1):\r\n # if(data.team1[1].y < (data.height / 2) and \r\n # data.team1[1].y >= (data.height /4 )and \r\n # data.team1[1].x < (data.width) / 2):\r\n # data.team1[0].makingMoveUpperFieldRightSide(data.team1[1])\r\n # if(data.team2[data.playerHomeIndex].y < (data.height / 4)):\r\n # data.team2[1].makingMoveInPenaltyArea()\r\n \r\n \r\n \r\n #This is the central control of how to react when collided\r\n if(data.soccerMovingState == True and data.collided == True):\r\n data.collidedSpeed = data.collidedSpeed + 0.07\r\n if(abs(data.collidedSpeed-0)<0.1):\r\n data.soccerMovingState = False\r\n data.collided = False\r\n data.collidedSpeed = 1.5\r\n data.soccerBall.reactToKick(data.team1[data.playerHomeIndex].collideWithSoccerBallCheck(data.soccerBall)[1],data.collidedSpeed)\r\n \r\n #Check if the player hits a soccer ball\r\n if(data.team1[data.playerHomeIndex].collideWithSoccerBallCheck(data.soccerBall)[0]):\r\n data.soccerMovingState = True\r\n data.direction = data.team1[data.playerHomeIndex].collideWithSoccerBallCheck(data.soccerBall)[1]\r\n \r\n #Check if the player hits a wall. If he hits a wall, makes him freeze\r\n data.team1[data.playerHomeIndex].collideWithWallCheck(data.leftBoarder,data.rightBoarder,data.lowerBoarder,data.upperBoarder)\r\n \r\n #Check if a player hits a visiting team player, if so, he can't move forward anymore. \r\n for i in data.team2:\r\n if(data.team1[data.playerHomeIndex].collideWithOtherTeamCheck(i)):\r\n data.team1[data.playerHomeIndex].move(data.team1[data.playerHomeIndex].collideWithOtherTeamCheck(i)[0])\r\n \r\n #Check if a player hits a home team player, if so, he can't move forward anymore\r\n for i in range(len(data.team1)):\r\n if(i == data.playerHomeIndex):\r\n continue\r\n else:\r\n if(data.team1[data.playerHomeIndex].collideWithSameTeamCheck(data.team1[i])):\r\n data.team1[data.playerHomeIndex].move(data.team1[data.playerHomeIndex].collideWithSameTeamCheck(data.team1[i])[0])\r\n \r\n\r\n \r\n \r\n \r\ndef draw(canvas,data):\r\n if(data.mode == \"blueTeamWinState\"):\r\n canvas.create_text(250,250,text = \"Congratulations!\",font = \"30\")\r\n if(data.mode == \"redTeamWinState\"):\r\n canvas.create_text(250,250,text = \"Tough Loss\",font = \"30\")\r\n if (data.mode == \"startScreen\"):\r\n canvas.create_rectangle(0,0,500,500,fill = \"green\")\r\n canvas.create_text(250,250,text = \"Exciting Soccer Game\",fill = \"orange\",\r\n font=\"Times 28 bold italic\")\r\n canvas.create_image(125,125,image = data.Barcelona)\r\n canvas.create_image(125,375,image = data.Juventus)\r\n canvas.create_image(375,125,image = data.realMadrid)\r\n canvas.create_image(375,375,image = data.Bayern)\r\n if(data.mode == \"gameState\"):\r\n canvas.create_image(data.width/2,data.height/2,image = data.picture)\r\n canvas.create_text(40,50,text = \"BlueTeam:%d\"%data.blueTeamScore,font = \"Times 12 italic\")\r\n canvas.create_text(40,70,text = \"RedTeam:%d\"%data.redTeamScore,font = \"Times 12 italic\")\r\n for i in data.team1:\r\n i.draw(canvas)\r\n for i in data.team2:\r\n i.draw(canvas)\r\n data.soccerBall.draw(canvas)\r\n \r\n if(data.mode == \"levelSelection\"):\r\n levelColor = \"indian red\"\r\n canvas.create_rectangle(0,0,500,500,fill = \"orange\")\r\n canvas.create_text(200,200,text = 
\"Choose Your Level\", font = \"Times 28 bold italic\",)\r\n canvas.create_rectangle(200,300,400,350,fill = levelColor,outline = levelColor)\r\n canvas.create_text(300,325,text = \"intramural\", font = \"Times 18\")\r\n canvas.create_rectangle(200,375,400,425,fill = levelColor,outline = levelColor)\r\n canvas.create_text(300,400,text = \"club\",font = \"Times 18\")\r\n canvas.create_rectangle(200,450,400,500,fill = levelColor,outline = levelColor)\r\n canvas.create_text(300,475,text = \"professional\",font = \"Times 18\")\r\n \r\n \r\n \r\ndef redrawAll(canvas, data):\r\n # draw in canvas\r\n draw(canvas, data)\r\n pass\r\n\r\n####################################\r\n# use the run function as-is\r\n#####################################\r\n\r\n#Important!!!This code Is directly cited from 15112 course note. \r\n#This is the link to the website. Thank you! https://www.cs.cmu.edu/~112/notes/notes-graphics.html\r\ndef run(width=300, height=300):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.timerDelay = 1\r\n data.width = width\r\n data.height = height\r\n root = Tk()\r\n root.resizable(width=False, height=False) # prevents resizing window\r\n init(data)\r\n # create the root and the canvas\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.configure(bd=0, highlightthickness=0)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(500, 500)","sub_path":"tkinter_soccer_game.py","file_name":"tkinter_soccer_game.py","file_ext":"py","file_size_in_byte":23955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550508325","text":"import sys\n\nfrom PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QPushButton, QToolTip\n\nfrom PyQt5 import QtGui\n\n\nclass Janela(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.topo = 100\n self.esquerda = 100\n self.largura = 800\n self.altura = 700\n self.titulo = \"Primeira Janela\"\n \n \n\n botao1 = QPushButton('Botão 1', self)\n botao1.move(250, 250) \n botao1.resize(50,50) \n botao1.setStyleSheet('QPushButton {background-color:#0FB329; font-size:10px}') \n botao1.clicked.connect(self.bota01_click) \n\n \n\n botao2 = QPushButton('Botão 2', self)\n botao2.move(350, 250) \n botao2.resize(50,50) \n botao2.setStyleSheet('QPushButton {background-color:#0FB329; font-size:10px}') \n botao2.clicked.connect(self.bota02_click) \n\n\n\n botao3 = QPushButton('Botão 3', self)\n botao3.move(450, 250) \n botao3.resize(50,50) \n botao3.setStyleSheet('QPushButton {background-color:#0FB329; font-size:10px}') \n 
botao3.clicked.connect(self.bota03_click) \n \n \n self.label_1 = QLabel(self)\n self.label_1.setText(\"LINGUAGEM DE PROGRAMAÇÃO\")\n self.label_1.move(220, 220)\n self.label_1.setStyleSheet(\"QLabel {color:red; font-size:25px}\")\n self.label_1.resize(400, 25)\n\n \n self.logo = QLabel(self)\n self.logo.move(500, 200)\n self.logo.resize(500, 600)\n self.CarregarJanela()\n\n def CarregarJanela(self):\n self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)\n self.setWindowTitle(self.titulo)\n self.show()\n\n \n def bota01_click(self):\n self.label_1.setStyleSheet(\"QLabel {color:blue; font-size:25px}\")\n\n self.label_1.setText(\"Python\")\n self.logo.setPixmap(QtGui.QPixmap('python.png')) \n\n \n def bota02_click(self):\n self.label_1.setStyleSheet(\"QLabel {color:blue; font-size:25px}\")\n\n self.label_1.setText(\"Java\")\n self.logo.setPixmap(QtGui.QPixmap('java.png')) \n\n def bota03_click(self):\n self.label_1.setStyleSheet(\"QLabel {color:blue; font-size:25px}\")\n\n self.label_1.setText(\"C\")\n self.logo.setPixmap(QtGui.QPixmap('cc.png')) \n\n\n\naplicação = QApplication(sys.argv)\n\nj = Janela()\nsys.exit(aplicação.exec_())","sub_path":"Aula05/Praticas Aula 05/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"538008567","text":"import sys\nimport numpy as np\ndef f(n,a_):\n a=[]\n for i,ai in enumerate(a_):\n a.append([1+i,ai])\n a.sort(key=lambda x:x[1],reverse=True)\n dp=np.zeros((n+1,n+1),np.int64) #dp[i,j]: best total after placing the first i children with j of them moved to the right end; j<=i\n dp[1,1]=a[0][1]*(n-a[0][0])\n dp[1,0]=a[0][1]*(a[0][0]-1)\n for i in range(2,n+1):#the i-th child, i.e. a[i-1]\n dp[i,0]=dp[i-1,0]+a[i-1][1]*abs((a[i-1][0]-1-(i-1))) #send the i-th child left; i-1 children already sit on the left\n dp[i,i]=dp[i-1,i-1]+a[i-1][1]*abs((n-a[i-1][0]-(i-1))) #send the i-th child right; i-1 children already sit on the right\n #i-th child goes left\n l=dp[i-1,1:i]+[a[i-1][1]*abs((a[i-1][0]-1-j))for j in range(i-2,-1,-1)] #j: number of children already on the left\n #i-th child goes right\n r=dp[i-1,:i-1]+[a[i-1][1]*abs((n-a[i-1][0]-j)) for j in range(i-1)] #j: number of children already on the right\n dp[i,1:i]=np.maximum(l,r)\n return max(dp[n])\n\nif __name__ == '__main__':\n input = sys.stdin.readline\n n=int(input())\n a_=list(map(int,input().split()))\n print(f(n,a_))\n","sub_path":"Python_codes/p02709/s142304741.py","file_name":"s142304741.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"334285668","text":"from time import sleep\nimport psycopg2\nimport serial\nfrom scast_buffer import *\n\nclass SerialPort:\n def __init__(self, port_name):\n self.ser = serial.Serial(\n port = port_name,\n baudrate = 19200,\n parity = serial.PARITY_NONE,\n stopbits = serial.STOPBITS_ONE,\n bytesize = serial.EIGHTBITS)\n\n\n buffer = ScastBuffer()\n while 1:\n chars = \"\"\n while self.ser.inWaiting() > 0:\n chars += self.ser.read(1).encode('hex')\n if len(chars) > 0 and buffer.add(chars):\n self.process_alarms_and_relays()\n sleep(1)\n\n def process_alarms_and_relays(self):\n conn = psycopg2.connect(\"dbname=pyppm user=pyppm password=PyPPM\")\n cur = conn.cursor()\n cur.execute('SELECT * FROM \"GetRelayStates\"()')\n for record in cur:\n self.ser.write(\"AT+UCAST:%s,00%0.2X\\r\\n\" % (record[0], record[1]))\n conn.commit()\n cur.close()\n conn.close()\n\n","sub_path":"serial_port.py","file_name":"serial_port.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"284472591","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 23 15:47:31 2016\n\n@author: SRINIVAS\n\"\"\"\n\nimport numpy as np\nimport wavio\nimport sounddevice as sd\nfs= 44100\na=0\nduration=5\nprint(0)\nrecording = sd.rec(duration * fs, samplerate=fs, channels=2)\nsd.wait()\nprint(0)\n\nwavio.write('ine.wav',recording,rate=44100,sampwidth=3)","sub_path":"WAV.py","file_name":"WAV.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462330697","text":"# -*- coding=UTF-8 -*-\n\"\"\"Tray icon and menu. \"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom Qt.QtGui import QCursor, QIcon\nfrom Qt.QtWidgets import QSystemTrayIcon\n\nfrom ..pathtools import module_path\nfrom .core import Menu\n\n\nclass Tray(QSystemTrayIcon):\n \"\"\"Tray icon with menu. \"\"\"\n\n instance = None\n initiated = False\n icon = module_path('assets', 'tray_icon.png')\n\n def __new__(cls):\n if cls.instance:\n return cls.instance\n return super(Tray, cls).__new__(cls)\n\n def __init__(self):\n if self.initiated:\n return\n\n super(Tray, self).__init__(QIcon(self.icon))\n\n # Menu.\n self.menu = Menu()\n self.setContextMenu(self.menu)\n\n # Signals.\n self.activated.connect(self.on_activated)\n\n self.initiated = True\n Tray.instance = self\n\n def on_activated(self, reason):\n if reason == self.Trigger:\n self.contextMenu().popup(QCursor.pos())\n\n @classmethod\n def _message(cls, title, text, seconds, icon):\n \"\"\"Show a traytip. \"\"\"\n\n tray = cls()\n tray.show()\n tray.showMessage(title, text, icon=icon, msecs=seconds * 1000)\n\n @classmethod\n def message(cls, title, text, seconds=3):\n \"\"\"Show a traytip with no icon. \"\"\"\n\n cls._message(title, text, seconds, QSystemTrayIcon.NoIcon)\n\n @classmethod\n def information(cls, title, text, seconds=3):\n \"\"\"Show a traytip with information icon. \"\"\"\n\n cls._message(title, text, seconds, QSystemTrayIcon.Information)\n\n @classmethod\n def warning(cls, title, text, seconds=3):\n \"\"\"Show a traytip with warning icon. \"\"\"\n\n cls._message(title, text, seconds, QSystemTrayIcon.Warning)\n\n @classmethod\n def critical(cls, title, text, seconds=3):\n \"\"\"Show a traytip with critical icon. 
\"\"\"\n\n cls._message(title, text, seconds, QSystemTrayIcon.Critical)\n","sub_path":"wlf/uitools/tray.py","file_name":"tray.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444753494","text":"from collections import defaultdict\n\n\nclass Track:\n def __init__(self, nodes, tracks):\n self.nodes = {}\n self.tracks = {}\n\n tracks_by_node = defaultdict(lambda: [])\n\n for node in nodes:\n self.nodes[node.uuid] = node\n\n for track in tracks:\n self.tracks[track.uuid] = track\n\n for node in track.get_nodes():\n tracks_by_node[node].append(track)\n\n for node in tracks_by_node:\n if node not in self.nodes:\n raise AssertionError(node + ' not found in provided nodes')\n if len(tracks_by_node[node]) != 2:\n print('Warning: node has ' + str(len(tracks_by_node[node])) + ' connections')\n\n for track in tracks_by_node[node]:\n for other_track in tracks_by_node[node]:\n if track.uuid == other_track.uuid:\n continue\n\n track.add_connection(node, other_track)\n\n def get_updated_location(self, loc, offset):\n return self.tracks[loc.track_uuid].get_updated_location(loc, offset)\n\n def to_string(self):\n string = 'Track\\n'\n string += '--- NODES ---\\n\\n'\n\n for n in self.nodes.values():\n string += n.to_string() + '\\n'\n\n string += '\\n--- TRACKS --- \\n\\n'\n\n for t in self.tracks.values():\n string += t.to_string()\n\n return string\n\n def render(self, render):\n for track in self.tracks.values():\n geom = track.get_geometry()\n render.attachNewNode(geom)\n","sub_path":"src/layout/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263121949","text":"default_app_config = 'emitter.AppConfig'\n\nfrom django.apps import AppConfig, apps\n\nclass AppConfig(AppConfig): # Our app config class\n name = 'emitter'\n verbose_name = 'Emitter'\n\n def ready(self):\n for app in apps.app_configs.keys():\n try:\n __import__('%s.listeners' % app)\n except ImportError:\n pass\n if LISTENERS:\n print('\\n[listeners]')\n for listener in LISTENERS.keys():\n print('\\t*\\t%s' % listener)\n print('\\n')\n\nLISTENERS = {}\n\ndef addListener(*args, **kwargs):\n \"\"\"\n Create listener (decorator).\n \"\"\"\n def registerListener(*args, **kwargs):\n def wrap_function(fun):\n channel = args[0]\n LISTENERS[channel] = fun\n return wrap_function\n\n if len(args) == 1 and callable(args[0]):\n raise Exception('invalid listener: no args or kwargs')\n return registerListener(*args, **kwargs)\n\ndef callListener(channel, *args, **kwargs):\n return LISTENERS[channel](*args, **kwargs)","sub_path":"backend/emitter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"68803258","text":"# https://leetcode.com/problems/powx-n/\n\nclass Solution(object):\n def myPow(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n\n if not n:\n return 1\n if n < 0:\n return 1 / self.myPow(x, -n)\n if n % 2:\n return x * self.myPow(x, n - 1)\n return self.myPow(x * x, n / 2)\n\n\ndef main():\n solution = Solution()\n\n x = 2.2\n n = 2\n print(solution.myPow(x, n))\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"leetcode_py/50_pow_xn/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89971509","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 4 19:38:51 2018\n\n@author: vr308\n\"\"\"\nfrom mpl_toolkits.mplot3d import Axes3D \nimport matplotlib.pylab as plt\nimport scipy.interpolate as intp\nfrom matplotlib import cm\nimport random\nimport numpy as np\n\n#np.square(np.sin(X))*np.cos(Y) + Y*np.sin(X)\n#X*np.exp(-np.square(X) - np.square(Y))\n\ndef sample_surface(X,Y):\n \n return((np.subtract(X,1)*np.exp(-np.square(np.subtract(X,1)) - np.square(Y)) + np.cos(X)*np.exp(-np.square(np.add(Y,1)) - np.square(X)) + np.exp(-np.square(np.add(X,2)) - np.square(np.add(Y,-2)))))\n\nx = y = np.linspace(-4,4,1000)\nX, Y = np.meshgrid(x,y)\nZ = sample_surface(X,Y)\n\nx_sample = random.sample(set(x),100)\ny_sample = random.sample(set(y),100)\nz_sample = sample_surface(x_sample,y_sample) + 0.2*np.random.randn(100)\n\n# 3D rendering of the function along with scatter\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.scatter(x_sample,y_sample,z_sample, c='k', depthshade=False)\nax.plot_surface(X,Y,Z, cmap=cm.get_cmap('jet'), alpha=0.8)\n\n# 2D contour of the mathematical function along with scatter\n\nplt.figure()\nplt.contourf(X,Y,Z, cmap=cm.get_cmap('jet'))\nplt.plot(x_sample,y_sample,'ko')\n\n# Multivariate interpolation using rbf technique applied to scatter data\n\nrbf_interp = intp.Rbf(x_sample,y_sample,z_sample,smooth=0.2, epsilon=1)\nX_int, Y_int = np.meshgrid(np.linspace(-4,4,100), np.linspace(-4,4,100)) \nZ_interp = rbf_interp(X_int,Y_int)\n\n# Multivariate interpolation using nearest neighbour technique applied to scatter data\n\ninputs = np.array([x_sample,y_sample]).reshape(100,2)\noutputs = z_sample\nninterp = intp.NearestNDInterpolator(inputs, outputs)\nZ_interp = ninterp(X_int,Y_int)\n\n# Plot the interpolated surface\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.scatter(x_sample,y_sample,z_sample, c='k', depthshade=False)\nax.plot_surface(X_int,Y_int,Z_interp, cmap=cm.get_cmap('jet'), alpha=0.8)\n\n# Plot the contour \nplt.figure()\nplt.contourf(X,Y,Z, cmap=cm.get_cmap('jet'))\nplt.plot(x_sample,y_sample,'ko')\n\nplt.figure()\nplt.contourf(X_int,Y_int,Z_interp, cmap=cm.get_cmap('jet'))\nplt.plot(x_sample,y_sample,'ko')\n\n\n","sub_path":"Visualisation/surfaces.py","file_name":"surfaces.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"398916545","text":"from rest_framework.reverse import reverse\n\nCERTIFICATE_LIST_API_URL = reverse(\"certificate-list\")\n\n\ndef certificate_detail_api_url(certificate_id):\n return reverse('certificate-detail', args=[certificate_id])\n\n\ndef sample_certificate_payload(**params):\n defaults = {\n \"issuer\": \"test\",\n \"title\": \"test\",\n \"certificate_id\": \"test\",\n \"certificate_url\": \"http://test.com\",\n \"valid_until\": None,\n \"does_not_expire\": True\n }\n\n defaults.update(params)\n\n return defaults\n\n\ndef new_certificate_payload():\n payload = {\n \"issuer\": \"newissuer\",\n \"title\": \"newtitle\",\n \"certificate_id\": \"newid\",\n \"certificate_url\": \"http://newtest.com\",\n \"valid_until\": None,\n \"does_not_expire\": True\n }\n\n return 
payload\n","sub_path":"src/tests/utils/api/certificate.py","file_name":"certificate.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"319761910","text":"from flask import Flask, render_template, flash, request\nfrom wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField\nfrom pymongo import MongoClient\nfrom flask import jsonify\n# pprint library is used to make the output look more pretty\nfrom pprint import pprint\n# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string\nmyclient = MongoClient(\"mongodb://10.0.0.13:27017/\")\nmydb = myclient[\"cloudapp\"]\nmycol = mydb[\"messages\"]\n# Issue the serverStatus command and print the results\n\n# App config.\nDEBUG = True\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'\n\n\nclass ReusableForm(Form):\n name = TextField('Message:', validators=[validators.required()])\n\n\n@app.route(\"/send\", methods=['GET', 'POST'])\ndef send():\n data = request.get_data()\n print(data)\n message = data.decode(\"utf-8\").split(\"=\")[1]\n result = mycol.insert_one({'message': message})\n form = ReusableForm(request.form)\n return render_template('hello.html', form=form)\n\n@app.route(\"/retreive\", methods=['GET', 'POST'])\ndef retreive():\n messages = {}\n count = 0\n for x in mycol.find():\n count += 1\n messages[str(count)] = x['message']\n return jsonify(messages)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='50001')\n","sub_path":"rianaAssignment3Pt2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"388020150","text":"from typing import Optional\nfrom utils.measure import checker\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\ndef printl(l: ListNode) -> None:\n while l:\n print(l.val, end=' ')\n l = l.next\n print('')\n\n\ndef make(l: list[int]) -> ListNode | None:\n head = ListNode()\n curr = head\n\n for val in l:\n curr.next = ListNode(val)\n curr = curr.next\n\n return head.next\n\n\ndef unmake(ln: ListNode | None) -> list[int]:\n res = []\n while ln:\n res.append(ln.val)\n ln = ln.next\n return res\n\n\nclass Solution:\n\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n merged = ListNode()\n curr = merged\n\n while list1 and list2:\n if list1.val < list2.val:\n curr.next = list1\n list1 = list1.next\n else:\n curr.next = list2\n list2 = list2.next\n curr = curr.next\n\n curr.next = list1 or list2\n\n return merged.next\n\n def wrapped(self, list1: list[int], list2: list[int]) -> list[int]:\n return unmake(self.mergeTwoLists(make(list1), make(list2)))\n\n\nif __name__ == '__main__':\n with checker(Solution().wrapped, repeat=0, check_success=True) as c:\n c.check_2([1, 2, 4], [1, 3, 4], [1, 1, 2, 3, 4, 4])\n c.check_2([], [], [])\n c.check_2([], [0], [0])\n c.check_2([1, 2, 4, 6, 7, 8], [1, 3, 4], [1, 1, 2, 3, 4, 4, 6, 7, 8])\n","sub_path":"algorithms/l/studyplan/mergesortedlists.py","file_name":"mergesortedlists.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585819886","text":"#!/usr/bin/env python3\n\nimport sys\nimport requests\nfrom urllib.parse import urlparse\nfrom bs4 
import BeautifulSoup\n\n\ndef validate_url(url):\n \"Check if URL is valid.\"\n parsed_url = urlparse(url)\n if parsed_url.scheme and parsed_url.netloc:\n return True\n\n return False\n\n\ndef get_url_scheme(url):\n \"Get the scheme of a URL.\"\n parsed_url = urlparse(url)\n return parsed_url.scheme\n\n\ndef get_url_netloc(url):\n \"Extract the NETwork LOCality (netloc) out of a URL.\"\n parsed_url = urlparse(url)\n return parsed_url.netloc\n\n\ndef evaluate_cmd():\n \"\"\"\n Evaluate the command line arguments or exit with help printed if none\n given.\n \"\"\"\n if len(sys.argv) <= 1:\n print(__doc__)\n sys.exit(0)\n\n if len(sys.argv) > 2:\n print(\"The script takes only 1 argument - a URL!\")\n\n url_arg = sys.argv[1] # safe to index now that we know an argument exists\n\n if not validate_url(url_arg):\n print(\n \"\\n\\n\\nERROR: The URL '{}' {}!\\n\\n\\n\".format(\n url_arg,\n \"you have given is invalid\"\n )\n )\n sys.exit(1)\n\n return url_arg\n\n\ndef is_tag_visible(tag):\n \"Checks if a given element is visible in the article\"\n style = tag.attrs.get('style', False)\n if style and ('hidden' in style or 'display: none' in style or 'display:none' in style):\n return False\n\n parent = tag.parent\n if parent and not is_tag_visible(parent):\n return False\n\n return True\n\n\ndef remove_footer(content):\n \"Removes footer of an article, if such exists.\"\n footer = content.find('footer')\n\n if not footer:\n footer = content.find('div', class_='footer')\n\n if footer: # only decompose when a footer was actually found\n footer.decompose()\n\n return content\n\n\ndef download_img(url, filename):\n \"Downloads image\"\n request = requests.get(url)\n if request.ok:\n with open(filename, 'wb') as img_file:\n img_file.write(request.content)\n\n\nif __name__ == \"__main__\":\n target_url = evaluate_cmd()\n response = requests.get(target_url)\n\n if not response.ok:\n print(\"\\n\\n\\nERROR: Status code: {}!\\n\\n\\n\".format(response.status_code))\n sys.exit(2)\n\n content = response.text\n content_parsed = BeautifulSoup(content, \"html.parser\")\n content_parsed = remove_footer(content_parsed)\n title = content_parsed.title.string\n print(title)\n\n articles = content_parsed.find_all('article')\n paragraphs = content_parsed.find_all('p')\n if articles:\n for article in articles:\n if(is_tag_visible(article)):\n print(article.prettify())\n else:\n for p in paragraphs:\n if(is_tag_visible(p)):\n print(p.prettify())\n\n for img in content_parsed.find_all('img'):\n img_src = img.get('src')\n if not validate_url(img_src):\n img_src = ''.join([\n get_url_scheme(target_url),\n \"://\",\n get_url_netloc(target_url),\n img_src\n ])\n if not validate_url(img_src):\n continue # bare 'next' was a no-op expression; skip this image instead\n download_img(img_src, \"./test.jpg\")\n break\n","sub_path":"fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"505152153","text":"from django.shortcuts import render\nfrom .Run_Requests import *\nfrom .Profs import *\n\n\n# Create your views here.\ndef parse_modle_1_request(user_input:dict):\n proof_list = []\n for key in user_input:\n print(user_input[key])\n proof_list.append(Prof(key, user_input[key][0], user_input[key][1]))\n return proof_list\n\nkeyslist=[]\nvalueslist=[]\nprofslist=[]\n\n\ndef get_scores(request):\n\t# if this is a POST request we need to process the form data\n\tif request.method == 'POST':\n #print('request is POST')\n #print(type(request.POST)) \n #print(type(request.POST.lists()))\n #print(request.POST.lists())\n #print(parse_modle_1_request(request.POST.dict()) \n for key, value in 
request.POST.lists():\n print(key)\n print(value)\n \n if key == 'psyhco':\n #continue\n psycho = (value[0], value[1], value[2], value[3])\n \n else:\n keyslist.append(key)\n valueslist.append(value)\n \n #print(keyslist)\n #print(valueslist)\n for i in valueslist:\n i.insert(0, keyslist[valueslist.index(i)])\n #print(valueslist)\n for i in valueslist:\n print(i)\n this_prof=Prof(i[0],i[1],i[2])\n profslist.append(this_prof)\n print(profslist)\n print(psycho)\n all_results=main(profslist,psycho)\n \n \n return render(request, 'module_3.html', {\n'Tech_avg_bagrut_score': all_results[0].bagrot_score,\n'Tech_optimal_bagrut_score':all_results[0].optimal_bagrot_regex,\n'Tech_sechem_score':all_results[0].sechem_score,\n'Tech_accepted_majors':all_results[0].accepted_majors,\n'huji_avg_bagrut_score':all_results[1].bagrut_score,\n'huji_accepted_majors':all_results[1].accepted_str,\n'TLV_average_bagrut_score':all_results[2].bagrut_score,\n'TLV_sechem_score':all_results[2].sechem,\n'TLV_accepted_majors':all_results[2].accepted_str,\n'BGU_bagrut_score':all_results[3].bagrot_score,\n'BGU_scheme_score':all_results[3].scheme_score,\n#'':,\n})","sub_path":"Tziunim_server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"138384113","text":"#-*-coding:utf-8-*-\n# count how many upper-case and lower-case letters a string contains\ndef string_test(s):\n d = {\"UPPER_CASE\":0, \"LOWER_CASE\":0} # a dict holds both counts as key/value pairs: upper -> count, lower -> count\n for c in s: # walk over every character in s\n if c.isupper(): # the character is upper case\n d[\"UPPER_CASE\"] += 1 # increment the upper-case count\n elif c.islower():# the character is lower case\n d[\"LOWER_CASE\"] += 1 # increment the lower-case count\n # else:\n # pass\n print(\"Original String :\", s)\n print(\"No.of Upper case characters :\", d[\"UPPER_CASE\"])\n print(\"No.of Lower case Characters :\", d[\"LOWER_CASE\"])\n\nstring_test(\"I am a Student.\")# count the upper- and lower-case letters\n","sub_path":"function/exercise/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"277638774","text":"import appuifw\nimport btsocket as socket\nimport e32\n\nclass BTReader:\n\tdef connect(self):\n\t\tarduino_addr='00:19:a4:02:44:2a' #add your arduino BT address here\n\t\tself.sock=socket.socket(socket.AF_BT, socket.SOCK_STREAM)\n\t\ttarget=(arduino_addr,1) # serial connection to arduino BT\n\t\tself.sock.connect(target)\n\t\t\n\tdef readline(self):\n\t\tline=[]\n\t\twhile 1:\n\t\t\tch=self.sock.recv(1)\n\t\t\tif(ch==\"A\"):\n\t\t\t\tbreak\n\t\t\tline.append(ch)\n\t\treturn ''.join(line)\n\tdef writechar(self, ch):\n\t\t# ch=\"1\";\n\t\tself.sock.send(ch)\n\tdef close(self):\n\t\tself.sock.close()\n\nbt=BTReader()\nbt.connect()\n\nmode = appuifw.query(u\"MODE\",\"number\")\nred_val = appuifw.query(u\"RED\",\"number\")\ngreen_val = appuifw.query(u\"GREEN\",\"number\")\nblue_val = appuifw.query(u\"BLUE\",\"number\")\ndelay_val = appuifw.query(u\"Delay\",\"number\")\n\nm_str = \"m\" + str(mode)\nbt.sock.send(m_str)\n\nr_str = \"r\" + chr(int(red_val))\nbt.sock.send(r_str)\n\ng_str = \"g\" + chr(int(green_val))\nbt.sock.send(g_str)\n\nb_str = \"b\" + chr(int(blue_val))\nbt.sock.send(b_str)\n\nd_str = \"d\" + chr(int(delay_val))\nbt.sock.send(d_str)","sub_path":"GUI/Mobile/SymbainS60/RGB_led_controller.py","file_name":"RGB_led_controller.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"377528016","text":"'''\nAuthor: your name\nDate: 
2021-05-23 11:03:59\nLastEditTime: 2021-05-23 11:15:47\nLastEditors: Please set LastEditors\nDescription: In User Settings Edit\nFilePath: \\Study\\MagePython\\Chapter5\\5-2-5.py\n'''\nimport datetime\nimport time\n\ndef logger(fn):\n def wrap(*args,**kwargs):\n # runs before the wrapped function \n print(\"args={},kwargs={}\".format(args,kwargs))\n start = datetime.datetime.now()\n ret = fn(*args,**kwargs)\n # runs after the wrapped function returns\n delta= datetime.datetime.now()-start\n print(\"function {} took {}s.\".format(fn.__name__,delta.total_seconds()))\n return ret\n return wrap\n\n@logger\ndef add(x,y):\n print(\"===call add =========================\")\n time.sleep(6)\n return x + y\n\nprint(add(4,y=7))\n ","sub_path":"MagePython/Chapter5/5-2-5.py","file_name":"5-2-5.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"607182763","text":"import math\n\ndef miniWindow(arr):\n low, high = 0, len(arr) - 1\n while low < len(arr) - 1 and arr[low] <= arr[low + 1]:\n low += 1\n if low == len(arr) - 1:\n return 0 # the whole array is already sorted\n while high > 0 and arr[high] >= arr[high - 1]:\n high -= 1\n if high == 0:\n return len(arr)\n \n minNum = math.inf\n maxNum = -math.inf\n for k in range(low, high + 1):\n minNum = min(minNum, arr[k])\n maxNum = max(maxNum, arr[k])\n \n while low > 0 and arr[low - 1] > minNum:\n low -= 1\n while high < len(arr) - 1 and arr[high + 1] < maxNum:\n high += 1\n\n return high - low + 1\n\ndef main():\n print(miniWindow([1,2,5,3,7,10,9,12]))\n\nmain()","sub_path":"Algorithm/src/twopointers/C3_M_MiniWindowSort/C3_M_MiniWindowSort.py","file_name":"C3_M_MiniWindowSort.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"383398264","text":"import argparse\n\nimport numpy as np\nimport torch\nfrom torch import optim\nfrom torchvision import utils\nfrom tqdm import tqdm\n\nfrom model import Glow\nfrom samplers import memory_mnist, memory_fashion, celeba, ffhq_gan_32, cifar_horses_40, ffhq_50, cifar_horses_20, cifar_horses_80, mnist_30, mnist_gan_all, mnist_pad, cifar_horses_20_top, cifar_horses_40_top, cifar_horses_20_top_small_lr, cifar_horses_40_top_small_lr, arrows_small, arrows_big, cifar_20_picked_inds_2, cifar_40_picked_inds_2, cifar_20_picked_inds_3, cifar_40_picked_inds_3\nfrom utils import (\n net_args,\n calc_z_shapes,\n calc_loss,\n string_args,\n)\n\nparser = net_args(argparse.ArgumentParser(description=\"Glow trainer\"))\n\n\ndef train(args, model, optimizer):\n if args.dataset == \"mnist\":\n dataset_f = memory_mnist\n elif args.dataset == \"fashion_mnist\":\n dataset_f = memory_fashion\n elif args.dataset == \"celeba\":\n dataset_f = celeba\n elif args.dataset == \"ffhq_gan_32\":\n dataset_f = ffhq_gan_32\n elif args.dataset == \"cifar_horses_40\":\n dataset_f = cifar_horses_40\n elif args.dataset == \"ffhq_50\":\n dataset_f = ffhq_50\n elif args.dataset == \"cifar_horses_20\":\n dataset_f = cifar_horses_20\n elif args.dataset == \"cifar_horses_80\":\n dataset_f = cifar_horses_80\n elif args.dataset == \"mnist_30\":\n dataset_f = mnist_30\n elif args.dataset == \"mnist_gan_all\":\n dataset_f = mnist_gan_all\n elif args.dataset == \"mnist_pad\":\n dataset_f = mnist_pad\n elif args.dataset == \"cifar_horses_20_top\":\n dataset_f = cifar_horses_20_top\n elif args.dataset == \"cifar_horses_40_top\":\n dataset_f = cifar_horses_40_top\n elif args.dataset == \"cifar_horses_20_top_small_lr\":\n dataset_f = 
cifar_horses_20_top_small_lr\n elif args.dataset == \"cifar_horses_40_top_small_lr\":\n dataset_f = cifar_horses_40_top_small_lr\n elif args.dataset == \"arrows_small\":\n dataset_f = arrows_small\n elif args.dataset == \"arrows_big\":\n dataset_f = arrows_big\n elif args.dataset == \"cifar_20_picked_inds_2\":\n dataset_f = cifar_20_picked_inds_2\n elif args.dataset == \"cifar_40_picked_inds_2\":\n dataset_f = cifar_40_picked_inds_2\n elif args.dataset == \"cifar_40_picked_inds_3\":\n dataset_f = cifar_40_picked_inds_3\n elif args.dataset == \"cifar_20_picked_inds_3\":\n dataset_f = cifar_20_picked_inds_3\n else:\n raise ValueError(\"Unknown dataset:\", args.dataset)\n\n repr_args = string_args(args)\n n_bins = 2.0 ** args.n_bits\n\n z_sample = []\n z_shapes = calc_z_shapes(args.n_channels, args.img_size, args.n_flow, args.n_block)\n for z in z_shapes:\n z_new = torch.randn(args.n_sample, *z) * args.temp\n z_sample.append(z_new.to(device))\n\n epoch_losses = []\n f_train_loss = open(f\"losses/losses_train_{repr_args}_.txt\", \"a\", buffering=1)\n f_test_loss = open(f\"losses/losses_test_{repr_args}_.txt\", \"a\", buffering=1)\n\n last_model_path = f\"checkpoint/model_{repr_args}_last_.pt\"\n try:\n model.load_state_dict(torch.load(last_model_path))\n model.eval()\n f_epoch = open(f\"checkpoint/last_epoch_{repr_args}.txt\", \"r\", buffering=1)\n epoch_n = int(f_epoch.readline().strip())\n f_epoch.close()\n except FileNotFoundError:\n print(\"Training the model from scratch.\")\n epoch_n = 0\n\n with tqdm(range(epoch_n, args.epochs + epoch_n)) as pbar:\n for i in pbar:\n repr_args = string_args(args)\n train_loader, val_loader, train_val_loader = dataset_f(\n args.batch, args.img_size, args.n_channels\n )\n train_losses = []\n for image in train_loader:\n if isinstance(image, list):\n image = image[0]\n optimizer.zero_grad()\n image = image.to(device)\n noisy_image = image\n if args.tr_dq:\n noisy_image += torch.rand_like(image) / n_bins\n noisy_image += torch.randn_like(image) * args.delta\n log_p, logdet, _ = model(noisy_image)\n\n logdet = logdet.mean()\n loss, log_p, log_det = calc_loss(\n log_p, logdet, args.img_size, n_bins, args.n_channels\n )\n loss.backward()\n optimizer.step()\n train_losses.append(loss.item())\n current_train_loss = np.mean(train_losses)\n print(f\"{current_train_loss},{args.delta},{i + 1}\", file=f_train_loss)\n with torch.no_grad():\n utils.save_image(\n model.reverse(z_sample).cpu().data,\n f\"sample/sample_{repr_args}_{str(i + 1).zfill(6)}.png\",\n normalize=True,\n nrow=10,\n range=(-0.5, 0.5),\n )\n losses = []\n logdets = []\n logps = []\n for image in val_loader:\n if isinstance(image, list):\n image = image[0]\n image = image.to(device)\n log_p, logdet, _ = model(image)\n logdet = logdet.mean()\n loss, log_p, log_det = calc_loss(\n log_p, logdet, args.img_size, n_bins, args.n_channels\n )\n losses.append(loss.item())\n logdets.append(log_det.item())\n logps.append(log_p.item())\n pbar.set_description(\n f\"Loss: {np.mean(losses):.5f}; logP: {np.mean(logps):.5f}; logdet: {np.mean(logdets):.5f}; delta: {args.delta:.5f}\"\n )\n current_loss = np.mean(losses)\n print(f\"{current_loss},{args.delta},{i + 1}\", file=f_test_loss)\n epoch_losses.append(current_loss)\n # early stopping\n if len(epoch_losses) >= 20 and epoch_losses[-20] < min(epoch_losses[-19:]):\n break\n '''\n too much space\n if (i + 1) % 5 == 0:\n torch.save(\n model.state_dict(), f\"checkpoint/model_{repr_args}_{i + 1}_.pt\"\n )\n '''\n torch.save(model.state_dict(), last_model_path)\n f_epoch 
= open(\n f\"checkpoint/last_epoch_{repr_args}.txt\", \"w\", buffering=1\n )\n f_epoch.write(str(i + 1))\n f_epoch.close()\n\n f_ll = open(f\"ll/ll_{repr_args}_{i + 1}.txt\", \"w\")\n train_loader, val_loader, train_val_loader = dataset_f(\n args.batch, args.img_size, args.n_channels\n )\n train_val_loader = iter(train_val_loader)\n for image_val in val_loader:\n image = image_val\n if isinstance(image, list):\n image = image[0]\n image = image.to(device)\n log_p_val, logdet_val, _ = model(image)\n\n image = next(train_val_loader)\n if isinstance(image, list):\n image = image[0]\n image = image.to(device)\n log_p_train_val, logdet_train_val, _ = model(image)\n\n for (\n lpv,\n ldv,\n lptv,\n ldtv,\n ) in zip(log_p_val, logdet_val, log_p_train_val, logdet_train_val):\n print(\n args.delta,\n lpv.item(),\n ldv.item(),\n lptv.item(),\n ldtv.item(),\n file=f_ll,\n )\n f_ll.close()\n f_train_loss.close()\n f_test_loss.close()\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n print(string_args(args))\n device = args.device\n model = Glow(\n args.n_channels,\n args.n_flow,\n args.n_block,\n affine=args.affine,\n conv_lu=not args.no_lu,\n )\n model = model.to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n train(args, model, optimizer)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650911909","text":"import cv2\nimport cv2.cv as cv\n\n\nidc_hl = int(62)\nidc_sl = int(100)\nidc_vl = int(50)\n\nidc_hh = int(82)\nidc_sh = int(200)\nidc_vh = int(200)\n\nidc_green_range = (cv.Scalar(idc_hl,idc_sl,idc_vl), cv.Scalar(idc_hh,idc_sh,idc_vh))\n\nridc_hl = int(172)\nridc_sl = int(180)\nridc_vl = int(150)\n\nridc_hh = int(7)\nridc_sh = int(220)\nridc_vh = int(254)\n\nidc_red_range = (cv.Scalar(ridc_hl,ridc_sl,ridc_vl), cv.Scalar(ridc_hh,ridc_sh,ridc_vh))\n\nyidc_hl = int(20)\nyidc_sl = int(133)\nyidc_vl = int(121)\n\nyidc_hh = int(27)\nyidc_sh = int(193)\nyidc_vh = int(243)\n\nidc_yellow_range = (cv.Scalar(yidc_hl,yidc_sl,yidc_vl), cv.Scalar(yidc_hh,yidc_sh,yidc_vh))\n\n# Reduce image size\ndef bgr_to_hsv(img):\n\t# Convert from BGR to HSV\n\thsv = cv.CreateImage(cv.GetSize(img), 8, 3)\n\tcv.CvtColor(img, hsv, cv.CV_BGR2HSV)\n\treturn hsv\n\ndef threshold_green_balls(img):\n\t# Threshold the img in hsv space for green\n\timg_thresh = cv.CreateImage(cv.GetSize(img), 8, 1)\n\tcv2.cv.InRangeS(img, idc_green_range[0], idc_green_range[1], img_thresh)\n\treturn img_thresh\n\ndef threshold_red_balls(img):\n\t# Threshold the img in hsv space for green\n\timg_thresh = cv.CreateImage(cv.GetSize(img), 8, 1)\n\tcv2.cv.InRangeS(img, idc_red_range[0], idc_red_range[1], img_thresh)\n\treturn img_thresh","sub_path":"b2/thalamic_modules.py","file_name":"thalamic_modules.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394067658","text":"\"\"\"\n207. 
Course Schedule\n- Medium\n- DFS, BFS, Graph, Topological Sort\n- Link: https://leetcode.com/problems/course-schedule/\n\"\"\"\nfrom typing import List\n\n\n# Solution 1: Topological Sort\n# Time: O(E + V), where V is the number of vertices and E is the number of edges | Space: O(E + V)\nclass GNode(object):\n    def __init__(self):\n        self.inDegrees = 0\n        self.outNodes = []\n\n\nclass Solution:\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        from collections import defaultdict, deque\n\n        graph = defaultdict(GNode)\n\n        # Build the graph and count the total number of dependencies\n        totalDeps = 0\n        for relation in prerequisites:\n            nextCourse, prevCourse = relation[0], relation[1]\n            graph[prevCourse].outNodes.append(nextCourse)\n            graph[nextCourse].inDegrees += 1\n            totalDeps += 1 # total dependencies\n\n        # Record the starting nodes (nodes with no incoming edges)\n        nodepCourses = deque()\n        for idx, node in graph.items():\n            if node.inDegrees == 0:\n                nodepCourses.append(idx)\n\n        # Starting from those nodes, remove edges/dependencies along their outNodes\n        removedEdges = 0\n        while nodepCourses:\n            course = nodepCourses.pop()\n\n            for nextCourse in graph[course].outNodes:\n                graph[nextCourse].inDegrees -= 1\n                removedEdges += 1\n\n                # A course becomes available once its inDegrees drops to 0; nodes that never reach 0 indicate a possible cycle\n                if graph[nextCourse].inDegrees == 0:\n                    nodepCourses.append(nextCourse)\n\n        # If the number of removed edges equals the dependency count, the graph contains no cycle\n        if removedEdges == totalDeps:\n            return True\n        else:\n            return False\n","sub_path":"leetcode/graph/207_course_schedule.py","file_name":"207_course_schedule.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"4562753","text":"from collections import Counter\nX = int(input())\nshoe_sizes = Counter(map(int, input().split()))\nN = int(input())\narr = []\nfor i in range(N):\n    arr_demand = [int(a) for a in input().split()]\n    arr.append(arr_demand)\n\nearn = 0\nfor i in arr:\n    size = i[0]\n    if shoe_sizes[size]:\n        earn += i[1]\n        shoe_sizes[size] -= 1\n\n# print(shoe_sizes)\n# print(arr)\nprint(earn)\n\n# similar but simpler one\nimport collections\nnumShoes = int(input())\nshoes = collections.Counter(map(int, input().split()))\nnumCust = int(input())\nincome = 0\nfor i in range(numCust):\n    size, price = map(int, input().split())\n    if shoes[size]:\n        income += price\n        shoes[size] -= 1\nprint(income)","sub_path":"HR/HR_Py_Coll_Cntr.py","file_name":"HR_Py_Coll_Cntr.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"242323022","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"search/\", views.search, name=\"search\"),\n path(\"random/\", views.random_page, name=\"random_page\"),\n path(\"editor/\", views.editor, name=\"editor\"),\n path(\"wiki/\", views.wiki, name=\"entry-detail\")\n]\n","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240235504","text":"# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-18 16:31:14\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-09-21 23:32:28\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef l1_r(input, target, weight=None, size_average=True):\n relation=[]\n loss=nn.MSELoss()\n\n for i in range(3):\n target=torch.reshape(target,(input[i].shape))\n #print(target.shape)\n t=loss(input[i],target)\n #print(t.item())\n relation.append(t)\n\n return relation\ndef l1_a(input, target, weight=None, size_average=True):\n relation=[]\n loss=nn.MSELoss()\n\n for i in range(4):\n target=torch.reshape(target,(input[i].shape))\n #print(target.shape)\n t=torch.sqrt(loss(input[i],target))\n #print(t.item())\n relation.append(t)\n\n return relation\ndef log_r(input, target, weight=None, size_average=True):\n relation=[]\n d=[]\n out=[]\n target=torch.reshape(target,(input[0].shape))\n target=torch.log(target+1e-6)\n loss=nn.MSELoss()\n for i in range(3):\n # pre=input[i]\n # num=torch.sum(torch.where(pre>0,torch.ones_like(pre),torch.zeros_like(pre)))/torch.sum(torch.ones_like(pre))\n # print(num)\n input[i]=torch.log(input[i]+1e-6) \n relation.append(loss(input[i],target))\n #d.append(0.5*torch.pow(torch.sum(input[i]-target),2)/torch.pow(torch.sum(torch.ones_like(input[i])),2))\n #out.append(relation[i]-d[i])\n return relation \ndef cross_entropy2d(input, target, weight=None, size_average=True):\n n, c, h, w = input.size()\n #print(c,target.max().data.cpu().numpy())\n\n log_p = F.log_softmax(input, dim=1)\n log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)\n log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]\n log_p = log_p.view(-1, c)\n\n mask = target >= 0\n target = target[mask]\n #loss=log_p.sum()\n loss = F.nll_loss(log_p, target,\n weight=weight, size_average=False)\n #print(loss, mask.data.sum())\n if size_average:\n # print(mask.data.sum())\n loss /= mask.data.sum()\n # loss=loss/(950*540)\n return loss\ndef l1(input, target,mask, weight=None, size_average=True):\n # P1=mask[...,0].cuda(0)\n # P2=mask[...,3].cuda(0)\n one,zero=torch.ones(1).cuda(0),torch.zeros(1).cuda(0)\n mask=torch.where(input>0,one,zero)\n #print(torch.sum(mask)/(input.shape[0]*input.shape[1]))\n mask=torch.reshape(mask,(input.shape))\n target=torch.reshape(target,(input.shape))\n loss=nn.L1Loss(reduction='none')\n #loss=nn.MSELoss(reduction='none')\n #print(torch.sqrt(loss(input,target)).shape)\n relation=torch.sum(mask*loss(input,target))/torch.sum(mask)\n #print(torch.max(torch.where(input>zero,input,zero)),torch.min(torch.where(P1>zero,input,192*one)))\n #relation=torch.sum(torch.sqrt(mask*loss(input,target)))/torch.sum(mask)\n return relation\ndef l2(input, target, weight=None, size_average=True):\n target=torch.reshape(target,(input.shape))\n #print(input.shape)\n #print(target.shape)\n # num=torch.sum(torch.where(input==0,torch.ones_like(input),torch.zeros_like(input)))\n # positive=num/torch.sum(torch.ones_like(input))\n 
#print(positive.item())\n loss=nn.MSELoss()\n relation=loss(input,target)\n #mean=torch.abs(torch.mean(input)-torch.mean(target))\n #print(\"pre_depth:%.4f,ground_depth:%.4f\"%(torch.mean(input[1]).data.cpu().numpy().astype('float32'),torch.mean(target).data.cpu().numpy().astype('float32')))\n #output=relation+0.2*mean\n return relation\ndef log_loss(input, target, weight=None, size_average=True):\n # num=torch.sum(torch.where(input==0,torch.ones_like(input),torch.zeros_like(input)))\n # positive=num/torch.sum(torch.ones_like(input))\n # print(positive.item())\n target=torch.reshape(target,(input.shape))\n loss=nn.MSELoss() \n input=torch.log(input+1e-12) \n target=torch.log(target+1e-12) \n #relation=torch.sqrt(loss(input,target)) \n relation=loss(input,target) \n d=0.5*torch.pow(torch.sum(input-target),2)/torch.pow(torch.sum(torch.ones_like(input)),2)\n #relation=relation-d \n return relation\n\n # target=torch.reshape(target,(input.shape))\n # #loss=nn.MSELoss()\n # num=torch.sum(torch.where(input>0,torch.ones_like(input),torch.zeros_like(input)))\n # input=torch.log(torch.where(input>0,input,torch.ones_like(input)))\n # target=torch.log(torch.where(target>0,target,torch.ones_like(target)))\n # # #relation=torch.sqrt(loss(input,target))\n # relation=torch.sum(torch.pow(torch.where(input==0,input,input-target),2))/num\n # d=torch.pow(torch.sum(torch.where(input==0,input,input-target)),2)/torch.pow(num,2)*0.5\n # #positive=num/torch.sum(torch.ones_like(input))\n # #print(positive.item())\n # #-torch.sum(torch.where(input<0,input,torch.zeros_like(input)))/num\n # losses=relation+d\n # return losses\n\ndef log_l1(input, target, weight=None, size_average=True):\n l1loss=l1(input,target)\n logloss=log_loss(input,target)\n num=torch.sum(torch.where(input==0,torch.ones_like(input),torch.zeros_like(input)))\n positive=num/torch.sum(torch.ones_like(input))\n print(positive.item())\n loss=(1-positive)*logloss+positive*l1loss\n return loss\ndef l1_kitti(input, target, weight=None, size_average=True):\n zero=torch.zeros_like(input)\n target=torch.reshape(target,(input.shape))\n input=torch.where(target>0,input,zero)\n target=torch.where(target>0,target,zero)\n loss=nn.MSELoss(size_average=False) \n relation=loss(input,target)/torch.sum(torch.where(target>0,torch.ones_like(input),zero))\n return relation\ndef log_kitti(input, target, weight=None, size_average=True):\n zero=torch.zeros_like(input)\n target=torch.reshape(target,(input.shape))\n loss=nn.MSELoss(size_average=False) \n input=torch.where(target>0,torch.log(input),zero)\n target=torch.where(target>0,torch.log(target),zero)\n\n #relation=torch.sqrt(loss(input,target)) \n relation=loss(input,target)/torch.sum(torch.where(target>0,torch.ones_like(input),zero))\n d=0.5*torch.pow(torch.sum(input-target),2)/torch.pow(torch.sum(torch.where(target>0,torch.ones_like(input),zero)),2)\n \n return relation-d \n# def region(input,target,instance):\n# loss=0\n# lf=nn.MSELoss(size_average=False,reduce=False)\n# target=torch.reshape(target,(input.shape))\n# instance=torch.reshape(instance,(input.shape))\n# zero=torch.zeros_like(input)\n# one=torch.ones_like(input)\n# dis=lf(input,target)\n# for i in range(0,int(torch.max(instance).item()+1)):\n# input_region=torch.where(instance==i,input,zero)\n# ground_region=torch.where(instance==i,target,zero)\n# m=torch.max(ground_region)\n# if m==0:\n# continue\n# num=torch.sum(torch.where(instance==i,one,zero))\n# loss+=lf(input_region,ground_region)/num\n# # average=torch.sum(input_region)/num\n# # 
input_region=input_region-average\n# # input_region=torch.pow(input_region,2)\n# # var=torch.sum(input_region)/num\n# # loss+=0.5*var\n# loss=loss/torch.max(instance)\n# return loss\n\n\ndef region(input,target,instance):\n loss=0\n lf=nn.MSELoss(size_average=False,reduce=False)\n target=torch.reshape(target,(input.shape))\n # input=torch.log(input+1e-12) \n # target=torch.log(target+1e-12) \n instance=torch.reshape(instance,(input.shape))\n zero=torch.zeros_like(input)\n one=torch.ones_like(input)\n dis=lf(input,target)\n for i in range(1,int(torch.max(instance).item()+1)):\n dis_region=torch.where(instance==i,dis,zero)\n num=torch.sum(torch.where(instance==i,one,zero))\n average=torch.sum(dis_region)/num\n loss=loss+average\n #dis_region=torch.where(instance==i,dis_region-average,zero)\n # var=0.1*torch.sqrt(torch.sum(torch.pow(dis_region,2))/num)/average\n # loss=loss+var\n loss=loss/(torch.max(instance))\n return loss\n\ndef region_log(input,target,instance):\n loss=0\n lf=nn.MSELoss(size_average=False,reduce=False)\n target=torch.reshape(target,(input.shape))\n input=torch.log(input+1e-6) \n target=torch.log(target+1e-6) \n instance=torch.reshape(instance,(input.shape))\n zero=torch.zeros_like(input)\n one=torch.ones_like(input)\n dis=lf(input,target)\n for i in range(1,int(torch.max(instance).item()+1)):\n dis_region=torch.where(instance==i,dis,zero)\n num=torch.sum(torch.where(instance==i,one,zero))\n average=torch.sum(dis_region)/num\n loss=loss+average\n # dis_region=torch.where(instance==i,dis_region-average,zero)\n # var=(torch.sum(torch.pow(dis_region,2))/num)/average\n # loss=loss+var\n loss=loss/(torch.max(instance))\n #print(torch.max(instance).item())\n return loss\n\n\ndef region_r(input,target,instance):\n loss=0\n relation=[]\n lf=nn.MSELoss(size_average=False,reduce=False)\n target=torch.reshape(target,(input[0].shape))\n \n target=torch.log(target+1e-6) \n instance=torch.reshape(instance,(input[0].shape))\n zero=torch.zeros_like(input[0])\n one=torch.ones_like(input[0])\n for i in range(3):\n input[i]=torch.log(input[i]+1e-6)\n dis=lf(input[i],target)\n for i in range(1,int(torch.max(instance).item()+1)):\n dis_region=torch.where(instance==i,dis,zero)\n num=torch.sum(torch.where(instance==i,one,zero))\n average=torch.sum(dis_region)/num\n loss=loss+average\n #print(torch.max(instance).item())\n relation.append(loss/(torch.max(instance)))\n loss=0\n return relation","sub_path":"back of code/CMF/cmf/loss-20181014143903.py","file_name":"loss-20181014143903.py","file_ext":"py","file_size_in_byte":9650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"509003424","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom decimal import Decimal\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('booking', '0012_auto_20150219_1918'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='cash',\n field=models.DecimalField(\n default=Decimal('0.0'), max_digits=6, decimal_places=2),\n preserve_default=True,\n ),\n ]\n","sub_path":"Booking/booking/migrations/0013_auto_20150220_1519.py","file_name":"0013_auto_20150220_1519.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197122088","text":"\ndef z_algorithm(s):\n \"\"\"\n Given a string S of length n, the Z Algorithm produces an array Z where Z[i] is the length of the 
longest substring\n    starting from S[i] which is also a prefix of S, i.e. the maximum k such that S[j] = S[i+j] for all 0<=j<k.\n    \"\"\"\n    n = len(s)\n    z = [0] * n\n    L, R = 0, 0\n    if n:\n        z[0] = n\n        for i in range(1, n):\n            if (i > R):\n                L = R = i\n                while R < n and s[R-L] == s[R]:\n                    R += 1\n                z[i] = R - L\n                R -= 1\n            else:\n                k = i - L\n                if z[k] < R - i + 1:\n                    z[i] = z[k]\n                else:\n                    L = i\n                    while R < n and s[R-L] == s[R]:\n                        R += 1\n                    z[i] = R - L\n                    R -= 1\n    return z\n","sub_path":"alg/z_algorithm.py","file_name":"z_algorithm.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"610959116","text":"import string\n\ndef filter_words(words, skip_words):\n    \"\"\"This function takes a list of words and returns a copy of the list from\n    which all words provided in the list skip_words have been removed.\n    For example:\n    \"\"\"\n    initial_input = words\n    new_input = []\n\n    for s in initial_input:\n        valid = True\n        for s_w in skip_words:\n            if(s == s_w):\n                valid = False\n        if (valid):\n            new_input = new_input + [s]\n    return new_input\n\n\n\n    \ndef remove_punct(text):\n    \"\"\"This function is used to remove all punctuation\n    marks from a string. Spaces do not count as punctuation and should\n    not be removed. The function takes a string and returns a new string\n    which does not contain any punctuation. For example:\n    \"\"\"\n    no_punct = \"\"\n    for char in text:\n        if not (char in string.punctuation):\n            no_punct = no_punct + char\n\n    return no_punct\n\n\ndef normalise_input(user_input):\n    \"\"\"This function removes all punctuation from the string and converts it to\n    lower case. It then splits the string into a list of words (also removing\n    any extra spaces between words) and further removes all \"unimportant\"\n    words from the list of words using the filter_words() function. The\n    resulting list of \"important\" words is returned. 
For example:\n \"\"\"\n # Remove punctuation and convert to lower case\n no_punct = remove_punct(user_input).lower()\n word_temp = \"\"\n word_list = []\n\n for ch in no_punct:\n if(ch != ' '):\n word_temp = word_temp + ch\n elif(ch == ' '):\n if (word_temp != \"\"):\n word_list = word_list + [word_temp]\n word_temp = \"\"\n\n if (word_temp != \"\"):\n word_list = word_list + [word_temp]\n \n word_list = filter_words(word_list, skip_words)\n\n return word_list\n\n","sub_path":"gameparser.py","file_name":"gameparser.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366648755","text":"from tkinter import *\r\nfrom PIL import Image, ImageTk\r\nimport requests, json, io, socket, threading\r\n\r\nclass Login:\r\n def __init__(self):\r\n self.root = Tk()\r\n self.root.title(\"Chat - Login\")\r\n #self.root.iconbitmap(\"icon.ico\")\r\n self.root.resizable(0, 0)\r\n self.root.configure(bg=\"#505050\", pady=20, padx=20)\r\n\r\n self.clLogin = Frame(self.root, bg=\"#505050\")\r\n self.clLogin.pack()\r\n\r\n self.login = ImageTk.PhotoImage(Image.open(\"Login.png\"))\r\n self.lLogin = Label(self.clLogin, font=(\"Verdana\", \"30\"), bg=\"#505050\", fg=\"#FFFFFF\", image=self.login, height=100)\r\n self.lLogin.pack()\r\n\r\n self.autenticado = Frame(self.root, bg=\"#505050\")\r\n self.autenticado.pack()\r\n\r\n self.lautenticar = Label(self.autenticado, bg=\"#505050\", fg=\"#FFFFFF\")\r\n self.lautenticar.pack()\r\n\r\n self.ccredenciais = Frame(self.root, bg=\"#505050\")\r\n self.ccredenciais.pack()\r\n\r\n self.cllcredenciais = Frame(self.ccredenciais, bg=\"#505050\")\r\n self.cllcredenciais.pack(side=LEFT)\r\n\r\n self.lmatricula = Label(self.cllcredenciais, font=(\"Verdana\", \"18\"), bg=\"#505050\", fg=\"#FFFFFF\")\r\n self.lmatricula[\"text\"] = \"Matrícula:\"\r\n self.lmatricula.pack()\r\n\r\n self.lSenha = Label(self.cllcredenciais, font=(\"Verdana\", \"18\"), bg=\"#505050\", fg=\"#FFFFFF\")\r\n self.lSenha[\"text\"] = \"Senha:\"\r\n self.lSenha.pack()\r\n\r\n self.ceecredenciais = Frame(self.ccredenciais, bg=\"#505050\", pady=5)\r\n self.ceecredenciais.pack()\r\n\r\n self.matricula = Entry(self.ceecredenciais, width=20, bg=\"#707070\", borderwidth=0, font=(\"Verdana\", \"15\"), insertbackground=\"#FFFFFF\", fg=\"white\")\r\n self.matricula.bind(\"\", self.autenticacao)\r\n self.matricula.pack()\r\n\r\n self.senha = Entry(self.ceecredenciais, width=20, bg=\"#707070\", borderwidth=0, show=\"●\", font=(\"Verdana\", \"15\"), insertbackground=\"#FFFFFF\", fg=\"white\")\r\n self.senha.bind(\"\", self.autenticacao)\r\n self.senha.pack(pady=(8,0))\r\n\r\n self.cAutenticar = Frame(self.root, bg=\"#505050\")\r\n self.cAutenticar.pack(pady=(15,0))\r\n\r\n self.autenticar = Button(self.cAutenticar, width=15, bg=\"#707070\", borderwidth=0, font=(\"Verdana\", \"18\"), fg=\"white\")\r\n self.autenticar[\"text\"] = \"Autenticar\"\r\n self.autenticar.bind(\"\", self.autenticacao)\r\n self.autenticar.pack()\r\n\r\n self.root.mainloop()\r\n\r\n def token(self):\r\n matricula = self.matricula.get()\r\n senha = self.senha.get()\r\n url_token = \"https://suap.ifrn.edu.br/api/v2/autenticacao/token/\"\r\n credenciais = {\r\n \"username\":matricula,\r\n \"password\":senha\r\n }\r\n response = requests.post(url_token, data=credenciais)\r\n if response.status_code == 200:\r\n tokenDir = json.loads(response.content.decode(\"utf-8\"))\r\n return tokenDir[\"token\"]\r\n else:\r\n return None\r\n\r\n def autenticacao(self, 
event):\r\n if self.token() == None:\r\n self.matricula.delete(0,END)\r\n self.senha.delete(0,END)\r\n self.lautenticar[\"text\"] = \"Usuário e/ou senha inválidos!\"\r\n \r\n else:\r\n self.recuperaDados()\r\n\r\n def recuperaDados(self):\r\n cabecalho = {\"Authorization\":\"JWT {}\".format(self.token())}\r\n url_dados = \"https://suap.ifrn.edu.br/api/v2/minhas-informacoes/meus-dados/\"\r\n self.dadosDir = requests.get(url_dados, headers=cabecalho)\r\n self.dadosDir = json.loads(self.dadosDir.content.decode(\"utf-8\"))\r\n self.imagem = requests.get(\"https://suap.ifrn.edu.br\" + self.dadosDir[\"url_foto_75x100\"])\r\n self.imagem = self.imagem.content\r\n self.imagem = io.BytesIO(self.imagem)\r\n self.imagemopen = Image.open(self.imagem)\r\n self.foto = ImageTk.PhotoImage(self.imagemopen)\r\n self.reformular()\r\n\r\n def reformular(self):\r\n self.clLogin.destroy()\r\n self.lLogin.destroy()\r\n self.autenticado.destroy()\r\n self.lautenticar.destroy()\r\n self.ccredenciais.destroy()\r\n self.cllcredenciais.destroy()\r\n self.lmatricula.destroy()\r\n self.lSenha.destroy()\r\n self.ceecredenciais.destroy()\r\n self.matricula.destroy()\r\n self.senha.destroy()\r\n self.cAutenticar.destroy()\r\n self.autenticar.destroy()\r\n \r\n self.root.title(\"Chat - Cliente\")\r\n self.root.configure(bg=\"#404040\", padx=20)\r\n\r\n self.cliente()\r\n\r\n self.D = {}\r\n\r\n self.cCons = Frame(self.root, bg=\"#606060\")\r\n self.cCons.pack(side=LEFT, fill=BOTH)\r\n\r\n self.cMens = Frame(self.root, bg=\"#606060\", padx=10)\r\n\r\n self.cMenRecv = Frame(self.cMens, bg=\"#606060\", pady=10)\r\n self.cMenRecv.pack(fill=BOTH, expand=1)\r\n \r\n self.cMenstyle = Frame(self.cMenRecv, bg=\"#707070\", padx=4)\r\n self.cMenstyle.pack(fill=BOTH, expand=1)\r\n \r\n self.cRecv = Frame(self.cMenstyle, bg=\"#606060\")\r\n self.cRecv.pack(fill=BOTH, expand=1)\r\n\r\n self.cEnv = Frame(self.cMens, bg=\"#606060\", padx=10, pady=10)\r\n self.cEnv.pack(fill=BOTH)\r\n\r\n self.cIdentificacao = Frame(self.cCons, bg=\"#606060\", padx=5, pady=5)\r\n self.cIdentificacao.pack(fill=X)\r\n\r\n self.cIFoto = Frame(self.cIdentificacao, bg=\"#505050\", padx=3, pady=3)\r\n self.cIFoto.pack(side=LEFT)\r\n\r\n self.iIdentificacao = Label(self.cIFoto, image=self.foto, borderwidth=0)\r\n self.iIdentificacao.pack()\r\n\r\n self.ccIdentificacao = Frame(self.cIdentificacao)\r\n self.ccIdentificacao.pack(side=LEFT, fill=X)\r\n \r\n self.l1Identificacao = Label(self.ccIdentificacao, text=\"Nome:\", bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\", \"bold\"), anchor=W)\r\n self.l1Identificacao.pack(fill=X)\r\n\r\n self.l2Identificacao = Label(self.ccIdentificacao, text=\"Matrícula:\", bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\", \"bold\"), anchor=W)\r\n self.l2Identificacao.pack(fill=X)\r\n\r\n self.l3Identificacao = Label(self.ccIdentificacao, text=\"Email:\", bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\", \"bold\"), anchor=W)\r\n self.l3Identificacao.pack(fill=X)\r\n\r\n self.l4Identificacao = Label(self.ccIdentificacao, text=\"Curso:\", bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\", \"bold\"), anchor=W)\r\n self.l4Identificacao.pack(fill=X)\r\n\r\n self.l5Identificacao = Label(self.ccIdentificacao, text=\"Vínculo:\", bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\", \"bold\"), anchor=W)\r\n self.l5Identificacao.pack(fill=X)\r\n\r\n self.cccIdentificacao = Frame(self.cIdentificacao, pady=5, bg=\"#606060\")\r\n self.cccIdentificacao.pack(fill=X)\r\n\r\n self.ll1Identificacao = 
Label(self.cccIdentificacao, text=self.dadosDir[\"vinculo\"][\"nome\"], bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\"), anchor=W)\r\n self.ll1Identificacao.pack(fill=X)\r\n\r\n self.ll2Identificacao = Label(self.cccIdentificacao, text=self.dadosDir[\"vinculo\"][\"matricula\"], bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\"), anchor=W)\r\n self.ll2Identificacao.pack(fill=X)\r\n\r\n self.ll3Identificacao = Label(self.cccIdentificacao, text=self.dadosDir[\"email\"], bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\"), anchor=W)\r\n self.ll3Identificacao.pack(fill=X)\r\n\r\n self.ll4Identificacao = Label(self.cccIdentificacao, text=self.dadosDir[\"vinculo\"][\"curso\"], bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\"), anchor=W)\r\n self.ll4Identificacao.pack(fill=X)\r\n\r\n self.ll5Identificacao = Label(self.cccIdentificacao, text=self.dadosDir[\"tipo_vinculo\"], bg=\"#606060\", fg=\"#FFFFFF\", font=(\"Verdana\", \"8\"), anchor=W)\r\n self.ll5Identificacao.pack(fill=X)\r\n\r\n self.cIStyle = Frame(self.cCons, bg=\"#707070\", height=4, width=350)\r\n self.cIStyle.pack()\r\n \r\n self.listaconexao = Listbox(self.cCons, bg=\"#505050\", activestyle=\"none\", justify=CENTER,font=(\"Verdana\", \"20\"), fg=\"white\", borderwidth=0, selectbackground=\"#606060\", highlightthickness=0, state=\"disabled\")\r\n self.listaconexao.pack(fill=BOTH,expand=1)\r\n\r\n self.add = Button(self.cCons, text=\"Adicionar Amigo\", borderwidth=0, bg=\"#707070\", fg=\"#FFFFFF\", font=(\"Verdana\", \"18\"))\r\n self.add.bind(\"\", self.adicionarAmigo)\r\n self.add.pack(fill=X)\r\n \r\n self.smen = Scrollbar(self.cRecv)\r\n self.caixademensagens = Text(self.cRecv, state=\"disabled\", cursor=\"arrow\", font=(\"Verdana\", \"15\"), bg=\"#606060\", fg=\"#FFFFFF\", relief=\"flat\")\r\n self.smen.pack(side=RIGHT, fill=Y)\r\n self.caixademensagens.pack(fill=BOTH, expand=1)\r\n self.smen.config(command=self.caixademensagens.yview)\r\n self.caixademensagens.config(yscrollcommand=self.smen.set)\r\n\r\n self.caixadeenvio = Text(self.cEnv, height=1, insertbackground=\"#FFFFFF\", bg=\"#707070\", relief=\"flat\", font=(\"Verdana\", \"15\"), fg=\"white\")\r\n self.caixadeenvio.bind(\"\", self.enviandoMensagem)\r\n self.caixadeenvio.bind(\"\", self.aumentar)\r\n self.caixadeenvio.pack(fill=BOTH)\r\n\r\n def enviandoMensagem(self, event):\r\n msg = {\"flag\":\"MSG\", \"content\":None}\r\n content = self.caixadeenvio.get(0.0,INSERT)\r\n msg[\"content\"] = {\"matricula\":self.dadosDir[\"vinculo\"][\"matricula\"], \"msg\":content, \"grupo\":None}\r\n nome = self.listaconexao.get(self.listaconexao.curselection()[0])\r\n for matricula in self.D.keys():\r\n if self.D[matricula][\"nome\"] == nome:\r\n msg[\"content\"][\"grupo\"] = self.D[matricula][\"grupo\"]\r\n msg = json.dumps(msg)\r\n msg = msg.encode(\"utf-8\")\r\n self.s.send(msg)\r\n self.caixadeenvio.delete(0.0,END)\r\n self.caixadeenvio.configure(height=1)\r\n\r\n def recebendo(self):\r\n while True:\r\n msg = self.s.recv(512)\r\n msg = msg.decode(\"utf-8\")\r\n msg = json.loads(msg)\r\n if msg[\"rflag\"] == \"MSG\":\r\n self.printMSG(msg[\"rcontent\"])\r\n elif msg[\"rflag\"] == \"CST\":\r\n self.respAmigo(msg[\"rcontent\"])\r\n elif msg[\"rflag\"] == \"SLA1\":\r\n self.recebeGrupo(msg[\"rcontent\"])\r\n elif msg[\"rflag\"] == \"SLA2\":\r\n self.recebeMensagens(msg[\"rcontent\"])\r\n\r\n def printMSG(self, msg):\r\n msg[\"mensagem\"] = msg[\"mensagem\"].replace(\"\\n\", \"\\n\\t\")\r\n self.caixademensagens.configure(state=\"normal\")\r\n if 
msg[\"nome\"] == self.dadosDir[\"vinculo\"][\"nome\"]:\r\n if msg[\"mensagem\"][0] == \"\\n\":\r\n self.caixademensagens.insert(END, \"\\nVocê:\\n\" + str(msg[\"mensagem\"][1:]))\r\n else:\r\n msg[\"mensagem\"] = \"\\t\" + msg[\"mensagem\"]\r\n self.caixademensagens.insert(END, \"\\nVocê:\\n\" + str(msg[\"mensagem\"]))\r\n else:\r\n if msg[\"mensagem\"][0] == \"\\n\":\r\n self.caixademensagens.insert(END, \"\\n{}:\\n\".format(msg[\"nome\"]) + str(msg[\"mensagem\"][1:]))\r\n else:\r\n msg[\"mensagem\"] = \"\\t\" + msg[\"mensagem\"]\r\n self.caixademensagens.insert(END, \"\\n{}:\\n\".format(msg[\"nome\"]) + str(msg[\"mensagem\"]))\r\n self.caixademensagens.configure(state=\"disabled\")\r\n self.caixademensagens.see(\"end\")\r\n\r\n def respAmigo(self, resp):\r\n if resp[\"nome\"] == \"\" and resp[\"matricula\"] == \"\":\r\n self.lladd.configure(text=\"Usuário Não Encontrado!\")\r\n else:\r\n self.D[resp[\"matricula\"]] = {\"nome\":resp[\"nome\"], \"grupo\": None}\r\n self.listaconexao.configure(state=\"normal\")\r\n self.listaconexao.bind(\"<>\", self.selecionarAmigo)\r\n self.listaconexao.insert(END, resp[\"nome\"])\r\n self.add.destroy()\r\n\r\n def recebeGrupo(self, resp):\r\n self.D[resp[\"matricula_destino\"]][\"grupo\"]= resp[\"grupo\"]\r\n\r\n def recebeMensagens(self, resp):\r\n self.caixademensagens.configure(state=\"normal\")\r\n self.caixademensagens.delete(0.0,END)\r\n for mensagem in resp:\r\n mensagem[1] = mensagem[1].replace(\"\\n\", \"\\n\\t\")\r\n if mensagem[0] == self.dadosDir[\"vinculo\"][\"nome\"]:\r\n if mensagem[1][0] == \"\\n\":\r\n self.caixademensagens.insert(END, \"\\nVocê:\\n\" + str(mensagem[1][1:]))\r\n else:\r\n mensagem[1] = \"\\t\" + mensagem[1]\r\n self.caixademensagens.insert(END, \"\\nVocê:\\n\" + str(mensagem[1]))\r\n else:\r\n if mensagem[1][0] == \"\\n\":\r\n self.caixademensagens.insert(END, \"\\n{}:\\n\".format(mensagem[0]) + str(mensagem[1][1:]))\r\n else:\r\n mensagem[1] = \"\\t\" + mensagem[1]\r\n self.caixademensagens.insert(END, \"\\n{}:\\n\".format(mensagem[0]) + str(mensagem[1]))\r\n self.caixademensagens.configure(state=\"disabled\")\r\n self.caixademensagens.see(\"end\")\r\n \r\n def cliente(self):\r\n reg = {\"flag\":\"REG\", \"content\":None}\r\n content = {\"nome\":self.dadosDir[\"vinculo\"][\"nome\"], \"matricula\":self.dadosDir[\"vinculo\"][\"matricula\"]}\r\n reg[\"content\"] = content\r\n reg = json.dumps(reg)\r\n reg = reg.encode(\"utf-8\")\r\n \r\n host = \"localhost\"\r\n port = 5000\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.connect((host, port))\r\n self.s.send(reg)\r\n threading.Thread(target=self.recebendo).start()\r\n\r\n def aumentar(self,event):\r\n self.caixadeenvio.configure(height=2)\r\n\r\n def adicionarAmigo(self, event):\r\n self.add = Toplevel()\r\n #self.add.iconbitmap(\"icon.ico\")\r\n self.add.title(\"Chat - Adicionar Amigo\")\r\n self.add.configure(bg=\"#505050\", padx = 20, pady=20)\r\n self.add.resizable(0, 0)\r\n\r\n cladd = Frame(self.add, bg=\"#505050\")\r\n cladd.pack()\r\n\r\n ladd = Label(cladd, font=(\"Verdana\", \"18\"), text=\"Adicionar Amigo\", bg=\"#505050\", fg=\"#FFFFFF\")\r\n ladd.pack()\r\n\r\n self.lladd = Label(cladd, font=(\"Verdana\"), text=\"\", bg=\"#505050\", fg=\"#FFFFFF\")\r\n self.lladd.pack()\r\n\r\n cadd = Frame(self.add, bg=\"#505050\")\r\n cadd.pack()\r\n\r\n cladd = Frame(cadd, bg=\"#505050\")\r\n cladd.pack(side=LEFT)\r\n\r\n lmatriculaadd = Label(cladd, font=(\"Verdana\", \"15\"), text=\"Matrícula:\", bg=\"#505050\", fg=\"#FFFFFF\")\r\n 
lmatriculaadd.pack(pady=(6,0))\r\n\r\n ceadd = Frame(cadd, bg=\"#505050\")\r\n ceadd.pack()\r\n\r\n self.ematriculaadd = Entry(ceadd, width=20, bg=\"#707070\", borderwidth=0, font=(\"Verdana\", \"15\"), insertbackground=\"#FFFFFF\", fg=\"white\")\r\n self.ematriculaadd.pack(pady=(10,0))\r\n\r\n cbadd = Frame(self.add, bg=\"#505050\")\r\n cbadd.pack()\r\n\r\n badd = Button(cbadd, bg=\"#707070\", fg=\"#FFFFFF\", font=(\"Verdana\", \"18\"), text=\"Adicionar\", borderwidth=0)\r\n badd.bind(\"\", self.consultarAmigo)\r\n badd.pack(pady=(20,0))\r\n\r\n def consultarAmigo(self, event):\r\n if self.ematriculaadd.get() != self.dadosDir[\"vinculo\"][\"matricula\"]:\r\n cst = {\"flag\":\"CST\", \"content\":self.ematriculaadd.get()}\r\n cst = json.dumps(cst)\r\n cst = cst.encode(\"utf-8\")\r\n self.s.send(cst)\r\n else:\r\n self.lladd.configure(text=\"Você Não Pode Se Adicionar!\")\r\n\r\n def selecionarAmigo(self, event):\r\n self.cMens.pack(side=LEFT, fill=BOTH, expand=1)\r\n nome = event.widget.get(event.widget.curselection()[0])\r\n for matricula in self.D.keys():\r\n if self.D[matricula][\"nome\"] == nome:\r\n if self.D[matricula][\"grupo\"] == None:\r\n sla = {\"flag\":\"SLA1\", \"content\":{\"matricula_origem\":self.dadosDir[\"vinculo\"][\"matricula\"], \"matricula_destino\":matricula}}\r\n sla = json.dumps(sla)\r\n sla = sla.encode(\"utf-8\")\r\n self.s.send(sla)\r\n else:\r\n sla = {\"flag\":\"SLA2\", \"content\":{\"matricula_origem\":self.dadosDir[\"vinculo\"][\"matricula\"], \"grupo\":self.D[matricula][\"grupo\"]}}\r\n sla = json.dumps(sla)\r\n sla = sla.encode(\"utf-8\")\r\n self.s.send(sla)\r\n break\r\n \r\nLogin()\r\n","sub_path":"aplicativo.py","file_name":"aplicativo.py","file_ext":"py","file_size_in_byte":16362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"161328183","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport DataIn as DI\r\nfrom InputValidation import InputValidation as InpVal\r\nimport DataOut as DO\r\nimport StockOut as SO\r\n\r\n\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Customer Recommendation System\")\r\nroot.iconbitmap(\"D:/Study Material/Project/Final Draft/custom.ico\")\r\n\r\nFrontPage = LabelFrame(root)\r\n\r\n\r\n\r\ndef onClick(frame):\r\n label = Label(frame,text = \"Some button was clicked\",padx = 30, background = \"Black\",pady = 30)\r\n label.grid(row = 30, column = 0, columnspan = 2)\r\n # simple print() will print the text on the console \r\n # instead of the root widget window \r\n\r\n\r\ndef RemoveFrame(frame, Frame):\r\n\tFrame.grid()\r\n\tframe.grid_forget()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef InputCustomerFrame():\r\n\tFrontPage.grid_forget()\r\n\r\n\r\n\tdef SubmitButton():\r\n\t\tif NameInp.get() == \" \":\r\n\t\t\t\tErrorLabel = Label(Inpframe, text = \"Enter required fields' values.\",fg = \"red\", pady = 20)\r\n\t\t\t\tErrorLabel.grid(row = 5, column = 0, columnspan = 2)\r\n\t\t\t\treturn\r\n\t\tdata = {}\r\n\t\tdata[\"Name\"] = NameInp.get()\r\n\t\tdata[\"Age\"] = AgeInp.get()\r\n\t\tdata[\"Annual Income (k$)\"] = SalaryInp.get()\r\n\t\tdata[\"Genre\"] = GenderInp.get()\r\n\t\tdata[\"Spending Score (1-100)\"] = ScoreInp.get()\r\n\t\tprint( data)\t\r\n\t\tdata = DI.TakeCustomerGui(data)\r\n\t\tcstr = str(data[\"Cluster\"])\r\n\t\tcsnm = str(data[\"Name\"])\r\n\t\tmsg = csnm+\" belonged to Cluster \" + cstr\r\n\t\tprint(msg)\r\n\t\tClusterLabel = Label(Inpframe, text = 
msg)\r\n\t\tClusterLabel.grid(row = 8)\t\t\r\n\t\tGenderInp.delete(0,END)\r\n\t\tNameInp.delete(0,END)\r\n\t\tSalaryInp.delete(0,END)\r\n\t\tAgeInp.delete(0,END)\r\n\t\tScoreInp.delete(0,END)\r\n\r\n\r\n\t# create the frame\r\n\tglobal Inpframe\r\n\tInpframe = LabelFrame(root, text = \"Input Customer Data here:\", padx = 120, pady = 50, bd = 3, relief = SUNKEN)\r\n\t\r\n\tExitButton = Button(Inpframe, text= \"Back\", padx= 20,pady = 10, fg= \"Red\", command =lambda: RemoveFrame(Inpframe, FrontPage))\r\n\tSubmitButton = Button(Inpframe, text= \"Submit\", padx= 20,pady = 10, fg= \"Red\", command =SubmitButton)\r\n\r\n\r\n\tNameLabel = Label(Inpframe, text = \"Name:\", pady = 20)\r\n\tNameInp = Entry(Inpframe, width = 40, borderwidth= 3)\r\n\tNameInp.insert(10, \" \")\r\n\r\n\tAgeLabel = Label(Inpframe, text = \"Age:\", pady = 20)\r\n\tAgeInp = Entry(Inpframe, width = 40, borderwidth= 3)\r\n\tAgeInp.insert(10, \" \")\r\n\r\n\r\n\tSalaryLabel = Label(Inpframe, text = \"Salary:\", pady = 20)\r\n\tSalaryInp = Entry(Inpframe, width = 40, borderwidth= 3)\r\n\tSalaryInp.insert(10, \" \")\r\n\r\n\r\n\tGenderLabel = Label(Inpframe, text = \"Gender:\", pady = 20)\r\n\tGenderInp = Entry(Inpframe, width = 40, borderwidth= 3)\r\n\tGenderInp.insert(10, \" \")\r\n\r\n\r\n\r\n\tScoreLabel = Label(Inpframe, text = \"Spending Socre(1-100):\", pady = 20)\r\n\tScoreInp = Entry(Inpframe, width = 40, borderwidth= 3)\r\n\tScoreInp.insert(10, \" \")\r\n\r\n\r\n\t\r\n\t# show the InpFrame widgets\r\n\tNameLabel.grid(row = 0, column = 0)\r\n\tNameInp.grid(row = 0, column = 1)\r\n\tAgeLabel.grid(row = 1, column = 0)\r\n\tAgeInp.grid(row = 1, column = 1)\r\n\tSalaryLabel.grid(row = 2, column = 0)\r\n\tSalaryInp.grid(row = 2, column = 1)\r\n\tGenderLabel.grid(row = 3, column = 0)\r\n\tGenderInp.grid(row = 3, column = 1)\r\n\tScoreLabel.grid(row = 4, column = 0)\r\n\tScoreInp.grid(row = 4, column = 1)\r\n\t\r\n\r\n\r\n\r\n\t#show the frame\r\n\tInpframe.grid(row = 10, column = 0, columnspan = 2, pady = 40)\r\n\tExitButton.grid(row = 10, column = 1)\r\n\tSubmitButton.grid(row = 10, column = 0,padx = 30)\r\n\r\ni = 0\r\n\r\ndef InputBasketFrame():\r\n\tFrontPage.grid_forget()\r\n\tbasket = []\r\n\r\n\tdef AddButtonFun():\r\n\t\tglobal i\r\n\t\tx = ItemInp.get().lower()\r\n\t\tbasket.append(x)\r\n\t\tprint(basket)\t\r\n\t\tCurrentItemLabel = Label(Inpframe, text = ItemInp.get(), pady = 20)\r\n\t\tCurrentItemLabel.grid(row = 11+i, column = 0)\r\n\t\ti+=1\r\n\t\tItemInp.delete(0,END)\r\n\r\n\r\n\tdef SubmitButtonFun():\r\n\t\tAddButtonFun()\r\n\t\tDI.SaveBasketData(basket)\r\n\t\tglobal i\r\n\t\ti= 0\r\n\t\tRemoveFrame(Inpframe, FrontPage)\r\n\t\tInputBasketFrame()\r\n\r\n\r\n\r\n\r\n\t# create the frame\r\n\tglobal Inpframe\r\n\tInpframe = LabelFrame(root, text = \"Input Basket Items here:\", padx = 120, pady = 50, bd = 3, relief = SUNKEN)\r\n\t\r\n\tExitButton = Button(Inpframe, text= \"Finish\", padx= 20,pady = 10, fg= \"Red\", command =lambda: RemoveFrame(Inpframe, FrontPage))\r\n\tAddButton = Button(Inpframe, text= \"Add Item\", padx= 20,pady = 10, fg= \"Red\", command =AddButtonFun)\r\n\tSubmitButton = Button(Inpframe, text= \"Submit Basket\", padx= 20,pady = 10, fg= \"Red\", command =SubmitButtonFun)\r\n\r\n\r\n\tItemLabel = Label(Inpframe, text = \"Enter Item:\", pady = 20)\r\n\tItemInp = Entry(Inpframe, width = 40, borderwidth= 3)\r\n\tItemInp.insert(10, \" \")\r\n\r\n\r\n\r\n\r\n\t# show the InpFrame widgets\r\n\tItemLabel.grid(row = 0, column = 0)\r\n\tItemInp.grid(row = 0, column = 1)\r\n\t\r\n\r\n\r\n\t#show the 
frame\r\n\tInpframe.grid(row = 10, column = 0, columnspan = 2, pady = 40)\r\n\tExitButton.grid(row = 10, column = 2)\r\n\tAddButton.grid(row = 10, column = 0,padx = 30)\r\n\tSubmitButton.grid(row = 10, column = 1,padx = 30)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef ShowCustomerFrameDisplay():\r\n\tFrontPage.grid_forget()\r\n\tShowCustomerFrame = LabelFrame(root)\r\n\tShowLabel = Label(ShowCustomerFrame, text= \"Cutomer Data\",font = (\"Courier 14 bold\"), padx= 20,\r\n\t pady = 20, fg= \"Red\")\r\n\r\n\tInputLabel = Label(ShowCustomerFrame, text= \"How do you want the Data to be Filtered? :\", padx= 20,\r\n\t pady = 20, fg= \"Blue\")\r\n\r\n\r\n\t#Functions\r\n\tdef ClusterGraph():\r\n\t\tdataset = pd.read_csv('New Customer.csv')\r\n\t\tX = dataset.iloc[:, [2,3,4,5,6]].values\r\n\r\n\t\tfrom sklearn.cluster import KMeans\r\n\t\twcss = []\r\n\t\tfor i in range(1, 11):\r\n\t\t kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)\r\n\t\t kmeans.fit(X)\r\n\t\t wcss.append(kmeans.inertia_)\r\n\r\n\t\tkmeans = KMeans(n_clusters = 5, init = 'k-means++', random_state = 42)\r\n\t\ty_kmeans = kmeans.fit_predict(X)\r\n\t\tprint(y_kmeans)\r\n\r\n\t\tplt.scatter(X[y_kmeans == 0, 3], X[y_kmeans == 0, 4], s = 100, c = 'red', label = 'Cluster 1')\r\n\t\tplt.scatter(X[y_kmeans == 1, 3], X[y_kmeans == 1, 4], s = 100, c = 'blue', label = 'Cluster 2')\r\n\t\tplt.scatter(X[y_kmeans == 2, 3], X[y_kmeans == 2, 4], s = 100, c = 'green', label = 'Cluster 3')\r\n\t\tplt.scatter(X[y_kmeans == 3, 3], X[y_kmeans == 3, 4], s = 100, c = 'cyan', label = 'Cluster 4')\r\n\t\tplt.scatter(X[y_kmeans == 4, 3], X[y_kmeans == 4, 4], s = 100, c = 'magenta', label = 'Cluster 5')\r\n\t\tplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')\r\n\t\tplt.title('Clusters of customers')\r\n\t\tplt.xlabel('Annual Income (k$)')\r\n\t\tplt.ylabel('Spending Score (1-100)')\r\n\t\tplt.legend()\r\n\t\tplt.show()\r\n\r\n\r\n\r\n\r\n\tdef ShowAllData():\r\n\t\tShowCustomerFrame.grid_forget()\r\n\t\tdataset = pd.read_csv(r'D:\\Study Material\\Project\\Final Draft\\New Customer.csv')\r\n\t\tx = max(dataset[\"CustomerID\"])\r\n\r\n\t\tOutputFrame = LabelFrame(root, text = \"Output Window\")\r\n\t\tprint(list(dataset.columns))\r\n\t\tItemFrame = LabelFrame(OutputFrame)\r\n\r\n\t\t#scroll = Scrollbar(OutputFrame, orient = 'vertical')\r\n\t\t#scroll.config(command = OutputFrame.xview)\r\n\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowCustomerFrame))\r\n\t\tBackButton.grid(row = 0, column = 0, padx = 7, pady = 7)\r\n\r\n\r\n\t\tHeadLable = Label(OutputFrame, text = \"Showing ALL CUSTOMERS Data\", relief = SUNKEN, border = 2, padx = 0, pady = 3, font = \"BOLD 28\")\r\n\t\tHeadLable.grid(row = 0, column = 1, padx = 7, pady = 7, columnspan = 20)\r\n\r\n\r\n\r\n\t\tcoln = 0\r\n\t\tfor i in list(dataset.columns):\r\n\t\t\tColLable = Label(ItemFrame, text = str(i), relief = SUNKEN, border = 0, padx = 0, pady = 3, font = \"BOLD\")\r\n\t\t\tColLable.grid(row = 1, column = coln, padx = 7, pady = 7)\r\n\t\t\tcoln = coln + 1\r\n\t\tOutputFrame.grid()\r\n\r\n\t\trown = 1\r\n\t\tcount = 0\r\n\t\tfor row in range(x):\t\r\n\t\t\tif count < 30:\r\n\t\t\t\tcount+=1\r\n\t\t\t\tfor col in range(len(list(dataset.columns))):\r\n\t\t\t\t\telement = dataset.iloc[row,col]\r\n\t\t\t\t\tEleLabel = Label(ItemFrame, text = str(element), relief = SUNKEN, border = 1, padx = 95)\r\n\t\t\t\t\tEleLabel.grid(row = row+2, column = 
col)\r\n\t\tItemFrame.grid(columnspan = 6)\r\n\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowCustomerFrame))\r\n\t\tBackButton.grid(row = 3, column = 3, padx = 7, pady = 7)\r\n\r\n\tdef GetCluster(): \r\n\t\tGetClusterFrame = LabelFrame(ShowCustomerFrame, text = \"Choose the Cluster\")\r\n\r\n\r\n\r\n\t\tdef ShowClusterCustomers(cls = 1):\r\n\t\t\tShowCustomerFrame.grid_forget()\r\n\t\t\tdataset = pd.read_csv(r'D:\\Study Material\\Project\\Final Draft\\New Customer.csv')\r\n\t\t\tx = max(dataset[\"CustomerID\"])\r\n\t\t\tOutputFrame = LabelFrame(root, text = \"Output Window\")\r\n\r\n\r\n\r\n\t\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowCustomerFrame))\r\n\t\t\tBackButton.grid(row = 0, column = 0, padx = 7, pady = 7)\r\n\r\n\t\t\tHeadLable = Label(OutputFrame, text = \"Showing Cluster \"+str(cls)+\" CUSTOMERS Data\", relief = SUNKEN, border = 2, padx = 0, pady = 3, font = \"BOLD 28\")\r\n\t\t\tHeadLable.grid(row = 0, column = 1, padx = 7, pady = 7, columnspan = 20)\r\n\r\n\r\n\r\n\r\n\t\t\tcoln = 0\r\n\t\t\tfor i in list(dataset.columns):\r\n\t\t\t\tColLable = Label(OutputFrame, text = str(i), relief = SUNKEN, border = 0, padx = 0, pady = 3, font = \"BOLD\")\r\n\t\t\t\tColLable.grid(row = 1, column = coln, padx = 7, pady = 7)\r\n\t\t\t\tcoln = coln + 1\r\n\t\t\tOutputFrame.grid(row = 0, pady = 30)\r\n\t\t\trown = 1\r\n\t\t\tfor col in range(len(list(dataset.columns))):\r\n\t\t\t\tfor row in range(x):\r\n\t\t\t\t\tif dataset.iloc[row,2] == cls:\r\n\t\t\t\t\t\telement = dataset.iloc[row,col]\r\n\t\t\t\t\t\tEleLabel = Label(OutputFrame, text = str(element), relief = SUNKEN, border = 1, padx = 95)\r\n\t\t\t\t\t\tEleLabel.grid(row = row+2, column = col)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\tcls0 = Button(GetClusterFrame, text = \"Cluster 0\", command = lambda: ShowClusterCustomers(0))\r\n\t\tcls1 = Button(GetClusterFrame, text = \"Cluster 1\", command = lambda: ShowClusterCustomers(1))\r\n\t\tcls2 = Button(GetClusterFrame, text = \"Cluster 2\", command = lambda: ShowClusterCustomers(2))\r\n\t\tcls3 = Button(GetClusterFrame, text = \"Cluster 3\", command = lambda: ShowClusterCustomers(3))\r\n\t\tcls4 = Button(GetClusterFrame, text = \"Cluster 4\", command = lambda: ShowClusterCustomers(4))\r\n\r\n\t\tHead = Label(GetClusterFrame, text = \"Choose which cluster data is to be shown: \")\r\n\r\n\t\tHead.grid(row = 0, column = 0, columnspan = 2)\r\n\t\tcls0.grid(row = 1, column = 0)\r\n\t\tcls1.grid(row = 2, column = 0)\r\n\t\tcls2.grid(row = 3, column = 0)\r\n\t\tcls3.grid(row = 1, column = 1)\r\n\t\tcls4.grid(row = 2, column = 1)\r\n\t\tGetClusterFrame.grid(row= 5)\r\n\r\n\r\n\r\n\tdef GetOrder():\r\n\t\tGetOrderFrame = LabelFrame(ShowCustomerFrame, text = \"Choose the type of Ordering: \")\r\n\r\n\r\n\r\n\t\tdef ShowOrderedCustomers(cls = 1):\r\n\t\t\tShowCustomerFrame.grid_forget()\r\n\t\t\tdataset = pd.read_csv(r'D:\\Study Material\\Project\\Final Draft\\New Customer.csv')\r\n\t\t\tOutputFrame = LabelFrame(root, text = \"Output Window\")\r\n\r\n\r\n\r\n\t\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowCustomerFrame))\r\n\t\t\tBackButton.grid(row = 0, column = 0, padx = 7, pady = 7)\r\n\r\n\t\t\tHeadLable = Label(OutputFrame, text = \"Showing Customer in Order of Spending Score\", relief = SUNKEN, border = 2, padx = 0, pady = 3, font = \"BOLD 28\")\r\n\t\t\tHeadLable.grid(row = 0, column = 1, padx = 7, pady = 7, columnspan = 
20)\r\n\r\n\r\n\r\n\r\n\t\t\tcoln = 0\r\n\t\t\tfor i in list(dataset.columns):\r\n\t\t\t\tColLable = Label(OutputFrame, text = str(i), relief = SUNKEN, border = 0, padx = 0, pady = 3, font = \"BOLD\")\r\n\t\t\t\tColLable.grid(row = 1, column = coln, padx = 7, pady = 7)\r\n\t\t\t\tcoln = coln + 1\r\n\t\t\tOutputFrame.grid(row = 0, pady = 30)\r\n\t\t\trown = 1\r\n\r\n\t\t\tdf = dataset.sort_values(\"Spending Score (1-100)\", ascending = cls)\r\n\t\t\tx = max(dataset[\"CustomerID\"])\r\n\r\n\t\t\tfor col in range(len(list(df.columns))):\r\n\t\t\t\tfor row in range(x):\r\n\t\t\t\t\telement = df.iloc[row,col]\r\n\t\t\t\t\tEleLabel = Label(OutputFrame, text = str(element), relief = SUNKEN, border = 1, padx = 95)\r\n\t\t\t\t\tEleLabel.grid(row = row+2, column = col)\r\n\r\n\r\n\r\n\r\n\r\n\t\tord0 = Button(GetOrderFrame, text = \"Ascending Order\", command = lambda: ShowOrderedCustomers(1))\r\n\t\tord1 = Button(GetOrderFrame, text = \"Descending Order\", command = lambda: ShowOrderedCustomers(0))\r\n\r\n\t\tHead = Label(GetOrderFrame, text = \"Choose in what Oreder Data is to be shown: \")\r\n\r\n\t\tHead.grid(row = 0, column = 0, columnspan = 2)\r\n\t\tord0.grid(row = 1, column = 0)\r\n\t\tord1.grid(row = 2, column = 0)\r\n\t\tGetOrderFrame.grid(row= 8)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t# Buttons\r\n\tAllCustomerButton = Button(ShowCustomerFrame, text = \"All Customer Data\", padx = 0, pady = 20, command = ShowAllData)\r\n\tClusterCustomerButton = Button(ShowCustomerFrame, text = \"Cluster Customer Data\", padx = 0, pady = 20, command = GetCluster)\r\n\tSortedCustomerButton = Button(ShowCustomerFrame, text = \"Sorted Customer Data\", padx = 0, pady = 20, command =GetOrder)\r\n\tBackButton = Button(ShowCustomerFrame, text = \"Go Back\", padx = 0, pady = 20, command =lambda: RemoveFrame(ShowCustomerFrame, FrontPage))\r\n\tClusterShowButton = Button(ShowCustomerFrame, text = \"Show Graph of All Customers \\nAccording to Clusters\", command = ClusterGraph, pady = 30)\r\n\r\n\r\n\r\n\r\n\r\n\t# put Customer Display page on the screen\r\n\tShowLabel.grid(row = 0, column = 0)\r\n\tInputLabel.grid(row = 1, column = 0)\r\n\tAllCustomerButton.grid(row = 2, column = 0, pady = 20)\r\n\tClusterCustomerButton.grid(row = 3, column = 0, pady = 20)\r\n\tSortedCustomerButton.grid(row = 7, column = 0, pady = 20)\r\n\t\r\n\tClusterShowButton.grid(row = 9, column = 0)\r\n\tBackButton.grid(row = 10, column = 0, pady = 20)\r\n\tShowCustomerFrame.grid()\r\n\t\r\n\r\n\r\n\r\ndef ShowItemFrameDisplay():\r\n\tFrontPage.grid_forget()\r\n\tShowItemFrame = LabelFrame(root)\r\n\tShowLabel = Label(ShowItemFrame, text= \"Stock Items Data\",font = (\"Courier 14 bold\"), padx= 20,\r\n\t pady = 20, fg= \"Red\")\r\n\r\n\tInputLabel = Label(ShowItemFrame, text= \"How do you want the Data to be Filtered? 
:\", padx= 20,\r\n\t pady = 20, fg= \"Blue\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef GetOrder():\r\n\t\tGetOrderFrame = LabelFrame(ShowItemFrame, text = \"Choose the type of Ordering: \")\r\n\r\n\t\tdef SortedItems(ord):\r\n\t\t\tdataset = pd.read_csv(\"D:\\Study Material\\Project\\Final Draft\\Items Table.csv\")\r\n\t\t\tdataset = dataset.sort_values(\"Rating\", ascending = bool(ord))\r\n\r\n\t\t\tprint(\"\\nColumns:\\n\",list(dataset.columns),\"\\n\")\r\n\r\n\t\t\tShowItemFrame.grid_forget()\r\n\t\t\tx = dataset[\"Item\"].count()\r\n\r\n\t\t\tOutputFrame = LabelFrame(root, text = \"Output Window\")\r\n\t\t\tprint(list(dataset.columns))\r\n\r\n\t\t\t#scroll = Scrollbar(OutputFrame, orient = 'vertical')\r\n\t\t\t#scroll.config(command = OutputFrame.xview)\r\n\t\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowItemFrame))\r\n\t\t\tBackButton.grid(row = 0, column = 0)\r\n\r\n\r\n\t\t\tHeadLable = Label(OutputFrame, text = \"Showing ALL Stock Items by Rating\", relief = SUNKEN, border = 2, padx = 0, pady = 3, font = \"BOLD 28\")\r\n\t\t\tHeadLable.grid(row = 0, column = 1, padx = 7, pady = 7, columnspan = 2)\r\n\r\n\r\n\r\n\t\t\tcoln = 0\r\n\t\t\tfor i in list(dataset.columns):\r\n\t\t\t\tColLable = Label(OutputFrame, text = str(i), relief = SUNKEN, border = 0, padx = 0, pady = 3, font = \"BOLD\")\r\n\t\t\t\tColLable.grid(row = 1, column = coln, padx = 7, pady = 7)\r\n\t\t\t\tcoln = coln + 1\r\n\t\t\tOutputFrame.grid()\r\n\t\t\trown = 1\r\n\t\t\tfor col in range(len(list(dataset.columns))):\r\n\t\t\t\tfor row in range(x):\r\n\t\t\t\t\telement = dataset.iloc[row,col]\r\n\t\t\t\t\tEleLabel = Label(OutputFrame, text = str(element), relief = SUNKEN, border = 1, padx =140)\r\n\t\t\t\t\tEleLabel.grid(row = row+2, column = col)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\tord0 = Button(GetOrderFrame, text = \"Ascending Order\", command = lambda: SortedItems(1))\r\n\t\tord1 = Button(GetOrderFrame, text = \"Descending Order\", command = lambda: SortedItems(0))\r\n\r\n\t\tHead = Label(GetOrderFrame, text = \"Choose in what Oreder Data is to be shown: \")\r\n\r\n\t\tHead.grid(row = 0, column = 0, columnspan = 2)\r\n\t\tord0.grid(row = 1, column = 0)\r\n\t\tord1.grid(row = 2, column = 0)\r\n\t\tGetOrderFrame.grid(row= 5)\r\n\r\n\r\n\r\n\r\n\r\n\tdef AllItems():\r\n\t\tdataset = pd.read_csv(\"D:\\Study Material\\Project\\Final Draft\\Items Table.csv\")\r\n\t\tprint(\"\\nColumns:\\n\",list(dataset.columns),\"\\n\")\r\n\r\n\t\tShowItemFrame.grid_forget()\r\n\t\tx = dataset[\"Item\"].count()\r\n\r\n\t\tOutputFrame = LabelFrame(root, text = \"Output Window\")\r\n\t\tprint(list(dataset.columns))\r\n\r\n\t\t#scroll = Scrollbar(OutputFrame, orient = 'vertical')\r\n\t\t#scroll.config(command = OutputFrame.xview)\r\n\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowItemFrame))\r\n\t\tBackButton.grid(row = 0, column = 0)\r\n\r\n\r\n\t\tHeadLable = Label(OutputFrame, text = \"Showing ALL Stock Items\", relief = SUNKEN, border = 2, padx = 0, pady = 3, font = \"BOLD 28\")\r\n\t\tHeadLable.grid(row = 0, column = 1, padx = 7, pady = 7, columnspan = 2)\r\n\r\n\r\n\r\n\t\tcoln = 0\r\n\t\tfor i in list(dataset.columns):\r\n\t\t\tColLable = Label(OutputFrame, text = str(i), relief = SUNKEN, border = 0, padx = 0, pady = 3, font = \"BOLD\")\r\n\t\t\tColLable.grid(row = 1, column = coln, padx = 7, pady = 7)\r\n\t\t\tcoln = coln + 1\r\n\t\tOutputFrame.grid()\r\n\t\trown = 1\r\n\t\tfor col in range(len(list(dataset.columns))):\r\n\t\t\tfor row in 
range(x):\r\n\t\t\t\telement = dataset.iloc[row,col]\r\n\t\t\t\tEleLabel = Label(OutputFrame, text = str(element), relief = SUNKEN, border = 1, padx =140)\r\n\t\t\t\tEleLabel.grid(row = row+2, column = col)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef Together():\r\n\t\tdataset = SO.Together()\r\n\t\tShowItemFrame.grid_forget()\r\n\t\tprint(dataset.columns)\r\n\t\tx = dataset[\"Support\"].count()\r\n\r\n\t\tOutputFrame = LabelFrame(root, text = \"Output Window\")\r\n\t\tprint(list(dataset.columns))\r\n\r\n\t\t#scroll = Scrollbar(OutputFrame, orient = 'vertical')\r\n\t\t#scroll.config(command = OutputFrame.xview)\r\n\t\tBackButton = Button(OutputFrame, text = \"Back\", command = lambda: RemoveFrame(OutputFrame, ShowItemFrame))\r\n\t\tBackButton.grid(row = 0, column = 0)\r\n\r\n\r\n\t\tHeadLable = Label(OutputFrame, text = \"Showing ALL Stock Items\", relief = SUNKEN, border = 2, padx = 0, pady = 3, font = \"BOLD 28\")\r\n\t\tHeadLable.grid(row = 0, column = 1, padx = 7, pady = 7, columnspan = 2)\r\n\r\n\r\n\r\n\t\tcoln = 0\r\n\t\tfor i in list(dataset.columns):\r\n\t\t\tColLable = Label(OutputFrame, text = str(i), relief = SUNKEN, border = 0, padx = 0, pady = 3, font = \"BOLD\")\r\n\t\t\tColLable.grid(row = 1, column = coln, padx = 7, pady = 7)\r\n\t\t\tcoln = coln + 1\r\n\t\tOutputFrame.grid()\r\n\t\trown = 1\r\n\t\tfor col in range(len(list(dataset.columns))):\r\n\t\t\tfor row in range(x):\r\n\t\t\t\telement = dataset.iloc[row,col]\r\n\t\t\t\tEleLabel = Label(OutputFrame, text = str(element), relief = SUNKEN, border = 1, padx =70)\r\n\t\t\t\tEleLabel.grid(row = row+2, column = col)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t# Buttons\r\n\tAllItemButton = Button(ShowItemFrame, text = \"All Items Data\", padx = 0, pady = 20, command = AllItems)\r\n\tTogetherButton = Button(ShowItemFrame, text = \"Items Bought Together\", padx = 0, pady = 20, command =Together)\r\n\tSortedItemButton = Button(ShowItemFrame, text = \"Rating-wise Sorted Data\", padx = 0, pady = 20, command =GetOrder)\r\n\tBackButton = Button(ShowItemFrame, text = \"Go Back\", padx = 0, pady = 20, command =lambda: RemoveFrame(ShowItemFrame, FrontPage))\r\n\r\n\r\n\t# put First page on the screen\r\n\tShowLabel.grid(row = 0, column = 0)\r\n\tInputLabel.grid(row = 1, column = 0)\r\n\tAllItemButton.grid(row = 2, column = 0, pady = 20)\r\n\tTogetherButton.grid(row = 3, column = 0, pady = 20)\r\n\tSortedItemButton.grid(row = 4, column = 0, pady = 20)\r\n\tBackButton.grid(row = 6, column = 0)\r\n\tShowItemFrame.grid()\r\n\r\n\r\n\r\ndef EditCustomerFrame():\r\n\tFrontPage.grid_forget()\r\n\tEditCustomerFrame = LabelFrame(root)\r\n\tEditLabel = Label(EditCustomerFrame, text= \"Enter Cutomer Data to search by:\",font = (\"Courier 14 bold\"), padx= 20,\r\n\t pady = 20, fg= \"Red\")\r\n\r\n\r\n\r\n\tNameLabel = Label(EditCustomerFrame, text = \"Enter Name:\", pady = 20)\r\n\tNameInp = Entry(EditCustomerFrame, width = 40, borderwidth= 3)\r\n\tNameInp.insert(10, \" \")\r\n\tNameSearch = Button(EditCustomerFrame, text = \"Search\", fg = \"Red\")\r\n\r\n\tOrLabel = Label(EditCustomerFrame, text = \"OR\")\r\n\r\n\tIDLabel = Label(EditCustomerFrame, text = \"Enter Name:\", pady = 20)\r\n\tIDInp = Entry(EditCustomerFrame, width = 40, borderwidth= 3)\r\n\tIDInp.insert(10, \" \")\r\n\tIDSearch = Button(EditCustomerFrame, text = \"Search\", fg = \"Red\")\r\n\r\n\tEditLabel.grid(row = 0, column = 0, columnspan = 2)\r\n\tNameLabel.grid(row = 1,column = 0)\r\n\tNameInp.grid(row = 1,column = 1)\r\n\tNameSearch.grid(row = 1,column = 2)\r\n\tOrLabel.grid(row = 2,column = 0, 
columnspan = 2)\r\n\tIDLabel.grid(row = 3,column = 0)\r\n\tIDInp.grid(row = 3,column = 1)\r\n\tIDSearch.grid(row = 3,column = 2)\r\n\tEditCustomerFrame.pack()\r\n\r\n\r\n\r\n\r\ndef EditBasketFrame():\r\n\tFrontPage.grid_forget()\r\n\tEditStockFrame = LabelFrame(root)\r\n\tEditLabel = Label(EditStockFrame, text= \"Enter Item Data to search by:\",font = (\"Courier 14 bold\"), padx= 20,\r\n\t pady = 20, fg= \"Red\")\r\n\r\n\r\n\r\n\tNameLabel = Label(EditStockFrame, text = \"Enter Name:\", pady = 20)\r\n\tNameInp = Entry(EditStockFrame, width = 40, borderwidth= 3)\r\n\tNameInp.insert(10, \" \")\r\n\tNameSearch = Button(EditStockFrame, text = \"Search\", fg = \"Red\")\r\n\r\n\tOrLabel = Label(EditStockFrame, text = \"OR\")\r\n\r\n\tIDLabel = Label(EditStockFrame, text = \"Enter ID:\", pady = 20)\r\n\tIDInp = Entry(EditStockFrame, width = 40, borderwidth= 3)\r\n\tIDInp.insert(10, \" \")\r\n\tIDSearch = Button(EditStockFrame, text = \"Search\", fg = \"Red\")\r\n\r\n\tEditLabel.grid(row = 0, column = 0, columnspan = 2)\r\n\tNameLabel.grid(row = 1,column = 0)\r\n\tNameInp.grid(row = 1,column = 1)\r\n\tNameSearch.grid(row = 1,column = 2)\r\n\tOrLabel.grid(row = 2,column = 0, columnspan = 2)\r\n\tIDLabel.grid(row = 3,column = 0)\r\n\tIDInp.grid(row = 3,column = 1)\r\n\tIDSearch.grid(row = 3,column = 2)\r\n\tEditStockFrame.pack()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef FrontPageFrame():\r\n\t# first page widgets\r\n\t# the operation buttons\r\n\tBasketLabel = Label(FrontPage, text= \"Customers\",font = (\"Courier 14 bold\"), padx= 20,\r\n\t pady = 20, fg= \"Red\")\r\n\r\n\tCustomerLabel = Label(FrontPage, text= \"Stock\",font = (\"Courier 14 bold\"), padx= 20,\r\n\t pady = 20, fg= \"Blue\")\r\n\r\n\t# greeting image\r\n\tGreetImg= ImageTk.PhotoImage(Image.open(\"file-delivery.png\"))\r\n\tGreetImgLabel = Label(FrontPage, image = GreetImg)\r\n\r\n\t# greeting text\r\n\tGreeting = Label(FrontPage,text = \"Welcome to Customer Recommendation System\", font = \"Courier 18 bold\",anchor = \"s\",padx = 10, pady =40)\r\n\r\n\t# Buttons\r\n\tCustomerDataSubmitButton = Button(FrontPage, text = \"Enter Customer Data\", padx = 0, pady = 20, command = InputCustomerFrame)\r\n\tBasketDataSubmitButton = Button(FrontPage, text = \"Enter Basket Data\", padx = 10, pady = 20,command = InputBasketFrame)\r\n\r\n\r\n\tCustomerDataShowButton = Button(FrontPage, text = \"Show Customer Data\", padx = 0, pady = 20, command = ShowCustomerFrameDisplay)\r\n\tBasketDataShowButton = Button(FrontPage, text = \"Show Basket Data\", padx = 10, pady = 20, command = ShowItemFrameDisplay)\r\n\r\n\r\n\tCustomerEditButton = Button(FrontPage, text = \"Edit Customer Data\", padx = 0, pady = 20, command = EditCustomerFrame)\r\n\tBasketEditButton = Button(FrontPage, text = \"Edit Basket Data\", padx = 10, pady = 20, command = EditBasketFrame)\r\n\r\n\r\n\r\n\t# put First page on the screen\r\n\tCustomerLabel.grid(row = 2, column = 1)\r\n\tBasketLabel.grid(row = 2, column = 0)\r\n\tGreeting.grid(row = 1, column = 0,columnspan = 2)\r\n\tGreetImgLabel.grid(row = 0 , column = 0, columnspan = 2)\r\n\tCustomerDataSubmitButton.grid(row = 3, column = 0, pady = 20)\r\n\tBasketDataSubmitButton.grid(row = 3, column = 1, pady = 20)\r\n\tCustomerDataShowButton.grid(row = 2, column = 0, pady = 20)\r\n\tBasketDataShowButton.grid(row = 2, column = 1, pady = 20)\r\n\tCustomerEditButton.grid(row = 4, column = 0, pady = 20)\r\n\tBasketEditButton.grid(row = 4, column = 1, pady = 
20)\r\n\r\n\r\nFrontPageFrame()\r\n\r\n\r\nFrontPage.grid()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# something is wrong with the image in the front page frame so it is not showing\r\nGreetImg= ImageTk.PhotoImage(Image.open(\"file-delivery.png\"))\r\nGreetImgLabel = Label(FrontPage, image = GreetImg)\r\nGreetImgLabel.grid(row = 0,column = 0, columnspan = 2)\r\n\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"FinalProject.py","file_name":"FinalProject.py","file_ext":"py","file_size_in_byte":23633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"146163834","text":"# Implement the int sqrt(int x) function.\r\n# Compute and return the square root of x, where x is a non-negative integer. Since the return type is an integer, only the integer part of the result is kept and the fractional part is discarded.\r\n\r\ndef mySqrt(x):\r\n    if x == 0:\r\n        return 0\r\n    begin = 1\r\n    end = x\r\n    while begin <= end:\r\n        mid = begin + (end-begin)//2\r\n        s = mid**2\r\n        if s <= x < (mid+1)**2:\r\n            return mid\r\n        elif s > x:\r\n            end = mid\r\n        else:\r\n            begin = mid + 1\r\n\r\nif __name__ == \"__main__\":\r\n    x = 1\r\n    print(mySqrt(x))\r\n","sub_path":"每日一题/69.x的平方根.py","file_name":"69.x的平方根.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"438394612","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom typing import List, Optional\n\nfrom django.db import transaction\n\nfrom backend.apps.policy.models import Policy as PolicyModel\nfrom backend.component import iam\nfrom backend.util.json import json_dumps\n\nfrom ..models import Policy, PolicyIDExpiredAt, Subject\nfrom .query import PolicyList, new_backend_policy_list_by_subject\n\n\nclass PolicyOperationService:\n    def delete_by_ids(self, system_id: str, subject: Subject, policy_ids: List[int]):\n        \"\"\"\n        Delete the policies with the given policy IDs\n        \"\"\"\n        with transaction.atomic():\n            self._delete_db_policies(system_id, subject, policy_ids)\n            iam.delete_policies(system_id, subject.type, subject.id, policy_ids)\n\n    def alter(\n        self,\n        system_id: str,\n        subject: Subject,\n        create_policies: Optional[List[Policy]] = None,\n        update_policies: Optional[List[Policy]] = None,\n        delete_policy_ids: Optional[List[int]] = None,\n    ):\n        \"\"\"\n        Alter the subject's policies\n        \"\"\"\n        create_policies = create_policies or []\n        update_policies = update_policies or []\n        delete_policy_ids = delete_policy_ids or []\n\n        with transaction.atomic():\n            if create_policies:\n                self._create_db_policies(system_id, subject, create_policies)\n\n            if update_policies:\n                self._update_db_policies(system_id, subject, update_policies)\n\n            if delete_policy_ids:\n                self._delete_db_policies(system_id, subject, delete_policy_ids)\n\n            if create_policies or update_policies or delete_policy_ids:\n                self._alter_backend_policies(system_id, subject, create_policies, update_policies, delete_policy_ids)\n\n            if create_policies:\n                self._sync_db_policy_id(system_id, subject)\n\n    def _alter_backend_policies(\n        self,\n        system_id: str,\n        subject: Subject,\n        create_policies: List[Policy],\n        update_policies: List[Policy],\n        delete_policy_ids: List[int],\n    ):\n        \"\"\"\n        Create, update and delete policies in one call to the backend batch-operation API\n        \"\"\"\n        # assemble the policy-change payload for the backend\n        backend_create_policies = [p.to_backend_dict() for p in create_policies]\n        backend_update_policies = [p.to_backend_dict() for p in update_policies]\n\n        return iam.alter_policies(\n            system_id, subject.type, subject.id, backend_create_policies, backend_update_policies, delete_policy_ids\n        )\n\n    def _create_db_policies(self, system_id: str, subject: Subject, policies: List[Policy]) -> None:\n        \"\"\"\n        Create new policies\n        \"\"\"\n        db_policies = [p.to_db_model(system_id, subject) for p in policies]\n        PolicyModel.objects.bulk_create(db_policies, batch_size=100)\n\n    def _update_db_policies(self, system_id: str, subject: Subject, policies: List[Policy]) -> None:\n        \"\"\"\n        Update existing policies\n        \"\"\"\n        policy_list = PolicyList(policies)\n\n        db_policies = PolicyModel.objects.filter(\n            subject_id=subject.id, subject_type=subject.type, system_id=system_id, policy_id__in=policy_list.ids\n        ).only(\"id\", \"action_id\")\n\n        # update by primary key to avoid deadlocks\n        for p in db_policies:\n            update_policy = policy_list.get(p.action_id)\n            if not update_policy:\n                continue\n            PolicyModel.objects.filter(id=p.id).update(\n                _resources=json_dumps([rt.dict() for rt in update_policy.related_resource_types])\n            )\n\n    def _delete_db_policies(self, system_id: str, subject: Subject, policy_ids: List[int]):\n        \"\"\"\n        Delete the DB policies\n        \"\"\"\n        PolicyModel.objects.filter(\n            system_id=system_id, subject_type=subject.type, subject_id=subject.id, policy_id__in=policy_ids\n        ).delete()\n\n    def _sync_db_policy_id(self, system_id: str, subject: Subject) -> None:\n        \"\"\"\n        Sync policy_id from the backend policies to the SaaS policies\n        \"\"\"\n        db_policies = PolicyModel.objects.filter(\n            system_id=system_id,
subject_type=subject.type, subject_id=subject.id, policy_id=0\n        ).defer(\"_resources\", \"_environment\")\n\n        if len(db_policies) == 0:\n            return\n\n        backend_policy_list = new_backend_policy_list_by_subject(system_id, subject)\n        for p in db_policies:\n            backend_policy = backend_policy_list.get(p.action_id)\n            if not backend_policy:\n                continue\n            p.policy_id = backend_policy.id\n\n        PolicyModel.objects.bulk_update(db_policies, fields=[\"policy_id\"], batch_size=100)\n\n    def renew(self, subject: Subject, thin_policies: List[PolicyIDExpiredAt]):\n        \"\"\"\n        Renew the expiration time of policies\n        \"\"\"\n        iam.update_policy_expired_at(subject.type, subject.id, [one.dict() for one in thin_policies])\n","sub_path":"saas/backend/service/policy/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287932716","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/boduch/constant/priority.py\n# Compiled at: 2009-08-14 17:29:31\n\"\"\"This module defines constants related to handle priority.\"\"\"\nPRIORITY_CRITICAL = 40\nPRIORITY_MAJOR = 30\nPRIORITY_MINOR = 20\nPRIORITY_TRIVIAL = 10\n__all__ = [\n    'PRIORITY_CRITICAL', 'PRIORITY_MAJOR', 'PRIORITY_MINOR',\n    'PRIORITY_TRIVIAL']","sub_path":"pycfiles/boduch-0.2.1-py2.5/priority.py","file_name":"priority.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"25113794","text":"__author__ = 'Maksymilian Mika'\r\n\r\nimport sys, pygame\r\nimport Display\r\n\r\npygame.init()\r\npygame.font.init()\r\nsize = width, height = 60*8, 60*8\r\nblack = 0, 0, 0\r\nscreen = pygame.display.set_mode(size)\r\n\r\nboard_display = Display.BoardDisplay()\r\n\r\nset = False\r\n\r\nwhile 1:\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT: sys.exit()\r\n        if event.type == pygame.MOUSEBUTTONDOWN:\r\n            board_display.dragged(pygame.mouse.get_pos())\r\n        if event.type == pygame.MOUSEBUTTONUP:\r\n            board_display.dropped(pygame.mouse.get_pos())\r\n\r\n    \r\n    screen.fill(black)\r\n    board_display.display(screen)\r\n    pygame.display.flip()\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570720086","text":"from argparse import Namespace\nfrom base64 import b64decode\nfrom functools import wraps\nfrom typing import Optional\n\nfrom flask import Response, request\nfrom flask.json import JSONEncoder\nfrom flask_restful import Resource, abort\nfrom werkzeug.exceptions import HTTPException\n\nfrom api.app import status\nfrom api.app.exc import APIError\nfrom config import config\nfrom db.exc import DBError\nfrom db.models import UserAccessToken\n\n__all__ = (\n    'login_required',\n    'APIResource',\n)\n\n\ndef login_required(func):\n    @wraps(func)\n    def check_authorization(*args, **kwargs):\n        user = APIResource.get_user()\n        if user is None:\n            return abort(403)\n        return func(user, *args, **kwargs)\n\n    return check_authorization\n\n\nclass APIResource(Resource):\n\n    @property\n    def ip_address(self):\n        return request.environ.get('REMOTE_ADDR', None)\n\n    def dispatch_request(self, *args, **kwargs):\n        common_processor = getattr(self, 'common', None)\n        # noinspection PyBroadException\n        try:\n            if common_processor is not
None:\n                ret = common_processor(*args, **kwargs)\n                if not isinstance(ret, (list, tuple)):\n                    args = (ret,)\n            response = super().dispatch_request(*args)\n        except HTTPException as exc:\n            return self.error_response(exc)\n        except DBError as exc:\n            return self.error_response(exc, status.HTTP_400_BAD_REQUEST)\n        except Exception:\n            if config.DEBUG:\n                raise\n            return self.error_response(APIError())\n\n        # unpack an explicit (body, status) pair; blindly unpacking (as before) could\n        # silently split any other two-element iterable, e.g. a dict with two keys\n        if isinstance(response, tuple) and len(response) == 2:\n            response, code = response\n        else:\n            code = status.HTTP_200_OK\n        return response, code\n\n    @classmethod\n    def error_response(cls, exc, status_code=None):\n        if status_code is None:\n            status_code = exc.code\n        description = exc.description\n        data = dict(code=exc.__class__.__name__, description=description)\n        data = JSONEncoder().encode(data)\n        resp = Response(\n            response=data,\n            status=status_code,\n            mimetype='application/json'\n        )\n        return resp\n\n    @staticmethod\n    def get_page(args: Namespace, default_page: int = 1):\n        page = args.page\n        return default_page if page is None or page <= 0 else page\n\n    @staticmethod\n    def get_per_page(args: Namespace, default_per_page: int = 10):\n        per_page = args.per_page\n        return default_per_page if per_page is None or per_page <= 0 else per_page\n\n    @staticmethod\n    def get_token() -> Optional[str]:\n        authorization = request.headers.get('Authorization', '')\n        if not authorization:\n            return None\n\n        try:\n            bearer = authorization.split(' ')[1]\n        except IndexError:\n            return None\n\n        try:\n            return b64decode(bearer).decode()\n        except (UnicodeEncodeError, ValueError):\n            return None\n\n    @staticmethod\n    def get_user(token: Optional[str] = None):\n        if token is None:\n            token = APIResource.get_token()\n        return UserAccessToken.get_user_by_token(token)\n","sub_path":"api/app/resources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162078122","text":"from django import forms\nfrom .models import Tienda, Region, Ciudad,Producto,Lista\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.forms.models import inlineformset_factory\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\nclass TiendaForm(forms.ModelForm):\n\n    class Meta:\n        model = Tienda\n        fields = ('nombre','sucursal','direccion','region','ciudad')\n    \n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields['ciudad'].queryset = Ciudad.objects.none()\n\n        if 'region' in self.data:\n            try:\n                region_id = int(self.data.get('region'))\n                self.fields['ciudad'].queryset = Ciudad.objects.filter(region_id=region_id).order_by('nombre')\n            except (ValueError, TypeError):\n                pass  # invalid input from the client; ignore and fallback to empty Ciudad queryset\n        elif self.instance.pk:\n            self.fields['ciudad'].queryset = self.instance.region.ciudad_set.order_by('nombre')\n    \n\nclass ListaForm(forms.ModelForm):\n    class Meta:\n        model = Lista\n    \n        fields = ['nombre']\n\n    \n    def __init__(self, *args, **kwargs):\n        super(ListaForm, self).__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class': 'form-control'\n            })\n    \nclass ProductoForm(forms.ModelForm):\n\n\n    class Meta:\n        model = Producto\n        fields = ['nombre','costoPresupuestado','costoReal','notasAdicionales', 'estado','tienda','lista']\n\n    def __init__(self, *args, **kwargs):\n        super(ProductoForm, self).__init__(*args, **kwargs)\n
for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class': 'form-control'\n            })\n\nclass DetalleProductoForm(forms.ModelForm):\n    class Meta:\n        model = Producto\n        fields = ['nombre','costoPresupuestado','costoReal','notasAdicionales', 'estado','tienda']\n\n    def __init__(self, *args, **kwargs):\n        super(DetalleProductoForm, self).__init__(*args, **kwargs)\n        # make the descriptive fields read-only when editing a saved Producto;\n        # the old per-field loop checked a dummy attribute and re-ran the same code on every pass\n        instance = getattr(self, 'instance', None)\n        if instance is not None and instance.pk:\n            self.fields['nombre'].widget.attrs['readonly'] = True\n            self.fields['costoPresupuestado'].widget.attrs['readonly'] = True\n            self.fields['notasAdicionales'].widget.attrs['readonly'] = True\n            self.fields['tienda'].widget.attrs['disabled'] = True\n        \n    def clean_nombre(self):\n        instance = getattr(self, 'instance', None)\n        if instance and instance.pk:\n            return instance.nombre\n        else:\n            return self.cleaned_data['nombre']\n    \n    \n    \n    \nDetalleCompraFormSet = inlineformset_factory(Lista, Producto, form=ProductoForm, extra=5)\n\n\nDetalleCompraFormSetV2 = inlineformset_factory(Lista, Producto, form=DetalleProductoForm, extra=10)\n\n\n\n    \n\n\n    \n    \n\n\n    \n\n\n\n\n\n\n","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"190589281","text":"#!/usr/bin/python3\n\"\"\" start app\"\"\"\nfrom models import storage\nfrom api.v1.views import app_views\nfrom flask import Flask, jsonify\nfrom os import environ\n\n\napp = Flask(__name__)\napp.register_blueprint(app_views)\n\n\n@app.teardown_appcontext\ndef close(exception):\n    \"\"\"call storage.close at the end of each request\"\"\"\n    storage.close()\n\n\n@app.errorhandler(404)\ndef error_not_found(e):\n    return {\"error\": \"Not found\"}, 404\n\nif __name__ == \"__main__\":\n    host = environ.get('HBNB_API_HOST', '0.0.0.0')\n    port = environ.get('HBNB_API_PORT', '5000')\n    app.run(host=host, port=port, threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313290091","text":"# Name: William W\r\n# Period 6\r\n# Dice Rolling Simulator\r\n\r\nimport random\r\n\r\nrollnums = [0, 0, 0, 0, 0, 0]\r\n\r\n# main code\r\nnumber = int(input(\"How many rolls?
\"))\n\nfor i in range(number):\n roll = random.randint(1, 6)\n print(str(i + 1) + \": Rolled \" + str(roll))\n rollnums[roll - 1] += 1\n\nprint(\"Total Rolls: \" + str(number + 1))\n\nfor i in range(6):\n print(str(i + 1) + \": \" + str(rollnums[i]))\n\nprint(\"\\nPercentages:\")\nfor i in range(6):\n print(str(i + 1) + \": \" + str(rollnums[i] / 10 * 100))","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"614857575","text":"from functools import wraps\nfrom datetime import timedelta\nfrom time import sleep\n\n\ndef with_delay(total_delay: timedelta,\n sleep_delay: timedelta,\n to_catch=(AssertionError, ),\n strict=True):\n \"\"\"\n Description:\n Decorator for cycling execution of a function till it returns without throwing any error.\n It can be used for functions that should wait an input, variable change and so on.\n\n When original function is called, if it returns a result, it is returned from a decorator.\n If in same call any assertion that is listed in to_catch is raised, original function will be called\n once again in sleep_delay.\n After total_delay if error from to_catch does not disappear, actual error will be risen.\n If original function throws error not listed in to_catch list, it will be propagated outside the decorator.\n\n :param total_delay: Total delay to perform repeated function call.\n :param sleep_delay: Sleep time between function calls.\n :param to_catch: Exceptions to ignore to continue cycling.\n :param strict: If True than can raise any function exception not listed in to_catch list.\n If False, then all exception (based on Exception) will be suppressed.\n\n Usage:\n with_delay(timedelta(minutes=3), timedelta(seconds=30))\n def function():\n some_bool_arg = get_some_bool_arg()\n assert some_bool_arg is True\n\n In this case will be performed validation multiple times until some_bool_arg becomes True.\n Every 30 seconds and during 3 minutes function will be executed periodically until timeout.\n After timeout will try to execute function without catching an exception.\n \"\"\"\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n end_time = datetime.today() + total_delay\n while datetime.today() < end_time:\n try:\n return function(*args, **kwargs)\n except to_catch: # catch only selected exceptions\n sleep(sleep_delay.total_seconds())\n except Exception: # catch remaining exceptions\n if strict is True:\n raise\n else:\n sleep(sleep_delay.total_seconds())\n # execute original function without try-except block\n # as total_delay was passed\n return function(*args, **kwargs)\n return wrapper\n return decorator\n\n\nif __name__ == '__main__':\n import unittest\n from datetime import datetime\n\n class TestCase(unittest.TestCase):\n # todo mock sleep function\n def test_timeout(self):\n \"\"\"\n Expected sequence: f(), sleep(0.7), f(), sleep(0.7), f() raises AssertionError.\n \"\"\"\n @with_delay(timedelta(seconds=1), timedelta(seconds=0.7))\n def f():\n raise AssertionError\n start = datetime.today()\n with self.assertRaises(AssertionError):\n f()\n end = datetime.today()\n delta = end - start\n # expected delta: 0.7[s] * 2 = 1.4[s]\n # tolerance: 0.1[s]\n self.assertTrue(timedelta(seconds=1.3) <= delta <= timedelta(seconds=1.5))\n\n def test_no_timeout(self):\n \"\"\"\n Expected sequence: f(), sleep(0.7), f() returns None\n \"\"\"\n c = 0\n @with_delay(timedelta(seconds=1), timedelta(seconds=0.7))\n 
def f():\n                nonlocal c\n                c += 1\n                if c <= 1:  # the first call raises AssertionError\n                    raise AssertionError\n                # return None\n            start = datetime.today()\n            f()\n            end = datetime.today()\n            delta = end - start\n            # expected delta: 0.7[s]\n            # tolerance: 0.1[s]\n            self.assertTrue(timedelta(seconds=0.6) <= delta <= timedelta(seconds=0.8))\n\n        def test_no_delay(self):\n            \"\"\"\n            Expected sequence: f() returns None\n            \"\"\"\n            @with_delay(timedelta(seconds=1), timedelta(seconds=0.7))\n            def f():\n                pass\n            start = datetime.today()\n            f()\n            end = datetime.today()\n            delta = end - start\n            # expected delta: 0[s]\n            # tolerance: 0.1[s]\n            self.assertTrue(delta <= timedelta(seconds=0.1))\n\n    unittest.main()\n","sub_path":"util/with_delay.py","file_name":"with_delay.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372450676","text":"import numpy as np\r\n\r\ndef Linear_Regression(x, y):\r\n    \r\n    x_mean = np.mean(x)\r\n    y_mean = np.mean(y)\r\n\r\n    sum_of_cross_deviations_YX = np.sum(x * y) - len(x) * y_mean * x_mean\r\n    sum_of_squared_deviations_X = np.sum(x**2) - len(x) * x_mean * x_mean\r\n\r\n    beta1 = sum_of_cross_deviations_YX / sum_of_squared_deviations_X\r\n    beta0 = y_mean - beta1 * x_mean\r\n    estimatedY = beta0 + beta1 * x\r\n    \r\n    return (estimatedY, beta0, beta1)\r\n    \r\n\r\n","sub_path":"regression_function.py","file_name":"regression_function.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349790164","text":"#!/usr/bin/env python3\n\nimport argparse\nimport asyncio\nimport logging\n\nimport capnp\nimport calculator_capnp\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nasync def evaluate_impl(expression, params=None):\n    \"\"\"Implementation of CalculatorImpl::evaluate(), also shared by\n    FunctionImpl::call().
In the latter case, `params` are the parameter\n values passed to the function; in the former case, `params` is just an\n empty list.\"\"\"\n\n which = expression.which()\n\n if which == \"literal\":\n return expression.literal\n elif which == \"previousResult\":\n return (await expression.previousResult.read()).value\n elif which == \"parameter\":\n assert expression.parameter < len(params)\n return params[expression.parameter]\n elif which == \"call\":\n call = expression.call\n func = call.function\n\n # Evaluate each parameter.\n paramPromises = [evaluate_impl(param, params) for param in call.params]\n vals = await asyncio.gather(*paramPromises)\n\n # When the parameters are complete, call the function.\n result = await func.call(vals)\n return result.value\n else:\n raise ValueError(\"Unknown expression type: \" + which)\n\n\nclass ValueImpl(calculator_capnp.Calculator.Value.Server):\n \"Simple implementation of the Calculator.Value Cap'n Proto interface.\"\n\n def __init__(self, value):\n self.value = value\n\n async def read(self, **kwargs):\n return self.value\n\n\nclass FunctionImpl(calculator_capnp.Calculator.Function.Server):\n\n \"\"\"Implementation of the Calculator.Function Cap'n Proto interface, where the\n function is defined by a Calculator.Expression.\"\"\"\n\n def __init__(self, paramCount, body):\n self.paramCount = paramCount\n self.body = body.as_builder()\n\n async def call(self, params, _context, **kwargs):\n \"\"\"Note that we're returning a Promise object here, and bypassing the\n helper functionality that normally sets the results struct from the\n returned object. Instead, we set _context.results directly inside of\n another promise\"\"\"\n\n assert len(params) == self.paramCount\n return await evaluate_impl(self.body, params)\n\n\nclass OperatorImpl(calculator_capnp.Calculator.Function.Server):\n\n \"\"\"Implementation of the Calculator.Function Cap'n Proto interface, wrapping\n basic binary arithmetic operators.\"\"\"\n\n def __init__(self, op):\n self.op = op\n\n async def call(self, params, **kwargs):\n assert len(params) == 2\n\n op = self.op\n\n if op == \"add\":\n return params[0] + params[1]\n elif op == \"subtract\":\n return params[0] - params[1]\n elif op == \"multiply\":\n return params[0] * params[1]\n elif op == \"divide\":\n return params[0] / params[1]\n else:\n raise ValueError(\"Unknown operator\")\n\n\nclass CalculatorImpl(calculator_capnp.Calculator.Server):\n \"Implementation of the Calculator Cap'n Proto interface.\"\n\n async def evaluate(self, expression, _context, **kwargs):\n return ValueImpl(await evaluate_impl(expression))\n\n async def defFunction(self, paramCount, body, _context, **kwargs):\n return FunctionImpl(paramCount, body)\n\n async def getOperator(self, op, **kwargs):\n return OperatorImpl(op)\n\n\nasync def new_connection(stream):\n await capnp.TwoPartyServer(stream, bootstrap=CalculatorImpl()).on_disconnect()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n usage=\"\"\"Runs the server bound to the given address/port ADDRESS. 
\"\"\"\n )\n\n parser.add_argument(\"address\", help=\"ADDRESS:PORT\")\n\n return parser.parse_args()\n\n\nasync def main():\n host, port = parse_args().address.split(\":\")\n server = await capnp.AsyncIoStream.create_server(new_connection, host, port)\n async with server:\n await server.serve_forever()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","sub_path":"examples/async_calculator_server.py","file_name":"async_calculator_server.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601285860","text":"import random\n\na = random.randint(1, 20)\nprint(\"请输入你要猜的数字\")\nb = 0\nwhile a != b:\n c = input()\n b = int(c)\n if b < a:\n print(\"你输入小了\")\n elif b > a:\n print(\"你输入大了\")\nelse:\n print(\"猜对了\")\n","sub_path":"0324/text01/baiyunfucecond02.py","file_name":"baiyunfucecond02.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256037209","text":"#!/usr/bin/python3\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport csv\nfrom pprint import pprint\nfrom config import headers as h\nfrom time import sleep\nimport psycopg2\nimport datetime\nimport json\nimport re\nfrom selenium import webdriver\n\n\nb = webdriver.Chrome()\nb.implicitly_wait(15)\n\n\ndef get_html(url):\n b.get(url)\n sleep(5)\n return b.page_source\n\n\n# Возвращает 6 категорий товаров\ndef get_store_menu(html):\n store_menu = []\n soup = bs(html, 'lxml')\n store_menu = soup.find('div', id='store.menu').find('nav', class_='navigation').find('ul').find_all('li')\n for store in store_menu:\n store = store.find('a').get('href')\n store_menu.append(store)\n return store_menu\n\n\n# Return list cards from page\ndef get_cards(html):\n list_cards = []\n soup = bs(html, 'lxml')\n cards = soup.find('div', class_='products wrapper grid products-grid').find('ol').find_all('li')\n for card in cards:\n card = card.find('a').get('href')\n print(card)\n list_cards.append(card)\n return list_cards\n\nlist_flex = []\ndef get_data(html):\n soup = bs(html, 'lxml')\n name = soup.find('div', class_='category-colors').find('div', class_='page-title-wrapper product').text.strip()\n print(name)\n specifications = soup.find('div', class_='specifications').text.strip().replace('\\n', '').replace(' ', ' ')\n print(specifications)\n available = soup.find('div', class_='specifications').find('a').get('href')\n list_flex.append(available)\n available_text = soup.find('div', class_='specifications').find('a').text.strip()\n print(available_text)\n overview = soup.find('div', class_='product attribute desktop overview').find('div').text.strip()\n print(overview)\n scheme_picture = soup.find('div', class_='product-info-cross-section-image desktop').find('img').get('src')\n print(scheme_picture)\n banner = soup.find('div', class_='usp-banner').find('div', class_='row').find('div').text.strip().replace('\\n', ' ')\n print(banner)\n\n main_pic = soup.find('div', class_='gallery-placeholder')\\\n .find('div', attrs={'data-gallery-role':'gallery'}).find('div', class_='fotorama__stage__shaft')\\\n .find('div').get('href')\n print(main_pic)\n\n\n\ndef main():\n headers = h\n '''\n url = 'https://oracdecor.ru/plintusy?p=2'\n get_cards(get_html(url, headers))\n '''\n urls = [\n 'https://oracdecor.ru/plintusy/sx138f_skirting',\n 'https://oracdecor.ru/karnizy/c391_cornice_moulding',\n 'https://oracdecor.ru/moldingi/p4020_panel_moulding',\n 
'https://oracdecor.ru/skrytoe-osveschenie/c380_uplighter'\n ]\n for url in urls:\n get_data(get_html(url))\n print('---------')\n\n\nif __name__ == '__main__':\n main()","sub_path":"components/oracdecor/selenium_oradecor.py","file_name":"selenium_oradecor.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"179965747","text":"import logging\nimport time\nimport pandas as pd\nfrom ibapi.utils import iswrapper\nfrom ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\n# types\nfrom ibapi.common import * # @UnusedWildImport\nfrom ibapi.contract import * # @UnusedWildImport\n\n# https://stackoverflow.com/questions/41510945/interactive-brokers-obtain-historical-data-of-opt-midpoint-and-trades\n# https://groups.io/g/twsapi/topic/data_for_expired_contracts_no/4042776?p=\n\nclass TestApp(EWrapper, EClient):\n def __init__(self):\n EWrapper.__init__(self)\n EClient.__init__(self, wrapper=self)\n self.data = [] # Initialize variable to store candle\n self.contract = Contract()\n\n def nextValidId(self, orderId: int):\n # we can start now\n self.start()\n\n def start(self):\n self.historicalDataOperations_req()\n print(\"Executing requests ... finished\")\n\n def historicalDataOperations_req(self):\n self.contract.symbol = \"TQQQ\"\n self.contract.secType = \"OPT\"\n self.contract.exchange = \"SMART\"\n self.contract.currency = \"USD\"\n self.contract.lastTradeDateOrContractMonth = \"20210730\"\n self.contract.strike = 128\n self.contract.right = \"C\"\n self.contract.multiplier = \"100\"\n\n self.reqHistoricalData(4103, self.contract, '',\n \"2 D\", \"1 hour\", \"MIDPOINT\", 1, 1, False, [])\n\n # https://interactivebrokers.github.io/tws-api/historical_bars.html\n\n def historicalData(self, reqId: int, bar: BarData):\n self.data.append([reqId, bar])\n #print(\"HistoricalData. 
ReqId:\", reqId, \"BarData.\", bar)\n\n df = pd.DataFrame(self.data)\n print(df)\n df.to_csv('history.csv')\n self.disconnect()\n\ndef main():\n app = TestApp()\n app.connect(\"127.0.0.1\", port=7497, clientId=102)\n print(\"serverVersion:%s connectionTime:%s\" % (app.serverVersion(), app.twsConnectionTime()))\n app.run()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"options/historical_options_bars_clean.py","file_name":"historical_options_bars_clean.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411953403","text":"from django import forms\nfrom django.forms.formsets import formset_factory\nfrom django.forms.models import modelformset_factory\nfrom .models import *\n\n\nclass AddIngredientsForm(forms.ModelForm):\n class Meta:\n model = Ingredients\n fields = ('ingredient_name',)\n\n\nAddIngredientsFormSet = formset_factory(AddIngredientsForm, extra=5)\n\nAddIngredientsInOrderFormSet = modelformset_factory(Ingredients, form=AddIngredientsForm, fields=('ingredient_name', 'count'))\n\n\nclass AddDishForm(forms.ModelForm):\n class Meta:\n model = Dish\n fields = ('dish_name',)\n exclude = ('ingredients', 'author')\n\n\nclass AddOrderForm(forms.ModelForm):\n class Meta:\n model = Order\n exclude = ('dish', 'author', 'ingredients')\n","sub_path":"mymenu/menu/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"58619612","text":"import json\nimport requests\nfrom parse import parse\nimport warnings\n\nheaders = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0'}\n\nwarnings.filterwarnings(\"ignore\")\napikey = \"\"\ndef get_message():\n\tr = requests.post(\"https://www.hackthebox.eu/api/shouts/get/initial/html/100?api_token=\"+apikey)\n\tjs = json.loads(r.content)\n\thtml = js['html']\n\tlastmsg = html[99]\n\tfor x in html:\n\t\tif 'https://www.hackthebox.eu/storage/avatars/' in x:\n\t\t\tparse(x)\n\nquite = True\n\ndef get_last_message(quite):\n\twhile True:\n\t\tr = requests.post(\"https://www.hackthebox.eu/api/shouts/get/initial/html/1?api_token=\"+apikey)\n\t\tjs = json.loads(r.content)\n\t\thtml = js['html'][0]\n\t\tif quite == True:\n\t\t\tif 'https://www.hackthebox.eu/storage/avatars/' in html:\n\t\t\t\tif html != lastmsg:\n\t\t\t\t\tparse(html)\n\t\t\t\t\tlastmsg = html\n\t\telse:\n\t\t\tif html != lastmsg:\n\t\t\t\tparse(html)\n\t\t\t\tlastmsg = html\n","sub_path":"chats.py","file_name":"chats.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540170182","text":"\"\"\"pytest configuration.\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport os.path\nimport datetime\nimport re\nfrom random import getrandbits\nimport zlib\nimport subprocess\nimport shutil\nimport tempfile\nimport logging\nfrom distutils.version import StrictVersion as Version\nimport pytest\nimport icat\nimport icat.config\ntry:\n from suds.sax.date import UtcTimezone\nexcept ImportError:\n UtcTimezone = None\n\n\n# Note that pytest captures stderr, so we won't see any logging by\n# default. 
But since Suds uses logging, it's better to still have\n# a well defined basic logging configuration in place.\nlogging.basicConfig(level=logging.INFO)\n\ntestdir = os.path.dirname(__file__)\n\n\n# ============================= helper ===============================\n\nif sys.version_info < (3, 0):\n def buf(seq):\n return buffer(bytearray(seq))\nelse:\n def buf(seq):\n return bytearray(seq)\n\nclass DummyDatafile(object):\n \"\"\"A dummy file with random content to be used for test upload.\n \"\"\"\n def __init__(self, directory, name, size, date=None):\n if date is not None:\n date = (date, date)\n self.name = name\n self.fname = os.path.join(directory, name)\n chunksize = 8192\n crc32 = 0\n with open(self.fname, 'wb') as f:\n while size > 0:\n if chunksize > size:\n chunksize = size\n chunk = buf(getrandbits(8) for _ in range(chunksize))\n size -= chunksize\n crc32 = zlib.crc32(chunk, crc32)\n f.write(chunk)\n if date:\n os.utime(self.fname, date)\n self.crc32 = \"%x\" % (crc32 & 0xffffffff)\n self.stat = os.stat(self.fname)\n self.size = self.stat.st_size\n if UtcTimezone:\n mtime = int(self.stat.st_mtime)\n self.mtime = datetime.datetime.fromtimestamp(mtime, UtcTimezone())\n else:\n self.mtime = None\n\nclass tmpSessionId:\n \"\"\"Temporarily switch to another sessionId in an ICAT client.\n \"\"\"\n def __init__(self, client, sessionId):\n self.client = client\n self.saveSessionId = client.sessionId\n self.sessionId = sessionId\n def __enter__(self):\n self.client.sessionId = self.sessionId\n return self.client\n def __exit__(self, type, value, tb):\n self.client.sessionId = self.saveSessionId\n\n\ndef gettestdata(fname):\n fname = os.path.join(testdir, \"data\", fname)\n assert os.path.isfile(fname)\n return fname\n\n\ndef getConfig(confSection=\"root\", **confArgs):\n \"\"\"Get the configuration, skip on ConfigError.\n \"\"\"\n confFile = os.path.join(testdir, \"data\", \"icat.cfg\")\n if not os.path.isfile(confFile):\n pytest.skip(\"no test ICAT server configured\")\n try:\n args = [\"-c\", confFile, \"-s\", confSection]\n conf = icat.config.Config(**confArgs).getconfig(args)\n conf.cmdargs = [\"-c\", conf.configFile[0], \"-s\", conf.configSection]\n return conf\n except icat.ConfigError as err:\n pytest.skip(err.message)\n\n\ndef get_icat_version():\n conf = getConfig(needlogin=False)\n client = icat.Client(conf.url, **conf.client_kwargs)\n return client.apiversion\n\n# ICAT server version we talk to. Ignore any errors from\n# get_icat_version(), if something fails (e.g. 
no server is configured\n# at all), set a dummy zero version number.\ntry:\n icat_version = get_icat_version()\nexcept:\n icat_version = Version(\"0.0\")\n\ndef require_icat_version(minversion, reason):\n if icat_version < minversion:\n pytest.skip(\"need ICAT server version %s or newer: %s\" \n % (minversion, reason))\n\n\ndef callscript(scriptname, args, stdin=None, stdout=None, stderr=None):\n script = os.path.join(testdir, \"scripts\", scriptname)\n cmd = [sys.executable, script] + args\n print(\"\\n>\", *cmd)\n subprocess.check_call(cmd, stdin=stdin, stdout=stdout, stderr=stderr)\n\n\nyaml_filter = (re.compile(r\"^# (Date|Service|ICAT-API|Generator): .*$\"),\n r\"# \\1: ###\")\nxml_filter = (re.compile(r\"^\\s*<(date|service|apiversion|generator)>.*$\"),\n r\" <\\1>###\")\n\ndef filter_file(infile, outfile, pattern, repl):\n \"\"\"Filter a text file.\n\n This may be needed to compare some test output file with\n predefined results, because some information in the file might not\n depend on the actual test but rather dynamically change with each\n call. Such as the header of a dump file that contains date and\n ICAT version.\n \"\"\"\n with open(infile, 'rt') as inf, open(outfile, 'wt') as outf:\n while True:\n l = inf.readline()\n if not l:\n break\n l = re.sub(pattern, repl, l)\n outf.write(l)\n\n\n# ============================ fixtures ==============================\n\n# Deliberately not using the 'tmpdir' fixture provided by pytest,\n# because it seem to use a predictable directory name in /tmp wich is\n# insecure.\n\nclass TmpDir(object):\n \"\"\"Provide a temporary directory.\n \"\"\"\n def __init__(self):\n self.dir = tempfile.mkdtemp(prefix=\"python-icat-test-\")\n def __del__(self):\n self.cleanup()\n def cleanup(self):\n if self.dir:\n shutil.rmtree(self.dir)\n self.dir = None\n\n@pytest.fixture(scope=\"session\")\ndef tmpdirsec(request):\n tmpdir = TmpDir()\n request.addfinalizer(tmpdir.cleanup)\n return tmpdir\n\n\n@pytest.fixture(scope=\"session\")\ndef standardConfig():\n return getConfig()\n\n\ntestcontent = gettestdata(\"icatdump.yaml\")\n\n@pytest.fixture(scope=\"session\")\ndef setupicat(standardConfig):\n require_icat_version(\"4.4.0\", \"need InvestigationGroup\")\n callscript(\"wipeicat.py\", standardConfig.cmdargs)\n args = standardConfig.cmdargs + [\"-f\", \"YAML\", \"-i\", testcontent]\n callscript(\"icatingest.py\", args)\n\n\n# ============================= hooks ================================\n\nclass DependencyItemStatus(object):\n \"\"\"Status of a test item in a dependency manager.\n \"\"\"\n\n Phases = ('setup', 'call', 'teardown')\n\n def __init__(self):\n self.results = { w:None for w in self.Phases }\n\n def __str__(self):\n l = [\"%s: %s\" % (w, self.results[w]) for w in self.Phases]\n return \"Status(%s)\" % \", \".join(l)\n\n def addResult(self, rep):\n self.results[rep.when] = rep.outcome\n\n def isSuccess(self):\n return list(self.results.values()) == ['passed', 'passed', 'passed']\n\nclass DependencyManager(object):\n \"\"\"Dependency manager, stores the results of tests.\n \"\"\"\n\n ScopeCls = {'module':pytest.Module, 'session':pytest.Session}\n\n @classmethod\n def getManager(cls, item, scope='module'):\n \"\"\"Get the DependencyManager object from the node at scope level.\n Create it, if not yet present.\n \"\"\"\n node = item.getparent(cls.ScopeCls[scope])\n if not hasattr(node, 'dependencyManager'):\n node.dependencyManager = cls()\n return node.dependencyManager\n\n def __init__(self):\n self.results = {}\n\n def addResult(self, item, 
marker, rep):\n        name = marker.kwargs.get('name')\n        if not name:\n            name = item.name\n        status = self.results.setdefault(name, DependencyItemStatus())\n        status.addResult(rep)\n\n    def checkDepend(self, depends):\n        for i in depends:\n            if not(i in self.results and self.results[i].isSuccess()):\n                pytest.skip(\"depends on %s\" % i)\n\n\n@pytest.hookimpl(tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n    \"\"\"Store the test outcome if this item is marked \"dependency\".\n    \"\"\"\n    outcome = yield\n    marker = item.get_marker(\"dependency\")\n    if marker is not None:\n        rep = outcome.get_result()\n        manager = DependencyManager.getManager(item)\n        manager.addResult(item, marker, rep)\n\n\ndef pytest_runtest_setup(item):\n    \"\"\"Check dependencies if this item is marked \"dependency\".\n    Skip if any of the dependencies has not been run successfully.\n    \"\"\"\n    marker = item.get_marker(\"dependency\")\n    if marker is not None:\n        depends = marker.kwargs.get('depends')\n        if depends:\n            manager = DependencyManager.getManager(item)\n            manager.checkDepend(depends)\n\n\ndef require_servertest():\n    if not pytest.config.getoption(\"--servertests\"):\n        pytest.skip(\"need --servertests option to run\")\n\ndef pytest_addoption(parser):\n    parser.addoption(\"--servertests\", action=\"store_true\",\n                     help=\"run tests for testing the server.\")\n\n\ndef pytest_report_header(config):\n    \"\"\"Add information on the icat package used in the tests.\n    \"\"\"\n    modpath = os.path.dirname(os.path.abspath(icat.__file__))\n    if icat_version > \"0.0\":\n        server = icat_version\n    else:\n        server = \"-\"\n    return [ \"python-icat: %s (%s)\" % (icat.__version__, icat.__revision__), \n             \"            %s\" % (modpath),\n             \"icat.server: %s\" % server]\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":8948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71317362","text":"# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nimport pymysql\r\nimport json\r\nimport datetime\r\nimport time\r\nimport unittest\r\nfrom Common.readConfig import *\r\n\r\napi_host=global_var.api_host # test address: 192.168.10.56\r\nheaders1=global_var.headers4 # Fahuobao (shipper) client 18883612485\r\nheaders2=global_var.headers5 # Xiaoer (worker) client 18599937985\r\nheaders3=global_var.headers6 # operations backoffice 18883612485\r\nheaders4=global_var.headers7 # direct-operation system 18883612485\r\nheadersNull=global_var.headersNull # Jujia Xiaoer client 18883612485\r\ngoods=global_var.goods\r\ndb=global_var.db # database 59\r\ndb1=global_var.db1 # database 70\r\narg1=global_var.arg1 # assertion text\r\nPictureid = '5b4ef81bd423d40001f0195e' # picture id\r\n\r\n# product information used when creating the order\r\n\r\n\r\nclass testYiChangFlow(unittest.TestCase):\r\n    def test_a(self):\r\n        '''Create the order'''\r\n        # Fahuobao order-creation API\r\n        url1 = \"http://\" + api_host + \"/ms-fahuobao-order/FhbOrder/saveOrder\"\r\n        global i\r\n        i = datetime.datetime.now()\r\n        print(\"Consignee name: 异常流程测试\"+ str(i.month) + str(i.day)+'-'+str(i.hour)+'-'+str(i.minute))\r\n        data1 = {\r\n            \"businessNo\": \"BSTE02\",\r\n            \"serviceNo\": \"FHB01\",\r\n            \"orderWay\": \"1\",\r\n            \"wokerUserName\": \"\",\r\n            \"wokerPhone\": \"\",\r\n            \"wokerPrice\": \"\",\r\n            \"checked\": \"\",\r\n            \"verfiyType\": \"\",\r\n            \"goods\": goods,\r\n            \"isElevator\": \"\",\r\n            \"predictServiceDate\": \"\",\r\n            \"predictDevliveryDate\": \"\",\r\n            \"memo\": \"\",\r\n            \"isArriva\": 1,\r\n            \"boolCollection\": \"0\",\r\n            \"collectionMoney\": \"\",\r\n            \"collectionMemo\": \"\",\r\n            \"allVolume\": \"1\",\r\n            \"allWeight\": \"1\",\r\n            \"allPackages\": \"1\",\r\n            \"consigneeName\": \"异常流程测试\"+ str(i.month) +
str(i.day)+'-'+str(i.hour)+'-'+str(i.minute),\r\n            \"consigneePhone\": \"18883612485\",\r\n            \"consigneeAddress\": \"231\",\r\n            \"deliveryName\": \"23213\",\r\n            \"deliveryPhone\": \"18883612485\",\r\n            \"provinceNo\": \"430000\",\r\n            \"province\": \"湖南省\",\r\n            \"cityNo\": \"430100\",\r\n            \"city\": \"长沙市\",\r\n            \"districtNo\": \"430103\",\r\n            \"district\": \"天心区\",\r\n            \"deliveryProvinceNo\": \"\",\r\n            \"deliveryProvince\": \"\",\r\n            \"deliveryCityNo\": \"\",\r\n            \"deliveryCity\": \"\",\r\n            \"deliveryDistrictNo\": \"\",\r\n            \"deliveryDistrict\": \"\",\r\n            \"verifyOrderNo\": \"\"\r\n        }\r\n\r\n        request1 = requests.request(\"POST\", url=url1, data = json.dumps(data1) ,headers = headers1)\r\n        print(\"Create order: \" + request1.text)\r\n        time.sleep(2)\r\n        self.assertIn(arg1, request1.text, msg='test field')\r\n\r\n    def test_b(self):\r\n        '''Look up the order id and order number by consignee name, push the order to Tianxin district, and bid with the 185 account'''\r\n        i = datetime.datetime.now()\r\n        consigne_name1 = \"异常流程测试\" + str(i.month) + str(i.day)+'-'+str(i.hour)+'-'+str(i.minute)\r\n\r\n        # look up the order id by the order's consignee name\r\n        sql1 = \"SELECT fhb_order_id,order_no FROM fhb_order_consignee_info a inner join fhb_order b on a.fhb_order_id=b.id WHERE a.consigne_name = '\"+consigne_name1+\"' ORDER BY a.foundtime DESC\"\r\n\r\n        # get a cursor with cursor()\r\n        cursor = db.cursor()\r\n        # execute the SQL statement\r\n        cursor.execute(sql1)\r\n        # fetch all rows\r\n        results = cursor.fetchall()\r\n        # print(results[0])\r\n        # if there are several, take the first order's id\r\n        global orderid,orderno\r\n        orderid = results[0]['fhb_order_id']\r\n        orderno = results[0]['order_no']\r\n        print(\"order id: \" + orderid)\r\n        print(\"order number: \" + orderno)\r\n        db.close()\r\n        # push the order to Tianxin district and bid with the 185 account\r\n        url2 = \"http://\" + api_host + \"/ms-fahuobao-order/bidding/quoted-price\"\r\n        data2 = {\r\n            \"memo\":\"\",\r\n            \"money\":\"200\",\r\n            \"orderId\":orderid\r\n        }\r\n\r\n        request2 = requests.request(\"POST\", url=url2, data=json.dumps(data2), headers=headers2)\r\n        print(\"Bid: \" + request2.text)\r\n        self.assertIn(arg1, request2.text, msg='test field')\r\n    def test_c(self):\r\n        # query the bidding records by worker id\r\n        '''Query the bidding records by worker id'''\r\n        db.connect()\r\n        sql2 = \"SELECT id,fhb_order_id FROM fhb_order_bidding_log WHERE people_user_id ='0cacc658-dd29-40bb-9c69-b2e19677275f' and fhb_order_id= '\"+orderid+\"' ORDER BY foundtime DESC\"\r\n        # get a cursor with cursor()\r\n        cursor2 = db.cursor()\r\n        # execute the SQL statement\r\n        cursor2.execute(sql2)\r\n        # fetch all rows\r\n        results2 = cursor2.fetchall()\r\n        # print(results[0])\r\n        # if there are several, take the first order's id\r\n        global fhb_order_id,jingjiaid\r\n        fhb_order_id=orderid\r\n        jingjiaid = results2[0]['id']\r\n        print(\"order id: \" + fhb_order_id)\r\n        print(\"bid id: \" + jingjiaid)\r\n        # db.close()\r\n    def test_d(self):\r\n        # change the bid amount to 0.01\r\n        '''Change the bid amount to 0.01'''\r\n        sql3 = \"UPDATE fhb_order_bidding_log set money = '0.01' where fhb_order_id = '\" + orderid + \"'\"\r\n        print(sql3)\r\n        cursor3 = db.cursor()\r\n        # execute the SQL statement\r\n        cursor3.execute(sql3)\r\n        # MySQL's default storage engine is InnoDB, so data changes happen in a pre-allocated buffer and only reach the database after commit\r\n        db.commit()\r\n    def test_e(self):\r\n        # selection API\r\n        '''Choose the worker'''\r\n        url3 = \"http://\" + api_host + \"/ms-fahuobao-order/FhbOrder/choice-pay?t=1531964865851&orderId=\"+fhb_order_id+\"&biddingLogId=\"+jingjiaid+\"\"\r\n        request_yichang = requests.request(\"GET\", url=url3, headers=headers1)\r\n        # print('choose the winning worker'+request_yichang.text)\r\n        time.sleep(2)\r\n        self.assertIn(arg1, request_yichang.text, msg='test field')\r\n\r\n\r\n    def test_f(self):\r\n        # payment API; objectList holds the order ids\r\n        '''Pay'''\r\n        time.sleep(5)\r\n        url4 = \"http://\" + api_host + \"/ms-fahuobao-user/wallet/balance-pay\"\r\n        data4 =
{\"objectList\":[fhb_order_id],\"money\":0.01,\"password\":\"123456\"}\r\n request4 = requests.request(\"POST\", url=url4, data=json.dumps(data4), headers=headers1)\r\n print(\"支付:\" + request4.text)\r\n time.sleep(2)\r\n self.assertIn(arg1, request4.text, msg='测试field')\r\n\r\n\r\n # 通过发货宝订单编号查出scm订单id与订单编号\r\n def test_g(self):\r\n '''通过发货宝订单编号查出scm订单id与订单编号'''\r\n time.sleep(5)\r\n db1.connect()\r\n sql4 = \"select id,order_no from order_data where order_no='\"+orderno+\"'\"\r\n\r\n # 使用cursor()方法获取操作游标\r\n cursor3 = db1.cursor()\r\n # 执行SQL语句\r\n cursor3.execute(sql4)\r\n # 获取所有记录列表\r\n results3 = cursor3.fetchall()\r\n # print(results[0])\r\n # 有多个的情况,取第一个订单的id\r\n global scmorderid,scmorderno\r\n scmorderid = results3[0]['id']\r\n scmorderno=results3[0]['order_no']\r\n print(\"scm订单id:\" + scmorderid)\r\n print(\"scm订单编号:\" + scmorderno)\r\n\r\n # db2.close()\r\n def test_h(self):\r\n # 预约\r\n '''预约'''\r\n url5 = \"http://\" + api_host + \"/ms-fahuobao-order-data/appOrder/appointappoint-distributionOne-choose\"\r\n data5 = {\r\n \"branchUserId\": \"\",\r\n \"cause\": \"\",\r\n \"codeYT\": \"night\",\r\n \"ids\": [scmorderid],\r\n \"timeYT\": str(i.year) + \"-\" + str(i.month) + \"-\" + str(i.day)\r\n }\r\n # print(json.dumps(data5))\r\n request5 = requests.request(\"POST\", url=url5, data=json.dumps(data5), headers=headers2)\r\n print(\"预约:\" + request5.text)\r\n time.sleep(2)\r\n self.assertIn(arg1, request5.text, msg='测试field')\r\n def test_i(self):\r\n # 师傅发起异常\r\n '''师傅发起异常'''\r\n Pictureid='5b4ef81bd423d40001f0195e'\r\n url6 = \"http://\" + api_host + \"/ms-fahuobao-order/fhbAbnormal/saveAbnormal\"\r\n data6 = {\r\n \"trunkPicture\":[\r\n Pictureid\r\n ],\r\n \"isPickUp\":1,\r\n \"abnormalCode\":\"EXE02\",\r\n \"abnormalPicture\":[\r\n Pictureid\r\n ],\r\n \"abnormalMemo\":\"\",\r\n \"abnormalPacks\":\"1\",\r\n \"orderId\":scmorderid\r\n }\r\n\r\n request6 = requests.request(\"POST\", url=url6, data=json.dumps(data6), headers=headers2)\r\n print(\"师傅发起异常:\" + request6.text)\r\n time.sleep(2)\r\n self.assertIn(arg1, request6.text, msg='测试field')\r\n\r\n def test_j(self):\r\n # 通过订单id查出异常id\r\n '''通过订单id查出异常id'''\r\n time.sleep(5)\r\n db.connect()\r\n sql5 = \"SELECT id FROM fhb_order_abnormal WHERE order_id ='\"+orderid+\"' ORDER BY found_date DESC\"\r\n\r\n # 使用cursor()方法获取操作游标\r\n cursor5 = db.cursor()\r\n # 执行SQL语句\r\n cursor5.execute(sql5)\r\n # 获取所有记录列表\r\n results5 = cursor5.fetchall()\r\n # print(results[0])\r\n # 有多个的情况,取第一个订单的id\r\n global yichangid\r\n yichangid = results5[0]['id']\r\n print(\"异常id:\" + yichangid)\r\n def test_k(self):\r\n # 货主给出方案\r\n '''货主给出方案'''\r\n url7 = \"http://\" + api_host + \"/ms-fahuobao-order-abnormal/FhbOrderAbnormal/merProvideScheme\"\r\n data7 = {\r\n \"schemeDesc\":\"231321\",\r\n \"pic\":[\r\n Pictureid\r\n ],\r\n \"id\":yichangid\r\n }\r\n request7 = requests.request(\"POST\", url=url7, data=json.dumps(data7), headers=headers1)\r\n print(\"货主给出方案:\" + request7.text)\r\n time.sleep(2)\r\n self.assertIn(arg1, request7.text, msg='测试field')\r\n def test_l(self):\r\n # 师傅不同意货主方案(发起仲裁)\r\n '''师傅不同意货主方案(发起仲裁)'''\r\n url8 = \"http://\" + api_host + \"/ms-fahuobao-order-abnormal/FhbOrderAbnormal/workerApplyArbitration\"\r\n data8 = {\r\n \"pic\":[\r\n Pictureid\r\n ],\r\n \"schemeDesc\":\"321\",\r\n \"id\":yichangid\r\n }\r\n request8 = requests.request(\"POST\", url=url8, data=json.dumps(data8), headers=headers2)\r\n print(\"师傅不同意货主方案(发起仲裁):\" + request8.text)\r\n time.sleep(2)\r\n self.assertIn(arg1, request8.text, msg='测试field')\r\n def 
test_m(self):\r\n        # arbitration handling\r\n        '''Arbitration handling'''\r\n        url9 = \"http://\" + api_host + \"/ms-fahuobao-order-abnormal/FhbOrderAbnormal/dealArbitration\"\r\n        data9 ={\"schemeDesc\":\"3131\",\"refundAmount\":\"0.00\",\"pic\":[Pictureid],\"refundOrderState\":1,\"id\":yichangid}\r\n        request9 = requests.request(\"POST\", url=url9, data=json.dumps(data9), headers=headers3)\r\n        print(\"Arbitration handling: \" + request9.text)\r\n        time.sleep(2)\r\n        self.assertIn(arg1, request9.text, msg='test field')\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","sub_path":"SaaSFlowTest/NewFlowTestCase/testYiChangNormalFlow.py","file_name":"testYiChangNormalFlow.py","file_ext":"py","file_size_in_byte":11772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532253287","text":"# -*- coding: utf-8 -*-\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom framework.algorithm.predictor import Predictor as Predictor_Base\nfrom algorithms.dqn.model import Model\n\n\nclass Predictor(Predictor_Base):\n    def __init__(self, with_random=True):\n        super().__init__(with_random)\n        self.model = Model(with_random)\n        self._init_sess()\n        self.epsilon_start = 1.0\n        self.epsilon_end = 0.01\n        self.epsilon_decay_steps = 20000\n        self.epsilons = np.linspace(self.epsilon_start,\n                                    self.epsilon_end,\n                                    self.epsilon_decay_steps)\n        self.step = 0\n\n    def get_value(self, feature):\n        return self.sess.run(self.model.value,\n                             feed_dict={self.feature: [feature]})\n\n    def process(self, feature):\n        q_value, value = self.sess.run([self.model.q_value, self.model.value], \\\n                                       feed_dict={self.feature: [feature]})\n        return self._sample(q_value), value, None\n\n    def _sample(self, q_value):\n        self.step += 1\n        epsilon = self.epsilons[min(self.step,\n                                    self.epsilon_decay_steps - 1)]\n        rand_float = float(random.uniform(0, self.epsilon_decay_steps)) / float(self.epsilon_decay_steps)\n        if rand_float < epsilon and self.with_random:\n            return random.randint(0, self.action_dim - 1)\n        else:\n            return np.argmax(q_value, axis=1)\n    \n    def _init_sess(self):\n        cpu_num = 1\n        config = tf.ConfigProto(device_count={\"CPU\": cpu_num}, inter_op_parallelism_threads=cpu_num, \\\n                                intra_op_parallelism_threads=cpu_num, log_device_placement=False)\n        self.graph = tf.Graph()\n        with self.graph.as_default():\n            self.feature = tf.placeholder(shape=(None, self.state_dim[0], self.state_dim[1], self.state_dim[2]), \\\n                                          name=\"feature\", dtype=np.uint8)\n            self.model.inference(self.feature)\n            self.init_saver = tf.train.Saver(tf.global_variables())\n            # the init op must be created inside this graph; creating it afterwards\n            # (as before) put it in the default graph and sess.run() rejected it\n            self.init_op = tf.global_variables_initializer()\n        self.sess = tf.Session(graph=self.graph, config=config)\n        self.sess.run(self.init_op)\n","sub_path":"demonstrate/algorithms/dqn/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"400267934","text":"import pygame, os\nfrom pygame import image\nfrom .drawable import Drawable\nfrom polybius.managers import FRAMES\n\nclass Animated(Drawable):\n    \n    def __init__(self, imageName, location):\n        \n        super().__init__(imageName, location, (0,0))\n        \n        self._frame = 0\n        self._row = 0\n        self._animationTimer = 0\n        self._framesPerSecond = 5\n        self._nFrames = 2\n        \n        self._animate = True\n        \n\n    \n    def updateAnimation(self, ticks):\n        if self._animate:\n            self._animationTimer += ticks\n            \n            if self._animationTimer > 1 / self._framesPerSecond:\n                \n                self._frame += 1\n                self._frame %= self._nFrames\n                self._animationTimer -= 1 / self._framesPerSecond\n                self._image =
FRAMES.getFrame(self._imageName, (self._frame, self._row))\n self._defaultImage = self._image\n \n if self.isFlipped():\n self._image = pygame.transform.flip(self._image, True, False)\n if self.isScaled():\n self.scale(self._scaleValue)\n if self.isRotated():\n angle = self._rotation\n self.setRotation(0)\n self.rotate(angle)\n\n self._mask = pygame.mask.from_surface(self._image)\n \n def startAnimation(self):\n self._animate = True\n \n def stopAnimation(self):\n self._animate = False\n\n def setFramesInRow(self, frames):\n self._nFrames = frames\n\n def setRowOnSpriteSheet(self, row):\n self._row = row\n\n def setCurrentFrame(self, frame):\n self._frame = frame\n\n def setFPS(self, fps):\n self._framesPerSecond = fps\n\n def setAnimationTimer(self, animationTime):\n self._animationTimer = animationTime\n","sub_path":"polybius/graphics/basics/animated.py","file_name":"animated.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"286652441","text":"# Author: Izaak Neutelings (December 2018)\n# /shome/ytakahas/work/Leptoquark/CMSSW_9_4_4/src/PhysicsTools/NanoAODTools/NanoTreeProducer/leptonSF\n# HTT: https://github.com/CMS-HTT/LeptonEfficiencies\n# https://twiki.cern.ch/twiki/bin/view/CMS/MuonReferenceEffs2017\nfrom corrections import modulepath\nfrom ScaleFactorTool import ScaleFactor, ScaleFactorHTT, ScaleFactorEmb\n\n\nclass EmbeddingSFs:\n def __init__(self, year=2017):\n \"\"\"Load histograms from files.\"\"\"\n\n assert year in [\n 2016, 2017, 2018\n ], \"EmbeddingSFs: You must choose a year from: 2016, 2017, or 2018.\"\n if year == 2016:\n pathEmb = modulepath + \"/leptonEfficiencies/kit/inputs/2016/KIT/legacy_16_v1/\"\n pathEmbSel = modulepath + \"/leptonEfficiencies/kit/inputs/2016/KIT/embeddingselection/\"\n self.sftool_id = ScaleFactorEmb(\n pathEmb + \"muon_TP_Data_2016_Fits_ID_pt_eta_bins.root\",\n pathEmb + \"muon_TP_Embedding_2016_Fits_ID_pt_eta_bins.root\",\n 'ID',2016) # MediumID,\n self.sftool_iso = ScaleFactorEmb(\n pathEmb + \"muon_TP_Data_2016_Fits_Iso_pt_eta_bins.root\",\n pathEmb + \"muon_TP_Embedding_2016_Fits_Iso_pt_eta_bins.root\",\n \"Iso\",2016) # isolation\n self.sftool_trig = ScaleFactorEmb(\n pathEmb +\n \"muon_TP_Data_2016_Fits_Trg_pt_eta_bins.root\",\n pathEmb +\n \"muon_TP_Embedding_2016_Fits_Trg_pt_eta_bins.root\",\n 'Trg',2016)\n self.sftool_seltrig = ScaleFactorEmb(\n pathEmbSel + \"embeddingselection_TP_Data_2016_Fits_Trg8_pt_eta_bins.root\",\n pathEmbSel + \"embeddingselection_TP_Data_2016_Fits_Trg17_pt_eta_bins.root\",\n 'selection',2016)\n elif year == 2017:\n pathEmb = modulepath + \"/leptonEfficiencies/kit/inputs/2017/KIT/legacy/\"\n pathEmbSel = modulepath + \"/leptonEfficiencies/kit/inputs/2017/ICSF/2017/\"\n self.sftool_id = ScaleFactorEmb(\n pathEmb + \"muon_TP_Data_2017_Fits_ID_pt_eta_bins.root\",\n pathEmb + \"muon_TP_Embedding_2017_Fits_ID_pt_eta_bins.root\",\n 'ID',2017) # MediumID,\n self.sftool_iso = ScaleFactorEmb(\n pathEmb + \"muon_TP_Data_2017_Fits_Iso_pt_eta_bins.root\",\n pathEmb + \"muon_TP_Embedding_2017_Fits_Iso_pt_eta_bins.root\",\n \"Iso\",2017) # isolation\n self.sftool_trig = ScaleFactorEmb(\n pathEmb +\n \"muon_TP_Data_2017_Fits_Trg_IsoMu27_or_IsoMu24_pt_eta_bins.root\",\n pathEmb +\n \"muon_TP_Embedding_2017_Fits_Trg_IsoMu27_or_IsoMu24_pt_eta_bins.root\",\n 'Trg_IsoMu27_or_IsoMu24',2017)\n self.sftool_seltrig = ScaleFactorEmb(\n pathEmbSel + \"Mu8/muon_SFs.root\",\n pathEmbSel + \"Mu17/muon_SFs.root\",\n 'selection',2017)\n 
else:\n            pathEmb = modulepath + \"/leptonEfficiencies/kit/inputs/2018/KIT/v18_2/\"\n            pathEmbSel = modulepath + \"/leptonEfficiencies/kit/inputs/2018/KIT/2018/\"\n            self.sftool_id = ScaleFactorEmb(\n                pathEmb + \"muon_TP_Data_2018_Fits_ID_pt_eta_bins.root\",\n                pathEmb + \"muon_TP_Embedding_2018_Fits_ID_pt_eta_bins.root\",\n                'ID',2018)  # MediumID,\n            self.sftool_iso = ScaleFactorEmb(\n                pathEmb + \"muon_TP_Data_2018_Fits_Iso_pt_eta_bins.root\",\n                pathEmb + \"muon_TP_Embedding_2018_Fits_Iso_pt_eta_bins.root\",\n                \"Iso\",2018)  # isolation\n            self.sftool_trig = ScaleFactorEmb(\n                pathEmb +\n                \"muon_TP_Data_2018_Fits_Trg_IsoMu27_or_IsoMu24_pt_eta_bins.root\",\n                pathEmb +\n                \"muon_TP_Embedding_2018_Fits_Trg_IsoMu27_or_IsoMu24_pt_eta_bins.root\",\n                'Trg_IsoMu27_or_IsoMu24',2018)\n            self.sftool_seltrig = ScaleFactorEmb(\n                pathEmbSel + \"Mu8/muon_SFs.root\",\n                pathEmbSel + \"Mu17/muon_SFs.root\", 'selection',2018)\n\n    def getTriggerSF(self, pt, eta):\n        \"\"\"Get SF for single muon trigger.\"\"\"\n        return self.sftool_trig.getSF(pt, abs(eta))\n\n    def getIsoSF(self, pt, eta):\n        \"\"\"Get SF for muon isolation.\"\"\"\n        # use the isolation tool; this previously returned the trigger SF (copy-paste slip)\n        return self.sftool_iso.getSF(pt, abs(eta))\n\n    def getIdSF(self, pt, eta):\n        \"\"\"Get SF for muon identification.\"\"\"\n        return self.sftool_id.getSF(pt, abs(eta))\n\n    def getEmbSelSF(self, pt_1, eta_1, pt_2, eta_2):\n        \"\"\"Get SF for embedded DoubleMuon selection.\"\"\"\n        return self.sftool_seltrig.getSelectionSF(pt_1, abs(eta_1), pt_2,\n                                                  abs(eta_2))\n","sub_path":"corrections/EmbeddingSFs.py","file_name":"EmbeddingSFs.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131559001","text":"\n\nfrom collections import deque\nfrom collections import OrderedDict\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom tools.base import BasePolicyOptimizer\n\nfrom IPython import embed\n\n\n# model\ndef normalized_columns_initializer(weights, std=1.0):\n    out = torch.randn(weights.size())\n    out *= std / torch.sqrt(out.pow(2).sum(1, keepdim=True))\n    return out\n\n\ndef weights_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        weight_shape = list(m.weight.data.size())\n        fan_in = np.prod(weight_shape[1:4])\n        fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]\n        w_bound = np.sqrt(6. / (fan_in + fan_out))\n        m.weight.data.uniform_(-w_bound, w_bound)\n        m.bias.data.fill_(0)\n    elif classname.find('Linear') != -1:\n        weight_shape = list(m.weight.data.size())\n        fan_in = weight_shape[1]\n        fan_out = weight_shape[0]\n        w_bound = np.sqrt(6.
/ (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n\n\nclass Mode(object):\n NORMAL = 0\n REWARD_PREDICTION = 1\n PIXEL_CONTROL = 2\n\n\nclass Model(torch.nn.Module):\n\n def __init__(self, args, env, n_chnnnels, n_states, n_actions):\n super(Model, self).__init__()\n self.eval_interval = args.eval_interval\n self.n_actions = env.n_actions\n self.n_outputs = env.n_outputs\n self.n_lstm_cells = 256\n\n # visual embedding\n self.conv1 = nn.Conv2d(env.ob_channels, 16, 6, stride=3, padding=0)\n # self.pool1 = nn.MaxPool2d(2)\n self.conv2 = nn.Conv2d(16, 32, 6, stride=3, padding=0)\n # self.pool2 = nn.MaxPool2d(2)\n # self.conv2_bn = nn.BatchNorm2d(16)\n self.conv3 = nn.Conv2d(32, 32, 3, stride=1, padding=0)\n # self.conv3_bn = nn.BatchNorm2d(32)\n self.conv4 = nn.Conv2d(32, 32, 3, stride=1, padding=0)\n # self.conv4_bn = nn.BatchNorm2d(32)\n # self.conv5 = nn.Conv2d(32, 32, 3, stride=1, padding=0)\n # self.conv6 = nn.Conv2d(32, 32, 3, stride=1, padding=0)\n # self.fc1 = nn.Linear(288, self.n_lstm_cells)\n self.fc1 = nn.Linear(512, self.n_lstm_cells)\n\n # recurrent processing\n self.lstm = nn.LSTMCell(self.n_lstm_cells + n_states + env.n_keys + 1,\n self.n_lstm_cells)\n\n # policy and baseline\n # num_outputs = n_actions\n self.critic = nn.Linear(self.n_lstm_cells, 1)\n # self.actor = nn.Linear(self.n_lstm_cells, n_actions)\n for out_idx in range(len(env.n_outputs)):\n setattr(self, 'actor{}'.format(out_idx),\n nn.Linear(self.n_lstm_cells, env.n_outputs[out_idx]))\n\n # reward prediction\n self.rp_fc1 = nn.Linear(self.n_lstm_cells * 3 + n_states, 128)\n self.reward = nn.Linear(128, 3) # positive, negative, neutral\n\n self.apply(weights_init)\n\n self.fc1.weight.data = normalized_columns_initializer(\n self.fc1.weight.data, 1.0)\n self.fc1.bias.data.fill_(0)\n\n self.lstm.weight_ih.data = normalized_columns_initializer(\n self.lstm.weight_ih.data, 1.0)\n self.lstm.bias_ih.data.fill_(0)\n self.lstm.weight_hh.data = normalized_columns_initializer(\n self.lstm.weight_hh.data, 1.0)\n self.lstm.bias_hh.data.fill_(0)\n\n self.critic.weight.data = normalized_columns_initializer(\n self.critic.weight.data, 1.0)\n self.critic.bias.data.fill_(0)\n\n for out_idx in range(len(env.n_outputs)):\n actor = getattr(self, 'actor{}'.format(out_idx))\n actor.weight.data = normalized_columns_initializer(\n actor.weight.data, 0.1)\n actor.bias.data.fill_(0)\n\n # self.actor.weight.data = normalized_columns_initializer(\n # self.actor.weight.data, 0.1)\n # self.actor.bias.data.fill_(0)\n\n self.rp_fc1.weight.data = normalized_columns_initializer(\n self.rp_fc1.weight.data, 1.0)\n self.rp_fc1.bias.data.fill_(0)\n\n self.reward.weight.data = normalized_columns_initializer(\n self.reward.weight.data, 1.0)\n self.reward.bias.data.fill_(0)\n self.train()\n\n # act 메소드\n # self._prev_action = torch.zeros(1, env.n_keys)\n # self._prev_r = torch.zeros(1, 1)\n\n # act buffer\n self.cx = np.zeros((1, self.n_lstm_cells))\n self.hx = np.zeros((1, self.n_lstm_cells))\n self.last_action = 0\n self.prev_a = np.zeros((1, env.n_keys))\n self.prev_r = np.zeros((1, 1))\n # self.value = None\n # self.probs = None\n self.state_value = None\n self.action_values = None\n\n def visual_embedding(self, obs):\n x = F.relu(self.conv1(obs))\n # x = self.pool1(x)\n\n # x = F.relu(self.conv2_bn(self.conv2(x)))\n # x = F.relu(self.conv3_bn(self.conv3(x)))\n # x = F.relu(self.conv4_bn(self.conv4(x)))\n\n x = F.relu(self.conv2(x))\n # x = self.pool2(x)\n\n x_ = self.conv3(x) + x[:, :, 1:-1, 1:-1]\n x = 
F.relu(x_)\n\n        x = self.conv4(x) + x_[:, :, 1:-1, 1:-1]\n        x = F.relu(x)\n        # x = F.relu(self.conv5(x) + x[:, :, 1:-1, 1:-1])\n        # x = F.relu(self.conv6(x) + x[:, :, 1:-1, 1:-1])\n\n        x = x.view(x.size(0), -1)\n        return self.fc1(x)\n\n    def forward(self, inputs, mode=Mode.NORMAL):\n        if mode == Mode.NORMAL:\n            obs, state, (hx, cx), pa, pr = inputs\n            x = self.visual_embedding(obs)\n            x = torch.cat((x, state, pa, pr), dim=1)\n            hx, cx = self.lstm(x, (hx, cx))\n            x = hx\n\n            value = self.critic(x)\n            probs = list()\n            for out_idx in range(len(self.n_outputs)):\n                actor = getattr(self, 'actor{}'.format(out_idx))\n                logit = actor(x)\n                probs.append(F.softmax(logit, dim=1))\n            probs = torch.einsum('bi,bj->bij', probs)\n            probs = probs.reshape(-1, self.n_actions)\n            log_probs = torch.log(probs)\n\n            return value, probs, log_probs, (hx, cx)\n\n        elif mode == Mode.REWARD_PREDICTION:\n            ob1, ob2, ob3, state = inputs\n            x1 = self.visual_embedding(ob1)\n            x2 = self.visual_embedding(ob2)\n            x3 = self.visual_embedding(ob3)\n            x = torch.cat((x1, x2, x3, state), dim=1)\n            x = F.relu(self.rp_fc1(x))\n            return self.reward(x)\n\n        elif mode == Mode.PIXEL_CONTROL:\n            pass\n\n        else:\n            raise NotImplementedError\n\n    def reset_buffer(self):\n        self.cx.fill(0)\n        self.hx.fill(0)\n        self.prev_a.fill(0)\n        self.prev_r.fill(0)\n\n    def act(self, device, args, inputs, eval_game):\n        with torch.no_grad():\n            # obs, state, (hx, cx), pa, pr = inputs\n            obs, state = inputs\n            hx, cx = self.hx, self.cx\n            pa, pr = self.prev_a, self.prev_r\n\n            obs = torch.from_numpy(obs).unsqueeze(0).to(torch.float).to(device)\n            state = torch.from_numpy(state).unsqueeze(\n                0).to(torch.float).to(device)\n            hx = torch.from_numpy(hx).to(torch.float).to(device)\n            cx = torch.from_numpy(cx).to(torch.float).to(device)\n            pa = torch.from_numpy(pa).to(torch.float).to(device)\n            pr = torch.from_numpy(pr).to(torch.float).to(device)\n            inputs_pt = obs, state, (hx, cx), pa, pr\n\n            value, prob, log_prob, (hx, cx) = self(inputs_pt, mode=Mode.NORMAL)\n            prob_np = prob.cpu().numpy()\n            log_prob_ = log_prob.cpu().numpy()\n            self.hx = hx.cpu().numpy()[:]\n            self.cx = cx.cpu().numpy()[:]\n\n            action = np.random.choice(prob.shape[1], p=prob_np.squeeze(0))\n            self.last_action = action\n            action_log_prob = np.take(log_prob_, [action], axis=1) # log-prob of the sampled action, not a fixed index\n\n            self.state_value = value.cpu().mean().item()\n            self.action_values = prob.cpu().numpy()\n\n        return action, action_log_prob\n\n    def buffer_step(self, env):\n        self.prev_a[:] = env.actions[self.last_action]\n        self.prev_r[:] = env.reward\n\n    def to_params(self):\n        return [p.data.cpu().numpy() for p in self.parameters()]\n\n    def from_solution(self, device, solution):\n        for param, weight in zip(self.parameters(), solution.params):\n            param.data = torch.from_numpy(weight).to(device)\n\n    def save(self):\n        pass\n\n    def load(self):\n        pass\n\n    def is_eval(self, n_games):\n        return n_games % self.eval_interval == 0\n\n\n# optimizer\nclass MetaOptimizer(BasePolicyOptimizer):\n\n    @staticmethod\n    def seed(seed):\n        torch.manual_seed(seed)\n        random.seed(seed)\n\n    @staticmethod\n    def make_model(device, args, env):\n        n_channels = env.ob_channels\n        n_states = env.state_dims\n        n_actions = len(env.actions)\n        model = Model(args, env, n_channels, n_states, n_actions)\n        model.eval_interval = 1\n        return model.to(device)\n\n    def __init__(self, rank, device, args, env, model, mailbox):\n        super(MetaOptimizer, self).__init__(rank, device, args, env, model, mailbox)\n\n        self.max_steps = args.max_steps\n\n        height = args.ob_height\n        width = args.ob_width\n        len_lstm = model.n_lstm_cells\n        self.n_batches = 
args.n_batches\n self.n_steps = args.n_steps\n\n self.episodes = [[] for _ in range(args.n_batches)]\n self.observations = torch.zeros(\n args.n_steps + 1, args.n_batches, env.ob_channels, height, width).to(device)\n self.states = torch.zeros(\n args.n_steps + 1, args.n_batches, env.state_dims).to(device)\n self.actions = torch.LongTensor(\n args.n_steps, args.n_batches).to(device)\n self.action_log_probs_mu = torch.zeros(\n args.n_steps, args.n_batches).to(device)\n\n self.hx = torch.zeros(\n args.n_steps + 1, args.n_batches, len_lstm).to(device)\n self.cx = torch.zeros(\n args.n_steps + 1, args.n_batches, len_lstm).to(device)\n self.prev_actions = torch.zeros(\n args.n_steps + 1, args.n_batches, env.n_keys).to(device)\n self.prev_rewards = torch.zeros(\n args.n_steps + 1, args.n_batches, 1).to(device)\n self.rewards = torch.zeros(args.n_steps, args.n_batches, 1).to(device)\n self.masks = torch.zeros(args.n_steps, args.n_batches, 1).to(device)\n\n self.optimizer = self.update_optimizer(args, model)\n self._loss_variable = 0\n self.loss_dict = OrderedDict(\n loss=0, value_loss=0, policy_loss=0, entropy_loss=0)\n self.value_dict = OrderedDict(value=0)\n\n # aux. tasks\n self.aux_tasks = list()\n if UnrealRewardPredction.get_coef(args) > 0.0:\n self.aux_tasks.append(UnrealRewardPredction(\n rank, device, args, env, model, mailbox))\n\n def update_optimizer(self, args, model):\n\n if args.optimizer == 'sgd':\n self.optimizer = optim.SGD(\n model.parameters(), lr=args.lr, momentum=args.momentum)\n elif args.optimizer == 'rmsprop':\n self.optimizer = optim.RMSprop(\n model.parameters(), lr=args.lr, momentum=args.momentum)\n elif args.optimizer == 'adam':\n self.optimizer = optim.Adam(model.parameters(), lr=args.lr)\n else:\n raise NotImplementedError\n\n return self.optimizer\n\n def ready(self):\n if min([len(self.episodes[env_id]) for env_id in range(self.n_batches)]) > self.n_steps:\n return True\n else:\n return False\n\n def put(self, episode):\n env_id = np.argmin([len(self.episodes[env_id])\n for env_id in range(self.n_batches)])\n # self.episodes[env_id] += episode[:]\n self.episodes[env_id] += episode\n\n for aux_task in self.aux_tasks:\n aux_task.put(episode)\n\n def step(self, device, args, env, model):\n height = args.ob_height\n width = args.ob_width\n len_lstm = model.n_lstm_cells\n self.n_batches = args.n_batches\n self.n_steps = args.n_steps\n\n # Section: Value, Policy Func\n reward_list = deque(maxlen=1000)\n for env_id in range(self.n_batches):\n for step in range(self.n_steps):\n # ob1, s1, hx1, cx1, action, a_u, reward, ob2, s2, info2, done = self.episodes[env_id].pop(0)\n sample = self.episodes[env_id].pop(0)\n ob1 = sample.get('ob1')\n s1 = sample.get('state1')\n hx1 = sample.get('hx')\n cx1 = sample.get('cx')\n action = sample.get('action')\n a_u = sample.get('action_log_prob')\n reward = sample.get('reward')\n ob2 = sample.get('ob2')\n s2 = sample.get('state2')\n info2 = sample.get('info')\n done = sample.get('done')\n\n ob1 = torch.from_numpy(ob1).to(device)\n s1 = torch.from_numpy(s1).to(device)\n hx1 = torch.from_numpy(hx1).to(device)\n cx1 = torch.from_numpy(cx1).to(device)\n reward_list.append(reward)\n\n self.observations[step, env_id] = ob1\n self.states[step, env_id] = s1\n self.actions[step, env_id] = action\n self.action_log_probs_mu[step, env_id] = torch.tensor(a_u)\n self.rewards[step, env_id] = reward\n self.masks[step, env_id] = 0.0 if done else 1.0\n self.hx[step + 1, env_id] = hx1 * self.masks[step, env_id]\n self.cx[step + 1, env_id] = cx1 * 
self.masks[step, env_id]\n self.prev_actions[step + 1, env_id,\n :] = torch.tensor(env.actions[action]).to(torch.float)\n self.prev_actions[step + 1, env_id,\n :] *= self.masks[step, env_id]\n self.prev_rewards[step + 1, env_id] = reward * \\\n self.masks[step, env_id]\n\n ob2 = torch.from_numpy(ob2).to(device)\n s2 = torch.from_numpy(s2).to(device)\n self.observations[self.n_steps, env_id] = ob2\n self.states[self.n_steps, env_id] = s2\n\n flat_observations = self.observations.reshape(\n -1, env.ob_channels, height, width)\n flat_states = self.states.reshape(-1, env.state_dims)\n flat_hx = self.hx.reshape(-1, len_lstm)\n flat_cx = self.cx.reshape(-1, len_lstm)\n flat_prev_actions = self.prev_actions.reshape(-1, env.n_keys)\n flat_prev_r = self.prev_rewards.reshape(-1, 1)\n values, probs, log_probs, _ = model(\n (flat_observations, flat_states, (flat_hx, flat_cx), flat_prev_actions, flat_prev_r))\n values = values.view(self.n_steps + 1, self.n_batches, -1)\n # probs = F.softmax(logits, dim=1).view(args.n_steps + 1, args.n_batches, -1)\n # log_probs = F.log_softmax(logits, dim=1).view(args.n_steps + 1, args.n_batches, -1)\n probs = probs.view(self.n_steps + 1, self.n_batches, -1)\n log_probs = log_probs.view(self.n_steps + 1, self.n_batches, -1)\n action_log_probs = log_probs[:-1].gather(2, self.actions.unsqueeze(2))\n\n # v-trace\n entropies = -(log_probs * probs).sum(-1).unsqueeze(2)\n value_loss = torch.zeros(self.n_batches, 1).to(device)\n policy_loss = torch.zeros(self.n_batches, 1).to(device)\n gae = torch.zeros(self.n_batches, 1).to(device)\n returns = torch.zeros(self.n_batches, 1).to(device)\n returns.copy_(values[-1].data)\n rhos = torch.exp(action_log_probs.data -\n self.action_log_probs_mu.unsqueeze(2)).data.clamp(max=1.0)\n cs = torch.ones(self.n_batches, 1).to(device)\n\n for step in reversed(range(self.n_steps)):\n delta = rhos[step] * (self.rewards[step] + args.gamma *\n values[step + 1].data - values[step].data)\n cs = (cs * rhos[step]).clamp(max=1.0)\n returns = values[step].data + delta + args.lmbd * args.gamma * \\\n cs * (returns - values[step + 1].data) * self.masks[step - 1]\n # returns = values[step].data + delta + args.lmbd * args.gamma * cs * (returns - values[step + 1].data) * self.masks[step]\n value_loss = value_loss + 0.5 * \\\n (values[step] - returns.data).pow(2)\n # value_loss = value_loss + 0.5 * (self.rewards[step] + args.gamma * values[step + 1].data - values[step]).pow(2)\n if args.no_gae:\n advantages = returns - values[step].data\n policy_loss = policy_loss - \\\n action_log_probs[step] * advantages - \\\n args.ent_coef * entropies[step].data\n else:\n gae = args.gamma * args.tau * gae + delta\n policy_loss = policy_loss - \\\n action_log_probs[step] * gae - \\\n args.ent_coef * entropies[step].data\n\n self.value_dict['value'] = values.mean().item()\n\n value_loss = value_loss.mean()\n policy_loss = policy_loss.mean()\n entropy_loss = entropies.mean()\n self._loss_variable = args.value_coef * \\\n value_loss + args.policy_coef * policy_loss\n self.loss_dict['value_loss'] = value_loss.item()\n self.loss_dict['policy_loss'] = policy_loss.item()\n self.loss_dict['entropy_loss'] = entropy_loss.item()\n\n loss_dict = dict(\n total_loss=self._loss_variable.item(),\n value_loss=args.value_coef * value_loss.item(),\n policy_loss=args.policy_coef * policy_loss.item(),\n entropy_loss=args.ent_coef * entropy_loss.item())\n\n for aux_task in self.aux_tasks:\n if aux_task.ready():\n name = aux_task.loss_name\n coef = aux_task.get_coef(args)\n aux_task_loss = 
aux_task.step(device, args, env, model)\n                aux_task_loss = aux_task_loss.mean()\n                self._loss_variable += coef * aux_task_loss\n                self.loss_dict[name] = aux_task_loss.item()\n                loss_dict[name] = coef * aux_task_loss.item()\n\n        self.loss_dict['total_loss'] = self._loss_variable.item()\n\n        grad_dict = self._optimize(args, model)\n\n        # self.hx[0].copy_(self.hx[-1])\n        # self.cx[0].copy_(self.cx[-1])\n\n        self.hx[0] = self.hx[-1]\n        self.cx[0] = self.cx[-1]\n\n        return loss_dict, grad_dict\n\n    def _optimize(self, args, model):\n\n        self.optimizer.zero_grad()\n        self._loss_variable.backward()\n        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n        grad_dict = OrderedDict()\n        for name, param in model.named_parameters():\n            if param.grad is not None:\n                grad_dict[name] = param.grad.data.mean().item()\n            else:\n                grad_dict[name] = 0.0\n        self.optimizer.step()\n\n        return grad_dict\n\n    @property\n    def buffer_size(self):\n        buffer_size = OrderedDict()\n\n        for bid, episode in enumerate(self.episodes):\n            buffer_size[f'b{bid:03d}'] = len(episode)\n\n        for aux_task in self.aux_tasks:\n            buffer_size.update(aux_task.buffer_size)\n\n        return buffer_size\n\n\nclass UnrealRewardPredction(BasePolicyOptimizer):\n\n    loss_name = 'reward_pred_loss'\n\n    def __init__(self, rank, device, args, env, model, mailbox):\n        super(UnrealRewardPredction, self).__init__(\n            rank, device, args, env, model, mailbox)\n        self.device = device\n        self.n_batches = args.n_batches\n        self.base_reward = env.base_reward\n        self.zero_reward_memory = deque(maxlen=args.replay_memory_size)\n        self.nonzero_reward_memory = deque(maxlen=args.replay_memory_size)\n        self._new_data_added = False\n\n    @staticmethod\n    def get_coef(args):\n        return getattr(args, 'reward_pred_coef', 0.0)\n\n    def put(self, episode):\n        buff = deque(maxlen=4)\n        pa, pr = None, 0\n\n        for sample in episode:\n            ob1 = sample.get('ob1')\n            s1 = sample.get('state1')\n            action = sample.get('action')\n            reward = sample.get('reward')\n            ob2 = sample.get('ob2')\n\n            buff.append(ob1)\n            if len(buff) == 4:\n                ob1 = np.expand_dims(buff[0], 0)\n                ob2 = np.expand_dims(buff[1], 0)\n                ob3 = np.expand_dims(buff[2], 0)\n                if reward == self.base_reward:\n                    self.zero_reward_memory.append([ob1, ob2, ob3, s1, reward])\n                else:\n                    self.nonzero_reward_memory.append(\n                        [ob1, ob2, ob3, s1, reward])\n            pa, pr = action, reward\n\n        self._new_data_added = True # new data added: ready() -> True\n\n    def ready(self):\n        if self._new_data_added:\n            # training is only possible once new data has been added\n            if len(self.zero_reward_memory) > self.n_batches // 2 and \\\n               len(self.nonzero_reward_memory) > self.n_batches // 2:\n                return True\n        return False\n\n    def step(self, device, args, env, model):\n\n        zero_samples = random.sample(\n            self.zero_reward_memory, self.n_batches // 2)\n        nonzero_samples = random.sample(\n            self.nonzero_reward_memory, self.n_batches // 2)\n        samples = zero_samples + nonzero_samples\n\n        ob1, ob2, ob3, st, rw = zip(*samples)\n        ob1 = torch.from_numpy(np.vstack(ob1)).to(self.device)\n        ob2 = torch.from_numpy(np.vstack(ob2)).to(self.device)\n        ob3 = torch.from_numpy(np.vstack(ob3)).to(self.device)\n        st = torch.from_numpy(np.vstack(st)).to(self.device)\n        rwc = torch.zeros(len(rw), 3).to(self.device)\n        for idx, r in enumerate(rw):\n            if r > self.base_reward:\n                rwc[idx, 0] = 1\n            elif r < self.base_reward:\n                rwc[idx, 1] = 1\n            else:\n                rwc[idx, 2] = 1\n\n        logit = model((ob1, ob2, ob3, st), mode=Mode.REWARD_PREDICTION)\n        log_prob = F.log_softmax(logit, dim=1)\n        loss = -(rwc * log_prob).mean()\n\n        self._new_data_added = False # fresh data is required again: ready() -> False\n        
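# hand the scalar loss back to MetaOptimizer.step, which weights it by the task coefficient\n        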
return loss\n\n    def optimize(self):\n        pass\n\n    @property\n    def buffer_size(self):\n        buffer_size = OrderedDict()\n        value = 100 * len(self.zero_reward_memory) / \\\n            self.zero_reward_memory.maxlen\n        buffer_size['zero'] = value\n        value = 100 * len(self.nonzero_reward_memory) / \\\n            self.nonzero_reward_memory.maxlen\n        buffer_size['non-zero'] = value\n        return buffer_size\n","sub_path":"toolbox/policy_optimizer/meta_optimizer.py","file_name":"meta_optimizer.py","file_ext":"py","file_size_in_byte":22945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"227148223","text":"#!/usr/bin/env python\n\nimport logging\nimport pathlib\nimport platform\nimport shutil\nimport shlex\nimport subprocess\nimport warnings\n\n\ndef get_script_path() -> pathlib.Path:\n    return pathlib.Path.cwd() / __file__\n\n\ndef eval_os_cmd(cmd: str) -> (int, str):\n    proc = subprocess.Popen(shlex.split(cmd),\n                            stdout=subprocess.PIPE,\n                            stderr=subprocess.PIPE)\n    stdout, stderr = proc.communicate()\n\n    if proc.returncode:\n        return proc.returncode, f'Evaluation of {cmd} raised the error {stderr}'\n    else:\n        return proc.returncode, stdout\n\n\ndef install_brew():\n    if platform.system() == 'Darwin':\n        if shutil.which('brew') is None:\n            eval_os_cmd(\n                '/bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)\"'\n            )\n    else:\n        raise ValueError(f'{platform.system()} is not supported')\n\n\ndef install_git():\n    if platform.system() == 'Darwin':\n        if shutil.which('git') is None:\n            eval_os_cmd('brew install git')\n    elif platform.system() == 'Linux':\n        if shutil.which('git') is None:\n            eval_os_cmd('sudo apt install -y git')\n    else:\n        raise ValueError(f'{platform.system()} is not supported')\n\n\ndef install_curl():\n    if platform.system() == 'Darwin':\n        if shutil.which('curl') is None:\n            eval_os_cmd('brew install curl')\n    elif platform.system() == 'Linux':\n        if shutil.which('curl') is None:\n            eval_os_cmd('sudo apt install -y curl')\n    else:\n        raise ValueError(f'{platform.system()} is not supported')\n\n\ndef copy_tmux_config(dot_folder: pathlib.Path):\n    logging.debug('Installing tmux config ...')\n    tmux_conf_src = dot_folder / 'tmux.conf'\n    tmux_conf_dst = pathlib.Path.home() / '.tmux.conf'\n\n    if tmux_conf_dst.exists():\n        if tmux_conf_dst.is_symlink() and tmux_conf_dst.resolve() == tmux_conf_src:\n            logging.info(f'{tmux_conf_dst} is already a symlink to the {tmux_conf_src}')\n        else:\n            raise ValueError(f'{tmux_conf_dst} exists.')\n    else:\n        tmux_conf_dst.symlink_to(tmux_conf_src)\n\n\ndef copy_neovim_config(dot_folder: pathlib.Path):\n    logging.debug('Installing neovim config ...')\n\n    neovim_conf_src = dot_folder / 'nvim'\n    neovim_conf_dst = pathlib.Path.home() / '.config/nvim'\n\n    if neovim_conf_dst.exists():\n        if neovim_conf_dst.is_symlink() and neovim_conf_dst.resolve(\n        ) == neovim_conf_src:\n            logging.info(\n                f'{neovim_conf_dst} is already a symlink to the {neovim_conf_src}')\n        else:\n            raise ValueError(f'{neovim_conf_dst} exists.')\n    else:\n        neovim_conf_dst.symlink_to(neovim_conf_src)\n\n\ndef copy_kitty_config(dot_folder: pathlib.Path):\n    logging.debug(\"Installing kitty config ...\")\n    if platform.system() == 'Linux':\n\n        kitty_conf_src = dot_folder / 'kitty'\n        kitty_conf_dst = pathlib.Path.home() / '.config/kitty'\n\n        if kitty_conf_dst.exists():\n            if kitty_conf_dst.is_symlink() and kitty_conf_dst.resolve(\n            ) == kitty_conf_src:\n                logging.info(\n                    f'{kitty_conf_dst} is already a symlink to the {kitty_conf_src}')\n            else:\n                raise ValueError(f'{kitty_conf_dst} 
exists.')\n        else:\n            kitty_conf_dst.symlink_to(kitty_conf_src)\n    else:\n        logging.info(f'Platform {platform.platform()} is not supported')\n\n\ndef copy_zsh_config(dot_folder: pathlib.Path):\n    logging.debug(\"Installing zsh config ...\")\n    zsh_conf_src = dot_folder / 'zshrc'\n    zsh_conf_dst = pathlib.Path.home() / '.zshrc'\n\n    if zsh_conf_dst.exists():\n        if zsh_conf_dst.is_symlink() and zsh_conf_dst.resolve() == zsh_conf_src:\n            logging.info(f'{zsh_conf_dst} is already a symlink to the {zsh_conf_src}')\n        else:\n            raise ValueError(f'{zsh_conf_dst} exists.')\n    else:\n        zsh_conf_dst.symlink_to(zsh_conf_src)\n\n\ndef install_fira_code():\n    logging.debug(\"Installing fira code...\")\n    if platform.system() == 'Linux':\n        rcode, msg = eval_os_cmd(\"apt list --installed fonts-firacode\")\n        if not rcode and len(msg.decode('utf8').rstrip().split('\\n')) == 1:\n            rcode, msg = eval_os_cmd(\"sudo apt install -y fonts-firacode\")\n            if rcode:\n                logging.critical(msg)\n    elif platform.system() == 'Darwin':\n        install_brew()\n        rcode, _ = eval_os_cmd(\"brew list --cask font-fira-code\")\n        if rcode:\n            warnings.warn(f\"list firacode returns {rcode}\")\n            eval_os_cmd(\n                \"brew tap homebrew/cask-fonts && brew cask install font-fira-code\")\n\n\ndef install_dircolors():\n    logging.debug(\"Installing dircolors...\")\n    if platform.system() == 'Darwin':\n        rcode, _ = eval_os_cmd(\"brew list coreutils\")\n        if rcode:\n            rcode, msg = eval_os_cmd(\"brew install coreutils\")\n            if rcode:\n                logging.critical(msg)\n    elif platform.system() == 'Linux':\n        logging.info(\"Linux has dircolors by default\")\n    else:\n        raise ValueError(f'{platform.system()} platform is not supported yet')\n\n\ndef install_zsh():\n    logging.debug(\"Installing zsh...\")\n    if platform.system() == 'Linux':\n        rcode, msg = eval_os_cmd(\"apt list --installed zsh\")\n        if not rcode and len(msg.decode('utf8').rstrip().split('\\n')) == 1:\n            rcode, msg = eval_os_cmd(\"sudo apt install -y zsh\")\n            if rcode:\n                logging.critical(msg)\n            rcode, msg = eval_os_cmd(\"chsh -s $(which zsh) dbihbka\")\n            if rcode:\n                logging.critical(msg)\n    elif platform.system() == 'Darwin':\n        logging.info(\"MacOS X uses zsh by default\")\n    else:\n        raise ValueError(f'{platform.system()} platform is not supported yet')\n\n\ndef install_kitty():\n    logging.debug(\"Installing kitty...\")\n    if platform.system() == 'Linux':\n        rcode, msg = eval_os_cmd(\"apt list --installed kitty\")\n        if not rcode and len(msg.decode('utf8').rstrip().split('\\n')) == 1:\n            rcode, msg = eval_os_cmd(\"sudo apt install -y kitty\")\n            if rcode:\n                logging.critical(msg)\n    elif platform.system() == 'Darwin':\n        logging.info(\"we usually use iTerm2 on MacOS X\")\n    else:\n        raise ValueError(f'{platform.system()} platform is not supported yet')\n\n\ndef install_vim_plug():\n    logging.info(\"Install vim plug...\")\n    config_folder = pathlib.Path.home() / \".local/share\"\n    logging.critical(f'curl -fLo {config_folder}/nvim/site/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim')\n\n\ndef install_node():\n    logging.info(\"Installing node...\")\n    if shutil.which('node') is None:\n        rcode, msg = eval_os_cmd('curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash - && sudo apt-get install -y nodejs')\n        if rcode:\n            logging.critical(msg)\n\ndef install_neovim():\n    if platform.system() == 'Darwin':\n        if shutil.which('nvim') is None:\n            eval_os_cmd('brew install neovim')\n    elif platform.system() == 'Linux':\n        if shutil.which('nvim') is None:\n            eval_os_cmd('sudo apt install -y neovim')\n    else:\n        raise 
ValueError(f'{platform.system()} is not supported')\n\ndef install_tmux():\n    if platform.system() == 'Darwin':\n        if shutil.which('tmux') is None:\n            eval_os_cmd('brew install tmux')\n    elif platform.system() == 'Linux':\n        if shutil.which('tmux') is None:\n            eval_os_cmd('sudo apt install -y tmux')\n    else:\n        raise ValueError(f'{platform.system()} is not supported')\n\n\n\ndef install_zsh_plugins():\n    install_git()\n    zsh_folder = pathlib.Path.home() / \".zsh\"\n    if not zsh_folder.exists():\n        zsh_folder.mkdir()\n    rcode, msg = eval_os_cmd(\n        f\"git clone https://github.com/zsh-users/zsh-autosuggestions {zsh_folder}/zsh-autosuggestions\"\n    )\n    if rcode:\n        warnings.warn(msg)\n\n    rcode, msg = eval_os_cmd(\n        f\"git clone --recursive https://github.com/joel-porquet/zsh-dircolors-solarized {zsh_folder}/zsh-dircolors-solarized\"\n    )\n    if rcode:\n        logging.critical(msg)\n\n\ndef install_starship():\n    logging.debug(\"Installing starship...\")\n    if platform.system() == 'Darwin':\n        rcode, _ = eval_os_cmd('brew list starship')\n        if rcode:\n            eval_os_cmd('brew install starship')\n    elif platform.system() == 'Linux':\n        if shutil.which('starship') is None:\n            rcode, msg = eval_os_cmd(\n                'curl -fsSL https://starship.rs/install.sh | bash')\n            if rcode:\n                raise RuntimeError(msg)\n    else:\n        raise ValueError(f'{platform.system()} platform is not supported yet')\n\n\ndef install_exa():\n    logging.debug('Installing exa ...')\n    if platform.system() == 'Darwin':\n        install_brew()\n        rcode, _ = eval_os_cmd('brew list exa')\n        if rcode:\n            rcode, msg = eval_os_cmd('brew install exa')\n            if rcode:\n                logging.warning(msg)\n    elif platform.system() == 'Linux':\n        rcode, msg = eval_os_cmd(\"apt list --installed exa\")\n        if not rcode and len(msg.decode('utf8').rstrip().split('\\n')) == 1:\n            rcode, msg = eval_os_cmd('sudo apt install -y exa')\n            if rcode:\n                logging.critical(msg)\n\n\ndef install_fd():\n    logging.debug('Installing fd ...')\n    if platform.system() == 'Darwin':\n        install_brew()\n        rcode, _ = eval_os_cmd('brew list fd')\n        if rcode:\n            rcode, msg = eval_os_cmd('brew install fd')\n            if rcode:\n                logging.warning(msg)\n    elif platform.system() == 'Linux':\n        rcode, msg = eval_os_cmd(\"apt list --installed fd-find\")\n        if not rcode and len(msg.decode('utf8').rstrip().split('\\n')) == 1:\n            rcode, msg = eval_os_cmd(\"sudo apt install -y fd-find\")\n            if rcode:\n                logging.critical(msg)\n\n\ndef install_bat():\n    logging.debug('Installing bat ...')\n    if platform.system() == 'Darwin':\n        install_brew()\n        rcode, _ = eval_os_cmd('brew list bat')\n        if rcode:\n            rcode, msg = eval_os_cmd('brew install bat')\n            if rcode:\n                logging.warning(msg)\n    elif platform.system() == 'Linux':\n        rcode, msg = eval_os_cmd(\"apt list --installed bat\")\n        if not rcode and len(msg.decode('utf8').rstrip().split('\\n')) == 1:\n            rcode, msg = eval_os_cmd('sudo apt install -y bat')\n            if rcode:\n                logging.critical(msg)\n\n\ndef generate_alaises(dot_folder):\n    logging.debug(\"Generating aliases ...\")\n    alias_conf_src = dot_folder / 'aliases'\n    alias_conf_dst = pathlib.Path.home() / '.aliases'\n\n    with open(alias_conf_src, 'w') as src:\n        if platform.system() == 'Darwin':\n            dircolors_cmd = 'dircolors=gdircolors'\n            cat_cmd = 'cat=bat'\n        elif platform.system() == 'Linux':\n            dircolors_cmd = 'dircolors=dircolors'\n            cat_cmd = 'cat=batcat'\n        ls_cmd = 'ls=exa'\n        find_cmd = 'find=fdfind'\n        src.write('\\n'.join(\n            [f'alias {cmd}' for cmd in [ls_cmd, dircolors_cmd, find_cmd, cat_cmd]]))\n\n    if alias_conf_dst.exists():\n        if alias_conf_dst.is_symlink() and alias_conf_dst.resolve(\n        ) == alias_conf_src:\n            
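# the symlink already points at the freshly generated aliases file; nothing to do\n            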
logging.info(\n                f'{alias_conf_dst} is already a symlink to the {alias_conf_src}')\n        else:\n            raise ValueError(f'{alias_conf_dst} exists.')\n    else:\n        alias_conf_dst.symlink_to(alias_conf_src)\n\n\ndef main():\n    dot_folder = get_script_path().parent\n\n    install_git()\n    install_curl()\n    install_fira_code()\n    install_zsh()\n    install_zsh_plugins()\n    install_starship()\n    install_exa()\n    install_dircolors()\n    install_fd()\n    install_bat()\n    install_kitty()\n    install_node()\n    install_vim_plug()\n    install_neovim()\n    install_tmux()\n\n    generate_alaises(dot_folder)\n\n    copy_zsh_config(dot_folder)\n    copy_tmux_config(dot_folder)\n    copy_neovim_config(dot_folder)\n\n    if platform.system() == 'Linux':\n        copy_kitty_config(dot_folder)\n\n\nif __name__ == \"__main__\":\n    import sys\n    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n    main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":11270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"356987545","text":"#!/usr/bin/env micropython\n\n\nfrom ev3dev2.sensor.lego import InfraredSensor\nfrom ev3dev2.sensor import INPUT_4\nfrom ev3dev2.sound import Sound\n\n\nIR_SENSOR = InfraredSensor(address=INPUT_4)\nSPEAKER = Sound()\n\n\ndef detect_object(\n        distance: float = 30,\n        sound_to_play: str = 'Hi'):\n    if IR_SENSOR.proximity <= distance:\n        SPEAKER.play_file(\n            wav_file='/home/robot/sound/{}.wav'.format(sound_to_play),\n            volume=100,\n            play_type=Sound.PLAY_WAIT_FOR_COMPLETE)\n\n    \nwhile True:\n    detect_object(\n        distance=30,\n        sound_to_play='Magic wand')\n","sub_path":"Computing-Platforms/EV3/Education-Edition/Lessons/Animal-Rescue/1-Explore-Idea-1/4a-Make-Your-Own-with-IR-Sensor.EV3Dev2.py","file_name":"4a-Make-Your-Own-with-IR-Sensor.EV3Dev2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"540776651","text":"\"\"\"\nTrain a model on SemEval.\n\"\"\"\n\nimport os\nimport sys\nfrom datetime import datetime\nimport time\nimport numpy as np\nimport random\nimport argparse\nfrom shutil import copyfile\nimport torch\nimport pickle\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom collections import defaultdict\n\nfrom data.semeval_loader import DataLoader\nfrom model.trainer import GCNTrainer\nfrom utils import torch_utils, scorer, constant_semeval as constant, helper\nfrom utils.vocab import Vocab\n\ncwd = os.getcwd()\non_server = 'Desktop' not in cwd\n\n\ndef str2bool(v):\n    return v.lower() in ('true',)  # tuple, not a bare string, so substrings of 'true' do not match\n\n# Local paths\nif not on_server:\n    data_dir = '/Volumes/External HDD/dataset/semeval/data/json'\n    vocab_dir = '/Volumes/External HDD/dataset/semeval/data/vocab'\n    model_save_dir = '/Volumes/External HDD/dataset/semeval/saved_models'\n    test_save_dir = os.path.join(cwd, '{dataset}_test_performances')\n    os.makedirs(test_save_dir, exist_ok=True)\n# Server paths\nelse:\n    data_dir = '/usr0/home/gis/data/semeval/data/json'\n    vocab_dir = '/usr0/home/gis/data/semeval/data/vocab'\n    model_save_dir = '/usr0/home/gis/research/tacred-exploration/saved_models'\n    test_save_dir = '/usr0/home/gis/research/tacred-exploration/semeval_test_performances'\n    os.makedirs(test_save_dir, exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_dir', type=str, default=data_dir)\nparser.add_argument('--vocab_dir', type=str, default=vocab_dir)\nparser.add_argument('--model_save_dir', type=str, 
default=model_save_dir)\nparser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')\nparser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')\nparser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')\nparser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')\nparser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')\nparser.add_argument('--input_dropout', type=float, default=0.5, help='Input dropout rate.')\nparser.add_argument('--gcn_dropout', type=float, default=0.5, help='GCN layer dropout rate.')\nparser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which randomly set a word to UNK.')\nparser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N word embeddings.')\nparser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')\nparser.add_argument('--no-lower', dest='lower', action='store_false')\nparser.add_argument('--test_save_dir', default=test_save_dir, type=str)\nparser.set_defaults(lower=False)\n\nparser.add_argument('--prune_k', default=-1, type=int,\n help='Prune the dependency tree to <= K distance off the dependency path; set to -1 for no pruning.')\nparser.add_argument('--conv_l2', type=float, default=0, help='L2-weight decay on conv layers only.')\nparser.add_argument('--pooling', choices=['max', 'avg', 'sum'], default='max',\n help='Pooling function type. Default max.')\nparser.add_argument('--pooling_l2', type=float, default=0, help='L2-penalty for all pooling output.')\nparser.add_argument('--mlp_layers', type=int, default=2, help='Number of output mlp layers.')\nparser.add_argument('--no_adj', dest='no_adj', action='store_true', help=\"Zero out adjacency matrix for ablation.\")\n\nparser.add_argument('--no-rnn', dest='rnn', action='store_false', help='Do not use RNN layer.')\nparser.add_argument('--rnn_hidden', type=int, default=200, help='RNN hidden state size.')\nparser.add_argument('--rnn_layers', type=int, default=1, help='Number of RNN layers.')\nparser.add_argument('--rnn_dropout', type=float, default=0.5, help='RNN dropout rate.')\n\nparser.add_argument('--lr', type=float, default=1.0, help='Applies to sgd and adagrad.')\nparser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate decay rate.')\nparser.add_argument('--decay_epoch', type=int, default=5, help='Decay learning rate after this epoch.')\nparser.add_argument('--optim', choices=['sgd', 'adagrad', 'adam', 'adamax'], default='sgd',\n help='Optimizer: sgd, adagrad, adam or adamax.')\nparser.add_argument('--num_epoch', type=int, default=100, help='Number of total training epochs.')\nparser.add_argument('--batch_size', type=int, default=50, help='Training batch size.')\nparser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')\nparser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')\nparser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')\nparser.add_argument('--save_epoch', type=int, default=100, help='Save model checkpoints every k epochs.')\nparser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')\nparser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')\nparser.add_argument('--test_confusion_save_file', default='')\nparser.add_argument('--seed', type=int, 
default=1234)\nparser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())\nparser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')\n\nparser.add_argument('--load', dest='load', action='store_true', help='Load pretrained model.')\nparser.add_argument('--model_file', type=str, help='Filename of the pretrained model.')\n\nparser.add_argument('--adj_type', type=str, default='regular')\nparser.add_argument('--deprel_emb_dim', type=int, default=200)\nparser.add_argument('--deprel_dropout', type=float, default=.5)\n\nparser.add_argument('--use_bert_embeddings', type=str2bool, default=False)\nparser.add_argument('--emb_dropout', type=float, default=.0)\nparser.add_argument('--dataset', type=str, default='semeval')\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\nrandom.seed(1234)\nif args.cpu:\n    args.cuda = False\nelif args.cuda:\n    torch.cuda.manual_seed(args.seed)\ninit_time = time.time()\n\n# make opt\nopt = vars(args)\nlabel2id = constant.LABEL_TO_ID\nopt['num_class'] = len(label2id)\n\n# load vocab\nvocab_file = opt['vocab_dir'] + '/vocab.pkl'\nvocab = Vocab(vocab_file, load=True)\nopt['vocab_size'] = vocab.size\nemb_file = opt['vocab_dir'] + '/embedding.npy'\n\n# Change embedding size for BERT\nif opt['use_bert_embeddings']:\n    opt['emb_dim'] = 1024\n    emb_matrix = None\nelse:\n    emb_matrix = np.load(emb_file)\n    assert emb_matrix.shape[0] == vocab.size\n    assert emb_matrix.shape[1] == opt['emb_dim']\n\nif opt['use_bert_embeddings']:\n    print('Loading BERT Embeddings...')\n    embeddings_file = '/usr0/home/gis/data/bert_saves/id2embeddings.pkl'\n    id2embeddings = pickle.load(open(embeddings_file, 'rb'))\n    print('Embeddings Loaded')\nelse:\n    id2embeddings = None\nprint(\"Loading data from {} with batch size {}...\".format(opt['data_dir'], opt['batch_size']))\ntrain_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt,\n                         vocab, evaluation=False, bert_embeddings=id2embeddings)\ntest_batch = DataLoader(opt['data_dir'] + '/test.json', opt['batch_size'], opt,\n                        vocab, evaluation=True, bert_embeddings=id2embeddings)\n\nmodel_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']\nmodel_save_dir = opt['model_save_dir'] + '/' + model_id\nopt['model_save_dir'] = model_save_dir\nhelper.ensure_dir(model_save_dir, verbose=True)\n\n# save config\nhelper.save_config(opt, model_save_dir + '/config.json', verbose=True)\nvocab.save(model_save_dir + '/vocab.pkl')\nfile_logger = helper.FileLogger(model_save_dir + '/' + opt['log'],\n                                header=\"# epoch\\ttrain_loss\\ttest_loss\\ttrain_score\\tbest_train_score\")\n\ntest_save_dir = os.path.join(opt['test_save_dir'], opt['id'])\nos.makedirs(test_save_dir, exist_ok=True)\ntest_save_file = os.path.join(test_save_dir, 'test_records.pkl')\ntest_confusion_save_file = os.path.join(test_save_dir, 'test_confusion_matrix.pkl')\ntrain_confusion_save_file = os.path.join(test_save_dir, 'train_confusion_matrix.pkl')\ndeprel_save_file = os.path.join(test_save_dir, 'deprel_embs.pkl')\n# print model info\nhelper.print_config(opt)\n\n# model\nif not opt['load']:\n    trainer = GCNTrainer(opt, emb_matrix=emb_matrix)\nelse:\n    # load pretrained model\n    model_file = opt['model_file']\n    print(\"Loading model from {}\".format(model_file))\n    model_opt = torch_utils.load_config(model_file)\n    model_opt['optim'] = opt['optim']\n    trainer = GCNTrainer(model_opt)\n    trainer.load(model_file)\n\nid2label = dict([(v, k) for k, v in label2id.items()])\ntrain_score_history = []\ncurrent_lr = opt['lr']\n\nglobal_step = 
0\nglobal_start_time = time.time()\nformat_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'\nmax_steps = len(train_batch) * opt['num_epoch']\nbest_train_metrics = defaultdict(lambda: -np.inf)\ntest_metrics_at_best_train = defaultdict(lambda: -np.inf)\n\n# start training\nupdate_gap = max(int(50 / opt['batch_size']), 1)\nfor epoch in range(1, opt['num_epoch'] + 1):\n train_loss = 0\n # Training in-case of mini-batches\n trainer.model.train()\n trainer.optimizer.zero_grad()\n\n for i, batch in enumerate(train_batch):\n start_time = time.time()\n global_step += 1\n loss = trainer.update(batch)\n loss.backward()\n loss_val = loss.item()\n step_num = i + 1\n if step_num % update_gap == 0:\n torch.nn.utils.clip_grad_norm_(trainer.model.parameters(), trainer.opt['max_grad_norm'])\n trainer.optimizer.step()\n trainer.optimizer.zero_grad()\n\n train_loss += loss_val\n if global_step % opt['log_step'] == 0:\n duration = time.time() - start_time\n print(format_str.format(datetime.now(), global_step, max_steps, epoch,\n opt['num_epoch'], loss, duration, current_lr))\n # Update grads if needed\n torch.nn.utils.clip_grad_norm_(trainer.model.parameters(), trainer.opt['max_grad_norm'])\n trainer.optimizer.step()\n trainer.optimizer.zero_grad()\n\n print(\"Saving Deprel Embeddings...\")\n with open(deprel_save_file, 'wb') as handle:\n pickle.dump(trainer.get_deprel_emb(), handle)\n\n # eval on train\n print(\"Evaluating on train set...\")\n train_predictions = []\n train_eval_loss = 0\n for i, batch in enumerate(train_batch):\n preds, _, loss = trainer.predict(batch)\n train_predictions += preds\n train_eval_loss += loss\n train_predictions = [id2label[p] for p in train_predictions]\n train_eval_loss = train_eval_loss / train_batch.num_examples * opt['batch_size']\n\n train_p, train_r, train_f1 = scorer.score(train_batch.gold(), train_predictions)\n print(\"epoch {}: train_loss = {:.6f}, train_eval_loss = {:.6f}, train_f1 = {:.4f}\".format(\n epoch, train_loss, train_eval_loss, train_f1))\n train_score = train_f1\n file_logger.log(\"{}\\t{:.6f}\\t{:.6f}\\t{:.4f}\\t{:.4f}\".format(epoch, train_loss, train_eval_loss, train_f1,\n max([train_score] + train_score_history)))\n current_train_metrics = {'f1': train_f1, 'precision': train_p, 'recall': train_r}\n # eval on test\n test_predictions = []\n for i, batch in enumerate(test_batch):\n preds, _, loss = trainer.predict(batch)\n test_predictions += preds\n test_predictions = [id2label[p] for p in test_predictions]\n\n test_p, test_r, test_f1 = scorer.score(test_batch.gold(), test_predictions)\n current_test_metrics = {'f1': test_f1, 'precision': test_p, 'recall': test_r}\n\n if best_train_metrics['f1'] < current_train_metrics['f1']:\n best_train_metrics = current_train_metrics\n test_metrics_at_best_train = current_test_metrics\n trainer.save(os.path.join(model_save_dir, 'best_model.pt'), epoch)\n print(\"New best model saved\")\n file_logger.log(\"new best model saved at epoch {}: {:.2f}\\t{:.2f}\\t{:.2f}\" \\\n .format(epoch, test_p * 100, test_r * 100, test_f1 * 100))\n\n # Compute Confusion Matrices over triples excluded in Training\n test_preds = np.array(test_predictions)\n test_gold = np.array(test_batch.gold())\n train_preds = np.array(train_predictions)\n train_gold = np.array(train_batch.gold())\n test_confusion_matrix = scorer.compute_confusion_matrices(ground_truth=test_gold,\n predictions=test_preds)\n dev_confusion_matrix = scorer.compute_confusion_matrices(ground_truth=train_gold,\n predictions=train_preds)\n 
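# persist the confusion matrices so prediction errors can be inspected offline\n    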
print(\"Saving Excluded Triple Confusion Matrices...\")\n with open(test_confusion_save_file, 'wb') as handle:\n pickle.dump(test_confusion_matrix, handle)\n\n print(\"Best Train Metrics | F1: {} | Precision: {} | Recall: {}\".format(\n best_train_metrics['f1'], best_train_metrics['precision'], best_train_metrics['recall']\n ))\n print(\"Test Metrics at Best Train | F1: {} | Precision: {} | Recall: {}\".format(\n test_metrics_at_best_train['f1'], test_metrics_at_best_train['precision'], test_metrics_at_best_train['recall']\n ))\n\n # save\n model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)\n trainer.save(model_file, epoch)\n if epoch == 1 or train_score > max(train_score_history):\n copyfile(model_file, model_save_dir + '/best_model.pt')\n print(\"new best model saved.\")\n file_logger.log(\"new best model saved at epoch {}: {:.2f}\\t{:.2f}\\t{:.2f}\" \\\n .format(epoch, train_p * 100, train_r * 100, train_score * 100))\n if epoch % opt['save_epoch'] != 0:\n os.remove(model_file)\n\n # lr schedule\n if len(train_score_history) > opt['decay_epoch'] and train_score <= train_score_history[-1] and \\\n opt['optim'] in ['sgd', 'adagrad', 'adadelta']:\n current_lr *= opt['lr_decay']\n trainer.update_lr(current_lr)\n\n train_score_history += [train_score]\n print(\"\")\n\nprint(\"Training ended with {} epochs.\".format(epoch))\n\n","sub_path":"train_semeval.py","file_name":"train_semeval.py","file_ext":"py","file_size_in_byte":14163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23756264","text":"#!/usr/bin/python\n#-*-coding:utf8-*-\n\nimport sys\nimport os\nimport cPickle as pickle\nfrom random import randint\n\ntm_lambda=0.2\nlm_lambda=0.8\n\ndef get_uppers(token_list):\n uppers=[]\n for token in token_list:\n uppers.append([])\n for index,char in enumerate(token):\n if char.isupper():\n uppers[-1].append(index)\n return uppers\n\ndef apply_uppers(uppers,token_list):\n for token_index,indices in enumerate(uppers):\n token=token_list[token_index]\n for index in indices:\n if index \n#Author: Alice Naftaly, May 2020\n\nimport sys\n\n#read in uncondensed file\n#returns dictionary with key == junction id and value == junction counts\ndef read_uncondensed():\n input_file = sys.argv[1]\n junction_dict = {}\n with open(input_file, 'r') as info:\n for line in info:\n if line.startswith(\"Junction.Identifier\"):\n continue\n else:\n new_line = line.split()\n junction_id = new_line[0]\n junction_counts = new_line[1]\n if junction_id in junction_dict:\n junction_dict[junction_id].append(junction_counts)\n elif junction_id not in junction_dict:\n junction_dict.update({junction_id:[junction_counts]})\n return junction_dict\n\n\n#collapse any junction that has more than one set of counts\n#returns dictionary where every key(junction) has one total count\ndef collapse_counts():\n junction_dict = read_uncondensed()\n final_junction_dict = {}\n for junction in junction_dict:\n single_junction = junction_dict[junction]\n if len(single_junction) == 1:\n final_junction_dict.update({junction:single_junction[0]})\n elif len(single_junction) > 1:\n final_counts = int(single_junction[0]) + int(single_junction[1])\n final_junction_dict.update({junction:str(final_counts)})\n return final_junction_dict\n\n#write new junction counts:\ndef write():\n junction_dict = collapse_counts()\n output = sys.argv[2]\n with open(output, 'a') as out:\n for junction in junction_dict:\n single_junction = junction_dict[junction]\n final = \"%s\\t%s\\n\" % 
(str(junction), str(single_junction))\n            out.write(final)\n\nwrite()\n","sub_path":"Short_Read_RNA_Analyses/PSI_Exon_Usage/Condense_RNA_reads_PSI.py","file_name":"Condense_RNA_reads_PSI.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"123927229","text":"\n\"\"\"\nThis module is used to define the class containing the entire Bayesian Network,\nand the corresponding attributes/methods to train the model, set algorithmic options, calculate lower bound, etc.\n\"\"\"\n\nfrom __future__ import division\nfrom time import time\nimport os\nimport scipy as s\nimport pandas as pd\nimport sys\nimport numpy.ma as ma\nimport math\nimport resource\n\nfrom mofapy2.core.nodes.variational_nodes import Variational_Node\nfrom mofapy2.core import gpu_utils\nfrom .utils import corr, nans, infer_platform\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass BayesNet(object):\n    def __init__(self, dim, nodes):\n        \"\"\" Initialisation of a Bayesian network\n\n        PARAMETERS\n        ----------\n        dim: dict\n            keyworded dimensionalities, ex. {'N'=10, 'M'=3, ...}\n        nodes: dict\n            dictionary with all nodes where the keys are the name of the node and the values are instances of the 'Node' class\n        \"\"\"\n\n        self.dim = dim\n        self.nodes = nodes\n        self.options = None # TODO rename to train_options everywhere\n\n        # Training and simulations flag\n        self.trained = False\n        self.simulated = False\n\n        # Set GPU mode\n        # gpu_utils.gpu_mode = options['gpu_mode']\n\n    def setTrainOptions(self, train_opts):\n        \"\"\" Method to store training options \"\"\"\n\n        # Sanity checks\n        assert \"maxiter\" in train_opts, \"'maxiter' not found in the training options dictionary\"\n        assert \"start_drop\" in train_opts, \"'start_drop' not found in the training options dictionary\"\n        assert \"freq_drop\" in train_opts, \"'freq_drop' not found in the training options dictionary\"\n        assert \"verbose\" in train_opts, \"'verbose' not found in the training options dictionary\"\n        assert \"quiet\" in train_opts, \"'quiet' not found in the training options dictionary\"\n        # assert \"tolerance\" in train_opts, \"'tolerance' not found in the training options dictionary\"\n        assert \"convergence_mode\" in train_opts, \"'convergence_mode' not found in the training options dictionary\"\n        assert \"forceiter\" in train_opts, \"'forceiter' not found in the training options dictionary\"\n        assert \"schedule\" in train_opts, \"'schedule' not found in the training options dictionary\"\n        assert \"start_sparsity\" in train_opts, \"'start_sparsity' not found in the training options dictionary\"\n        assert \"gpu_mode\" in train_opts, \"'gpu_mode' not found in the training options dictionary\"\n        assert \"start_elbo\" in train_opts, \"'start_elbo' not found in the training options dictionary\"\n\n        self.options = train_opts\n\n    def getParameters(self, *nodes):\n        \"\"\" Method to collect all parameters of a given set of nodes\n\n        PARAMETERS\n        ----------\n        nodes: iterable\n            name of the nodes (all nodes by default)\n        \"\"\"\n\n        if len(nodes) == 0: nodes = self.nodes.keys()\n        params = {}\n        for node in nodes:\n            tmp = self.nodes[node].getParameters()\n            if tmp != None: params[node] = tmp\n        return params\n\n    def getExpectations(self, only_first_moments=False, *nodes):\n        \"\"\"Method to collect all expectations of a given set of nodes\n\n        PARAMETERS\n        ----------\n        only_first_moments: bool\n            get only first moments? 
(Default is False)\n nodes: list\n name of the nodes (Default is all nodes)\n \"\"\"\n\n if len(nodes) == 0: nodes = self.nodes.keys()\n expectations = {}\n for node in nodes:\n if only_first_moments:\n tmp = self.nodes[node].getExpectation()\n else:\n tmp = self.nodes[node].getExpectations()\n expectations[node] = tmp\n return expectations\n\n def getNodes(self):\n \"\"\" Method to return all nodes \"\"\"\n return self.nodes\n\n def calculate_variance_explained(self, total=False):\n\n # Collect relevant expectations\n Z = self.nodes['Z'].getExpectation()\n W = self.nodes[\"W\"].getExpectation()\n Y = self.nodes[\"Y\"].getExpectation()\n\n # Get groups\n groups = self.nodes[\"AlphaZ\"].groups if \"AlphaZ\" in self.nodes else s.array([0]*self.dim['N'])\n\n if total:\n r2 = [ s.zeros(self.dim['M']) for g in range(self.dim['G'])]\n else:\n r2 = [ s.zeros([self.dim['M'], self.dim['K']]) for g in range(self.dim['G'])]\n\n for m in range(self.dim['M']):\n mask = self.nodes[\"Y\"].getNodes()[m].getMask(full=True)\n for g in range(self.dim['G']):\n gg = groups==g\n SS = s.square(Y[m][gg,:]).sum()\n\n # Total variance explained (using all factors)\n if total:\n Ypred = s.dot(Z[gg,:], W[m].T)\n Ypred[mask[gg,:]] = 0.\n Res = s.sum((Y[m][gg, :] - Ypred) ** 2.)\n r2[g][m] = 1. - Res / SS\n\n # Variance explained per factor\n else:\n for k in range(self.dim['K']):\n Ypred = s.outer(Z[gg,k], W[m][:,k])\n Ypred[mask[gg,:]] = 0.\n Res = s.sum((Y[m][gg,:] - Ypred)**2.)\n r2[g][m,k] = 1. - Res/SS\n return r2\n\n def removeInactiveFactors(self, min_r2=None):\n \"\"\"Method to remove inactive factors\n\n PARAMETERS\n ----------\n min_r2: float\n threshold to shut down factors based on a minimum variance explained per group and view\n \"\"\"\n drop_dic = {}\n\n if min_r2 is not None:\n r2 = self.calculate_variance_explained()\n\n tmp = [ s.where( (r2[g]>min_r2).sum(axis=0) == 0)[0] for g in range(self.dim['G']) ]\n drop_dic[\"min_r2\"] = list(set.intersection(*map(set,tmp)))\n if len(drop_dic[\"min_r2\"]) > 0:\n drop_dic[\"min_r2\"] = [ s.random.choice(drop_dic[\"min_r2\"]) ]\n\n # Drop the factors\n drop = s.unique(s.concatenate(list(drop_dic.values())))\n if len(drop) > 0:\n for node in self.nodes.keys():\n self.nodes[node].removeFactors(drop)\n self.dim['K'] -= len(drop)\n\n if self.dim['K']==0:\n print(\"All factors shut down, no structure found in the data.\")\n exit()\n\n pass\n\n def precompute(self):\n # Precompute terms\n for n in self.nodes:\n self.nodes[n].precompute(self.options)\n\n # Precompute ELBO\n for node in self.nodes[\"Y\"].getNodes(): node.TauTrick = False # important to do this for ELBO computation\n elbo = self.calculateELBO()\n for node in self.nodes[\"Y\"].getNodes(): node.TauTrick = self.options[\"Y_ELBO_TauTrick\"]\n\n if self.options['verbose']:\n print(\"ELBO before training:\")\n print(\"\".join([ \"%s=%.2f \" % (k,v) for k,v in elbo.drop(\"total\").iteritems() ]) + \"\\nTotal: %.2f\\n\" % elbo[\"total\"])\n else:\n if not self.options['quiet']:\n print('ELBO before training: %.2f' % elbo[\"total\"])\n print(\"\\n\")\n\n return elbo\n\n def iterate(self):\n \"\"\"Method to start iterating and updating the variables using the VB algorithm\"\"\"\n\n # Define some variables to monitor training\n nodes = list(self.getVariationalNodes().keys())\n elbo = pd.DataFrame(data = nans((self.options['maxiter']+1, len(nodes)+1 )), columns = nodes+[\"total\"] )\n number_factors = nans((self.options['maxiter']+1))\n iter_time = nans((self.options['maxiter']+1))\n\n # Precompute\n converged = 
False; convergence_token = 1\n elbo.iloc[0] = self.precompute()\n number_factors[0] = self.dim['K']\n iter_time[0] = 0.\n\n for i in range(1,self.options['maxiter']):\n t = time();\n\n # Remove inactive factors\n if (i>=self.options[\"start_drop\"]) and (i%self.options['freq_drop']) == 0:\n if self.options['drop'][\"min_r2\"] is not None:\n self.removeInactiveFactors(**self.options['drop'])\n number_factors[i] = self.dim[\"K\"]\n\n # Update node by node, with E and M step merged\n t_updates = time()\n for node in self.options['schedule']:\n if (node==\"ThetaW\" or node==\"ThetaZ\") and i=self.options[\"start_elbo\"]) and ((i-self.options[\"start_elbo\"])%self.options['elbofreq']==0):\n t_elbo = time()\n elbo.iloc[i] = self.calculateELBO()\n t_elbo = time() - t_elbo\n\n # Check convergence using the ELBO\n if i==self.options[\"start_elbo\"]: \n delta_elbo = elbo.iloc[i][\"total\"]-elbo.iloc[0][\"total\"]\n else:\n delta_elbo = elbo.iloc[i][\"total\"]-elbo.iloc[i-self.options['elbofreq']][\"total\"]\n\n # Print ELBO monitoring\n if not self.options['quiet']:\n print(\"Iteration %d: time=%.2f, ELBO=%.2f, deltaELBO=%.3f (%.9f%%), Factors=%d\" % (i, time()-t, elbo.iloc[i][\"total\"], delta_elbo, 100*abs(delta_elbo/elbo.iloc[0][\"total\"]), (self.dim['K'])))\n if delta_elbo<0 and not self.options['stochastic']: print(\"Warning, lower bound is decreasing...\\a\")\n\n # Print ELBO decomposed by node and variance explained\n if self.options['verbose']:\n print(\"\".join([ \"%s=%.2f \" % (k,v) for k,v in elbo.iloc[i].drop(\"total\").iteritems() ]))\n print('Time spent in ELBO computation: %.1f%%' % (100*t_elbo/(t_updates+t_elbo)) )\n\n # Assess convergence\n if i>self.options[\"start_elbo\"] and not self.options['forceiter']:\n convergence_token, converged = self.assess_convergence(delta_elbo, elbo.iloc[0][\"total\"], convergence_token)\n if converged:\n number_factors = number_factors[:i]\n elbo = elbo[:i]\n iter_time = iter_time[:i]\n print (\"\\nConverged!\\n\"); break\n\n # Do not calculate lower bound\n else:\n if not self.options['quiet']: print(\"Iteration %d: time=%.2f, Factors=%d\" % (i,time()-t,self.dim[\"K\"]))\n\n # Print other statistics\n if self.options['verbose']:\n # Memory usage\n print('Peak memory usage: %.2f MB' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / infer_platform() ))\n\n # Variance explained\n r2 = s.asarray(self.calculate_variance_explained(total=True)).mean(axis=0)\n r2[r2<0] = 0.\n print(\"Variance explained:\\t\" + \" \".join([ \"View %s: %.3f%%\" % (m,100*r2[m]) for m in range(self.dim[\"M\"])]))\n\n # Sparsity levels of the weights\n W = self.nodes[\"W\"].getExpectation()\n foo = [s.mean(s.absolute(W[m])<1e-3) for m in range(self.dim[\"M\"])]\n print(\"Fraction of zero weights:\\t\" + \" \".join([ \"View %s: %.0f%%\" % (m,100*foo[m]) for m in range(self.dim[\"M\"])]))\n\n # Sparsity levels of the factors\n # Z = self.nodes[\"Z\"].getExpectation()\n # bar = s.mean(s.absolute(Z)<1e-3)\n # print(\"Fraction of zero samples: %.0f%%\" % (100*bar))\n print(\"\\n\")\n\n iter_time[i] = time()-t\n \n # Flush (we need this to print when running on the cluster)\n sys.stdout.flush()\n\n # Finish by collecting the training statistics\n self.train_stats = { 'time':iter_time, 'number_factors':number_factors, 'elbo':elbo[\"total\"].values, 'elbo_terms':elbo.drop(\"total\",1) }\n self.trained = True\n\n def assess_convergence(self, delta_elbo, first_elbo, convergence_token):\n converged = False\n\n # Option 1: deltaELBO\n # if abs(delta_elbo) < self.options['tolerance']: 
\n # converged = True\n\n # Assess convergence based on the fraction of deltaELBO change\n if self.options[\"convergence_mode\"] == \"fast\":\n convergence_threshold = 0.00001\n elif self.options[\"convergence_mode\"] == \"medium\":\n convergence_threshold = 0.000001\n elif self.options[\"convergence_mode\"] == \"slow\":\n convergence_threshold = 0.0000001\n else:\n print(\"Convergence mode not recognised\"); exit()\n\n if 100*abs(delta_elbo/first_elbo) < convergence_threshold: \n convergence_token += 1\n if convergence_token==5: converged = True\n else:\n convergence_token = 1\n\n return convergence_token, converged\n\n def getVariationalNodes(self):\n \"\"\" Method to return all variational nodes \"\"\"\n # TODO problem with dictionnary comprehension here\n to_ret = {}\n for node in self.nodes.keys():\n if isinstance(self.nodes[node],Variational_Node):\n to_ret[node] =self.nodes[node]\n\n return to_ret\n # return { node:self.nodes[node] for node in self.nodes.keys() if isinstance(self.nodes[node],Variational_Node)}\n # return { k:v for k,v in self.nodes.items() if isinstance(v,Variational_Node) }\n\n def getTrainingStats(self):\n \"\"\" Method to return training statistics \"\"\"\n return self.train_stats\n\n def getTrainingOpts(self):\n \"\"\" Method to return training options \"\"\"\n return self.options\n\n def getTrainingData(self):\n \"\"\" Method to return training data \"\"\"\n return self.nodes[\"Y\"].getValues()\n\n def calculateELBO(self, *nodes):\n \"\"\"Method to calculate the Evidence Lower Bound of the model\"\"\"\n if len(nodes) == 0: nodes = self.getVariationalNodes().keys()\n elbo = pd.Series(s.zeros(len(nodes)+1), index=list(nodes)+[\"total\"])\n for node in nodes:\n elbo[node] = float(self.nodes[node].calculateELBO())\n elbo[\"total\"] += elbo[node]\n return elbo\n\n\nclass StochasticBayesNet(BayesNet):\n def __init__(self, dim, nodes):\n super().__init__(dim=dim, nodes=nodes)\n\n def step_size(self, i):\n # return the step size for the considered iteration\n return (i + self.options['learning_rate'])**(-self.options['forgetting_rate'])\n\n def step_size2(self, i):\n # return the step size for the considered iteration\n return self.options['learning_rate'] / ((1 + self.options['forgetting_rate'] * i)**(3./4.))\n\n def sample_mini_batch(self):\n # TODO if multiple group, sample indices in each group evenly ? prob yes\n S = int( self.options['batch_size'] * self.dim['N'] )\n ix = s.random.choice(range(self.dim['N']), size=S, replace=False)\n self.define_mini_batch(ix)\n return ix\n\n def sample_mini_batch_no_replace(self, i):\n \"\"\" Method to define mini batches\"\"\"\n\n # TODO :\n # - if multiple group, sample indices in each group evenly ? 
prob yes\n\n i -= 1 # This is because we start at iteration 1 in the main loop\n\n # Sample mini-batch indices and define epoch\n n_batches = math.ceil(1./self.options['batch_size'])\n S = self.options['batch_size'] * self.dim['N']\n batch_ix = i % n_batches\n epoch = int(i / n_batches)\n if batch_ix == 0:\n print(\"## Epoch %s ##\" % str(epoch+1))\n print(\"-------------------------------------------------------------------------------------------\")\n self.shuffled_ix = s.random.choice(range(self.dim['N']), size= self.dim['N'], replace=False)\n\n min = int(S * batch_ix)\n max = int(S * (batch_ix + 1))\n if max > self.dim['N']:\n max = self.dim['N']\n\n ix = self.shuffled_ix[min:max]\n self.define_mini_batch(ix)\n\n return ix, epoch\n \n def define_mini_batch(self, ix):\n # Define mini-batch for each node\n self.nodes['Y'].define_mini_batch(ix)\n self.nodes['Tau'].define_mini_batch(ix)\n if 'AlphaZ' in self.nodes:\n self.nodes['AlphaZ'].define_mini_batch(ix)\n if 'ThetaZ' in self.nodes:\n self.nodes['ThetaZ'].define_mini_batch(ix) \n\n def iterate(self):\n \"\"\"Method to start iterating and updating the variables using the VB algorithm\"\"\"\n\n # Define some variables to monitor training\n nodes = list(self.getVariationalNodes().keys())\n elbo = pd.DataFrame(data = nans((self.options['maxiter']+1, len(nodes)+1 )), columns = nodes+[\"total\"] )\n number_factors = nans((self.options['maxiter']+1))\n iter_time = nans((self.options['maxiter']+1))\n\n # Precompute\n converged = False; convergence_token = 1\n elbo.iloc[0] = self.precompute()\n number_factors[0] = self.dim['K']\n iter_time[0] = 0.\n\n # Print stochastic settings before training\n print(\"Using stochastic variational inference with the following parameters:\")\n print(\"- Batch size (fraction of samples): %.2f\\n- Forgetting rate: %.2f\\n- Learning rate: %.2f\\n- Starts at iteration: %d \\n\" % \n (100*self.options['batch_size'], self.options['forgetting_rate'], self.options['learning_rate'], self.options['start_stochastic']) )\n ix = None\n\n for i in range(1,self.options['maxiter']):\n t = time();\n\n # Sample mini-batch and define step size for stochastic inference\n if i>=(self.options[\"start_stochastic\"]):\n ix, epoch = self.sample_mini_batch_no_replace(i-(self.options[\"start_stochastic\"]-1))\n ro = self.step_size2(epoch)\n else:\n ro = 1.\n\n # Remove inactive factors\n if (i>=self.options[\"start_drop\"]) and (i%self.options['freq_drop']) == 0:\n # if any(self.options['drop'].values()):\n if self.options['drop'][\"min_r2\"] is not None:\n self.removeInactiveFactors(**self.options['drop'])\n number_factors[i] = self.dim[\"K\"]\n\n # Update node by node, with E and M step merged\n t_updates = time()\n for node in self.options['schedule']:\n if (node==\"ThetaW\" or node==\"ThetaZ\") and i=self.options[\"start_elbo\"]) and ((i-self.options[\"start_elbo\"])%self.options['elbofreq']==0):\n t_elbo = time()\n elbo.iloc[i] = self.calculateELBO()\n t_elbo = time() - t_elbo\n\n # Check convergence using the ELBO\n if i==self.options[\"start_elbo\"]: \n delta_elbo = elbo.iloc[i][\"total\"]-elbo.iloc[0][\"total\"]\n else:\n delta_elbo = elbo.iloc[i][\"total\"]-elbo.iloc[i-self.options['elbofreq']][\"total\"]\n\n # Print ELBO monitoring\n print(\"Iteration %d: time=%.2f, ELBO=%.2f, deltaELBO=%.3f (%.9f%%), Factors=%d\" % (i, time()-t, elbo.iloc[i][\"total\"], delta_elbo, 100*abs(delta_elbo/elbo.iloc[0][\"total\"]), (self.dim['K'])))\n if delta_elbo<0 and not self.options['stochastic']: print(\"Warning, lower bound is 
decreasing...\\a\")\n\n # Print ELBO decomposed by node and variance explained\n if self.options['verbose']:\n print(\"\".join([ \"%s=%.2f \" % (k,v) for k,v in elbo.iloc[i].drop(\"total\").iteritems() ]))\n print('Time spent in ELBO computation: %.1f%%' % (100*t_elbo/(t_updates+t_elbo)) )\n\n # Assess convergence\n if i>self.options[\"start_elbo\"] and not self.options['forceiter']:\n convergence_token, converged = self.assess_convergence(delta_elbo, elbo.iloc[0][\"total\"], convergence_token)\n if converged:\n number_factors = number_factors[:i]\n elbo = elbo[:i]\n iter_time = iter_time[:i]\n print (\"\\nConverged!\\n\"); break\n\n # Do not calculate lower bound\n else:\n print(\"Iteration %d: time=%.2f, Factors=%d\" % (i,time()-t,self.dim[\"K\"]))\n\n # Print other statistics\n print(\"Step size (rho): %.3f\" % ro )\n if self.options['verbose']:\n # Memory usage\n print('Peak memory usage: %.2f MB' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / infer_platform() ))\n\n # Variance explained\n r2 = s.asarray(self.calculate_variance_explained(total=True)).mean(axis=0)\n r2[r2<0] = 0.\n print(\"Variance explained:\\t\" + \" \".join([ \"View %s: %.3f%%\" % (m,100*r2[m]) for m in range(self.dim[\"M\"])]))\n\n # Sparsity levels of the weights\n W = self.nodes[\"W\"].getExpectation()\n foo = [s.mean(s.absolute(W[m])<1e-3) for m in range(self.dim[\"M\"])]\n print(\"Fraction of zero weights:\\t\" + \" \".join([ \"View %s: %.0f%%\" % (m,100*foo[m]) for m in range(self.dim[\"M\"])]))\n \n # Sparsity levels of the factors\n # Z = self.nodes[\"Z\"].getExpectation()\n # bar = s.mean(s.absolute(Z)<1e-3)\n # print(\"Fraction of zero samples: %.0f%%\" % (100*bar))\n print(\"\")\n\n iter_time[i] = time()-t\n \n # Flush (we need this to print when running on the cluster)\n sys.stdout.flush()\n\n # Finish by collecting the training statistics\n self.train_stats = { 'time':iter_time, 'number_factors':number_factors, 'elbo':elbo[\"total\"].values, 'elbo_terms':elbo.drop(\"total\",1) }\n self.trained = True\n","sub_path":"mofapy2/core/BayesNet.py","file_name":"BayesNet.py","file_ext":"py","file_size_in_byte":22066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"297952261","text":" # -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom openerp.exceptions import Warning\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass jmdlead(models.Model):\n _inherit = \"crm.lead\"\n\n @api.one\n def compute_oxoo(self):\n cadena = \"04\"\n cadena += str(self.partner_id.ncliente)\n cadena += str(self.fecha_vencimiento)[-2:]\n cadena += str(self.fecha_vencimiento)[-5: -3]\n cadena += str(int(self.pago_mensual)).zfill(5)\n cadena += str(round(self.pago_mensual, 2))[-2:]\n self.oxxo_barcode = cadena\n\n @api.one\n @api.depends(\"credito_solicitado\")\n def get_vencimiento(self):\n vencimiento = \"\"\n for i in self.credito_solicitado.lineas:\n vencimiento = i.fecha\n self.fecha_vencimiento = vencimiento\n\n @api.one\n @api.depends(\"credito_solicitado\")\n def get_mensual(self):\n pago = 0\n for i in self.credito_solicitado.lineas:\n pago = i.monto\n self.pago_mensual = pago\n\n name = fields.Char(\"Codigo\", default=lambda self: self.\n env[\"ir.sequence\"].get(\"sofom.lead\"))\n interesado = fields.Boolean(\"Interesado\")\n producto = fields.Selection([(\"micro\", \"Microcrédito\"),\n (\"nom\", \"Nómina\")], string=\"Producto\")\n giro = fields.Many2one(\"sofom.giro\", \"Giro\")\n subgiro = 
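The StochasticBayesNet class in the BayesNet.py record above drives stochastic variational inference with a decaying step size. The following is a minimal standalone sketch of its two schedules with illustrative parameter values (the record itself reads learning_rate and forgetting_rate from self.options, so the defaults here are assumptions for demonstration only):

def step_size_power(i, learning_rate=1.0, forgetting_rate=0.6):
    # rho_i = (i + delay) ** (-kappa): a classic Robbins-Monro style decay
    return (i + learning_rate) ** (-forgetting_rate)

def step_size_scaled(i, learning_rate=1.0, forgetting_rate=0.01):
    # rho_i = lr / (1 + kappa * i) ** (3/4): the variant iterate() uses per epoch
    return learning_rate / ((1 + forgetting_rate * i) ** (3. / 4.))

for epoch in (0, 1, 10, 100):
    print(epoch, round(step_size_power(epoch), 4), round(step_size_scaled(epoch), 4))

Both schedules decay toward zero, so later mini-batches perturb the variational parameters less and less, which is what lets the noisy stochastic updates settle.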
fields.Many2one(\"sofom.subgiro\", \"Subgiro\")\n cotizaciones = fields.One2many(\"sofom.calculator\", \"lead\",\n string=\"Cotizaciones\")\n credito_solicitado = fields.Many2one(\"sofom.calculator\",\n string=\"Cotización Aceptada\")\n visita = fields.Boolean(\"Inspección Ocular\")\n avisita = fields.Binary(\"Evidencia de la Visita\")\n navisita = fields.Char(\"Nombre de la Evidencia\")\n ife = fields.Binary(\"IFE\")\n nife = fields.Char(\"NIFE\")\n cd = fields.Binary(\"Comprobante Domiciliario\")\n ncd = fields.Char(\"NCD\")\n ci = fields.Binary(\"Comprobante de Ingresos\")\n nci = fields.Char(\"NCI\")\n ac = fields.Binary(\"Autorización Para Consulta\")\n nac = fields.Char(\"NAC\")\n sol = fields.Binary(\"Solicitud Firmada\")\n nsol = fields.Char(\"Nombre de la Solicitud\")\n ing = fields.Binary(\"Declaración de Ingresos\")\n ning = fields.Char(\"Nombre Delaracion\")\n curp = fields.Binary(\"CURP\")\n ncurp = fields.Char(\"Nombre CURP\")\n ccpdf = fields.Binary(\"PDF\")\n nccpdf = fields.Char(\"NPDF\")\n\n ffachada = fields.Binary(\"Fotografía de la Fachada\")\n finteriora = fields.Binary(\"Fotografía Interior 1\")\n finteriorb = fields.Binary(\"Fotografia Interior 2\")\n fvehiculo = fields.Binary(\"Fotografía Vehiculo\")\n fotro = fields.Binary(\"Otra Fotografía\")\n\n nffachada = fields.Char(\"Fotografía de la Fachada\")\n nfinteriora = fields.Char(\"Fotografía Interior 1\")\n nfinteriorb = fields.Char(\"Fotografia Interior 2\")\n nfvehiculo = fields.Char(\"Fotografía Vehiculo\")\n nfotro = fields.Char(\"Otra Fotografía\")\n\n puntaje = fields.Float(\"Puntaje\")\n cdp = fields.Float(\"Capacidad de Pago\")\n contrato = fields.Binary(\"Contrato\")\n ncontrato = fields.Char(\"NC\")\n caratula = fields.Binary(\"Carátula\")\n ncaratula = fields.Char(\"NC\")\n resumen = fields.Binary(\"Resúmen del Contrato\")\n nresumen = fields.Char(\"NR\")\n anexos = fields.Binary(\"Anexos\")\n nanexos = fields.Char(\"NA\")\n ingreso = fields.Float(\"Ingreso Neto\")\n puntaje = fields.Float(\"Puntaje\")\n solicitud = fields.One2many(\"sofom.solicitud\", \"lead_id\",\n string=\"Solicitud\")\n solicitudn = fields.One2many(\"sofom.solicitudn\", \"lead_id\",\n string=\"Solicitud\")\n evaluacion_ids = fields.One2many(\"sofom.evaluacion\", \"lead_id\",\n string=\"Evaluación\")\n evaluacionn_ids = fields.One2many(\"sofom.evaluacionn\", \"lead_id\",\n string=\"Evaluación\")\n cuanti = fields.One2many(\"sofom.cuanti\", \"lead_id\",\n string=\"Evaluación Cualitativa\")\n cuantim = fields.One2many(\"sofom.cuantim\", \"lead_id\",\n string=\"Evaluación Cualitativa\")\n credito_id = fields.Many2one(\"sofom.credito\", string=\"Crédito\")\n fecha_contrato = fields.Date(\"Fecha del Contrato\")\n aceptado = fields.Boolean(\"Credito Autorizado\")\n forma_disposicion = fields.Selection([('Efectivo', 'Efectivo'),\n ('Deposito', 'Depósito'), ('Transferencia', 'Transferencia'),\n ('Cheque', 'Cheque')],\n string=\"Forma de Disposición\")\n primer_pago = fields.Date(\"Fecha de Primer Pago\")\n cat = fields.Float(\"CAT\")\n credito_generado = fields.Boolean(\"Credito Generado\")\n asignacion = fields.Selection([('Propios', 'Propios'),\n ('Otros', 'Otros')], string=\"Asignación de Recursos\")\n fuente_id = fields.Many2one(\"sofom.fuente\", \"Fuente de Recursos\")\n numero_cheque = fields.Char(\"Numero de cheque\")\n\n isolicitud = fields.Many2one(\"sofom.solicitud\", string=\"Importar Solicitud\")\n isolicitudn = fields.Many2one(\"sofom.solicitudn\",\n string=\"Importar Solicitud\")\n monto = fields.Float(\"Monto Solicitado\", 
related=\"credito_solicitado.monto\")\n planned_revenue = fields.Float(string=\"Monto del Cŕedito Solicitado\")\n #Barcode\n oxxo_barcode = fields.Char(\"Codigo de Barras Oxxo\", compute=compute_oxoo)\n fecha_vencimiento = fields.Date(\"Vencimiento\", compute=get_vencimiento)\n pago_mensual = fields.Float(\"Pago Mensual\", compute=get_mensual)\n\n @api.onchange('credito_solicitado')\n @api.one\n def onchange_credito(self):\n self.planned_revenue = self.credito_solicitado.monto\n\n @api.one\n def change_primer(self):\n print(\"Aqui 1\")\n print((str(self.primer_pago)))\n self.credito_solicitado.write({'inicio': self.primer_pago})\n print(\"Aqui 2\")\n print((self.credito_solicitado.name))\n print(\"Aqui 3\")\n inicio_obj = datetime.datetime.strptime(self.primer_pago, \"%Y-%m-%d\")\n siguiente_pago = inicio_obj\n for i in self.credito_solicitado.lineas:\n i.write({'fecha': siguiente_pago.strftime(\"%Y-%m-%d\")})\n siguiente_pago += datetime.timedelta(days=self.credito_solicitado\n .plazo.dias_ciclo)\n siguiente_pago += relativedelta(months=self.credito_solicitado\n .plazo.meses_ciclo)\n return True\n\n @api.one\n def copy(self, default=None):\n default = dict(default or {})\n name = self.env[\"ir.sequence\"].get(\"sofom.calculator\")\n solicitud = None\n solicitudn = None\n for i in self.solicitud:\n solicitud = i.copy()\n break\n for i in self.solicitudn:\n solicitudn = i.copy()\n break\n print((\"Id de la solicitud duplicada\" + str(solicitud)))\n default.update({\n 'stage_id': 10,\n 'name': name,\n 'solicitud': [(1, solicitud, {})],\n 'solicitudn': [(1, solicitudn, {})],\n })\n return super(jmdlead, self).copy(default)\n\n @api.multi\n def goto_solicitud(self):\n if self.credito_solicitado:\n self.write({'stage_id': 1})\n else:\n raise Warning('No ha colocado la cotización en el campo Cotización\\\n Aceptada')\n\n @api.one\n def importar(self):\n if self.isolicitud:\n self.isolicitud.copy({'lead_id': self.id})\n if self.isolicitudn:\n self.isolicitudn.copy({'lead_id': self.id})\n\n @api.multi\n def goto_documentos(self):\n if (bool(self.solicitud) or bool(self.solicitudn)):\n self.write({'stage_id': 4})\n else:\n raise Warning('No ha llenado la Solicitud, vaya a la pesataña\\\n Solicitud')\n\n @api.multi\n def goto_analisis(self):\n if (self.ife and self.cd and self.ci and self.ac and self.sol\n and self.ing and self.curp):\n self.write({'stage_id': 5})\n else:\n raise Warning('No se han ingresado todos los documentos')\n\n#Ir a autorizacion\n @api.multi\n def goto_impresion(self):\n pago_mensual = 0\n capacidad_pago = 0\n if self.credito_solicitado:\n pago_mensual = self.credito_solicitado.pago\n if self.cuantim:\n capacidad_pago = self.cuantim.capacidad\n if self.cuanti:\n capacidad_pago = self.cuanti.capacidad\n if pago_mensual > capacidad_pago:\n raise Warning(('La capacidad de pago ' + str(capacidad_pago) +\n ' es menor al pago mensual ' + str(pago_mensual)))\n if (self.evaluacion_ids and self.cuantim) or\\\n (self.evaluacionn_ids and self.cuanti):\n self.write({'stage_id': 6})\n else:\n raise Warning('No se han realizado ambas evaluaciones')\n\n#Ir a impresion\n @api.multi\n def goto_autorizacion(self):\n if (self.aceptado):\n self.write({'stage_id': 11})\n else:\n raise Warning('No se ha Autorizado el Crédito')\n\n @api.multi\n def restart(self):\n self.write({'stage_id': 10})\n\n @api.multi\n def generate_payment(self):\n if self.credito_generado:\n return\n\n print(\"Generando el crédito\")\n credito_id = self.env['sofom.credito'].create({\n 'name': self.name,\n 'titular': 
self.partner_id.id,\n            'tasa': self.credito_solicitado.tasa.name,\n            'oxxo_barcode': self.oxxo_barcode,\n            })\n        print(\"Llego hasta aqui\")\n        self.write({'credito_id': credito_id.id,\n                    'credito_generado': True})\n        fecha = self.primer_pago\n        dias_plazo = self.credito_solicitado.plazo.dias_ciclo\n        meses_plazo = self.credito_solicitado.plazo.meses_ciclo\n        inicio_obj = datetime.datetime.strptime(self.primer_pago, \"%Y-%m-%d\")\n        siguiente_pago = inicio_obj\n        for i in self.credito_solicitado.lineas:\n            fecha = siguiente_pago.strftime(\"%Y-%m-%d\")\n            id_factura = self.env['account.invoice'].create(\n                {'partner_id': self.partner_id.id,\n                 'date_invoice': fecha,\n                 'account_id': 142,\n                 'journal_id': 3,\n                 'type': 'out_invoice',\n                 'reference_type': 'none'})\n            self.env['account.invoice.line'].create(\n                {'invoice_id': id_factura.id,\n                 'name': 'Capital',\n                 'quantity': 1,\n                 'price_unit': i.capital,\n                 'account_id': 284})\n            self.env['account.invoice.line'].create(\n                {'invoice_id': id_factura.id,\n                 'name': 'Interés',\n                 'quantity': 1,\n                 'price_unit': i.intereses,\n                 'account_id': 284})\n            self.env['sofom.pago'].create({\n                'numero': i.npago,\n                'fecha': fecha,\n                'monto': i.monto,\n                'capital': i.capital,\n                'intereses': i.intereses,\n                'factura': id_factura.id,\n                'credito_id': credito_id.id,\n                })\n            siguiente_pago += datetime.timedelta(days=dias_plazo)\n            siguiente_pago += relativedelta(months=+meses_plazo)\n\n\nclass jmdgiro(models.Model):\n    _name = \"sofom.giro\"\n    _inherit = \"mail.thread\"\n    name = fields.Char(\"Nombre\")\n\n\nclass jmdaccount(models.Model):\n    _name = \"sofom.subgiro\"\n    _inherit = \"mail.thread\"\n    name = fields.Char(\"Nombre\")\n    giro = fields.Many2one(\"sofom.giro\", \"Giro\")\n\n\nclass jmdfuente(models.Model):\n    _name = \"sofom.fuente\"\n    name = fields.Char(\"Nombre\")\n    actual = fields.Float(\"Monto Actual\")\n    nomina = fields.Boolean(\"Activo en Nómina\")\n    micro = fields.Boolean(\"Activo en Microcrédito\")\n    tasa = fields.Float(\"Tasa\")\n    pagos_ids = fields.One2many(\"sofm.fuente.pago\", \"fuente_id\",\n                                string=\"Fuentes\")\n\n\nclass jmdfpago(models.Model):\n    _inherit = \"mail.thread\"\n    _name = \"sofm.fuente.pago\"\n    name = fields.Char(\"Folio del Pago\")\n    monto = fields.Float(\"Monto\")\n    intereses = fields.Float(\"Intereses\")\n    iva = fields.Float(\"IVA\")\n    fuente_id = fields.Many2one(\"sofom.fuente\")\n    fecha = fields.Date(\"Fecha\")","sub_path":"sofom/lead.py","file_name":"lead.py","file_ext":"py","file_size_in_byte":12133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318166236","text":"# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\r\n#\r\n# (e.g., the array [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).\r\n#\r\n# You are given a target value to search. If the target exists in the array, return its index, otherwise return -1.\r\n#\r\n# You may assume no duplicate exists in the array.\r\n#\r\n# Your algorithm's runtime complexity must be in the order of O(log n).\r\n#\r\n# Example 1:\r\n#\r\n# Input: nums = [4,5,6,7,0,1,2], target = 0\r\n# Output: 4\r\n#\r\n#\r\n# Example 2:\r\n#\r\n# Input: nums = [4,5,6,7,0,1,2], target = 3\r\n# Output: -1\r\n#\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n    def search1(self, nums: List[int], target: int) -> int:\r\n        if len(nums) == 0: return -1\r\n        if target == nums[0]: return 0\r\n        if target == nums[-1]: return len(nums) - 1\r\n        if len(nums) <= 2: return -1\r\n        if target > nums[0]:\r\n            i = 1\r\n            while i < len(nums) and nums[i - 1] <= nums[i] != target:\r\n                i += 1\r\n            if i >= len(nums) or nums[i] < nums[i - 1]:\r\n                return -1\r\n            else:\r\n                return i\r\n        else:\r\n            i = len(nums) - 2\r\n            while i >= 0 and nums[i + 1] >= nums[i] != target:\r\n                i -= 1\r\n            if i < 0 or nums[i] > nums[i + 
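A point worth pulling out of the lead.py record above: both change_primer() and generate_payment() roll installment dates forward by a fixed number of days plus a fixed number of months per cycle, using datetime.timedelta together with dateutil's relativedelta. A self-contained sketch of that rolling (dias_ciclo and meses_ciclo are illustrative stand-ins for the plazo fields; python-dateutil, which the record already imports, is required):

import datetime
from dateutil.relativedelta import relativedelta

def payment_dates(first_payment, n_payments, dias_ciclo=0, meses_ciclo=1):
    fecha = datetime.datetime.strptime(first_payment, "%Y-%m-%d")
    dates = []
    for _ in range(n_payments):
        dates.append(fecha.strftime("%Y-%m-%d"))
        fecha += datetime.timedelta(days=dias_ciclo)      # whole-day part of the cycle
        fecha += relativedelta(months=meses_ciclo)        # calendar-month part, clamped to month end
    return dates

print(payment_dates("2024-01-31", 3))  # ['2024-01-31', '2024-02-29', '2024-03-29']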
1]:\r\n                return -1\r\n            else:\r\n                return i\r\n\r\n    # Binary search\r\n    def search2(self, nums: List[int], target: int) -> int:\r\n        if not nums:\r\n            return -1\r\n        low = 0\r\n        high = len(nums) - 1\r\n        while low <= high:\r\n            mid = (low + high) // 2\r\n            if target == nums[mid]:\r\n                return mid\r\n            if nums[low] == nums[mid]:\r\n                low += 1\r\n            elif nums[low] < nums[mid]:\r\n                if nums[low] <= target <= nums[mid]:\r\n                    high = mid\r\n                else:\r\n                    low += 1\r\n            else:\r\n                if nums[mid] <= target <= nums[high]:\r\n                    low = mid\r\n                else:\r\n                    high = mid - 1\r\n        return -1\r\n\r\n    def search(self, nums: List[int], target: int) -> int:\r\n        if not nums: return -1\r\n        if target == nums[0]: return 0\r\n        if target == nums[-1]: return len(nums) - 1\r\n        if len(nums) <= 2: return -1\r\n        left = 0\r\n        right = len(nums) - 1\r\n        while left < right:\r\n            mid = left + (right - left) // 2\r\n            if nums[mid] > nums[right]:\r\n                left = mid + 1\r\n            else:\r\n                right = mid\r\n        t = left\r\n        left = 0 if target > nums[0] else t\r\n        right = t if target > nums[0] and t > 0 else len(nums) - 1\r\n        while left <= right:\r\n            mid = (left + right) // 2\r\n            if nums[mid] == target:\r\n                return mid\r\n            elif nums[mid] > target:\r\n                right = mid - 1\r\n            else:\r\n                left = mid + 1\r\n        return -1\r\n\r\n\r\nif __name__ == '__main__':\r\n    nums = [4, 5, 6, 7, 0, 1, 2]\r\n    target = 0\r\n    assert Solution().search(nums, target) == 4\r\n    nums = [4, 5, 6, 7, 0, 1, 2]\r\n    target = 3\r\n    assert Solution().search(nums, target) == -1\r\n    nums = []\r\n    target = 5\r\n    assert Solution().search(nums, target) == -1\r\n    nums = [1]\r\n    target = 2\r\n    assert Solution().search(nums, target) == -1\r\n    nums = [1, 3, 5]\r\n    target = 2\r\n    assert Solution().search(nums, target) == -1\r\n    nums = [1, 3, 5]\r\n    target = 3\r\n    assert Solution().search(nums, target) == 1\r\n","sub_path":"31_40/033.py","file_name":"033.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"582029006","text":"# feature_extraction.py\n# since: 11/2018\n# Developed by: Shehu Lab\n\n\"\"\"Module for extracting features of a conformation.\n\nThis module provides methods to extract the USR features of a structure.\n\nAvailable Functions:\n- similarity_metrics: Returns USR moments of ctd, cst, fct, and ftf\n    metrics of a conformation.\n\"\"\"\n\nimport math\nimport pyrosetta.toolbox.extract_coords_pose as pte\nimport numpy as np\nimport scipy.stats as st\n\n\ndef similarity_metrics(pose):\n    \"\"\"Returns moments of ctd, cst, fct, and ftf metrics of a\n    conformation.\n\n    This function calculates Ultrafast Shape Recognition Metrics (i.e.\n    moments of ctd, cst, fct, and ftf distances) of a given structure.\n\n    Args:\n        pose: A pyrosetta Pose object containing a structure.\n\n    Returns:\n        A list containing 12 moments of the USR distances.\n        The order is:\n            [ctd_distances_mean, ctd_distances_variance,\n            ctd_distances_skewness, cst_distances_mean, ....]\n    \"\"\"\n    xyz = pte.extract_coordinates_from_pose_1x3(pose)\n    no_of_atoms = len(xyz)\n\n    sum_x = 0.0\n    sum_y = 0.0\n    sum_z = 0.0\n\n    for i in xyz:\n        sum_x += i[0]\n        sum_y += i[1]\n        sum_z += i[2]\n\n    # Find molecular centroid\n    center_x = sum_x / no_of_atoms\n    center_y = sum_y / no_of_atoms\n    center_z = sum_z / no_of_atoms\n\n    # Calculate moments of ctd distances\n    max_distance = 0\n    min_distance = math.inf\n    fct = []\n    cst = []\n    distances_list = []\n\n    for i in xyz:\n        distance = math.sqrt(\n            ((center_x - i[0]) ** 2) + ((center_y - i[1]) ** 2) +\n            ((center_z - i[2]) ** 2))\n        if distance > 
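Beyond the fixed asserts at the bottom of the 033.py record above, a randomized cross-check against a plain linear scan is a cheap way to gain confidence in the O(log n) search. This sketch assumes the Solution class from that record is in scope; in arrays without duplicates the matching index is unique, so equality with the brute-force answer is a valid oracle:

import random

def brute(nums, target):
    return nums.index(target) if target in nums else -1

for _ in range(1000):
    n = random.randint(0, 10)
    base = sorted(random.sample(range(50), n))
    k = random.randint(0, n) if n else 0
    nums = base[k:] + base[:k]          # a random rotation of a sorted array
    target = random.randint(0, 50)
    assert Solution().search(nums, target) == brute(nums, target), (nums, target)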
max_distance:\n            max_distance = distance\n            fct = i\n        if distance < min_distance:\n            min_distance = distance\n            cst = i\n        distances_list.append(distance)\n\n    distances = np.array(distances_list)\n    mean_ctd_distance = np.mean(distances)\n    variance_ctd_distance = np.var(distances)\n    skewness_ctd_distance = st.skew(distances)\n\n    # Calculate moments of cst distances\n    distances_list = []\n\n    for i in xyz:\n        distances_list.append(math.sqrt(((cst[0] - i[0]) ** 2)\n                                        + ((cst[1] - i[1]) ** 2)\n                                        + ((cst[2] - i[2]) ** 2)))\n\n    distances = np.array(distances_list)\n    mean_cst_distance = np.mean(distances)\n    variance_cst_distance = np.var(distances)\n    skewness_cst_distance = st.skew(distances)\n\n    # Calculate moments of fct distances\n    max_distance = 0\n    ftf = []\n    distances_list = []\n\n    for i in xyz:\n        distance = math.sqrt(((fct[0] - i[0]) ** 2) + ((fct[1] - i[1]) ** 2) +\n                             ((fct[2] - i[2]) ** 2))\n        if distance > max_distance:\n            max_distance = distance\n            ftf = i\n\n        distances_list.append(distance)\n\n    distances = np.array(distances_list)\n    mean_fct_distance = np.mean(distances)\n    variance_fct_distance = np.var(distances)\n    skewness_fct_distance = st.skew(distances)\n\n    # Calculate moments of ftf distances\n    distances_list = []\n\n    for i in xyz:\n        distances_list.append(math.sqrt(((ftf[0] - i[0]) ** 2) +\n                                        ((ftf[1] - i[1]) ** 2) +\n                                        ((ftf[2] - i[2]) ** 2)))\n\n    distances = np.array(distances_list)\n    mean_ftf_distance = np.mean(distances)\n    variance_ftf_distance = np.var(distances)\n    skewness_ftf_distance = st.skew(distances)\n\n    return [mean_ctd_distance, variance_ctd_distance, skewness_ctd_distance,\n            mean_cst_distance, variance_cst_distance, skewness_cst_distance,\n            mean_fct_distance, variance_fct_distance, skewness_fct_distance,\n            mean_ftf_distance, variance_ftf_distance, skewness_ftf_distance]\n","sub_path":"modules/feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252725718","text":"# encoding=utf8\nimport pika\nimport sys\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='broadcast', exchange_type='fanout')\n\nmessage = ' '.join(sys.argv[1:]) or \"info: Hello World!\" # if the command line provides input, message is that input; otherwise message defaults to \"info: Hello World!\"\nchannel.basic_publish(exchange='broadcast', routing_key='', body=message)\nprint(\" [x] Sent {message}\".format(message=message))\nconnection.close()\n","sub_path":"store/Python/learn/learn_rabbitMQ/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526920238","text":"import sys\nsys.path.insert(0, '/usr/local/lib/python2.7/dist-packages')\nfrom pytg import Telegram\nfrom pytg.utils import coroutine\n\n\ntg = Telegram(\n\ttelegram=\"/home/rozalski/tg-cli/bin/telegram-cli\",\n\tpubkey_file=\"/home/rozalski/tg-cli/tg-server.pub\")\nreceiver = tg.receiver\nsender = tg.sender\nsender.send_msg(\"@Phonebot\", \"xxxtelxxx\")\nreceiver.start()\n@coroutine\ndef main_loop (receiver):\n\twhile True:\n\t\tmsg = (yield)\n\t\tprint('Full dump: 
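The feature_extraction.py record above computes each USR moment with explicit per-atom loops; the same ctd moments fall out of a few vectorized numpy calls. A sketch on a synthetic point cloud (a stand-in for the PyRosetta coordinates the record actually extracts):

import numpy as np
import scipy.stats as st

xyz = np.random.rand(100, 3)                   # synthetic atom coordinates
ctd = xyz.mean(axis=0)                         # molecular centroid
d = np.linalg.norm(xyz - ctd, axis=1)          # distances to the centroid
moments = [np.mean(d), np.var(d), st.skew(d)]  # mean, variance, skewness
print(moments)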
{array}'.format(array=str(msg)))\nreceiver.message(main_loop(receiver))\nreceiver.stop()\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"174401768","text":"import os\nfrom os.path import join, basename, dirname\nfrom numpy.random import choice, normal, rand, randint\nfrom PIL import Image\nimport numpy as np\nimport cv2\nfrom functools import reduce\nimport os\nimport sys\nfrom matplotlib.pyplot import plot, imshow, show, colorbar\nfrom glob import glob\nhome = os.environ['HOME']\n\nhtrAssetsRoot = join(home, 'datasets', 'htr_assets')\ncrowdRoot = join(htrAssetsRoot, 'crowdsource')\nprocessedRoot = join(crowdRoot, 'processed')\npatchBoxesRoot = join(htrAssetsRoot, 'cropped_patches', 'nw_boxes-3')\npatchHorizRoot = join(htrAssetsRoot, 'cropped_patches', 'nw_horizontal-2')\npatchBoxesFiles = glob(join(patchBoxesRoot, '*.jpg'))\npatchHorizFiles = glob(join(patchHorizRoot, '*.jpg'))\ntestsetRoot = join(htrAssetsRoot, 'nw_im_crop_curated')\n\ndef clean_lines(img, threshold=.23):\n '''use hough transnform to remove lines from the ey dataset'''\n img_copy = img.copy()\n if len(img.shape) > 2:\n gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)\n else:\n gray = img_copy.copy()\n largerDim = np.max(gray.shape)\n origShape = gray.shape\n sqrShape = [largerDim, largerDim]\n\n # image preprocessing for the hough transform\n gray = cv2.resize(gray,\n (largerDim, largerDim)) # resize to be square so that votes for both horz and vert lines are equal\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n edges = cv2.Canny(gray, 50, 150, apertureSize=3) # edge detection\n # Image.fromarray(edges).show() # debug\n\n # apply hough transform\n width = edges.shape[0]\n thresholdPix = int(threshold * width) # threshold is percentage of full image width expected to get votes\n lines = cv2.HoughLines(edges, 1, 1 * np.pi / 180, threshold=thresholdPix)\n\n # loop over detected lines in hough space and convert to euclidean\n for rho, theta in np.squeeze(lines):\n # leverage the fact that we know the lines occur at the borders of the image and are horz or vert\n conditionTheta = (abs(180 / np.pi * theta - 0) < 3) | \\\n (abs(180 / np.pi * theta - 90) < 3) | \\\n (abs(180 / np.pi * theta - 180) < 3) | \\\n (abs(180 / np.pi * theta - 270) < 3) | \\\n (abs(180 / np.pi * theta - 360) < 3)\n conditionRho = (abs(180 / np.pi * theta - 0) < 3) & (abs(rho - 0) < .07 * width) | \\\n (abs(180 / np.pi * theta - 0) < 3) & (abs(rho - width) < .07 * width) | \\\n (abs(180 / np.pi * theta - 0) < 3) & (abs(rho + width) < .07 * width) | \\\n (abs(180 / np.pi * theta - 180) < 3) & (abs(rho - 0) < .07 * width) | \\\n (abs(180 / np.pi * theta - 180) < 3) & (abs(rho - width) < .07 * width) | \\\n (abs(180 / np.pi * theta - 180) < 3) & (abs(rho + width) < .07 * width) | \\\n (abs(180 / np.pi * theta - 90) < 3) & (abs(rho - 0) < .2 * width) | \\\n (abs(180 / np.pi * theta - 90) < 3) & (abs(rho - width) < .2 * width) | \\\n (abs(180 / np.pi * theta - 90) < 3) & (abs(rho + width) < .2 * width)\n # draw the lines\n if conditionTheta & conditionRho:\n # plot( rho, theta, 'or' , markersize=4) # debug\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n # scale back to original image size\n x1 = int(x1 * origShape[1] / sqrShape[1])\n x2 = int(x2 * origShape[1] / 
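The @coroutine decorator imported from pytg.utils in the record above primes a generator so it can immediately receive values via send(). A minimal re-implementation of that idea (this is a sketch of the pattern, not pytg's actual source):

def coroutine(func):
    def start(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)          # advance to the first yield so send() works right away
        return gen
    return start

@coroutine
def printer():
    while True:
        msg = (yield)      # suspends here until .send(msg) is called
        print('Full dump: {array}'.format(array=str(msg)))

p = printer()
p.send({'event': 'message'})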
sqrShape[1])\n            y1 = int(y1 * origShape[0] / sqrShape[0])\n            y2 = int(y2 * origShape[0] / sqrShape[0])\n            cv2.line(img_copy, (x1, y1), (x2, y2), (255, 255, 255), thickness=14)\n        else:\n            # plot( rho, theta, '.b' , markersize=4) # debug\n            pass\n    return img_copy\n\n\ndef tight_crop(img, threshold=1 - 1.5e-2):\n    '''tightly crop an image, removing whitespace'''\n    img_copy = 255 - img\n    if len(img_copy.shape) > 2: img_copy = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)\n    img_copy[img_copy > 20] = 255 # binarize\n    img_copy[img_copy <= 20] = 0\n    # Image.fromarray(img_copy).show() # debug\n    img_copy = cv2.erode(img_copy, np.ones((3, 3)))\n\n    # function: whiten the border given the crop coordinates\n    def clean_border(img, r1, r2, c1, c2, debug=False):\n        img_copy = img.copy()\n        if debug:\n            img_copy[:r1, :] = 125\n            img_copy[-r2:, :] = 125\n            img_copy[:, :c1] = 125\n            img_copy[:, -c2:] = 125\n        else:\n            img_copy[:r1, :] = 0\n            img_copy[-r2:, :] = 0\n            img_copy[:, :c1] = 0\n            img_copy[:, -c2:] = 0\n        return img_copy\n\n    # function: calculate ratio of preserved black pixels after the border cleaning\n    ratio_preserved = lambda crop: np.sum(clean_border(img_copy, crop[0], crop[1], crop[2], crop[3])) / np.sum(img_copy)\n\n    # iteratively crop more and more on each side alternatingly till preservedRatio hits threshold\n    crop = [0, 1, 0, 1]\n    edgeId = -1\n    subThreshold = 1\n    increment = .5e-3\n    while subThreshold >= threshold:\n        edgeId += 1\n        subThreshold -= increment\n        nextCrop = crop.copy()\n        while ratio_preserved(nextCrop) >= subThreshold:\n            crop = nextCrop.copy()\n            nextCrop[np.mod(edgeId, 4)] += 1\n        # Image.fromarray(clean_border(img_copy, crop[0], crop[1], crop[2], crop[3], debug=True)).show()\n        # print(crop, np.mod(edgeId,4), ratio_preserved(crop), subThreshold)\n\n    return img[crop[0]:-crop[1], crop[2]:-crop[3]] # crop the image and return\n\n\ndef center_pad(img, pad):\n    '''center crop the image by defining the amount of negative padding to shrink'''\n    return img[pad:-pad, pad:-pad]\n\ndef add_artifacts(img,args): #yike: to be assessed\n    if not args.noartifact:\n        #cv2.imwrite('/root/Engagements/test/tst1_bf.jpg', img)\n        img = horizontal_stretch(img, minFactor=.5, maxFactor=1.5)\n        img = target_aspect_pad(img, targetRatio=args.imgsize[1] / args.imgsize[0])\n        img = keep_aspect_pad(img, maxFactor=1.1)\n\n        img = cv2.resize(img, tuple(args.imgsize), interpolation=cv2.INTER_CUBIC)\n\n        if rand() < .70:\n            img = merge_patch_box_random(img, centroid_std=.03)\n        else:\n            img = merge_patch_horiz_random(img, centroid_std=.05)\n    #cv2.imwrite('/root/Engagements/test/tst1_aft.jpg', img)\n    return img\n\ndef img_normalize(img):\n    (m, s) = cv2.meanStdDev(img)\n    m = m[0][0]\n    s = s[0][0]\n    img = img - m\n    img = img / s if s > 0 else img\n    return img\n\n\n\n\nif __name__ == '__main__':\n\n    files = glob('/Users/dl367ny/datasets/htr_assets/crowdsource/extracted/*/*.jpg')\n    # files = ['/Users/dl367ny/datasets/htr_assets/crowdsource/extracted/112301/42544.jpg']\n    # files = ['/Users/dl367ny/datasets/htr_assets/crowdsource/extracted/112116/719,000.jpg']\n    # files = ['/Users/dl367ny/datasets/htr_assets/crowdsource/extracted/112133/$341,510.jpg']\n    # files = ['/Users/dl367ny/datasets/htr_assets/crowdsource/extracted/112042/2,504,650.jpg']\n    for file in np.array(files)[np.random.permutation(len(files))[:10]]:\n        img = Image.open(file)\n        img = np.array(img)\n        # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        # img = center_pad(img, 10)\n        # img = np.pad(img, 10, 'maximum')\n        # Img = Image.fromarray(img); Img.show()\n        img = clean_lines(img)\n        img = tight_crop(img)\n    
Img = Image.fromarray(img);\n    Img.show()\n\n\ndef horizontal_stretch(img, minFactor, maxFactor):\n    '''randomly stretch image horizontally by amount uniformly between minFactor and maxFactor'''\n    return cv2.resize(img, (int(img.shape[1] * np.random.uniform(minFactor, maxFactor)), img.shape[0]))\n\n\ndef target_aspect_pad(img, targetRatio=32 / 128):\n    '''change aspect ratio of image to targetRatio by padding one of the dimensions. original image will be placed in\n    random location within the expanded canvas'''\n    nr, nc = img.shape\n    currentRatio = nr / nc\n    if currentRatio > targetRatio:\n        dc = int(nr * (1 / targetRatio - 1 / currentRatio))\n        dc2 = randint(max(dc, 1))\n        dc1 = dc - dc2\n        padding = ((0, 0), (dc1, dc2))\n    else:\n        dr = int(nc * (targetRatio - currentRatio))\n        dr2 = randint(max(dr, 1))\n        dr1 = dr - dr2\n        padding = ((dr1, dr2), (0, 0))\n    img = np.pad(img, padding, 'constant', constant_values=np.max(img))\n    return img\n\n\ndef keep_aspect_pad(img, maxFactor):\n    '''pad image such that it expands by rand(maxFactor) while keeping its aspect ratio fixed. original image will be\n    placed in random location within the expanded canvas. maxFactor must be greater than 1'''\n    nr, nc = img.shape\n    ratio = nr / nc\n    dc = randint(max(int((maxFactor - 1) * nc), 1))\n    dr = int(ratio * dc)\n    dc2 = randint(max(dc, 1))\n    dc1 = dc - dc2\n    dr2 = randint(max(dr, 1))\n    dr1 = dr - dr2\n    padding = ((dr1, dr2), (dc1, dc2))\n    img = np.pad(img, padding, 'constant', constant_values=np.max(img))\n    return img\n\n\ndef remove_background(im, threshold):\n    mask = im < threshold\n    imMasked = im.copy()\n    imMasked[mask] = 0\n    return imMasked\n\n\ndef merge_patch(imBase, imPatch, centroid, threshold=100):\n    '''Takes imPatch and superimpose on imBase at centroid. Returns modified image'''\n\n    imBase, imPatch = 255 - imBase, 255 - imPatch # invert images for processing\n    nrb, ncb = imBase.shape\n    nrp, ncp = imPatch.shape\n\n    # make white areas of imPatch transparent\n    imPatchMasked = remove_background(imPatch, threshold)\n\n    # get difference of centroids between base and patch\n    centroidPatch = np.array([int(dim / 2) for dim in imPatchMasked.shape])\n    delta = np.array(centroid) - centroidPatch\n\n    # add difference of centroids to the x,y position of patch\n    cc, rr = np.meshgrid(np.arange(ncp), np.arange(nrp))\n    rr = rr + int(delta[0])\n    cc = cc + int(delta[1])\n\n    # remove all parts of patch image that would expand base image\n    keep = reduce(np.logical_and, [rr >= 0, rr < nrb, cc >= 0, cc < ncb])\n    nrk, nck = np.max(rr[keep]) - np.min(rr[keep]) + 1, np.max(cc[keep]) - np.min(cc[keep]) + 1\n    imPatchKeep = imPatchMasked[keep]\n\n    # merge base and patch by taking maximum pixel at each position\n    imMerge = imBase.copy()\n    imBaseCrop = imBase.copy()\n    imBaseCrop = imBaseCrop[rr[keep], cc[keep]]\n    imMerge[rr[keep], cc[keep]] = np.maximum(imBaseCrop, imPatchKeep)\n\n    return 255 - imMerge # invert back\n\n\ndef merge_patch_box_random(img, centroid_std=.05):\n    imgSize = img.shape[::-1]\n    imPatchFile = choice(patchBoxesFiles)\n    imPatch = cv2.imread(imPatchFile, cv2.IMREAD_GRAYSCALE)\n    imPatch = cv2.resize(imPatch, img.shape[::-1])\n    imPatch = cv2.normalize(imPatch, None, np.min(img), np.max(img), norm_type=cv2.NORM_MINMAX)\n    centroid = [imgSize[1] / 2 * (1 + normal(0, centroid_std)), imgSize[0] / 2 * (1 + normal(0, centroid_std))]\n    return merge_patch(img, imPatch, centroid, threshold=50)\n\n\ndef merge_patch_horiz_random(img, centroid_std=.05):\n    imgSize = img.shape[::-1]\n    imPatchFile = choice(patchBoxesFiles)\n    imPatchFile 
= choice(patchHorizFiles)\n imPatch = cv2.imread(imPatchFile, cv2.IMREAD_GRAYSCALE)\n imPatch = cv2.resize(imPatch, None, fx=4, fy=1)\n imPatch = cv2.normalize(imPatch, None, np.min(img), np.max(img), norm_type=cv2.NORM_MINMAX)\n centroid = [imgSize[1] * (.75 + normal(0, centroid_std)), imgSize[0] / 2 * (1 + normal(0, centroid_std))]\n return merge_patch(img, imPatch, centroid, threshold=50)\n\n\nif __name__ == '__main__':\n # file = '/Users/dl367ny/htrdata/crowdsource/extracted/111003/$9,900,000.jpg'\n # patchFile = '/Users/dl367ny/htrdata/cropped_patches/nw_horizontal-2/Declined - Handwritten (1)_Redacted-2-aligned-Unnamed2.jpg'\n # imBase = cv2.imread(file, cv2.IMREAD_GRAYSCALE)\n # im = cv2.imread(patchFile, cv2.IMREAD_GRAYSCALE)\n # im = cv2.resize(im, None, fx=3, fy=1)+50\n #\n # nrb, ncb = imBase.shape\n # centroid = int(.4*nrb), int(ncb/2)\n # imMerge = merge_patch(imBase, im, centroid, 100)\n # Image.fromarray(imMerge).show()\n pass\n","sub_path":"src/CleaningRecognitionJointModel/archive/recognition/utils_preprocess.py","file_name":"utils_preprocess.py","file_ext":"py","file_size_in_byte":11647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"232905564","text":"import json\nimport sys\nfrom os.path import join, isdir\nsys.path.insert(0, '../')\nfrom mocap.visualization.sequence import SequenceVisualizer\nfrom mocap.data.cmu import CMUHandler\nfrom mocap.data.simplified import Simplified\n\nSettings = json.load(open('../settings.txt'))\nroot = Settings['data_root']\nassert isdir(root), root\n\nsubsampling = 10\nsubjects = ['94']\ncmu = CMUHandler(root, subjects)\nhandler = Simplified(cmu)\n\nviz = SequenceVisualizer(data_root=Settings['video_export'],\n name='gt_cmu', vmax=1, vmin=-1,\n subsampling=10,\n with_pauses=True,\n mark_origin=False,\n to_file=True)\n\nprint('#videos', len(handler))\n\n\nseq = handler[0]\n\nviz.plot(seq, noaxis=False, plot_jid=True)\n","sub_path":"samples/simplified_cmu.py","file_name":"simplified_cmu.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627657272","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport re\r\nimport time\r\nimport datetime\r\n\r\nimport shutil\r\nimport random\r\n\r\ntrain_percentage = 0.8\r\nval_percentage = 0.2\r\ntest_percentage = 0.0\r\n\r\ndef backup_txt_rename(txt_path):\r\n\tif os.path.exists(txt_path):\r\n\t\ti = datetime.datetime.now()\r\n\t\tdate = str(i.year) + str(i.month) + str(i.day) + str(i.hour) + str(i.minute) + str(i.second)\r\n\t\tnew_name = txt_path +\".bak\" + date\r\n\t\tos.rename(txt_path, new_name)\r\n\t\tprint(\"copied and deleted file, new_name = {}\".format(new_name))\r\n\r\ndef get_slice_train_val_test(slice_path_txt_list, train_slice_path, val_slice_path, test_slice_path, label):\r\n\t### added by hcq 20180119\r\n\t### \r\n\t# MCI_subject_num = 825 - 199 - 230 = 396 including sMCI, pMCI\r\n\tbackup_txt_rename(train_slice_path)\r\n\tbackup_txt_rename(val_slice_path)\r\n\tslice_list = os.listdir(slice_path_txt_list)\r\n\t# print(slice_list[0]) = GMAD17283_S002.jpg\r\n\tAD_subject_num = 199\r\n\tNC_subject_num = 230\r\n\ttrain_slice_num = 0\r\n\tval_slice_num = 0\r\n\r\n\tif ((label + \"_subject_num\") == \"AD_subject_num\"):\r\n\t\tsubject_num = AD_subject_num\r\n\telif ((label + \"_subject_num\") == \"NC_subject_num\"):\r\n\t\tsubject_num = NC_subject_num\r\n\telse:\r\n\t\tprint(\"fuck..\")\r\n\r\n\trondom_list = 
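The heart of merge_patch() in the utils_preprocess.py record above is index-based max-compositing: meshgrid coordinates are shifted by the centroid offset, clipped to the base image, and the two (inverted, so ink is bright) images are merged with np.maximum. A toy-sized sketch of just that step:

import numpy as np

base = np.zeros((4, 4), dtype=np.uint8)
patch = np.full((2, 2), 200, dtype=np.uint8)
rr, cc = np.meshgrid(np.arange(2), np.arange(2), indexing='ij')
rr, cc = rr + 1, cc + 1                          # shift patch to offset (1, 1)
base[rr, cc] = np.maximum(base[rr, cc], patch)   # keep the brighter pixel at each spot
print(base)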
random.sample(range(1, subject_num+1), subject_num)\r\n\r\n\t### set the number of train, val, test\r\n\ttrain_num = int(train_percentage * subject_num)\r\n\tval_num = subject_num - train_num\r\n\t# test_num = len_slice_list - train_num - val_num\r\n\tprint(\"total_sbject = {}\".format(subject_num))\r\n\tprint(\"train_sbject = {}\".format(train_num))\r\n\tprint(\"val_sbject = {}\".format(val_num))\r\n\r\n\t# train: [0, train_num-1]\r\n\tprint(\"{}, train_slice_path = {}\".format(label, train_slice_path))\r\n\twith open(train_slice_path, \"a\") as train_txt:\r\n\t\tfor i in range(train_num):\r\n\t\t\trondom_id = rondom_list[i]\r\n\t\t\trondom_id = \"S\" + str(\"%.3d\"%rondom_id)\r\n\t\t\t\r\n\t\t\tfor slice_item in slice_list:\r\n\t\t\t\tslice_name = slice_item.split(\".\")[0]\r\n\t\t\t\tsubject_name = slice_name.split(\"_\")[1]\r\n\t\t\t\t# print(subject_name)\r\n\t\t\t\t# print(\"rondom_id = {}\".format(rondom_id))\r\n\t\t\t\t# print(\"subject_name = {}\".format(subject_name))\r\n\t\t\t\tif (rondom_id == subject_name):\r\n\t\t\t\t\tprint(slice_item)\r\n\t\t\t\t\ttrain_txt.writelines(slice_item + \"\\n\")\r\n\t\t\t\t\ttrain_slice_num = train_slice_num + 1\r\n\r\n\t# val: [train_num, train_num + val_num - 1]\r\n\tprint(\"{}, val_slice_path = {}\".format(label, val_slice_path))\r\n\twith open(val_slice_path, \"a\") as val_txt:\r\n\t\tfor i in range(val_num):\r\n\t\t\tindex = i + train_num\r\n\t\t\trondom_id = rondom_list[index]\r\n\t\t\trondom_id = \"S\" + str(\"%.3d\"%rondom_id)\r\n\t\t\t# print(\"test_num = {}\".format(test_num))\t\t\r\n\t\t\tfor slice_item in slice_list:\r\n\t\t\t\tslice_name = slice_item.split(\".\")[0]\r\n\t\t\t\tsubject_name = slice_name.split(\"_\")[1]\r\n\t\t\t\t# print(subject_name)\t\r\n\t\t\t\tif (rondom_id == subject_name):\r\n\t\t\t\t\tprint(slice_item)\r\n\t\t\t\t\tval_txt.writelines(slice_item + \"\\n\")\r\n\t\t\t\t\tval_slice_num = val_slice_num + 1\r\n\tprint(\"train_slice_num = {}\".format(train_slice_num))\r\n\tprint(\"val_slice_num = {}\".format(val_slice_num))\r\n\t### added finished 20180119\r\n\r\n\t### adde by hcq 20180113\r\n\t### get all silce through its path\r\n\t# slice_list = os.listdir(slice_path_txt_list)\r\n\t# len_slice_list = len(slice_list)\r\n\t# # print(slice_list[0]) = GMAD17283_S002.jpg\r\n\r\n\t# ### backup the old txt file\r\n\t# backup_txt_rename(train_slice_path)\r\n\t# backup_txt_rename(val_slice_path)\r\n\t# # backup_txt_rename(test_slice_path)\r\n\t\r\n\t# ### set the number of train, val, test\r\n\t# train_num = int(train_percentage * len_slice_list)\r\n\t# val_num = len_slice_list - train_num\r\n\t# # test_num = len_slice_list - train_num - val_num\r\n\t# print(\"total_num = {}\".format(len_slice_list))\r\n\t# print(\"train_num = {}\".format(train_num))\r\n\t# print(\"val_num = {}\".format(val_num))\r\n\t# # print(\"test_num = {}\".format(test_num))\r\n\r\n\t# ### create a rondom list without repetition\r\n\t# rondom_list = random.sample(range(1, len_slice_list+1), len_slice_list)\r\n\r\n\t# ### create txt file to store the index of train, val, test\r\n\t# # train: [0, train_num-1]\r\n\t# with open(train_slice_path, \"a\") as train_txt:\r\n\t# \tfor i in range(train_num):\r\n\t# \t\tslice_index = rondom_list[i]\r\n\t# \t\tslice_name = \"GM\" + label + str(\"%.5d\"%slice_index) + \".jpg\"\r\n\t# \t\ttrain_txt.writelines(slice_name + \"\\n\")\r\n\t\t\r\n\t# # val: [train_num, train_num + val_num - 1]\r\n\t# with open(val_slice_path, \"a\") as val_txt:\r\n\t# \tfor i in range(val_num):\r\n\t# \t\tindex = i + train_num\r\n\t# 
\t\tslice_index = rondom_list[index]\r\n\t# \t\tslice_name = \"GM\" + label + str(\"%.5d\"%slice_index) + \".jpg\"\r\n\t# \t\tval_txt.writelines(slice_name + \"\\n\")\r\n\r\n\t# test: [train_num + val_num, end]\r\n\t# with open(test_slice_path, \"a\") as test_txt:\r\n\t# \tfor i in range(test_num):\r\n\t# \t\tindex = train_num + i\r\n\t# \t\tslice_index = rondom_list[index]\r\n\t# \t\tslice_name = \"GM\" + label + str(\"%.5d\"%slice_index) + \".jpg\"\r\n\t# \t\ttest_txt.writelines(slice_name + \"\\n\")\r\n\r\n\t### added finished 20180113\r\n\r\ndef move_slice_to_train_val_test_fold(slice_path_txt_list, slice_txt_path, target_path):\r\n\tif os.path.exists(slice_txt_path):\r\n\t\twith open(slice_txt_path,\"r\") as slice_list_file:\r\n\t\t\tfor slice_name in slice_list_file:\r\n\t\t\t\tslice_name = slice_name.replace(\"\\n\", \"\")\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif (slice_name.split(\".\")[1] == \"jpg\"):\r\n\t\t\t\t\t\tslice_pos = os.path.join(slice_path_txt_list, slice_name)\r\n\t\t\t\t\t\t# print(\"slice_pos = {}\".format(slice_pos))\r\n\t\t\t\t\t\ttarget_slice_pos = os.path.join(target_path, slice_name)\r\n\t\t\t\t\t\tprint(\"target_slice_pos = {}\".format(target_slice_pos))\r\n\t\t\t\t\t\tshutil.copyfile(slice_pos, target_slice_pos)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\telse:\r\n\t\tprint(\"slice txt [{}] is not exist...\".format(slice_path_txt_list))\r\n\r\n\r\nif __name__==\"__main__\":\r\n\t### \r\n\trun_flag = 'NC'\r\n\r\n\t### \r\n\tif (run_flag == 'AD'):\r\n\t\t### initial\r\n\t\t# AD GM\r\n\t\tslice_path_txt_list = './AD_NC_except_entropy_zero/AD_GM_except_entropy_zero_single_subject'\r\n\t\ttrain_slice_path = './AD_NC_except_entropy_zero/ADGM_train_except_entropy_zero_single_subject.txt'\r\n\t\tval_slice_path = './AD_NC_except_entropy_zero/ADGM_val_except_entropy_zero_single_subject.txt'\r\n\t\ttest_slice_path = './AD_NC_except_entropy_zero/ADGM_test_except_entropy_zero_single_subject.txt'\r\n\t\tlabel = 'AD'\r\n\t\t### create the txt file of train, val, test\r\n\t\tget_slice_train_val_test(slice_path_txt_list, train_slice_path, val_slice_path, test_slice_path, label)\r\n\r\n\t\t### move slice to folder of train, val, test\r\n\t\ttrain_target_path = \"./InceptionV4_FineTunning/single_subject_data_fold_01_entropy_except_zero/train/\" + label\r\n\t\tmove_slice_to_train_val_test_fold(slice_path_txt_list, train_slice_path, train_target_path)\r\n\r\n\t\tval_target_path = \"./InceptionV4_FineTunning/single_subject_data_fold_01_entropy_except_zero/validation/\" + label\r\n\t\tmove_slice_to_train_val_test_fold(slice_path_txt_list, val_slice_path, val_target_path)\r\n\r\n\r\n\telif (run_flag == 'NC'):\r\n\t\t# NC GM\r\n\t\tslice_path_txt_list = './AD_NC_except_entropy_zero/NC_GM_except_entropy_zero_single_subject'\r\n\t\ttrain_slice_path = './AD_NC_except_entropy_zero/NCGM_train_except_entropy_zero_single_subject.txt'\r\n\t\tval_slice_path = './AD_NC_except_entropy_zero/NCGM_val_except_entropy_zero_single_subject.txt'\r\n\t\ttest_slice_path = './AD_NC_except_entropy_zero/NCGM_test_except_entropy_zero_single_subject.txt'\r\n\t\tlabel = 'NC'\r\n\t\t### create the txt file of train, val, test\r\n\t\tget_slice_train_val_test(slice_path_txt_list, train_slice_path, val_slice_path, test_slice_path, label)\r\n\r\n\t\t### move slice to folder of train, val, test\r\n\t\ttrain_target_path = \"./InceptionV4_FineTunning/single_subject_data_fold_01_entropy_except_zero/train/\" + label\r\n\t\tmove_slice_to_train_val_test_fold(slice_path_txt_list, train_slice_path, 
train_target_path)\r\n\r\n\t\tval_target_path = \"./InceptionV4_FineTunning/single_subject_data_fold_01_entropy_except_zero/validation/\" + label\r\n\t\tmove_slice_to_train_val_test_fold(slice_path_txt_list, val_slice_path, val_target_path)\r\n\r\n\r\n\r\n\r\n### run it \r\n### python .\\get_data.py > result.txt","sub_path":"ADNI-825/specified_subject_get_slice_train_val_test.py","file_name":"specified_subject_get_slice_train_val_test.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115378133","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Standard library imports\nimport json\nimport socket\nimport sys\nimport time\n\n\ndef main():\n if len(sys.argv) != 4:\n print(\"Usage: %s HOST PORT TESTS_FILE\" % (sys.argv[0]))\n sys.exit(1)\n\n host = sys.argv[1]\n port = int(sys.argv[2])\n sample_tests_file = sys.argv[3]\n delimiter = \"\\r\\n\"\n timeout = 5\n\n fh = open(sample_tests_file)\n sample_tests = json.load(fh)\n fh.close()\n\n delimiter = \"\\n\"\n timeout = 5\n trial_count = len(sample_tests)\n socket.setdefaulttimeout(timeout)\n curr_trial = 1\n success = True\n\n s = socket.create_connection((host, port))\n s.send(\"Total trial count: %d, timeout: %ds\" % (trial_count, timeout) + delimiter)\n time.sleep(1)\n for test_case in sample_tests:\n n = test_case\n solution = sample_tests[test_case]\n s.send(str(n) + delimiter)\n time.sleep(1)\n try:\n response = long(s.makefile().readline().strip())\n except socket.timeout:\n s.send(\"Boo! Too slow! Bye!\" + delimiter)\n success = False\n break\n except ValueError:\n response = None\n\n if response != solution:\n s.send(\"Got %s, expected %d\" % (str(response), solution) + delimiter)\n success = False\n break\n else:\n curr_trial += 1\n\n if success:\n print(\"Success\")\n s.send(\"0\" + delimiter)\n else:\n print(\"Failed\")\n\n s.close()\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sem1/NetworksLab/lab3/Socket programming helpers/2/test-client.py","file_name":"test-client.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211271571","text":"#!/usr/bin/env python\n\nimport os\nimport pickle\nimport smtplib\nimport pickle\n\nfrom selenium import webdriver\nfrom subprocess import Popen, PIPE\n\nSITES = [\n (\"http://www.zaletsi.cz\",\"#content .article-list a\")\n]\n\nDB_PATH = \"links\"\nFULL_FILE_PATH = os.path.realpath(__file__)\nPATH, FILENAME = os.path.split(FULL_FILE_PATH)\nBROWSER_PATH = PATH + \"/phantomjs-2.1.1-linux-x86_64/bin/phantomjs\"\n\nclass TicketScrapper(object):\n def __init__(self):\n self.driver = webdriver.PhantomJS(executable_path = BROWSER_PATH)\n self.driver.set_window_size(1120,550)\n try:\n self.links = pickle.load(open(DB_PATH, \"rb\"))\n except IOError:\n self.links = []\n self.new_links = []\n\n def __del__(self):\n self.driver.quit()\n\n def scrape(self):\n for url, css in SITES:\n self.driver.get(url)\n for site_link in self.driver.find_elements_by_css_selector(css):\n link_object = site_link.get_attribute(\"href\")\n if link_object not in self.links and link_object not in self.new_links:\n self.new_links.append(link_object)\n self.links.append(link_object)\n pickle.dump(self.links, open(DB_PATH, \"wb\"))\n return self.new_links\n\nif __name__ == '__main__':\n\n\n scrapper = TicketScrapper()\n links = scrapper.scrape()\n\n if links:\n msg = 'Subject: %s\\n\\n' % \"Fresh fly 
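The splitting script above is careful to randomize at the subject level, so every slice of a subject lands in the same split and no subject leaks across train and validation. The essential pattern, reduced to a few lines with illustrative counts:

import random

subject_num, train_fraction = 10, 0.8        # illustrative values
ids = random.sample(range(1, subject_num + 1), subject_num)  # shuffled, no repeats
n_train = int(train_fraction * subject_num)
train_ids = {"S%.3d" % i for i in ids[:n_train]}
val_ids = {"S%.3d" % i for i in ids[n_train:]}
# a slice named like GMAD17283_S007.jpg is routed purely by its subject id
print(sorted(train_ids), sorted(val_ids))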
tickets\"\n for link in links:\n msg += str(link) + '\\n'\n print(msg)\n s = smtplib.SMTP('relay.fi.muni.cz')\n s.sendmail('tomas.marton22@gmail.com',['tomas.marton22@gmail.com'], msg)\n s.quit()\n","sub_path":"tickets_scrapper/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"30669983","text":"from tornado.web import RequestHandler\nfrom typing import List, Tuple\n\nfrom web.handlers import (\n PingHandler,\n AddToFavourite,\n RemoveFromFavourite,\n GetAllFavourite,\n CheckIfFavourite\n)\n\nping_url = (r'/ping/', PingHandler)\n\ncustom_urls = [\n (r'/add-to-favourite/', AddToFavourite),\n (r'/remove-from-favourite/', RemoveFromFavourite),\n (r'/get-all-favourite/', GetAllFavourite),\n (r'/check-if-favourite/', CheckIfFavourite),\n]\n\n\ndef get_all_urls() -> List[Tuple[str, RequestHandler]]:\n return custom_urls + [ping_url]\n","sub_path":"favourite/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591339500","text":"\n\nfrom xai.brain.wordbase.nouns._asperity import _ASPERITY\n\n#calss header\nclass _ASPERITIES(_ASPERITY, ):\n\tdef __init__(self,): \n\t\t_ASPERITY.__init__(self)\n\t\tself.name = \"ASPERITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"asperity\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_asperities.py","file_name":"_asperities.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"384552391","text":"def partitionfunc(n,k,l=1):\n '''n is the integer to partition, k is the length of partitions, l is the min partition element size'''\n if k < 1:\n raise StopIteration\n if k == 1:\n if n >= l:\n yield (n,)\n raise StopIteration\n for i in range(l, n+1):\n for result in partitionfunc(n-i, k-1, i):\n yield (i,)+result\n \n#print(set(partitionfunc(8, 2)))\n\ndef weak_compositions(k, n, parent=tuple()):\n if k > 1:\n for i in range(n + 1):\n for x in weak_compositions(k - 1, i, parent + (n - i,)):\n yield x\n else:\n yield parent + (n,)\n \n#print(set(weak_compositions(2, 8)))\n\ndef int_combinations(iterable, r):\n # combinations('ABCD', 2) --> AB AC AD BC BD CD\n # combinations(range(4), 3) --> 012 013 023 123\n pool = tuple(iterable)\n n = len(pool)\n if r > n:\n return\n indices = list(range(r))\n yield tuple(pool[i] for i in indices)\n while True:\n for i in reversed(range(r)):\n if indices[i] != i + n - r:\n break\n else:\n return\n indices[i] += 1\n for j in range(i+1, r):\n indices[j] = indices[j-1] + 1\n yield tuple(pool[i] for i in indices)\n\n#print(set(int_combinations(range(7), 1)))\n \n \n \n \n \n\nfrom itertools import combinations\n#from scipy.special import binom\n\ndef compositions(k, n):\n last = (n-1,)\n first = (-1,)\n for t in combinations(range(n-1), k-1):\n yield tuple(v - u for u, v in zip(first + t, t + last))\n\n#print(set(compositions(2, 8)))\n#print(set(compositions(2, 5)))\n#print(set(compositions(3, 7)))\n\n\n\ndef constrained_compositions(n, m):\n # inputs: n is of type 'int' and m is a list of integers\n # output: a set of tuples\n \n #\n k = len(m)\n full_set = set(compositions(k, n))\n constrained_set = full_set.copy()\n \n for i in full_set:\n for x,y in zip(i, m):\n if x > y:\n constrained_set.remove(i)\n break\n \n# constrained_set = set(i for i in full_set if 
all(x <= y for x, y in zip(i, m)))\n return constrained_set\n \n\n#print(constrained_compositions(7, [1,4,4]))\nprint(constrained_compositions(8, [3,2,4]))\n\n\n#\n#constrained_test = full_test.copy()\n#for i in full_test:\n# print('i', i)\n# \n# for x,y in zip(i,m):\n# if x > y:\n# constrained_test.remove(i)\n \n#\n#tuple(constrained_test.remove(i) for i in full_test if all(x <= y for x, y in zip(i, m)))\n","sub_path":"Week4-Conditioning/homework4.py","file_name":"homework4.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313671975","text":"import cv2\n# here we read the predefined classifier\nface_cascade=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\neye_cascade=cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')\n\n# read an image\n# img=cv2.imread(\"download.jpg\")\ncap=cv2.VideoCapture(0)\nwhile cap.isOpened():\n ret, img = cap.read()\n gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces=face_cascade.detectMultiScale(gray,1.1,4)\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)\n roi_gray=gray[y:y+h,x:x+w]\n roi_img=img[y:y+h,x:x+w]\n eyes=eye_cascade.detectMultiScale(roi_gray,1.1,4)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_img, (ex,ey),(ex+ew,ey + eh),(0,0,255),3)\n cv2.imshow(\"image\",img)\n if cv2.waitKey(1) == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"face_and_eye_detection.py","file_name":"face_and_eye_detection.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111630882","text":"import numpy as np\r\nimport cv2\r\nimport os\r\n\r\nLHtwo=0\r\nLStwo=0\r\nLLtwo=0\r\nHHtwo=0\r\nHStwo=0\r\nHLtwo=0\r\n\r\ndef nothing(x):\r\n pass\r\nadd = False\r\non = False\r\n\r\npath1 = \"Img/\"\r\nfor root, directory,filename in os.walk(path1):\r\n print(filename)\r\n\r\n#Trackbar Image & Size\r\nImgwnd = 'Image & Size'\r\nImgwnd2 = 'Image & Size2'\r\ncv2.namedWindow(Imgwnd,cv2.WINDOW_KEEPRATIO)\r\ncv2.createTrackbar(\"Image\", Imgwnd,0,len(filename)-1,nothing)\r\ncv2.createTrackbar(\"Size\", Imgwnd,1,99,nothing)\r\n\r\ncv2.namedWindow(Imgwnd2,cv2.WINDOW_KEEPRATIO)\r\ncv2.createTrackbar(\"Image\", Imgwnd2,0,len(filename)-1,nothing)\r\ncv2.createTrackbar(\"Size\", Imgwnd2,1,99,nothing)\r\n\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n\r\n\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n##cv2.createTrackbar(\"\", wnd,0,255,nothing)\r\n\r\nwhile(1):\r\n \r\n i=cv2.getTrackbarPos(\"Image\", Imgwnd)\r\n Size=cv2.getTrackbarPos(\"Size\", Imgwnd)\r\n image = cv2.imread(root+filename[i])\r\n img = cv2.resize(image, (0,0), fx = 1-0.01*Size, fy= 1-0.01*Size)\r\n cv2.namedWindow(\"img\",cv2.WINDOW_AUTOSIZE)\r\n cv2.imshow(\"img\",img)\r\n\r\n i2=cv2.getTrackbarPos(\"Image\", Imgwnd2)\r\n Size2=cv2.getTrackbarPos(\"Size\", Imgwnd2)\r\n image2 = cv2.imread(root+filename[i2])\r\n img2 = cv2.resize(image2, (0,0), fx = 1-0.01*Size2, fy= 1-0.01*Size2)\r\n 
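The compositions() generator in the homework4.py record enumerates k-tuples of positive integers summing to n via the stars-and-bars bijection, so its output size should equal binom(n-1, k-1). A quick standalone check (the function is reproduced here only so the snippet is self-contained; math.comb needs Python 3.8+):

from itertools import combinations
from math import comb

def compositions(k, n):
    last, first = (n - 1,), (-1,)
    for t in combinations(range(n - 1), k - 1):
        yield tuple(v - u for u, v in zip(first + t, t + last))

parts = set(compositions(3, 7))
print(len(parts), comb(7 - 1, 3 - 1))  # both 15
assert len(parts) == comb(6, 2)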
cv2.namedWindow(\"img2\",cv2.WINDOW_AUTOSIZE)\r\n cv2.imshow(\"img2\",img2)\r\n\r\n if cv2.waitKey(1) == ord('y'):\r\n break\r\ncv2.destroyAllWindows()\r\nprint(\"-Confirmed-\")\r\nprint(filename[i])\r\n#Image and Size\r\nimage = cv2.imread(root+filename[i])\r\nimg = cv2.resize(image, (0,0), fx=1-0.01*Size, fy=1-0.01*Size)\r\nLeftALL = np.zeros_like(img)\r\n\r\nimage2 = cv2.imread(root+filename[i2])\r\nimg2 = cv2.resize(image2, (0,0), fx=1-0.01*Size2, fy=1-0.01*Size2)\r\nLeftALL2 = np.zeros_like(img2)\r\n#LeftALL[:] = (0, 0, 255)\r\n\r\nimage_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\nmaskALL = cv2.inRange(img, (0,0,0), (0,0,0))\r\n\r\nimage_HSV2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)\r\nmaskALL2 = cv2.inRange(img2, (0,0,0), (0,0,0))\r\n\r\n#Trackbar Color\r\nwnd = 'H-S-L bar'\r\ncv2.namedWindow(wnd,cv2.WINDOW_NORMAL)\r\n\r\ncv2.createTrackbar(\"Low-H\", wnd,0,255,nothing)\r\ncv2.createTrackbar(\"Low-S\", wnd,0,255,nothing)\r\ncv2.createTrackbar(\"Low-L\", wnd,0,255,nothing)\r\n\r\ncv2.createTrackbar(\"High-H\", wnd,0,255,nothing)\r\ncv2.setTrackbarPos(\"High-H\", wnd, 255)\r\ncv2.createTrackbar(\"High-S\", wnd,0,255,nothing)\r\ncv2.setTrackbarPos(\"High-S\", wnd, 255)\r\ncv2.createTrackbar(\"High-L\", wnd,0,255,nothing)\r\ncv2.setTrackbarPos(\"High-L\", wnd, 255)\r\nold = 0\r\nwhile(1):\r\n #Get Value\r\n LH = cv2.getTrackbarPos(\"Low-H\", wnd)\r\n LS = cv2.getTrackbarPos(\"Low-S\", wnd)\r\n LL = cv2.getTrackbarPos(\"Low-L\", wnd)\r\n\r\n HH = cv2.getTrackbarPos(\"High-H\", wnd)\r\n HS = cv2.getTrackbarPos(\"High-S\", wnd)\r\n HL = cv2.getTrackbarPos(\"High-L\", wnd)\r\n\r\n## #add color\r\n## if add == True:\r\n## cv2.createTrackbar(\"2Low-H\", wnd,0,255,nothing)\r\n## cv2.createTrackbar(\"2Low-S\", wnd,0,255,nothing)\r\n## cv2.createTrackbar(\"2Low-L\", wnd,0,255,nothing)\r\n##\r\n## cv2.createTrackbar(\"2High-H\", wnd,0,255,nothing)\r\n## cv2.setTrackbarPos(\"2High-H\", wnd, 255)\r\n## cv2.createTrackbar(\"2High-S\", wnd,0,255,nothing)\r\n## cv2.setTrackbarPos(\"2High-S\", wnd, 255)\r\n## cv2.createTrackbar(\"2High-L\", wnd,0,255,nothing)\r\n## cv2.setTrackbarPos(\"2High-L\", wnd, 255)\r\n## add = False\r\n \r\n\r\n \r\n #Filter background using colored mask\r\n light_one = (LH, LS, LL)\r\n dark_one = (HH, HS ,HL)\r\n\r\n mask_one = cv2.inRange(image_HSV,light_one,dark_one)\r\n mask_two = cv2.inRange(image_HSV2,light_one,dark_one)\r\n \r\n mask = mask_one\r\n Getone = cv2.bitwise_and(img,img, mask= mask_one)\r\n\r\n mask2 = mask_two\r\n Gettwo = cv2.bitwise_and(img2,img2, mask= mask_two)\r\n\r\n Leftmask = cv2.bitwise_not(mask)\r\n Leftmask2 = cv2.bitwise_not(mask2)\r\n\r\n LeftALL[Leftmask == 255] = img[Leftmask == 255]\r\n LeftALL2[Leftmask2 == 255] = img2[Leftmask2 == 255]\r\n## if on == True:\r\n## LeftALL = np.zeros_like(img)\r\n## LHtwo = cv2.getTrackbarPos(\"2Low-H\", wnd)\r\n## LStwo = cv2.getTrackbarPos(\"2Low-S\", wnd)\r\n## LLtwo = cv2.getTrackbarPos(\"2Low-L\", wnd)\r\n##\r\n## HHtwo = cv2.getTrackbarPos(\"2High-H\", wnd)\r\n## HStwo = cv2.getTrackbarPos(\"2High-S\", wnd)\r\n## HLtwo = cv2.getTrackbarPos(\"2High-L\", wnd)\r\n##\r\n## light_two = (LHtwo, LStwo, LLtwo)\r\n## dark_two = (HHtwo, HStwo ,HLtwo)\r\n## mask_two = cv2.inRange(image_HSV,light_two,dark_two)\r\n##\r\n## mask = mask_one + mask_two\r\n##\r\n## Gettwo = cv2.bitwise_and(img,img, mask= mask_two)\r\n## GetALL = cv2.bitwise_and(img,img, mask= mask)\r\n## Leftmask = cv2.bitwise_not(mask)\r\n## \r\n## LeftALL[Leftmask == 255] = img[Leftmask == 255]\r\n##\r\n cv2.imshow(\"Getone\",Getone)\r\n 
cv2.imshow(\"Gettwo\",Gettwo)\r\n \r\n## cv2.imshow(\"mask\",mask)\r\n## cv2.imshow(\"GetALL\",GetALL)\r\n cv2.imshow(\"Leftmask\",Leftmask)\r\n## cv2.imshow(\"LeftALL\",LeftALL)\r\n cv2.imshow(\"Leftmask2\",Leftmask2)\r\n## cv2.imshow(\"LeftALL2\",LeftALL2)\r\n## Getcount = cv2.countNonZero(mask)\r\n\r\n## if old != Getcount:\r\n## old = Getcount\r\n## print(Getcount)\r\n\r\n## if cv2.waitKey(1) == ord('+'):\r\n## add = True\r\n## on = True\r\n##\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()\r\nprint(\"LOW[%d,%d,%d] HIGH[%d,%d,%d]\"%(LH,LS,LL,HH,HS,HL))\r\n##print(\"2LOW[%d,%d,%d] 2HIGH[%d,%d,%d]\"%(LHtwo,LStwo,LLtwo,HHtwo,HStwo,HLtwo))\r\n##cv2.imwrite(\"result/LOW[%d,%d,%d] HIGH[%d,%d,%d]-2LOW[%d,%d,%d] 2HIGH[%d,%d,%d].jpg\"%(LH,LS,LL,HH,HS,HL,LHtwo,LStwo,LLtwo,HHtwo,HStwo,HLtwo),LeftALL)\r\ncv2.imwrite(\"result/LOW[%d,%d,%d] HIGH[%d,%d,%d].jpg\"%(LH,LS,LL,HH,HS,HL),LeftALL)\r\n","sub_path":"SUAS 2019/Install Pack/Color/ColorCompare.py","file_name":"ColorCompare.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67429775","text":"#!/usr/bin/env python\n\n#-----------------------------------------------------------------------\n# CASClient.py\n# Authors: Alex Halderman, Scott Karlin, Brian Kernighan, Bob Dondero\n#-----------------------------------------------------------------------\n\nfrom urllib.request import urlopen\nfrom urllib.parse import quote\nfrom re import sub, match\nfrom flask import request, session, redirect, abort\nfrom sys import stderr\n\n#-----------------------------------------------------------------------\n\nclass CASClient:\n\n #-------------------------------------------------------------------\n \n # Initialize a new CASClient object so it uses the given CAS\n # server, or fed.princeton.edu if no server is given.\n \n def __init__(self, url='https://fed.princeton.edu/cas/'):\n self.cas_url = url\n\n #-------------------------------------------------------------------\n\n # Return the URL of the current request after stripping out the\n # \"ticket\" parameter added by the CAS server.\n\t\n def stripTicket(self):\n url = request.url\n if url is None:\n return \"something is badly wrong\"\n url = sub(r'ticket=[^&]*&?', '', url)\n url = sub(r'\\?&?$|&$', '', url)\n return url\n \n #-------------------------------------------------------------------\n\n # Validate a login ticket by contacting the CAS server. If\n # valid, return the user's username; otherwise, return None.\n\n def validate(self, ticket):\n val_url = self.cas_url + \"validate\" + \\\n '?service=' + quote(self.stripTicket()) + \\\n '&ticket=' + quote(ticket)\n r = urlopen(val_url).readlines() # returns 2 lines\n if len(r) != 2:\n return None \n firstLine = r[0].decode('utf-8')\n secondLine = r[1].decode('utf-8')\n if not firstLine.startswith('yes'):\n return None\n return secondLine\n \n #-------------------------------------------------------------------\n\n # Authenticate the remote user, and return the user's username.\n # Do not return unless the user is successfully authenticated.\n \t\n def authenticate(self):\n \n # If the user's username is in the session, then the user was\n # authenticated previously. 
So return the user's username.\n if 'username' in session:\n return session.get('username')\n \n # If the request contains a login ticket, then try to\n # validate it.\n ticket = request.args.get('ticket')\n if ticket is not None:\n username = self.validate(ticket)\n if username is not None: \n # The user is authenticated, so store the user's\n # username in the session. \n session['username'] = username \n return username\n \n # The request does not contain a valid login ticket, so\n # redirect the browser to the login page to get one.\n login_url = self.cas_url + 'login' \\\n + '?service=' + quote(self.stripTicket())\n \n abort(redirect(login_url))\n\n#-----------------------------------------------------------------------\n\ndef main():\n print(\"CASClient does not run standalone\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"CASClient.py","file_name":"CASClient.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"299503701","text":"import csv\nimport os\nimport logging\nfrom collections import OrderedDict\n\nfrom src.visualizing.plot_training_stats import plot_training_stats\n\n\nclass Logger:\n def __init__(self, directory, monitor_dict, visualizing, train):\n self.train = train\n self.visualizing = visualizing\n\n self.statsdir = os.path.join(directory, 'stats')\n self.plotsdir = os.path.join(directory, 'plots')\n os.makedirs(self.statsdir)\n os.makedirs(self.plotsdir)\n self.scalar_filename = os.path.join(self.statsdir, 'scalars.csv')\n\n self.monitors = OrderedDict()\n self.visualized_scalar_monitors = []\n self.headers = []\n self.add_many_monitors(**monitor_dict)\n\n with open(self.scalar_filename, 'a', newline='') as f:\n writer = csv.DictWriter(f, self.headers)\n writer.writeheader()\n\n #import ipdb; ipdb.set_trace()\n\n def add_many_monitors(self, **monitors):\n for name, monitor in monitors.items():\n self.add_monitor(name, monitor)\n\n def add_monitor(self, name, monitor):\n self.monitors[name] = monitor\n if monitor.scalar and monitor.visualizing:\n self.visualized_scalar_monitors.append(name)\n if monitor.scalar:\n self.headers.append(name)\n monitor.initialize(self.statsdir, self.plotsdir)\n return monitor\n\n def compute_monitors(self, **kwargs):\n stats_dict = {}\n for name, monitor in self.monitors.items():\n monitor_value = monitor(**kwargs)\n if monitor.scalar:\n stats_dict[name] = monitor_value\n if monitor.visualizing:\n monitor.visualize()\n return stats_dict\n\n def log_scalars(self, stats_dict):\n with open(self.scalar_filename, 'a', newline='') as f:\n writer = csv.DictWriter(f, self.headers)\n writer.writerow(stats_dict)\n if self.train:\n ##pass\n plot_training_stats(self.scalar_filename, self.visualized_scalar_monitors, self.plotsdir)\n\n def log(self, compute_monitors=True,**kwargs):\n if compute_monitors:\n stats_dict = self.compute_monitors(**kwargs)\n self.log_scalars(stats_dict)\n else:\n self.log_scalars(kwargs)\n\n def complete_logging(self):\n for monitor in self.monitors.values():\n monitor.finish()\n","sub_path":"src/admin/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62207491","text":"'''\r\n -----EdPiy-----\r\n\r\nAutores:\r\n Axel Mercado Gasque\r\n Armando Montaño González\r\n'''\r\n#Librerias/Paquetes requeridos\r\nfrom tkinter import *\r\nimport random\r\nfrom tkinter import messagebox\r\nfrom PIL import 
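The CASClient.validate method above implements the two-line CAS 1.0 validate protocol; note that it returns secondLine with its trailing newline still attached, so callers end up storing a username ending in "\n". A hedged standalone sketch that strips both lines (the URL and ticket are placeholders):

```python
from urllib.parse import quote
from urllib.request import urlopen

def validate(cas_url, service_url, ticket):
    """Return the username on success, None otherwise (CAS 1.0 protocol)."""
    val_url = (cas_url + "validate"
               + "?service=" + quote(service_url)
               + "&ticket=" + quote(ticket))
    lines = urlopen(val_url).readlines()         # protocol: exactly two lines
    if len(lines) != 2:
        return None
    status = lines[0].decode("utf-8").strip()    # "yes" or "no"
    username = lines[1].decode("utf-8").strip()  # strip the trailing newline
    return username if status == "yes" else None
```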
ImageTk, Image\r\nimport os\r\nimport requests\r\nfrom io import BytesIO\r\n\r\n# Funciones\r\ndef n_pregunta():\r\n global l_value1\r\n global l_operator\r\n global l_value2\r\n global e_ans\r\n global l_opcion\r\n global b_pregunta\r\n global l_check\r\n\r\n def check_ans():\r\n ans = e_ans.get()\r\n print(ans)\r\n if oper == \"+\":\r\n if val + val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"green\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n elif oper == \"x\":\r\n if val * val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"green\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n elif oper == \"-\":\r\n if val - val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"green\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n else:\r\n if val // val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"green\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n arr_signs = ['+', '-', 'x', '/']\r\n global val\r\n val = str(random.randint(5, 20))\r\n l_value1 = Label(frame3, text=val)\r\n global val1\r\n val1 = str(random.randint(1, 5))\r\n global oper\r\n oper = arr_signs[random.randint(0, 3)]\r\n l_operator = Label(frame3, text=oper)\r\n l_value2 = Label(frame3, text=val1)\r\n e_ans = Entry(frame3, text=\"Respuesta\")\r\n b_respuesta = Button(frame3, text=\"Ok\", command=check_ans)\r\n l_check = Label(frame3, text=\"Correcto o Incorrecto\")\r\n l_opcion = Label(frame3, text=\"Desea otra pregunta\")\r\n b_pregunta = Button(frame3, text=\"Si\", command=destroy_pregunta)\r\n button_back1 = Button(frame3, text=\"Anterior\", padx=50, pady=20, command=from_arit_showstart)\r\n val = int(val)\r\n val1 = int(val1)\r\n\r\n frame3.grid(row=0, column=0)\r\n l_value1.grid(row=0, column=0)\r\n l_operator.grid(row=0, column=1)\r\n l_value2.grid(row=0, column=2)\r\n e_ans.grid(row=0, column=3)\r\n b_respuesta.grid(row=1, column=3)\r\n l_check.grid(row=2, column=3)\r\n l_opcion.grid(row=1, column=0)\r\n b_pregunta.grid(row=2, column=0)\r\n\r\n\r\ndef destroy_pregunta():\r\n l_value1.destroy()\r\n l_operator.destroy()\r\n l_value2.destroy()\r\n e_ans.destroy()\r\n l_opcion.destroy()\r\n b_pregunta.destroy()\r\n\r\n n_pregunta()\r\n\r\ndef from_geo_showstart():\r\n frame4.grid_forget()\r\n start()\r\n\r\n\r\ndef from_arit_showstart():\r\n frame3.grid_forget()\r\n start()\r\n\r\ndef geo_again():\r\n frame4.grid_forget()\r\n geometria()\r\n\r\ndef geometria():\r\n frame2.grid_forget()\r\n global frame4\r\n frame4 = Frame(main)\r\n\r\n figuras = ['Triángulo Equilátero', 'Cuadrado', 'Rectangulo', 'Trapecio']\r\n i = random.randint(0, 3)\r\n\r\n if i == 0:\r\n def check_area():\r\n if int(e_area.get()) == (h * b / 2):\r\n t_check_area.config(text = 'Correcto!', fg = 'green')\r\n else:\r\n t_check_area.config(text='Incorrecto!', fg = 'red')\r\n def check_per():\r\n if int(e_per.get()) == (b * 3):\r\n t_check_per.config(text = 'Correcto!', fg = 'green')\r\n else:\r\n t_check_per.config(text='Incorrecto!', fg = 'red')\r\n global b\r\n b = random.randint(1, 20)\r\n global h\r\n h = random.randint(1, 20)\r\n label_t_b = Label(frame4, text= 'Base: ' + str(b))\r\n label_preg = Label(frame4, text = '¿Desea otra pregunta?')\r\n b_si = Button(frame4, text = 'Si', command = geo_again)\r\n label_t_h = Label(frame4, text = 'Altura: ' + str(h))\r\n label_area = Label(frame4, text = 'Area: ')\r\n label_per = Label(frame4, text = 'Perímetro: ')\r\n e_area = 
Entry(frame4)\r\n e_per = Entry(frame4)\r\n global t_check_area\r\n global t_check_per\r\n t_check_area = Label(frame4, text = 'Area: Correcto o Incorrecto')\r\n t_check_per = Label(frame4, text='Perímetro: Correcto o Incorrecto')\r\n button_check_area = Button(frame4, text = 'Ok', command = check_area)\r\n button_check_per = Button(frame4, text='Ok', command = check_per)\r\n col = ['red', 'blue', 'yellow', 'black', 'green', 'pink']\r\n canvas_r = Canvas(frame4, width=190, height=190, bg='white')\r\n canvas_r.grid(row=8, column=3)\r\n canvas_r.create_line(55, 85, 155, 85, 105, 180, 55, 85)\r\n\r\n b_si.grid(row = 4, column = 0)\r\n label_preg.grid(row = 3, column = 0)\r\n t_check_area.grid(row = 3, column = 1, columnspan = 3)\r\n t_check_per.grid(row = 3, column = 5)\r\n button_check_area.grid(row=4, column=1, columnspan = 3)\r\n button_check_per.grid(row=4, column=5 )\r\n label_t_b.grid(row = 1, column = 0)\r\n label_t_h.grid(row = 1, column = 1)\r\n label_area.grid(row = 1, column = 2)\r\n label_per.grid(row=1, column=4)\r\n e_area.grid(row = 1, column = 3)\r\n e_per.grid(row=1, column=5)\r\n\r\n elif i == 1:\r\n def check_area():\r\n if int(e_area.get()) == (l * l):\r\n c_check_area.config(text = 'Correcto!', fg = 'green')\r\n else:\r\n c_check_area.config(text='Incorrecto!', fg = 'red')\r\n def check_per():\r\n if int(e_per.get()) == (l * 4):\r\n c_check_per.config(text = 'Correcto!', fg = 'green')\r\n else:\r\n c_check_per.config(text='Incorrecto!', fg = 'red')\r\n global l\r\n l = random.randint(1, 20)\r\n label_lado = Label(frame4, text='Lado: ' + str(l))\r\n label_area = Label(frame4, text='Area: ')\r\n label_per = Label(frame4, text='Perímetro: ')\r\n e_area = Entry(frame4)\r\n e_per = Entry(frame4)\r\n label_preg = Label(frame4, text='¿Desea otra pregunta?')\r\n b_si = Button(frame4, text='Si', command = geo_again)\r\n global c_check_area\r\n global c_check_per\r\n c_check_area = Label(frame4, text='Area: Correcto o Incorrecto')\r\n c_check_per = Label(frame4, text='Perímetro: Correcto o Incorrecto')\r\n button_check_area = Button(frame4, text='Ok', command = check_area)\r\n button_check_per = Button(frame4, text='Ok', command = check_per)\r\n col = ['red','blue','yellow','black','green','pink']\r\n canvas_r = Canvas(frame4, width = 150, height = 150, bg = col[random.randint(0,5)])\r\n canvas_r.grid(row = 8, column = 3)\r\n\r\n c_check_area.grid(row = 3, column = 2)\r\n c_check_per.grid(row = 3, column = 4)\r\n button_check_area.grid(row = 4, column = 2)\r\n button_check_per.grid(row = 4, column = 4)\r\n b_si.grid(row=4, column=0)\r\n label_preg.grid(row=3, column=0)\r\n label_lado.grid(row=1, column=0)\r\n label_area.grid(row=1, column=1)\r\n label_per.grid(row=1, column=3)\r\n e_area.grid(row=1, column=2)\r\n e_per.grid(row=1, column=4)\r\n\r\n elif i == 2:\r\n def check_area():\r\n if int(e_area.get()) == (largo * altura):\r\n r_check_area.config(text = 'Correcto!', fg = 'green')\r\n else:\r\n r_check_area.config(text='Incorrecto!', fg = 'red')\r\n def check_per():\r\n if int(e_per.get()) == (largo + largo + altura + altura):\r\n r_check_per.config(text = 'Correcto!', fg = 'green')\r\n else:\r\n r_check_per.config(text='Incorrecto!', fg = 'red')\r\n\r\n global largo\r\n global altura\r\n largo = random.randint(1, 20)\r\n altura = random.randint(1, 20)\r\n label_lado = Label(frame4, text='Lado: ' + str(largo))\r\n label_altura1 = Label(frame4, text='Altura: ' + str(altura))\r\n label_area = Label(frame4, text='Area: ')\r\n label_per = Label(frame4, text='Perímetro: ')\r\n 
e_area = Entry(frame4)\r\n e_per = Entry(frame4)\r\n label_preg = Label(frame4, text='¿Desea otra pregunta?')\r\n b_si = Button(frame4, text='Si', command = geo_again)\r\n global r_check_area\r\n global r_check_per\r\n r_check_area = Label(frame4, text='Area: Correcto o Incorrecto')\r\n r_check_per = Label(frame4, text='Perímetro: Correcto o Incorrecto')\r\n button_check_area = Button(frame4, text='Ok', command = check_area)\r\n button_check_per = Button(frame4, text='Ok', command = check_per)\r\n col = ['red', 'blue', 'yellow', 'black', 'green', 'pink']\r\n canvas_r = Canvas(frame4, width=170, height=160, bg= 'white')\r\n canvas_r.grid(row=8, column=3)\r\n canvas_r.create_rectangle(10,10,160,120, fill = col[random.randint(0,5)])\r\n\r\n r_check_area.grid(row=3, column=3)\r\n r_check_per.grid(row=3, column=5)\r\n button_check_area.grid(row=4, column=3)\r\n button_check_per.grid(row=4, column=5)\r\n b_si.grid(row=4, column=0)\r\n label_preg.grid(row=3, column=0)\r\n label_lado.grid(row=1, column=0)\r\n label_altura1.grid(row = 1, column = 1)\r\n label_area.grid(row=1, column=2)\r\n label_per.grid(row=1, column=4)\r\n e_area.grid(row=1, column=3)\r\n e_per.grid(row=1, column=5)\r\n\r\n else:\r\n def check_area():\r\n if int(e_area.get()) == ((B + base) * altura_t)/2:\r\n tr_check_area.config(text='Correcto!', fg='green')\r\n else:\r\n tr_check_area.config(text='Incorrecto!', fg='red')\r\n\r\n def check_per():\r\n if int(e_per.get()) == (B + base + lado1 + lado2):\r\n tr_check_per.config(text='Correcto!', fg='green')\r\n else:\r\n tr_check_per.config(text='Incorrecto!', fg='red')\r\n global B,base,altura_t,lado1,lado2\r\n B = random.randint(1, 20)\r\n base = random.randint(1, 20)\r\n altura_t = random.randint(1, 20)\r\n lado1 = random.randint(1, 20)\r\n lado2 = random.randint(1, 20)\r\n\r\n label_Base = Label(frame4, text='Base mayor: ' + str(B))\r\n label_base = Label(frame4, text='Base menor: ' + str(base))\r\n label_altura = Label(frame4, text='Altura: ' + str(altura_t))\r\n label_lado1 = Label(frame4, text='Lado 1: ' + str(lado1))\r\n label_lado2 = Label(frame4, text='Lado 2: ' + str(lado2))\r\n label_area = Label(frame4, text='Area: ')\r\n label_per = Label(frame4, text='Perímetro: ')\r\n e_area = Entry(frame4)\r\n e_per = Entry(frame4)\r\n label_preg = Label(frame4, text='¿Desea otra pregunta?')\r\n b_si = Button(frame4, text='Si', command = geo_again)\r\n global tr_check_area, tr_check_per\r\n tr_check_area = Label(frame4, text='Area: Correcto o Incorrecto')\r\n tr_check_per = Label(frame4, text='Perímetro: Correcto o Incorrecto')\r\n button_check_area = Button(frame4, text='Ok', command = check_area )\r\n button_check_per = Button(frame4, text='Ok', command = check_per)\r\n\r\n tr_check_area.grid(row=3, column=2)\r\n tr_check_per.grid(row=3, column=4)\r\n button_check_area.grid(row=4, column=2)\r\n button_check_per.grid(row=4, column=4)\r\n label_preg.grid(row = 3, column = 0)\r\n b_si.grid(row = 4, column = 0)\r\n label_Base.grid(row = 1, column = 0)\r\n label_base.grid(row=1, column=1)\r\n label_altura.grid(row=1, column=2)\r\n label_lado1.grid(row=1, column=3)\r\n label_lado2.grid(row=1, column=4)\r\n label_area.grid(row = 2, column = 1)\r\n e_area.grid(row = 2, column = 2)\r\n label_per.grid(row = 2, column = 3)\r\n e_per.grid(row = 2, column = 4)\r\n\r\n label_figura = Label(frame4, text = figuras[i])\r\n button_back1 = Button(frame4, text=\"Anterior\", padx=50, pady=20, command=from_geo_showstart)\r\n\r\n label_figura.grid(row = 0, column = 0)\r\n button_back1.grid(row = 5, 
column = 0)\r\n\r\n frame4.grid(row = 0, column = 0)\r\n\r\ndef aritmetica():\r\n frame2.grid_forget()\r\n global frame3\r\n frame3 = Frame(main)\r\n\r\n global l_value1\r\n global l_operator\r\n global l_value2\r\n global e_ans\r\n global l_opcion\r\n global b_pregunta\r\n global b_respuesta\r\n global l_check\r\n\r\n def check_ans():\r\n ans = e_ans.get()\r\n print(ans)\r\n if oper == \"+\":\r\n if val + val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"black\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n elif oper == \"x\":\r\n if val * val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"black\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n elif oper == \"-\":\r\n if val - val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"black\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n else:\r\n if val // val1 == int(ans):\r\n l_check.config(text = \"Correcto!\", fg = \"black\")\r\n else:\r\n l_check.config(text = \"Incorrecto!\", fg = \"red\")\r\n\r\n\r\n arr_signs = ['+', '-', 'x', '/']\r\n global val\r\n val = str(random.randint(5, 20))\r\n l_value1 = Label(frame3, text=val)\r\n global val1\r\n val1 = str(random.randint(1, 5))\r\n global oper\r\n oper = arr_signs[random.randint(0, 3)]\r\n l_operator = Label(frame3, text=oper)\r\n l_value2 = Label(frame3, text=val1)\r\n e_ans = Entry(frame3, text=\"Respuesta\")\r\n b_respuesta = Button(frame3, text = \"Ok\", command = check_ans)\r\n l_check = Label(frame3, text = \"Correcto o Incorrecto\")\r\n l_opcion = Label(frame3, text=\"Desea otra pregunta\")\r\n b_pregunta = Button(frame3, text=\"Si\", command=destroy_pregunta)\r\n button_back1 = Button(frame3, text=\"Anterior\", padx=50, pady=20, command=from_arit_showstart)\r\n val = int(val)\r\n val1 = int(val1)\r\n\r\n frame3.config(bg='deep sky blue')\r\n frame3.grid(row = 0, column = 0)\r\n l_value1.grid(row=0, column=0)\r\n l_operator.grid(row=0, column=1)\r\n l_value2.grid(row=0, column=2)\r\n e_ans.grid(row=0, column=3)\r\n b_respuesta.grid(row = 1, column = 3)\r\n l_check.grid(row = 2, column = 3)\r\n l_opcion.grid(row=1, column=0)\r\n b_pregunta.grid(row=2, column=0)\r\n button_back1.grid(row=3, column=1)\r\n\r\ndef show_frame1():\r\n frame2.grid_forget()\r\n frame1.grid(row = 0, column = 0)\r\n\r\ndef show_frame2():\r\n frame2.pack()\r\n\r\ndef start(): # Oculta frame1 y crea otra frame con las secciones\r\n frame1.grid_forget()\r\n global frame2\r\n frame2 = Frame(main)\r\n\r\n img_url2 = \"http://pluspng.com/img-png/math-symbols-png-math-symbols-600.png\"\r\n response2 = requests.get(img_url2)\r\n img_data2 = response2.content\r\n img2 = ImageTk.PhotoImage(Image.open(BytesIO(img_data2)))\r\n panel2 = Label(frame2, image=img2, bg=\"deep sky blue\")\r\n panel2.image = img2\r\n\r\n button_back = Button(frame2, text=\"Anterior\", padx=50, pady=20, command=show_frame1)\r\n button_geo = Button(frame2, text=\"Geometría\", padx=50, pady=20, command=geometria)\r\n button_arit = Button(frame2, text=\"Aritmetica\", padx=50, pady=20, command=aritmetica)\r\n\r\n frame2.config(bg='deep sky blue')\r\n frame2.grid(row=0, column=0)\r\n button_geo.grid(row=1, column=0)\r\n button_back.grid(row=2, column=0)\r\n button_arit.grid(row=1, column=1)\r\n panel2.grid(row = 3, column = 2)\r\n\r\nmain = Tk()\r\nmain.geometry(\"950x650\") # define tamaño de ventana\r\nmain.title(\"Proyecto Integrador\") # define título de ventana\r\nglobal frame1\r\nframe1 = Frame(main) # se agrega un 
frame para almacenar contenido de primera ventana\r\n\r\nimg_url = \"http://atlanticschools.net/wp-content/uploads/2017/05/PISA_LOGO-04.png\"\r\nresponse = requests.get(img_url)\r\nimg_data = response.content\r\nimg = ImageTk.PhotoImage(Image.open(BytesIO(img_data)))\r\npanel = Label(frame1, image=img, bg=\"RoyalBlue2\")\r\npanel.image = img\r\n#panel.pack(side=\"bottom\", fill=\"both\", expand=\"yes\")\r\n\r\nlabel_title = Label(frame1, text=\"Proyecto Integrador.\", fg=\"blue\")\r\nlabel_app = Label(frame1, text=\"Mejora tu rendimiento en la prueba PISA.\", fg=\"blue\")\r\nlabel_names = Label(frame1,\r\n text=\"Desarrollado por: Armando Montaño, Axel Mercado, Fabrizzio Ramírez, Fernando Cuellar y Fernando Fernández.\",\r\n fg=\"blue\")\r\nbutton_start = Button(frame1, text=\"Comienza a practicar\", padx=50, pady=60, command=start)\r\n\r\nframe1.config(bg='brown2')\r\nmain.config(bg='#abd7e5')\r\npanel.grid(row = 1, column = 0) #se agrega el label de la imágen y su ubicación en la ventana\r\nlabel_title.grid(row=2, column=0)\r\nlabel_app.grid(row=3, column=0)\r\nlabel_names.grid(row=4, column=0)\r\nbutton_start.grid(row=5, column=0)\r\nframe1.grid(row=0, column=0)\r\n\r\nmain.mainloop()\r\n","sub_path":"pythonGUI.py","file_name":"pythonGUI.py","file_ext":"py","file_size_in_byte":16996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61781445","text":"from .ecliptic import GeocentricSolarEcliptic, HeliocentricEclipticJ2000\nfrom .enums import Planes\nfrom .equatorial import (\n GCRS,\n HCRS,\n ICRS,\n JupiterICRS,\n MarsICRS,\n MercuryICRS,\n NeptuneICRS,\n PlutoICRS,\n SaturnICRS,\n UranusICRS,\n VenusICRS,\n)\nfrom .util import get_frame\n\n__all__ = [\n \"Planes\",\n \"get_frame\",\n \"ICRS\",\n \"HCRS\",\n \"MercuryICRS\",\n \"VenusICRS\",\n \"GCRS\",\n \"MarsICRS\",\n \"JupiterICRS\",\n \"SaturnICRS\",\n \"UranusICRS\",\n \"NeptuneICRS\",\n \"PlutoICRS\",\n \"HeliocentricEclipticJ2000\",\n \"GeocentricSolarEcliptic\",\n]\n","sub_path":"src/poliastro/frames/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"558860694","text":"\"\"\"\nCopyright 2019 - 2020 VMWare Inc. 
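pythonGUI.py above downloads its logos at startup with requests and hands the raw bytes to Pillow/Tk; if the HTTP call fails, Image.open raises on partial content with no hint of the real cause. A hedged helper condensing that pattern (the URL is illustrative), including the label.image = photo assignment the original also uses to keep Tk from garbage-collecting the image:

```python
from io import BytesIO
from tkinter import Label, Tk

import requests
from PIL import Image, ImageTk

def remote_image_label(parent, url, **kwargs):
    response = requests.get(url, timeout=10)
    response.raise_for_status()        # fail loudly on HTTP errors
    photo = ImageTk.PhotoImage(Image.open(BytesIO(response.content)))
    label = Label(parent, image=photo, **kwargs)
    label.image = photo                # keep a reference so Tk shows it
    return label

if __name__ == "__main__":
    root = Tk()  # must exist before any PhotoImage is created
    remote_image_label(root, "http://example.com/logo.png").pack()
    root.mainloop()
```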
Proprietary, All Rights Reserved, Confidential\n\"\"\"\n\nimport time\nimport json\nimport requests\nimport argparse\n\n\ndef milli_time():\n return int(time.time_ns() / 1000000) # Requires python 3.7\n\n\ndef check_http(result):\n if result.status_code < 200 or result.status_code > 202:\n if result.text:\n raise requests.HTTPError(result.text)\n result.raise_for_status()\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Recommend Build')\n parser.add_argument('--pipeline_name', required=True, action='store',\n help='pipeline_name')\n parser.add_argument('--build_id', required=True, action='store', type=int,\n help='build_id')\n parser.add_argument('--build_system', required=True, action='store',\n help='build_system')\n parser.add_argument('--product', required=True, action='store',\n help='product')\n parser.add_argument('--pipeline_build_number', required=False, action='store', default=milli_time(), type=int,\n help='pipeline_build_number')\n parser.add_argument('--jenkins_url', required=False, action='store', default=None,\n help='jenkins_url')\n parser.add_argument('--bu', required=False, action='store', default='cpbu',\n help='bu')\n parser.add_argument('--title', required=False, action='store', default=None,\n help='title')\n parser.add_argument('--test_name', required=False, action='store', default=None,\n help='test_name')\n parser.add_argument('--owner', required=False, action='store', default=None,\n help='owner')\n parser.add_argument('--pipeline_status', required=False, action='store', default='SUCCESS',\n help='Pipeline status [Success or Failure]')\n parser.add_argument('--api_env', required=False, action='store', default='development',\n help='Env [production or staging or development]')\n args = parser.parse_args()\n return args\n\n\nclass Uts:\n def __init__(self,\n pipeline_name,\n build_id,\n build_system,\n product,\n pipeline_build_number=milli_time(),\n jenkins_url=None,\n bu='cpbu',\n title=None,\n test_name=None,\n owner=None,\n pipeline_status=\"SUCCESS\",\n api_env='development'):\n self.pipeline_name = pipeline_name\n self.build_id = build_id\n self.build_system = build_system\n self.product = product\n self.pipeline_build_number = pipeline_build_number\n self.jenkins_url = jenkins_url\n self.business_unit = bu\n self.title = title\n if not test_name:\n self.test_name = pipeline_name+'_'+self.build_system\n else:\n self.test_name = test_name\n self.owner = owner\n self.pipeline_status = pipeline_status\n if pipeline_status==\"SUCCESS\":\n self.test_status = \"Passed\"\n else:\n self.test_status = \"Failed\"\n if api_env == 'production':\n self.server_url = 'https://testdata.svc.eng.vmware.com'\n elif api_env == 'staging':\n self.server_url = 'https://testdata.svc-stage.eng.vmware.com'\n else:\n self.server_url = 'http://localhost:3001'\n\n def start_pipeline(self):\n result = requests.post(self.server_url + '/v1/api/testdata/start_pipeline', json={\n 'bu': self.business_unit,\n 'pipeline_name': self.pipeline_name,\n 'pipeline_build_number': self.pipeline_build_number,\n 'title': self.title,\n 'jenkins_url': self.jenkins_url,\n 'url': self.jenkins_url\n })\n check_http(result)\n return result.text\n\n def get_pipeline(self, pipeline_id):\n result = requests.get(self.server_url + '/v1/api/testdata/pipeline/{}'.format(pipeline_id))\n check_http(result)\n return result.json()\n\n def start_test(self, pipeline_id):\n result = requests.post(self.server_url + '/v1/api/testdata/start_test', json={\n 'pipeline_fk': pipeline_id,\n 'test_name': 
self.test_name,\n 'product': self.product,\n 'triage_owners': self.owner,\n 'buildweb_id': self.build_id,\n 'buildweb_system': self.build_system,\n 'test_tag': 'build',\n 'test_type': 'BUILD'\n })\n check_http(result)\n return result.text\n\n def get_test(self, test_fk):\n result = requests.get(self.server_url + '/v1/api/testdata/test/{}'.format(test_fk))\n check_http(result)\n return result.json()\n\n def finish_test(self, test_id, test_details, data={}, state_dump_filename=None):\n state_dump = None\n if state_dump_filename:\n with open(state_dump_filename, \"r\") as state_dump_file:\n state_dump = json.load(state_dump_file)\n finish_test_data = {\n 'test_fk': test_id,\n 'state_dump': state_dump,\n 'change_details': [test_details],\n 'result': self.test_status,\n 'end_time': milli_time()\n }\n finish_test_data.update(data)\n result = requests.post(self.server_url + '/v1/api/testdata/finish_test_with_state_dump', json=finish_test_data)\n check_http(result)\n return result.text\n\n def finish_pipeline(self, pipeline_id, finish_data):\n final_products = []\n for product in finish_data['products']:\n del product['_update_time']\n del product['_add_time']\n if self.pipeline_status == \"SUCCESS\":\n product['pass_percentage'] = 100\n product['recommendation'] = True\n final_products.append(product)\n else:\n product['pass_percentage'] = 0\n product['recommendation'] = False\n final_products.append(product)\n finish_data['products'] = final_products\n\n data = {\n 'pipeline_fk': pipeline_id,\n 'status': self.pipeline_status\n }\n data.update(finish_data)\n result = requests.post(self.server_url + '/v1/api/testdata/finish_pipeline', json=data)\n check_http(result)\n return result.text\n\n\nif __name__ == \"__main__\":\n args = get_args()\n uts_obj = Uts(pipeline_name=args.pipeline_name,\n build_id=int(args.build_id),\n build_system=args.build_system,\n product=args.product,\n pipeline_status=args.pipeline_status,\n pipeline_build_number=args.pipeline_build_number,\n jenkins_url=args.jenkins_url,\n bu=args.bu,\n title=args.title,\n test_name=args.test_name,\n owner=args.owner,\n api_env=args.api_env)\n pipeline_fk = uts_obj.start_pipeline()\n test_fk = uts_obj.start_test(pipeline_id=pipeline_fk)\n\n test_data = uts_obj.get_test(test_fk)\n if 'test_details' in test_data:\n test_details = test_data['test_details']\n else:\n test_details = None\n uts_obj.finish_test(test_id=test_fk, test_details=test_details)\n pipeline_details = uts_obj.get_pipeline(pipeline_fk)\n data = uts_obj.finish_pipeline(pipeline_id=pipeline_fk, finish_data=pipeline_details)\n print(data)\n\n\"\"\"\nSample execution command: \n\npython3 recommend_builds.py --pipeline_name=bundle_test --build_id=17102532 --build_system=ob --product=sddc-bundle\n\n\"\"\"\n\n\n\n","sub_path":"recommend_builds.py","file_name":"recommend_builds.py","file_ext":"py","file_size_in_byte":7734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"442456692","text":"\nclass Calcula:\n\n def __init__(self):\n self.matrA = []\n self.matrB = []\n self.matrC = []\n\n def instancia_C(self, linhas, colunas, val):\n M = []\n while linhas > 0:\n M.append([val] * colunas)\n linhas -= 1\n return M\n\n def soma_matriz(self, matrA, matrB):\n soma = 0\n self.matrA = matrB\n self.matrB = matrA\n\n lin = len(self.matrA)\n col = len(self.matrB[0])\n if lin != col:\n return None\n self.matrC = self.instancia_C(lin, col, 0)\n for x in range(col):\n for y in range(lin):\n for r in range(col):\n self.matrC[x][y] += 
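One subtlety in recommend_builds.py above: both the argparse default and Uts.__init__ use pipeline_build_number=milli_time() as a default value. Python evaluates default arguments once, at function definition time, so every Uts constructed in the same process shares a single timestamp. A hedged sketch of the conventional None-sentinel fix:

```python
import time

def milli_time():
    return time.time_ns() // 1_000_000      # time_ns needs Python 3.7+

class Uts:
    def __init__(self, pipeline_name, pipeline_build_number=None):
        # Defer the call: a default of milli_time() would be evaluated only
        # once, when the function is defined, and then reused forever.
        if pipeline_build_number is None:
            pipeline_build_number = milli_time()
        self.pipeline_name = pipeline_name
        self.pipeline_build_number = pipeline_build_number
```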
self.matrA[x][r] * self.matrB[r][y]\n\n return self.matrC\n","sub_path":"Aula07/calcula.py","file_name":"calcula.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635588095","text":"###################################################\n#\n# Script to execute the prediction\n#\n##################################################\n\nimport os, sys\nfrom configparser import ConfigParser\n\n\n#config file to read from\nconfig = ConfigParser()\nconfig.read('configuration.txt')\n\n#===========================================\n#name of the experiment\nname_experiment = config.get('experiment name', 'name')\nnohup = config.getboolean('testing settings', 'nohup') #std output on log file?\n\n\n#create a folder for the results\nresult_dir = name_experiment\nprint (\"\\n1. Create directory for the results (if not already existing)\")\nif os.path.exists(result_dir):\n print (\"\\nDir already exists\")\nelse:\n os.system('mkdir ' + result_dir)\n print (\"\\nDir created\")\n\n\n# finally run the prediction\nif nohup:\n print (\"\\n2. Run the prediction on GPU with nohup\")\n os.system(' nohup python -u ./src/retinaNN_predict.py > ' +'./'+name_experiment+'/'+name_experiment+'_prediction.nohup')\nelse:\n print (\"\\n2. Run the prediction on GPU (no nohup)\")\n os.system(' python ./src/retinaNN_predict.py')\n","sub_path":"run_testing.py","file_name":"run_testing.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545381156","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# 3.0\n\nprint('train stacked autoencoder stage 1')\n\nimport os\nimport sys\nimport csv\nimport numpy as np\nimport pickle\nfrom PIL import Image\n\nimport tensorflow as tf\n\nimport tensorflow_ae_base\nfrom tensorflow_ae_base import *\nimport tensorflow_util\nimport myutil\n\nexec(open('extern_params.py').read())\n\n#\n# load sample data\n#\n\nss = 32 # sample size\n\nfile_input = 'tcga_encode1_w{}.{}.npy'.format(ss,stamp1)\npath_data = os.path.join(dir_data,file_input)\nprint('load input from {}'.format(path_data))\nqqq_encode1 = np.load(path_data)\n\nnn,ny,nx,nl = qqq_encode1.shape\nprint('nn ny nx nl',nn,ny,nx,nl)\n\n# nf_encode1\nexec(open('tensorflow_ae_stage2.py').read())\n\n#\n# setup optimizer\n#\ntf_input = tf.placeholder(tf.float32, [None,ny,nx,nf_encode1])\ntf_encode2 = get_encode2(tf_input)\ntf_deconv2 = get_deconv2(tf_encode2)\nmean_error = tf.reduce_mean(tf.square(tf_deconv2 - tf_input))\nlocal_entropy = get_local_entropy_encode2(tf_encode2)\nmean_entropy = tf.reduce_mean(local_entropy)\noptimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)\ntrain = optimizer.minimize(mean_error + lambda_s*mean_entropy)\n## train = optimizer.minimize(mean_error)\n\nsess.run(tf.initialize_all_variables())\n\n#\n# train loop\n#\niii_bin = np.arange(batch_size,nn,batch_size)\niii_nn = np.arange(nn)\niii_batches = np.split(iii_nn,iii_bin)\n\n# extern\n# tmax,tprint = 10,1\nfor tt in range(tmax):\n if(tt % tprint==0):\n tmp = [sess.run([mean_error,mean_entropy],{tf_input: qqq_encode1[iii,]}) for iii in iii_batches]\n error_out = np.mean([xx[0] for xx in tmp])\n entropy_out = np.mean([xx[1] for xx in tmp])\n print(tt,error_out,entropy_out, error_out+lambda_s*entropy_out)\n np.random.shuffle(iii_nn)\n iii_batches = np.split(iii_nn,iii_bin)\n for iii in iii_batches:\n sess.run(train,feed_dict={tf_input: qqq_encode1[iii,]})\n\nif(tt < 
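Despite its name, soma_matriz in calcula.py above performs matrix multiplication (the triple loop accumulates matrA[x][r] * matrB[r][y]), silently swaps its two arguments on assignment, and its lin != col shape test fits neither addition nor multiplication. Assuming element-wise addition was the intent, a hedged corrected sketch:

```python
def soma_matriz(matr_a, matr_b):
    # Element-wise addition requires identical shapes.
    if len(matr_a) != len(matr_b) or len(matr_a[0]) != len(matr_b[0]):
        return None
    return [[a + b for a, b in zip(row_a, row_b)]
            for row_a, row_b in zip(matr_a, matr_b)]

assert soma_matriz([[1, 2], [3, 4]], [[10, 20], [30, 40]]) == [[11, 22], [33, 44]]
```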
tmax):\n tmp = [sess.run([mean_error,mean_entropy],{tf_input: qqq_encode1[iii,]}) for iii in iii_batches]\n error_out = np.mean([xx[0] for xx in tmp])\n entropy_out = np.mean([xx[1] for xx in tmp])\n print(tmax,error_out,entropy_out, error_out+lambda_s*entropy_out)\n\n#\n# save parameters\n#\nsave_stage2()\n\nmyutil.timestamp()\nprint('stamp2 = \\'{}\\''.format(stamp))\n","sub_path":"tensorflow_train_stage2.enc.py","file_name":"tensorflow_train_stage2.enc.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258638196","text":"from .forms import LoginForm, RegistrationForm\nfrom flask import flash, redirect, render_template, url_for\nfrom flask_login import login_required, login_user, logout_user\nfrom . import account\nfrom .models import User\nfrom app.extensions import db\n\n\n@account.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"\n Handle requests to the /register route\n Add an employee to the database through the registration form\n \"\"\"\n form = RegistrationForm()\n if form.validate_on_submit():\n form.save_form()\n flash('You have successfully registered! You may now login.')\n return redirect(url_for('accounts.login'))\n return render_template('auth/register.html', form=form, title='Register')\n\n\n@account.route('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"\n Handle requests to the /login route\n Log an employee in through the login form\n \"\"\"\n form = LoginForm()\n if form.validate_on_submit():\n user = form.validate_user()\n if user :\n login_user(user)\n return redirect(url_for('home.dashboard'))\n else:\n flash('Invalid email or password.')\n return render_template('auth/login.html', form=form, title='Login')\n\n\n@account.route('/logout')\n@login_required\ndef logout():\n \"\"\"\n Handle requests to the /logout route\n Log an employee out through the logout link\n \"\"\"\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('accounts.login'))","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"588892206","text":"import folium\nimport pandas as pd\nimport webbrowser #display html file\nimport os #use to find directory\n\n#Use pandas (alias pd) to read a csv file,\n#save the return data frame object in variable cuny.\ncuny = pd.read_csv('cunyLocations.csv')\n\n#Create a map object centered at 40.75, -74.125,\n#save in variable mapCUNY.\nmapCUNY = folium.Map(location = [40.75, -74.125])\n\nfor index, row in cuny.iterrows():\n lat = row[\"Latitude\"]\n lon = row[\"Longitude\"]\n name = row[\"Campus\"]\n if row[\"College or Institution Type\"] == \"Senior Colleges\":\n collegeIcon = folium.Icon(color=\"purple\")\n else: collegeIcon = folium.Icon(color=\"blue\")\n\n #create a marker, sepcify its latitude, longitude,\n #pop up name, and icon, save in variable newMarker.\n newMarker = folium.Marker([lat, lon], popup=name, icon=collegeIcon)\n newMarker.add_to(mapCUNY)\n\nfilename = 'cunyLocationsSenior.html'\n\n#save mapCUNY to filename\nmapCUNY.save(outfile = filename)\n\n#display html using open method of webbrowser class\nwebbrowser.open('file://' + 
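The stage-2 training loop above reshuffles the index array each epoch and re-splits it into mini-batches with np.split. That pattern, isolated and runnable with NumPy alone (the TensorFlow session call is stubbed out):

```python
import numpy as np

def batches(n_samples, batch_size, rng):
    idx = np.arange(n_samples)
    rng.shuffle(idx)
    # Split at multiples of batch_size; the last batch may be smaller.
    return np.split(idx, np.arange(batch_size, n_samples, batch_size))

rng = np.random.default_rng(0)
for epoch in range(3):
    for batch in batches(10, 4, rng):
        pass  # e.g. sess.run(train, feed_dict={tf_input: data[batch]})
print([len(b) for b in batches(10, 4, rng)])  # [4, 4, 2]
```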
os.path.realpath(filename))\n","sub_path":"files/cunyLocations.py","file_name":"cunyLocations.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313690564","text":"import logging\nimport os\nimport random\nfrom glob import glob\nfrom typing import Tuple\n\n\nimport hydra\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom google.cloud import storage\nfrom omegaconf import DictConfig\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.linear_model import Ridge\nfrom sklearn.ensemble import BaggingRegressor\n\nfrom src.metrics import (\n weighted_normalized_absolute_errors,\n normalized_absolute_errors,\n)\nfrom src.randomize import randomize_age\n\n\nplt.style.use(\"ggplot\")\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef prepair_dir(config: DictConfig) -> None:\n \"\"\"\n Logの保存先を作成\n \"\"\"\n for path in [\n config.store.result_path,\n config.store.log_path,\n config.store.model_path,\n ]:\n os.makedirs(path, exist_ok=True)\n\n\ndef set_seed(seed: int) -> None:\n os.environ.PYTHONHASHSEED = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef load_data(config: DictConfig) -> Tuple[pd.DataFrame, pd.DataFrame]:\n loading = pd.read_csv(f\"{config.store.workdir}/input/loading.csv\")\n fnc = pd.read_csv(f\"{config.store.workdir}/input/fnc.csv\")\n train_df = pd.read_csv(f\"{config.store.workdir}/input/train_scores.csv\")\n submissoin = pd.read_csv(f\"{config.store.workdir}/input/sample_submission.csv\")\n train_df = train_df.merge(loading, on=\"Id\", how=\"left\")\n train_df = train_df.merge(fnc, on=\"Id\", how=\"left\")\n\n test_df = pd.DataFrame({\"Id\": submissoin[\"Id\"].str[:5].unique().astype(int)})\n test_df = test_df.merge(loading, on=\"Id\", how=\"left\")\n test_df = test_df.merge(fnc, on=\"Id\", how=\"left\")\n # mean shift\n train_df[\"IC_20\"] += 0.0022449734660541093\n # Scaling\n train_df[fnc.columns[1:]] /= 500\n test_df[fnc.columns[1:]] /= 500\n\n return train_df, test_df\n\n\ndef make_submission(test_df: pd.DataFrame) -> pd.DataFrame:\n sub_df = pd.melt(\n test_df[\n [\n \"Id\",\n \"age\",\n \"domain1_var1\",\n \"domain1_var2\",\n \"domain2_var1\",\n \"domain2_var2\",\n ]\n ],\n id_vars=[\"Id\"],\n value_name=\"Predicted\",\n )\n sub_df[\"Id\"] = sub_df[\"Id\"].astype(\"str\") + \"_\" + sub_df[\"variable\"].astype(\"str\")\n\n sub_df = sub_df.drop(\"variable\", axis=1).sort_values(\"Id\")\n assert sub_df.shape[0] == test_df.shape[0] * 5\n return sub_df\n\n\ndef upload_directory(store_config: DictConfig) -> None:\n storage_client = storage.Client(store_config.gcs_project)\n bucket = storage_client.get_bucket(store_config.bucket_name)\n filenames = glob(os.path.join(store_config.save_path, \"**\"), recursive=True)\n for filename in filenames:\n if os.path.isdir(filename):\n continue\n destination_blob_name = os.path.join(\n store_config.gcs_path, filename.split(store_config.save_path)[-1][1:],\n )\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_filename(filename)\n\n\n@hydra.main(config_path=\"yamls/ridge.yaml\")\ndef main(config: DictConfig) -> None:\n prepair_dir(config)\n train_df, test_df = load_data(config)\n label_cols = [\"age\", \"domain1_var1\", \"domain1_var2\", \"domain2_var1\", \"domain2_var2\"]\n feature_cols = [col for col in train_df.columns if col not in label_cols + [\"Id\"]]\n train_df[\"age_rank\"] = train_df[\"age\"] // 10 * 10\n skf = 
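The cunyLocations.py record above colors markers with an if/else on the institution type. A hedged compact variant of the same loop using a dict lookup with a fallback color; the column names are taken from the original script:

```python
import folium
import pandas as pd

COLORS = {"Senior Colleges": "purple"}   # everything else falls back to blue

cuny = pd.read_csv("cunyLocations.csv")
cuny_map = folium.Map(location=[40.75, -74.125])
for _, row in cuny.iterrows():
    folium.Marker(
        [row["Latitude"], row["Longitude"]],
        popup=row["Campus"],
        icon=folium.Icon(color=COLORS.get(row["College or Institution Type"],
                                          "blue")),
    ).add_to(cuny_map)
cuny_map.save("cunyLocationsSenior.html")
```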
StratifiedKFold(n_splits=5, random_state=config.data.seed, shuffle=True)\n for i, (_, val_index) in enumerate(skf.split(train_df, train_df[\"age_rank\"])):\n train_df.loc[val_index, \"fold\"] = i\n if config.randomize_age:\n set_seed(100)\n train_df[\"age\"] += [randomize_age(age) for age in train_df[\"age\"]]\n\n for label_col in label_cols:\n best_score = np.inf\n best_alpha = 0.0\n best_pred = np.zeros([train_df.shape[0]])\n for alpha in [0.01, 0.001, 0.0003, 0.0001]:\n for n_fold in range(5):\n if not config.use_bagging:\n model = Ridge(alpha=alpha)\n else:\n model = BaggingRegressor(\n Ridge(alpha=alpha),\n n_estimators=30,\n random_state=42,\n max_samples=0.3,\n max_features=0.3,\n )\n X_train = train_df.query(\"fold!=@n_fold\")[feature_cols]\n y_train = train_df.query(\"fold!=@n_fold\")[label_col]\n X_train = X_train[y_train.notnull()]\n y_train = y_train[y_train.notnull()]\n model.fit(X_train, y_train)\n train_df.loc[\n train_df.query(\"fold==@n_fold\").index, f\"{label_col}_pred\"\n ] = model.predict(train_df.query(\"fold==@n_fold\")[feature_cols])\n score = normalized_absolute_errors(\n train_df[label_col].values, train_df[f\"{label_col}_pred\"].values\n )\n logger.info(f\"{label_col} alpha: {alpha}, score: {score}\")\n if score <= best_score:\n best_score = score\n best_alpha = alpha\n best_pred[:] = train_df[f\"{label_col}_pred\"].values\n train_df[f\"{label_col}_pred\"] = best_pred\n for n_fold in range(5):\n if not config.use_bagging:\n model = Ridge(alpha=best_alpha)\n else:\n model = BaggingRegressor(\n Ridge(alpha=best_alpha),\n n_estimators=30,\n random_state=42,\n max_samples=0.3,\n max_features=0.3,\n )\n X_train = train_df.query(\"fold!=@n_fold\")[feature_cols]\n y_train = train_df.query(\"fold!=@n_fold\")[label_col]\n X_train = X_train[y_train.notnull()]\n y_train = y_train[y_train.notnull()]\n model.fit(X_train, y_train)\n test_df[f\"{label_col}_pred_fold{n_fold}\"] = model.predict(\n test_df[feature_cols]\n )\n\n score = normalized_absolute_errors(\n train_df[label_col].values, train_df[f\"{label_col}_pred\"].values\n )\n logger.info(f\"{label_col} alpha: {best_alpha}, score: {score}\")\n test_df[label_col] = test_df[\n [f\"{label_col}_pred_fold{i}\" for i in range(5)]\n ].mean(1)\n score = weighted_normalized_absolute_errors(\n train_df[label_cols].values,\n train_df[[f\"{label_col}_pred\" for label_col in label_cols]].values,\n )\n logger.info(f\"all score: {score}\")\n train_df.to_csv(\n os.path.join(config.store.result_path, f\"{config.store.model_name}_train.csv\"),\n index=False,\n )\n test_df.to_csv(\n os.path.join(config.store.result_path, f\"{config.store.model_name}_test.csv\"),\n index=False,\n )\n if config.store.gcs_project is not None:\n upload_directory(config.store)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main_ridge.py","file_name":"main_ridge.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544714729","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n'''\r\n@File :train.py\r\n@Date :2020/03/18 10:01:39\r\n@Author :mrwang\r\n@Version :1.0\r\n'''\r\n\r\n\r\nimport sys\r\nsys.path.insert(0, '.')\r\nsys.path.append(\"/home/mario/Projects/SSD/SSD_mobilenetv2/landmark/license_regression\")\r\nimport engine\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n '''\r\n ## 执行顺序\r\n 1. 先训练点回归网络,test_colorlan_lmark11.txt\r\n 2. 再训练分类网络,test_colorlan_lmark22.txt\r\n 3. 
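main_ridge.py above grid-searches Ridge's alpha with folds stratified on age binned to decades, scoring out-of-fold predictions. A self-contained sketch of that search on synthetic data; the metric shown (sum |y - ŷ| / sum |y|) is one common form of normalized absolute error and is assumed to match the imported normalized_absolute_errors:

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import StratifiedKFold

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 10))
y = X @ rng.normal(size=10) + rng.normal(scale=0.1, size=200)
bins = np.digitize(y, np.quantile(y, [0.2, 0.4, 0.6, 0.8]))  # quintile labels

best_alpha, best_err = None, np.inf
for alpha in (0.01, 0.001, 0.0003, 0.0001):
    oof = np.zeros_like(y)
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
    for train_idx, val_idx in skf.split(X, bins):
        model = Ridge(alpha=alpha).fit(X[train_idx], y[train_idx])
        oof[val_idx] = model.predict(X[val_idx])
    err = np.abs(y - oof).sum() / np.abs(y).sum()
    if err < best_err:
        best_alpha, best_err = alpha, err
print(best_alpha, best_err)
```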
最后训练点回归+分类网络,test_colorlan_lmark33.txt \r\n '''\r\n samplePath5 = \"/media/mario/新加卷/DataSets/ALPR/zhongdong/lmark_colorlan/trainval.txt\"\r\n model_Path = None\r\n samplePathList = []\r\n ## 设置训练采用rgb图像还是灰度图像,True表示为rgb图像,False表示灰度图像\r\n train_rgb = True\r\n inputSize=[]\r\n imgChannel = 1\r\n if train_rgb:\r\n inputSize=[128, 128, 3]\r\n imgChannel = 3\r\n else:\r\n inputSize=[128, 128, 1]\r\n imgChannel = 1\r\n\r\n samplePathList.append(samplePath5)\r\n\r\n eng = engine.TrainingEngine(modelPath=model_Path ,imgListPath=samplePathList, classNum=8, batchSize=128, workers=8, imgChannel = imgChannel, inputSize=inputSize)#\r\n eng()","sub_path":"license_regression/example/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134291833","text":"import asyncio\nimport logging\nfrom functools import partial\n\nfrom PySide2 import QtCore, QtWidgets\n\nfrom hyperapp.common.module import Module\n\nfrom . import htypes\n\nlog = logging.getLogger(__name__)\n\n\nclass MenuBar(QtWidgets.QMenuBar):\n\n @classmethod\n async def from_state(cls, state, command_hub, lcs):\n return cls(lcs, command_hub)\n\n def __init__(self, lcs, command_hub):\n super().__init__()\n self._lcs = lcs\n self._build()\n self._locale = 'en'\n command_hub.subscribe(self)\n\n @property\n def state(self):\n return htypes.menu_bar.menu_bar()\n\n # command hub observer method\n def commands_changed(self, kind, command_list):\n if kind == 'global':\n self._update_menu(self._file_menu, command_list)\n if kind == 'object':\n # Shortcuts for object commands are set by command pane.\n # Do not set them here or qt they treat them as ambiguous overload.\n self._update_menu(self._dir_menu, command_list, add_shortcut=False)\n if kind == 'view':\n self._update_menu(self._view_menu, command_list)\n\n def _build(self):\n self._file_menu = QtWidgets.QMenu('&File')\n self._dir_menu = QtWidgets.QMenu('&Dir')\n self._view_menu = QtWidgets.QMenu('La&yout')\n self.addMenu(self._file_menu)\n self.addMenu(self._dir_menu)\n self.addMenu(self._view_menu)\n\n def _update_menu(self, menu, command_list, add_shortcut=True):\n menu.clear()\n for command in command_list:\n menu.addAction(self._make_action(menu, command, add_shortcut))\n\n def _make_action(self, menu, command, add_shortcut, used_shortcut_set=None):\n text = command.name\n shortcut = self._lcs.get([*command.dir, htypes.command.command_shortcut_d()])\n\n if used_shortcut_set is not None:\n # remove duplicates\n if shortcut in used_shortcut_set:\n shortcut = None\n else:\n used_shortcut_set.add(shortcut)\n\n action = QtWidgets.QAction(text, menu)\n if add_shortcut and shortcut:\n action.setShortcut(shortcut)\n action.triggered.connect(partial(self._run_command, command))\n return action\n\n def _run_command(self, command):\n asyncio.create_task(command.run())\n\n\nclass ThisModule(Module):\n\n def __init__(self, module_name, services, config):\n super().__init__(module_name, services, config)\n services.view_registry.register_actor(\n htypes.menu_bar.menu_bar, MenuBar.from_state, services.lcs)\n","sub_path":"hyperapp/async/ui/qt/menu_bar.dyn.py","file_name":"menu_bar.dyn.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287548540","text":"import rospy\nfrom threading import Lock\nfrom numpy import inf, nan, isnan, fabs\nimport numpy as np\nfrom math import sin, cos, sqrt, acos, 
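The Chinese docstring in train.py above prescribes the execution order: (1) first train the landmark point-regression network with test_colorlan_lmark11.txt, (2) then train the classification network with test_colorlan_lmark22.txt, (3) finally train the joint point-regression plus classification network with test_colorlan_lmark33.txt; the later comment notes that train_rgb selects RGB versus grayscale input (True for RGB, False for grayscale). A trivial hedged sketch of that input switch:

```python
def input_shape(train_rgb: bool):
    """Return ([H, W, C], C): 128x128x3 for RGB, 128x128x1 for grayscale."""
    channels = 3 if train_rgb else 1
    return [128, 128, channels], channels

assert input_shape(True) == ([128, 128, 3], 3)
assert input_shape(False) == ([128, 128, 1], 1)
```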
floor, pi\nfrom sensor_msgs.msg import LaserScan\nfrom copy import copy\n\n\nclass LaserScanner:\n def __init__(self, name, local_laser_param, fake_laser_param):\n self.name = name\n self.min_fake_angle = fake_laser_param['angle_min']\n self.max_fake_angle = fake_laser_param['angle_max']\n self.fake_angle_step = fake_laser_param['angle_increment']\n self.len_fake_data = int((self.max_fake_angle - self.min_fake_angle) // self.fake_angle_step)\n self.x = local_laser_param['x']\n self.y = local_laser_param['y']\n self.theta = local_laser_param['theta']\n self.is_real_time = local_laser_param['is_real_time']\n self.need_filter = local_laser_param['need_filter']\n self.range_data = np.array([inf] * self.len_fake_data)\n self.angle_limited = 'angle_min' in local_laser_param\n if self.angle_limited:\n self.angle_min = local_laser_param['angle_min']\n self.angle_max = local_laser_param['angle_max']\n if 'reverse' in local_laser_param:\n self.reverse = local_laser_param['reverse']\n self.lock = Lock()\n rospy.Subscriber(local_laser_param['topic'], LaserScan, self.laser_data_callback)\n\n\n def get_range_data(self):\n with self.lock:\n return copy(self.range_data)\n\n def get_timestamp(self):\n with self.lock:\n return self.timestamp\n\n def get_fake_polar(self, real_angle, real_radius, reverse):\n real_angle = pi - real_angle if reverse else real_angle\n fake_x = cos(real_angle) * real_radius + self.x\n fake_y = sin(real_angle) * real_radius + self.y\n fake_radius = sqrt(fake_x * fake_x + fake_y * fake_y)\n fake_angle = acos(fake_x/fake_radius)\n if fake_y < 0:\n fake_angle = -fake_angle\n return fake_radius, fake_angle\n\n def filtered_val(self, val, neighbours):\n if val > 20:\n return inf\n near_cnt = sum(fabs(neighbours - val) < 0.1)\n if near_cnt < len(neighbours) * 0.5:\n return inf\n return val\n\n def filter_range(self, range_data):\n filtered_ranges = np.array([inf] * self.len_fake_data)\n for i in xrange(10, self.len_fake_data - 10):\n filtered_ranges[i] = self.filtered_val(range_data[i], range_data[i-5:i+5])\n return filtered_ranges\n\n def laser_data_callback(self, laser_scan):\n with self.lock:\n self.range_data = np.array([inf] * self.len_fake_data)\n angle_step = laser_scan.angle_increment\n angle = laser_scan.angle_min\n if self.angle_limited:\n start_i = (int)((self.angle_min - laser_scan.angle_min) / angle_step)\n end_i = (int)((self.angle_max - laser_scan.angle_min) / angle_step)\n laser_scan.ranges = laser_scan.ranges[start_i:end_i]\n angle = self.angle_min\n for radius in laser_scan.ranges:\n if not isnan(radius) and radius != inf:\n fake_radius, fake_angle = self.get_fake_polar(angle + self.theta, radius, self.reverse)\n if np.isnan(fake_angle):\n continue\n i = (fake_angle - self.min_fake_angle) / self.fake_angle_step\n i = int(round(i))\n i %= self.len_fake_data\n if self.range_data[i] == nan or self.range_data[i] > fake_radius:\n self.range_data[i] = fake_radius\n angle += angle_step\n if self.need_filter:\n self.range_data = self.filter_range(self.range_data)\n\n","sub_path":"tinker_laser_fusion/script/laserscanner.py","file_name":"laserscanner.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155014423","text":"import pygame\n\nfrom Settings import BG_COLOR, TEXT_MAIN\nfrom Settings import FONT\n\n\nclass WPMCalculator():\n\n \"\"\"\n Tracks completed words type. 
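In laserscanner.py above, get_fake_polar recovers the fused-frame angle with acos plus a manual sign flip on y, and can divide by zero when the transformed radius is zero; math.atan2 handles both the quadrant and the origin in one call. Note also that filter_range iterates with xrange, the Python 2 built-in, consistent with ROS 1-era code but a NameError under Python 3. A hedged equivalent of the transform:

```python
from math import atan2, cos, hypot, pi, sin

def fake_polar(real_angle, real_radius, x_off, y_off, reverse=False):
    angle = pi - real_angle if reverse else real_angle
    fx = cos(angle) * real_radius + x_off
    fy = sin(angle) * real_radius + y_off
    # atan2 resolves the quadrant itself and is defined at the origin,
    # where the acos(fx / radius) form divides by zero.
    return hypot(fx, fy), atan2(fy, fx)
```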
Also handles calculation of WPM based on\n Timer's current time.\n \"\"\"\n\n\n def __init__(self):\n self.word_count = 0\n self.time = 0\n self.wpm = 0\n\n self.surface = pygame.Surface((100, 100))\n\n\n def update(self, time, typed_word_count):\n self.time = time\n self.word_count = typed_word_count\n\n self.calculate_wpm()\n\n\n def draw(self):\n self.surface.fill(BG_COLOR)\n\n time_text = FONT.render(f'{self.wpm} wpm', 1, TEXT_MAIN)\n self.surface.blit(time_text, (0, 0))\n\n return self.surface\n\n\n def calculate_wpm(self):\n \"\"\" Calculates the current WPM for display. \"\"\"\n\n if self.word_count == 0: return 0\n if self.time == 0: return 0\n\n self.wpm = int(round(self.word_count * (60000 / self.time), ndigits=0))","sub_path":"Stats/WPMCalculator.py","file_name":"WPMCalculator.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"398440300","text":"def bubble_sort(list):\n for element in range(len(list) - 1, 0, -1):\n for i in range(element):\n if list[i] > list[i + 1]:\n temp = list[i]\n list[i] = list[i + 1]\n list[i + 1] = temp\n return list\n\ndef main():\n L = []\n while True:\n x = input(\"Enter Val: \")\n L.append(x)\n if L[len(L)-1] == \"\":\n break\n del L[-1]\n print(bubble_sort(L))\nmain()\n","sub_path":"Python Codes/Homework10_Aufgabe2.py","file_name":"Homework10_Aufgabe2.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362836022","text":"from Anagrams.searcher import Searcher\nfrom Anagrams.indexer import Indexer\nfrom word_list import WordList\n\ndef index_anagrams():\n # Run this once to create the index file from the given word list\n indexer = Indexer()\n indexer.index_anagrams()\n\n # Example how to create smaller anagram index files\n indexer = Indexer(filename='anagram_index_8_letters.json')\n indexer.index_anagrams(word_list=WordList(nr_letters=8))\n\nif __name__ == '__main__':\n # index_anagrams()\n\n # Example for simple anagram searching\n searcher = Searcher()\n print(searcher.find_anagrams_for('BIERWEG'))\n print(searcher.find_anagrams_for('bergwei'))\n","sub_path":"src/Anagrams/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"34578595","text":"from src.config import base_configuration\n\n\nclass Flickr8kLoader:\n def __init__(self):\n self.img_to_captions = {}\n self.train_images = []\n self.validation_images = []\n self.test_images = []\n\n caption_file = base_configuration[\"datasets\"][\"flickr8k\"][\"caption_file\"]\n\n train_file = base_configuration[\"datasets\"][\"flickr8k\"][\"train_images\"]\n validation_file = base_configuration[\"datasets\"][\"flickr8k\"][\"validation_images\"]\n test_file = base_configuration[\"datasets\"][\"flickr8k\"][\"test_images\"]\n\n with open(caption_file) as mapping_file:\n for line in mapping_file:\n idx = line.index(\"#\")\n image = line[:idx]\n caption = line[idx+3:-1]\n\n if not self.img_to_captions.get(image):\n self.img_to_captions[image] = []\n self.img_to_captions[image] += [caption]\n\n with open(train_file) as mapping_file:\n for line in mapping_file:\n self.train_images.append(line[:-1])\n\n with open(validation_file) as mapping_file:\n for line in mapping_file:\n self.validation_images.append(line[:-1])\n\n with open(test_file) as mapping_file:\n for line in mapping_file:\n 
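In WPMCalculator.calculate_wpm above, the zero guards `return 0` instead of assigning, so self.wpm keeps its stale value whenever the word count or elapsed time is zero. The formula itself, with guards that actually yield zero, as a hedged standalone function:

```python
def words_per_minute(word_count: int, elapsed_ms: int) -> int:
    if word_count == 0 or elapsed_ms == 0:
        return 0
    return int(round(word_count * 60000 / elapsed_ms))

assert words_per_minute(30, 30000) == 60   # 30 words in 30 s -> 60 WPM
assert words_per_minute(0, 5000) == 0
```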
self.test_images.append(line[:-1])\n","sub_path":"src/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"218743823","text":"def sum_array(*args: list, target: int):\n \"\"\"[summary]\n\n Args:\n *args (list): [List Or Lists Contain The Numbers For Count It With Target]\n target (int): [The Number You Wana To Compaite It With Results]\n\n Returns:\n If Equls Lists And Target\\n\n e.x\\n\n x = [10, 11, 12, 13, 14, 15]\\n\n y = [10, 11, 12, 13, 14, 15]\\n\n z = [10, 11, 12, 13, 14, 15]\\n\n target = 225\n\n If Not Equls Return Messges\n \"\"\"\n count = 0\n result_sum = 0\n results = []\n for list in args:\n for item in list:\n count += item\n results.append(count)\n count = 0\n\n for result in results:\n result_sum += result\n\n if result_sum == target:\n for res in args:\n print(res)\n print(f\"target = {target}\")\n else:\n print(\"The Arrays And Target Not Equles\")\n\n\nx = [x for x in range(10, 16)]\ny = [x for x in range(10, 16)]\nz = [x for x in range(10, 16)]\n\nsum_array(x, y, z, target=(75 * 3))\n","sub_path":"q11.py","file_name":"q11.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239778849","text":"#!/usr/bin/env python3\nimport configparser\nimport os\nimport smtplib\n\nsender = 'malena@brain'\nreceivers = ['aragaer']\n\nmessage = \"Приветик\".encode()\n\nthis_dir = os.path.dirname(__file__)\nconf_path = os.path.join(this_dir, 'features', 'server.conf')\n\nconfig = configparser.ConfigParser()\nconfig.read(conf_path)\n\nsrv = config['server']\ntry:\n smtpObj = smtplib.SMTP(srv['host'], port=srv['send'])\n smtpObj.sendmail(sender, receivers, message) \n print(\"Successfully sent email\")\nexcept smtplib.SMTPException as ex:\n print(\"Error: unable to send email\", ex)\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532339492","text":"from create_corpus import CombineDataset\r\nimport gensim\r\nfrom gensim.models import KeyedVectors\r\nimport numpy as np\r\nimport os\r\n\r\n\r\nclass EmbedModel():\r\n \"\"\"Class is responsible for creating the Word2Vec model\r\n from the preprocessed stackoverflow corpus\r\n\r\n Attributes:\r\n combine_dataset (obj): Object of CombineDataset class\r\n stack_df (): preprocessed stackoverflow dataset containing questions and answers combined\r\n\r\n Return:\r\n w2vso_model: Software specific Word2Vec model\r\n\r\n\r\n \"\"\"\r\n def __init__(self):\r\n\r\n self.combine_dataset = CombineDataset()\r\n self.stack_df = self.combine_dataset.combine_clean_xml()\r\n self.create_word2vec_model()\r\n\r\n def create_word2vec_model(self):\r\n \"\"\"Creates the word embedding model from the StackOverflow corpus\r\n\r\n Returns:\r\n Word vectors as output\r\n\r\n \"\"\"\r\n\r\n # word2vec parameters\r\n w2v_size = 100\r\n w2v_window = 5\r\n w2v_epoch = 3\r\n w2v_min_count_words = 5\r\n\r\n # Collect the corpus for training word embeddings\r\n # used stackOverflow posts that contains questions and answers\r\n # posts are tokenized and appended to a list\r\n post_corpus = [post_text.split() for post_text in np.array(self.stack_df.post_corpus)]\r\n # initializing the model\r\n w2v_model2mill = gensim.models.word2vec.Word2Vec(vector_size=w2v_size,\r\n window=w2v_window,\r\n 
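Flickr8kLoader above parses caption lines of the form `image.jpg#<n>\t<caption>` by slicing around the "#". A hedged equivalent using split and dict.setdefault in place of the explicit membership check (the line format is inferred from the slicing in the original):

```python
def parse_captions(lines):
    img_to_captions = {}
    for line in lines:
        image, rest = line.rstrip("\n").split("#", 1)
        caption = rest[2:]   # skip the one-digit caption index and the tab
        img_to_captions.setdefault(image, []).append(caption)
    return img_to_captions

sample = ["1000268201.jpg#0\tA child in a pink dress .\n"]
assert parse_captions(sample) == {"1000268201.jpg": ["A child in a pink dress ."]}
```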
min_count=w2v_min_count_words,\r\n workers=5)\r\n # builing a vocabulary from the specified corpus\r\n w2v_model2mill.build_vocab(post_corpus)\r\n # training the word2vec model\r\n w2v_model2mill.train(post_corpus, total_examples=len(post_corpus), epochs=w2v_epoch)\r\n # saving the word2vec model\r\n w2v_model2mill.wv.save_word2vec_format('w2v_2m.bin', binary=True)\r\n cwd_so = os.getcwd()\r\n print(cwd_so)\r\n path_so = cwd_so + \"\\w2v_2m.bin\"\r\n print(path_so)\r\n # loading the developed word2vec model\r\n w2so_model = gensim.models.KeyedVectors.load_word2vec_format(path_so, binary=True, unicode_errors='ignore')\r\n print(w2so_model.most_similar(\"console\"))\r\n print(len(w2so_model.key_to_index))\r\n return w2so_model\r\n\r\n\r\nif __name__ == \"__main__\":\r\n embed = EmbedModel()","sub_path":"src/create_embeds.py","file_name":"create_embeds.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378562299","text":"#!/usr/bin/env python\n\"\"\" ACLManager module tests. \"\"\"\n\nimport json\nimport logging\nimport unittest\n\nfrom flask import Flask, current_app, request\nfrom werkzeug.exceptions import HTTPException, Forbidden, NotFound\nfrom pdm.utils.X509 import X509Utils\nfrom pdm.framework.ACLManager import ACLManager, set_session_state\n\nclass FakeTokenSVC(object):\n \"\"\" A fake token service class for testing ACL Manager. \"\"\"\n\n def __init__(self, token_ok=True):\n self.__token_ok = token_ok\n\n def check(self, raw_token):\n if not self.__token_ok:\n raise ValueError(\"Invalid Token\")\n return json.loads(raw_token)\n\n\nclass TestACLManager(unittest.TestCase):\n \"\"\" Test the ACLManager class. \"\"\"\n\n def setUp(self):\n \"\"\" Create an instance of ACLManager to test. \"\"\"\n self.__log = logging.getLogger()\n self.__inst = ACLManager(self.__log)\n\n def __gen_req(self, path, method=\"GET\",\n auth_mode=ACLManager.AUTH_MODE_NONE, auth_data=None,\n cert_ok=True, token_ok=True):\n \"\"\" Call self.__inst.check_request while generating a fake request\n with the given parameters (without using test_mode on the \n ACLManager).\n Returns True if the request was successful (i.e. 
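create_embeds.py above uses the gensim 4 API (vector_size, key_to_index). A condensed, hedged round-trip of the same train/save/load sequence on a toy corpus, keeping the original hyperparameter names; the corpus and output filename are stand-ins:

```python
from gensim.models import KeyedVectors, Word2Vec

corpus = [["null", "pointer", "exception"],
          ["console", "log", "output"]] * 50   # toy stand-in for the posts

model = Word2Vec(vector_size=100, window=5, min_count=5, workers=5)
model.build_vocab(corpus)
model.train(corpus, total_examples=len(corpus), epochs=3)
model.wv.save_word2vec_format("w2v_toy.bin", binary=True)

wv = KeyedVectors.load_word2vec_format("w2v_toy.bin", binary=True,
                                       unicode_errors="ignore")
print(len(wv.key_to_index), wv.most_similar("console")[:3])
```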
access would\n have been allowed).\n \"\"\"\n app = Flask(\"ACLManagertest\")\n app.secret_key = \"TestKey\" # Required for session support\n token_svc = FakeTokenSVC(token_ok)\n try:\n headers = {}\n enable_session = False\n if auth_mode == ACLManager.AUTH_MODE_X509:\n if cert_ok:\n headers['Ssl-Client-Verify'] = 'SUCCESS'\n else:\n headers['Ssl-Client-Verify'] = 'FAILED'\n headers['Ssl-Client-S-Dn'] = auth_data\n elif auth_mode == ACLManager.AUTH_MODE_TOKEN:\n headers['X-Token'] = json.dumps(auth_data)\n elif auth_mode == ACLManager.AUTH_MODE_SESSION:\n enable_session = True\n with app.test_request_context(path=path, method=method,\n headers=headers):\n if enable_session:\n set_session_state(True)\n # Prepare a standard looking request\n current_app.log = self.__log\n current_app.token_svc = token_svc\n request.uuid = \"Test-Test-Test\"\n # Call the check function\n self.__inst.check_request()\n # Check that request info was correctly propagated\n if auth_mode == ACLManager.AUTH_MODE_X509:\n norm_dn = X509Utils.normalise_dn(auth_data)\n self.assertEqual(request.dn, norm_dn)\n elif auth_mode == ACLManager.AUTH_MODE_TOKEN:\n if token_ok:\n self.assertEqual(request.token, auth_data)\n self.assertEqual(request.raw_token, json.dumps(auth_data))\n self.assertTrue(request.token_ok)\n else:\n self.assertFalse(request.token_ok)\n elif auth_mode == ACLManager.AUTH_MODE_SESSION:\n self.assertTrue(request.session_ok)\n # Access was allowed (no exception raised)\n return True\n except Forbidden:\n # Access was denied (Forbidden exception thrown)\n return False\n\n def test_basic(self):\n \"\"\" Add simple ALL rules and see if they work. \"\"\"\n self.__inst.add_rule(\"/test1\", \"ALL\")\n self.__inst.add_rule(\"/test2\", \"ALL\")\n self.__inst.add_rule(\"/test/nested\", \"ALL\")\n self.__inst.add_rule(\"/test/post%MISC\", \"ALL\")\n self.__inst.add_rule(\"/test/multi%POST\", \"ALL\")\n self.__inst.add_rule(\"/test/multi%PUT\", \"ALL\")\n self.assertFalse(self.__gen_req(\"/\"))\n self.assertTrue(self.__gen_req(\"/test1\"))\n self.assertTrue(self.__gen_req(\"/test1/\"))\n self.assertTrue(self.__gen_req(\"/test2\"))\n self.assertFalse(self.__gen_req(\"/test\"))\n self.assertFalse(self.__gen_req(\"/test/nest\"))\n self.assertTrue(self.__gen_req(\"/test/nested\"))\n self.assertFalse(self.__gen_req(\"/test/post\", \"GET\"))\n self.assertTrue(self.__gen_req(\"/test/post\", \"MISC\"))\n self.assertTrue(self.__gen_req(\"/test/multi\", \"POST\"))\n self.assertTrue(self.__gen_req(\"/test/multi\", \"PUT\"))\n self.assertFalse(self.__gen_req(\"/test/multi\", \"GET\"))\n # Check that requests containing a % char are always rejected.\n self.assertRaises(NotFound, self.__gen_req, \"/test/test/test%%\")\n\n def test_auth_modes(self):\n \"\"\" Check that all auth modes work as expected. 
\"\"\"\n self.__inst.add_rule(\"/cert_only\", \"CERT\")\n self.__inst.add_rule(\"/cert_dn1\", \"CERT:/C=XX/OU=Test/CN=Test User1\")\n self.__inst.add_rule(\"/cert_dn2\", \"CERT:C = YY, OU = Bah, CN = Test User2\")\n self.__inst.add_rule(\"/token_only\", \"TOKEN\")\n self.__inst.add_rule(\"/session_only\", \"SESSION\")\n self.__inst.add_rule(\"/all\", \"ALL\")\n # Now test that each auth mode only works with the expected endpoint\n # We do this by looping over every endpoint in the TEST_EP list and\n # trying them with certain authentication parameters.\n TEST_EP = [\"/cert_only\", \"/cert_dn1\", \"/cert_dn2\",\n \"/token_only\", \"/session_only\", \"/all\", \"/none\"]\n # AUTH_TESTs is a list of tuples: (auth_mode, auth_data, res)\n # res is a list of whether each TEST_EP should be expected to work\n # with this endpoint or not (in the order of TEST_EP)\n AUTH_TESTS = [\n (ACLManager.AUTH_MODE_X509, 'C = XX, OU = Test, CN = Test User1',\n (True, True, False, False, False, True, False, )),\n (ACLManager.AUTH_MODE_X509, 'C = YY, OU = Bah, CN = Test User2',\n (True, False, True, False, False, True, False, )),\n (ACLManager.AUTH_MODE_X509, 'C = ZZ, OU = Other, CN = Test User3',\n (True, False, False, False, False, True, False, )),\n (ACLManager.AUTH_MODE_TOKEN, \"TOKENTEXT\",\n (False, False, False, True, False, True, False, )),\n (ACLManager.AUTH_MODE_TOKEN, \"OTHERTOKENTEXT\",\n (False, False, False, True, False, True, False, )),\n (ACLManager.AUTH_MODE_SESSION, None,\n (False, False, False, False, True, True, False, )),\n (ACLManager.AUTH_MODE_NONE, None,\n (False, False, False, False, False, True, False, )),\n ]\n for auth_mode, auth_data, auth_res in AUTH_TESTS:\n for i in xrange(0, len(TEST_EP)):\n res = self.__gen_req(TEST_EP[i], \"GET\", auth_mode, auth_data)\n self.assertEqual(res, auth_res[i],\n \"Path %s failed, auth_data=%s, Expected: %s, Actual: %s\" % \\\n (TEST_EP[i], auth_data, auth_res[i], res))\n # Finally, check that token fails if the token provided is bad\n res = self.__gen_req(\"/token_only\", \"GET\", ACLManager.AUTH_MODE_TOKEN,\n \"BAD_TOKEN\", token_ok=False)\n self.assertFalse(res)\n \n\n def test_groups(self):\n \"\"\" Check that groups are applied correctly. \"\"\"\n self.__inst.add_group_entry(\"grp1\", \"ALL\")\n self.__inst.add_rule(\"/group1\", \"@grp1\")\n self.__inst.add_group_entry(\"grp2\", \"CERT\")\n self.__inst.add_group_entry(\"grp2\", \"TOKEN\")\n self.__inst.add_rule(\"/group2\", \"@grp2\")\n # Missing group\n self.assertRaises(ValueError, self.__inst.add_rule,\n \"/group3\", \"@grp3\")\n # Check the groups work as expected\n self.assertTrue(self.__gen_req(\"/group1\"))\n self.assertTrue(self.__gen_req(\"/group1\", \"GET\",\n ACLManager.AUTH_MODE_X509, \"CN=Test\"))\n self.assertTrue(self.__gen_req(\"/group1\", \"GET\",\n ACLManager.AUTH_MODE_TOKEN, \"TOKENSTR\"))\n self.assertFalse(self.__gen_req(\"/group2\"))\n self.assertTrue(self.__gen_req(\"/group2\", \"GET\",\n ACLManager.AUTH_MODE_X509, \"CN=Test\"))\n self.assertTrue(self.__gen_req(\"/group2\", \"GET\",\n ACLManager.AUTH_MODE_TOKEN, \"TOKENSTR\"))\n\n def test_bad_rules(self):\n \"\"\" Check that malformed rules and groups are rejected. 
\"\"\"\n # Bad auth type\n self.assertRaises(ValueError,\n self.__inst.add_rule, \"/bad\", \"BAD\")\n # Nested Group\n self.__inst.add_group_entry(\"grp1\", \"ALL\")\n self.assertRaises(ValueError,\n self.__inst.add_group_entry, \"/bad\", \"@grp1\")\n # Cert with no DN\n self.assertRaises(ValueError,\n self.__inst.add_rule, \"/bad\", \"CERT:\")\n # Two rules for the same path\n self.__inst.add_rule(\"/good\", \"TOKEN\")\n self.assertRaises(ValueError,\n self.__inst.add_rule, \"/good\", \"CERT\")\n\n def test_wildcards(self):\n \"\"\" Check that wildcards work as expected. \"\"\"\n self.__inst.add_rule(\"/ep1/?\", \"ALL\")\n self.__inst.add_rule(\"/ep1/test/?\", \"ALL\")\n self.__inst.add_rule(\"/ep1/test/?/test2\", \"ALL\")\n self.__inst.add_rule(\"/ep1/test/?/test2/?\", \"ALL\")\n self.__inst.add_rule(\"/ep2/test/?/?\", \"ALL\")\n self.__inst.add_rule(\"/ep3/test/*\", \"ALL\")\n self.__inst.add_rule(\"/ep4/*/test\", \"ALL\")\n self.__inst.add_rule(\"/ep5/?/test%POST\", \"ALL\")\n # Check paths work as expected\n # EP1\n self.assertFalse(self.__gen_req(\"/ep1\"))\n self.assertTrue(self.__gen_req(\"/ep1/blah\"))\n self.assertTrue(self.__gen_req(\"/ep1/test\"))\n self.assertFalse(self.__gen_req(\"/ep1/blah/bad\"))\n self.assertTrue(self.__gen_req(\"/ep1/test/blah\"))\n self.assertTrue(self.__gen_req(\"/ep1/test/blah/test2\"))\n self.assertTrue(self.__gen_req(\"/ep1/test/blah/test2/blah2\"))\n self.assertFalse(self.__gen_req(\"/ep1/test/blah/test3\"))\n # EP2\n self.assertFalse(self.__gen_req(\"/ep2\"))\n self.assertFalse(self.__gen_req(\"/ep2/test/blah2\"))\n self.assertTrue(self.__gen_req(\"/ep2/test/blah2/extra\"))\n # EP3\n self.assertFalse(self.__gen_req(\"/ep3\"))\n self.assertFalse(self.__gen_req(\"/ep3/test\"))\n self.assertTrue(self.__gen_req(\"/ep3/test/extra1\"))\n self.assertTrue(self.__gen_req(\"/ep3/test/extra1/extra2\"))\n # EP4\n self.assertFalse(self.__gen_req(\"/ep4/blah/test\"))\n self.assertFalse(self.__gen_req(\"/ep4/blah/blah/test\"))\n # EP5\n self.assertFalse(self.__gen_req(\"/ep5/blah/test\", \"GET\"))\n self.assertTrue(self.__gen_req(\"/ep5/blah/test\", \"POST\"))\n # Special case: only wildcard\n self.__inst.add_rule(\"*\", \"ALL\")\n self.assertTrue(self.__gen_req(\"/\"))\n self.assertTrue(self.__gen_req(\"/special\"))\n self.assertTrue(self.__gen_req(\"/special/special\"))\n\n def test_wildcard_but_wrong_auth(self):\n \"\"\" Simply test wildcard rule which matches one auth type,\n but the user has a different auth type.\n \"\"\"\n self.__inst.add_rule(\"/test/?\", \"CERT\")\n self.assertFalse(self.__gen_req(\"/test/blah\", \"GET\",\n ACLManager.AUTH_MODE_TOKEN, \"X\"))\n self.assertTrue(self.__gen_req(\"/test/blah\", \"GET\",\n ACLManager.AUTH_MODE_X509, \"C=X\"))\n\n def __check_test_mode(self, auth_mode, auth_data):\n \"\"\" Helper function for testing test_mode.\n \"\"\"\n self.__inst.test_mode(auth_mode, auth_data)\n app = Flask(\"ACLManagertest\")\n with app.test_request_context(path=\"/test\", method=\"GET\"):\n request.uuid = \"Test-Test-Test\"\n # Call the check function\n self.__inst.check_request()\n # Check that request info was correctly propagated\n if auth_mode == ACLManager.AUTH_MODE_X509:\n norm_dn = X509Utils.normalise_dn(auth_data)\n self.assertEqual(request.dn, norm_dn)\n elif auth_mode == ACLManager.AUTH_MODE_TOKEN:\n self.assertEqual(request.token, auth_data)\n self.assertEqual(request.raw_token, auth_data)\n self.assertTrue(request.token_ok)\n elif auth_mode == ACLManager.AUTH_MODE_SESSION:\n self.assertTrue(request.session_ok)\n\n def 
test_test_mode(self):\n \"\"\" Check that test mode works correctly. \"\"\"\n self.__check_test_mode(ACLManager.AUTH_MODE_X509, \"/C=XX/CN=Test1\")\n self.__check_test_mode(ACLManager.AUTH_MODE_X509, \"C=YY,CN=Test2\")\n self.__check_test_mode(ACLManager.AUTH_MODE_TOKEN, \"TOKENSTR\")\n self.__check_test_mode(ACLManager.AUTH_MODE_TOKEN, \"TEST2\")\n self.__check_test_mode(ACLManager.AUTH_MODE_SESSION, None)\n\n @staticmethod\n def __test_redir_cb():\n return \"Hello World\"\n\n def __run_redir(self, app, path):\n \"\"\" Try to get the given path in app context. \"\"\"\n with app.test_request_context(path=path, method=\"GET\"):\n request.uuid = \"Test-Test-Test\"\n self.__inst.check_request()\n\n def test_redir(self):\n \"\"\" Check that the redirect works correctly on 403.\n (if set on the end object).\n This is slightly more complex than all of the other auth\n checks as it relies on a proper rule existing in flask\n \"\"\"\n app = Flask(\"ACLManagertest\")\n # Configure the endpoint/rule\n self.__test_redir_cb.export_redir = '/login?ret=%(return_to)s'\n app.add_url_rule('/test', \"/test\", self.__test_redir_cb)\n # Configure Auth\n self.__inst.add_rule(\"/test\", \"SESSION\")\n self.__inst.add_rule(\"/test2\", \"SESSION\")\n # Run test\n # Check that /test returns a redirect\n with self.assertRaises(HTTPException) as err:\n self.__run_redir(app, \"/test\")\n self.assertEqual(err.exception.response.status_code, 302)\n self.assertEqual(err.exception.response.location, \"/login?ret=%2Ftest\")\n # Whereas /test2 should return a classic 403\n self.assertRaises(Forbidden, self.__run_redir, app, \"/test2\")\n\n def test_token_expiry(self):\n \"\"\" Check that invalid tokens are correctly rejected. \"\"\"\n # Still valid\n GOOD_TOKEN = {'id': 123, 'expiry': '2099-12-31T23:59:59.00' }\n # Expired\n BAD_TOKEN = {'id': 123, 'expiry': '1999-12-31T23:59:59.00' }\n # Malformed expiry string\n UGLY_TOKEN = {'id': 123, 'expiry': 'This is not a date.' 
}\n        # Prepare the endpoint\n        self.__inst.add_rule(\"/ep_tkn\", \"TOKEN\")\n        # Only the unexpired, well-formed token should be accepted\n        self.assertTrue(self.__gen_req('/ep_tkn', 'GET',\n                                       ACLManager.AUTH_MODE_TOKEN, GOOD_TOKEN))\n        self.assertFalse(self.__gen_req('/ep_tkn', 'GET',\n                                        ACLManager.AUTH_MODE_TOKEN, BAD_TOKEN))\n        self.assertFalse(self.__gen_req('/ep_tkn', 'GET',\n                                        ACLManager.AUTH_MODE_TOKEN, UGLY_TOKEN))\n","sub_path":"test/pdm/framework/test_ACLManager.py","file_name":"test_ACLManager.py","file_ext":"py","file_size_in_byte":15258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145125367","text":"from flask import Flask,render_template,url_for,request\nimport re\nimport pandas as pd\nimport spacy\nfrom spacy import displacy\nimport pickle\nimport os\nimport docx\nimport sys, fitz\n\ndef getTextFromDoc(filename):\n    doc = docx.Document(filename)\n    fullText = []\n    for para in doc.paragraphs:\n        fullText.append(para.text)\n    return '\\n'.join(fullText)\n\ndef getTextFromPDF(filepath):\n    doc = fitz.open(filepath)\n    text = \"\"\n    for page in doc:\n        text = text + str(page.getText())\n\n    tx = \" \".join(text.split('\\n'))\n    return tx\n\n
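# Hedged smoke test (hypothetical file names, not part of the original app):\n# both helpers return plain text suitable for the NER step below, e.g.\n#   print(getTextFromDoc('resume.docx')[:100])\n#   print(getTextFromPDF('resume.pdf')[:100])\n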
#def resumeData(text,model):\n#    '''\n#    Data should be in the format demonstrated in test mode\n#    '''\n#    nlp_model = spacy.load(open(model, 'rb'))\n#    doc = nlp_model(text)\n#    final_output = \"\"\n#    for ent in doc.ents:\n#        final_output += f'{ent.label_.upper():{30}}- {ent.text}\\n'\n#    \n#    return final_output\n\nnlp = spacy.load('en_core_web_sm')\n\napp = Flask(__name__)\n\nner_model = pickle.load(open(\"trainFile\", 'rb'))\n\n@app.route('/')\ndef home():\n\treturn render_template(\"first_page_design.html\")\n\n@app.route('/extract', methods = [\"POST\"])\ndef extract():\n    if request.method == 'POST':\n        document = request.form['document']\n        doc_name = os.path.basename(document)\n        resume_text = \"\"\n\n        if(doc_name.endswith(\".docx\")):\n            resume_text = getTextFromDoc(document)\n        elif(doc_name.endswith(\".pdf\")):\n            resume_text = getTextFromPDF(document)\n        elif(doc_name.endswith(\".txt\")):\n            resume_text = document\n        else:\n            print(\"Invalid format!\\nPlease input a file of the following formats: .pdf, .docx and .txt\")\n\n        #rawtext = resumeData(resume_text, \"trainFile\")\n        rawtext = ner_model[0].predict(resume_text)\n        doc = nlp(rawtext)\n        d = []\n        for ent in doc.ents:\n            d.append((ent.label_, ent.text))\n        df = pd.DataFrame(d, columns=('named entity', 'output'))\n        NAME_named_entity = df.loc[df['named entity'] == 'NAME']['output']\n        DESIGNATION_named_entity = df.loc[df['named entity'] == 'DESIGNATION']['output']\n        EMAIL_named_entity = df.loc[df['named entity'] == 'EMAIL']['output']\n        LOCATION_named_entity = df.loc[df['named entity'] == 'LOCATION']['output']\n        COMPANIES_named_entity = df.loc[df['named entity'] == 'COMPANIES WORKED AT']['output']\n        COLLEGE_named_entity = df.loc[df['named entity'] == 'COLLEGE']['output']\n        GRADUATION_named_entity = df.loc[df['named entity'] == 'GRADUATION YEAR']['output']\n        PHONE_named_entity = df.loc[df['named entity'] == 'PHONE NUMBER']['output']\n        SKILLS_named_entity = df.loc[df['named entity'] == 'SKILLS']['output']\n\n#\t\tif choice == 'organization':\n#\t\t\tresults = ORG_named_entity\n#\t\t\tnum_of_results = len(results)\n#\t\telif choice == 'person':\n#\t\t\tresults = PERSON_named_entity\n#\t\t\tnum_of_results = len(results)\n#\t\telif choice == 'geopolitical':\n#\t\t\tresults = GPE_named_entity\n#\t\t\tnum_of_results = len(results)\n#\t\telif choice == 'money':\n#\t\t\tresults = MONEY_named_entity\n#\t\t\tnum_of_results = len(results)\n\n    output = NAME_named_entity+\", \"+DESIGNATION_named_entity+\", \"+EMAIL_named_entity+\", \"+LOCATION_named_entity+\", \"+COMPANIES_named_entity+\", \"+COLLEGE_named_entity+\", \"+GRADUATION_named_entity+\", \"+PHONE_named_entity+\", \"+SKILLS_named_entity\n\n    # render_template takes template context as keyword arguments; passing\n    # 'output' positionally raised a TypeError\n    return render_template('first_page_design.html', output=output) #, DESIGNATION_named_entity, EMAIL_named_entity\n\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True)","sub_path":"Week 12/Deployment - Zyad Al-Azazi/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39580461","text":"import json\nimport re\nfrom datetime import datetime\nfrom datetime import time as tm\nfrom time import mktime\n\nfrom django.db.models import Q\nfrom rest_framework.views import APIView\n\nfrom TimeManagement.settings import DEBUG\nfrom Users.models import Tokenaizer, User\nfrom .models import Affairs, Paterns,Icons\nfrom .serializer import (ItemSerializer, ItemAddSerializer,\n                         PaternsSerializer, PaternsAddSerializer, PaternsEditSerializer)\nfrom .views import _response, ADD, EDIT\n\ndef check_patern_data(request, response, query_type):\n    data = json.loads(request.data['data'])\n    response['status'] = 'success'\n    response['error'] = ''\n    user_empty = fast_access_empty = False\n    try:\n        user = Tokenaizer.objects.filter(token=request.data['token']).values('user').get()['user']\n        if not (data['user']==user and User.objects.filter(email=user)):\n            response['status'] = 'error'\n            response['error'] += 'You cannot create a template for this user, or the user is not registered.\\n'\n    except KeyError:\n        user_empty = True\n        response['status'] = 'error'\n        response['error'] += 'Required parameter not provided: user.\\n'\n\n    try:\n        if not user_empty:\n            icon = Icons.objects.filter(id=data['icon'],user__in=['all',user])\n            if not icon:\n                response['status'] = 'error'\n                response['error'] += 'The specified icon does not exist or does not belong to you.\\n'\n    except:\n        response['status'] = 'error'\n        response['error'] += 'Required parameter not provided: icon.\\n'\n    try:\n        if not (2 <= len(data['name']) < 50):\n            response['status'] = 'error'\n            response['error'] += 'The template name must be from 2 to 50 characters.\\n'\n    except:\n        response['status'] = 'error'\n        response['error'] += 'Required parameter not provided: name.\\n'\n    try:\n        try:\n            if not isinstance(data['affairs'],str):\n                response['status'] = 'error'\n                response['error'] += 'The affairs list must be a string of comma-separated identifiers.\\n'\n            else:\n                for i in data['affairs'].split(','):\n                    if int(i)<0:\n                        response['status'] = 'error'\n                        response['error'] += 'An identifier in the affairs list cannot be negative.\\n'\n                        break\n                    affair = Affairs.objects.filter(id=int(i))\n                    if not affair:\n                        response['status'] = 'error'\n                        response['error'] += 'Affair with id = %s was not found or does not belong to you.\\n' % int(i)\n        except ValueError:\n            response['status'] = 'error'\n            response['error'] += 'One or more identifiers are not numbers.\\n'\n    except KeyError:\n        response['status'] = 'error'\n        response['error'] += 'Required parameter not provided: affairs.\\n'\n    try:\n        if not (data['fast_access'] in ['true', 'false', 0, 1, '0', '1']):\n            response['status'] = 'error'\n            response['error'] += 'The fast_access value you specified is not available.\\n'\n    except:\n        fast_access_empty = True\n    try:\n        int(data['fast_access_index'])\n    except ValueError:\n        response['status'] = 
'error'\n        response['error'] += 'The fast_access_index you specified is not a number.\\n'\n    except KeyError:\n        if not fast_access_empty:\n            response['status'] = 'error'\n            response['error'] += 'When fast access is enabled, an index must be specified.\\n'\n    except:\n        response['status'] = 'error'\n        response['error'] += 'The fast_access_index you specified is undefined.\\n'\n    if len(response['error']):\n        return response\n    else:\n        response.pop('error')\n        return False\n\n
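# Hedged sketch of a request payload that passes check_patern_data (all values\n# illustrative; 'affairs' is a comma-separated string of Affairs ids owned by\n# the requesting user):\n#   data = {\"user\": \"user@example.com\", \"icon\": 1, \"name\": \"Morning routine\",\n#           \"affairs\": \"3,7,12\", \"fast_access\": \"true\", \"fast_access_index\": 0}\n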
class Patern(APIView):\n    def get(self, request, source=None):\n        response = {}\n        try:\n            token = request.META['HTTP_AUTHORIZATION'] # request.GET['token']#request.data['token']\n            if token.split(' ')[0] == 'Bearer':\n                try:\n                    token = token.split(' ')[1]\n                except IndexError:\n                    token = \"\"\n            else:\n                response['status'] = 'error'\n                response['error'] = \"The given token type is not supported.\"\n                return _response(response, source, request)\n        except KeyError:\n            response['status'] = 'error'\n            response['error'] = \"Required parameter not provided: token\"\n            return _response(response, source, request)\n        user = Tokenaizer.objects.filter(token=token)\n        if user:\n            user = user.values('id').get()['id']\n        paterns = Paterns.objects.filter(user=user)\n        serializer = PaternsSerializer(paterns, many=True)\n        response['status'] = 'success'\n        response['data'] = serializer.data\n        return _response(response, source, request)\n\n\nclass PaternAdd(APIView):\n    def post(self, request, source=None):\n        response = {}\n        try:\n            token = request.META['HTTP_AUTHORIZATION'] # request.GET['token']#request.data['token']\n            if token.split(' ')[0] == 'Bearer':\n                try:\n                    token = token.split(' ')[1]\n                except IndexError:\n                    token = \"\"\n            else:\n                response['status'] = 'error'\n                response['error'] = \"The given token type is not supported.\"\n                return _response(response, source, request)\n        except KeyError:\n            response['status'] = 'error'\n            response['error'] = \"Required parameter not provided: token\"\n            return _response(response, source, request)\n        email = Tokenaizer.objects.filter(token=token)\n        if email:\n            email = email.values('user').get()['user']\n        user = User.objects.filter(email=email)\n        if user:\n\n            if re.search(ADD, request.META['PATH_INFO']):\n                status = check_patern_data(request, response, 'add')\n                if status:\n                    return _response(status, source, request)\n                else:\n                    serializer = PaternsAddSerializer(data=json.loads(request.data['data']))\n            else:\n                try:\n                    patern_id = json.loads(request.data['data'])[\"id\"]\n                except:\n                    response['status'] = 'error'\n                    response['error'] = 'Failed to update the template. Required parameters may be missing.'\n                    return _response(response, source, request)\n                patern = Paterns.objects.filter(id=patern_id, user=email)\n                if not patern:\n                    response['status'] = 'error'\n                    response['error'] = 'The template you specified was not found or does not belong to you.'\n                    return _response(response, source, request)\n                status = check_patern_data(request, response, 'edit')\n                if status:\n                    return _response(status,source,request)\n                else:\n                    serializer = PaternsEditSerializer(patern, data=json.loads(request.data['data']), partial=True)\n            if serializer.is_valid(raise_exception=True):\n                serializer.save()\n                response['status'] = 'success'\n                response['data'] = 'Template successfully added/updated.'\n                return _response(response, source, request)\n            else:\n                response['status'] = 'error'\n                response['error'] = \"Failed to add/update the template.\"\n                return _response(response, source, request)\n        else:\n            response['status'] = 'error'\n            response['error'] = \"Unregistered users cannot add templates.\"\n            return _response(response, source, request)\n\n\nclass PaternPlace(APIView):\n    def post(self, request, source=None):\n        response = {}\n        try:\n            token = request.META['HTTP_AUTHORIZATION']#request.GET['token']#request.data['token']\n            if token.split(' ')[0]=='Bearer':\n                token = token.split(' ')[1]\n            else:\n                response['status'] = 'error'\n                response['error'] = \"The given token type is not supported.\"\n                return _response(response, source, request)\n        except KeyError:\n            response['status'] = 'error'\n            response['error'] = \"Required parameter not provided: token\"\n            return _response(response, source, request)\n        user = Tokenaizer.objects.filter(token=token)\n        if user:\n            user = user.values('user').get()['user']\n            user = User.objects.get(email=user)\n        if user:\n            try:\n                patern_id = json.loads(request.data['data'])['id']\n                start_date = json.loads(request.data['data'])['start_date']\n                start_time = json.loads(request.data['data'])['start_time']\n            except:\n                response['status'] = 'error'\n                response['error'] = \"Required parameters were not provided.\"\n                return _response(response, source, request)\n            patern = Paterns.objects.get(id=patern_id, user=user.id)\n            if patern:\n                paterns = patern.affairs[1:-1].split(',')\n                affairs = Affairs.objects.filter(~Q(status='deleted'), id__in=paterns).order_by('start_timestamp')\n                delta = [affairs.values()[i + 1]['start_timestamp'] - affairs.values()[i]['start_timestamp'] for i in\n                         range(len(affairs.values()) - 1)]\n\n                print(delta, start_date, start_time)\n                new_affairs = []\n                try:\n                    start_timestamp = int(start_date) + int(start_time)\n                except:\n                    response['status'] = 'error'\n                    response['error'] = \"The template start date and time were not provided.\"\n                    return _response(response, source, request)\n                for i, affair in enumerate(affairs.values()):\n                    affair['start_timestamp'] = start_timestamp\n                    end_timestamp = start_timestamp + affair['duration']\n                    start_datetime = datetime.fromtimestamp(start_timestamp)\n                    affair['start_date'] = int(mktime(datetime.combine(start_datetime.date(), tm(0, 0, 0)).timetuple()))\n                    affair['start'] = int(start_timestamp - affair['start_date'])\n                    end_datetime = datetime.fromtimestamp(end_timestamp)\n                    affair['end_date'] = int(mktime(datetime.combine(end_datetime.date(), tm(0, 0, 0)).timetuple()))\n                    affair['end'] = int(end_timestamp - affair['end_date'])\n                    affair['category'] = affair['category_id']\n                    affair['notifications'] = 1 if affair['notifications'] else 0\n                    affair['fast_access'] = 1 if affair['fast_access'] else 0\n\n                    new_affairs.append(affair)\n                    if i != 
len(affairs.values()) - 1:\n                        start_timestamp += int(delta[i])\n                response['data'] = []\n                for i in new_affairs:\n                    serializer = ItemAddSerializer(data=i)\n                    print(i)\n                    if serializer.is_valid(raise_exception=DEBUG):\n                        affair = serializer.save()\n                        return_serializer = ItemSerializer(Affairs.objects.filter(id=affair.id).get())\n                        response['data'].append(return_serializer.data)\n                    else:\n                        response['status'] = 'error'\n                        response['error'] = \"Failed to save the affairs.\"\n                        return _response(response, source, request)\n                response['status'] = 'success'\n                return _response(response, source, request)\n            else:\n                response['status'] = 'error'\n                response['error'] = \"The specified template does not belong to you.\"\n                return _response(response, source, request)\n\n        else:\n            response['status'] = 'error'\n            response['error'] = \"Unregistered users cannot add templates.\"\n            return _response(response, source, request)\n","sub_path":"MainPage/paterns.py","file_name":"paterns.py","file_ext":"py","file_size_in_byte":13558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"268794109","text":"# BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# END GPL LICENSE BLOCK #####\n\nbl_info = {\n    \"name\": \"Mifth Tools\",\n    \"author\": \"Paul Geraskin\",\n    \"version\": (0, 1, 0),\n    \"blender\": (2, 71, 0),\n    \"location\": \"3D Viewport\",\n    \"description\": \"Mifth Tools\",\n    \"warning\": \"\",\n    \"wiki_url\": \"\",\n    \"tracker_url\": \"\",\n    \"category\": \"Tools\"}\n\n\nif \"bpy\" in locals():\n    import imp\n    imp.reload(mifth_tools_cloning)\n    imp.reload(mifth_tools)\nelse:\n    from . import mifth_tools_cloning\n    from . 
import mifth_tools\n \n\nimport bpy\nfrom bpy.props import *\n\n\ndef getGroups(scene, context):\n\n lst = []\n obj = context.scene.objects.active\n for group in bpy.data.groups:\n if obj is not None and obj.name in group.objects:\n lst.append((group.name, group.name,\"\"))\n\n return lst\n\n\ndef register():\n bpy.mifthTools = dict()\n\n class MFTProperties(bpy.types.PropertyGroup):\n\n\n # Draw Cloned Settings\n drawStrokeLength = FloatProperty(\n default = 0.5,\n min = 0.001,\n max = 500.0\n )\n\n drawRandomStrokeScatter = FloatProperty(\n default = 0.0,\n min = 0.0,\n max = 500.0\n )\n\n drawClonesDirectionRotate = BoolProperty(\n name=\"drawClonesDirectionRotate\",\n description=\"drawClonesDirectionRotate...\",\n default=False\n )\n\n drawClonesRadialRotate = BoolProperty(\n name=\"drawClonesRadialRotate\",\n description=\"drawClonesRadialRotate...\",\n default=True\n )\n\n drawClonesNormalRotate = BoolProperty(\n name=\"drawClonesNormalRotate\",\n description=\"drawClonesNormalRotate...\",\n default=True\n )\n\n drawClonesOptimize = BoolProperty(\n name=\"drawClonesOptimize\",\n description=\"drawClonesOptimize...\",\n default=True\n )\n\n randNormalRotateClone = FloatProperty(\n default = 0.0,\n min = 0.0,\n max = 180.0\n )\n\n randDirectionRotateClone = FloatProperty(\n default = 0.0,\n min = 0.0,\n max = 180.0\n )\n\n randScaleClone = FloatProperty(\n default = 0.0,\n min = 0.0,\n max = 0.99\n )\n\n drawPressure = FloatProperty(\n default = 0.7,\n min = 0.0,\n max = 0.95\n )\n\n drawClonesAxis = EnumProperty(\n items = (('X', 'X', ''),\n ('-X', '-X', ''),\n ('Y', 'Y', ''),\n ('-Y', '-Y', ''),\n ('Z', 'Z', ''),\n ('-Z', '-Z', '')\n ),\n default = 'Z'\n )\n\n # Radial Clone Settings\n radialClonesAxis = EnumProperty(\n items = (('X', 'X', ''),\n ('Y', 'Y', ''),\n ('Z', 'Z', '')\n ),\n default = 'Z'\n )\n\n radialClonesAxisType = EnumProperty(\n items = (('Global', 'Global', ''),\n ('Local', 'Local', '')\n ),\n default = 'Global'\n )\n\n # Output Settings\n outputFolder = StringProperty(\n name=\"outputFolder\",\n subtype=\"NONE\",\n default=\"seq\"\n )\n\n outputSubFolder = StringProperty(\n name=\"outputSubFolder\",\n subtype=\"NONE\",\n default=\"ren\"\n )\n\n outputSequence = StringProperty(\n name=\"outputSequence\",\n subtype=\"NONE\",\n default=\"render\"\n )\n\n outputSequenceSize = IntProperty(\n default = 8,\n min = 1,\n max = 60\n )\n\n doOutputSubFolder = BoolProperty(\n name=\"do Output SubFolder\",\n description=\"do Output SubFolder...\",\n default=False\n )\n\n # Curve Animator Settings\n doUseSceneFrames = BoolProperty(\n name=\"do use scene frames\",\n description=\"do use scene frames...\",\n default=False\n )\n\n curveAniStartFrame = IntProperty(\n default = 1,\n min = 1,\n max = 10000\n )\n\n curveAniEndFrame = IntProperty(\n default = 100,\n min = 1,\n max = 10000\n )\n\n curveAniStepFrame = IntProperty(\n default = 10,\n min = 1,\n max = 10000\n )\n\n curveAniInterpolation = FloatProperty(\n default = 0.3,\n min = 0.0,\n max = 1.0\n )\n\n # MorfCreator settings\n morfCreatorNames = StringProperty(\n name=\"MorfNames\",\n subtype=\"NONE\",\n default=\"\"\n )\n\n morfUseWorldMatrix = BoolProperty(\n name=\"use world matrix\",\n description=\"use world matrix...\",\n default=False\n )\n\n morfApplyModifiers = BoolProperty(\n name=\"apply modifiers to morf\",\n description=\"apply modifiers to morf...\",\n default=False\n )\n\n # GroupInstance to Cursor\n getGroupsLst = EnumProperty(name='Get Groups',\n description='Get Groups.',\n items=getGroups)\n\n\n 
bpy.utils.register_module(__name__)\n\n    bpy.types.Scene.mifthTools = PointerProperty(\n        name=\"Mifth Tools Variables\",\n        type=MFTProperties,\n        description=\"Mifth Tools Properties\"\n    )\n\n\ndef unregister():\n    import bpy\n\n    del bpy.types.Scene.mifthTools\n    del bpy.mifthTools\n    bpy.utils.unregister_module(__name__)\n\n\nif __name__ == \"__main__\":\n    register()\n","sub_path":"blender/mifth_tools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288675463","text":"#!/usr/bin/python3\n'''\nNot just three regions; possible orderings:\n+ t1-3t1-t2-3t2\n+ t1-t2-3t1-3t2\n+ x0=t1, tt=t2\n'''\n\ndef f(l):\n    n,p1,v1,p2,v2 = l\n    # change coordinate!\n    if p1>p2:\n        p1 = n-p1\n    else:\n        p2 = n-p2\n    t1 = p1/v1\n    t2 = p2/v2\n    q1 = (t1<=t2)\n    tt = max(t1,t2)\n\n    #region0: [0-x0]\n    x0 = t1 if q1 else t2\n    y0 = p1 if q1 else p2\n    if n<=y0:\n        return x0\n\n    qv = v1 if q1 else v2\n    sv = v2 if q1 else v1\n    tv = v1+v2\n\n    #region1: [x0-x1]\n    k1 = qv/2\n    x1 = min(x0*3,tt)\n    y1 = y0 + k1*(x1-x0)\n    if n<=y1:\n        return x0 + (n-y0)/k1\n    y1 = y1 if x1!=tt else p1+p2+k1*(x1-x0) #might JUMP!\n\n    #region2: [x1-x2]\n    k2 = tv/2 if x1==tt else qv\n    x2 = max(x0*3,tt)\n    y2 = y1 + k2*(x2-x1)\n    if n<=y2:\n        return x1 + (n-y1)/k2\n    y2 = y2 if x2!=tt else p1+p2+k1*(x1-x0)+k2*(x2-x1)\n\n    #region3: [x2-x3]\n    k3 = qv + sv/2\n    x3 = tt*3\n    y3 = y2 + k3*(x3-x2)\n    if n<=y3:\n        return x2 + (n-y2)/k3\n    else:\n        return x3 + (n-y3)/tv #region4\n    #return (n+p1+p2)/tv\n\n# Input: first line T, then T lines of five floats \"n p1 v1 p2 v2\"\nT = int(input())\nfor _ in range(T):\n    l = list(map(float,input().split()))\n    print(f(l))\n\n","sub_path":"contests/icpc20sh/d错13.py","file_name":"d错13.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"428472534","text":"from django.shortcuts import render\nfrom .models import Tejidos, Grafo\nfrom django_pandas.io import read_frame\nfrom math import sqrt\n\ndef home(request):\n    return render(request, 'app/home.html')\n\n\ndef datos(request):\n    datos = Tejidos.objects.all()\n    procesado = procesa_tabla(datos)\n    return render(request, 'app/datos.html', {'misTejidos': datos, 'proceso': procesado})\n\n\ndef procesa_tabla(qs):\n    df = read_frame(qs)\n    return df\n\n\ndef resultados(request):\n    tejido = Tejidos.objects.all()\n    print('length: ' + str(len(tejido)))\n    df = read_frame(tejido)\n\n    mediaTemperatura = df['temperatura'].mean()\n\n    mediaColor = df['color'].mean()\n\n    mediaInflamacion = df['inflamacion'].mean()\n\n    # diccionario={'mediaT':mediaTemperatura, 'mediaC':mediaColor}\n    m = df.iloc[0, 2:5].mean()\n    # print(m)\n    # diccionario['mediaPorRegistro']=m\n\n\n    # fila1 = df.iloc[0, 2:5]\n    # lista = []\n\n    # for j in range(0,4):\n    #     for i in range(j+1, 5):\n    #         lista.append({'registro1': j, 'registro2': i, 'distancia':abs(df.iloc[j, 2:5]-df.iloc[i, 2:5]).sum() })\n\n    # lista=[{'registro1': j, 'registro2': i, 'distancia':abs(df.iloc[j, 2:5]-df.iloc[i, 2:5]).sum() } for j in range (0,4) for i in range (j+1, 5) ]\n\n    # lista = [{'registro1: ': df['id'][j], 'registro2: ': df['id'][i], 'distancia': abs(df.iloc[j, 2:5]-df.iloc[i, 2:5]).sum()} for j in range(0, len(tejido)-1) for i in range(j+1, len(tejido))]\n    lista = [{'r1: ': df['id'][j], 'r2: ': df['id'][i], 'distancia': sqrt((abs(df.iloc[j, 2:5]-df.iloc[i, 2:5]).sum())*2)} for j in range(0, len(tejido)-1) for i in range(j+1, len(tejido))]\n    lista2 = [{'r1: ': df['id'][j], 'r2: ': df['id'][i], 'conectado: ': True if sqrt((abs(df.iloc[j, 
2:5]-df.iloc[i, 2:5]).sum())*2)<5 else False} for j in range(0, len(tejido)-1) for i in range(j+1, len(tejido))]\n \n listafinal = [{'nodo': Grafo.objects.create(origen=Tejidos.objects.get(pk=df.iloc[j,0:1]), destino=Tejidos.objects.get(pk=df.iloc[i,0:1]), conectado= True if sqrt((abs(df.iloc[j, 2:5]-df.iloc[i, 2:5]).sum())*2)<5 else False).save() }for j in range(0, len(tejido)-1) for i in range(j+1, len(tejido))]\n # origen = Tejidos.objects.create(partes=20, temperatura=20, color=20, inflamacion=20)\n # destino = Tejidos.objects.create(partes=40, temperatura=40, color=40, inflamacio=40)\n # origen = Tejidos.objects.get(pk=lista2[1]['r1'])\n # destino = Tejidos.objects.get(pk=lista2[1]['r2'])\n # nodo = Grafo.objects.create(origen=origen, destino=destino, conectado=lista2[1]['conectado'])\n # nodo.save()\n \n # print(lista)\n \n # grafo = []\n # umbral = 5\n # for elemento in lista:\n # # grafo['vertice '+str(i)] = elemento['registro1']\n # # grafo['vertice '+str(i+1)] = elemento['registro2']\n\n # if elemento['distancia'] < umbral:\n # tupla = (elemento['registro1: '],\n # elemento['registro2: '], elemento['distancia'], 'Si')\n # else:\n # tupla = (elemento['registro1: '],\n # elemento['registro2: '], elemento['distancia'], 'No')\n # grafo.append(tupla)\n\n # print(grafo)\n # print(\"lllll\")\n # df.iloc[0, 2:5]-df.iloc[1, 2:5]\n # df.iloc[0, 2:5]-df.iloc[2, 2:5]\n # df.iloc[0, 2:5]-df.iloc[3, 2:5]\n # df.iloc[0, 2:5]-df.iloc[4, 2:5]\n\n # fila2 = df.iloc[1, 2:5]\n # fila3 = df.iloc[2, 2:5]\n # fila4 = df.iloc[3, 2:5]\n\n # filaRes = abs(fila2-fila1)\n # filaRes1 = abs(fila3-fila1)\n\n # resultadoSuma = filaRes.sum()\n # print(\"************************************\")\n # print(fila1)\n # print(fila2)\n # print(resultadoSuma)\n # print(fila4)\n # print(filaRes1)\n # print(\"*************************************\")\n\n diccionario = {'mediaT': mediaTemperatura, 'mediaC': mediaColor, 'm': m, 'mediaI': mediaInflamacion}\n diccionario['lista2']=lista2\n diccionario['lista']=lista\n diccionario['max'] = df.iloc[0, 2:5].max()\n # diccionario['miLista1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n # diccionario['miLista'] = [(i, i) for i in range(0, 100)]\n # diccionario['miLista2'] = [(x, y) for y in range(0, 6)for x in range(0, 11) if x == y]\n\n # print(df)\n return render(request, 'app/resultados.html', diccionario)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535905552","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport pkg_resources\nimport webbrowser\n\nimport click\n\n\ndef _get_profiles():\n json_profiles_filepath = pkg_resources.resource_filename('data', 'profiles.json')\n profiles = None\n\n with open(json_profiles_filepath) as json_profiles_file:\n profiles = json.load(json_profiles_file)\n\n return profiles\n\n@click.group()\ndef cli():\n pass\n\n@cli.command(short_help=\"Show ssebastianj's profile at a given website (if registered). Default: website\")\n@click.argument('profile_website', default='website', nargs=1)\n@click.option('--open', 'open_in_browser',\n is_flag=True, default=False,\n help='Open profile URL in the default browser installed.')\ndef show(profile_website, open_in_browser):\n profiles = _get_profiles()\n\n try:\n profile_url = profiles.get(profile_website)['url']\n except TypeError:\n click.echo('Ops! The website \"{}\" is not a registered profile.'.format(profile_website))\n except KeyError:\n click.echo('Ops! 
The website \"{}\" does not have a registered URL.'.format(profile_website))\n else:\n if open_in_browser:\n click.echo('Opening {url} in browser...'.format(url=profile_url))\n webbrowser.open(profile_url)\n else:\n click.echo(profile_url)\n\nif __name__ == '__main__':\n cli()\n","sub_path":"python/ssebastianj/ssebastianj/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364379787","text":"from api import app, db\nfrom flask import render_template, url_for\nimport os\nimport numpy as np\nimport pandas as pd\nfrom .models import Report, Puf, Cancer\nfrom sqlalchemy import func, desc\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy\n\n\ndef get_abs_path():\n \"\"\"\n This function takes no parameters and returns the api root directory pathway.\n :return: api directory pathway\n \"\"\"\n return os.path.abspath(os.path.dirname(__file__))\n\n\n@app.route('/')\ndef home():\n return render_template(\"home.html\", img_file=url_for('static', filename='img/cms_logo.jpg'))\n\n\n@app.route('/prevalence')\ndef prevalence():\n # State Average Disease Prevalence\n rows = db.session.query(Report.provider_state_code,func.avg(Report.percent_of_beneficiaries_identified_with_cancer),\n func.avg(Report.percent_of_beneficiaries_identified_with_atrial_fibrillation),\n func.avg(Report.percent_of_beneficiaries_identified_with_alzheimers_disease_or_dementia),\n func.avg(Report.percent_of_beneficiaries_identified_with_asthma),\n func.avg(Report.percent_of_beneficiaries_identified_with_heart_failure),\n func.avg(Report.percent_of_beneficiaries_identified_with_chronic_kidney_disease),\n func.avg(Report.percent_of_beneficiaries_identified_with_chronic_obstructive_pulmonary_disease),\n func.avg(Report.percent_of_beneficiaries_identified_with_depression),\n func.avg(Report.percent_of_beneficiaries_identified_with_diabetes),\n func.avg(Report.percent_of_beneficiaries_identified_with_hyperlipidemia),\n func.avg(Report.percent_of_beneficiaries_identified_with_hypertension),\n func.avg(Report.percent_of_beneficiaries_identified_with_ischemic_heart_disease),\n func.avg(Report.percent_of_beneficiaries_identified_with_osteoporosis),\n func.avg(Report.percent_of_beneficiaries_identified_with_rheumatoid_arthritis_osteoarthritis),\n func.avg(Report.percent_of_beneficiaries_identified_with_schizophrenia_other_psychotic_disorders),\n func.avg(Report.percent_of_beneficiaries_identified_with_stroke)).\\\n order_by(Report.provider_state_code).group_by(Report.provider_state_code).all()\n\n state_avg = []\n for elem in rows:\n state_tup = tuple()\n state_tup += (elem[0],)\n i=1\n while i < len(elem):\n state_tup += (round(elem[i],2),)\n i+=1\n state_avg += [state_tup]\n\n # Overall National Prevalence of Diseases\n overall_prev = db.session.query(func.avg(Report.percent_of_beneficiaries_identified_with_cancer),\n func.avg(Report.percent_of_beneficiaries_identified_with_atrial_fibrillation),\n func.avg(\n Report.percent_of_beneficiaries_identified_with_alzheimers_disease_or_dementia),\n func.avg(Report.percent_of_beneficiaries_identified_with_asthma),\n func.avg(Report.percent_of_beneficiaries_identified_with_heart_failure),\n func.avg(Report.percent_of_beneficiaries_identified_with_chronic_kidney_disease),\n func.avg(\n Report.percent_of_beneficiaries_identified_with_chronic_obstructive_pulmonary_disease),\n func.avg(Report.percent_of_beneficiaries_identified_with_depression),\n 
func.avg(Report.percent_of_beneficiaries_identified_with_diabetes),\n                                    func.avg(Report.percent_of_beneficiaries_identified_with_hyperlipidemia),\n                                    func.avg(Report.percent_of_beneficiaries_identified_with_hypertension),\n                                    func.avg(Report.percent_of_beneficiaries_identified_with_ischemic_heart_disease),\n                                    func.avg(Report.percent_of_beneficiaries_identified_with_osteoporosis),\n                                    func.avg(\n                                        Report.percent_of_beneficiaries_identified_with_rheumatoid_arthritis_osteoarthritis),\n                                    func.avg(\n                                        Report.percent_of_beneficiaries_identified_with_schizophrenia_other_psychotic_disorders),\n                                    func.avg(Report.percent_of_beneficiaries_identified_with_stroke)).all()[0]\n    overall_round = []\n    for i in range(len(overall_prev)):\n        overall_round += [round(float(overall_prev[i]), 2)]  # round to 2 decimals\n    state_avg += [('Total Avg',) + tuple(overall_round)]\n    diseases = [\"Cancer\", \"A-Fib\", \"Alzheimers\", \"Asthma\", \"Heart Fail\",\n                \"Kidney Dis\", \"Pulmonary Dis\", \"Depression\", \"Diabetes\",\n                \"Hyperlipidemia\", \"Hypertension\", \"Ischemic Heart Dis\", \"Osteoporosis\",\\\n                \"Rheumatoid Arthritis\", \"Schizophrenia\", \"Stroke\"]\n    ## D3 Bar Plot\n    overall_freq = []\n    for i in range(len(overall_round)):\n        overall_freq += [round((overall_round[i] / 100), 5)]\n    # build from a dict so 'frequency' stays numeric; np.column_stack cast\n    # every value to str, making the sort below lexicographic\n    overall_bar = pd.DataFrame({'diseases': diseases, 'frequency': overall_freq})  # DataFrame\n    tsv_path = os.path.join(get_abs_path(), 'static', 'tmp', 'overall_prev.tsv')  # TSV path\n    overall_bar.to_csv(tsv_path, sep='\\t', header=['disease', 'frequency'])  # TSV from DataFrame\n\n    # Top 5 Prevalent Diseases\n    top_diseases = overall_bar.sort_values(['frequency'], ascending=False).as_matrix()[:5]  # sort by frequency\n    top_perc=[]\n    for elem in top_diseases:\n        dis = [elem[0]]  # disease\n        dis +=[str(float(elem[1])*100)]  # prevalence\n        top_perc += [dis]\n    return render_template(\"state.html\", rows=state_avg, top_disease = top_perc,\n                           prev_js=url_for('static', filename='js/prevalence.v3.min.js'),\n                           prev_js1=url_for('static', filename='js/prev.tip.v0.6.3.js'),\n                           prev_file=url_for('static', filename='tmp/overall_prev.tsv'))\n\n\n
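# Hedged sketch of tmp/overall_prev.tsv as written above (the unnamed first\n# column is to_csv's default index; the frequency value is illustrative only):\n#   \tdisease\tfrequency\n#   0\tCancer\t0.07\n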
@app.route('/cancer')\ndef map():\n    # State Cancer Prevalence\n    rows = db.session.query(Report.provider_state_code, func.avg(Report.percent_of_beneficiaries_identified_with_cancer)).\\\n        filter(Report.provider_state_code != 'DC').order_by(Report.provider_state_code).group_by(Report.provider_state_code).all()\n    state_lst=[]\n    for i in range(len(rows)):\n        state = tuple()\n        state += (str(rows[i][0]),)  # state\n        state += (round(rows[i][1],2),)  # prevalence -- rounded to 2 decimals\n        state_lst+=[state]\n    dict_state = dict(state_lst)  # dictionary (key: state -- value: frequency)\n\n    us_avg = np.average(np.array(state_lst)[:,1].astype(np.float64))  # national average\n\n    # Average CMS Costs for Cancer Beneficiaries\n    rows_cancer_cost = db.session.query(Report.provider_state_code,\n                                        func.avg(Report.percent_of_beneficiaries_identified_with_cancer),\n                                        func.avg(Report.total_medicare_standardized_payment_amount)).order_by(\n        Report.provider_state_code).\\\n        group_by(Report.provider_state_code).all()\n\n    state_costs_lst = []\n    for i in range(len(rows_cancer_cost)):\n        state_costs = tuple()\n        state_costs += (str(rows_cancer_cost[i][0]),)\n        state_costs += (round(float(rows_cancer_cost[i][1]) * float(rows_cancer_cost[i][2]), 2),)\n        state_costs_lst += [state_costs]\n    col = ['state', 'costs']\n    state_costs_df = pd.DataFrame(state_costs_lst, columns=col)  # DataFrame\n    sorted_state_costs_df = state_costs_df.sort_values(['costs'], ascending=False)  # sorted by costs\n    lowest_state_costs = sorted_state_costs_df.iloc[-5:].as_matrix()[::-1]  # States w/ lowest costs\n    highest_state_costs = sorted_state_costs_df.iloc[:5].as_matrix()  # States w/ highest costs\n    max_state = [highest_state_costs[0][0], \"%.2f\" % (highest_state_costs[0][1])]  # highest cost state\n    min_state = [lowest_state_costs[0][0], \"%.2f\" % (lowest_state_costs[0][1])]  # lowest cost state\n    ## D3 plot - create TSV file\n    cancer_cost_path = os.path.join(get_abs_path(), 'static', 'tmp', 'cancer_costs.tsv')  # TSV path\n    state_costs_df.to_csv(cancer_cost_path, sep='\\t', header=col)  # TSV from DataFrame\n\n    # Distribution of Cancer Prevalence\n    rows_dist = db.session.query(Report.percent_of_beneficiaries_identified_with_cancer).all()\n    dist_df = pd.DataFrame(rows_dist, columns=['cancer_distribution']).dropna()\n    ## histogram\n    plt.figure()\n    h = dist_df['cancer_distribution'].plot.hist(bins=50, figsize=(10, 7), color='green',\n                                                 title='Histogram of Cancer Prevalence amongst CMS Beneficiaries')\n    h.set_xlabel('Prevalence (%)')\n    hist_path = os.path.join(get_abs_path(), 'static', 'tmp', 'cancer_dist.png')\n    h.figure.savefig(hist_path, transparent=True)  # save figure\n    plt.close()\n    ## calculate quartiles\n    canc_dist = dist_df['cancer_distribution'].copy()\n    canc_dist.sort_values(inplace=True)\n    q1, q2, q3 = canc_dist.quantile([0.25, 0.5, 0.75])  # quartiles\n    irq = q3 - q1\n    outlier = {'upper': q3 + 1.5 * irq, 'lower': q1 - 1.5 * irq}  # outlier bounds; the lower bound uses q1, not q3\n    return render_template(\"map.html\", d_state=dict_state, rows=state_lst, us_avg=us_avg, outlier=outlier,\n                           low_cost = lowest_state_costs, high_cost = highest_state_costs, max_state=max_state,\n                           min_state=min_state,\n                           chist_fig=url_for('static', filename='tmp/cancer_dist.png'),\n                           js_file=url_for('static', filename='js/datamaps.usa.min.js'),\n                           cancer_js = url_for('static', filename='js/cancer.v3.min.js'),\n                           topo_js = url_for('static', filename='js/cancer_topojson.v1.min.js'),\n                           cancer_costs_file=url_for('static', filename='tmp/cancer_costs.tsv'))\n\n\n@app.route('/cancer/risk')\ndef risks():\n    # Patients Dx with Cancer -- Age\n    rows_age = db.session.query(Report.percent_of_beneficiaries_identified_with_cancer,\n                                Report.number_of_beneficiaries_age_less_65, Report.number_of_beneficiaries_age_65_to_74,\n                                Report.number_of_beneficiaries_age_75_to_84,\n                                Report.number_of_beneficiaries_age_greater_84).all()\n    col_c_age = ['prevalence', 'age_less_65', 'age_65_74', 'age_75_84', 'age_greater_84']\n    canc_age_df = pd.DataFrame(rows_age, columns=col_c_age)  ## DataFrame\n\n    ## Extract categorical variables - age\n    ### Find Prevalence per Age group\n    c_age_prev = canc_age_df['prevalence'] / 100\n    prev_age_0 = (c_age_prev * canc_age_df['age_less_65']).dropna().as_matrix()\n    prev_age_65 = (c_age_prev * canc_age_df['age_65_74']).dropna().as_matrix()\n    prev_age_75 = (c_age_prev * canc_age_df['age_75_84']).dropna().as_matrix()\n    prev_age_85 = (c_age_prev * canc_age_df['age_greater_84']).dropna().as_matrix()\n    ### Find Number per Age group using 
Prevalence\n canc_age_0 = np.column_stack((['age_less_65']*len(prev_age_0), prev_age_0))\n canc_age_65 = np.column_stack((['age_65_74']*len(prev_age_65), prev_age_65))\n canc_age_75 = np.column_stack((['age_75_84']*len(prev_age_75), prev_age_75))\n canc_age_85 = np.column_stack((['age_greater_84']*len(prev_age_85), prev_age_85))\n canc_age = np.vstack((canc_age_0, canc_age_65, canc_age_75, canc_age_85))\n age_dist_df = pd.DataFrame({'age': canc_age[:,0],\n 'Num_with_Cancer': canc_age[:,1].astype(np.float64)})\n ## determine outlier bounds\n age_dist = age_dist_df['Num_with_Cancer'].copy()\n age_dist.sort_values(inplace=True)\n aq1, aq2, aq3 =age_dist.quantile([0.25, 0.5, 0.75]) #quartiles\n airq = aq3 - aq1\n outlier = {'upper': aq3 + 1.5 * airq, 'lower': aq3 - 1.5 * airq}\n ## box and whisker plot\n plt.figure(figsize=(16, 10))\n sns.set(font_scale=2.0)\n age_plot = sns.boxplot(x='age', y='Num_with_Cancer',\n data=age_dist_df[age_dist_df['Num_with_Cancer'] < outlier['upper']+30])\n age_plot.set(title='Cancer Prevalence by Age Groups')\n age_box_path = os.path.join(get_abs_path(), 'static', 'tmp', 'age_prev.png')\n age_plot.figure.savefig(age_box_path, transparent=True) #save figure\n plt.close()\n\n # Patients Dx with Cancer -- Age\n rows_race = db.session.query(Report.percent_of_beneficiaries_identified_with_cancer,\n Report.number_of_non_hispanic_white_beneficiaries,\n Report.number_of_african_american_beneficiaries,\n Report.number_of_asian_pacific_islander_beneficiaries,\n Report.number_of_hispanic_beneficiaries,\n Report.number_of_american_indian_alaskan_native_beneficiaries,\n Report.number_of_beneficiaries_with_race_not_elsewhere_classified).all()\n\n col_c_race = ['prevalence', 'white', 'african_am', 'api', 'hispanic', 'native_am', 'other_race']\n canc_race_df = pd.DataFrame(rows_race, columns=col_c_race)\n ## Extract categorical variables - race\n ### Find Prevalence per Race group\n c_race_prev = canc_race_df['prevalence'] / 100\n prev_white = (c_race_prev * canc_race_df['white']).dropna().as_matrix()\n prev_afric = (c_race_prev * canc_race_df['african_am']).dropna().as_matrix()\n prev_api = (c_race_prev * canc_race_df['api']).dropna().as_matrix()\n prev_hispanic = (c_race_prev * canc_race_df['hispanic']).dropna().as_matrix()\n prev_native = (c_race_prev * canc_race_df['native_am']).dropna().as_matrix()\n prev_other = (c_race_prev * canc_race_df['other_race']).dropna().as_matrix()\n ### Find Number per Race group using Prevalence\n canc_white = np.column_stack((['White'] * len(prev_white), prev_white))\n canc_afric = np.column_stack((['African_American'] * len(prev_afric), prev_afric))\n canc_api = np.column_stack((['API'] * len(prev_api), prev_api))\n canc_hispanic = np.column_stack((['Hispanic'] * len(prev_hispanic), prev_hispanic))\n canc_native = np.column_stack((['Native_American'] * len(prev_native), prev_native))\n canc_other = np.column_stack((['Other'] * len(prev_other), prev_other))\n canc_race = np.vstack((canc_white, canc_afric, canc_api, canc_hispanic, canc_native, canc_other))\n race_dist_df = pd.DataFrame({'race': canc_race[:, 0],\n 'Num_with_Cancer': canc_race[:, 1].astype(np.float64)})\n ## determine outlier bounds\n race_dist = race_dist_df['Num_with_Cancer'].copy()\n race_dist.sort_values(inplace=True)\n rq1, rq2, rq3 = race_dist.quantile([0.25, 0.5, 0.75]) # quartiles\n rirq = rq3 - rq1\n outlier = {'upper': rq3 + 1.5 * rirq, 'lower': rq3 - 1.5 * rirq}\n ## box and whisker plot\n plt.figure(figsize=(16, 10))\n sns.set(font_scale=2.0)\n race_plot = 
sns.boxplot(x='race', y='Num_with_Cancer', data=race_dist_df[race_dist_df['Num_with_Cancer'] <\n outlier['upper'] + 20])\n race_plot.set(title='Cancer Prevalence by Race')\n race_box_path = os.path.join(get_abs_path(), 'static', 'tmp', 'race_prev.png')\n race_plot.figure.savefig(race_box_path, transparent=True) # save figure\n plt.close()\n\n # Cancer Mortality Rate by Race\n mort_rows = db.session.query(Cancer.race, func.avg(Cancer.value)).filter(Cancer.race != 'Multiracial').\\\n filter(Cancer.race != 'All').filter(Cancer.race != 'American Indian/Alaska Native').\\\n group_by(Cancer.race).order_by(Cancer.race).all()\n mort_df = pd.DataFrame(mort_rows, columns=['Race', 'Avg_Mortality_Rate']) #DataFrame\n ## Bar Plot -- Average Mortality Rate\n plt.figure(figsize=(16,10))\n sns.set(font_scale=2.0) #scale font size\n mortbar_plot = sns.barplot(x=\"Race\", y=\"Avg_Mortality_Rate\", data=mort_df)\n mortbar_plot.set(title='Average Mortality Rate by Race')\n mortbar_path = os.path.join(get_abs_path(), 'static', 'tmp', 'mort_bar.png')\n mortbar_plot.figure.savefig(mortbar_path, transparent=True) # save figure\n plt.close()\n\n # Annual Average Mortality Rate by Race\n year_mort_rows = db.session.query(Cancer.year,Cancer.race, func.avg(Cancer.value)).\\\n group_by(Cancer.year, Cancer.race).filter(Cancer.race != 'Multiracial').filter(Cancer.race != 'All').\\\n filter(Cancer.race != 'American Indian/Alaska Native').all()\n annual_mort_df = pd.DataFrame(year_mort_rows, columns=['Year','Race', 'Mortality_Rate']) # DataFrame\n # Line Plot -- Trajectory of Annual Average Mortality Rate\n plt.figure(figsize=(16,10))\n sns.set(font_scale=1.8)\n annual_plot = sns.pointplot(x=\"Year\", y=\"Mortality_Rate\", hue=\"Race\", data =annual_mort_df)\n plt.legend(bbox_to_anchor=(.90, 1), loc=2)\n annual_plot.set(title='Annual Average Mortality Rate by Race')\n annual_path = os.path.join(get_abs_path(), 'static', 'tmp', 'mort_year.png')\n annual_plot.figure.savefig(annual_path, transparent=True) # save figure\n plt.close()\n return render_template('cancer_risks.html', risk_img=url_for('static', filename='img/riskfactor.png'),\n age_boxfig=url_for('static', filename='tmp/age_prev.png'),\n race_boxfig=url_for('static', filename='tmp/race_prev.png'),\n mortbar_fig=url_for('static', filename='tmp/mort_bar.png'),\n mort_fig=url_for('static', filename='tmp/mort_year.png'))\n\n\n@app.route('/cost')\ndef cost():\n # Average CMS Costs by State\n rows = db.session.query(Report.provider_state_code, func.avg(Report.total_medicare_standardized_payment_amount)).\\\n filter(Report.provider_state_code != 'DC').order_by(Report.provider_state_code).group_by(Report.provider_state_code).all()\n\n state_lst=[]\n for i in range(len(rows)):\n state = tuple()\n state += (rows[i][0],) # state\n state += (round(rows[i][1],2),) # average cost -- rounded by 2 decimals\n state_lst+=[state]\n state_cost=pd.DataFrame(state_lst, dtype=int) # DataFrame\n ## D3 Grouped Bar Plot\n csv_path = os.path.join(get_abs_path(), 'static', 'tmp', 'state_cost.csv') # csv path\n state_cost.to_csv(csv_path, index=False, header= [\"name\",\"value\"]) # CSV from DataFrame\n\n # Costs by Age -- Top 5 States\n top_rows = db.session.query(Report.provider_state_code, func.avg(Report.total_medicare_standardized_payment_amount),\n func.avg(Report.number_of_beneficiaries_age_less_65),\n func.avg(Report.number_of_beneficiaries_age_65_to_74),\n func.avg(Report.number_of_beneficiaries_age_75_to_84),\n func.avg(Report.number_of_beneficiaries_age_greater_84)). 
\\\n filter(Report.provider_state_code != 'DC').order_by(\n func.avg(Report.total_medicare_standardized_payment_amount).desc()). \\\n group_by(Report.provider_state_code).limit(5).all()\n data = []\n for row in top_rows:\n state_sum=float(np.sum(row[2:]))\n state_cost=tuple()\n state_cost+=(row[0],) #state\n state_cost+=(round(row[1], 2),) #total payment amount\n state_cost+=( int(((float(row[2])) / state_sum) * row[1]),) #<65\n state_cost+=( int(((float(row[3])) / state_sum) * row[1]),) #65 to 74\n state_cost+=( int(((float(row[4])) / state_sum) * row[1]),) #75 to 84\n state_cost+=( int(((float(row[5])) / state_sum) * row[1]),) #>85\n data+=[state_cost]\n\n #Costs by facility\n rows_total = db.session.query(Puf.place_of_service,\n func.avg(Report.total_medicare_standardized_payment_amount)).\\\n join(Report, Report.npi == Puf.npi).group_by(Puf.place_of_service).all()\n rows_med = db.session.query(Puf.place_of_service,\n func.avg(Report.total_medical_medicare_standardized_payment_amount)). \\\n join(Report, Report.npi == Puf.npi).group_by(Puf.place_of_service).all()\n rows_drug = db.session.query(Puf.place_of_service,\n func.avg(Report.total_drug_medicare_standardized_payment_amount)). \\\n join(Report, Report.npi == Puf.npi).group_by(Puf.place_of_service).all()\n lst_total = []\n ## Overall CMS Costs\n for elem in rows_total:\n if str(elem[0]) == 'F':\n new_row = ['facility'] + [elem[1]] + ['Total']\n else:\n new_row = ['office'] + [elem[1]] + ['Total']\n lst_total += [new_row]\n ## Medical-related CMS Costs\n for elem in rows_med:\n if str(elem[0]) == 'F':\n new_row = ['facility'] + [elem[1]] + ['Medical']\n else:\n new_row = ['office'] + [elem[1]] + ['Medical']\n lst_total += [new_row]\n ## Drug-related CMS costs\n for elem in rows_drug:\n if str(elem[0]) == 'F':\n new_row = ['facility'] + [elem[1]] + ['Drug']\n else:\n new_row = ['office'] + [elem[1]] + ['Drug']\n lst_total +=[new_row]\n\n facil_df = pd.DataFrame(lst_total, columns=['location', 'amount', 'cost type']) #DataFrame\n facil_grp = facil_df.groupby(facil_df['location']) # group by facility\n facil_mean = facil_grp.mean().as_matrix() # mean\n facil_std = facil_grp.std().as_matrix() # standard deviation\n for i in range(len(facil_mean)):\n facil_mean[i] = round(facil_mean[i], 2)\n facil_std[i] = round(facil_std[i], 2)\n ## Bar Plot\n plt.figure()\n sns.set(font_scale=1.0)\n facil_plot = sns.factorplot(x='cost type', y='amount', hue='location', data = facil_df, kind='bar')\n facil_plot.set_ylabels(\"Average Costs ($)\")\n facil_path = os.path.join(get_abs_path(), 'static', 'tmp', 'facil_cost.png')\n facil_plot.savefig(facil_path, transparent=True) # save figure\n plt.close()\n ## Ratio of number of services amongst facility type\n row_ratio = db.session.query(Puf.place_of_service, func.sum(Report.number_of_services)).\\\n join(Report, Report.npi == Puf.npi).group_by(Puf.place_of_service).all()\n total_services = row_ratio[0][1] + row_ratio[1][1]\n perc_f = (float(row_ratio[0][1]) / total_services) * 100 # % facility\n perc_o = (float(row_ratio[1][1]) / total_services) * 100 # % other\n num_f = [round(perc_f, 2), row_ratio[0][1]] # number of services -- facility\n num_o = [round(perc_o, 2), row_ratio[1][1]] # number of services -- other\n return render_template(\"state_cost.html\", num_f=num_f, num_o=num_o, mean=facil_mean, std=facil_std,\n data_file = url_for('static', filename='tmp/state_cost.csv'), data=data,\n facil_fig = url_for('static', filename='tmp/facil_cost.png'),\n cost_js = url_for('static', 
filename='js/cost.v3.min.js'))\n\n\n@app.route('/cost/demo')\ndef demographics():\n # CMS Costs by Age groups\n rows_age = db.session.query(func.sum(Report.total_medicare_standardized_payment_amount),\n func.sum(Report.total_medical_medicare_standardized_payment_amount),\n func.sum(Report.total_drug_medicare_standardized_payment_amount),\n func.sum(Report.number_of_beneficiaries_age_less_65),\n func.sum(Report.number_of_beneficiaries_age_65_to_74),\n func.sum(Report.number_of_beneficiaries_age_75_to_84),\n func.sum(Report.number_of_beneficiaries_age_greater_84)).all()\n rows_age = list(rows_age[0])\n ## Costs Amongst Age Groups\n ### Extract Ratio of Age Groups (Categories)\n total_age = sum(rows_age[3:])\n age_0_64 = float(rows_age[3]) / total_age #0-64 yrs\n age_65_74 = float(rows_age[4]) / total_age #65-74 yrs\n age_75_84 = float(rows_age[5]) / total_age #75-84 yrs\n age_85 = float(rows_age[6]) / total_age #85+ yrs\n age = [age_0_64, age_65_74, age_75_84, age_85] #age groups -- categories\n ### Calculate Costs Amongst each Age group\n medicare_amt_age = [] # overall CMS costs\n medicare_medical_amt_age = [] # medical-related CMS costs\n medicare_drug_amt_age = [] # drug-related CMS costs\n for i in range(len(age)):\n medicare_amt_age += [round(rows_age[0] * age[i],2)]\n medicare_medical_amt_age += [round(rows_age[1] * age[i],2)]\n medicare_drug_amt_age += [round(rows_age[2] * age[i],2)]\n costs = ['Medicare Amount ($)', 'Medical Amount ($)', 'Drug Amount ($)']\n age_data = np.vstack((medicare_amt_age, medicare_medical_amt_age, medicare_drug_amt_age))\n age_df = pd.DataFrame({'costs': costs,\n 'age 0-64': age_data[:,0],\n 'age 65-74': age_data[:,1],\n 'age 75-84': age_data[:,2],\n 'age 84+': age_data[:,3]})\n ## D3 Grouped Bar Plot - Costs by Age\n age_path = os.path.join(get_abs_path(), 'static', 'tmp', 'cost_age.csv') # CSV path\n age_df.to_csv(age_path, sep=',', index=False) # CSV from DataFrame\n ## Table - CMS Costs by Age\n costs_age = np.column_stack((costs+['Total'], np.vstack((age_data, np.sum(age_data, axis=0)))))\n ## Table - Age Population Ratio\n age_perc = np.round(age, 4) * 100\n age_ratio = np.column_stack((['Count (n)', 'Percentage(%)'],np.vstack((rows_age[3:], age_perc))))\n\n\n # CMS Costs by Race/Ethnicity groups\n rows_race = db.session.query(func.sum(Report.number_of_non_hispanic_white_beneficiaries),\n func.sum(Report.number_of_african_american_beneficiaries),\n func.sum(Report.number_of_asian_pacific_islander_beneficiaries),\n func.sum(Report.number_of_hispanic_beneficiaries),\n func.sum(Report.number_of_american_indian_alaskan_native_beneficiaries),\n func.sum(Report.number_of_beneficiaries_with_race_not_elsewhere_classified)).all()\n rows_race = rows_race[0]\n ## Costs Amongst Race Groups\n ### Extract Ratio of Race Groups (Categories)\n total_race = sum(rows_race)\n white = float(rows_race[0]) / total_race # Non-hispanic Whites\n african_am = float(rows_race[1]) / total_race # African Americans\n api = float(rows_race[2]) / total_race # Asian Pacific Islanders\n hispanic = float(rows_race[3]) / total_race # Hispanic\n native_am = float(rows_race[4]) / total_race # Native Americans\n other_race = float(rows_race[5]) / total_race # Other\n race = [white, african_am, api, hispanic, native_am, other_race]\n ### Calculate Costs Amongst each Race group\n medicare_amt_race = [] #overall CMS costs\n medicare_medical_amt_race = [] # medical-related CMS costs\n medicare_drug_amt_race = [] # drug-related CMS costs\n for i in range(len(race)):\n medicare_amt_race += 
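# --- Added illustrative sketch (not in the original source) ---
# The /cost/demo route splits each total payment across demographic groups in
# proportion to the group's share of beneficiaries. The same allocation step,
# distilled (all numbers below are made up for illustration):
total_payment = 1000.0
group_counts = {'age 0-64': 20, 'age 65-74': 50, 'age 75-84': 20, 'age 84+': 10}
total_n = sum(group_counts.values())
allocated = {g: round(total_payment * n / total_n, 2) for g, n in group_counts.items()}
# -> {'age 0-64': 200.0, 'age 65-74': 500.0, 'age 75-84': 200.0, 'age 84+': 100.0}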
[round(rows_age[0] * race[i],2)]\n medicare_medical_amt_race += [round(rows_age[1] * race[i],2)]\n medicare_drug_amt_race += [round(rows_age[2] * race[i],2)]\n race_data = np.vstack((medicare_amt_race, medicare_medical_amt_race, medicare_drug_amt_race))\n race_df = pd.DataFrame({'costs': costs,\n 'White': race_data[:, 0],\n 'African-American': race_data[:, 1],\n 'Asian-Pacific Islander': race_data[:, 2],\n 'Hispanic': race_data[:, 3],\n 'Native American': race_data[:, 4],\n 'Other Race': race_data[:, 5]})\n ## D3 Grouped Bar Plot - Costs by Race\n race_path = os.path.join(get_abs_path(), 'static', 'tmp', 'cost_race.csv') # CSV path\n race_df.to_csv(race_path, sep=',', index=False) # CSV from DataFrame\n ## Table - CMS Costs by Race\n costs_race = np.column_stack((costs+['Total'], np.vstack((race_data, np.sum(race_data, axis=0)))))\n ## Table - Race Population Ratio\n race_perc = np.round(race, 4) * 100\n race_ratio = np.column_stack((['Count (n)', 'Percentage(%)'],np.vstack((rows_race[:6], race_perc))))\n\n # CMS Costs by Sex\n rows_sex = db.session.query(func.sum(Report.number_of_female_beneficiaries),\n func.sum(Report.number_of_male_beneficiaries)).all()\n\n rows_sex = rows_sex[0]\n ## Costs Amongst Sex Groups\n ### Extract Ratio of Sex Groups (Categories)\n total_sex = sum(rows_sex)\n female = float(rows_sex[0]) / total_sex # female\n male = float(rows_sex[1]) / total_sex # male\n sex = [female, male]\n ### Calculate Costs Amongst each Sex group\n medicare_amt_sex = [] # overall CMS costs\n medicare_medical_amt_sex = [] # medical-related CMS costs\n medicare_drug_amt_sex = [] # drug-related CMS costs\n for i in range(len(sex)):\n medicare_amt_sex += [round(rows_age[0] * sex[i], 2)]\n medicare_medical_amt_sex += [round(rows_age[1] * sex[i], 2)]\n medicare_drug_amt_sex += [round(rows_age[2] * sex[i], 2)]\n sex_data = np.vstack((medicare_amt_sex, medicare_medical_amt_sex, medicare_drug_amt_sex))\n sex_df = pd.DataFrame({'costs': costs,\n 'Female': sex_data[:, 0],\n 'Male': sex_data[:, 1]})\n ## D3 Grouped Bar Plot - Costs by Sex\n sex_path = os.path.join(get_abs_path(), 'static', 'tmp', 'cost_sex.csv') # CSV path\n sex_df.to_csv(sex_path, sep=',', index=False) # CSV from DataFrame\n ## Table - CMS Costs by Sex\n costs_sex = np.column_stack((costs+['Total'], np.vstack((sex_data, np.sum(sex_data, axis=0)))))\n ## Table - Sex Population Ratio\n sex_perc = np.round(sex, 4) * 100\n sex_ratio = np.column_stack((['Count (n)', 'Percentage (%)'],np.vstack((rows_sex[:2], sex_perc))))\n\n # Correlation - Costs & Demographics\n rows_heatmap = db.session.query(Report.total_medicare_standardized_payment_amount,\n Report.total_medical_medicare_standardized_payment_amount,\n Report.total_drug_medicare_standardized_payment_amount,\n Report.number_of_beneficiaries_age_less_65,\n Report.number_of_beneficiaries_age_65_to_74,\n Report.number_of_beneficiaries_age_75_to_84,\n Report.number_of_beneficiaries_age_greater_84,\n Report.number_of_non_hispanic_white_beneficiaries,\n Report.number_of_african_american_beneficiaries,\n Report.number_of_asian_pacific_islander_beneficiaries,\n Report.number_of_hispanic_beneficiaries,\n Report.number_of_american_indian_alaskan_native_beneficiaries,\n Report.number_of_beneficiaries_with_race_not_elsewhere_classified,\n Report.number_of_female_beneficiaries, Report.number_of_male_beneficiaries).all()\n col = ['medicare_amount', 'medicare_medical_amount', 'medicare_drug_amount', 'num_age_less_65', 'num_age_65_to_74',\n 'num_age_75-84', 'num_age_greater_84', 'num_white', 
'num_african_am', 'num_api', 'num_hispanic',\n                 'num_native_am', 'num_other_race', 'num_female', 'num_male']\n    demo_df = pd.DataFrame(rows_heatmap, columns=col)\n    demo_corr = demo_df.corr() # Pearson correlation\n    # Heatmap figure\n    plt.figure()\n    sns.set(style='white')\n    mask = np.zeros_like(demo_corr, dtype=bool)\n    mask[np.triu_indices_from(mask)] = True\n    f, ax = plt.subplots(figsize=(16, 12))\n    cmap = sns.diverging_palette(220, 10, as_cmap=True)\n    heatmap_plot = sns.heatmap(demo_corr, mask=mask, cmap=cmap, ax=ax)\n    heatmap_path = os.path.join(get_abs_path(), 'static', 'tmp', 'heatmap_demo.png')\n    heatmap_plot.figure.savefig(heatmap_path, transparent=True) # save fig\n    plt.close()\n    return render_template(\"cost_demo.html\", costs_age=costs_age, age_ratio=age_ratio,\n                           costs_race=costs_race, race_ratio=race_ratio,\n                           costs_sex=costs_sex, sex_ratio=sex_ratio,\n                           cost_demo_js = url_for('static', filename='js/cost_demo.v3.min.js'),\n                           age_file=url_for('static', filename='tmp/cost_age.csv'),\n                           race_file=url_for('static', filename='tmp/cost_race.csv'),\n                           sex_file=url_for('static', filename='tmp/cost_sex.csv'),\n                           heatmap_fig=url_for('static', filename='tmp/heatmap_demo.png'))\n\n\n@app.route('/cost/hcpcs')\ndef procedure():\n    #Unique HCPCS Services\n    ## Histogram - unique hcpcs per npi\n    rows_unique = db.session.query(Report.number_of_hcpcs).all()\n    hcpcs_dist = pd.DataFrame(rows_unique, columns=['number_of_HCPCS']) # DataFrame\n    hcpcs_dist = hcpcs_dist[hcpcs_dist['number_of_HCPCS'] < 200] # Filter for Number of Unique HCPCS < 200\n    plt.figure()\n    h = hcpcs_dist['number_of_HCPCS'].plot.hist(bins=20, figsize=(10, 7), color='green')\n    h.set_xlabel('Number of Unique HCPCS Services & Procedures', fontsize=18)\n    h.set_ylabel('Frequency', fontsize=18)\n    h.set_title('Histogram of Unique CMS Services & Procedures Provided', fontsize=18)\n    h_path = os.path.join(get_abs_path(), 'static', 'tmp', 'hcpcs_dist.png')\n    h.figure.savefig(h_path, transparent=True) # save figure\n    plt.close()\n\n    hcpcs_median = int(hcpcs_dist['number_of_HCPCS'].median()) # median\n    hcpcs_mean = int(hcpcs_dist['number_of_HCPCS'].mean()) # mean\n    hcpcs_mode = scipy.stats.mode(hcpcs_dist['number_of_HCPCS'].as_matrix().flatten())[0][0] # mode\n\n    #Number of Services\n    ## Pie Chart - number of services per cost category\n    rows_total = db.session.query(func.sum(Report.number_of_services), func.sum(Report.number_of_medical_services),\n                                  func.sum(Report.number_of_drug_services)).all()\n\n    total_data = list(rows_total[0])\n    pie_service = total_data[1:] + [total_data[0]-sum(total_data[1:])]\n    total_df = pd.DataFrame({'type_serv': ['num_medical_services', 'num_drug_services', 'num_other_services'],\n                             'num_serv': pie_service})\n    serv_sum = total_df['num_serv'].groupby(total_df['type_serv']).sum()\n    plt.figure()\n    plt.axis('equal')\n    plt.pie(serv_sum, labels=serv_sum.index, autopct=\"%1.1f%%\")\n    plt.suptitle('CMS Service Distribution', fontsize=18)\n    pie_path = os.path.join(get_abs_path(), 'static', 'tmp', 'num_pie.png')\n    plt.savefig(pie_path, transparent=True) # save figure\n    plt.close()\n\n    # Rankings - Leading HCPCS Services\n    ## Most frequently utilized HCPCS\n    rows_freq = db.session.query(Puf.hcpcs_code, func.count(Puf.hcpcs_code)).group_by(Puf.hcpcs_code).\\\n        order_by(desc(func.count(Puf.hcpcs_code))).limit(10).all()\n\n    freq_serv = []\n    for i in range(len(rows_freq)):\n        code = str(rows_freq[i][0])\n        code_info = db.session.query(Puf.hcpcs_description).filter(Puf.hcpcs_code == code).first()\n        code_amt = 
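# --- Added illustrative sketch (not in the original source) ---
# Both correlation heatmaps in these routes hide the redundant upper triangle
# of the symmetric matrix. The masking pattern, self-contained:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.DataFrame(np.random.rand(50, 4), columns=list('abcd'))
corr = df.corr()
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True  # True cells are hidden by sns.heatmap
sns.heatmap(corr, mask=mask, cmap=sns.diverging_palette(220, 10, as_cmap=True))
plt.close('all')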
db.session.query(func.avg(Puf.average_medicare_standardized_amount)).\\\n filter(Puf.hcpcs_code == code).first()\n freq_row = (code,) + code_info + (int(rows_freq[i][1]),) + (round(float(code_amt[0]), 2),)\n freq_serv += [freq_row]\n freq_serv = np.array(freq_serv)\n\n ## Most expensive HCPCS\n rows_exp = db.session.query(Puf.hcpcs_code, func.avg(Puf.average_medicare_standardized_amount)).\\\n filter(Puf.hcpcs_code != '').group_by(Puf.hcpcs_code).\\\n order_by(desc(func.avg(Puf.average_medicare_standardized_amount))).limit(10).all()\n\n exp_serv = []\n for i in range(len(rows_exp)):\n exp_code = str(rows_exp[i][0])\n exp_code_info = db.session.query(Puf.hcpcs_description).filter(Puf.hcpcs_code == exp_code).first()\n exp_code_count = db.session.query(func.count(Puf.hcpcs_code)).filter(Puf.hcpcs_code == exp_code).all()\n exp_row = (exp_code,) + exp_code_info + (int(exp_code_count[0][0]),) + (round(rows_exp[i][1], 2),)\n exp_serv += [exp_row]\n exp_serv = np.array(exp_serv)\n\n # Correlation - Cost & Number of Services\n rows_corr = db.session.query(Report.number_of_services, Report.number_of_hcpcs,\n Report.total_medicare_standardized_payment_amount,\n Report.total_medical_medicare_standardized_payment_amount,\n Report.total_drug_medicare_standardized_payment_amount).all()\n service_cost_df = pd.DataFrame(rows_corr, columns=['num_services', 'num_unique_HCPCS','total_overall_cost',\n 'total_medical_costs', 'total_drug_costs'])\n service_corr = service_cost_df.corr() # Pearson correlation\n ## Heatmap figure\n plt.figure()\n sns.set(style='white', font_scale=1.5)\n mask = np.zeros_like(service_corr, dtype=bool)\n mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(16, 12))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n servicecorr_plot = sns.heatmap(service_corr, mask=mask, cmap=cmap, ax=ax)\n scorr_path = os.path.join(get_abs_path(), 'static', 'tmp', 'heatmap_service.png')\n servicecorr_plot.figure.savefig(scorr_path, transparent=True) # save figure\n plt.close()\n return render_template(\"cost_hcpcs.html\", unique_fig=url_for('static', filename='tmp/hcpcs_dist.png'),\n pie_fig=url_for('static', filename='tmp/num_pie.png'), total_serv =total_data,\n median=hcpcs_median, avg=hcpcs_mean, mode=hcpcs_mode, freq_serv=freq_serv, exp_serv=exp_serv,\n scorr_heatmap=url_for('static', filename='tmp/heatmap_service.png'))\n\n\n@app.route('/data')\ndef data():\n return render_template(\"data.html\", cms_img=url_for('static', filename='img/cms_logo.jpg'),\n bchc_img=url_for('static', filename='img/bch_logo.png'))\n\n\n@app.route('/data/report')\ndef report():\n return render_template(\"report_data.html\")\n\n\n@app.route('/data/puf')\ndef puf():\n return render_template(\"puf_data.html\")\n\n\n@app.route('/data/cancer')\ndef cancer():\n return render_template(\"cancer_data.html\")","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":38709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"47102023","text":"#! 
/usr/bin/env python\n\nimport sys\nimport re \nimport csv\nimport MySQLdb as mdb\nimport pandas as pd\nfrom openpyxl import load_workbook\nimport time\nimport collections\nfrom simple_salesforce import Salesforce\nfrom dateutil.relativedelta import relativedelta\n\nsys.path.insert(0,'/home/analytics/analytics_sandbox/python_libs');\nfrom common_libs import *\nfrom create_mysql import *\nfrom attask_libs import * \nfrom attask_api import StreamClient, ObjCode, AtTaskObject\nfrom sfdc_libs import *\nfrom inspect import currentframe, getframeinfo\n\nstart = time.time()\ncur_datetime = datetime.now() \nexecfile('/home/analytics/analytics_sandbox/python_libs/stuff.py')\n\n# Logging\nimport log_libs as log\nLOG = log.init_logging()\n\npd.set_option('display.width',1000)\npd.set_option('display.max_colwidth',200)\npd.set_option('display.max_rows',400)\n\ncon = None\ncon = mdb.connect('localhost','root','','');\ncur = con.cursor()\n\nCASE = int(sys.argv[1])\n\nACT_START = 10*(CASE-1)\nACT_END = 10*(CASE)\n\nif (ACT_START == 0):\n\tACT_START = 1\n\n##############################################\n##############################################\n##############################################\n############### Main Program #################\n##############################################\n##############################################\n##############################################\n\nTIME_BIN_DELTA = 14\n\nsdata_timeline_df = pd.read_csv('./output/sdata_all_history_RSF.csv')\nsdata_timeline_df = sdata_timeline_df.drop('Unnamed: 0',1)\nsdata_timeline_df['CreatedDate'] = pd.to_datetime(sdata_timeline_df['CreatedDate'])\nsdata_timeline_df['Act_CreatedDate'] = pd.to_datetime(sdata_timeline_df['Act_CreatedDate'])\n\nunique_account = list(set(sdata_timeline_df['AccountId_18']))\n\nif (len(unique_account) == 1):\n\tprint_op = unique_account\nelse:\n\tprint_op = ['0063800000apZD9']\n\nupdate_cols = ['s1','s2','s3','s4','s5','s6','s7']\nprint_cols = ['AccountId_18','OpportunityId','won','lost','CreatedDate','tstart','tstop','Act_CreatedDate','final_day','event','OpportunityType'] + update_cols + ['stageback']\n\nwon_loss_df = sdata_timeline_df[['AccountId_18','tstop','won','lost']][(sdata_timeline_df['won'] > 0) | (sdata_timeline_df['lost'] > 0)]\n\nfor ppp in range(ACT_START,ACT_END):\n\tnew_datetime = cur_datetime - timedelta(days=TIME_BIN_DELTA*ppp) - timedelta(hours = 8) ## deals with UTC vs PST \n\n\tLOG.info('Case {:>3}: {:>3} of {:>3} ... Date: {:>8} ... {:.2f} sec'.format(CASE,ppp,ACT_END,new_datetime.strftime('%Y%m%d'),time.time()-start ) )\n\n\tcv_df = pd.DataFrame(columns = sdata_timeline_df.columns)\n\tfor i in range(0,len(unique_account)):\n\n\t\tif ((i % 500) == 499):\n\t\t\tLOG.info(\"Case {:>3} STAGE DURATION: Unique Op ... {:>5} of {:>5} ... 
{:.2f} sec\".format(CASE,i+1,len(unique_account),time.time()-start))\n\t\n\t\tidx = all_indices_CASE_SENSITIVE(unique_account[i],sdata_timeline_df['AccountId_18'])\n\n\t\ttest_before_df = sdata_timeline_df.ix[idx][(sdata_timeline_df['CreatedDate'] <= new_datetime)]\t\n\t\ttest_after_df = sdata_timeline_df.ix[idx][(sdata_timeline_df['CreatedDate'] > new_datetime)]\t\n\n\t\ttest = 0 \n\t\tif (len(test_before_df) > 0):\n\t\t\tprev_before_idx = test_before_df.index[len(test_before_df)-2] \n\t\t\tmax_before_idx = test_before_df.index[len(test_before_df)-1] \n\n\t\t\t###################################\n\t\t\t# If you fall between two dates\n\t\t\t###################################\n\t\t\tif (len(test_after_df) > 0):\n\t\t\t\tmin_after_idx = min(test_after_df.index) \n\t\n\t\t\t\ttest_before_df.loc[max_before_idx,'tstop'] = (new_datetime - test_before_df.ix[max_before_idx]['Act_CreatedDate']).days\n\t\t\t\ttest_before_df.loc[:,'final_day'] = test_before_df.ix[max_before_idx]['tstop']\n\t\t\t\ttest_before_df.loc[max_before_idx,'won'] = 0\n\t\t\t\ttest_before_df.loc[max_before_idx,'lost'] = 0\n\n\t\t\t\t#### FOR RSF ONLY\n\t\t\t\ttest_before_df = test_before_df.append(test_before_df.ix[max_before_idx],ignore_index=True)\n\n\t\t\t\t## Get index\n\t\t\t\tcur_idx = len(test_before_df)-1\n\t\t\t\n\t\t\t\t## Update data \n\t\t\t\tfor j in range(0,len(update_cols)):\n\t\t\t\t\tif (test_before_df.ix[cur_idx][update_cols[j]] != test_after_df.ix[min_after_idx][update_cols[j]]): \n\t\t\t\t\t\ttest_before_df.loc[cur_idx,update_cols[j]] = test_before_df.ix[cur_idx-1][update_cols[j]] + (new_datetime - test_before_df.ix[cur_idx]['CreatedDate']).days\n\n\t\t\t\ttest_before_df.loc[cur_idx,'tstart'] = int(test_before_df.ix[cur_idx]['final_day'])\n\t\t\t\ttest_before_df.loc[cur_idx,'CreatedDate'] = test_before_df.ix[cur_idx]['Act_CreatedDate'] + timedelta(days = int(test_before_df.ix[cur_idx]['final_day']))\n\t\n\t\t\t\tcv_df = cv_df.append(test_before_df,ignore_index=True)\n\n\t\t\t##########################################\n\t\t\t# Between final CreatedDate and final_day \n\t\t\t##########################################\n\t\t\telif (len(test_after_df) == 0):\n\t\t\t\tfinal_date = test_before_df.ix[max_before_idx]['CreatedDate']\n\t\t\t\ttdelta_days = test_before_df.ix[max_before_idx]['tstop'] - test_before_df.ix[max_before_idx]['tstart']\n\t\t\t\tif (new_datetime < (final_date + timedelta(days=tdelta_days)) ):\n\t\t\n\t\t\t\t\ttest_before_df.loc[max_before_idx,'won'] = 0\n\t\t\t\t\ttest_before_df.loc[max_before_idx,'lost'] = 0\n\t\t\t\t\ttest_before_df.loc[max_before_idx,'tstop'] = (new_datetime - test_before_df.ix[max_before_idx]['Act_CreatedDate']).days\n\t\t\t\t\ttest_before_df.loc[:,'final_day'] = test_before_df.ix[max_before_idx]['tstop']\n\n\t\t\t\tcv_df = cv_df.append(test_before_df,ignore_index=True)\n\n\tcv_idx = all_indices_CASE_SENSITIVE(print_op[0],cv_df['AccountId_18'])\n\tif (len(cv_idx) > 0):\n\t\tidx = all_indices_CASE_SENSITIVE(print_op[0],sdata_timeline_df['AccountId_18'])\n\t\tLOG.info(\"new_datetime = {}\\n{}\".format(new_datetime,sdata_timeline_df.ix[idx][print_cols]))\n\t\tLOG.info(\"new_datetime = {}\\n{}\".format(new_datetime,cv_df.ix[cv_idx][print_cols]))\n\n\tcv_df.to_csv('./output/sdata_all_history_RSF_' + new_datetime.strftime('%Y%m%d') + 
'.csv',encoding='utf-8')\n\n","sub_path":"data_integration/forecasting/update_act_cross_validation.py","file_name":"update_act_cross_validation.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"229480674","text":"t=int(input())\r\nfor i in range(t):\r\n s=input()\r\n a=s[0]\r\n for j in range(1,len(s)):\r\n if(ord(a[0])>ord(s[j])):\r\n a=\"\".join(a)+s[j]\r\n else:\r\n a=s[j]+\"\".join(a)\r\n print(\"Case #{0}: {1}\".format(i+1,a)) \r\n","sub_path":"codes/BuildLinks1.10/test_input/CJ/16_1_1_aman15jan_r1.py","file_name":"16_1_1_aman15jan_r1.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568081086","text":"#!/usr/bin/python\nimport os\nimport time\nimport signal\nimport subprocess\n\n# get the command from `ps aux`\ncommand = '/Applications/TextEdit.app/Contents/MacOS/TextEdit'\n\ndef termy(cmd):\n task = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return(task.communicate()) # 0 is out, 1 is err\n\ndef command_finder():\n global command\n \n processes = termy(['ps', 'aux'])[0]\n process_list = processes.split('\\n')\n \n for process in process_list:\n if command in process:\n process_line = process.split(' ')\n command_line = filter(None, process_line)\n pid = command_line[1]\n return(pid)\n \ndef command_killer():\n count = 0\n while count < 4:\n pid = int(command_finder())\n os.kill(pid, signal.SIGTERM)\n \n if command_finder() == None:\n exit(0)\n \n count += 1\n time.sleep(10)\n \n pid = int(command_finder())\n os.kill(pid, signal.SIGKILL)\n\ncommand_killer()\n","sub_path":"python/utilities/command_killer.py","file_name":"command_killer.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137664488","text":"import sys, json, logging, re\n#import RPi.GPIO as GPIO\nfrom time import sleep, perf_counter, perf_counter_ns\nimport paho.mqtt.client as mqtt\nfrom os import path\nfrom pathlib import Path\nfrom logging.handlers import RotatingFileHandler\nfrom package import *\n\nclass pcolor:\n ''' Add color to print statements '''\n LBLUE = '\\33[36m' # Close to CYAN\n CYAN = '\\033[96m'\n BLUE = '\\033[94m'\n DBLUE = '\\33[34m'\n WOLB = '\\33[46m' # White On LightBlue\n LPURPLE = '\\033[95m'\n PURPLE = '\\33[35m'\n WOP = '\\33[45m' # White On Purple\n GREEN = '\\033[92m'\n DGREEN = '\\33[32m'\n WOG = '\\33[42m' # White On Green\n YELLOW = '\\033[93m'\n YELLOW2 = '\\33[33m'\n RED = '\\033[91m'\n DRED = '\\33[31m'\n WOR = '\\33[41m' # White On Red\n BOW = '\\33[7m' # Black On White\n BOLD = '\\033[1m'\n ENDC = '\\033[0m'\n \nclass CustomFormatter(logging.Formatter):\n \"\"\" Custom logging format with color \"\"\"\n\n grey = \"\\x1b[38;21m\"\n green = \"\\x1b[32m\"\n yellow = \"\\x1b[33;21m\"\n red = \"\\x1b[31;21m\"\n bold_red = \"\\x1b[31;1m\"\n reset = \"\\x1b[0m\"\n format = \"[%(levelname)s]: %(name)s - %(message)s\"\n\n FORMATS = {\n logging.DEBUG: green + format + reset,\n logging.INFO: grey + format + reset,\n logging.WARNING: yellow + format + reset,\n logging.ERROR: red + format + reset,\n logging.CRITICAL: bold_red + format + reset\n }\n\n def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n formatter = logging.Formatter(log_fmt)\n return formatter.format(record)\n\ndef setup_logging(log_dir, logger_type, logger_name=__name__, 
log_level=logging.INFO, mode=1):\n ''' Create basic or custom loggers with RotatingFileHandler '''\n global _loggers\n # logger_type = basic\n # logger_type = custom with log file options below\n # log_level and mode will determine output\n #log_level, RFHmode| logger.x() | output\n #------------------|-------------|-----------\n # INFO, 1 | info | print\n # INFO, 2 | info | print+logfile\n # INFO, 3 | info | logfile\n # DEBUG,1 | info+debug | print only\n # DEBUG,2 | info+debug | print+logfile\n # DEBUG,3 | info+debug | logfile\n\n if logger_type == 'basic':\n if len(logging.getLogger().handlers) == 0: # Root logger does not already exist, will create it\n logging.basicConfig(level=log_level) # Create Root logger\n custom_logger = logging.getLogger(logger_name) # Set logger to root logging\n else:\n custom_logger = logging.getLogger(logger_name) # Root logger already exists so just linking logger to it\n else:\n if mode == 1:\n logfile_log_level = logging.CRITICAL\n console_log_level = log_level\n elif mode == 2:\n logfile_log_level = log_level\n console_log_level = log_level\n elif mode == 3:\n logfile_log_level = log_level\n console_log_level = logging.CRITICAL\n\n custom_logger = logging.getLogger(logger_name)\n custom_logger.propagate = False\n custom_logger.setLevel(log_level)\n log_file_format = logging.Formatter(\"[%(levelname)s] - %(asctime)s - %(name)s - : %(message)s in %(pathname)s:%(lineno)d\")\n #log_console_format = logging.Formatter(\"[%(levelname)s]: %(message)s\") # Using CustomFormatter Class\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(console_log_level)\n console_handler.setFormatter(CustomFormatter())\n\n log_file_handler = RotatingFileHandler('{}/debug.log'.format(log_dir), maxBytes=10**6, backupCount=5) # 1MB file\n log_file_handler.setLevel(logfile_log_level)\n log_file_handler.setFormatter(log_file_format)\n\n log_errors_file_handler = RotatingFileHandler('{}/error.log'.format(log_dir), maxBytes=10**6, backupCount=5)\n log_errors_file_handler.setLevel(logging.WARNING)\n log_errors_file_handler.setFormatter(log_file_format)\n\n custom_logger.addHandler(console_handler)\n custom_logger.addHandler(log_file_handler)\n custom_logger.addHandler(log_errors_file_handler)\n if custom_logger not in _loggers: _loggers.append(custom_logger)\n return custom_logger\n \ndef on_connect(client, userdata, flags, rc):\n \"\"\" on connect callback verifies a connection established and subscribe to TOPICs\"\"\"\n main_logger.info(\"attempting on_connect\")\n if rc==0:\n mqtt_client.connected = True\n for topic in MQTT_SUB_TOPIC:\n client.subscribe(topic)\n main_logger.info(\"Subscribed to: {0}\".format(topic))\n main_logger.info(\"Successful Connection: {0}\".format(str(rc)))\n else:\n mqtt_client.failed_connection = True # If rc != 0 then failed to connect. Set flag to stop mqtt loop\n main_logger.info(\"Unsuccessful Connection - Code {0}\".format(str(rc)))\n\ndef on_message(client, userdata, msg):\n \"\"\"on message callback will receive messages from the server/broker. 
Must be subscribed to the topic in on_connect\"\"\"\n global deviceD, MQTT_REGEX\n global mqtt_servoID, mqtt_servoAngle\n global mqtt_controlsD, mqtt_stepreset\n global mqtt_dummy1, mqtt_dummy2\n mqtt_logger.debug(\"Received: {0} with payload: {1}\".format(msg.topic, str(msg.payload)))\n msgmatch = re.match(MQTT_REGEX, msg.topic) # Check for match to subscribed topics\n if msgmatch:\n mqtt_payload = json.loads(str(msg.payload.decode(\"utf-8\", \"ignore\"))) \n mqtt_topic = [msgmatch.group(0), msgmatch.group(1), msgmatch.group(2), type(mqtt_payload)] # breaks msg topic into groups - group/group1/group2\n if mqtt_topic[1] == 'servoZCMD':\n mqtt_servoID = int(mqtt_topic[2])\n mqtt_servoAngle = int(mqtt_payload) # Set the servo angle from mqtt payload\n if mqtt_topic[2] == 'controls':\n mqtt_controlsD = mqtt_payload\n if mqtt_topic[2] == 'stepreset':\n mqtt_stepreset = mqtt_payload\n #if mqtt_topic[2] == 'group2A':\n # mqtt_dummy1 = mqtt_payload\n #if mqtt_topic[2] == 'group2B':\n # mqtt_dummy2 = mqtt_payload\n # If Debugging will print the JSON incoming payload and unpack it\n if mqtt_logger.getEffectiveLevel() == 10:\n mqtt_logger.debug(\"Topic grp0:{0} grp1:{1} grp2:{2}\".format(msgmatch.group(0), msgmatch.group(1), msgmatch.group(2)))\n mqtt_payload = json.loads(str(msg.payload.decode(\"utf-8\", \"ignore\")))\n mqtt_logger.debug(\"Payload type:{0}\".format(type(mqtt_payload)))\n if isinstance(mqtt_payload, (str, bool, int, float)):\n mqtt_logger.debug(mqtt_payload)\n elif isinstance(mqtt_payload, list):\n mqtt_logger.debug(mqtt_payload)\n elif isinstance(mqtt_payload, dict):\n for key, value in mqtt_payload.items(): \n mqtt_logger.debug(\"{0}:{1}\".format(key, value))\n\ndef on_publish(client, userdata, mid):\n \"\"\"on publish will send data to client\"\"\"\n #mqtt_logger.debug(\"msg ID: \" + str(mid)) \n pass \n\ndef on_disconnect(client, userdata,rc=0):\n main_logger.error(\"DisConnected result code \"+str(rc))\n mqtt_client.loop_stop()\n\ndef mqtt_setup(IPaddress):\n global MQTT_SERVER, MQTT_CLIENT_ID, MQTT_USER, MQTT_PASSWORD, MQTT_SUB_TOPIC, MQTT_PUB_LVL1, MQTT_SUB_LVL1, MQTT_REGEX\n global mqtt_client\n home = str(Path.home()) # Import mqtt and wifi info. Remove if hard coding in python script\n with open(path.join(home, \"stem\"),\"r\") as f:\n user_info = f.read().splitlines()\n MQTT_SERVER = IPaddress # Replace with IP address of device running mqtt server/broker\n MQTT_USER = user_info[0] # Replace with your mqtt user ID\n MQTT_PASSWORD = user_info[1] # Replace with your mqtt password\n # Specific MQTT SUBSCRIBE/PUBLISH TOPICS created inside 'setup_device' function\n MQTT_SUB_TOPIC = []\n MQTT_SUB_LVL1 = 'nred2' + MQTT_CLIENT_ID\n MQTT_REGEX = MQTT_SUB_LVL1 + '/([^/]+)/([^/]+)' # 'nred2pi/+' would also work but would not return groups\n # () group capture. Useful for getting topic lvls in on_message\n # [^/] match a char except /. Needed to get topic lvl2, lvl3 groups\n # + will match one or more. Requiring at least 1 match forces a lvl1/lvl2/lvl3 topic structure\n # * could also be used for last group and then a lvl1/lvl2 topic would also be matched\n MQTT_PUB_LVL1 = 'pi2nred/'\n\n # MQTT STRUCTURE - TOPIC/PAYLOAD\n # TOPIC levels --> lvl1/lvl2/lvl3\n # PAYLOAD contains the data (JSON string represinting python/js object with key:value is best format\n # but can also be simple int, str, boolean)\n #\n # MQTT_SUBSCRIBE_TOPIC FORMAT\n # lvl1 = 'nred2' + MQTT_CLIENT -- From nodered to machine. 
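# --- Added illustrative sketch (not in the original source) ---
# Quick check of the MQTT_REGEX capture groups described above, assuming
# MQTT_SUB_LVL1 == 'nred2pi' (MQTT_CLIENT_ID is 'pi' in this script):
import re
m = re.match(r'nred2pi/([^/]+)/([^/]+)', 'nred2pi/stepperZCMD/controls')
assert m.group(0) == 'nred2pi/stepperZCMD/controls'  # whole topic
assert m.group(1) == 'stepperZCMD'                   # lvl2 (device function)
assert m.group(2) == 'controls'                      # lvl3 (free form)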
machine can be generic or unique/specific\n # lvl2 = 'device function' -- Example servoZCMD, stepperZCMD\n # lvl3 = free form -- Example controls (stepper controls); 0,1,2 (specific servo)\n #\n # MQTT_PUBLISH_TOPIC FORMAT\n # lvl1 = 'pi2nred'|'esp2nred' -- From machine to nodered. (generic machine)\n # lvl2 = 'device' sending data -- Device examples, adc, ina219, rotary encoder, stepper\n # 'deviceA'|'deviceB' Device can be generic or specific. this is updated in 'create_device' functions\n # 'nredZCMD' May also be machine sending command to nred to update dashboard\n # lvl3 = free form -- May be specific machine (MQTT_CLIENT_ID) or general command\n #\n # MQTT PAYLOAD CONVERSIONS\n # Simple commands/data sent with integer, boolean, string, list payloads\n # Complex commands/data payloads sent with JSON format using python dict/js object notation (key:value)\n # mach2nred\n # PYTHON(publish) -- Convert from python_object to JSON string/payload [json.dumps(python_object) --> JSON_msg.payload ]\n # NODERED(mqtt_in) -- Convert from JSON string/payload to js_object [JSON.parse(JSON_msg.payload) --> js_object ] \n # js_object named 'fields' to align with influxdb naming (values accessed with fields[key]=value or fields.key=value)\n # nred2mach\n # NODERED(mqtt_out) -- mqtt_out: Keep node red data in js_object format (fields[key]=value)\n # PYTHON(on_message) -- Convert JSON string payload to python_object [python_object <-- json.loads(msg.payload.decode)]\n #\n # MSG PAYLOAD KEY:VALUE FORMAT (script is demoMQTT.py, module is lib/Module.py)\n # STEPS -- synchronize python dict keys with NodeRed js object keys using 'mqtt_payload_keys'\n # 1 - Define mqtt_payload_keys (key labels for python/js objects) in python script 'create_device' functions\n # mqtt_payload_keys is then passed to the device module as an argument\n # 2 - Design device module so the 'outgoing' data will be a dictionary using the mqtt_payload_key names\n # 3 - Have Python script retrieve 'outgoing' data (dict) from device and publish using mqtt_payload_key names\n # 4 - NodeRed JSON.parse function will convert msg.payload ('outgoing') to js_object\n # mqtt_payload_key names are used to create js_object (fields) keys\n # fields/js_object items can be used in node red dashboard using fields.key (in nodered will be payload[0].key)\n #\n # NODE-RED BACKGROUND\n # Topic/payload format tries to align with influxdb (TAGS/FIELDS) to make writing to database easy\n # Topic levels are converted to TAGS inside NodeRed\n # JSON string is used to construct js_object with FIELDS (fields[key]=value) \n # The NodeRed msg.payload then becomes an array containing [FIELDS, TAGS]\n # Final NodeRed payload: fields[key] data is accessed with msg.payload[0].key\n # tags(topic levels) are access with msg.payload[1].lvlx (lvl1, lvl2, lvl3)\n\ndef setup_device(device, lvl2, publvl3, data_keys):\n global printcolor, deviceD\n if deviceD.get(device) == None:\n deviceD[device] = {}\n deviceD[device]['data'] = {}\n deviceD[device]['lvl2'] = lvl2 # Sub/Pub lvl2 in topics. 
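# --- Added illustrative sketch (not in the original source) ---
# The payload convention documented above, round-tripped in plain Python:
# publish dicts as JSON strings, and parse incoming JSON back in on_message.
import json
outgoing = {'Vbusf': 5.02, 'IbusAf': 0.113}   # keys follow the data_keys naming
payload = json.dumps(outgoing)                # what mqtt_client.publish() sends
incoming = json.loads(payload)                # what on_message reconstructs
assert incoming == outgoing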
Does not have to be unique, can piggy-back on another device lvl2\n topic = f\"{MQTT_SUB_LVL1}/{deviceD[device]['lvl2']}ZCMD/+\"\n if topic not in MQTT_SUB_TOPIC:\n MQTT_SUB_TOPIC.append(topic)\n for key in data_keys:\n deviceD[device]['data'][key] = 0\n else:\n for key in data_keys:\n for item in deviceD:\n if deviceD[item]['data'].get(key) != None:\n main_logger.warning(f\"**DUPLICATE WARNING {device} and {item} are both publishing {key} on {topic}\")\n deviceD[device]['data'][key] = 0\n deviceD[device]['pubtopic'] = MQTT_PUB_LVL1 + lvl2 + '/' + publvl3\n deviceD[device]['send'] = False\n printcolor = not printcolor # change color of every other print statement\n if printcolor: \n main_logger.info(f\"{pcolor.LBLUE}{device} Subscribing to: {topic}{pcolor.ENDC}\")\n main_logger.info(f\"{pcolor.DBLUE}{device} Publishing to: {deviceD[device]['pubtopic']}{pcolor.ENDC}\")\n main_logger.info(f\"JSON payload keys will be:{pcolor.WOLB}{*deviceD[device]['data'],}{pcolor.ENDC}\")\n else:\n main_logger.info(f\"{pcolor.PURPLE}{device} Subscribing to: {topic}{pcolor.ENDC}\")\n main_logger.info(f\"{pcolor.LPURPLE}{device} Publishing to: {deviceD[device]['pubtopic']}{pcolor.ENDC}\")\n main_logger.info(f\"JSON payload keys will be:{pcolor.WOP}{*deviceD[device]['data'],}{pcolor.ENDC}\")\n else:\n main_logger.error(f\"Device {device} already in use. Device name should be unique\")\n sys.exit(f\"{pcolor.RED}Device {device} already in use. Device name should be unique{pcolor.ENDC}\")\n\ndef button_callback(channel):\n global buttonpressed, buttonvalue\n buttonpressed = True\n buttonvalue = 1 # str(GPIO.input(jsbutton))\n\ndef main():\n global deviceD, printcolor # Containers setup in 'create' functions and used for Publishing mqtt\n global MQTT_SERVER, MQTT_USER, MQTT_PASSWORD, MQTT_CLIENT_ID, mqtt_client, MQTT_PUB_LVL1\n global _loggers, main_logger, mqtt_logger\n global buttonpressed, buttonvalue # Joystick variables\n global mqtt_servoID # Servo variables\n global mqtt_controlsD, mqtt_stepreset # Stepper motor controls\n\n # Type of loggers - 'basic' or 'custom'\n # 'custom' type - log level and mode will determine output for custom loggers\n # log_level and mode will determine output\n #log_level, RFHmode| logger.x() | output\n #------------------|-------------|-----------\n # INFO, 1 | info | print\n # INFO, 2 | info | print+logfile\n # INFO, 3 | info | logfile\n # DEBUG,1 | info+debug | print only\n # DEBUG,2 | info+debug | print+logfile\n # DEBUG,3 | info+debug | logfile\n \n _loggers = [] # container to keep track of loggers created # CRITICAL=logging off. DEBUG=get variables. 
INFO=status messages.\n main_logger = setup_logging(path.dirname(path.abspath(__file__)), 'custom', log_level=logging.DEBUG, mode=1)\n mqtt_logger = setup_logging(path.dirname(path.abspath(__file__)), 'custom', 'mqtt', log_level=logging.INFO, mode=1)\n \n # MQTT structure: lvl1 = from-to (ie Pi-2-NodeRed shortened to pi2nred)\n # lvl2 = device type (ie servoZCMD, stepperZCMD, adc)\n # lvl3 = free form (ie controls, servo IDs, etc)\n MQTT_CLIENT_ID = 'pi' # Can make ID unique if multiple Pi's could be running similar devices (ie servos, ADC's) \n # Node red will need to be linked to unique MQTT_CLIENT_ID\n mqtt_setup('10.0.0.115') # Pass IP address\n \n deviceD = {} # Primary container for storing all devices, topics, and data\n printcolor = True\n #==== HARDWARE SETUP =====#\n rotaryEncoderSet = {}\n logger_rotenc = setup_logging(path.dirname(path.abspath(__file__)), 'custom', 'rotenc', log_level=logging.INFO, mode=2)\n\n device = 'rotEnc1' # Device name should be unique, can not duplicate device ID\n lvl2 = 'rotencoder' # Topic lvl2 name can be a duplicate, meaning multiple devices publishing data on the same topic\n publvl3 = MQTT_CLIENT_ID + \"\" # Will be a tag in influxdb. Optional to modify it and describe experiment being ran\n data_keys = ['RotEnc1Ci', 'RotEnc1Bi'] # If topic lvl2 name repeats would likely want the data_keys to be unique\n clkPin, dtPin, button_rotenc = 17, 27, 24\n setup_device(device, lvl2, publvl3, data_keys)\n rotaryEncoderSet[device] = RotaryEncoder(clkPin, dtPin, button_rotenc, *data_keys, logger_rotenc) #rotaryencoder.RotaryEncoder(clkPin, dtPin, button_rotenc, *data_keys, rotenc_logger)\n #------------#\n ina219Set = {} # ina219 library has an internal logger named ina219. name it something different.\n logger_ina219 = setup_logging(path.dirname(path.abspath(__file__)), 'custom', 'ina219l', log_level=logging.INFO, mode=1)\n \n device = 'ina219A' \n lvl2 = 'ina219A' # Topic lvl2 name can be a duplicate, meaning multiple devices publishing data on the same topic\n publvl3 = MQTT_CLIENT_ID + \"Test1\" # Will be a tag in influxdb. Optional to modify it and describe experiment being ran\n data_keys = ['Vbusf', 'IbusAf', 'PowerWf'] # If topic lvl2 name repeats would likely want the data_keys to be unique\n setup_device(device, lvl2, publvl3, data_keys)\n ina219Set[device] = PiINA219(*data_keys, \"auto\", 0.4, 0x40, logger=logger_ina219) # PiINA219(*data_keys, gainmode=\"auto\", maxA=0.4, address=0x40, logger=ina219_logger) #piina219.PiINA219(*data_keys, gainmode=\"auto\", maxA=0.4, address=0x40, logger=ina219_logger)\n #------------#\n adcSet = {} # Can comment out any ADC type not being used\n adc_logger = setup_logging(path.dirname(path.abspath(__file__)), 'custom', 'adc', log_level=logging.INFO, mode=1)\n\n device = 'ads1115'\n lvl2 = 'ads1115' # Topic lvl2 name can be a duplicate, meaning multiple devices publishing data on the same topic\n publvl3 = MQTT_CLIENT_ID + \"\" # Will be a tag in influxdb. Optional to modify it and describe experiment being ran\n data_keys = ['a0f', 'a1f', 'etc'] # If topic lvl2 name repeats would likely want the data_keys to be unique\n setup_device(device, lvl2, publvl3, data_keys)\n adcSet[device] = ads1115(1, 0.003, 1, 1, 0x48, adc_logger) # numOfChannels, noiseThreshold (V), max interval, gain=1 (+/-4.1V readings), address\n \n device = 'mcp3008'\n lvl2 = 'mcp3008' # Topic lvl2 name can be a duplicate, meaning multiple devices publishing data on the same topic\n publvl3 = MQTT_CLIENT_ID + \"\" # Will be a tag in influxdb. 
Optional to modify it and describe experiment being ran\n data_keys = ['a0f', 'a1f', 'etc'] # If topic lvl2 name repeats would likely want the data_keys to be unique\n setup_device(device, lvl2, publvl3, data_keys)\n deviceD[device]['pubtopic2'] = f\"{MQTT_SUB_LVL1}/nredZCMD/resetstepgauge\"\n deviceD[device]['data2'] = \"resetstepgauge\"\n adcSet[device] = mcp3008(2, 5, 400, 1, 8, adc_logger) # numOfChannels, vref, noiseThreshold (raw ADC), maxInterval = 1sec, and ChipSelect GPIO pin (7 or 8)\n\n #Joystick button setup\n buttonpressed = False\n buttonvalue = 1\n jsbutton = 15\n #GPIO.setup(jsbutton, GPIO.IN, pull_up_down=GPIO.PUD_UP) \n #GPIO.add_event_detect(jsbutton, GPIO.BOTH, callback=button_callback)\n\n #------------#\n device = 'servoAngle'\n lvl2 = 'servo'\n publvl3 = MQTT_CLIENT_ID + \"\"\n data_keys = ['NA'] # Servo currently does not publish any data back to mqtt\n setup_device(device, lvl2, publvl3, data_keys)\n servoID, mqtt_servoID = 0, 0 # Initialize. Updated in mqtt on_message\n numservos = 16 # Number of servo channels to pass to ServoKit. Must be 8 or 16.\n mqtt_servoAngle = 90\n deviceD[device] = [90]*numservos # Initialize at 90°\n i2caddr = 0x40\n # Other arguments reference_clock_speed=25000000, frequency=50) 50Hz = 20ms period\n pca9685 = [ServoKit(address=i2caddr, channels=numservos)]*numservos\n #main_logger.info(('Servo PCA9685 Kit on address:{0} {1}'.format(i2caddr, pca9685)))\n\n logger_stepper = setup_logging(path.dirname(path.abspath(__file__)), 'custom', 'stepper', log_level=logging.INFO, mode=1)\n device = 'stepper'\n lvl2 = 'stepper'\n publvl3 = MQTT_CLIENT_ID + \"\"\n data_keys = ['delayf', 'cpufreq0i', 'main_msf', 'looptime0f', 'looptime1f', 'steps0i', 'steps1i', 'rpm0f', 'rpm1f', 'speed0i', 'speed1i']\n m1pins = [12, 16, 20, 21]\n m2pins = [19, 13, 6, 5]\n mqtt_stepreset = False # used to reset steps thru nodered gui\n mqtt_controlsD = {\"delay\":[0.8,1.0], \"speed\":[3,3], \"mode\":[0,0], \"inverse\":[False,True], \"step\":[2038, 2038], \"startstep\":[0,0]}\n setup_device(device, lvl2, publvl3, data_keys)\n deviceD[device]['pubtopic2'] = f\"{MQTT_SUB_LVL1}/nredZCMD/resetstepgauge\" # Extra topic used to tell node red to reset the step gauges\n deviceD[device]['data2'] = \"resetstepgauge\"\n motor = Stepper(m1pins, m2pins, logger=logger_stepper) # can enter 1 to 2 list of pins (up to 2 motors)\n\n main_logger.info(\"ALL DICTIONARIES\")\n for device, item in deviceD.items():\n main_logger.info(device)\n if isinstance(item, dict):\n for key in item:\n main_logger.info(\"\\t{0}:{1}\".format(key, item[key]))\n else: main_logger.info(\"\\t{0}\".format(item))\n\n print(\"\\n\")\n for logger in _loggers:\n main_logger.info('{0} is set at level: {1}'.format(logger, logger.getEffectiveLevel()))\n\n #==== START/BIND MQTT FUNCTIONS ====#\n # Create a couple flags to handle a failed attempt at connecting. 
If user/password is wrong we want to stop the loop.\n mqtt.Client.connected = False # Flag for initial connection (different than mqtt.Client.is_connected)\n mqtt.Client.failed_connection = False # Flag for failed initial connection\n # Create our mqtt_client object and bind/link to our callback functions\n mqtt_client = mqtt.Client(MQTT_CLIENT_ID) # Create mqtt_client object\n mqtt_client.username_pw_set(MQTT_USER, MQTT_PASSWORD) # Need user/password to connect to broker\n mqtt_client.on_connect = on_connect # Bind on connect\n mqtt_client.on_disconnect = on_disconnect # Bind on disconnect\n mqtt_client.on_message = on_message # Bind on message\n mqtt_client.on_publish = on_publish # Bind on publish\n main_logger.info(\"Connecting to: {0}\".format(MQTT_SERVER))\n mqtt_client.connect(MQTT_SERVER, 1883) # Connect to mqtt broker. This is a blocking function. Script will stop while connecting.\n mqtt_client.loop_start() # Start monitoring loop as asynchronous. Starts a new thread and will process incoming/outgoing messages.\n # Monitor if we're in process of connecting or if the connection failed\n while not mqtt_client.connected and not mqtt_client.failed_connection:\n main_logger.info(\"Waiting\")\n sleep(1)\n if mqtt_client.failed_connection: # If connection failed then stop the loop and main program. Use the rc code to trouble shoot\n mqtt_client.loop_stop()\n sys.exit(f\"{pcolor.RED}Connection failed. Check rc code to trouble shoot{pcolor.ENDC}\")\n \n #==== MAIN LOOP ====================#\n # MQTT setup is successful. Initialize dictionaries and start the main loop. \n t0_sec = perf_counter() # sec Counter for getting stepper data. Future feature - update interval in node-red dashboard to link to perf_counter\n msginterval = 0.5 # Adjust interval to increase/decrease number of mqtt updates.\n t0loop_ns = perf_counter_ns() # nanosec Counter for how long it takes to run motor and get messages\n outgoingD = {}\n try:\n while True:\n\n t0main_ns = perf_counter_ns() - t0loop_ns # Monitor how long the main/total loop takes\n t0loop_ns = perf_counter_ns()\n\n if (perf_counter() - t0_sec) > msginterval: # getdata() from devices on msginterval (also publish data). Note - Does not affect on_message/mqtt data. 
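# --- Added illustrative sketch (not in the original source) ---
# The main loop below polls and publishes on a fixed interval instead of
# sleeping the whole loop, so the paho network thread and on_message callbacks
# stay responsive. Distilled timer pattern:
from time import perf_counter

t0, interval = perf_counter(), 0.5
for _ in range(1000000):          # stand-in for 'while True'
    if perf_counter() - t0 > interval:
        # ... read sensors and publish here ...
        t0 = perf_counter()       # restart the interval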
on_message runs in parallel\n for device, ina219 in ina219Set.items():\n deviceD[device]['data'] = ina219.getdata()\n main_logger.debug(\"{} {}\".format(deviceD[device]['pubtopic'], json.dumps(deviceD[device]['data'])))\n #mqtt_client.publish(deviceD[device]['pubtopic'], json.dumps(deviceD[device]['data'])) # publish voltage values\n for device, adc in adcSet.items():\n deviceD[device]['data'] = adc.getdata() # Get the readings from each adc\n if deviceD[device]['data'] is not None:\n main_logger.debug(\"{} {}\".format(deviceD[device]['pubtopic'], json.dumps(deviceD[device]['data'])))\n #mqtt_client.publish(deviceD[device]['pubtopic'], json.dumps(deviceD[device]['data']))\n # For joystick with button\n if buttonpressed or deviceD[device]['data'] is not None:\n if deviceD[device]['data'] is not None:\n outgoingD = deviceD[device]['data']\n outgoingD['buttoni'] = buttonvalue\n #mqtt_client.publish(deviceD[device]['pubtopic'], json.dumps(outgoingD)) # publish voltage values\n buttonpressed = False\n main_logger.debug(outgoingD)\n deviceD['stepper']['data'] = motor.getdata()\n if deviceD['stepper']['data'] != \"na\":\n deviceD['stepper']['data'][\"main_msf\"] = t0main_ns/1000000 # Monitor the main/total loop time\n mqtt_client.publish(deviceD['stepper']['pubtopic'], json.dumps(deviceD['stepper']['data'])) \n if mqtt_stepreset:\n motor.resetsteps()\n mqtt_stepreset = False\n mqtt_client.publish(deviceD['stepper']['pubtopic2'], json.dumps(deviceD['stepper']['data2']))\n t0_sec = perf_counter()\n for device, rotenc in rotaryEncoderSet.items(): # ** Remove rotary encoder from msginterval loop for real application\n deviceD[device]['data'] = rotenc.getdata()\n if deviceD[device]['data'] is not None:\n main_logger.debug(\"{} {}\".format(deviceD[device]['pubtopic'], json.dumps(deviceD[device]['data'])))\n #mqtt_client.publish(deviceD[device]['pubtopic'], json.dumps(deviceD[device]['data']))\n\n motor_controls = mqtt_controlsD # Get updated motor controls from mqtt. 
Could change this to another source\n            motor.step(motor_controls)              # Pass instructions for stepper motor for testing\n\n            servoID = mqtt_servoID                  # Servo commands coming from mqtt\n            deviceD['servoAngle'][servoID] = mqtt_servoAngle  # But could change data source to something other than mqtt\n            pca9685[servoID].servo(deviceD['servoAngle'][mqtt_servoID])   # Set the servo angle\n\n            #sleep(1)\n    except KeyboardInterrupt:\n        main_logger.info(f\"{pcolor.YELLOW}Exit with ctrl-C{pcolor.ENDC}\")\n    finally:\n        #GPIO.cleanup()\n        main_logger.info(f\"{pcolor.CYAN}GPIO cleaned up{pcolor.ENDC}\")\n\nif __name__ == \"__main__\":\n    main()","sub_path":"demoMQTT.py","file_name":"demoMQTT.py","file_ext":"py","file_size_in_byte":28068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"214037341","text":"# -*- coding: utf-8 -*-\n# ---\n# @Institution: Automation,T&E,Turing,HQ\n# @Time: 2021/6/6\n# @File: test_byod_provisioning_tests_11_2_3\n# @Author: pengleiyang\n# @E-mail: pengleiyang@huaqin.com\n# @Desc: automated test script for test_byod_provisioning_tests_11_2_3\n# @update: Record important updates\n# ---\n\nimport os\nimport unittest\nimport time\nimport warnings\nimport uiautomator2 as u2\nfrom utils.device_info_util.device_info import DeviceInfo\n\n\nclass CustomTerms(unittest.TestCase):\n    def setUp(self):\n        warnings.simplefilter('ignore', ResourceWarning)  # suppress warning messages\n        print(\"Test started\")\n        print(\"Getting phone device info!\")\n        self.device = DeviceInfo()\n        self.devices = self.device.check_device()[0]\n        device = self.devices[0]  # for now, assume only one phone is connected\n        print(device)\n        self.d = u2.connect(device)  # connect to the device under test\n        self.d.unlock()\n        print(\"Unlocked successfully\")\n\n    def test_custom_terms(self):\n        print(\"Launching the CTS Verifier app!\")\n        os.system(\"adb shell am start -n com.android.cts.verifier/com.android.cts.verifier.CtsVerifierActivity\")\n        self.assertFalse(self.d.exists(text=\"Folded\") and self.d.exists(resourceId=\"com.android.cts.verifier:id/export\")\n                         , \"CTS Verifier is not on the main screen, please check\")\n        for i in range(10):\n            if self.d.exists(text=\"BYOD Provisioning tests\"):\n                self.d(text=\"BYOD Provisioning tests\").click()\n                time.sleep(2)\n                self.d(text=\"Custom terms\").click()\n                time.sleep(2)\n                self.d(text=\"GO\").click()\n                time.sleep(2)\n                self.d(text=\"View terms\").click()\n                time.sleep(2)\n                self.d(text=\"Company ABC\").click()\n                time.sleep(2)\n                if self.d.exists(text=\"Company Terms Content. 
\"):\n print(\"验证段落内容 Company Terms Content存在,测试pass!\")\n self.d.press(\"back\")\n time.sleep(2)\n self.d.press(\"back\")\n time.sleep(2)\n if self.d.exists(text=\"Stop setting up?\"):\n self.d(text=\"YES\").click()\n time.sleep(2)\n self.d(resourceId=\"com.android.cts.verifier:id/pass_button\").click()\n else:\n self.d.press(\"back\")\n time.sleep(2)\n self.d.press(\"back\")\n time.sleep(2)\n if self.d.exists(text=\"Stop setting up?\"):\n self.d(text=\"YES\").click()\n time.sleep(2)\n self.d(resourceId=\"com.android.cts.verifier:id/fail_button\").click()\n self.assertFalse(\"验证段落内容 Company Terms Content不存在,测试fail!\")\n break\n else:\n self.d.swipe(0.5, 0.9, 0.5, 0.2)\n time.sleep(2)\n\n def tearDown(self):\n print(\"测试结束,测试步骤回收!\")\n self.d.app_stop_all() # 停止所有应用\n self.d.press(\"home\")\n self.d.screen_off() # 锁屏","sub_path":"testcases/managedprovisioning/byodprovisioningtests/test_byod_provisioning_tests_11_2_3.py","file_name":"test_byod_provisioning_tests_11_2_3.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"303993170","text":"import inspect\nimport operator\nimport six\n\n\nclass ChoiceMetaclass(type):\n def __init__(cls, name, typeof, other):\n # {value: (display_name, is_specified))\n cls._data = {}\n cls._order_key = 0 if (getattr(cls, \"_order_by\", \"value\") == \"value\") else 1\n\n for name, value in inspect.getmembers(cls):\n if not name.startswith(\"_\") and \\\n not (inspect.isfunction(value) or inspect.ismethod(value) or type(value) is classmethod):\n if isinstance(value, tuple) and len(value) > 1:\n value, display_name, is_specified = value[0], value[1], True\n else:\n generated_name = \" \".join([x.capitalize() for x in name.replace(\"_\", \" \").split(\" \")])\n value, display_name, is_specified = value, generated_name, False\n\n cls._data[value] = display_name\n setattr(cls, name, value)\n\n # So we need to access the ._data attribute of any parent classes so we can access the\n if hasattr(cls.__base__, \"_data\"):\n data = cls.__base__._data\n # Go and patch up our values\n for value, name_data in data.items():\n cls._data[value] = name_data\n\n def __iter__(self):\n for value, data in sorted(self._data.items(), key=lambda i: i[0] if self._order_key == 0 else i[1]):\n yield value, data\n\n\nclass ChoiceBase(object):\n pass\n\n\nclass Choice(six.with_metaclass(ChoiceMetaclass, ChoiceBase)):\n _order_by = \"value\"\n\n def __iter__(self):\n for value, data in sorted(self._data.items(), key=lambda i: i[0] if self._order_key == 0 else i[1]):\n yield value, data\n\n @classmethod\n def GetByValue(cls, value):\n return dict(cls)[value]\n\n @classmethod\n def GetByName(cls, name):\n if name is None:\n return None\n\n for value, data in cls._data.items():\n if name.lower() == data.lower():\n return value\n\n for dirname in dir(cls):\n if dirname.lower() == name.lower():\n return getattr(cls, dirname)\n\n return None","sub_path":"django_choice_object/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"622654498","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .forms import *\nfrom core.models import *\n\n# The Registrar's Home Screen: Add a team or edit a team.\ndef home(request):\n context = {'no_home': True}\n return render(request, 'registrar/home.html', context)\n\n# Add a team to the database 
\n# Add a team to the database using a form.\ndef addteam(request):\n    if request.method == 'POST':\n        form = RegistrationForm(request.POST)\n        if form.is_valid():\n            team = Team()\n            team.team_name = form.data['team_name']\n            team.entry_name = form.data['entry_name']\n            team.category = Category.objects.get(name=form.data['category'])\n            team.registrar = request.user.registrar\n            team.save()\n            return redirect('/registrar/showqr/' + team.team_name)\n    else:\n        form = RegistrationForm()\n    context = {'form': form}\n    return render(request, 'registrar/addteam.html', context)\n\n# Display the teams separated by whether or not this registrar was the last to update them.\ndef teams(request):\n    myteams = Team.objects.filter(registrar=request.user.registrar)\n    otherteams = Team.objects.exclude(registrar=request.user.registrar)\n    context = {'myteams': myteams, 'otherteams': otherteams}\n    return render(request, 'registrar/teams.html', context)\n\n# Edit the team using a form. If the team name changes, create a new entity in the database and delete the old.\ndef editteam(request, team_name):\n    try:\n        team = Team.objects.get(team_name=team_name)\n        if request.method == 'POST':\n            form = RegistrationForm(request.POST)\n            if form.is_valid():\n                if form.data['team_name'] != team_name:\n                    team.delete()\n                    team = Team()\n                team.team_name = form.data['team_name']\n                team.entry_name = form.data['entry_name']\n                team.category = Category.objects.get(name=form.data['category'])\n                team.registrar = request.user.registrar\n                team.save()\n                return redirect('/registrar/showqr/' + team.team_name)\n        initial = {'team_name': team.team_name, 'category': team.category.name, 'entry_name': team.entry_name}\n        form = RegistrationForm(initial=initial)\n        context = {'form': form}\n        return render(request, 'registrar/addteam.html', context)\n    except:\n        return redirect('addteam')\n\n# Display the QR code for the specified team.\ndef showqr(request, team):\n    teamurl = \"https://api.qrserver.com/v1/create-qr-code/?data=\" + team + \"&size=400x400\"\n    context = {'header': 'Team ' + team, 'qrurl': teamurl, 'redirect': '/registrar/home'}\n    return render(request, 'core/showqr.html', context)\n","sub_path":"Code/heartland/registrar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"128577967","text":" # Note that input contains strings, not integers. 
\"1\" not 1\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n if not grid or not grid[0]:\n return 0\n \n \n lands = self.find_all_lands(grid)\n visited = set()\n count = 0\n while lands:\n x, y = lands.pop()\n visited.add((x,y))\n self.bfs(grid, x, y, lands, visited)\n count += 1\n \n return count\n \n def is_valid(self, grid, x, y, visited):\n return x >= 0 and x < len(grid) and y >= 0 and y < len(grid[0]) and grid[x][y] == \"1\" and (x,y) not in visited\n \n def find_all_lands(self, grid):\n s = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n s.add((i,j))\n return s\n \n def bfs(self, grid, x, y, lands, visited):\n q = [(x,y)]\n DX, DY = [0,1,0,-1], [1,0,-1,0]\n while q:\n x, y = q.pop(0)\n for dx, dy in zip(DX, DY):\n nx, ny = x+ dx, y + dy\n if self.is_valid(grid, nx, ny, visited):\n visited.add((nx, ny))\n q.append((nx, ny))\n lands.remove((nx, ny))","sub_path":"leetcode/num_of_islands.py","file_name":"num_of_islands.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252838657","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom embedder import embedder\nfrom layers import MultiDiscriminator, Attention, RGCN, HRGCN, AvgReadout\nimport numpy as np\nimport datetime\nfrom evaluate import evaluate\nfrom utils.process import dropout_adj, drop_feature, one_one_negative_sampling, \\\n one_one_rel_negative_sampling, get_adj_idx, get_adj_batch\n\n\nclass CMVHG(embedder):\n def __init__(self, args):\n embedder.__init__(self, args)\n self.args = args\n\n def training(self):\n dt = datetime.datetime.now()\n date = f\"{dt.year}_{dt.month}_{dt.day}_{dt.time()}\"\n feature = self.features[0].to(self.args.device)\n adj = [adj_.to(self.args.device) for adj_ in self.adj]\n adj_idx = get_adj_idx(adj, self.args.dropadj_1)\n\n model = modeler(self.args).to(self.args.device)\n optimiser = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.l2_coef)\n\n def self_train():\n b_xent = nn.BCEWithLogitsLoss()\n xent = nn.CrossEntropyLoss()\n cnt_wait = 0\n best = 1e9\n for epoch in range(self.args.nb_epochs):\n lbl_1 = torch.ones(1, self.args.nb_nodes * 2)\n lbl_2 = torch.zeros(1, self.args.nb_nodes * 2)\n lbl = torch.cat((lbl_1, lbl_2), 1).to(self.args.device)\n\n # corruption structure: drop adj\n adjs_1 = [dropout_adj(a, self.args.dropadj_1) for a in adj]\n adjs_2 = [dropout_adj(a, self.args.dropadj_2) for a in adj]\n\n # corruption features: drop feats\n ft_1 = drop_feature(feature, self.args.dropfeat_1)\n ft_2 = drop_feature(feature, self.args.dropfeat_2)\n fts = [ft_1, ft_2]\n\n # negative samples: random feature permutation\n idx = np.random.permutation(self.args.nb_nodes)\n shuf_fts = feature[:, idx, :]\n shuf_fts = shuf_fts.to(self.args.device)\n\n # train\n batch_idxs = get_adj_batch(adj_idx, self.args.sample_size)\n model.train()\n optimiser.zero_grad()\n result = model(fts, shuf_fts, adjs_1, adjs_2, self.args.sparse, batch_idxs)\n\n # local-global contrastive loss\n logits = result['logits']\n xent_loss = None\n if torch.is_tensor(logits):\n xent_loss = b_xent(logits, lbl)\n else:\n for view_idx, logit in enumerate(logits):\n if xent_loss is None:\n xent_loss = b_xent(logit, lbl)\n else:\n xent_loss += b_xent(logit, lbl)\n loss = xent_loss\n\n # total loss\n loss += self.args.reg_coef * result['reg_loss'] + self.args.w_node * result['node_loss'] + self.args.w_rel * 
result['rel_loss']\n\n if loss < best:\n best = loss\n cnt_wait = 0\n torch.save(model.state_dict(),\n 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, date))\n else:\n cnt_wait += 1\n if cnt_wait == self.args.patience:\n break\n\n loss.backward()\n optimiser.step()\n\n self_train()\n\n # Evaluation\n model.load_state_dict(torch.load(\n 'saved_model/best_{}_{}_{}.pkl'.format(self.args.dataset, self.args.embedder, date)))\n model.eval()\n embeds = model.embed(feature, adj, self.args.sparse)\n res = evaluate(embeds, self.idx_train, self.idx_val, self.idx_test, self.labels,\n self.args.device)\n return res\n\n\nclass modeler(nn.Module):\n def __init__(self, args):\n super(modeler, self).__init__()\n self.args = args\n self.rgcn_conv = nn.ModuleList(\n [RGCN(args.ft_size, args.hid_units, args.activation, args.drop_prob,\n num_rels=args.nb_graphs, num_bases=args.bases, isBias=args.isBias) for _ in\n range(args.num_layers)])\n self.hrgcn_conv = nn.ModuleList(\n [HRGCN(args.ft_size, args.hid_units, args.activation, args.drop_prob,\n num_rels=self.args.nb_graphs, num_bases=self.args.bases, isBias=args.isBias) for _ in\n range(args.num_layers)])\n self.R = nn.Parameter(torch.FloatTensor(args.nb_graphs, args.hid_units))\n self.cls_rel = torch.nn.Linear(args.hid_units, 1)\n\n # lg parameters\n self.read = AvgReadout()\n self.sigm = nn.Sigmoid()\n self.local_global_disc = MultiDiscriminator(args.hid_units)\n\n # ll parameters\n n_proj_h = args.hid_units\n self.fc1 = torch.nn.Linear(args.hid_units, n_proj_h)\n self.fc2 = torch.nn.Linear(n_proj_h, args.hid_units)\n self.f_k = nn.ModuleList([nn.Bilinear(args.hid_units, args.hid_units, 1) for _ in range(self.args.nb_graphs)])\n self.f_k_node = nn.Bilinear(args.hid_units, args.hid_units, 1)\n for m in self.modules():\n self.weights_init(m)\n\n if args.isAttn:\n self.attn = nn.ModuleList([Attention(args) for _ in range(args.nheads)])\n\n self.init_weight()\n\n def weights_init(self, m):\n if isinstance(m, nn.Bilinear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def init_weight(self):\n nn.init.xavier_normal_(self.R)\n\n def forward(self, seq1, seq2, adj, adj_2, sparse, batchs=None):\n '''\n seq1: positive samples\n seq2: negative samples\n '''\n result = {}\n logits = []\n\n if len(seq1) == 2:\n seq1_1 = seq1[0]\n seq1_2 = seq1[1]\n seq2_1 = seq2\n seq2_2 = seq2\n else:\n seq1_1 = seq1\n seq1_2 = seq1\n seq2_1 = seq2\n seq2_2 = seq2\n\n # graph encoders\n h_pos_1 = self.rgcn_conv[0](seq1_1, adj) # pos emb: view1\n h_pos_2 = self.hrgcn_conv[0](seq1_2, adj_2) # pos emb: view2\n h_neg_1 = self.rgcn_conv[0](seq2_1, adj) # neg emb: view1\n h_neg_2 = self.hrgcn_conv[0](seq2_2, adj_2) # neg emb: view2\n for i in range(1, self.args.num_layers):\n h_pos_1 = self.rgcn_conv[i](h_pos_1, adj) # pos emb: view1\n h_pos_2 = self.hrgcn_conv[i](h_pos_2, adj_2) # pos emb: view2\n h_neg_1 = self.rgcn_conv[i](h_neg_1, adj) # neg emb: view1\n h_neg_2 = self.hrgcn_conv[i](h_neg_2, adj_2) # neg emb: view2\n c_pos_1 = self.sigm(self.read(h_pos_1))\n c_pos_2 = self.sigm(self.read(h_pos_2))\n\n # Attention or not\n if self.args.isAttn:\n h_pos_1_all_lst = []\n h_neg_1_all_lst = []\n h_pos_2_all_lst = []\n h_neg_2_all_lst = []\n c_1_all_lst = []\n c_2_all_lst = []\n\n for h_idx in range(self.args.nheads):\n h_pos_1_all_, h_neg_1_all_, c_1_all_ = self.attn[h_idx](h_pos_1, h_neg_1, c_pos_1)\n h_pos_2_all_, h_neg_2_all_, c_2_all_ = self.attn[h_idx](h_pos_2, h_neg_2, c_pos_2)\n 
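# collect the per-head outputs here; they are mean-pooled across all attention heads right after this loop (editor's note)\n 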
h_pos_1_all_lst.append(h_pos_1_all_)\n h_neg_1_all_lst.append(h_neg_1_all_)\n h_pos_2_all_lst.append(h_pos_2_all_)\n h_neg_2_all_lst.append(h_neg_2_all_)\n c_1_all_lst.append(c_1_all_)\n c_2_all_lst.append(c_2_all_)\n\n h_pos_1_all = torch.mean(torch.cat(h_pos_1_all_lst, 0), 0).unsqueeze(0)\n h_neg_1_all = torch.mean(torch.cat(h_neg_1_all_lst, 0), 0).unsqueeze(0)\n h_pos_2_all = torch.mean(torch.cat(h_pos_2_all_lst, 0), 0).unsqueeze(0)\n h_neg_2_all = torch.mean(torch.cat(h_neg_2_all_lst, 0), 0).unsqueeze(0)\n c_1_all = torch.mean(torch.cat(c_1_all_lst, 0), 0).unsqueeze(0)\n c_2_all = torch.mean(torch.cat(c_2_all_lst, 0), 0).unsqueeze(0)\n else:\n h_pos_1_all = torch.mean(h_pos_1, 0).unsqueeze(0)\n h_pos_2_all = torch.mean(h_pos_2, 0).unsqueeze(0)\n h_neg_1_all = torch.mean(h_neg_1, 0).unsqueeze(0)\n h_neg_2_all = torch.mean(h_neg_2, 0).unsqueeze(0)\n c_1_all = torch.mean(c_pos_1, 0).unsqueeze(0)\n c_2_all = torch.mean(c_pos_2, 0).unsqueeze(0)\n\n # local-global graph loss\n logit = self.local_global_disc(c_1_all, c_2_all,\n h_pos_1_all, h_pos_2_all,\n h_neg_1_all, h_neg_2_all)\n logits.append(logit)\n\n reg_loss = 0.0\n h_pos_all = (h_pos_1_all + h_pos_2_all) * 0.5\n for i in range(self.args.nb_graphs):\n\n # local-global subgraph loss\n logit = self.local_global_disc(torch.unsqueeze(c_pos_1[i], 0), torch.unsqueeze(c_pos_2[i], 0),\n torch.unsqueeze(h_pos_1[i], 0), torch.unsqueeze(h_pos_2[i], 0),\n torch.unsqueeze(h_neg_1[i], 0), torch.unsqueeze(h_neg_2[i], 0))\n logits.append(logit)\n\n # reg loss\n h_pos = (h_pos_1[i] + h_pos_2[i]) * 0.5\n h_neg = (h_neg_1[i] + h_neg_2[i]) * 0.5\n pos_reg_loss = ((h_pos_all - h_pos) ** 2).sum()\n neg_reg_loss = ((h_pos_all - h_neg) ** 2).sum()\n reg_loss = reg_loss + pos_reg_loss - neg_reg_loss\n\n result['logits'] = logits\n result['reg_loss'] = reg_loss\n\n # local-local contrastive loss\n h_view1 = self.projection(torch.squeeze(h_pos_1_all))\n h_view2 = self.projection(torch.squeeze(h_pos_2_all))\n\n node_loss = self.node_cross_entropy_loss(adj, h_view1, h_view2, batchs)\n result['node_loss'] = node_loss\n\n rel_loss = self.rel_cross_entropy_loss(adj, h_view1, h_view2, batchs)\n result['rel_loss'] = rel_loss\n\n return result\n\n def embed(self, seq1, adj, sparse):\n h_1 = self.rgcn_conv[0](seq1, adj) # pos emb: view1\n h_2 = self.hrgcn_conv[0](seq1, adj) # pos emb: view2\n for i in range(1, self.args.num_layers):\n h_1 = self.rgcn_conv[i](h_1, adj)\n h_2 = self.hrgcn_conv[i](h_2, adj)\n\n if self.args.isAttn:\n h_1_all_lst = []\n h_2_all_lst = []\n for h_idx in range(self.args.nheads):\n h_1_all_ = self.attn[h_idx](h_1)\n h_2_all_ = self.attn[h_idx](h_2)\n h_1_all_lst.append(h_1_all_)\n h_2_all_lst.append(h_2_all_)\n h_1_all = torch.mean(torch.cat(h_1_all_lst, 0), 0).unsqueeze(0)\n h_2_all = torch.mean(torch.cat(h_2_all_lst, 0), 0).unsqueeze(0)\n else:\n h_1_all = torch.mean(h_1, 0).unsqueeze(0)\n h_2_all = torch.mean(h_2, 0).unsqueeze(0)\n\n h_all = (h_1_all + h_2_all) * 0.5\n return h_all.detach()\n\n def projection(self, z: torch.Tensor) -> torch.Tensor:\n z = F.elu(self.fc1(z))\n return self.fc2(z)\n\n # node_loss\n def node_sim(self, z1: torch.Tensor, z2: torch.Tensor):\n if self.args.sigm:\n res = self.sigm(torch.squeeze(self.f_k_node(z1, z2)))\n else:\n res = torch.squeeze(self.f_k_node(z1, z2))\n return res\n\n def node_bilinear_logit(self, i, j, k, z1: torch.Tensor, z2: torch.Tensor):\n k = torch.squeeze(k)\n\n intra_pos_logit = self.node_sim(z1[i], z1[j]).view(-1, )\n inter_pos_logit = self.node_sim(z1[i], z2[j]).view(-1, )\n\n 
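# negative logits pair the anchor node i with the sampled non-neighbour k, both within one view and across views (editor's note)\n 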
intra_neg_logit = self.node_sim(z1[i], z1[k]).view(-1, )\n inter_neg_logit = self.node_sim(z1[i], z2[k]).view(-1, )\n\n logit = torch.cat([intra_pos_logit, inter_pos_logit, intra_neg_logit, inter_neg_logit], dim=0)\n\n return logit\n\n def node_cross_entropy_loss(self, adjs, z1: torch.Tensor, z2: torch.Tensor, batchs=None, mean: bool = True):\n i, j, k = one_one_negative_sampling(adjs, batchs, 1)\n loss = 0.0\n b_xent = nn.BCEWithLogitsLoss()\n for r in range(self.args.nb_graphs):\n lbl_1 = torch.ones(1, i[r].size(0) * 2)\n lbl_2 = torch.zeros(1, i[r].size(0) * 2)\n lbl = torch.cat((lbl_1, lbl_2), 1).to(self.args.device)\n\n logit = self.node_bilinear_logit(i[r], j[r], k[r], z1, z2)\n logit_ = self.node_bilinear_logit(i[r], j[r], k[r], z2, z1)\n logit = (logit + logit_) * 0.5\n logit = torch.squeeze(logit).unsqueeze(0)\n loss += b_xent(logit, lbl)\n\n return loss\n\n # rel_loss\n def rel_sim(self, r, z1: torch.Tensor, z2: torch.Tensor):\n if self.args.sigm:\n res = self.sigm(torch.squeeze(self.f_k[r](z1, z2)))\n else:\n res = torch.squeeze(self.f_k[r](z1, z2))\n return res\n\n def rel_bilinear_logit(self, r, num_rel, i, j, rs, z1, z2):\n intra_pos_logit = self.rel_sim(r, z1[i], z1[j]).view(-1, )\n inter_pos_logit = self.rel_sim(r, z1[i], z2[j]).view(-1, )\n\n neg_logit = []\n for _r in range(num_rel):\n if _r == r:\n continue\n mask_r = torch.eq(rs, _r)\n rest = mask_r.nonzero(as_tuple=False).view(-1)\n if rest.numel() > 0:\n intra_neg_logit = self.rel_sim(_r, z1[i[mask_r]], z1[j[mask_r]]).view(-1, )\n inter_neg_logit = self.rel_sim(_r, z1[i[mask_r]], z2[j[mask_r]]).view(-1, )\n neg_logit.append(intra_neg_logit)\n neg_logit.append(inter_neg_logit)\n\n neg_logit = torch.cat(neg_logit, dim=0)\n logit = torch.cat([intra_pos_logit, inter_pos_logit, neg_logit], dim=0)\n\n return logit\n\n def rel_cross_entropy_loss(self, adjs, z1: torch.Tensor, z2: torch.Tensor, batchs, mean: bool = True):\n i, j, rs = one_one_rel_negative_sampling(adjs, batchs)\n loss = 0.0\n b_xent = nn.BCEWithLogitsLoss()\n for r in range(self.args.nb_graphs):\n if len(i[r]) == 0:\n continue\n\n lbl_1 = torch.ones(1, i[r].size(0) * 2)\n lbl_2 = torch.zeros(1, i[r].size(0) * 2)\n lbl = torch.cat((lbl_1, lbl_2), 1).to(self.args.device)\n\n logit = self.rel_bilinear_logit(r, self.args.nb_graphs, i[r], j[r], rs[r], z1, z2)\n logit_ = self.rel_bilinear_logit(r, self.args.nb_graphs, i[r], j[r], rs[r], z2, z1)\n logit = (logit + logit_) * 0.5\n logit = torch.squeeze(logit).unsqueeze(0)\n loss += b_xent(logit, lbl)\n\n return loss\n","sub_path":"models/CMVHG.py","file_name":"CMVHG.py","file_ext":"py","file_size_in_byte":14669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6244856","text":"import bpy\nfrom bpy.props import *\nfrom ... 
base_types.node import AnimationNode\n\nallowedSocketTypes = {\n \"NodeSocketVector\" : \"an_VectorSocket\",\n \"NodeSocketColor\" : \"an_ColorSocket\",\n \"NodeSocketFloatFactor\" : \"an_FloatSocket\",\n \"NodeSocketFloat\" : \"an_FloatSocket\" }\n\n\nclass CyclesMaterialOutputNode(bpy.types.Node, AnimationNode):\n bl_idname = \"an_CyclesMaterialOutputNode\"\n bl_label = \"Cycles Material Output\"\n\n def getPossibleSocketItems(self, context):\n sockets = self.getPossibleSockets()\n items = []\n for socket in sockets:\n if socket.bl_idname in allowedSocketTypes.keys():\n items.append((socket.identifier, socket.identifier, \"\"))\n return items\n\n def getPossibleSockets(self):\n node = self.getSelectedNode()\n identifiers = []\n if node is not None:\n for socket in node.inputs:\n if socket.bl_idname in allowedSocketTypes.keys():\n identifiers.append(socket)\n return identifiers\n\n def selectedSocketChanged(self, context):\n self.socketIsChanging = True\n self.setInputSocket()\n self.socketIsChanging = False\n\n materialName = StringProperty(update = selectedSocketChanged)\n nodeName = StringProperty(update = selectedSocketChanged)\n socketIdentifier = EnumProperty(items = getPossibleSocketItems, name = \"Socket\", update = selectedSocketChanged)\n socketIsChanging = BoolProperty()\n\n def create(self):\n self.inputs.new(\"an_GenericSocket\", \"Data\", \"data\")\n\n def draw(self, layout):\n layout.prop_search(self, 'materialName', bpy.data, 'materials', text='', icon='MATERIAL_DATA')\n material = bpy.data.materials.get(self.materialName)\n if material is not None:\n nodeTree = material.node_tree\n layout.prop_search(self, 'nodeName', nodeTree, 'nodes', text='', icon='NODE')\n node = material.node_tree.nodes.get(self.nodeName)\n if node is not None:\n layout.prop(self, \"socketIdentifier\", text = \"\")\n\n def execute(self, data):\n socket = self.getSelectedSocket()\n if socket is not None:\n try: socket.default_value = data\n except: pass\n\n def edit(self):\n socket = self.inputs.get(\"Data\")\n if socket is not None and not self.socketIsChanging:\n if len(socket.links) > 0:\n fromType = self.inputs[0].links[0].from_socket.bl_idname\n possibleIdentifiers = self.getInputIdentifiersFromSocketType(fromType)\n if self.inputs[\"Data\"].bl_idname != fromType and len(possibleIdentifiers) > 0:\n self.socketIdentifier = possibleIdentifiers[0]\n self.setInputSocket()\n\n def getInputIdentifiersFromSocketType(self, searchType):\n identifiers = []\n sockets = self.getPossibleSockets()\n for socket in sockets:\n if allowedSocketTypes[socket.bl_idname] == searchType:\n identifiers.append(socket.identifier)\n return identifiers\n\n def getSelectedNode(self):\n material = bpy.data.materials.get(self.materialName)\n if material is not None:\n node = material.node_tree.nodes.get(self.nodeName)\n return node\n return None\n\n def getSelectedSocket(self):\n node = self.getSelectedNode()\n if node is not None:\n socket = self.getInputSocketWithIdentifier(node, self.socketIdentifier)\n return socket\n return None\n\n def getInputSocketWithIdentifier(self, node, identifier):\n for socket in node.inputs:\n if socket.identifier == identifier: return socket\n return None\n\n def setInputSocket(self):\n socket = self.getSelectedSocket()\n self.inputs.clear()\n if socket is None:\n self.inputs.new(\"an_GenericSocket\", \"Data\")\n else:\n data = socket.default_value\n self.inputs.new(allowedSocketTypes[socket.bl_idname], \"Data\")\n 
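# carry the material socket's current default value over to the freshly created node input (editor's note)\n 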
self.inputs[\"Data\"].setProperty(data)\n","sub_path":"nodes/material/material_output.py","file_name":"material_output.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"500754788","text":"#!/usr/bin/env python3\n\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\nimport json\nimport os\nimport uuid\n\ndef extract_exif_data(file_name: str) -> dict:\n image_data = { \"filename\": file_name}\n image = Image.open(file_name)\n exifdata = image.getexif()\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n data = exifdata.get(tag_id)\n if isinstance(data, bytes):\n data = data.decode()\n #cleaning step for XP exif data\n data = str(data).replace(\"\\x00\",'')\n image_data[tag] = data\n return image_data\n\ndef main():\n # Original source folder with images\n source_dir = \"Part1/Images/\"\n # Destination folder for converted images\n destination_dir = \"/opt/icons/\"\n count = 0\n\n all_the_data = {}\n for file_name in os.listdir(source_dir):\n exif_data = extract_exif_data(source_dir + file_name)\n all_the_data[str(uuid.uuid4())] = exif_data\n count += 1\n\n #console message for count of pictures\n print(\"number of pictures inspected:\", count)\n\n #write to local file\n destination_file = \"drone_exif_data.json\"\n with open(destination_file, \"w\") as f:\n json.dump(all_the_data, f, ensure_ascii=False, indent=4)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/exif_extractor.py","file_name":"exif_extractor.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"263663997","text":"#!/usr/bin/env python\n\nimport json\nimport logging\nimport os\nimport sys\nfrom contextlib import ExitStack\n\nimport numpy as np\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom senti.rand import set_rng\nfrom senti.score import *\nfrom senti.senti_models import *\nfrom senti.utils import BalancedSlice, FieldExtractor, RepeatSr\n\n\ndef main():\n sys.setrecursionlimit(5000)\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n os.chdir('data/twitter')\n with ExitStack() as stack:\n # load data\n labelled_dir = 'semeval'\n train_sr = stack.enter_context(open('{}/train.json'.format(labelled_dir)))\n train_docs = FieldExtractor(train_sr, 'text')\n train_labels = np.fromiter(FieldExtractor(train_sr, 'label'), 'int32')\n distant_srs = [stack.enter_context(open('emote_{}.txt'.format(i), encoding='utf-8')) for i in [0, 2]]\n distant_docs = BalancedSlice(distant_srs)\n distant_labels = BalancedSlice((RepeatSr(0), RepeatSr(2)))\n unsup_sr = stack.enter_context(open('unsup.txt', encoding='utf-8'))\n unsup_docs = BalancedSlice([unsup_sr])\n dev_sr = stack.enter_context(open('{}/dev.json'.format(labelled_dir)))\n dev_docs = FieldExtractor(dev_sr, 'text')\n dev_labels = FieldExtractor(dev_sr, 'label')\n test_sr = stack.enter_context(open('{}/test.json'.format(labelled_dir)))\n test_docs = FieldExtractor(test_sr, 'text')\n test_labels = FieldExtractor(test_sr, 'label')\n\n # fix seed for reproducibility\n set_rng(np.random.RandomState(1234))\n\n # train\n senti_models = SentiModels(\n unsup_docs, distant_docs, distant_labels, train_docs, train_labels, dev_docs, dev_labels, test_docs\n )\n # pipeline_name, pipeline = senti_models.fit_voting()\n # pipeline_name, pipeline = senti_models.fit_logreg()\n # pipeline_name, pipeline = 
senti_models.fit_word2vec_bayes()\n # pipeline_name, pipeline = senti_models.fit_svm()\n pipeline_name, pipeline = senti_models.fit_nn_word()\n # pipeline_name, pipeline = senti_models.fit_cnn_char()\n # pipeline_name, pipeline = senti_models.fit_cnn_word_char()\n # pipeline_name, pipeline = senti_models.fit_rnn_char_cnn_word()\n\n # test_data = [('dev', dev_docs, dev_labels)]\n test_data = [('dev', dev_docs, dev_labels), ('test', test_docs, test_labels)]\n\n # predict & write results\n classes_ = np.array([0, 1, 2])\n for name, docs, labels in test_data:\n os.makedirs('results/{}'.format(name), exist_ok=True)\n try:\n probs = pipeline.predict_proba(docs)\n except AttributeError:\n probs = LabelBinarizer().fit(classes_).transform(pipeline.predict(docs))\n with open('{}/{}.json'.format(labelled_dir, name)) as sr, \\\n open('results/{}/{}.json'.format(name, pipeline_name), 'w') as results_sr:\n for line, prob in zip(sr, probs):\n results_sr.write(json.dumps({\n 'id': json.loads(line)['id'], 'label': int(classes_[np.argmax(prob)]),\n 'probs': [(c.item(), prob.item()) for c, prob in zip(classes_, prob)]\n }) + '\\n')\n print('{} data: '.format(name))\n labels = np.fromiter(labels, dtype='int32')\n write_score('results/{}/{}'.format(name, pipeline_name), labels, probs, classes_, (0, 2))\n\nif __name__ == '__main__':\n main()\n","sub_path":"senti/bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"548191142","text":"import psycopg2 as pg\r\n\r\n# task variables\r\nstudents = {'Ivanov':(4.98, '01.01.1991'), 'Petrov':(3.79, '02.02.1992')}\r\nstudent = {'Sidorov':(5.0, '03.03.1991')}\r\nstudent_id = 3\r\n\r\n#PostgreSQL class\r\nclass Postgre_tool():\r\n def __init__(self, dbname, user, password):\r\n self.dbname = dbname\r\n self.user = user\r\n self.password = password\r\n \r\n def create(self):\r\n \"\"\"create tables\"\"\"\r\n with pg.connect(dbname = self.dbname, user = self.user, password = self.password) as conn:\r\n with conn.cursor() as cur:\r\n # create table student\r\n cur.execute(\"\"\"\r\n CREATE TABLE IF NOT EXISTS student (\r\n id serial PRIMARY KEY,\r\n name varchar(100),\r\n birth date,\r\n gpa numeric(10, 2),\r\n created timestamp with time zone\r\n );\r\n \"\"\")\r\n # create table course\r\n cur.execute(\"\"\"\r\n CREATE TABLE IF NOT EXISTS course (\r\n id serial PRIMARY KEY,\r\n name varchar(100)\r\n );\r\n \"\"\")\r\n # create M2M table student_course\r\n cur.execute(\"\"\"\r\n CREATE TABLE IF NOT EXISTS student_course (\r\n id serial PRIMARY KEY,\r\n student_id INTEGER REFERENCES student(id) ON DELETE CASCADE,\r\n course_id INTEGER REFERENCES course(id) ON DELETE CASCADE\r\n );\r\n \"\"\")\r\n\r\n def get_students(self, course_id: int):\r\n \"\"\"return students of certain course\"\"\"\r\n with pg.connect(dbname = self.dbname, user = self.user, password = self.password) as conn:\r\n with conn.cursor() as cur:\r\n cur.execute(\"\"\"\r\n select student.name from student\r\n join student_course on student_course.student_id = student.id\r\n WHERE student_course.course_id = (%s)\r\n \"\"\", (course_id,))\r\n result = cur.fetchall()\r\n return result\r\n\r\n def add_students(self, course_id: int, students: dict):\r\n \"\"\"create students and enlist them on course\"\"\"\r\n students_names =[*students]\r\n for student in students_names:\r\n gpa = students[student][0]\r\n birth = students[student][1]\r\n with pg.connect(dbname = self.dbname, user = self.user, password = self.password) as conn:\r\n with conn.cursor() as cur:\r\n cur.execute(\"\"\"\r\n insert into 
student (name, gpa, birth) values (%s, %s, %s) RETURNING id\r\n \"\"\", (student, gpa, birth))\r\n current_id = cur.fetchone()[0]\r\n cur.execute(\"\"\"\r\n insert into student_course (student_id, course_id) values (%s, %s)\r\n \"\"\", (current_id, course_id))\r\n\r\n def add_student(self, student: dict):\r\n \"\"\"create student\"\"\"\r\n name = [*student][0]\r\n gpa = student[name][0]\r\n birth = student[name][1]\r\n with pg.connect(dbname = self.dbname, user = self.user, password = self.password) as conn:\r\n with conn.cursor() as cur:\r\n cur.execute(\"\"\"\r\n insert into student (name, gpa, birth) values (%s, %s, %s) RETURNING id\r\n \"\"\", (name, gpa, birth))\r\n current_id = cur.fetchone()[0]\r\n return current_id\r\n \r\n def get_student(self, student_id: int):\r\n \"\"\"get student\"\"\"\r\n with pg.connect(dbname = self.dbname, user = self.user, password = self.password) as conn:\r\n with conn.cursor() as cur:\r\n cur.execute(\"\"\"\r\n select * from student WHERE id = %s\r\n \"\"\", (student_id,))\r\n result = cur.fetchone()\r\n return result\r\n\r\ndef main():\r\n course_id = int(input('Please enter course id: '))\r\n postgre_tool = Postgre_tool('netology_db', 'netology_user', None)\r\n postgre_tool.create()\r\n postgre_tool.add_students(course_id, students)\r\n enrolled = postgre_tool.get_students(course_id)\r\n print(f'Students on course {course_id}: {enrolled}')\r\n current_id = postgre_tool.add_student(student)\r\n print(f'Student was added. Current id: {current_id}')\r\n found = postgre_tool.get_student(student_id)\r\n print(f'Student with id {student_id}: {found}')\r\n\r\nif __name__=='__main__':\r\n main()\r\n \r\n","sub_path":"hw_postgres.py","file_name":"hw_postgres.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"454548082","text":"\ndef data():\n global userDB\n global bookDB\n userDB = [['zhang3', '123'], ['li4', '123'], ['小波', '123'], ['admin', '123']]\n bookDB = [['三国演义', '罗贯中', '古典文学', 99.99, '好看'],\n ['水浒传', '施耐庵', '古典文学', 99.99, '好看'],\n ['论语', '不详', '四书品类', 199.99, '好看'],\n ['史记', '司马迁', '历史文献', 299.99, '好看'],\n ['高等数学', '大神', '教育材料', 9.99, '好看'],\n ['时间简史', '霍金', '科学文献', 99.99, '好看'],\n ['简爱', '不详', '国外文学', 99.99, '好看'],\n ['斗罗大陆', '唐家三少', '玄幻小说', 999.99, '好看'],\n ['天龙八部', '金庸', '武侠小说', 99.99, '好看'],\n ['三侠五义', '不详', '武侠小说', 99.99, '好看']]\n\n# Manage books: create, delete, update, query\ndef book_admin():\n pass\n\n# Login function\ndef login():\n name = input('请输入账号:')\n pwd = input('请输入密码:')\n # When the whole loop has to run to completion before a one-shot decision can be made, use a flag\n flag = 0 # set up a \"lamp\"; it starts out off\n # iterate over the database\n for i in userDB:\n # i: a list holding all of one user's information\n if i[0] == name and i[1] == pwd:\n flag = 1 # the lamp lights up\n break\n if flag == 1:\n print('登录成功!')\n # start managing the books\n # create, delete, update, query\n book_admin()\n else:\n print('登录失败,请重新登录')\n\n\n# Registration function\ndef register():\n while 1:\n name = input('请输入账号:')\n # When the whole loop has to run to completion before a one-shot decision can be made, use a flag\n flag = 0 # set up a \"lamp\"; it starts out off\n # iterate over the database\n for i in userDB:\n # i: a list holding all of one user's information\n if i[0] == name:\n flag = 1 # the lamp lights up\n break\n if flag == 1:\n print('账号已经存在,请重新注册')\n else:\n print('账号不存在,可以注册')\n break\n while 1:\n pwd1 = input('请输入密码:')\n pwd2 = input('请确认密码:')\n if pwd1 == pwd2:\n userDB.append([name, pwd1])\n print('注册成功')\n break\n else:\n print('两次密码不一致,请重新输入')\n\n# Call the functions\n\n# Each function should do a single job\n# A main program / main function is needed to call the other functions\ndef main():\n data() # initialise userDB/bookDB before they are used by login()/register()\n # print the welcome banner:\n print('***************欢迎来到周围书屋***********************')\n # let the user choose: log in or register\n n = input('登录:1 注册:2 \n')\n if n == '1':\n login()\n elif n == '2':\n register()\n else:\n 
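# any other input just falls through to this error message; there is no retry loop (editor's note)\n 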
print('指令有误,请重新输入!')\n\nmain()\n\n\n#","sub_path":"Python_Syntax_code/day09/bookAdmin.py","file_name":"bookAdmin.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"129669022","text":"import tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.options\nimport tornado.httpclient\nimport tornado.gen\nimport json\nimport os.path\n\nfrom tornado.options import define, options\ndefine('port', default = 8967, help = \"run on the given port\", type = int)\n\nclass IndexHandler(tornado.web.RequestHandler):\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n try:\n try:\n ipAddr = self.request.headers['X-Forwarded-For']\n except KeyError:\n ipAddr = self.request.remote_ip\n\n client = tornado.httpclient.AsyncHTTPClient()\n geoIpResponse = yield tornado.gen.Task(client.fetch, 'https://freegeoip.net/json/' + ipAddr)\n geoInfo = json.loads(geoIpResponse.body.decode('utf-8'))\n latitude, longitude = str(geoInfo['latitude']), str(geoInfo['longitude'])\n weatherResponse = yield tornado.gen.Task(client.fetch, 'http://api.openweathermap.org/data/2.5/forecast/daily?mode=json&cnt=1&' + 'lon=' + longitude + '&lat=' + latitude)\n weatherInfo = json.loads(weatherResponse.body.decode('utf-8'))\n weather = int(weatherInfo['list'][0]['weather'][0]['id'])\n umbrella = [200, 201, 202, 210, 211, 212, 221, 230, 231, 232, 300, 301, 302, 310, 311, 312, 313, 314, 321, 500, 501, 502, 503, 504, 511, 520, 521, 522, 531, 600, 601, 602, 611, 612, 615, 616, 620, 621, 622, 781, 900, 901, 902, 906]\n if weather in umbrella:\n self.render('index.html', value = 'Yes!')\n else:\n self.render('index.html', value = 'No.')\n except:\n self.render('index.html', value = 'Unexpected Error, Please refresh.')\n\nif __name__ == '__main__':\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers = [\n (r'^/$', IndexHandler), \n ],\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n )\n httpServer = tornado.httpserver.HTTPServer(app)\n httpServer.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"293921582","text":"from setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name = 'termshape', \n packages = ['termshape'], \n version = '0.0.2', \n license='MIT',\n description = 'Termshape is a minimalistic Python package that only prints basic shapes on the terminal.', \n long_description=long_description,\n long_description_content_type=\"text/markdown\", \n author = 'Zvi Bazak',\n author_email = 'zvibazak@gmail.com',\n url = 'https://github.com/zvibazak/termshape',\n download_url = 'https://github.com/zvibazak/termshape/archive/v_0.0.2.tar.gz',\n keywords = ['terminal', 'shape'],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 
],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77565677","text":"class SERVER_MSG():\n def __init__(self):\n self.commandCode=b'\\x02'\n self.ipFirst=192\n self.ipSecond=168\n self.ipThird=5\n self.ipFourth=49\n self.port=-1\n self.errorType=b'\\x01'\n self.bandWidth=-1\n self.typeStr='=ciiiiici'","sub_path":"msgEntity/SERVER_MSG.py","file_name":"SERVER_MSG.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216148160","text":"# uncompyle6 version 3.7.0\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 22:39:24) [MSC v.1916 32 bit (Intel)]\n# Embedded file name: C:\\Cygwin\\home\\toonpub\\player_1_0_46_qa\\toontown\\src\\minigame\\IceGameGlobals.py\nimport math\nfrom pandac.PandaModules import Point3\nfrom toontown.toonbase import ToontownGlobals\nInputTimeout = 15\nTireMovieTimeout = 120\nMinWall = (\n -20.0, -15.0)\nMaxWall = (20.0, 15.0)\nTireRadius = 1.5\nWallMargin = 1 + TireRadius\nStartingPositions = (\n Point3(MinWall[0] + WallMargin, MinWall[1] + WallMargin, TireRadius),\n Point3(MaxWall[0] - WallMargin, MaxWall[1] - WallMargin, TireRadius),\n Point3(MinWall[0] + WallMargin, MaxWall[1] - WallMargin, TireRadius),\n Point3(MaxWall[0] - WallMargin, MinWall[1] + WallMargin, TireRadius))\nNumMatches = 3\nNumRounds = 2\nPointsDeadCenter = {0: 5, 1: 5, 2: 5, 3: 4, 4: 3}\nPointsInCorner = 1\nFarthestLength = math.sqrt((MaxWall[0] - TireRadius) * (MaxWall[0] - TireRadius) + (MaxWall[1] - TireRadius) * (MaxWall[1] - TireRadius))\nBonusPointsForPlace = (3, 2, 1, 0)\nExpandFeetPerSec = 5\nScoreCountUpRate = 0.15\nShowScoresDuration = 4.0\nNumTreasures = {ToontownGlobals.ToontownCentral: 2, ToontownGlobals.DonaldsDock: 2, ToontownGlobals.DaisyGardens: 2, ToontownGlobals.MinniesMelodyland: 2, ToontownGlobals.TheBrrrgh: 1, ToontownGlobals.DonaldsDreamland: 1}\nNumPenalties = {ToontownGlobals.ToontownCentral: 0, ToontownGlobals.DonaldsDock: 1, ToontownGlobals.DaisyGardens: 1, ToontownGlobals.MinniesMelodyland: 1, ToontownGlobals.TheBrrrgh: 2, ToontownGlobals.DonaldsDreamland: 2}\nObstacles = {ToontownGlobals.ToontownCentral: (), ToontownGlobals.DonaldsDock: ((0, 0),), ToontownGlobals.DaisyGardens: ((MinWall[0] / 2, 0), (MaxWall[0] / 2, 0)), ToontownGlobals.MinniesMelodyland: ((0, MinWall[1] / 2), (0, MaxWall[1] / 2)), ToontownGlobals.TheBrrrgh: ((MinWall[0] / 2, 0), (MaxWall[0] / 2, 0), (0, MinWall[1] / 2), (0, MaxWall[1] / 2)), ToontownGlobals.DonaldsDreamland: ((MinWall[0] / 2, MinWall[1] / 2), (MinWall[0] / 2, MaxWall[1] / 2), (MaxWall[0] / 2, MinWall[1] / 2), (MaxWall[0] / 2, MaxWall[1] / 2))}\nObstacleShapes = {ToontownGlobals.ToontownCentral: True, ToontownGlobals.DonaldsDock: True, ToontownGlobals.DaisyGardens: True, ToontownGlobals.MinniesMelodyland: True, ToontownGlobals.TheBrrrgh: False, ToontownGlobals.DonaldsDreamland: False}","sub_path":"2011-web-sv1.0.46.1/modules_decompiled/toontown/minigame/IceGameGlobals.py","file_name":"IceGameGlobals.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452671699","text":"from django.contrib.contenttypes.models import ContentType\nfrom .models import ReadNum, ReadDetail\nfrom django.utils import timezone\nfrom django.db.models import Sum\nimport 
datetime\n\n\n# Read count +1\ndef read_statistics_once_read(request, object):\n ct = ContentType.objects.get_for_model(object)\n key = \"%s_%s_read\" % (ct.model, object.pk)\n if not request.COOKIES.get(key): # if this visitor has no cookie yet, i.e. has not read it before:\n # total read count +1\n readnum, created = ReadNum.objects.get_or_create(content_type=ct, object_id=object.pk)\n readnum.read_num += 1\n readnum.save()\n\n # today's read count +1\n date = timezone.now().date()\n readDetail, created = ReadDetail.objects.get_or_create(content_type=ct, object_id=object.pk, date=date)\n readDetail.read_num += 1\n readDetail.save()\n return key\n\n\ndef get_seven_days_read_data(content_type):\n today = timezone.now().date()\n read_nums = []\n dates=[]\n for i in range(7, 0, -1):\n date = today - datetime.timedelta(days=i)\n dates.append(date.strftime(\"%m/%d\"))\n read_details = ReadDetail.objects.filter(content_type=content_type, date=date)\n result = read_details.aggregate(read_num_sum=Sum('read_num'))\n read_nums.append(result['read_num_sum'] or 0)\n return dates,read_nums\n\ndef get_today_hot_data(content_type):\n today=timezone.now().date()\n read_details=ReadDetail.objects.filter(content_type=content_type,date=today).order_by('-read_num')\n return read_details[:7]\n\ndef get_yesterday_hot_data(content_type):\n today=timezone.now().date()\n yesterday=today-datetime.timedelta(days=1)\n read_details=ReadDetail.objects.filter(content_type=content_type,date=yesterday).order_by('-read_num')\n return read_details[:7]\n\ndef get_sevenDays_hot_data(content_type):\n today=timezone.now().date()\n date=today-datetime.timedelta(days=7)\n read_details=ReadDetail.objects.filter(content_type=content_type,date__lte=today,date__gte=date).values('content_type','object_id').annotate(read_num_sum=Sum('read_num')).order_by(\"-read_num_sum\")\n return read_details[:7]","sub_path":"read_statistics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"292604207","text":"#Libraries\nimport RPi.GPIO as GPIO\nimport time\n \n#GPIO Mode (BOARD / BCM)\nGPIO.setmode(GPIO.BCM)\n\n# Constants\nAUTOBRAKE_DIST = 7.0 #cm\nCOLLISION_DIST = 15.0 #cm\nVERIFY_INTERVAL = 0.1 #s\n\n#defines\nGPIO_TRIGGER = 25\nGPIO_ECHO = 16\n\nGPIO_COLLISION = 23\nGPIO_AUTOBRAKE = 24\n\n#set GPIO direction (IN / OUT)\nGPIO.setup(GPIO_TRIGGER, GPIO.OUT)\nGPIO.setup(GPIO_ECHO, GPIO.IN)\nGPIO.setup(GPIO_COLLISION, GPIO.OUT)\nGPIO.setup(GPIO_AUTOBRAKE, GPIO.OUT)\n\nGPIO_LEFT_ENCODER = 14\nCOUNT_LIMIT = 20\n\nCOUNT_MIN_LIMIT = 4\nDIST_4_LIMIT = 8.16814089933\n\n#set GPIO direction (IN / OUT)\nGPIO.setup(GPIO_LEFT_ENCODER, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\nsetado = False\nglobal_count = 0\nStartTime = time.time()\nStopTime = time.time()\n\ndef my_callback(channel):\n global global_count\n global StartTime\n global StopTime\n\n global_count += 1\n\n print (\".\")\n\n if(global_count >= COUNT_MIN_LIMIT):\n # Stop time\n StopTime = time.time()\n # Reset the counter\n global_count = 0\n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n print (\"TIME: \" + str(TimeElapsed))\n # Compute the RPM\n RPM = 1/TimeElapsed*60/5\n print (\"RPM: \" + str(RPM))\n # Compute the speed\n speed = DIST_4_LIMIT/TimeElapsed\n print (\"Speed: \" + str(speed) + \" cm/s\")\n # Start the count again\n StartTime = time.time()\n\n\ndef distance():\n # set Trigger to HIGH\n GPIO.output(GPIO_TRIGGER, True)\n \n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n 
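# this ends the ~10 microsecond HIGH pulse that HC-SR04-style ultrasonic sensors expect as a trigger (editor's note)\n 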
GPIO.output(GPIO_TRIGGER, False)\n \n StartTimeInside = time.time()\n StopTimeInside = time.time()\n \n # save StartTime\n while GPIO.input(GPIO_ECHO) == 0:\n StartTimeInside = time.time()\n \n # save time of arrival\n while GPIO.input(GPIO_ECHO) == 1:\n StopTimeInside = time.time()\n \n # time difference between start and arrival\n TimeElapsed = StopTimeInside - StartTimeInside\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n \n return distance\n \n\n# when a falling edge is detected on the encoder pin (GPIO_LEFT_ENCODER, GPIO 14), regardless of whatever \n# else is happening in the program, the function my_callback will be run \nGPIO.add_event_detect(GPIO_LEFT_ENCODER, GPIO.FALLING, callback=my_callback, bouncetime=1) \n\n\nif __name__ == '__main__':\n try:\n while True:\n dist = distance()\n\n if(dist < AUTOBRAKE_DIST):\n print(\"AUTOBRAKE!\")\n GPIO.output(GPIO_AUTOBRAKE, True)\n GPIO.output(GPIO_COLLISION, True)\n elif (dist < COLLISION_DIST):\n print(\"COLLISION!\")\n GPIO.output(GPIO_AUTOBRAKE, False)\n GPIO.output(GPIO_COLLISION, True)\n else:\n print(\"NORMAL!\")\n GPIO.output(GPIO_AUTOBRAKE, False)\n GPIO.output(GPIO_COLLISION, False)\n\n print (\"Measured Distance = %.1f cm\" % dist)\n time.sleep(VERIFY_INTERVAL)\n \n # Reset by pressing CTRL + C\n except KeyboardInterrupt:\n print(\"Measurement stopped by User\")\n GPIO.cleanup()","sub_path":"ECU_RASP/ultrassonic_plus_encoder.py","file_name":"ultrassonic_plus_encoder.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"340535273","text":"import pytest\n\nfrom lunavl.sdk.detectors.base import ImageForRedetection\nfrom lunavl.sdk.detectors.facedetector import FaceDetector\nfrom lunavl.sdk.errors.errors import LunaVLError\nfrom lunavl.sdk.errors.exceptions import LunaSDKException\nfrom lunavl.sdk.faceengine.setting_provider import DetectorType\nfrom lunavl.sdk.image_utils.geometry import Rect\nfrom lunavl.sdk.image_utils.image import VLImage\nfrom tests.detect_test_class import FaceDetectTestClass\nfrom tests.detect_test_class import VLIMAGE_SEVERAL_FACE, VLIMAGE_SMALL, OUTSIDE_AREA, INVALID_RECT, ERROR_CORE_RECT\nfrom tests.resources import CLEAN_ONE_FACE\n\nVLIMAGE_ONE_FACE = VLImage.load(filename=CLEAN_ONE_FACE)\n\n\nclass TestsRedetectFace(FaceDetectTestClass):\n \"\"\"\n Face redetection tests.\n \"\"\"\n\n detector: FaceDetector\n\n @classmethod\n def setup_class(cls):\n super().setup_class()\n cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_DEFAULT)\n\n def test_get_landmarks_for_redetect_one(self):\n \"\"\"\n Test get and check landmark instances for re-detection of one face\n \"\"\"\n for case in self.landmarksCases:\n with self.subTest(landmarks5=case.detect5Landmarks, landmarks68=case.detect68Landmarks):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detectOne = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetectOne(\n image=VLIMAGE_ONE_FACE,\n bBox=detectOne,\n detect68Landmarks=case.detect68Landmarks,\n detect5Landmarks=case.detect5Landmarks,\n )\n self.assertDetectionLandmarks(\n detection=redetect, landmarks5=case.detect5Landmarks, landmarks68=case.detect68Landmarks\n )\n\n def test_get_landmarks_for_batch_redetect(self):\n \"\"\"\n Test get and check landmark instances for batch re-detection\n \"\"\"\n for case in self.landmarksCases:\n with 
self.subTest(landmarks5=case.detect5Landmarks, landmarks68=case.detect68Landmarks):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detectOne = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetect(\n images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[detectOne.boundingBox.rect])],\n detect68Landmarks=case.detect68Landmarks,\n detect5Landmarks=case.detect5Landmarks,\n )[0][0]\n self.assertDetectionLandmarks(\n detection=redetect, landmarks5=case.detect5Landmarks, landmarks68=case.detect68Landmarks\n )\n\n def test_redetect_one_with_bbox_option(self):\n \"\"\"\n Test re-detection of one face with bounding box option\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=detection.boundingBox.rect)\n self.assertFaceDetection(redetect, VLIMAGE_ONE_FACE)\n\n def test_redetect_one_with_detection_option(self):\n \"\"\"\n Test re-detection of one face with detection options\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=detection)\n self.assertFaceDetection(redetect, VLIMAGE_ONE_FACE)\n\n def test_batch_redetect_with_one_face(self):\n \"\"\"\n Test batch re-detection with one face image\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE)\n redetect = detector.redetect(\n images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[detection.boundingBox.rect])]\n )[0]\n self.assertFaceDetection(redetect, VLIMAGE_ONE_FACE)\n\n def test_batch_redetect(self):\n \"\"\"\n Test re-detection batch of images\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detectSeveral = detector.detect(images=[VLIMAGE_ONE_FACE, VLIMAGE_SEVERAL_FACE])\n redetect = detector.redetect(\n images=[\n ImageForRedetection(\n image=VLIMAGE_SEVERAL_FACE, bBoxes=[face.boundingBox.rect for face in detectSeveral[1]]\n ),\n ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[detectSeveral[0][0].boundingBox.rect]),\n ]\n )\n self.assertFaceDetection(redetect[0], VLIMAGE_SEVERAL_FACE)\n self.assertFaceDetection(redetect[1], VLIMAGE_ONE_FACE)\n assert 2 == len(redetect)\n assert 5 == len(redetect[0])\n assert 1 == len(redetect[1])\n\n def test_redetect_by_area_without_face(self):\n \"\"\"\n Test re-detection by area without face\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n redetectOne = detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=Rect(0, 0, 100, 100))\n redetect = detector.redetect(\n images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[Rect(0, 0, 100, 100)])]\n )[0][0]\n assert redetectOne is None, \"excepted None but found {}\".format(redetectOne)\n assert redetect is None, \"excepted None but found {}\".format(redetectOne)\n\n def test_redetect_one_invalid_rectangle(self):\n \"\"\"\n Test re-detection of one face with an invalid rect\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n with pytest.raises(LunaSDKException) as exceptionInfo:\n detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=INVALID_RECT)\n self.assertLunaVlError(exceptionInfo, 
LunaVLError.InvalidRect)\n\n def test_batch_redetect_invalid_rectangle(self):\n \"\"\"\n Test batch re-detection with an invalid rect\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n with pytest.raises(LunaSDKException) as exceptionInfo:\n detector.redetect(\n images=[\n ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[INVALID_RECT]),\n ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[Rect(0, 0, 100, 100)]),\n ]\n )\n self.assertLunaVlError(exceptionInfo, LunaVLError.BatchedInternalError)\n assert len(exceptionInfo.value.context) == 2, \"Expect two error in exception context\"\n self.assertReceivedAndRawExpectedErrors(exceptionInfo.value.context[0], LunaVLError.InvalidRect)\n self.assertReceivedAndRawExpectedErrors(exceptionInfo.value.context[1], LunaVLError.Ok)\n\n @pytest.mark.skip(\"core bug: Fatal error\")\n def test_rect_float(self):\n \"\"\"\n Test re-detection with an invalid rect\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detector.redetect(images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[ERROR_CORE_RECT])])\n\n def test_match_redetect_one_image(self):\n \"\"\"\n Test match of values at different re-detections (redetectOne and redetect) with one image\n \"\"\"\n for image in (VLIMAGE_ONE_FACE, VLIMAGE_SMALL):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n if detector.detectorType.name == \"FACE_DET_V3\":\n self.skipTest(\"Skip for FaceDetV3. Different value\")\n continue\n bBoxRect = detector.detectOne(image=image).boundingBox.rect\n redetectOne = detector.redetectOne(image=image, bBox=bBoxRect, detect68Landmarks=True)\n batchRedetect = detector.redetect(\n images=[ImageForRedetection(image=image, bBoxes=[bBoxRect])] * 3, detect68Landmarks=True\n )\n for redetect in batchRedetect:\n for face in redetect:\n assert face.boundingBox.asDict() == redetectOne.boundingBox.asDict()\n assert face.landmarks5.asDict() == redetectOne.landmarks5.asDict()\n assert face.landmarks68.asDict() == redetectOne.landmarks68.asDict()\n\n def test_redetect_one_in_area_outside_image(self):\n \"\"\"\n Test re-detection of one face in area outside image\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n if detector.detectorType.name == \"FACE_DET_V3\":\n redetectOne = detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=OUTSIDE_AREA)\n self.assertFaceDetection(redetectOne, VLIMAGE_ONE_FACE)\n else:\n redetectOne = detector.redetectOne(image=VLIMAGE_ONE_FACE, bBox=OUTSIDE_AREA)\n assert redetectOne is None, \"excepted None but found {}\".format(redetectOne)\n\n def test_batch_redetect_in_area_outside_image(self):\n \"\"\"\n Test batch re-detection in area outside image\n \"\"\"\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n if detector.detectorType.name == \"FACE_DET_V3\":\n redetect = detector.redetect(\n images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[OUTSIDE_AREA])]\n )\n self.assertFaceDetection(redetect[0], VLIMAGE_ONE_FACE)\n else:\n redetect = detector.redetect(\n images=[ImageForRedetection(image=VLIMAGE_ONE_FACE, bBoxes=[OUTSIDE_AREA])]\n )\n assert redetect[0][0] is None\n","sub_path":"tests/test_face_redetect.py","file_name":"test_face_redetect.py","file_ext":"py","file_size_in_byte":10872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281659501","text":"import mbuild 
as mb\nfrom mbuild.tests.base_test import BaseTest\n\nfrom mbuild.lib.surfaces import Betacristobalite\nfrom mbuild.lib.atoms import H\n\n\nclass TestMonolayer(BaseTest):\n\n def test_monolayer(self, ch2):\n n = 8\n m = 8\n pattern = mb.Grid2DPattern(n, m)\n\n chain = mb.Polymer(ch2, n=10)\n monolayer = mb.Monolayer(surface=Betacristobalite(), chain=chain,\n backfill=H(), pattern=pattern)\n\n assert monolayer.n_particles == 1900 + n * m * (10*3) + (100 - n*m)\n assert monolayer.n_bonds == 2400 + n * m * (10 * 2 + 9 + 1) + (100 - n * m)\n\n def test_pattern_kwargs(self, ch2):\n n = 8\n m = 8\n pattern = mb.Grid2DPattern(n, m)\n\n chain = mb.Polymer(ch2, n=10)\n monolayer = mb.Monolayer(surface=Betacristobalite(), chain=H(),\n guest_port_name='up', backfill=chain,\n backfill_port_name='down', pattern=pattern)\n\n chains = 100 - (n*m)\n\n assert monolayer.n_particles == 1900 + chains * (10*3) + (100 - chains)\n assert monolayer.n_bonds == 2400 + chains * (10 * 2 + 9 + 1) + (100 - chains)\n","sub_path":"mbuild/tests/test_monolayer.py","file_name":"test_monolayer.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122669693","text":"import unittest\nimport project04.src.dateCheck as dateCheck\n\ndef suite():\n suite = unittest.TestLoader().loadTestsFromTestCase(dateCheckTestCase)\n return suite\nclass dateCheckTestCase(unittest.TestCase):\n def setUp(self):\n #i02\n self.badDate = {'NAME': ['Ginger /Bred/'], 'FAMS': ['F01'], 'BIRT': ['10 AUG 1989'], 'SEX': ['M'], 'type': 'INDI', 'ID': 'I02', 'DEAT': ['9 OCT 2020']}\n #i01\n self.goodDate = {'NAME': ['Hersh E. /Bar/'], 'FAMS': ['F01'], 'BIRT': ['21 FEB 1970'], 'SEX': ['M'], 'type': 'INDI', 'ID': 'I01'}\n #i03\n self.badDate2 = {'NAME': ['Candy /Kane/'], 'FAMS': ['F02'], 'BIRT': ['20 DEC 2020'], 'SEX': ['F'], 'type': 'INDI', 'ID': 'I03'}\n #i04\n self.goodDate2 = {'NAME': ['George /Kane/'], 'FAMS': ['F02'], 'BIRT': ['29 FEB 1992'], 'SEX': ['M'], 'type': 'INDI', 'ID': 'I04'}\n\n self.familyBad = {'MARR': ['19 FEB 2021'], 'WIFE': ['I02'], 'CHIL': ['I03', 'I05', 'I06', 'I07'], 'HUSB': ['I01'], 'type': 'FAM', 'ID': 'F01'}\n self.familyGood = {'MARR': ['14 FEB 2000'], 'WIFE': ['I03'], 'CHIL': [ 'I05', 'I06', 'I07'], 'HUSB': ['I04'], 'type': 'FAM', 'ID': 'F02'}\n \n self.familyDict = {'F01': self.familyBad, 'F02' : self.familyGood}\n self.individualDict = {'I02' : self.badDate, 'I01' : self.goodDate, 'I03' : self.badDate2, 'I04' : self.goodDate2}\n\n \n def tearDown(self):\n self.badDate = None\n self.badDate2 = None\n self.goodDate = None\n self.goodDate2 = None\n \n \n def test_dateCheck(self):\n err_msg = dateCheck.dateCheck(self.familyDict, self.individualDict)\n self.assertEqual(3, len(err_msg), \"Individual I02 in Family F01 has an invalid death date, Individual I03 in Family F01 has an invalid birth date, Family F01 has an invalid marriage date\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"project04/tests/dateCheckTests.py","file_name":"dateCheckTests.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494303245","text":"import datetime\nimport random\n\nfrom . 
import models\n\ndef getChartses4Search(nnow):\n nextSearches = models.getNextSearches()\n chartses = models.getChartses()\n result = []\n for newssource in chartses:\n if not newssource.get('active'):\n continue\n\n newssourceSlug = newssource.get('slug')\n nextSearch = nextSearches.get(newssourceSlug, {})\n nextUpdated = nextSearch.get('next')\n if nextUpdated and nextUpdated > nnow:\n continue\n\n nsDynamic = models.getNewssourceDynamic(newssourceSlug)\n if not nsDynamic:\n continue\n items = nsDynamic.get('items')\n if not items:\n continue\n origin = nsDynamic.get('origin')\n if not origin:\n continue\n\n result.append({\n 'hash': nextSearch.get('hash'),\n 'origin': origin,\n 'items': items,\n })\n return result\n\ndef updateNextSearches(chartses, nnow, intervalMinutes):\n newValues = {}\n for charts in chartses:\n minutes = intervalMinutes + random.randint(-5, 5)\n slug = charts['origin']['slug']\n newValues[slug] = {\n 'hash': charts['hash'],\n 'next': nnow + datetime.timedelta(minutes=minutes)\n }\n if newValues:\n models.saveNextSearches(newValues)\n\ndef updateNextSearch(slug, contentHash, nnow, intervalMinutes):\n newValues = {}\n minutes = intervalMinutes + random.randint(-5, 5)\n newValues[slug] = {\n 'hash': contentHash,\n 'next': nnow + datetime.timedelta(minutes=minutes)\n }\n models.saveNextSearches(newValues)\n\n","sub_path":"src/scheduler/bscharts.py","file_name":"bscharts.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"354633007","text":"# You are given coins of different denominations and\n# a total amount of money amount. Write a function to\n# compute the fewest number of coins that you need to\n# make up that amount. If that amount of money cannot\n# be made up by any combination of the coins, return -1.\n#\n# Example 1:\n# coins = [1, 2, 5], amount = 11\n# return 3 (11 = 5 + 5 + 1)\n#\n# Example 2:\n# coins = [2], amount = 3\n# return -1.\n#\n# Note:\n# You may assume that you have an infinite number of each kind of coin.\n\nclass Solution(object):\n def coinChange(self, coins, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \"\"\"\n rs = [amount+1] * (amount+1)\n rs[0] = 0\n for i in xrange(1, amount+1):\n for c in coins:\n if i >= c: # if coin c fits into amount i, try using it\n rs[i] = min(rs[i], rs[i-c] + 1)\n\n if rs[amount] == amount+1:\n return -1\n return rs[amount]\n\n# This problem is a bit like LeetCode 300 (Longest Increasing Subsequence);\n# the core of that solution is kept below, commented out, for reference only:\n#\n# dp = [1] * len(nums)\n# for i in range(len(nums)):\n# for j in range(i):\n# if nums[i] > nums[j]:\n# dp[i] = max(dp[i], dp[j]+1) # dp[i] really does start at 1, but it grows as j sweeps over the earlier elements\n# return max(dp) if dp else 0\n\n\n# Many people first think of Greedy: always take the largest coin, falling back to smaller ones. That approach cannot guarantee the minimum number of coins; e.g. for input [1, 3, 5, 6], 8, Greedy yields 3 (6 + 1 + 1), while the correct answer is 2 (3 + 5)\n# https://segmentfault.com/a/1190000004212264\n# http://blog.csdn.net/liyuefeilong/article/details/50687271 (newer)\n\n# http://www.geeksforgeeks.org/find-minimum-number-of-coins-that-make-a-change/\n# If V == 0, then 0 coins required.\n# If V > 0\n# minCoin(coins[0..m-1], V) = min {1 + minCoins(V-coin[i])} \n# where i varies from 0 to m-1 \n# and coin[i] <= V \n# Below is recursive solution based on above recursive formula.\n\n# // A Naive recursive C++ program to find minimum of coins\n# // to make a given change V\n# #include\n# using namespace std;\n \n# // m is size of coins array (number of different coins)\n# int minCoins(int coins[], int m, int V)\n# {\n# // base case\n# if (V == 0) return 0;\n \n# // Initialize result\n# int res = INT_MAX;\n 
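# // (editor's worked example) V = 11, coins = {1, 2, 5}: res = min(1 + minCoins(10), 1 + minCoins(9), 1 + minCoins(6)) = min(3, 4, 3) = 3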
\n# // Try every coin that has smaller value than V\n# for (int i=0; i twitter_data.json')\n\nexec(open(\"../../kmeans.py\").read())\n\nwith open('twitter_data.json') as json_data:\n data = json.load(json_data)\n\ndf = pd.DataFrame.from_dict(data)\ndf = df.transpose()\ndf = df.dropna()\nprint(df)\n\ndata_labels,data_cluster_centers,data_num_each_cluster = kmeansData(df=df,plotFlag=False)\nprint(data_labels)\nprint(data_cluster_centers)\nprint(data_num_each_cluster)","sub_path":"src/twitter/twitter_kmeans.py","file_name":"twitter_kmeans.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606536859","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 6 13:16:53 2017\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef regr(i):\r\n return 5.37*i +525.2\r\n\r\nx = list(range(50,301, 50))\r\nK = [792,1065,1329,1601,1867,2136]\r\n\r\nplt.style.use('seaborn-ticks')\r\nplt.rc('text', usetex = True)\r\nplt.scatter(x, K, label = 'Data', s = 40)\r\ni = np.linspace(min(x), max(x), num = 500)\r\nplt.plot(i, regr(i), label = 'Regresjon', linewidth=1.5)\r\nplt.grid(True)\r\nplt.legend(loc = 'best')\r\nplt.show()\r\n ","sub_path":"løsningsforslag/S2/S2_17V/figs/oppave3_2.py","file_name":"oppave3_2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555170096","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.mqtt import MQTTUtils\nimport pika\n\n\nsc = SparkContext(\"local[5]\", \"Jesus Christ that's Jason Bourne\")\nssc = StreamingContext(sc, 1)\nssc.checkpoint(\"/tmp/spark-streaming-checkpoints\")\n\nparameters = pika.ConnectionParameters(host='localhost', port=5672)\nconnection = pika.BlockingConnection(parameters)\nchannel = connection.channel()\nbroker_url = 'tcp://localhost:1883'\nexchange_name = \"amq.topic\"\nqueue_name = \"SPARK_POST_STRUCTURES\"\n\npost_structures = MQTTUtils.createStream(ssc, broker_url, queue_name)\nsep = \">>\"\n\n\ndef push_scores_to_queue(time, rdd):\n print(\"======{}======\".format(time))\n elements = None\n\n if rdd.isEmpty():\n print(\"-EMPTY-\")\n else:\n elements = rdd.map(lambda pair: (str(pair[0]), str(pair[1]))).collect()\n\n content = []\n for e in elements:\n print(e)\n content.append(','.join(e))\n\n elements = '>>'.join(content)\n\n parameters = pika.ConnectionParameters(host='localhost', port=5672)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n exchange_name = \"amq.topic\"\n queue_name = \"SPARK_PROCESSING_RESPONSE\"\n channel.exchange_declare(exchange=exchange_name, type='topic', durable=True)\n\n channel.basic_publish(\n exchange=exchange_name,\n routing_key=queue_name,\n body=str(elements)\n )\n\n channel.close()\n\ndef get_days_difference_between(timestamp1, timestamp2):\n ts_fmt = \"%Y-%m-%dT%H:%M:%S.%f+0000\"\n\n t1 = datetime.strptime(timestamp1, ts_fmt)\n t2 = datetime.strptime(timestamp2, ts_fmt)\n\n return (t1 - t2).days\n\n\ndef update_event(new_event, last_event):\n if new_event:\n return new_event[0]\n\n return last_event\n\nif __name__ == \"__main__\":\n print(\"Processamento de pontuações iniciado\")\n\n active_posts = post_structures.map(\n lambda evt: (evt.split(sep)[1].split('|')[1] + sep + evt.split(sep)[0],\n evt.split(sep)[1:])).flatMapValues(\n 
lambda evt: evt).map(\n lambda pair: (pair[0].split(sep)[0],\n (pair[0].split(sep)[1], pair[1].split('|')[0]) )\n ).mapValues(\n lambda pair: 10 - get_days_difference_between(pair[0], pair[1])\n ).reduceByKey(lambda x, y: x+y)\n\n active_posts.foreachRDD(push_scores_to_queue)\n\n ssc.start()\n ssc.awaitTermination()\n ssc.stop()\n\n","sub_path":"StreamProcessing/post_structure_processing.py","file_name":"post_structure_processing.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490615264","text":"import timing\n\nimport os\nimport pickle\nimport pandas as pd\nimport timing\nimport distributionDense as dist\nimport sys\nimport glob\nimport helpers as hlp\nfrom functools import reduce\n\n\n\nrunNTimes=5\n\n\nprint('--------> timeInferenceOperationsDense')\nmodelDir = os.path.dirname(sys.argv[1])\nmodelPaths = [os.path.join(modelDir + '/', dir) for dir in os.listdir(modelDir)]\n\ndef evalDir(path,i):\n print(path)\n frame = pd.DataFrame()\n modelPaths = [os.path.join(path + '/', dir) for dir in os.listdir(path)]\n onlyPickles = filter(lambda x: x.endswith('pickle'), modelPaths)\n counter=0\n for modelPath in onlyPickles:\n\n model = pickle.load(open(modelPath,'rb'))\n model = dist.DistributionDense(model.v,model.m.todense(),type='canonical')\n dict = {'Dim' : model.m.shape[0]}\n dict['Sparsity'] = model.sparsity()\n #print('mccc')\n dict['mccc'] = timing.marginalizeCanonicalConditionCanonical(model)\n #print('mccm')\n dict['mccm'] = timing.marginalizeCanonicalConditionMean(model)\n #print('mmcm')\n dict['mmcm'] = timing.marginalizeMeanConditionMean(model)\n #print('mmcc')\n dict['mmcc'] = timing.marginalizeMeanConditionCanonical(model)\n\n dict['conditionOnlyMean'] = timing.conditionOnlyMean(model)\n dict['conditionOnlyCanonical'] = timing.conditionOnlyCanonical(model)\n\n dict['mmccConvert'] = timing.marginalizeMeanConditionCanonicalConvert(model)\n dict['convertMmcc'] = timing.ConvertMarginalizeMeanConditionCanonical(model)\n\n frame = frame.append(dict, ignore_index=True)\n print(frame.shape)\n frame.to_csv(path + '/' + 'inferenceOperationsDense'+ str(i) +'.csv', index=False)\n\n# Ensure sequential order to minimize interference between timings\nfor i in range(0,runNTimes):\n for directory in modelPaths:\n evalDir(directory,i)\nfor directory in modelPaths:\n print(directory)\n names = glob.glob(directory+ '/' + '*.csv')\n frames = map(pd.read_csv,names)\n res = hlp.scalar(frames) \n #res = reduce(lambda a,b: a.add(b),frames)\n res.to_csv(directory + '/' + 'inferenceOperationsDense.csv', index=False)\n parts = glob.glob(directory+'/'+ 'inferenceOperationsDense[0-9].csv')\n\nfor i in parts:\n os.remove(i)\n","sub_path":"timeInferenceOperationsDense.py","file_name":"timeInferenceOperationsDense.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"370266070","text":"import discord\nfrom discord.ext import commands\nbot = commands.Bot(command_prefix='!!') #กำหนด Prefix\n@bot.event\nasync def on_ready() : #เมื่อระบบพร้อมใช้งาน\n print(\"Bot Started!\") #แสดงผลใน CMD\n@bot.event\nasync def on_message(message) : #ดักรอข้อความใน Chat\n if message.content.startswith('!!spotify') : #เมื่อข้อความในตัวแรกมีคำว่า ping\n await message.channel.send(':exclamation:ตรวจสอบค่า spotify กันด้วยนะครับ:exclamation:\\nhttps://11sf.netlify.app/#/spotify/members\\n@spotify family') 
#the message to send as the reply\nbot.run('token') #run the bot (paste in the TOKEN from the bot you created)","sub_path":"bot_folk.py","file_name":"bot_folk.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"174598315","text":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport abc\nimport typing\n\nfrom google import auth\nfrom google.api_core import operations_v1 # type: ignore\nfrom google.auth import credentials # type: ignore\n\nfrom google.longrunning import operations_pb2 as operations # type: ignore\nfrom google.showcase_v1beta1.types import echo as gs_echo\n\n\nclass EchoTransport(metaclass=abc.ABCMeta):\n    \"\"\"Abstract transport class for Echo.\"\"\"\n\n    AUTH_SCOPES = (\n    )\n\n    def __init__(\n            self, *,\n            host: str = 'localhost:7469',\n            credentials: credentials.Credentials = None,\n            ) -> None:\n        \"\"\"Instantiate the transport.\n\n        Args:\n            host (Optional[str]): The hostname to connect to.\n            credentials (Optional[google.auth.credentials.Credentials]): The\n                authorization credentials to attach to requests. These\n                credentials identify the application to the service; if none\n                are specified, the client will attempt to ascertain the\n                credentials from the environment.\n        \"\"\"\n        # Save the hostname. 
Default to port 443 (HTTPS) if none is specified.\n if ':' not in host:\n host += ':443'\n self._host = host\n\n # If no credentials are provided, then determine the appropriate\n # defaults.\n if credentials is None:\n credentials, _ = auth.default(scopes=self.AUTH_SCOPES)\n\n # Save the credentials.\n self._credentials = credentials\n\n @property\n def operations_client(self) -> operations_v1.OperationsClient:\n \"\"\"Return the client designed to process long-running operations.\"\"\"\n raise NotImplementedError\n\n @property\n def echo(self) -> typing.Callable[\n [gs_echo.EchoRequest],\n gs_echo.EchoResponse]:\n raise NotImplementedError\n\n @property\n def expand(self) -> typing.Callable[\n [gs_echo.ExpandRequest],\n gs_echo.EchoResponse]:\n raise NotImplementedError\n\n @property\n def collect(self) -> typing.Callable[\n [gs_echo.EchoRequest],\n gs_echo.EchoResponse]:\n raise NotImplementedError\n\n @property\n def chat(self) -> typing.Callable[\n [gs_echo.EchoRequest],\n gs_echo.EchoResponse]:\n raise NotImplementedError\n\n @property\n def paged_expand(self) -> typing.Callable[\n [gs_echo.PagedExpandRequest],\n gs_echo.PagedExpandResponse]:\n raise NotImplementedError\n\n @property\n def wait(self) -> typing.Callable[\n [gs_echo.WaitRequest],\n operations.Operation]:\n raise NotImplementedError\n\n @property\n def block(self) -> typing.Callable[\n [gs_echo.BlockRequest],\n gs_echo.BlockResponse]:\n raise NotImplementedError\n\n\n__all__ = (\n 'EchoTransport',\n)\n","sub_path":"showcase_generated/google/showcase_v1beta1/services/echo/transports/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"460630639","text":"from flask import Flask, render_template, url_for, request, redirect, flash, session\nfrom datetime import timedelta\nfrom flask_sqlalchemy import SQLAlchemy\nfrom secondary import second\n\napp = Flask(__name__)\napp.register_blueprint(second, url_prefix='')\napp.config['SECRET_KEY'] = 'b91a77d1ed4b44aa71ab1a740f565e13'\napp.permanent_session_lifetime = timedelta(days=1)\n\n\n@app.route('/')\ndef home():\n if 'email' in session:\n user = session['email'].split('@')[0].upper()\n return render_template('home.html', user=user)\n return render_template('home.html')\n\n\n@app.route('/contact', methods=['GET', 'POST'])\ndef contact():\n email = None\n if request.method == 'POST':\n session.permanent = True\n email = request.form['email']\n content = request.form['content']\n session['email'] = email\n\n flash('Sorry cannot accept query right now. Please contact us at example@gmail.com')\n return render_template('contact.html', query=True, email=email)\n else:\n if 'email' in session:\n email = session['email']\n return render_template('contact.html', email=email)\n\n\n@app.route('/about', methods=['GET', 'POST'])\ndef about():\n return render_template('about.html')\n\n\n@app.route('/clearsession')\ndef clearsession():\n if 'email' in session:\n session.pop('email', None)\n flash('Session Cleared')\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.run(port=8080, debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"83463347","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. 
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport itertools\nimport functools\nfrom collections import Iterable\nfrom copy import deepcopy\n\nfrom .compat import Queue, six\n\n\nclass DAGValidationError(Exception):\n pass\n\n\nclass DAG(object):\n \"\"\"Directed acyclic graph implementation.\"\"\"\n\n def __init__(self):\n self._graph = dict()\n self._map = dict()\n\n def nodes(self):\n return [self._map[n] for n in self._graph]\n\n def contains_node(self, node):\n return id(node) in self._graph\n\n def add_node(self, node, graph=None):\n graph = graph or self._graph\n\n graph[id(node)] = set()\n self._map[id(node)] = node\n\n def remove_node(self, node, graph=None):\n graph = graph or self._graph\n\n if id(node) not in graph:\n raise KeyError('Node does not exist')\n\n graph.pop(id(node))\n self._map.pop(id(node))\n\n for edges in six.itervalues(self._graph):\n if id(node) in edges:\n edges.remove(id(node))\n\n def contains_edge(self, predecessor_node, successor_node):\n if id(predecessor_node) not in self._graph or \\\n id(successor_node) not in self._graph:\n return False\n\n return id(successor_node) in self._graph[id(predecessor_node)]\n\n def add_edge(self, predecessor_node, successor_node, graph=None):\n graph = graph or self._graph\n\n if id(predecessor_node) not in self._graph or \\\n id(successor_node) not in self._graph:\n raise KeyError('Node does not exist')\n\n test_graph = deepcopy(graph)\n test_graph[id(predecessor_node)].add(id(successor_node))\n\n valid, msg = self._validate(test_graph)\n if valid:\n graph[id(predecessor_node)].add(id(successor_node))\n else:\n raise DAGValidationError(msg)\n\n def remove_edge(self, predecessor_node, successor_node, graph=None):\n graph = graph or self._graph\n\n if id(successor_node) not in graph.get(id(predecessor_node), []):\n raise KeyError('Edge does not exist in the graph')\n\n graph[id(predecessor_node)].remove(id(successor_node))\n\n def indep_nodes(self, graph=None):\n graph = graph or self._graph\n\n all_nodes = set(graph.keys())\n ids = all_nodes - set(itertools.chain(*graph.values()))\n return [self._map.get(i) for i in ids]\n\n def predecessors(self, node, graph=None):\n graph = graph or self._graph\n return [self._map.get(node_id) for node_id, deps in six.iteritems(graph)\n if id(node) in deps]\n\n def successors(self, node, graph=None):\n graph = graph or self._graph\n\n if id(node) not in graph:\n raise KeyError('Node does not exist: %s' % node)\n\n return [self._map.get(node_id) for node_id in graph[id(node)]]\n\n def _validate(self, graph=None):\n graph = graph or self._graph\n if len(self.indep_nodes(graph)) == 0:\n return False, 'No independent nodes detected'\n\n try:\n self.topological_sort(graph)\n except ValueError:\n return False, 'Fail to topological sort'\n return True, 'Valid'\n\n def bfs(self, start_nodes, successor=None, cond=None, graph=None):\n graph = graph or 
self._graph\n cond = cond or (lambda v: True)\n successor = successor or functools.partial(self.successors, graph=graph)\n start_nodes = [start_nodes, ] if not isinstance(start_nodes, Iterable) else start_nodes\n start_nodes = [n for n in start_nodes if cond(n)]\n assert all(id(node) in graph for node in start_nodes)\n\n visited = set(id(node) for node in start_nodes)\n node_queue = Queue()\n [node_queue.put(node) for node in start_nodes]\n while not node_queue.empty():\n cur_node = node_queue.get()\n for up_node in (n for n in successor(cur_node) if cond(n)):\n if id(up_node) not in visited:\n visited.add(id(up_node))\n yield up_node\n node_queue.put(up_node)\n\n def ancestors(self, start_nodes, cond=None, graph=None):\n return list(self.bfs(start_nodes, functools.partial(self.predecessors, graph=graph), cond, graph))\n\n def descendants(self, start_nodes, cond=None, graph=None):\n return list(self.bfs(start_nodes, cond=cond, graph=graph))\n\n def topological_sort(self, graph=None):\n graph = graph or self._graph\n graph = deepcopy(graph)\n\n nodes = []\n\n indep_nodes = self.indep_nodes(graph)\n while len(indep_nodes) != 0:\n n = indep_nodes.pop(0)\n nodes.append(n)\n for dep_id in deepcopy(graph[id(n)]):\n graph[id(n)].remove(dep_id)\n dep = self._map.get(dep_id)\n if len(self.predecessors(dep, graph)) == 0:\n indep_nodes.append(dep)\n\n if len(nodes) != len(graph):\n raise ValueError('Graph is not acyclic')\n\n return nodes\n\n def reset_graph(self):\n self._graph = dict()\n self._map = dict()\n","sub_path":"odps/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"493430919","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n# Simulation of the data expected in the non-ideal source experiment\neps = 5 # electromotive force = 5 V (fixed)\nre = 4.7 # resistor\nri = 70 # internal resistance of the source\n\nR = np.linspace(0,1000,1000) # potentiometer\n\ni = eps/(re+ri+R)\nVi = re*i\nVc = R*i\n\nPu = (Vi+Vc)*i\nPt = eps*i\nPd = Pt-Pu\neta = Pu/Pt\n\n# part (5)\nfig = plt.figure(dpi=80)\n#fig = plt.figure(dpi=500)\nax = fig.add_subplot(1,1,1)\nax.plot(re+R, Pu)\nax.ticklabel_format(style='sci',axis='y',scilimits=(0,0))\nax.set_title(\"Simulação ($\\\\varepsilon$ = 5 V, $r_i$ = 70 $\\\\Omega$)\")\nax.set_xlabel(\"$R + r_e$ ($\\\\Omega$)\")\nax.set_ylabel(\"$P_u$ (W)\")\n#fig.savefig('graficos/pot-util-fonte.png',bbox_inches='tight')\n\n# part (6)\nfig2 = plt.figure(dpi=80)\n#fig2 = plt.figure(dpi=500)\nax = fig2.add_subplot(1,1,1)\nax.ticklabel_format(style='sci',axis='y',scilimits=(0,0))\nax.plot(re+R, Pt, re+R, Pd, re+R, Pu, [500,444],[-1,-1],'--k')\nax.set_title(\"Simulação ($\\\\varepsilon$ = 5 V, $r_i$ = 70 $\\\\Omega$)\")\nax.set_xlabel(\"$R + r_e$ ($\\\\Omega$)\")\nax.set_ylabel(\"Potência (W)\")\nax.set_ylim([0,25e-2])\nax.legend([\"Potência total\",\"Potência dissipada\",\"Potência útil\",\"Eficiência\"],loc=(0.6,0.4))\n\nax2 = ax.twinx()\nax2.plot(re+R, eta, '--k')\nax2.set_ylim([0,1])\nax2.set_ylabel(\"Eficiência\")\n\n#fig2.savefig('graficos/potencias-eficiencia-fonte.png',bbox_inches='tight')\n","sub_path":"pratica2/pre-fonte.py","file_name":"pre-fonte.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"266117111","text":"import sys\nsys.path.append('..//')\n\nimport numpy as np\nimport pickle as pkl\nimport time\n\nimport VNS\n\n\n\nnames = [\n 
'captANOR225_9_20.dat',\n 'captANOR400_10_80.dat',\n 'captANOR625_15_100.dat',\n 'captANOR900_15_20.dat',\n 'captANOR1500_15_100.dat',\n 'captANOR1500_21_500.dat']\ninstanceNames = ['../Instances/{}'.format(x) for x in names]\n\n\nvectR = [(1,1), (1,2), (2,2), (2,3)]\n\nvectResult = []\n\nfor instanceName in instanceNames:\n for (Rcapt, Rcom) in vectR:\n t1 = time.time()\n solution, score = VNS.VNS(instanceName, Rcapt, Rcom, dtMax=60*10)\n t2 = time.time()\n result = [instanceName, Rcapt, Rcom, solution, score, t2-t1]\n vectResult.append(result)\n\n print('instanceName : {}'.format(instanceName))\n print('Rcapt : {} ; Rcom : {}'.format(Rcapt, Rcom))\n print(' > score = {}'.format(score))\n print(' > dt = {}'.format(t2 - t1))\n print()\n\n\n\nwith open('backtest_VNS_24-10.pkl', 'wb') as f:\n pkl.dump(vectResult, f)\n","sub_path":"Backtest/backtest_VNS_24-10.py","file_name":"backtest_VNS_24-10.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487232272","text":"from django.conf.urls import url\n\nfrom schedules.views import RoomListView, CourseListView, CourseDetailView, CourseCreateView\nfrom schedules.views import CourseUpdateView, CourseDeleteView, SectionListView\n\nurlpatterns = [\n url(r'^room/$', RoomListView.as_view(), name='room-list'),\n url(r'^course/$', CourseListView.as_view(), name='course-list'),\n url(r'^course/(?P\\d+)/$', CourseDetailView.as_view(), name='course-detail'),\n url(r'^course/add/$', CourseCreateView.as_view(), name='course-add'),\n url(r'^course/modify/(?P\\d+)/$', CourseUpdateView.as_view(), name='course-modify'),\n url(r'^course/delete/(?P\\d+)/$', CourseDeleteView.as_view(), name='course-delete'),\n url(r'^section/$', SectionListView.as_view(), name='section-list'),\n]\n","sub_path":"schedules/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244856833","text":"#-----------------------------------------------------------------------------\n# Title : PyRogue _ad5780 Module\n#-----------------------------------------------------------------------------\n# Description:\n# PyRogue _ad5780 Module\n#-----------------------------------------------------------------------------\n# This file is part of 'SLAC Firmware Standard Library'.\n# It is subject to the license terms in the LICENSE.txt file found in the\n# top-level directory of this distribution and at:\n# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.\n# No part of 'SLAC Firmware Standard Library', including this file,\n# may be copied, modified, propagated, or distributed except according to\n# the terms contained in the LICENSE.txt file.\n#-----------------------------------------------------------------------------\n\nimport pyrogue as pr\n\nclass Ad5780(pr.Device):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.add(pr.RemoteVariable(\n name = 'dacRefreshRate',\n description = 'DAC Rate (in units of Hz)',\n offset = 0x040,\n bitSize = 32,\n bitOffset = 0,\n base = pr.UInt,\n mode = 'RO',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'dacData',\n description = 'DAC Data',\n offset = 0x0C0,\n bitSize = 18,\n bitOffset = 0,\n base = pr.UInt,\n mode = 'RO',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'debugMux',\n description = 'debugMux',\n offset = 0x200,\n bitSize = 1,\n bitOffset = 0,\n base = pr.Bool,\n mode = 'RW',\n ))\n\n 
self.add(pr.RemoteVariable(\n name = 'debugData',\n description = 'debugData',\n offset = 0x240,\n bitSize = 18,\n bitOffset = 0,\n base = pr.UInt,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'sdoDisable',\n description = 'sdoDisable',\n offset = 0x280,\n bitSize = 1,\n bitOffset = 0,\n base = pr.Bool,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'binaryOffset',\n description = 'binaryOffset',\n offset = 0x284,\n bitSize = 1,\n bitOffset = 0,\n base = pr.Bool,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'dacTriState',\n description = 'dacTriState',\n offset = 0x288,\n bitSize = 1,\n bitOffset = 0,\n base = pr.Bool,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'opGnd',\n description = 'opGnd',\n offset = 0x28C,\n bitSize = 1,\n bitOffset = 0,\n base = pr.Bool,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'rbuf',\n description = 'rbuf',\n offset = 0x290,\n bitSize = 1,\n bitOffset = 0,\n base = pr.Bool,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'halfSckPeriod',\n description = 'halfSckPeriod',\n offset = 0x294,\n bitSize = 32,\n bitOffset = 0,\n base = pr.UInt,\n mode = 'RW',\n ))\n\n self.add(pr.RemoteVariable(\n name = 'hrdRst',\n description = 'hrdRst',\n offset = 0x3F8,\n bitSize = 1,\n bitOffset = 0,\n base = pr.UInt,\n mode = 'WO',\n hidden = False,\n ))\n\n def hardReset(self):\n self.hrdRst.set(1)\n","sub_path":"python/surf/devices/analog_devices/_Ad5780.py","file_name":"_Ad5780.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"391881125","text":"##\n## Imprima el valor maximo y minimo por cada letra de la columa 1.\n##\n## A,9,1\n## B,9,1\n## C,9,0\n## D,7,1\n## E,9,1\n##\nimport re\nListaDeArchivos = []\nArchivo = open('data.csv', 'r')\nPatronDeRemplazoValoresConComa = re.compile(r'(\\d),')\nArchivoEnMemoria = (Archivo.readlines())\nArchivo.close()\nArchivoEnMemoria = [linea.replace(\"\\t\", \" \") for linea in ArchivoEnMemoria]\nArchivoEnMemoria = [linea.replace(\":\", \" \") for linea in ArchivoEnMemoria]\nArchivoEnMemoria = [linea[:-1] for linea in ArchivoEnMemoria]\nArchivoEnMemoria = [PatronDeRemplazoValoresConComa.sub(r\"\\1 \", linea) for linea in ArchivoEnMemoria]\nArchivoEnMemoria = [linea.split(' ') for linea in ArchivoEnMemoria]\nLetras = [Registro[0] for Registro in ArchivoEnMemoria]\nLetrasUnicas = set([Registro[0] for Registro in ArchivoEnMemoria]) \nLetrasUnicas = sorted(LetrasUnicas)\nListaDeValoresPorLetra = {Letra:[] for Letra in LetrasUnicas}\nfor Fila in ArchivoEnMemoria:\n ListaDeValoresPorLetra.get(Fila[0], \"\").append(Fila[1])\nResultado = [(key,max(ListaDeValoresPorLetra[key]),min(ListaDeValoresPorLetra[key]))for key in ListaDeValoresPorLetra]\nfor item in Resultado:\n print(item[0]+\",\"+str(item[1])+\",\"+str(item[2]))","sub_path":"q05.py","file_name":"q05.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387402350","text":"\"\"\"App Test.\"\"\"\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nfrom sqlalchemy.orm.session import Session\n\nfrom app import main_ibovespa, main_nasdaq, main_usd_brl, main_nasdaq_brl_file\nfrom lib.process_data import ProcessData\nfrom lib.process_data_current import ProcessDataCurrent\nfrom lib.process_data_nasdaq_brl import ProcessDataNasdaqBrl\n\n\nclass AppTest(TestCase):\n \"\"\"App Test.\"\"\"\n def setUp(self) 
-> None:\n \"\"\"SetUp.\"\"\"\n pass\n\n @patch.object(ProcessData , 'process')\n def test_ibovespa(self, process_data):\n \"\"\"Test ibovespa.\"\"\"\n process_data.return_value = None\n self.assertIsNone(main_ibovespa(Session()))\n\n @patch.object(ProcessData , 'process')\n def test_main_nasdaq(self, process_data):\n \"\"\"Test main_nasdaq.\"\"\"\n process_data.return_value = None\n self.assertIsNone(main_nasdaq(Session()))\n\n @patch.object(ProcessDataCurrent , 'process')\n def test_main_usd_brl(self, process_data):\n \"\"\"Test main_usd_brl.\"\"\"\n process_data.return_value = None\n self.assertIsNone(main_usd_brl(Session()))\n\n @patch.object(ProcessDataNasdaqBrl , 'process')\n def test_main_nasdaq_brl_file(self, process_data):\n \"\"\"Test main_nasdaq_brl_file.\"\"\"\n process_data.return_value = None\n self.assertIsNone(main_nasdaq_brl_file(Session()))\n","sub_path":"test/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"535063726","text":"## Script (Python) \"getVocabulario\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=vocab\n##title=\n##\natvm = context.portal_vocabularies\nprogramas = atvm.getVocabularyByName(vocab)\nprogramas_dict = programas.getVocabularyDict()\n\nreturn programas_dict\n","sub_path":"automator/skin/skins/automator_skin_templates/getVocabulario.py","file_name":"getVocabulario.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350016632","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-x86_64/egg/netlogger/tests/component/testParseGridftpAuth.py\n# Compiled at: 2009-12-08 17:43:28\n\"\"\"\nUnittests for parse_gridftp_auth.py\n\"\"\"\n__author__ = 'Dan Gunter dkgunter@lbl.gov'\n__rcsid__ = '$Id: testParseGridftpAuth.py 23798 2009-07-14 17:18:22Z dang $'\nfrom netlogger.tests import shared\nimport unittest\nfrom netlogger.parsers.modules import gridftp_auth\n\nclass TestCase(shared.BaseParserTestCase):\n basename = 'gridftp-auth.'\n parser_class = gridftp_auth.Parser\n\n def testBasic(self):\n \"\"\"parse an error-free gridftp_auth log\n \"\"\"\n self.checkGood('basic', self._verifyBasic, num_expected=8)\n\n def _verifyBasic(self, e, num):\n name = e['event']\n if name == gridftp_auth.ns('conn.start'):\n self.assert_(e['host'] == 'some.host.org')\n self.assert_(e['port'] == 8888)\n\n def testErrors(self):\n \"\"\"parse an error-laden gridftp_auth log\n \"\"\"\n self.checkGood('errors', self._verifyErrors, num_expected=23, parser_kw={'error_timeout': '1 hour'})\n\n def _verifyErrors(self, e, num):\n name = e['event']\n if num == 12 or num == 15:\n self.assert_(name.endswith('.error'))\n else:\n self.failIf(name.endswith('.error'))\n\n def testSyslogBasic(self):\n \"\"\"parse an error-free gridftp_auth log with a syslog header\"\"\"\n self.setParseDynamic(True, pattern='(?P[a-zA-Z0-9.]+) ', show_header_groups=True)\n self.checkGood('syslog-basic', self._verifySyslog, num_expected=8)\n self.setParseDynamic(False)\n\n def _verifySyslog(self, e, num):\n self.assert_(e.has_key('syslog.host'))\n\n def testSyslogErrors(self):\n \"\"\"parse an error-laden gridftp_auth log with a syslog header\"\"\"\n 
self.setParseDynamic(True, pattern='(?P[a-zA-Z0-9.]+) ', show_header_groups=True)\n self.checkGood('syslog-error', self._verifySyslogErrors, num_expected=4)\n self.setParseDynamic(False)\n\n def _verifySyslogErrors(self, e, num):\n self.assert_(e.has_key('syslog.host'))\n host = e['syslog.host']\n name = e['event']\n events = [ gridftp_auth.ns(x) for x in ('conn.transfer.start', 'conn.transfer.end',\n 'conn.transfer.start', 'conn.transfer.error')\n ]\n self.assert_(name == events[num], \"event name %d, '%s' not '%s'\" % (\n num, name, events[num]))\n hosts = ['myhost.mydomain.org'] * 4\n hosts[2] = 'myhost2.mydomain.org'\n self.assert_(host == hosts[num], \"host %d, '%s' not '%s'\" % (\n num, host, hosts[num]))\n\n\ndef suite():\n return shared.suite(TestCase)\n\n\nif __name__ == '__main__':\n shared.main()","sub_path":"pycfiles/netlogger-4.3.1-py2.6/testParseGridftpAuth.py","file_name":"testParseGridftpAuth.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628483116","text":"__author__ = \"Adam 'Algomancer' Hibble\"\n__author__ = 'Adam Hibble'\n\n# #\n# * ---------------------------------------------------------------------------- * #\n# * \"THE BEER-WARE LICENSE\" (Revision 42): * #\n# * Adam Hibble wrote this file. As long as you retain this notice you * #\n# * can do whatever you want with this stuff. If we meet some day, and you think * #\n# * this stuff is worth it, you can buy me a beer in return. * #\n# * ---------------------------------------------------------------------------- * #\n# #\nfrom demo import demoData\nimport time\nimport math\nclass Recommender:\n def __init__(self, data):\n assert len(data) > 1, \"Data needs greater than 1 sample has: %d\" % len(data)\n self.data = data\n\n def get_names(self):\n \"\"\"\n :return: Returns a list of all of the keys in the data set\n \"\"\"\n return [i for i in self.data]\n\n def set_names(self, current, new):\n \"\"\"\n\n :param current: Current Key-name\n :param new: New Key-name\n :return:\n \"\"\"\n assert current in self.data, \"There is no %s in data check get_names\" % current\n self.data[current] = new\n\n def get_shared(self, name1, name2):\n \"\"\"\n\n :param name1: First Name to check\n :param name2: Second Name to compare\n :return: Shared values\n \"\"\"\n assert name1 in self.data, \"%s, not in data\" % name1\n assert name2 in self.data, \"%s, not in data\" % name2\n sharedScore = {}\n for val in self.data[name1]:\n if val in self.data[name2]:\n\n sharedScore[val]=1\n n = len(sharedScore)\n if n == 0:\n return 0, False\n return n, sharedScore\n\n def euclideanDist(self, name1, name2, debug=False):\n # ~5600 nanosecond execution time\n # Note: Will Optimize Later. 
Slow Pow function\n \"\"\"\n :param name1: First person in data to compare to\n :param name2: Second person in data to compare to\n :param debug: Run Debug\n :return: Euclidean Distance Score\n \"\"\"\n assert name1 in self.data, \"%s, not in data\" % name1\n assert name2 in self.data, \"%s, not in data\" % name2\n n, sharedScore = self.get_shared(name1, name2)\n\n\n if sharedScore == False or debug == True:\n print(\"name1: \",[self.data[name1],\"name2:\", self.data[name2],\"Shared\", sharedScore])\n return 0\n d = self.data\n sum_of_squares=sum([math.pow(d[name1][val]-d[name2][val], 2)\n for val in d[name1] if val in d[name2]])\n\n\n return 1/(1+sum_of_squares)\n\n\n\n def pearsonCorrelation(self, name1, name2, debug=False):\n \"\"\"\n\n :param name1: First person in data to compare to\n :param name2: Second person in data to compare to\n :param debug: Run Debug\n :return:Pearson Correlation Score\n \"\"\"\n assert name1 in self.data, \"%s, not in data\" % name1\n assert name2 in self.data, \"%s, not in data\" % name2\n\n n, sharedScore = self.get_shared(name1, name2)\n\n if sharedScore == False or debug == True:\n print(\"name1: \",[self.data[name1],\"name2:\", self.data[name2],\"Shared\", sharedScore])\n return 0\n d = self.data\n sum1 = sum([d[name1][i] for i in sharedScore])\n sum2 = sum([d[name2][i] for i in sharedScore])\n\n sum1Sq = sum([pow(d[name1][i],2) for i in sharedScore])\n sum2Sq = sum([pow(d[name2][i],2) for i in sharedScore])\n\n productSum = sum([d[name1][i] * d[name2][i] for i in sharedScore])\n\n numerator = productSum - (sum1 * sum2 / n)\n denominator = math.sqrt((sum1Sq-pow(sum1, 2)/n) * (sum2Sq-pow(sum2, 2)/n))\n if denominator == 0:\n return 0\n\n return numerator/denominator\n\n def ranking(self, name, number=10, simFun = \"euclideanDist\"):\n simVal = getattr(self, simFun)\n score = [(simVal(name, other), other) for other in self.data if other != name]\n\n score.sort(reverse=True)\n return score[0:number]\n\n\n\nif __name__ == '__main__':\n\n run = Recommender(demoData)\n print(run.get_names())\n\n # start = time.clock()\n # for i in range(50000):\n test = run.euclideanDist('Lynn Schulz', 'Gustavo Dubrey', debug = False)\n test2 = run.pearsonCorrelation('Lynn Schulz', 'Gustavo Dubrey', debug = False)\n\n print(test)\n print(test2)\n print(run.ranking('Lynn Schulz')) ## works\n print(run.ranking('Lynn Schulz', simFun= \"pearsonCorrelation\")) ## would print the other comparison\n # end = time.clock()\n # time = (end-start)/50000\n # print('%.20f' %time)\n\n","sub_path":"app/recommendation/recUlti.py","file_name":"recUlti.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293653059","text":"#! /usr/bin/env python\nimport platform\nimport subprocess\nimport os\nimport datetime\n\n\"\"\"\n A trick for bootstrapping this module when it's not in the same directory\n as the calling script:\n\n #! /usr/bin/python\n \n import os\n import sys\n import exceptions\n \n try:\n import BrunoUtils\n except exceptions.ImportError as e:\n retry = False\n \n if \"PYTHONPATH\" not in os.environ:\n retry = True\n else:\n retry = (\"$(HOME)s/bin\" % os.environ) not in os.environ[\"PYTHONPATH\"].split(':')\n \n if retry:\n sys.stderr.write(\"Retrying with PYTHONPATH=%(HOME)s/bin to find BrunoUtils...\\n\" % os.environ)\n os.environ[\"PYTHONPATH\"] = \"%(HOME)s/bin\" % os.environ\n os.execvp(\"python\", [\"python\"] + sys.argv)\n else:\n sys.stderr.write(\"`%s` even after retry! 
Are you sure the module is in $(HOME)s/bin?\\n\" % (str(e), os.environ))\n exit(1)\n\"\"\"\n\nclass BrunoUtils:\n\n @staticmethod\n def cols():\n \"\"\"\n This is designed to return the number of columns in the user's display.\n\n Return value: The number of columns as an integer.\n \"\"\"\n\n tput_cols=None\n try:\n if platform.python_version() >= \"2.7\":\n tput_output = subprocess.check_output([\"tput\", \"cols\"])\n else:\n tput_file = os.popen(\"tput cols\")\n tput_output = tput_file.read()\n tput_file.close()\n tput_cols = int(tput_output)\n except Exception as e:\n pass\n return tput_cols\n\n @staticmethod\n def divmod(a, b):\n x = int(a/b)\n return (x, a-(x*b))\n \n @staticmethod\n def see(o, secondsOnly=False):\n \"\"\"\n This is designed to format a timedelta object or the number of seconds between\n two dates (a floating point number) in a nice standard way, optionally breaking \n it down into hours, minutes, etc.\n\n Arguments:\n - o: A timedelta object or a floating point number\n - secondsOnly: An optional boolean argument that tells the method whether or\n not to break seconds into hours, minutes, etc or just leave them as seconds \n\n Returns:\n A string like:\n \"01.05s\"\n \"02h00m00.00s\"\n \"\"\"\n ret = \"\"\n \n if type(o) in [float, datetime.timedelta]:\n \n if type(o) == float:\n secs = o\n else:\n secs = o.total_seconds()\n \n if secondsOnly:\n days = 0\n hours = 0\n mins = 0\n else:\n (days, secs) = BrunoUtils.divmod(secs, 24*60*60)\n (hours, secs) = BrunoUtils.divmod(secs, 60*60)\n (mins, secs) = BrunoUtils.divmod(secs, 60)\n \n if days > 0:\n ret += \"%dd\" % days\n if ret or (hours > 0):\n ret += \"%02dh\" % hours\n if ret or (mins > 0):\n ret += \"%02dm\" % mins\n ret += \"%05.2fs\" % secs\n else:\n raise Exception(\"Don't know how to handle a %s\" % str(type(o)))\n \n return ret\n\nif __name__ == \"__main__\":\n raise Exception(\"This script contains helper classes and methods and is not intended to be called directly\")\n","sub_path":"bin/BrunoUtils.py","file_name":"BrunoUtils.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97918283","text":"import boto3\nimport json\nimport csv\nimport psycopg2\n\n\nclass UploadToPostgres():\n def __init__(\n self\n ): \n with open('postgres_info.json') as json_file:\n postgres_info = json.load(json_file)\n \n # loadprofile related\n self.load_profile_year_len = 0\n self.load_profile_result_dict = {}\n self.load_profile_start_year = 0\n with open('aggregate_loadprofile.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n self.load_profile_result_dict['All'] = {}\n for row in csv_reader:\n if row[0] == 'Type':\n self.load_profile_start_year = int(row[2])\n self.load_profile_year_len = len(row) - 2\n\n if row[0] not in self.load_profile_result_dict['All'].keys():\n self.load_profile_result_dict['All'][row[0]] = {}\n\n self.load_profile_result_dict['All'][row[0]][row[1]] = []\n for i in range(2, len(row)):\n self.load_profile_result_dict['All'][row[0]][row[1]].append(row[i])\n\n with open('AllDCFC_loadprofile.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n self.load_profile_result_dict['DC Fast Charger'] = {}\n for row in csv_reader:\n if row[0] not in self.load_profile_result_dict['DC Fast Charger'].keys():\n self.load_profile_result_dict['DC Fast Charger'][row[0]] = {}\n\n self.load_profile_result_dict['DC Fast Charger'][row[0]][row[1]] = []\n for i in range(2, len(row)):\n 
self.load_profile_result_dict['DC Fast Charger'][row[0]][row[1]].append(row[i])\n \n with open('AllPublicL2_loadprofile.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n self.load_profile_result_dict['Public L2'] = {}\n for row in csv_reader:\n if row[0] not in self.load_profile_result_dict['Public L2'].keys():\n self.load_profile_result_dict['Public L2'][row[0]] = {}\n self.load_profile_result_dict['Public L2'][row[0]][row[1]] = []\n for i in range(2, len(row)):\n self.load_profile_result_dict['Public L2'][row[0]][row[1]].append(row[i])\n \n with open('AllResidential_loadprofile.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n self.load_profile_result_dict['Residential'] = {}\n for row in csv_reader:\n if row[0] not in self.load_profile_result_dict['Residential'].keys():\n self.load_profile_result_dict['Residential'][row[0]] = {}\n self.load_profile_result_dict['Residential'][row[0]][row[1]] = []\n for i in range(2, len(row)):\n self.load_profile_result_dict['Residential'][row[0]][row[1]].append(row[i])\n\n with open('AllWorkplace_loadprofile.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n self.load_profile_result_dict['Workplace'] = {}\n for row in csv_reader:\n if row[0] not in self.load_profile_result_dict['Workplace'].keys():\n self.load_profile_result_dict['Workplace'][row[0]] = {}\n self.load_profile_result_dict['Workplace'][row[0]][row[1]] = []\n for i in range(2, len(row)):\n self.load_profile_result_dict['Workplace'][row[0]][row[1]].append(row[i])\n\n # gas consumption result related \n self.gas_consumption_year_len = 0\n self.gas_consumption_result_dict = {}\n with open('annual_gas_consumption.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n self.gas_consumption_year_len = len(row) - 1\n self.gas_consumption_result_dict[row[0]] = []\n for i in range(1, len(row)):\n self.gas_consumption_result_dict[row[0]].append(row[i])\n\n # cost benefit result related \n self.cost_benefit_year_len = 0\n self.cost_benefit_result_dict = {}\n with open('annual_results.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n self.cost_benefit_year_len = len(row) - 1\n self.cost_benefit_result_dict[row[0]] = []\n for i in range(1, len(row)):\n self.cost_benefit_result_dict[row[0]].append(row[i])\n if \"EV sales as \" in row[0]:\n fill_up_value = [row[1], row[2]]\n for j in range(5):\n self.cost_benefit_result_dict[row[0]].append(fill_up_value[0])\n self.cost_benefit_result_dict[row[0]].append(fill_up_value[1])\n\n # emission result related \n self.emission_year_len = 0\n self.emission_result_dict = {}\n with open('Emissions.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n self.emission_year_len = len(row) - 1\n self.emission_result_dict[row[0]] = []\n for i in range(1, len(row)):\n self.emission_result_dict[row[0]].append(row[i])\n\n # npv result related\n self.npv_result_dict = {}\n with open('npv_results.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n line_count += 1\n elif line_count == 1:\n self.npv_result_dict[\"Utility Bills\"] = str(row[1])\n line_count += 1\n elif line_count == 2:\n self.npv_result_dict[\"Utility Bills (volumetric)\"] = str(row[1])\n line_count += 1\n elif line_count == 3:\n self.npv_result_dict[\"Utility Bills (demand)\"] = str(row[1])\n line_count += 1\n elif line_count == 4:\n 
self.npv_result_dict[\"Utility Bills (res)\"] = str(row[1])\n line_count += 1\n elif line_count == 5:\n self.npv_result_dict[\"Utility Bills (work)\"] = str(row[1])\n line_count += 1\n elif line_count == 6:\n self.npv_result_dict[\"Utility Bills (pub L2)\"] = str(row[1])\n line_count += 1\n elif line_count == 7:\n self.npv_result_dict[\"Utility Bills (DCFC)\"] = str(row[1])\n line_count += 1\n elif line_count == 8:\n self.npv_result_dict[\"Incremental upfront vehicle cost\"] = str(row[1])\n line_count += 1\n elif line_count == 9:\n self.npv_result_dict[\"Charging infrastructure cost\"] = str(row[1])\n line_count += 1\n elif line_count == 10:\n self.npv_result_dict[\"Charging infrastructure cost (res)\"] = str(row[1])\n line_count += 1\n elif line_count == 11:\n self.npv_result_dict[\"Charging infrastructure cost (work L2)\"] = str(row[1])\n line_count += 1\n elif line_count == 12:\n self.npv_result_dict[\"Charging infrastructure cost (public L2)\"] = str(row[1])\n line_count += 1\n elif line_count == 13:\n self.npv_result_dict[\"Charging infrastructure cost (DCFC)\"] = str(row[1])\n line_count += 1\n elif line_count == 14:\n self.npv_result_dict[\"Avoided vehicle gasoline\"] = str(row[1])\n line_count += 1\n elif line_count == 15:\n self.npv_result_dict[\"Vehicle O&M Savings\"] = str(row[1])\n line_count += 1\n elif line_count == 16:\n self.npv_result_dict[\"Federal EV Tax Credit\"] = str(row[1])\n line_count += 1\n elif line_count == 17:\n self.npv_result_dict[\"Energy Supply Cost\"] = str(row[1])\n line_count += 1\n elif line_count == 18:\n self.npv_result_dict[\"Energy Cost\"] = str(row[1])\n line_count += 1\n elif line_count == 19:\n self.npv_result_dict[\"Generation Capacity Cost\"] = str(row[1])\n line_count += 1\n elif line_count == 20:\n self.npv_result_dict[\"Vehicle Sales (NPV)\"] = str(row[1])\n line_count += 1\n elif line_count == 21:\n self.npv_result_dict[\"Transmission and Distribution Cost\"] = str(row[1])\n line_count += 1\n elif line_count == 22:\n self.npv_result_dict[\"Distribution Cost\"] = str(row[1])\n line_count += 1\n elif line_count == 23:\n self.npv_result_dict[\"Transmission Cost\"] = str(row[1])\n line_count += 1\n \n\n self.db_host = postgres_info['DB_HOST']\n self.cba_net_present_table_name = \"script_algorithm_cba_net_present_value\"\n self.gas_consumption_table_name = \"script_algorithm_cba_gas_consumption\"\n self.cba_net_emission_table_name = \"script_algorithm_cba_net_emission\"\n self.cba_cost_benefit_table_name = \"script_algorithm_cba_cost_benefit\"\n self.cba_load_profile_table_name = \"script_algorithm_cba_load_profile\"\n\n self.postgres_db = postgres_info['POSTGRES_DB']\n self.postgres_user = postgres_info['POSTGRES_USER']\n self.postgres_password = postgres_info['POSTGRES_PASSWORD']\n \n self.conn = psycopg2.connect(\n host=self.db_host,\n dbname=self.postgres_db,\n user=self.postgres_user,\n password=self.postgres_password,\n port='5432'\n )\n\n self.cur = self.conn.cursor()\n\n self.run_load_profile()\n self.run_cost_benefit()\n #self.run_gas_consumption()\n #self.run_npv()\n self.run_emission()\n\n # Close communication with the database\n self.cur.close()\n self.conn.close()\n\n\n def run_load_profile(self):\n # create table on Postgres\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS \" + self.cba_load_profile_table_name + \" (id serial PRIMARY KEY, config_id INTEGER, poi varchar, year INTEGER, day_type varchar, loads varchar);\")\n\n tmp_load = {}\n\n for i in range(self.load_profile_year_len):\n for poi in 
self.load_profile_result_dict.keys():\n tmp_load[poi] = {}\n cur_year = str(self.load_profile_start_year + i)\n tmp_load[poi][cur_year] = {}\n for day_type in self.load_profile_result_dict[poi].keys():\n if day_type != \"Type\":\n tmp_load[poi][cur_year][day_type] = []\n for hour in self.load_profile_result_dict[poi][day_type].keys():\n tmp_load[poi][cur_year][day_type].append(\n self.load_profile_result_dict[poi][day_type][hour][i]\n )\n \n self.cur.execute(\"INSERT INTO \" + self.cba_load_profile_table_name + \" (config_id, poi, year, day_type, loads) VALUES (%s, %s, %s, %s, %s)\",\n (\n '1', str(poi), int(cur_year), str(day_type), json.dumps(tmp_load[poi][cur_year][day_type])\n )\n )\n\n print('Insertion finished...')\n # Make the changes to the database persistent\n self.conn.commit()\n \n\n def run_cost_benefit(self):\n # create table on Postgres\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS \" + self.cba_cost_benefit_table_name + \" (id serial PRIMARY KEY, config_id INTEGER, year INTEGER, cost_benefit varchar);\")\n\n print('********** len' + str(self.cost_benefit_year_len))\n for i in range(self.cost_benefit_year_len):\n tmp_res = {}\n\n for key in self.cost_benefit_result_dict.keys():\n if key != 'Year':\n print(\"******** len each row\" + str(len(self.cost_benefit_result_dict[key])))\n print(\"********** key name: \" + key)\n tmp_res[key] = self.cost_benefit_result_dict[key][i]\n\n self.cur.execute(\"INSERT INTO \" + self.cba_cost_benefit_table_name + \" (config_id, year, cost_benefit) VALUES (%s, %s, %s)\",\n (\n '1', str(self.cost_benefit_result_dict['Year'][i]), json.dumps(tmp_res)\n )\n )\n\n print('Insertion finished...')\n # Make the changes to the database persistent\n self.conn.commit()\n\n\n def run_gas_consumption(self):\n\n # create table on Postgres\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS \" + self.gas_consumption_table_name + \" (id serial PRIMARY KEY, config_id INTEGER, year INTEGER, consumption varchar);\")\n\n for i in range(self.gas_consumption_year_len):\n tmp_res = {}\n\n for key in self.gas_consumption_result_dict.keys():\n if key != 'Year':\n tmp_res[key] = self.gas_consumption_result_dict[key][i]\n\n self.cur.execute(\"INSERT INTO \" + self.gas_consumption_table_name + \" (config_id, year, consumption) VALUES (%s, %s, %s)\",\n (\n '1', str(self.gas_consumption_result_dict['Year'][i]), json.dumps(tmp_res)\n )\n )\n\n print('Insertion finished...')\n # Make the changes to the database persistent\n self.conn.commit()\n\n\n def run_npv(self):\n\n # create table on Postgres\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS \" + self.cba_net_present_table_name + \" (id serial PRIMARY KEY, config_id INTEGER, year INTEGER, npv varchar);\")\n\n self.cur.execute(\"INSERT INTO \" + self.cba_net_present_table_name + \" (config_id, year, npv) VALUES (%s, %s, %s)\",\n (\n '1', '2020', json.dumps(self.npv_result_dict)\n )\n )\n\n print('Insertion finished...')\n # Make the changes to the database persistent\n self.conn.commit()\n \n def run_emission(self):\n # create table on Postgres\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS \" + self.cba_net_emission_table_name + \" (id serial PRIMARY KEY, config_id INTEGER, year INTEGER, emissions varchar);\")\n\n for i in range(self.emission_year_len):\n tmp_res = {}\n\n for key in self.emission_result_dict.keys():\n if key != 'Year':\n tmp_res[key] = self.emission_result_dict[key][i]\n\n self.cur.execute(\"INSERT INTO \" + self.cba_net_emission_table_name + \" (config_id, year, emissions) VALUES (%s, %s, %s)\",\n (\n '1', 
str(self.emission_result_dict['Year'][i]), json.dumps(tmp_res)\n )\n )\n\n print('Insertion finished...')\n # Make the changes to the database persistent\n self.conn.commit()\n\n\nclient = UploadToPostgres()\n","sub_path":"ec2setup/algorithms/CostBenefitAnalysis/cases/basecase/results/UploadToPostgres.py","file_name":"UploadToPostgres.py","file_ext":"py","file_size_in_byte":15505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313649458","text":"import os\nimport utils\nimport numpy as np\nimport time\nfrom sklearn.preprocessing import scale\n\nTEST_INPUT_PATH = '/mnt/sdb/mspark/data/brain_aneurysm_yonsei_test/input'\nTEST_LABEL_PATH = '/mnt/sdb/mspark/data/brain_aneurysm_yonsei_test/label'\nIMG_SIZE = [256, 256]\nN_3D_CONTEXT = 9\n\ndir_list = sorted(os.listdir(TEST_INPUT_PATH))\nprint(dir_list)\n\nfor dir in dir_list:\n input_path = os.path.join(TEST_INPUT_PATH, dir)\n label_path = os.path.join(TEST_LABEL_PATH, dir + '_LabelData')\n\n resampled_dstime = time.time()\n resampled_dcm, resampled_nii = utils.resample_and_resize(input_path, label_path, IMG_SIZE, new_spacing=[0.5, 0.5, 0.5])\n resampled_detime = time.time()\n print('Resampling Finished', resampled_detime-resampled_dstime)\n\n\n seg_dstime = time.time()\n segmented_dcm = utils.save_vessels_seg(resampled_dcm, w=25, filtered_px=3)\n seg_detime = time.time()\n print('Vessels Segmentation Finished', seg_detime-seg_dstime)\n\n filtering_dstime = time.time()\n segmented_dcm = utils.save_cropped_label(resampled_dcm, segmented_dcm)\n filtering_detime = time.time()\n print('Segmentation Filtering Finished', filtering_detime-filtering_dstime)\n\n normalization_dstime = time.time()\n n_files, height, width = resampled_dcm.shape\n resampled_dcm = resampled_dcm.reshape((n_files, -1))\n resampled_dcm = scale(resampled_dcm, axis=1)\n resampled_dcm = resampled_dcm.reshape((n_files, height, width))\n zero_padding = np.zeros((N_3D_CONTEXT//2, height, width))\n resampled_dcm = np.concatenate((zero_padding, resampled_dcm, zero_padding), axis=0)\n normalization_detime = time.time()\n print('Data Normalization Finished', normalization_detime-normalization_dstime)\n\n bbox_dstime = time.time()\n resampled_bbox = utils.make_bbox_label(resampled_nii)\n bbox_detime = time.time()\n print('Bbox Labeling Finished', bbox_detime-bbox_dstime)\n\n\n\n np.savez_compressed('/home/mspark/deepnoid/data/{}.npz'.format(dir),\n re_dcm=resampled_dcm, re_nii=resampled_nii, seg=segmented_dcm, bbox=resampled_bbox)\n print('saved')","sub_path":"aneurysm_detection/seg_3dce_test/loader2.py","file_name":"loader2.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"183028812","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import ZhilianspiderItem\nfrom bs4 import BeautifulSoup\n\nclass ZhilianSpider(scrapy.Spider):\n name = 'zhilian'\n allowed_domains = ['m.zhaopin.com']\n # start_urls = ['https://m.zhaopin.com/hangzhou/']\n #https://m.zhaopin.com/guangzhou-763/?keyword=python&order=0&maprange=3&ishome=0\n start_urls = ['https://m.zhaopin.com/beijing-530/?keyword=python&pageindex=1&maprange=3&islocation=0']\n base_url = 'https://m.zhaopin.com/'\n def parse(self, response):\n print(response.url)\n # 这里是body 而不是text\n soup = BeautifulSoup(response.body,'lxml')\n all_sec = soup.find('div',class_='r_searchlist positiolist').find_all('section')\n for sec in all_sec:\n d_link = 
sec.find('a',class_='boxsizing')['data-link']\n detail_link = self.base_url+d_link\n if detail_link:\n yield scrapy.Request(detail_link,callback=self.parse_detail)\n\n # 是否有下一页的链接\n if soup.find('a',class_='nextpage'):\n next_url = self.base_url+soup.find('a',class_='nextpage')['href']\n print('next_url ',next_url)\n # 若果有重复的,则不进行过滤\n yield scrapy.Request(next_url,callback=self.parse,dont_filter=True)\n\n\n def parse_detail(self,response):\n item = ZhilianspiderItem()\n item['job_link'] = response.url\n item['job_name'] = response.xpath('//*[@class=\"job-name fl\"]/text()')[0].extract()\n item['company'] = response.xpath('//*[@class=\"comp-name\"]/text()')[0].extract()\n item['address'] = response.xpath('//*[@class=\"add\"]/text()').extract_first()\n item['job_info'] = ''.join(response.xpath('//*[@class=\"about-main\"]/p/text()').extract())\n item['salary'] = response.xpath('//*[@class=\"job-sal fr\"]/text()')[0].extract()\n item['job_tags'] = ';'.join(response.xpath(\"//*[@class='tag']/text()\").extract())\n yield item\n","sub_path":"zhilianSpider/zhilianSpider/spiders/zhilian.py","file_name":"zhilian.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245905841","text":"import os\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport cv2\nimport torch.nn.functional as F\nfrom sklearn import metrics\n\n\ndef disp_to_color(disp, max_disp=192):\n \"\"\"\n disp: numpy float32 array of dimension\n \"\"\"\n height, width = disp.shape\n\n # if max_disp < 0:\n # max_disp = np.max(disp) # 当前视差图的最大值\n disp = disp / max_disp # 归一化\n disp = disp.reshape((disp.size, 1)) # disp.size 指所有的像素数量\n # print(disp)\n\n colormap = np.array([\n [0, 0, 0, 114],\n [0, 0, 1, 185],\n [1, 0, 0, 114],\n [1, 0, 1, 174],\n [0, 1, 0, 114],\n [0, 1, 1, 185],\n [1, 1, 0, 114],\n [1, 1, 1, 0]\n ])\n\n # [114., 185., 114., 174., 114., 185., 114.]\n bins = colormap[0:colormap.shape[0] - 1, colormap.shape[1] - 1].astype(float) # map 中的最后一列\n # [[114.], [185.], [114.], [174.], [114.], [185.], [114.]]\n bins = bins.reshape((bins.shape[0], 1)) # (8,1)\n # [ 114., 299., 413., 587., 701., 886., 1000.]\n cbins = np.cumsum(bins) # 累加和\n # [[0.114],[0.185],[0.114],[0.174],[0.114],[0.185],[0.114]]\n bins = bins / cbins[cbins.shape[0] - 1] # 归一化\n # [0.114, 0.299, 0.413, 0.587, 0.701, 0.886]\n cbins = cbins[0:cbins.shape[0] - 1] / cbins[cbins.shape[0] - 1]\n # [[0.114], [0.299], [0.413], [0.587], [0.701], [0.886]]\n cbins = cbins.reshape((cbins.shape[0], 1)) # (6,1)\n\n ind = np.tile(disp.T, (6, 1)) # (6, disp.size) disp 拉成一行,复制了6行\n tmp = np.tile(cbins, (1, disp.size)) # (6, disp.size)\n b = (ind > tmp).astype(int) # 0/1 mask,\n s = np.sum(b, axis=0) # 每个像素的disp值,大于cbins中的几个数, 表示视察所处的bin\n bins = 1 / bins # [[8.77],[5.40],[8.77],[5.74],[8.77],[5.40],[8.77]]\n t = cbins\n cbins = np.zeros((cbins.size + 1, 1)) # (7,1) 一共7个bin,\n cbins[1:] = t # [[0],[0.114],[0.299],[0.413],[0.587],[0.701],[0.886]]\n disp = (disp - cbins[s]) * bins[s]\n disp = colormap[s, 0:3] * np.tile(1 - disp, (1, 3)) + colormap[s + 1, 0:3] * np.tile(disp, (1, 3))\n\n disp = disp.reshape((height, width, 3))\n disp = (disp * 255).astype('uint8')\n # print(disp)\n return disp\n\n\ndef visual_img(img, config, img_name):\n path = os.path.join(config.output_dir, 'visualization')\n if not os.path.exists(path):\n os.mkdir(path)\n\n img = img * 255\n img = img.squeeze()\n img = img.data.cpu().numpy().astype('uint8')\n img = np.transpose(img, (1, 2, 
0))\n    img = Image.fromarray(img, 'RGB')\n    img.save(os.path.join(path, img_name + '.png'))\n\n\ndef visual_disparity(disp, config, name, mask=None):\n    path = os.path.join(config.output_dir, 'visualization')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    if mask is not None:\n        disp[~mask] = 0 # mask == 1 marks pixels that have a ground-truth value, 0 marks pixels that do not\n    disp = disp.squeeze() # drop the size-1 dimensions\n    disp = disp.data.cpu().numpy() # copy the data over to the CPU\n    disp = disp_to_color(disp, 192) # convert the disparity map to an RGB image; the color encodes the disparity magnitude\n    disp = Image.fromarray(disp, 'RGB')\n    # print(os.path.join(path, name+'.png'))\n\n    disp.save(os.path.join(path, name+'.png'))\n\n\ndef visual_confidence(conf_map, config, name):\n    path = os.path.join(config.output_dir, 'visualization')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    conf_map = conf_map.squeeze()\n    conf_map = conf_map.data.cpu().numpy()\n    # assumes the estimated confidence values are floats in the range 0~255\n    conf_map = conf_map.astype('uint8') # TODO: handle 0/1 confidence maps\n\n    conf_map = Image.fromarray(conf_map)\n    conf_map.save(os.path.join(path, 'confidence_{}.png'.format(name)))\n\n\ndef plot_list(config, inds, values, name):\n    path = os.path.join(config.log_dir, 'plot_losslist')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    plt.figure()\n    plt.xlabel('epoch')\n    plt.ylabel('err')\n    plt.plot(inds, values)\n\n    plt.savefig(os.path.join(path, 'lossplot_{}.png'.format(name)))\n\n\ndef err_unc_plot(config, err_unc, name):\n    path = os.path.join(config.output_dir, 'plot')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    err_unc = np.array(err_unc) # N rows (one per point), 2 columns: the first is the error, the second the uncertainty\n    np.save(os.path.join(path, name), err_unc)\n\n    err_random = err_unc.copy()\n    err_unc = err_unc[np.argsort(-err_unc[:, 1])] # re-sort the rows by uncertainty, from high to low\n    err_err = err_unc[np.argsort(-err_unc[:, 0])]\n    np.random.shuffle(err_random)\n\n    err = err_unc[:, 0] # the error column\n    err_e = err_err[:, 0]\n    err_r = err_random[:, 0]\n    percentiles = np.arange(100) / 100.\n    cutoff_inds = (percentiles * err_unc.shape[0]).astype(int) # number of points dropped at each cutoff\n    cutoff_inds_err = (percentiles * err_e.shape[0]).astype(int) # number of points dropped at each cutoff\n    cutoff_inds_random = (percentiles * err_r.shape[0]).astype(int) # number of points dropped at each cutoff\n\n    pix_error = [err[cutoff:].mean() for cutoff in cutoff_inds] # mean error of the points kept at each cutoff\n    pix_error_e = [err_e[cutoff:].mean() for cutoff in cutoff_inds_err] # mean error of the points kept at each cutoff\n    pix_error_random = [err_r[cutoff:].mean() for cutoff in cutoff_inds_random] # mean error of the points kept at each cutoff\n    # pix_error = [metrics.mean_squared_error(err[cutoff:], np.zeros_like(err[cutoff:])) for cutoff in cutoff_inds] # mean error of the points kept at each cutoff\n\n    plt.figure()\n    plt.xlabel('drop_percentile')\n    plt.ylabel('err')\n    plt.plot(percentiles, pix_error, color='green', label='uncertainty')\n    plt.plot(percentiles, pix_error_e, color='aqua', label='oracle')\n    plt.plot(percentiles, pix_error_random, color='red', label='random')\n    plt.legend()\n    plt.savefig(os.path.join(path, name + '.png'))\n\n\ndef pixerr_unc_plot(config, err_unc, threshold=3, name='test'):\n    path = os.path.join(config.output_dir, 'plot')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    err_unc = np.array(err_unc) # N rows (one per point), 2 columns: the first is the error, the second the uncertainty\n    np.save(os.path.join(path, name), err_unc)\n\n    err_random = err_unc.copy()\n    err_unc = err_unc[np.argsort(-err_unc[:, 1])] # re-sort the rows by uncertainty, from high to low\n    err_err = err_unc[np.argsort(-err_unc[:, 0])]\n    np.random.shuffle(err_random)\n\n    err = err_unc[:, 0] # the error column\n    err_e = err_err[:, 0]\n    err_r = err_random[:, 0]\n    percentiles = np.arange(100) / 100.\n    cutoff_inds = (percentiles * err_unc.shape[0]).astype(int) # number of points dropped at each cutoff\n    cutoff_inds_err = (percentiles * err_e.shape[0]).astype(int) # number of points dropped at each cutoff\n    cutoff_inds_random = (percentiles * err_r.shape[0]).astype(int) # number of points dropped at each cutoff\n\n    count = len(err)\n    err = (err > threshold).astype(float)\n    err_e = (err_e > threshold).astype(float)\n    err_r = (err_r > threshold).astype(float)\n\n    pix_error = [(err[cutoff:].sum())/(count-cutoff) for cutoff in cutoff_inds] # fraction of kept points whose error exceeds the threshold\n    pix_error_e = [err_e[cutoff:].mean() for cutoff in cutoff_inds_err] # fraction of kept points whose error exceeds the threshold\n    pix_error_random = [err_r[cutoff:].mean() for cutoff in cutoff_inds_random] # fraction of kept points whose error exceeds the threshold\n    # pix_error = [metrics.mean_squared_error(err[cutoff:], np.zeros_like(err[cutoff:])) for cutoff in cutoff_inds] # mean error of the points kept at each cutoff\n\n    plt.figure()\n    plt.xlabel('drop_percentile')\n    plt.ylabel('{}pix_err'.format(threshold))\n    plt.plot(percentiles, pix_error, color='green', label='uncertainty')\n    plt.plot(percentiles, pix_error_e, color='aqua', label='oracle')\n    plt.plot(percentiles, pix_error_random, color='red', label='random')\n    plt.legend()\n    plt.savefig(os.path.join(path, name + '.png'))\n\n\ndef visual_uncertainty(config, img, name, mode=cv2.COLORMAP_BONE, norm=False, percent=1, threshold=-1):\n    path = os.path.join(config.output_dir, 'visualization')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    img = img.data.cpu().numpy()\n\n    # set values above the threshold to 255 and values below it to 0\n    if threshold != -1:\n        mask = img > threshold\n        img[mask] = 255\n        img[~mask] = 0\n\n    if percent != 1:\n        amount = img.size\n        amount = int(amount * (1 - percent))\n\n        index_zero = np.array(np.argsort(img.ravel()))[:amount]\n        index_one = np.array(np.argsort(img.ravel()))[amount:]\n\n        mask_zero = np.unravel_index(index_zero, img.shape)\n        mask_one = np.unravel_index(index_one, img.shape)\n        img[mask_zero] = 0\n        img[mask_one] = 255\n\n\n    img = img.astype(np.uint8)\n    img = img.transpose((1, 2, 0))\n\n    img = cv2.applyColorMap(img, mode)\n    cv2.imwrite(os.path.join(path, '{}.jpg'.format(name)), img)\n\n\ndef visual_mask(disp, config, name, mask=None, max_disp=-1):\n    if mask is not None:\n        disp[~mask] = 0\n    disp = disp.squeeze() # drop the size-1 dimensions\n    disp = disp.data.cpu().numpy() # copy the data over to the CPU\n    disp = disp_to_color(disp, max_disp) # convert the disparity map to an RGB image; the color encodes the disparity magnitude\n    disp = Image.fromarray(disp, 'RGB')\n    # path = os.path.join(config.output_dir,'visualization', 'mask')\n    path = os.path.join(config.output_dir, 'visualization', 'mask')\n    if not os.path.exists(path):\n        os.mkdir(path)\n    disp.save(os.path.join(path, name+'.png'))\n\ndef visual_uncertainty_(disp, config, name, mask=None, max_disp=-1):\n    if mask is not None:\n        disp[~mask] = 0\n    disp = disp.squeeze() # drop the size-1 dimensions\n    disp = disp.data.cpu().numpy() # copy the data over to the CPU\n    disp = disp_to_color(disp, max_disp) # convert the disparity map to an RGB image; the color encodes the disparity magnitude\n    disp = Image.fromarray(disp, 'RGB')\n    # path = os.path.join(config.output_dir,'visualization', 'mask')\n    path = os.path.join(config.output_dir, 'visualization')\n    if not os.path.exists(path):\n        os.mkdir(path)\n    disp.save(os.path.join(path, name+'.png'))\n\n\ndef err_to_color(errmap):\n\n    cols = np.array([\n        [0/3.0, 0.1875/3.0, 49, 54, 149],\n        [0.1875/3.0, 0.375/3.0, 69, 117, 180],\n        [0.375/3.0, 0.75/3.0, 116, 173, 209],\n        [0.75/3.0, 1.5/3.0, 171, 217, 233],\n        [1.5/3.0, 3/3.0, 224, 243, 248],\n        [3/3.0, 6/3.0, 254, 224, 144],\n        [6/3.0, 12/3.0, 253, 174, 97],\n        [12/3.0, 24/3.0, 244, 109, 67],\n        [24/3.0, 48/3.0, 215, 48, 39],\n        [48/3.0, 255, 165, 0, 38]\n    ])\n\n    # print(errmap.shape)\n\n    img = np.zeros((3, errmap.shape[0], errmap.shape[1]))\n    # print(img.shape)\n\n\n    for i in range(cols.shape[0]):\n\n        v,u = np.where((errmap>cols[i][0])&(errmap<=cols[i][1]))\n        # print(v)\n        # print(u)\n        img[0, v, u] = cols[i][2]\n        img[1, v, u] = cols[i][3]\n        img[2, v, u] = cols[i][4]\n\n    img = img.transpose((1, 2, 0)).astype('uint8')\n    return img\n\n\ndef disp_error_image(config, errmap, name, mask=None):\n    path = os.path.join(config.output_dir, 'visualization')\n    if not os.path.exists(path):\n        os.mkdir(path)\n\n    if mask is not None:\n        errmap[~mask] = 0\n    errmap = errmap.squeeze() # drop the size-1 dimensions\n    errmap = errmap.data.cpu().numpy() # copy the data over to the CPU\n    errmap = err_to_color(errmap) # convert the error map to an RGB image; the color encodes the error magnitude\n    errmap = Image.fromarray(errmap, 'RGB')\n\n    errmap.save(os.path.join(path, name + '.png'))","sub_path":"utils/helper_visualization.py","file_name":"helper_visualization.py","file_ext":"py","file_size_in_byte":11698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"472342860","text":"import pandas as pd\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('a.csv')\n\n# merge several columns into a single label\ndf.fillna(value='未知', inplace=True)\ndf['label'] = df['是否合理'].str.cat(\n    [df['解决情况'], df['具体理由/未解决细分'], df['具体理由']], sep='-')\n\n# write the label distribution to disk\ndf['label'].value_counts().to_csv('train_data/dis.csv')\n\n# data cleaning\ndef clean(line):\n    return re.sub('\\s', '', str(line)[:512])\ndf['反馈结果'] = df['反馈结果'].apply(clean)\n\n\n# inspect the text lengths\nplt.figure()\ndf['反馈结果'].apply(len).hist(bins=100)\nplt.show()\n\n\n# train/validation split\ntrain, dev = train_test_split(df, test_size=0.1, shuffle=True, random_state=42)\n\n\n# oversample the small classes\ntrain_list = []\nfor name, sd in train.groupby('label'):\n    if len(sd) < 30:\n        train_list.append(\n            pd.concat([sd, sd.sample(n=30 - len(sd), replace=True)]))\n    elif len(sd) < 50:\n        train_list.append(\n            pd.concat([sd, sd.sample(n=50 - len(sd), replace=True)]))\n    else:\n        train_list.append(sd)\nnew_train = pd.concat(train_list)\n\n# write the label distribution to disk\nnew_train.groupby('label').describe().to_csv('train_data/dis2.csv')\n\nnew_train.to_csv('train_data/train.tsv', sep='\\t', index=False)\ndev.to_csv('train_data/dev.tsv', sep='\\t', index=False)\ndf.sample(frac=1).to_csv('train_data/train_all.tsv', sep='\\t', index=False)\n\n# class-weight adjustment\ntrain['label'].value_counts()\nweights = 1 / (train['label'].value_counts() / (train['label'].value_counts().max()))\nprint(weights.to_list())\nprint(weights.index)\n","sub_path":"CodeReuse/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"466360348","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10 ** 6)\n\n\ndef go(day, late, absent):\n    if late == 2:\n        return 0\n    if absent == 3:\n        return 0\n\n    if day == n:\n        return 1\n\n    ret = dp[day][late][absent]\n    if ret != -1:\n        return ret\n\n    ret = go(day+1, late+1, 0) + go(day+1, late, absent+1) + go(day+1, late, 0)\n\n    dp[day][late][absent] = ret%1000000\n\n    return dp[day][late][absent]\n\n\nn = int(input())\n# dp[x][y][z]: day x, y tardies, z consecutive absences\ndp = [[[-1 for z in range(3)] for y in range(2)] for x in range(n)]\n\nprint(go(0, 0, 0))\n","sub_path":"Hangil/day05_1563_choi.py","file_name":"day05_1563_choi.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"244579498","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport re\nimport subprocess\n\nREMOTE_HOST_SPEC = re.compile(r'^\\w+@[\\w\\.]+:')\n\n# 
=========================\n# Glow TTS Path Conventions\n# =========================\n\nclass RemoteGlow:\n    \"\"\"\n    Example files:\n        ubuntu@104.171.200.145:/home/ubuntu/mount/checkpoints/moistkr1tikal/base/G_4430.pth\n    \"\"\"\n    @classmethod\n    def from_args(cls, args):\n        if args.remote:\n            return RemoteGlowStaticPath(args.remote)\n        else:\n            return RemoteGlowDestructured(args)\n\n    def full_model_remote_path(self, model_number):\n        \"\"\"\n        Get the complete path to a model file.\n        \"\"\"\n        model_filename = self.model_filename(model_number)\n        remote_path = self.remote_path()\n        return '{}/{}'.format(remote_path, model_filename)\n\n    def model_filename(self, model_number):\n        \"\"\"\n        Filename format for Glow-TTS model files.\n        \"\"\"\n        return 'G_{}.pth'.format(model_number)\n\n    def remote_path(self):\n        raise NotImplementedError(\"This has not been implemented.\")\n\n\nclass RemoteGlowStaticPath(RemoteGlow):\n    def __init__(self, full_path):\n        if not REMOTE_HOST_SPEC.match(full_path):\n            raise Exception('--remote must be a remote host and path, e.g. user@0.0.0.0:/path')\n\n        self.full_path = full_path\n\n    def remote_path(self):\n        return self.full_path\n\n\nclass RemoteGlowDestructured(RemoteGlow):\n    def __init__(self, args):\n        if not args.ip: raise AssertionError('--ip or --remote not provided')\n        if not args.voice_name: raise AssertionError('--voice_name or --remote not provided')\n\n        self.username = args.username or 'ubuntu'\n        self.ip = args.ip\n        self.voice_name = args.voice_name\n\n    def remote_path(self):\n        return '{}@{}:/home/{}/mount/checkpoints/{}/base'.format(\n            self.username,\n            self.ip,\n            self.username,\n            self.voice_name)\n\n\n# =============\n# Download Loop\n# =============\n\ndef download(remote_path, local_path):\n    #scp ubuntu@104.171.200.63:/home/ubuntu/code/glow-tts-samuel-l-jackson/logs/base/G_9450.pth .\n    command = 'scp {} {}'.format(remote_path, local_path)\n    print('Command:\\n{}'.format(command))\n    subprocess.call(command, shell=True)\n\ndef download_model_range(args):\n    remote_glow = RemoteGlow.from_args(args)\n\n    for model_number in range(args.start, args.end + 1, args.skip):\n        model_name = 'G_{}.pth'.format(model_number)\n        local_name = os.path.join(args.local, model_name)\n        if not os.path.exists(local_name):\n            remote_model_name = remote_glow.full_model_remote_path(model_number)\n            download(remote_model_name, args.local)\n\n\n# ===========\n# Arg Parsing\n# ===========\n\ndef parse_args():\n    parser = argparse.ArgumentParser('Download a range of Glow-TTS model checkpoints')\n    parser.add_argument('--remote', type=str, help='Full remote path, including user and hostname', required=False)\n    parser.add_argument('--voice_name', type=str, required=False)\n    parser.add_argument('--ip', type=str, required=False)\n    parser.add_argument('--username', type=str, required=False)\n    parser.add_argument('--local', type=str, required=True)\n    parser.add_argument('--start', type=int, default=4000)\n    parser.add_argument('--skip', type=int, default=500)\n    parser.add_argument('--end', type=int, required=True)\n\n    args = parser.parse_args()\n\n    return args\n\nargs = parse_args()\ndownload_model_range(args)\n\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"230339454","text":"# gemato: Test utility functions\n# vim:fileencoding=utf-8\n# (c) 2017-2018 Michał Górny\n# Licensed under the terms of 2-clause BSD license\n\nimport errno\nimport functools\nimport io\nimport 
logging\nimport os\nimport os.path\nimport random\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport unittest\n\nif sys.hexversion >= 0x03000000:\n    from http.server import HTTPServer, BaseHTTPRequestHandler\n    from urllib.parse import urlparse, parse_qs\nelse:\n    from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler\n    from urlparse import urlparse, parse_qs\n\nimport gemato.openpgp\n\n\nclass LoggingTestCase(unittest.TestCase):\n    def setUp(self):\n        if sys.hexversion < 0x03000000:\n            self.log = io.BytesIO()\n        else:\n            self.log = io.StringIO()\n        # keep a reference to the handler itself; addHandler() returns None\n        self.log_handler = logging.StreamHandler(self.log)\n        logging.getLogger().addHandler(self.log_handler)\n\n    def tearDown(self):\n        # TODO: make some use of the log output?\n        logging.getLogger().removeHandler(self.log_handler)\n\n\nclass TempDirTestCase(LoggingTestCase):\n    DIRS = []\n    FILES = {}\n\n    def setUp(self):\n        super(TempDirTestCase, self).setUp()\n        self.dir = tempfile.mkdtemp()\n        for k in self.DIRS:\n            os.mkdir(os.path.join(self.dir, k))\n        for k, v in self.FILES.items():\n            with io.open(os.path.join(self.dir, k), 'w', encoding='utf8') as f:\n                f.write(v)\n\n    def tearDown(self):\n        shutil.rmtree(self.dir)\n        super(TempDirTestCase, self).tearDown()\n\n\nclass HKPServerRequestHandler(BaseHTTPRequestHandler):\n    def __init__(self, keys, *args, **kwargs):\n        self.keys = keys\n        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n    def log_message(self, *args, **kwargs):\n        pass\n\n    def do_GET(self):\n        try:\n            parsed = urlparse(self.path)\n            assert parsed.path == '/pks/lookup'\n\n            qs = parse_qs(parsed.query)\n            assert qs.get('op') == ['get']\n            assert len(qs.get('search', [])) == 1\n\n            key = qs['search'][0]\n            assert key.startswith('0x')\n            key = key[2:]\n        except AssertionError:\n            self.send_error(400, \"Bad request\")\n            return\n\n        if key not in self.keys:\n            self.send_error(404, \"Not found\")\n            return\n\n        self.send_response(200, \"OK\")\n        self.send_header(\"Content-type\", \"text/plain\")\n        self.end_headers()\n        self.wfile.write(self.keys[key])\n        self.wfile.flush()\n\n\nclass HKPServerTestCase(unittest.TestCase):\n    \"\"\"\n    A test case deploying HKP server for OpenPGP client to use.\n    \"\"\"\n\n    SERVER_KEYS = {}\n\n    def setUp(self):\n        # try 10 randomly selected ports before giving up\n        for port in random.sample(range(1024, 32768), 10):\n            try:\n                self.server = HTTPServer(('127.0.0.1', port),\n                    functools.partial(HKPServerRequestHandler,\n                        self.SERVER_KEYS))\n            except OSError as e:\n                if e.errno != errno.EADDRINUSE:\n                    raise unittest.SkipTest('Unable to bind the HKP server: {}'\n                            .format(e))\n            else:\n                break\n        else:\n            raise unittest.SkipTest('Unable to find a free port for HKP server')\n\n        self.server_addr = 'hkp://127.0.0.1:{}'.format(port)\n        self.server_thread = threading.Thread(\n                target=self.server.serve_forever)\n        self.server_thread.start()\n\n    def tearDown(self):\n        self.server.shutdown()\n        self.server.server_close()\n        self.server_thread.join()\n\n\nclass MockedWKDOpenPGPEnvironment(gemato.openpgp.OpenPGPEnvironment):\n    \"\"\"\n    A subclass of OpenPGPEnvironment that partially mocks spawning\n    OpenPGP in order to inject keys without having to implement\n    full HTTPS server with domain satisfactory to GnuPG.\n    \"\"\"\n\n    def __init__(self, keys={}):\n        self.keys = keys\n        super(MockedWKDOpenPGPEnvironment, self).__init__()\n\n    def clone(self):\n        return MockedWKDOpenPGPEnvironment(self.keys)\n\n    def _spawn_gpg(self, args, stdin):\n        if '--locate-keys' in args:\n            args.remove('--locate-keys')\n            assert len(args) == 1\n            if args[0] in self.keys:\n                ret, sout, serr = 
super(MockedWKDOpenPGPEnvironment,\n                        self)._spawn_gpg(['--import'], self.keys[args[0]])\n            else:\n                ret = 2\n            return (ret, b'', b'')\n\n        return super(MockedWKDOpenPGPEnvironment, self)._spawn_gpg(\n                args, stdin)\n","sub_path":"tests/testutil.py","file_name":"testutil.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"5487489","text":"#\n# @lc app=leetcode.cn id=200 lang=python\n#\n# [200] 岛屿数量 (Number of Islands)\n#\n\n# @lc code=start\n# DFS\n# class Solution(object):\n#     def numIslands(self, grid):\n#         \"\"\"\n#         :type grid: List[List[str]]\n#         :rtype: int\n#         \"\"\"\n#         result = 0\n\n#         def dfs(i, j):\n#             if i < 0 or i >= m or j < 0 or j >= n or grid[i][j] == '0':\n#                 return\n#             grid[i][j] = '0'\n#             for x, y in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]:\n#                 dfs(x, y)\n\n#         if grid and grid[0]:\n#             m, n = len(grid), len(grid[0])\n#             for i in range(m):\n#                 for j in range(n):\n#                     if grid[i][j] == '1':\n#                         result += 1\n#                         dfs(i, j) \n#         return result\n\n# Union-Find (disjoint-set) solution\nclass UnionFind:\n    def __init__(self, grid):\n        m, n = len(grid), len(grid[0])\n        self.count = 0\n        self.parent = [-1] * (m * n)\n        self.rank = [0] * (m * n)\n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == \"1\":\n                    self.parent[i * n + j] = i * n + j\n                    self.count += 1\n    \n    def find(self, i):\n        if self.parent[i] != i:\n            self.parent[i] = self.find(self.parent[i])\n        return self.parent[i]\n    \n    def union(self, x, y):\n        rootx = self.find(x)\n        rooty = self.find(y)\n        if rootx != rooty:\n            if self.rank[rootx] < self.rank[rooty]:\n                rootx, rooty = rooty, rootx\n            self.parent[rooty] = rootx\n            if self.rank[rootx] == self.rank[rooty]:\n                self.rank[rootx] += 1\n            self.count -= 1\n    \n    def getCount(self):\n        return self.count\n\nclass Solution:\n    def numIslands(self, grid):\n        nr = len(grid)\n        if nr == 0:\n            return 0\n        nc = len(grid[0])\n\n        uf = UnionFind(grid)\n        num_islands = 0\n        for r in range(nr):\n            for c in range(nc):\n                if grid[r][c] == \"1\":\n                    grid[r][c] = \"0\"\n                    for x, y in [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]:\n                        if 0 <= x < nr and 0 <= y < nc and grid[x][y] == \"1\":\n                            uf.union(r * nc + c, x * nc + y)\n        \n        return uf.getCount()\n\n# @lc code=end\n\n","sub_path":"Week_07/200.岛屿数量.py","file_name":"200.岛屿数量.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"588453427","text":"from quant.lib.main_utils import *\nfrom quant.research import cross\nfrom datetime import datetime as dt, timedelta\n\n\ndef get_week_table(r):\n    rtn = r.iloc[-1].dropna().sort_values().to_frame()\n    rtn = pd.concat([rtn.iloc[:10], rtn.iloc[-10:]], axis=0)\n    rtn.loc[:, 'Week'] = ['%.1f%%' % (100. 
* x) for x in rtn.iloc[:, 0]]\n    rtn.index.name = 'Ticker'\n    return rtn[['Week']]\n\n\ndef get_reversal_table(rm):\n    rtn = pd.concat([rm.rolling(3, min_periods=1).mean().iloc[-1], rm.rolling(52, min_periods=13).mean().iloc[-4]], axis=1)\n    rtn.columns = ['Rev', 'Mom']\n    rtn = rtn[rtn.Mom > 0].sort_values('Rev', ascending=True).iloc[:20]\n    rtn.index.name = 'Ticker'\n    return np.round(rtn, 1)\n\n\ndef get_momentum_table(rm):\n    rtn = pd.concat([rm.rolling(3, min_periods=1).mean().iloc[-1], rm.rolling(52, min_periods=13).mean().iloc[-4]], axis=1)\n    rtn.columns = ['Rev', 'Mom']\n    rtn = rtn[rtn.Rev < 0].sort_values('Mom', ascending=False).iloc[:20]\n    rtn.index.name = 'Ticker'\n    return np.round(rtn, 1)\n    \n\ndef run_check():\n    rtn, rm, vol, _ = cross.get_dataset('SMX')\n    table = get_week_table(rtn)\n    table2 = get_reversal_table(rm)\n    table5 = get_momentum_table(rm)\n    rtn, rm, vol, _ = cross.get_dataset('FTSE250')\n    table3 = get_week_table(rtn)\n    table4 = get_reversal_table(rm)\n    table6 = get_momentum_table(rm)\n    mail = Email('wayne.cq@hotmail.com', ['wayne.cq@hotmail.com'], 'Market Watch')\n    mail.add_date(dt.today())\n    mail.add_text('SMX Week')\n    mail.add_table(table, width=300)\n    mail.add_text('SMX Reversal')\n    mail.add_table(table2, width=400)\n    mail.add_text('SMX Momentum')\n    mail.add_table(table5, width=400)\n    mail.add_text('FTSE250 Week')\n    mail.add_table(table3, width=300)\n    mail.add_text('FTSE250 Reversal')\n    mail.add_table(table4, width=400)\n    mail.add_text('FTSE250 Momentum')\n    mail.add_table(table6, width=400)\n    mail.send_email()\n\n\ndef main():\n    run_check()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"quant/scripts/market_check.py","file_name":"market_check.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"208404104","text":"# the self keyword is mandatory for referring to instance variables inside a method\n# instance and class variables have wholly different purposes\n# Constructor name should be __init__\n# The \"new\" keyword is not required when you create a new obj in Python unlike in C# or Java\n\nclass CalculateData:\n    num = 100\n\n    #Default Constructor\n    def __init__(self, a, b): # In Python this is how you declare a constructor, with the __init__ keyword; self refers to the current instance\n        self.firstInt = a\n        self.secInt = b\n        print(\"init is called automatically when obj is created\")\n\n    def getData(self):\n        print(\"Executing method in class\")\n\n\n    def Summation(self):\n        return self.firstInt + self.secInt + self.num # you can use self.num or CalculateData.num; plain num is NOT valid here\n\n# In Python, if you're at this level of indentation, you are outside of this class already. Python is based on indentation\n# and not brackets {} like in Java and C#\n# In Python if you want to call a method of the class, you don't need to declare the keyword \"new\". You just call the\n# class CalculateData.\n\nobj = CalculateData(2, 3) # As you see, we don't have the \"new\" keyword like we do in C# and Java (obj = new CalculateData())\nobj.getData()\nprint(obj.Summation())\n\nobj1 = CalculateData(5, 5) # As you see, we don't have the \"new\" keyword like we do in C# and Java (obj = new CalculateData())\nobj1.getData()\nprint(obj1.Summation())","sub_path":"OOPS.py","file_name":"OOPS.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"35946335","text":"import pygame\nfrom players import *\n# import pygame_textinput\nimport button_template\nfrom shitty_colors import *\n\n\nclass Game:\n\n    def update_pts(self):\n        user_pts = button_template.Button(self.playground, self.comic_sans, str(self.user.hand.getValue()),\n                                          200, 500, 100, 100, Colors.white, Colors.white)\n        house_pts = button_template.Button(self.playground, self.comic_sans, str(self.house.hand.getValue()),\n                                           200, 200, 100, 100, Colors.white, Colors.white)\n        if self.user.hand.getValue() > 21: # Player has lost\n            print(\"PLAYER HAS LOST\")\n            drawing_area = pygame.Rect(351, 0, 929, 800)\n            self.playground.fill((0,0,0,0), drawing_area)\n            self.user_cards_xpos = 500\n            self.house_cards_xpos = 500\n            self.user.hand.clearHand()\n            self.house.hand.clearHand()\n            self.initial_draw()\n            self.update_pts()\n        else:\n            pass\n\n\n    def blit_card(self, card, isPlayer):\n        if isPlayer: # If the given card is the player's card...\n            self.playground.blit(card.getVisual(), (self.user_cards_xpos, 450))\n            self.user_cards_xpos += 40\n        else: # Given card belongs to the house...\n            self.playground.blit(card.getVisual(), (self.house_cards_xpos, 100))\n            self.house_cards_xpos += 40\n\n    def initial_draw(self):\n        for i in range(2):\n            card = self.deck.getTopCard()\n            self.user.hit(card)\n            self.blit_card(card, True)\n            card = self.deck.getTopCard()\n            self.house.hit(card)\n            self.blit_card(card, False)\n\n    def set_vars(self):\n        self.user_cards_xpos = 500\n        self.house_cards_xpos = 500\n        self.playground = pygame.Surface((1280, 800), pygame.SRCALPHA)\n        self.game_background.blit(self.playground, (0, 0))\n\n        # bet_input = pygame_textinput.TextInput(\"Enter bet here\") # This line adds ~4s to the loading time\n\n        # test_card = game_deck.newDeck[0].get_visual()\n        # test_card = pygame.transform.scale(game_deck.deck[0].get_visual(), (130, 180))\n        # game_background.blit(test_card, (545, 450))\n\n        self.user = Player()\n        self.house = Bank()\n\n        self.deck = Deck()\n        self.deck.shuffleDeck()\n\n        self.initial_draw()\n\n        # Add font\n        self.comic_sans = pygame.font.Font(\"fonts/Comic Sans MS.ttf\", 40)\n\n        # Add buttons\n        self.buttons = []\n        self.hit_button = button_template.Button(self.playground, self.comic_sans, \"hit\", 200, 355, 150, 90,\n                                                 Colors.bright_green, Colors.green)\n\n        self.buttons.append(self.hit_button)\n\n    def __init__(self, win, music_status):\n        self.window = win\n        self.game_background = pygame.image.load(\"images/blackjack-table.jpg\").convert()\n        self.set_vars()\n        self.update_pts()\n\n        clock = pygame.time.Clock()\n        game_active = True\n        music_playing = music_status\n\n        while game_active:\n            events = pygame.event.get()\n            for event in events:\n                if event.type == pygame.QUIT:\n                    game_active = False\n                if event.type == pygame.KEYUP:\n                    if event.key == pygame.K_p:\n                        if music_playing:\n                            pygame.mixer_music.pause()\n                            music_playing = False\n                        else:\n                            pygame.mixer_music.unpause()\n                            music_playing = True\n                if pygame.mouse.get_pressed()[0] and 
self.hit_button.button.collidepoint(pygame.mouse.get_pos()):\n print(\"HIT\")\n try:\n card = self.deck.getTopCard()\n self.user.hit(card)\n self.blit_card(card, True)\n self.update_pts()\n except IndexError:\n print(\"ADDING NEW DECK\")\n self.deck = Deck()\n self.deck.shuffleDeck()\n card = self.deck.getTopCard()\n self.user.hit(card)\n self.blit_card(card, True)\n self.update_pts()\n\n if event.type == pygame.MOUSEMOTION: # When the mouse moves, check for engagement in each button\n for i in range(len(self.buttons)):\n self.buttons[i].engage_button()\n\n # Update 'gameBackground' on the main screen\n self.window.blit(self.game_background, (0, 0))\n self.window.blit(self.playground, (0, 0))\n\n \"\"\"if bet_input.update(events):\n print(bet_input.get_text())\n\n window.blit(bet_input.get_surface(), (650, 775))\"\"\"\n\n pygame.display.update()\n clock.tick(60) # Sets the FPS\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341076489","text":"\nfrom collections import Counter, defaultdict, deque\nfrom functools import reduce\nfrom heapq import heappop, heappush\nfrom itertools import combinations, permutations, product\n\nfrom helpers import distance, distance_sq, grouped_lines, ints, manhattan, neighs, neighs_bounded\n\n\ndef solve(groups):\n return sum(\n len(\n reduce(\n lambda a,b: a|b,\n map(set, group),\n set ()\n )\n )\n for group in groups\n )\n\nif __name__ == '__main__':\n with open('6.txt') as f:\n groups = grouped_lines(f.readlines())\n print(solve(groups))","sub_path":"estomagordo-python3/6a.py","file_name":"6a.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377536479","text":"#HLNCEC001\r\n#Cecil Hlungwana\r\n#Assignment 9\r\n\r\n\r\ndef main():\r\n\t# Read a Sudoku solution.\r\n\tgrid = read_a_solution()\r\n\r\n\tif is_valid(grid):\r\n\t\tprint(\"Sudoku grid is valid\")\r\n\telse:\r\n\t\tprint(\"Sudoku grid is not valid\")\r\n\t\t\r\ndef read_a_solution():\r\n\tgrid = []\r\n\tfor i in range(9):\r\n\t\tline = input()\r\n\t\tgrid.append([eval(x) for x in line])\r\n\r\n\treturn grid\r\n\r\ndef is_valid(grid):\r\n\t# Check if the solution is valid or not.\r\n\tfor i in range(9):\r\n\t\tfor j in range(9):\r\n\t\t\tif grid[i][j] < 1 or grid[i][j] > 9 \\\r\n\t\t\t\tor not is_valid_at(i, j, grid):\r\n\t\t\t\treturn False\r\n\treturn True\r\n\r\ndef is_valid_at(i, j, grid):\r\n\t# Check whether grid[i][j] is valid in i's row.\r\n\tfor column in range(9):\r\n\t\tif column != j and grid[i][column] == grid[i][j]:\r\n\t\t\treturn False\r\n\t\t\t\r\n\t# Check whether grid[i][j] is valid in j's column\r\n\tfor row in range(9):\r\n\t\tif row != i and grid[row][j] == grid[i][j]:\r\n\t\t\treturn False\r\n\r\n\t# Check whether grid[i][j] is valid in the 3-by-3 box\r\n\tfor row in range((i // 3) * 3, (i // 3) * 3 + 3):\r\n\t\tfor col in range((j // 3) * 3, (j // 3) * 3 + 3):\r\n\t\t\tif row != i and col != j and \\\r\n\t\t\t\tgrid[row][col] == grid[i][j]:\r\n\t\t\t\treturn False\r\n\r\n\treturn True # The current value at grid[i][j] is valid\r\n\r\n# Call the main function\r\nmain()\r\n","sub_path":"examples/data/Assignment_9/hlncec001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"602495799","text":"#!/usr/bin/env 
python3\n\nimport numpy as np\nimport argparse\n\ndef color(value):\n    '''\n    usage: color(value)\n    \n    input:\n        value (int): [0, 1000]\n    \n    output:\n        color (str): red [0, 255], green [0, 255], blue [0, 255]\n    '''\n    bins = np.array([50, 150, 250, 350, 450, 550, 650, 750, 850, 950, 1000])\n    value_bin = np.digitize(np.array([value]), bins, right = True)[0]\n    R_bin = np.clip(value_bin, 0, 5)\n    G_bin = np.clip(10 - value_bin, 0, 5)\n    R = 50 * R_bin + 5 if R_bin != 0 else 0\n    G = 50 * G_bin + 5 if G_bin != 0 else 0\n    return \",\".join(map(str, [R, G, 0]))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-f\",\"--file\", help=\"Name of the file with coordinate and methylation data\")\nargs = parser.parse_args()\n\nif args.file:\n\n\tinp=list(open(args.file))\n\tout=open('rgb_%s' % (args.file),'wt')\n\n\tfor i in range(0,len(inp)):\n\t\tline=inp[i].split('\\t')\n\t\trgb=color(int(line[4]))\n\t\tout.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (line[0], line[1], line[2], line[3], line[4], line[5].strip(), line[1], line[2], rgb))\n\n\n\n","sub_path":"be6toRGB.py","file_name":"be6toRGB.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"209622635","text":"from sympy import Matrix, zeros\n\n\nclass Solver(object):\n    \"\"\"\n    :type producers:Matrix\n    :type consumers:Matrix\n    :type cost_matrix:Matrix\n    \"\"\"\n\n    def __init__(self, producers, consumers, cost_matrix):\n        self.producers = producers\n        self.consumers = consumers\n        self.cost_matrix = cost_matrix\n        a = 0\n        b = 0\n        for i in range(self.producers.shape[0]):\n            a += self.producers[i, 0]\n        for i in range(self.consumers.shape[0]):\n            b += self.consumers[i, 0]\n        c = a - b\n        if c < 0:\n            self.producers = self.producers.row_insert(self.producers.shape[0], Matrix([[-c]]))\n            self.cost_matrix = self.cost_matrix.row_insert(self.cost_matrix.shape[0],\n                                                           Matrix([[0] * self.consumers.shape[0]]))\n        elif c > 0:\n            self.consumers = self.consumers.row_insert(self.consumers.shape[0], Matrix([[c]]))\n            self.cost_matrix = self.cost_matrix.col_insert(self.cost_matrix.shape[1],\n                                                           Matrix([[0]] * self.producers.shape[0]))\n        self.m, self.n = self.producers.shape[0], self.consumers.shape[0]\n\n    @staticmethod\n    def get_initial_plan(producers, consumers):\n        \"\"\"\n        :type producers:Matrix\n        :type consumers:Matrix\n        \"\"\"\n        m, n = producers.shape[0], consumers.shape[0]\n        transportation_matrix = zeros(m, n)\n        basis_set = []\n        i = 0\n        j = 0\n        while True:\n            if i >= m or j >= n:\n                break\n            basis_set.append((i, j))\n            if producers[i, 0] < consumers[j, 0]:\n                transportation_matrix[i, j] = producers[i, 0]\n                consumers[j, 0] -= producers[i, 0]\n                producers[i, 0] = 0\n                for k in range(j + 1, n):\n                    transportation_matrix[i, k] = 0\n                i += 1\n            elif consumers[j, 0] < producers[i, 0]:\n                transportation_matrix[i, j] = consumers[j, 0]\n                producers[i, 0] -= consumers[j, 0]\n                consumers[j, 0] = 0\n                for k in range(i + 1, m):\n                    transportation_matrix[k, j] = 0\n                j += 1\n            else:\n                transportation_matrix[i, j] = consumers[j, 0]\n                producers[i, 0] = 0\n                consumers[j, 0] = 0\n                for k in range(j + 1, n):\n                    transportation_matrix[i, k] = 0\n                i += 1\n        return transportation_matrix, basis_set\n\n    def solve(self):\n        transportation_matrix, basis_set = Solver.get_initial_plan(self.producers, self.consumers)\n        while True:\n            producer_potentials, consumer_potentials = Solver.get_potentials(self.cost_matrix, basis_set)\n            i0_index, j0_index = Solver.get_positive_delta_index(self.cost_matrix, 
producer_potentials,\n consumer_potentials, basis_set)\n if i0_index == -1 and j0_index == -1:\n break\n cycle_points = self.get_cycle(basis_set, i0_index, j0_index)\n if not cycle_points:\n raise Exception(\"No cycle found\")\n theta_i_index, theta_j_index = -1, -1\n theta = float('inf')\n for k in range(1, len(cycle_points), 2):\n i, j = cycle_points[k]\n if transportation_matrix[i, j] < theta:\n theta_i_index, theta_j_index = i, j\n theta = transportation_matrix[i, j]\n for k, (i, j) in enumerate(cycle_points):\n if k % 2 == 0:\n transportation_matrix[i, j] += theta\n else:\n transportation_matrix[i, j] -= theta\n basis_set.remove((theta_i_index, theta_j_index))\n basis_set.append((i0_index, j0_index))\n return transportation_matrix\n\n @staticmethod\n def get_potentials(cost_matrix, basis_set):\n \"\"\"\n :type cost_matrix:Matrix\n :type basis_set:list[tuple[int, int]]\n \"\"\"\n index_set = list(basis_set)\n i, j = index_set.pop(0)\n producers_potentials = {}\n consumer_potentials = {}\n producers_potentials[i] = 0\n consumer_potentials[j] = cost_matrix[i, j]\n while len(index_set) > 0:\n for k, (i, j) in enumerate(index_set):\n if i in producers_potentials:\n consumer_potentials[j] = cost_matrix[i, j] - producers_potentials[i]\n index_set.pop(k)\n break\n elif j in consumer_potentials:\n producers_potentials[i] = cost_matrix[i, j] - consumer_potentials[j]\n index_set.pop(k)\n break\n return producers_potentials, consumer_potentials\n\n @staticmethod\n def get_positive_delta_index(cost_matrix, producer_potentials, consumer_potentials, basis_set):\n \"\"\"\n :type cost_matrix:Matrix\n :type producer_potentials:dict[int, int]\n :type consumer_potentials:dict[int, int]\n :type basis_set:list[tuple[int, int]]\n \"\"\"\n m, n = cost_matrix.shape\n for i in range(m):\n for j in range(n):\n if (i, j) not in basis_set:\n estimate = producer_potentials[i] + consumer_potentials[j] - cost_matrix[i, j]\n if estimate > 0:\n return i, j\n return -1, -1\n\n def get_cycle(self, basis_set, i0, j0):\n \"\"\"\n :type basis_set:list[tuple[int, int]]\n :type i0:int\n :type j0:int\n \"\"\"\n cycle_points = [(i0, j0)]\n if self.move_horizontally(list(basis_set), cycle_points, i0):\n return cycle_points\n else:\n return None\n\n def move_horizontally(self, basis_set, cycle_points, i_prev):\n \"\"\"\n :type basis_set:list[tuple[int, int]]\n :type cycle_points:list[tuple[int, int]]\n :type i_prev:int\n \"\"\"\n for k, i_j_pair in enumerate(basis_set):\n if i_j_pair is not None:\n i, j = i_j_pair\n if i == i_prev:\n cycle_points.append((i, j))\n basis_set[k] = None\n if j == cycle_points[0][1]:\n return True\n if self.move_vertically(basis_set, cycle_points, j):\n return True\n cycle_points.pop()\n basis_set[k] = (i, j)\n return False\n\n def move_vertically(self, basis_set, cycle_points, j_prev):\n \"\"\"\n :type basis_set:list[tuple[int, int]]\n :type cycle_points:list[tuple[int, int]]\n :type j_prev:int\n \"\"\"\n for k, i_j_pair in enumerate(basis_set):\n if i_j_pair is not None:\n i, j = i_j_pair\n if j == j_prev:\n cycle_points.append((i, j))\n basis_set[k] = None\n if i == cycle_points[0][0]:\n return True\n if self.move_horizontally(basis_set, cycle_points, i):\n return True\n cycle_points.pop()\n basis_set[k] = (i, j)\n return False\n","sub_path":"7term/OM/transportation_problem/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8877743","text":"import os, sys, json\nimport 
datetime\nimport copy\nimport uuid\nimport logging\nfrom flask import Flask, request, jsonify \nfrom flask_cors import CORS\nfrom pymongo import MongoClient\nimport requests\nfrom io import BytesIO\nimport skimage\nimport PIL\nimport numpy as np\n\nfrom analyzer import ResNet18, detect_text\nresnet18 = ResNet18()\n\napplication = Flask(__name__)\nCORS(application)\n\nlogger = logging.getLogger(\"tattle-api\")\n\nmongo_url = os.environ['MONGO_URL']\ncli = MongoClient(mongo_url)\ndb = cli.documents\n\n@application.route('/health')\ndef health_check():\n logger.debug('')\n return \"OK\"\n\n@application.route('/upload_text', methods=['POST'])\ndef upload_text():\n data = request.get_json(force=True)\n text = data.get('text',None)\n if text is None:\n ret = {'failed' : 1, 'error' : 'No text field in json'}\n return jsonify(ret)\n \n date = datetime.datetime.now()\n doc_id = uuid.uuid4().hex\n db.docs.insert_one({\"doc_id\" : doc_id, \n \"has_image\" : False, \n \"has_text\" : True, \n \"date_added\" : date,\n \"date_updated\" : date,\n \"text\" : text})\n\n ret = {'failed' : 0, 'doc_id' : doc_id}\n return jsonify(ret)\n\n@application.route('/find_duplicate', methods=['POST'])\ndef find_duplicate():\n data = request.get_json(force=True)\n text = data.get('text', None)\n image_url = data.get('image_url', None)\n if text is None and image_url is None:\n ret = {'failed' : 1, 'error' : 'No text or image_url found'}\n return jsonify(ret)\n\n duplicate_doc = db.docs.find_one({\"text\" : text})\n if duplicate_doc is None:\n ret = {'failed' : 0, 'duplicate' : 0}\n else:\n ret = {'failed' : 0, 'duplicate' : 1, 'doc_id' : duplicate_doc.get('doc_id')}\n\n return jsonify(ret)\n\n@application.route('/upload_image', methods=['POST'])\ndef upload_image():\n data = request.get_json(force=True)\n image_url = data.get('image_url')\n if image_url is None:\n ret = {'failed' : 1, 'error' : 'No image_url found'}\n else:\n image_dict = image_from_url(image_url)\n image = image_dict['image']\n\n embedding = resnet18.extract_feature(image)\n detected_text = detect_text(image_dict['image_bytes'])\n\n date = datetime.datetime.now()\n doc_id = uuid.uuid4().hex\n #db.docs.insert_one({\"doc_id\" : doc_id, \n # \"has_image\" : True, \n # \"has_text\" : True, \n # \"date_added\" : date,\n # \"date_updated\" : date,\n # \"text\" : text})\n\n ret = {'failed' : 0, 'embedding' : embedding.tolist()}\n return jsonify(ret)\n\ndef image_from_url(image_url):\n resp = requests.get(image_url)\n image_bytes = resp.content\n image = PIL.Image.open(BytesIO(image_bytes))\n image_array = np.array(image)\n return {'image' : image, 'image_array' : image_array, 'image_bytes' : image_bytes}\n\ndef analyze_image(image_url):\n image = skimage.io.imread(image_url)\n image = PIL.Image.fromarray(image)\n embedding = get_image_embedding(image)\n\n\nif __name__ == \"__main__\":\n application.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"362755290","text":"import sys\n\nnumber = int(sys.argv[1])\nprimeNumbers = []\nfor i in range(2, number+1):\n isPrime = True\n for j in range(2, i):\n if i % j == 0:\n isPrime = False\n break\n if isPrime:\n primeNumbers += [i]\n\nprint(primeNumbers)","sub_path":"static/programs/All Prime Numbers 0 to N.py","file_name":"All Prime Numbers 0 to 
N.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"255564398","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 19:44:02 2017\n\n@author: user\n\"\"\"\n\nimport argparse\nfrom flyai.dataset import Dataset\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten, \\\n    BatchNormalization, MaxPooling2D\nfrom model import Model\nfrom path import MODEL_PATH\nfrom keras.models import load_model\nimport keras\nfrom keras.applications.resnet50 import ResNet50\n\n'''\nTensorflow template project download: https://www.flyai.com/python/tensorflow_template.zip\nPyTorch template project download: https://www.flyai.com/python/pytorch_template.zip\nKeras template project download: https://www.flyai.com/python/keras_template.zip\nFor first-time use, read the 第一次使用请读我.html file in the project\nFAQ: https://www.flyai.com/question\nFeedback on suggestions and problems is rewarded! Customer-service WeChat: flyaixzs\n'''\n\n'''\nProject hyperparameters\n'''\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-e\", \"--EPOCHS\", default=20, type=int, help=\"train epochs\")\nparser.add_argument(\"-b\", \"--BATCH\", default=2, type=int, help=\"batch size\")\nargs = parser.parse_args()\n\n'''\nData handling utilities provided by the flyai library.\nPass in the number of epochs over the whole dataset and the batch size.\n'''\ndataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)\nmodel = Model(dataset)\n\n\n'''\nDefine your own network architecture\n'''\n\n# the pretrained weights must be downloaded with this helper method, then loaded\nfrom flyai.utils import remote_helper\n\ntry:\n    weight_path = remote_helper.get_remote_date(\"https://www.flyai.com/m/v0.2|resnet50_weights_th_dim_ordering_th_kernels.h5\")\nexcept OSError:\n    weight_path = \"imagenet\"\n\nseque_base = ResNet50(weights=weight_path,\n                      include_top=True,\n                      input_shape=(224,224,3))\n\nseque = Sequential()\nseque.add(seque_base)\nseque.add(Dense(128, activation='relu'))\nseque.add(Dense(45, activation='softmax'))\n\n# seque = ResNet50(weights=None, include_top=True, input_shape=(224,224,3),classes=45)\n\n# seque = Sequential()\n# seque.add(Conv2D(32, (3, 3), activation='relu', input_shape=(256, 256, 3)))\n# seque.add(BatchNormalization())\n# seque.add(MaxPooling2D(pool_size=(2, 2)))\n# seque.add(Dropout(0.25))\n\n# seque.add(Conv2D(64, (3, 3), activation='relu'))\n# seque.add(BatchNormalization())\n# seque.add(MaxPooling2D(pool_size=(2, 2)))\n\n# seque.add(Conv2D(128, (3, 3), activation='relu'))\n# seque.add(BatchNormalization())\n# seque.add(MaxPooling2D(pool_size=(2, 2)))\n\n# seque.add(Conv2D(256, (3, 3), activation='relu'))\n# seque.add(BatchNormalization())\n# seque.add(MaxPooling2D(pool_size=(2, 2)))\n\n# seque.add(Conv2D(256, (3, 3), activation='relu'))\n# seque.add(BatchNormalization())\n\n# seque.add(Conv2D(256, (3, 3), activation='relu'))\n# seque.add(BatchNormalization())\n\n# seque.add(Flatten())\n# seque.add(Dense(128, activation='relu'))\n# seque.add(BatchNormalization())\n\n# seque.add(Dense(45, activation='softmax'))\n\nseque.compile(loss='categorical_crossentropy',\n              optimizer='rmsprop',\n              metrics=['accuracy'])\n'''\ndataset.get_step() returns the total number of training iterations\n'''\n\nbest_score = 0\nfor step in range(dataset.get_step()):\n    x_train, y_train = dataset.next_train_batch()\n    x_val, y_val = dataset.next_validation_batch()\n    history = seque.fit(x_train, \n                        y_train,\n                        batch_size=args.BATCH,\n                        epochs = args.EPOCHS,\n                        verbose=2,\n                        validation_data=(x_val,y_val))\n    \n    score = seque.evaluate(x_val, y_val, verbose=0)\n    if score[1] > best_score:\n        best_score = score[1]\n        model.save_model(seque, MODEL_PATH, overwrite=True)\n        print(\"step %d, best accuracy %g\" % (step, best_score))\n    print(str(step + 1) + \"/\" + str(dataset.get_step()))\n","sub_path":"flyai/keras_sceneclassification/keras_template/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"183467800","text":"# problems pulled from codingbat.com\n# and format adapted from google python class:\n# http://code.google.com/edu/languages/google-python-class\n\n\n#==================SUM~28==============/\n# Problem statement:\n  # Given an array of ints,\n  # return true if the sum of all the 2's\n# in the array is exactly 8.\n# Example input -> output:\n  # sum28([2, 3, 2, 2, 4, 2]) -> true\n  # sum28([2, 3, 2, 2, 4, 2, 2]) -> false\n  # sum28([1, 2, 3, 4]) -> false\ndef sum28(nums):\n  # sum the 2's in the array and compare against 8\n  return sum(x for x in nums if x == 2) == 8\n\n\n#=================ONLY~14==================/\n# Problem statement:\n  # Given an array of ints, return true if every element is a 1 or a 4.\n# Example input -> output:\n  # only14([1, 4, 1, 4]) -> true\n  # only14([1, 4, 2, 4]) -> false\n  # only14([1, 1]) -> true\n\n\ndef only14(nums):\n  # every element must be a 1 or a 4\n  return all(x in (1, 4) for x in nums)\n\n# Provided simple test() function in main() to print\n# what each function returns vs. what it's supposed to return\ndef test(got, expected):\n  if got == expected:\n    prefix = ' OK '\n  else:\n    prefix = '  X '\n  print (f' {prefix} got: {repr(got)} expected {repr(expected)}')\n\n# Provided main() calls the above functions with a few inputs\n# using test() to check if each result is correct or not\ndef main():\n  print ('sum28')\n  test(sum28([2, 3, 2, 2, 4, 2]), True)\n  test(sum28([1, 2, 3, 4]), False)\n  test(sum28([2]), False)\n\n  print ('only14')\n  test(only14([1, 4, 1, 4]), True)\n  test(only14([1, 4, 2, 4]), False)\n  test(only14([1, 1, 1, 5]), False)\n\n# Standard boilerplate to call the main() function\nif __name__ == '__main__':\n  main()\n","sub_path":"Puzzles.py","file_name":"Puzzles.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"651383826","text":"import pyudev\n\nfrom middlewared.service import Service\n\nfrom .identify_base import DiskIdentifyBase\n\n\nclass DiskService(Service, DiskIdentifyBase):\n\n    async def device_to_identifier(self, name, disks=None):\n        disks = disks or await self.middleware.call('device.get_disks')\n        if name not in disks:\n            return ''\n        else:\n            block_device = disks[name]\n\n        if block_device['serial_lunid']:\n            return f'{{serial_lunid}}{block_device[\"serial_lunid\"]}'\n        elif block_device['serial']:\n            return f'{{serial}}{block_device[\"serial\"]}'\n\n        dev = pyudev.Devices.from_name(pyudev.Context(), 'block', name)\n        for partition in filter(\n            lambda p: all(p.get(k) for k in ('ID_PART_ENTRY_TYPE', 'ID_PART_ENTRY_UUID')), dev.children\n        ):\n            if partition['ID_PART_ENTRY_TYPE'] not in await self.middleware.call(\n                'disk.get_valid_zfs_partition_type_uuids'\n            ):\n                continue\n            return f'{{uuid}}{partition[\"ID_PART_ENTRY_UUID\"]}'\n\n        return f'{{devicename}}{name}'\n\n    async def identifier_to_device(self, ident, disks=None):\n        if not ident:\n            return None\n\n        search = self.RE_IDENTIFIER.search(ident)\n        if not search:\n            return None\n\n        tp = search.group('type')\n        value = search.group('value')\n        mapping = {'uuid': 'uuid', 'devicename': 'name', 'serial_lunid': 'serial_lunid', 'serial': 'serial'}\n        if tp not in mapping:\n            raise NotImplementedError(f'Unknown type {tp!r}')\n        elif tp == 'uuid':\n            partition = await self.middleware.call('disk.list_all_partitions', 
[['partition_uuid', '=', value]])\n if partition:\n return partition[0]['disk']\n else:\n disk = next(\n (b for b in (\n disks or await self.middleware.call('device.get_disks')\n ).values() if b[mapping[tp]] == value), None\n )\n return disk['name'] if disk else None\n","sub_path":"src/middlewared/middlewared/plugins/disk_/identify_linux.py","file_name":"identify_linux.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575171845","text":"# -*- coding: UTF-8 -*-\n# @Time : 17-6-22\n# @File : train.py\n# @Author : jian\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport tarfile\nfrom datetime import datetime\nfrom multiprocessing import Process\n\nfrom antgo.ant import flags\nfrom antgo.ant.base import *\nfrom antgo.ant.utils import *\nfrom antgo.dataflow.common import *\nfrom antgo.dataflow.recorder import *\nfrom antgo.measures.statistic import *\nfrom antgo.task.task import *\nfrom antgo.utils.net import *\nfrom antgo.utils.serialize import *\n\nif sys.version > '3':\n PY3 = True\nelse:\n PY3 = False\n\nFLAGS = flags.AntFLAGS\n\n\nclass AntTrain(AntBase):\n def __init__(self, ant_context,\n ant_name,\n ant_data_folder,\n ant_dump_dir,\n ant_token,\n ant_task_config,\n **kwargs):\n super(AntTrain, self).__init__(ant_name, ant_context, ant_token, **kwargs)\n self.ant_data_source = ant_data_folder\n self.ant_dump_dir = ant_dump_dir\n self.ant_context.ant = self\n self.ant_task_config = ant_task_config\n \n def start(self):\n # 0.step loading challenge task\n running_ant_task = None\n if self.token is not None:\n # 0.step load challenge task\n challenge_task_config = self.rpc(\"TASK-CHALLENGE\")\n if challenge_task_config is None:\n # invalid token\n logger.error('couldnt load challenge task')\n self.token = None\n elif challenge_task_config['status'] in ['OK', 'SUSPEND']:\n # maybe user token or task token\n if 'task' in challenge_task_config:\n # task token\n challenge_task = create_task_from_json(challenge_task_config)\n if challenge_task is None:\n logger.error('couldnt load challenge task')\n exit(-1)\n running_ant_task = challenge_task\n else:\n # unknow error\n logger.error('unknow error')\n exit(-1)\n\n self.is_non_mltalker_task = False\n if running_ant_task is None:\n # 0.step load custom task\n if self.ant_task_config is not None:\n custom_task = create_task_from_xml(self.ant_task_config, self.context)\n if custom_task is None:\n logger.error('couldnt load custom task')\n exit(-1)\n running_ant_task = custom_task\n self.is_non_mltalker_task = True\n \n assert(running_ant_task is not None)\n \n # now time stamp\n # train_time_stamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(self.time_stamp))\n train_time_stamp = datetime.fromtimestamp(self.time_stamp).strftime('%Y%m%d.%H%M%S.%f')\n\n # 0.step warp model (main_file and main_param)\n self.stage = 'MODEL'\n # - backup in dump_dir\n main_folder = FLAGS.main_folder()\n main_param = FLAGS.main_param()\n main_file = FLAGS.main_file()\n\n if not os.path.exists(os.path.join(self.ant_dump_dir, train_time_stamp)):\n os.makedirs(os.path.join(self.ant_dump_dir, train_time_stamp))\n\n goldcoin = os.path.join(self.ant_dump_dir, train_time_stamp, '%s-goldcoin.tar.gz'%self.ant_name)\n \n if os.path.exists(goldcoin):\n os.remove(goldcoin)\n\n tar = tarfile.open(goldcoin, 'w:gz')\n tar.add(os.path.join(main_folder, main_file), arcname=main_file)\n if main_param is not None:\n tar.add(os.path.join(main_folder, 
main_param), arcname=main_param)\n tar.close()\n\n # - backup in cloud\n if os.path.exists(goldcoin):\n file_size = os.path.getsize(goldcoin) / 1024.0\n if file_size < 500:\n if not PY3 and sys.getdefaultencoding() != 'utf8':\n reload(sys)\n sys.setdefaultencoding('utf8')\n # model file shouldn't too large (500KB)\n with open(goldcoin, 'rb') as fp:\n self.context.job.send({'DATA': {'MODEL': fp.read()}})\n\n # 1.step loading training dataset\n logger.info('loading train dataset %s'%running_ant_task.dataset_name)\n ant_train_dataset = running_ant_task.dataset('train',\n os.path.join(self.ant_data_source, running_ant_task.dataset_name),\n running_ant_task.dataset_params)\n # add init func\n # self.context.registry_init_callback(ant_train_dataset.init)\n \n # user custom devices\n apply_devices = getattr(self.context.params, 'devices', [])\n # user model clones\n num_clones = getattr(self.context.params, 'num_clones', 1)\n\n # ablation train (parallel execute if device is OK)\n ablation_experiments = []\n ablation_blocks = getattr(self.context.params, 'ablation', None)\n if ablation_blocks is not None:\n if len(apply_devices) > num_clones and \\\n len(apply_devices) - num_clones >= len(ablation_blocks):\n ablation_experiments_devices = apply_devices[num_clones:]\n apply_devices = apply_devices[:num_clones]\n\n self.context.params.devices = apply_devices\n # assign device to every ablation experiment\n ablation_experiments = self.start_ablation_train_proc(ant_train_dataset,\n running_ant_task,\n ablation_blocks,\n train_time_stamp,\n ablation_experiments_devices)\n for ablation_experiment in ablation_experiments:\n ablation_experiment.start()\n #\n with safe_recorder_manager(ant_train_dataset):\n # 2.step model evaluation (optional)\n if running_ant_task.estimation_procedure is not None and \\\n running_ant_task.estimation_procedure.lower() in [\"holdout\",\"repeated-holdout\",\"bootstrap\",\"kfold\"]:\n logger.info('start model evaluation')\n\n estimation_procedure = running_ant_task.estimation_procedure.lower()\n estimation_procedure_params = running_ant_task.estimation_procedure_params\n evaluation_measures = running_ant_task.evaluation_measures\n\n evaluation_statistic = None\n if estimation_procedure == 'holdout':\n evaluation_statistic = self._holdout_validation(ant_train_dataset, evaluation_measures, train_time_stamp)\n\n logger.info('generate model evaluation report')\n self.stage = 'EVALUATION-HOLDOUT-REPORT'\n # send statistic report\n self.context.job.send({'DATA': {'REPORT': evaluation_statistic}})\n everything_to_html(evaluation_statistic, os.path.join(self.ant_dump_dir, train_time_stamp))\n elif estimation_procedure == \"repeated-holdout\":\n number_repeats = 2 # default value\n is_stratified_sampling = True # default value\n split_ratio = 0.6 # default value\n if estimation_procedure_params is not None:\n number_repeats = int(estimation_procedure_params.get('number_repeats', number_repeats))\n is_stratified_sampling = int(estimation_procedure_params.get('stratified_sampling', is_stratified_sampling))\n split_ratio = float(estimation_procedure_params.get('split_ratio', split_ratio))\n\n # start model estimation procedure\n evaluation_statistic = self._repeated_holdout_validation(number_repeats,\n ant_train_dataset,\n split_ratio,\n is_stratified_sampling,\n evaluation_measures,\n train_time_stamp)\n logger.info('generate model evaluation report')\n self.stage = 'EVALUATION-REPEATEDHOLDOUT-REPORT'\n # send statistic report\n self.context.job.send({'DATA': {'REPORT': 
evaluation_statistic}})\n          everything_to_html(evaluation_statistic, os.path.join(self.ant_dump_dir, train_time_stamp))\n        elif estimation_procedure == \"bootstrap\":\n          bootstrap_counts = 5\n          if estimation_procedure_params is not None:\n            bootstrap_counts = int(estimation_procedure_params.get('bootstrap_counts', bootstrap_counts))\n          evaluation_statistic = self._bootstrap_validation(bootstrap_counts,\n                                                            ant_train_dataset,\n                                                            evaluation_measures,\n                                                            train_time_stamp)\n          logger.info('generate model evaluation report')\n          self.stage = 'EVALUATION-BOOTSTRAP-REPORT'\n          # send statistic report\n          self.context.job.send({'DATA': {'REPORT': evaluation_statistic}})\n          everything_to_html(evaluation_statistic, os.path.join(self.ant_dump_dir, train_time_stamp))\n        elif estimation_procedure == \"kfold\":\n          kfolds = 5\n          if estimation_procedure_params is not None:\n            kfolds = int(estimation_procedure_params.get('kfold', kfolds))\n          evaluation_statistic = self._kfold_cross_validation(kfolds, ant_train_dataset, evaluation_measures, train_time_stamp)\n\n          logger.info('generate model evaluation report')\n          self.stage = 'EVALUATION-KFOLD-REPORT'\n          # send statistic report\n          self.context.job.send({'DATA': {'REPORT': evaluation_statistic}})\n          everything_to_html(evaluation_statistic, os.path.join(self.ant_dump_dir, train_time_stamp))\n\n      # 3.step model training\n      self.stage = \"TRAIN\"\n      train_dump_dir = os.path.join(self.ant_dump_dir, train_time_stamp, 'train')\n      if not os.path.exists(train_dump_dir):\n        os.makedirs(train_dump_dir)\n\n      logger.info('start training process')\n      ant_train_dataset.reset_state()\n      self.context.call_training_process(ant_train_dataset, train_dump_dir)\n\n      # # 4.step model graph\n      # test_graph = Graph(name='testnet')\n      # test_graph.add_node(name='conv', label='hello')\n      # test_graph.add_node(name='pool', label='world')\n      # test_graph.add_node(name='input', label='www')\n      # test_graph.add_node(name='ssd', label='hhh')\n      # test_graph.add_link('conv','pool',Link('ab',''))\n      # test_graph.add_link('input','conv',Link('bd',''))\n      # test_graph.add_link('input','ssd',Link('ac',''))\n      #\n      # ss = Encoder().encode(test_graph)\n      # print(ss)\n      # dd = Decoder().decode(ss)\n      # graph_content = graph_net_visualization(dd, '/Users/zhangken/Downloads/testnet.svg')\n      # self.context.job.send({'DATA': {'GRAPH': graph_content}})\n\n      # join (waiting until all experiments stop)\n      if len(ablation_experiments) > 0:\n        for ablation_experiment in ablation_experiments:\n          ablation_experiment.join()\n      else:\n        if ablation_blocks is not None:\n          self.start_ablation_train_proc(ant_train_dataset,\n                                         running_ant_task,\n                                         ablation_blocks,\n                                         train_time_stamp)\n\n\n  def _holdout_validation(self, train_dataset, evaluation_measures, now_time):\n    # 1.step split train set and validation set\n    part_train_dataset, part_validation_dataset = train_dataset.split(split_method='holdout')\n    part_train_dataset.reset_state()\n\n    # dump_dir\n    dump_dir = os.path.join(self.ant_dump_dir, now_time, 'train', 'holdout-evaluation')\n    if not os.path.exists(dump_dir):\n      os.makedirs(dump_dir)\n\n    # 2.step training model\n    self.stage = 'EVALUATION-HOLDOUT-TRAIN'\n    self.context.call_training_process(part_train_dataset, dump_dir)\n\n    # 3.step evaluation measures\n    # split data and label\n    data_annotation_branch = DataAnnotationBranch(Node.inputs(part_validation_dataset))\n    self.context.recorder = RecorderNode(Node.inputs(data_annotation_branch.output(1)))\n\n    self.stage = 'EVALUATION-HOLDOUT-EVALUATION'\n    with safe_recorder_manager(self.context.recorder):\n      with 
running_statistic(self.ant_name):\n self.context.call_infer_process(data_annotation_branch.output(0), dump_dir)\n\n # clear\n self.context.recorder = None\n\n task_running_statictic = get_running_statistic(self.ant_name)\n task_running_statictic = {self.ant_name: task_running_statictic}\n task_running_elapsed_time = task_running_statictic[self.ant_name]['time']['elapsed_time']\n task_running_statictic[self.ant_name]['time']['elapsed_time_per_sample'] = \\\n task_running_elapsed_time / float(part_validation_dataset.size)\n\n logger.info('start evaluation process')\n evaluation_measure_result = []\n\n with safe_recorder_manager(RecordReader(dump_dir)) as record_reader:\n for measure in evaluation_measures:\n record_generator = record_reader.iterate_read('predict', 'groundtruth')\n result = measure.eva(record_generator, None)\n evaluation_measure_result.append(result)\n task_running_statictic[self.ant_name]['measure'] = evaluation_measure_result\n\n return task_running_statictic\n\n def _repeated_holdout_validation(self, repeats,\n train_dataset,\n split_ratio,\n is_stratified_sampling,\n evaluation_measures,\n nowtime):\n repeated_running_statistic = []\n for repeat in range(repeats):\n # 1.step split train set and validation set\n part_train_dataset, part_validation_dataset = train_dataset.split(split_params={'ratio': split_ratio,\n 'is_stratified': is_stratified_sampling},\n split_method='repeated-holdout')\n part_train_dataset.reset_state()\n # dump_dir\n dump_dir = os.path.join(self.ant_dump_dir, nowtime, 'train', 'repeated-holdout-evaluation', 'repeat-%d'%repeat)\n if not os.path.exists(dump_dir):\n os.makedirs(dump_dir)\n\n # 2.step training model\n self.stage = 'EVALUATION-REPEATEDHOLDOUT-TRAIN-%d' % repeat\n self.context.call_training_process(part_train_dataset, dump_dir)\n\n # 3.step evaluation measures\n # split data and label\n data_annotation_branch = DataAnnotationBranch(Node.inputs(part_validation_dataset))\n self.context.recorder = RecorderNode(Node.inputs(data_annotation_branch.output(1)))\n\n self.stage = 'EVALUATION-REPEATEDHOLDOUT-EVALUATION-%d' % repeat\n with safe_recorder_manager(self.context.recorder):\n with running_statistic(self.ant_name):\n self.context.call_infer_process(data_annotation_branch.output(0), dump_dir)\n\n # clear\n self.context.recorder = None\n\n task_running_statictic = get_running_statistic(self.ant_name)\n task_running_statictic = {self.ant_name: task_running_statictic}\n task_running_elapsed_time = task_running_statictic[self.ant_name]['time']['elapsed_time']\n task_running_statictic[self.ant_name]['time']['elapsed_time_per_sample'] = \\\n task_running_elapsed_time / float(part_validation_dataset.size)\n\n logger.info('start evaluation process')\n evaluation_measure_result = []\n\n with safe_recorder_manager(RecordReader(dump_dir)) as record_reader:\n for measure in evaluation_measures:\n record_generator = record_reader.iterate_read('predict', 'groundtruth')\n result = measure.eva(record_generator, None)\n evaluation_measure_result.append(result)\n task_running_statictic[self.ant_name]['measure'] = evaluation_measure_result\n\n repeated_running_statistic.append(task_running_statictic)\n\n evaluation_result = multi_repeats_measures_statistic(repeated_running_statistic, method='repeated-holdout')\n return evaluation_result\n\n def _bootstrap_validation(self, bootstrap_rounds, train_dataset, evaluation_measures, nowtime):\n bootstrap_running_statistic = []\n for bootstrap_i in range(bootstrap_rounds):\n # 1.step split train set and validation set\n 
part_train_dataset, part_validation_dataset = train_dataset.split(split_params={},\n split_method='bootstrap')\n part_train_dataset.reset_state()\n # dump_dir\n dump_dir = os.path.join(self.ant_dump_dir,\n nowtime,\n 'train',\n 'bootstrap-evaluation',\n 'bootstrap-%d-evaluation' % bootstrap_i)\n if not os.path.exists(dump_dir):\n os.makedirs(dump_dir)\n\n # 2.step training model\n self.stage = 'EVALUATION-BOOTSTRAP-TRAIN-%d' % bootstrap_i\n self.context.call_training_process(part_train_dataset, dump_dir)\n\n # 3.step evaluation measures\n # split data and label\n data_annotation_branch = DataAnnotationBranch(Node.inputs(part_validation_dataset))\n self.context.recorder = RecorderNode(Node.inputs(data_annotation_branch.output(1)))\n\n self.stage = 'EVALUATION-BOOTSTRAP-EVALUATION-%d' % bootstrap_i\n with safe_recorder_manager(self.context.recorder):\n with running_statistic(self.ant_name):\n self.context.call_infer_process(data_annotation_branch.output(0), dump_dir)\n\n # clear\n self.context.recorder = None\n\n task_running_statictic = get_running_statistic(self.ant_name)\n task_running_statictic = {self.ant_name: task_running_statictic}\n task_running_elapsed_time = task_running_statictic[self.ant_name]['time']['elapsed_time']\n task_running_statictic[self.ant_name]['time']['elapsed_time_per_sample'] = \\\n task_running_elapsed_time / float(part_validation_dataset.size)\n\n logger.info('start evaluation process')\n evaluation_measure_result = []\n\n with safe_recorder_manager(RecordReader(dump_dir)) as record_reader:\n for measure in evaluation_measures:\n record_generator = record_reader.iterate_read('predict', 'groundtruth')\n result = measure.eva(record_generator, None)\n evaluation_measure_result.append(result)\n task_running_statictic[self.ant_name]['measure'] = evaluation_measure_result\n\n bootstrap_running_statistic.append(task_running_statictic)\n\n evaluation_result = multi_repeats_measures_statistic(bootstrap_running_statistic, method='bootstrap')\n return evaluation_result\n\n def _kfold_cross_validation(self, kfolds, train_dataset, evaluation_measures, nowtime):\n assert (kfolds in [5, 10])\n kfolds_running_statistic = []\n for k in range(kfolds):\n # 1.step split train set and validation set\n part_train_dataset, part_validation_dataset = train_dataset.split(split_params={'kfold': kfolds,\n 'k': k},\n split_method='kfold')\n part_train_dataset.reset_state()\n # dump_dir\n dump_dir = os.path.join(self.ant_dump_dir, nowtime, 'train', 'kfold-evaluation', 'fold-%d-evaluation' % k)\n if not os.path.exists(dump_dir):\n os.makedirs(dump_dir)\n\n # 2.step training model\n self.stage = 'EVALUATION-KFOLD-TRAIN-%d' % k\n self.context.call_training_process(part_train_dataset, dump_dir)\n\n # 3.step evaluation measures\n # split data and label\n data_annotation_branch = DataAnnotationBranch(Node.inputs(part_validation_dataset))\n self.context.recorder = RecorderNode(Node.inputs(data_annotation_branch.output(1)))\n\n self.stage = 'EVALUATION-KFOLD-EVALUATION-%d' % k\n with safe_recorder_manager(self.context.recorder):\n with running_statistic(self.ant_name):\n self.context.call_infer_process(data_annotation_branch.output(0), dump_dir)\n\n # clear\n self.context.recorder = None\n\n task_running_statictic = get_running_statistic(self.ant_name)\n task_running_statictic = {self.ant_name: task_running_statictic}\n task_running_elapsed_time = task_running_statictic[self.ant_name]['time']['elapsed_time']\n task_running_statictic[self.ant_name]['time']['elapsed_time_per_sample'] = \\\n 
task_running_elapsed_time / float(part_validation_dataset.size)\n\n logger.info('start evaluation process')\n evaluation_measure_result = []\n\n with safe_recorder_manager(RecordReader(dump_dir)) as record_reader:\n for measure in evaluation_measures:\n record_generator = record_reader.iterate_read('predict', 'groundtruth')\n result = measure.eva(record_generator, None)\n evaluation_measure_result.append(result)\n task_running_statictic[self.ant_name]['measure'] = evaluation_measure_result\n\n kfolds_running_statistic.append(task_running_statictic)\n\n evaluation_result = multi_repeats_measures_statistic(kfolds_running_statistic, method='kfold')\n return evaluation_result\n\n def start_ablation_train_proc(self, data_source, challenge_task, ablation_blocks, time_stamp, spare_devices=None):\n # child func\n def proc_func(handle, experiment_data_source, experiment_challenge_task, ablation_block, root_time_stamp, spare_device):\n # perhaps proc_func is running in a new process\n handle.flash()\n\n # reassign running device\n handle.context.params.devices = [spare_device]\n # only one clone\n handle.context.params.num_clones = 1\n\n part_train_dataset, part_validation_dataset = experiment_data_source.split(split_method='holdout')\n part_train_dataset.reset_state()\n\n handle.context.deactivate_block(ablation_block)\n logger.info('start ablation experiment %s' % ablation_block)\n\n # dump_dir for ablation experiment\n ablation_dump_dir = os.path.join(handle.ant_dump_dir, root_time_stamp, 'train', 'ablation', ablation_block)\n if not os.path.exists(ablation_dump_dir):\n os.makedirs(ablation_dump_dir)\n\n # 2.step training model\n handle.stage = 'ABLATION-%s-TRAIN' % ablation_block\n handle.context.call_training_process(part_train_dataset, ablation_dump_dir)\n\n # 3.step evaluation measures\n # split data and label\n data_annotation_branch = DataAnnotationBranch(Node.inputs(part_validation_dataset))\n handle.context.recorder = RecorderNode(Node.inputs(data_annotation_branch.output(1)))\n\n handle.stage = 'ABLATION-%s-EVALUATION' % ablation_block\n with safe_recorder_manager(handle.context.recorder):\n handle.context.call_infer_process(data_annotation_branch.output(0), ablation_dump_dir)\n\n # clear\n handle.context.recorder = None\n\n ablation_running_statictic = {handle.ant_name: {}}\n ablation_evaluation_measure_result = []\n\n with safe_recorder_manager(RecordReader(ablation_dump_dir)) as record_reader:\n for measure in experiment_challenge_task.evaluation_measures:\n record_generator = record_reader.iterate_read('predict', 'groundtruth')\n result = measure.eva(record_generator, None)\n ablation_evaluation_measure_result.append(result)\n\n ablation_running_statictic[handle.ant_name]['measure'] = ablation_evaluation_measure_result\n handle.stage = 'ABLATION-%s-REPORT' % ablation_block\n\n # send statistic report\n handle.context.job.send({'DATA': {'REPORT': ablation_running_statictic}})\n everything_to_html(ablation_running_statictic, ablation_dump_dir)\n\n handle.context.wait_until_clear()\n\n ablation_experiments = []\n for block_i, block in enumerate(ablation_blocks):\n if spare_devices is not None:\n # apply independent process\n block_ablation_process = Process(target=proc_func,\n args=(self, data_source, challenge_task, block, time_stamp, spare_devices[block_i]),\n name='%s_ablation_block_%s'%(self.ant_name, block))\n ablation_experiments.append(block_ablation_process)\n else:\n # process sequentially in main process\n proc_func(self, data_source, challenge_task, block, time_stamp, 0)\n\n 
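# editor's note (added; not part of the original antgo code): the Process\n        # objects collected above are returned without being started; a caller is\n        # presumably expected to drive them, e.g.:\n        #   for p in ablation_experiments:\n        #       p.start()\n        #   for p in ablation_experiments:\n        #       p.join()\n        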
return ablation_experiments","sub_path":"antgo/ant/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":24196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"167350036","text":"import turtle\n\nwin = turtle.Screen() # Create a screen\nwin.title('Paddle') # Set the title to paddle\nwin.bgcolor('black') # Set the color to black\nwin.tracer(0)\nwin.setup(width=600, height=600) # Set the width and height to 600\n\nwhile True:\n win.update() # Show the scree continuously \n","sub_path":"paddle_screen.py","file_name":"paddle_screen.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629806548","text":"import os\nimport click\nfrom flask import Flask,render_template\nfrom flask_sqlalchemy import SQLAlchemy # 导入扩展类\n\n\napp = Flask(__name__)\ndb = SQLAlchemy(app) # 初始化扩展,传入程序实例 app\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.root_path, 'data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # 关闭对模型修改的监控\n\nname = 'Grey Li'\nmovies = [\n {'title': 'My Neighbor Totoro', 'year': '1988'},\n {'title': 'Dead Poets Society', 'year': '1989'},\n {'title': 'A Perfect World', 'year': '1993'},\n {'title': 'Leon', 'year': '1994'},\n {'title': 'Mahjong', 'year': '1996'},\n {'title': 'Swallowtail Butterfly', 'year': '1996'},\n {'title': 'King of Comedy', 'year': '1999'},\n {'title': 'Devils on the Doorstep', 'year': '1999'},\n {'title': 'WALL-E', 'year': '2008'},\n {'title': 'The Pork of Music', 'year': '2012'},\n]\n\n\n\n\n\nclass User(db.Model): # 表名将会是 user(自动生成,小写处理)\n id = db.Column(db.Integer, primary_key=True) # 主键\n name = db.Column(db.String(20)) # 名字\n\n\nclass Movie(db.Model): # 表名将会是 movie\n id = db.Column(db.Integer, primary_key=True) # 主键\n title = db.Column(db.String(60)) # 电影标题\n year = db.Column(db.String(4)) # 电影年份\n\n\n\n@app.route('/')\ndef index():\n #user = User.query.first() # 读取用户记录\n movies = Movie.query.all() # 读取所有电影记录\n return render_template('index.html',movies=movies)\n\n@app.errorhandler(404) # 传入要处理的错误代码\ndef page_not_found(e): # 接受异常对象作为参数\n user = User.query.first()\n return render_template('404.html'), 404 # 返回模板和状态码\n\n@app.context_processor\ndef inject_user(): # 函数名可以随意修改\n user = User.query.first()\n return dict(user=user) # 需要返回字典,等同于return {'user': user}\n\n#db.create_all()\n\nif __name__ =='__main__':\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"526299578","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 23 16:53:37 2015\n\n@author: jordan\n\"\"\"\n\nimport csv\nfrom numpy.linalg import norm\nfrom scipy import *\nfrom pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog\nimport os\nfrom numpy import ones\nfrom Hamil import *\n\nfrom numpy import tanh\n\ndef copyarraytoC(a):\n n = len(a)\n b = mallocPy(n)\n for i in range(n):\n writetomem(b,i,a[i])\n return b\n \ndef copyarrayfromC(a,n):\n b = [0]*n\n for i in range(n):\n b[i] = readfrommem(a,i)\n \n return b\n\n#eta = 1\ndef M0p7initial(xbeg,xend):\n A = xbeg + 0.7*sqrt(68.0/21.0)*tanh(sqrt(21.0/68.0)*xbeg)\n B = xend + 0.7*sqrt(68.0/21.0)*tanh(sqrt(21.0/68.0)*xend)\n \n return B - A \n \n \ndef P0p7initial(xbeg,xend):\n A = sqrt(16.677)*0.7*sqrt(68.0/21.0)*tanh(sqrt(21.0/68.0)*xbeg)\n B = 
sqrt(16.677)*0.7*sqrt(68.0/21.0)*tanh(sqrt(21.0/68.0)*xend)\n \n return B - A \n \ndef M1initial(xbeg,xend):\n A = xbeg + 2*sqrt(2.0/3.0)*tanh(sqrt(3.0/8.0)*xbeg)\n B = xend + 2*sqrt(2.0/3.0)*tanh(sqrt(3.0/8.0)*xend)\n \n return B - A \n \n \ndef P1initial(xbeg,xend):\n A = 7.23326*tanh(0.612372*xbeg)\n B = 7.23326*tanh(0.612372*xend)\n \n return B - A \n \n \ndef SolE(xbeg,xend):\n AB = 21.0068*tanh(0.555719*xbeg) - 19.2569*arctanh(0.641689*tanh(0.555719*xbeg))\n AE = 21.0068*tanh(0.555719*xend) - 19.2569*arctanh(0.641689*tanh(0.555719*xend))\n \n BB = 9.81*(xbeg) + tanh(0.555719*xbeg)*(2.88329*sech(0.555719*xbeg)**2 + 30.4805)\n BE =9.81*(xend) + tanh(0.555719*xend)*(2.88329*sech(0.555719*xend)**2 + 30.4805)\n \n CB = 307.641*(tanh(0.555719*xbeg)*(0.049539 - 0.00937224*sech(0.555719*xbeg)**2) -0.0625954*arctanh(0.641689*(tanh(0.555719*xbeg))))\n CE = 307.641*(tanh(0.555719*xend)*(0.049539 - 0.00937224*sech(0.555719*xend)**2) -0.0625954*arctanh(0.641689*(tanh(0.555719*xend))))\n\n \n A = AE - AB \n B = BE - BB \n C = CE - CB\n\n\n #1527.68293\n return 0.5*(A + B + C)\n\ndef totalmassa(xb,xe,x0,h0,h1,alpha):\n return 0.5*(h1 + h0)*(xe - xb)\n\ndef totalmomentum():\n return 0\n\ndef Hamilana(xb,xe,x0,h0,h1,alpha,g):\n p1 = (g/8.0)*(xe - xb)*( ((h0+h1)**2) + ((h1 - h0)**2))\n p2 = alpha*(g/4.0)*(h1 - h0)**2*(tanh(0.5*(xb - xe)/alpha))\n \n return p1 + p2\n\ndef midpointtoca(h,dx):\n n = len(h)\n b = zeros(n)\n c = zeros(n)\n a = zeros(n)\n i24 = 1.0/24\n\n for i in range(n): \n a[i-1] = -i24\n b[i] = 26*i24\n c[i] = -i24\n \n #i =0\n i = 0;\n b[i] = 1.0;\n c[i] = 0.0;\n\n #i=n-1\n i = n-1;\n a[i-1] = 0.0;\n b[i] = 1.0; \n \n return TDMApy(a,b,c,h)\n \ndef TDMApy(a,b,c,d):\n n = len(d)\n alpha = []\n beta = []\n x = [0]*n\n \n alpha.append((1.0*c[0])/b[0])\n beta.append((1.0*d[0])/b[0] ) \n \n for i in range(1,n-1):\n m = 1.0 / (b[i] - a[i-1]*alpha[i-1])\n alpha.append(c[i]* m)\n beta.append((d[i] - a[i-1]*beta[i-1]) * m)\n \n m = 1.0 / (b[n-1] - a[n-2]*alpha[n-2])\n beta.append((d[n-1] - a[n-2]*beta[n-2]) * m) \n\n x[n-1] = beta[n-1]\n \n for i in range(n-2,-1,-1):\n x[i] = beta[i] - alpha[i]*x[i+1]\n \n return array(x)\n \n\n\ndef makevar(sx,ex,dx,st,et,dt): \n x = arange(sx, ex, dx)\n t = arange(st, et, dt)\n \n return x,t \n\ndef sech(x):\n a = 2./(exp(x) + exp(-x))\n return a\n\ndef sech2 (x):\n a = 2./(exp(x) + exp(-x))\n return a*a\n\ndef soliton (x,t,g,a0,a1):\n c = sqrt(g*(a0 + a1))\n phi = x - c*t;\n k = sqrt(3.0*a1) / (2.0*a0 *sqrt(a0 + a1))\n return a0 + a1*sech2(k*phi)\n \ndef solitoninit(n,a0,a1,g,x,t0,dx):\n h = zeros(n)\n u = zeros(n)\n c = sqrt(g*(a0 + a1))\n for i in range(n):\n h[i] = soliton(x[i],t0,g,a0,a1)\n u[i] = c* ((h[i] - a0) / h[i])\n \n return h,u\n\n\n\n\"\"\"\nwdirord = \"FDcent\"\n#wdir = \"../../../../data/raw/NEWdata/FDredo/grim/\"\n#sdir = \"../../../../data/postprocessing/scFDallAE/grim/\"\n\nsdir = \"../../../../data/postprocessing/FDREREDON/\"+wdirord+\"/\"\nif not os.path.exists(sdir):\n os.makedirs(sdir)\nMns = []\nPns = []\nMis = []\nPis = []\ndxs = []\nEns = []\nEis = []\n#range(6,19)\ng = 9.81\nfor ki in range(6,20):\n \n #Nonlinear Soliton\n dxw = str(ki)\n \n \n wdir = \"../../../../data/raw/FDreredo/\" +wdirord +\"/\" + dxw + \"/\"\n \n s = wdir + \"outlast.txt\"\n with open(s,'r') as file1:\n readfile = csv.reader(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n \n h = []\n u = []\n he = []\n ue = []\n x = []\n j = -1\n for row in readfile: \n if (j >= 0):\n dx = float(row[0])\n dt = float(row[1])\n t = float(row[2])\n 
#h.append(float(row[3]))\n                x.append(float(row[6]))\n                h.append(float(row[7]))\n                u.append(float(row[8]))\n                he.append(float(row[9]))\n                ue.append(float(row[10]))\n            j = j + 1 \n    \n    #plot(x,h)\n    #plot(x,he)\n    n = len(x)\n    o = ones(n)\n    \n    niBC = 4\n    startx = x[0]\n    endx = x[-1]\n    \n    u0 = u[0]*ones(niBC)\n    u1 = u[-1]*ones(niBC)    \n    h0 = h[0]*ones(niBC)\n    h1 = h[-1]*ones(niBC)\n    \n    xbeg = arange(startx - niBC*dx,startx,dx)\n    xend = arange(endx + dx,endx + (niBC+1)*dx,dx) \n    \n    xbc = concatenate([xbeg,x,xend])\n    hbc = concatenate([h0,h,h1])\n    ubc = concatenate([u0,u,u1])\n    \n    xbc_c = copyarraytoC(xbc)\n    hbc_c = copyarraytoC(hbc)\n    ubc_c = copyarraytoC(ubc)\n    \n    #hi,ui = solitoninit(n,1,1,9.81,x,0,dx)\n\n    En = HankEnergyall(xbc_c,hbc_c,ubc_c,g,n + 2*niBC,niBC,dx)\n    Pn = uhall(xbc_c,hbc_c,ubc_c,n + 2*niBC,niBC,dx)\n    Mn = hall(xbc_c,hbc_c,n + 2*niBC,niBC,dx) \n    \n    xbeg = -50 - 0.5*dx\n    xend = 250 + 0.5*dx\n\n    Pi = P0p7initial(xbeg,xend)\n    Mi = M0p7initial(xbeg,xend)\n    Ei = SolE(xbeg,xend)\n    \n    #Mi = Hamiltonianall(x,hi,o,dx)\n    #Pi = Hamiltonianall(x,hi,ui,dx)\n    \n    Pns.append(Pn)\n    Pis.append(Pi)\n    Mns.append(Mn)\n    Mis.append(Mi)\n    dxs.append(dx)\n    Ens.append(En)\n    Eis.append(Ei)\n\nEns = array(Ens)\nEis = array(Eis) \nMns = array(Mns)\nPns = array(Pns)\nMis = array(Mis)\nPis = array(Pis)\n\nrelerrP = abs(Pis - Pns)/ abs(Pis)\nrelerrM = abs(Mis - Mns)/ abs(Mis)\nrelerrE = abs(Eis - Ens)/ abs(Eis)\n\nn= len(dxs)\ns = sdir + \"con.txt\"\nwith open(s,'a') as file2:\n    writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n    \n    writefile2.writerow(['dx','Pis','Pns' ,'relerrP','Mis','Mns','relerrM'])  \n    \n    for j in range(n):\n        writefile2.writerow([str(dxs[j]),str(Pis[j]),str(Pns[j]),str(relerrP[j]),str(Mis[j]),str(Mns[j]),str(relerrM[j])]) \n\ns = sdir + \"conh.dat\"\nwith open(s,'w') as file1:\n    for i in range(n):\n        s =\"%3.8f%5s%1.15f\\n\" %(dxs[i],\" \",relerrM[i])\n        file1.write(s) \n    \ns = sdir + \"conuh.dat\"\nwith open(s,'w') as file1:\n    for i in range(n):\n        s =\"%3.8f%5s%1.15f\\n\" %(dxs[i],\" \",relerrP[i])\n        file1.write(s) \n    \ns = sdir + \"conH.dat\"\nwith open(s,'w') as file1:\n    for i in range(n):\n        s =\"%3.8f%5s%1.15f\\n\" %(dxs[i],\" \",relerrE[i])\n        file1.write(s) \n\"\"\"\n\nwdirord = \"o3\"\n#wdir = \"../../../../data/raw/NEWdata/FDredo/grim/\"\n#sdir = \"../../../../data/postprocessing/scFDallAE/grim/\"\n\nwdirb = \"../../../../data/raw/bigsmoothtargetted/\"+wdirord+\"/\"\nMns = []\nPns = []\nMis = []\nPis = []\ndxs = []\nEns = []\nEis = []\nalphas = []\n#range(6,19)\ndiffi= \"12\"\ng = 9.81\n\nsdir = \"../../../../data/postprocessing/CONuhHNAT/\"+ diffi + \"/\"\n\nif not os.path.exists(sdir):\n    os.makedirs(sdir)\nfor ki in range(1,2):\n    \n    #Nonlinear Soliton\n    dxw = str(2**ki)\n    \n    \n    wdir = wdirb + \"/\" + dxw + \"/\" + diffi + \"/\"\n    \n    s = wdir + \"outlast.txt\"\n    with open(s,'r') as file1:\n        readfile = csv.reader(file1, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n        \n        h = []\n        u = []\n        he = []\n        ue = []\n        x = []\n        j = -1\n        for row in readfile:  \n            if (j >= 0):\n                dx = float(row[0])\n                dt = float(row[1])\n                t = float(row[2])\n                x.append(float(row[3]))\n                h.append(float(row[4]))\n                u.append(float(row[6]))\n                ai = float(float(row[7]))\n            j = j + 1 \n    \n    #plot(x,h)\n    #plot(x,he)\n    n = len(x)\n    o = ones(n)\n    \n    niBC = 4\n    startx = x[0]\n    endx = x[-1]  # ghost cells extend past the last interior cell\n    \n    u0 = u[0]*ones(niBC)\n    u1 = u[-1]*ones(niBC)    \n    h0 = h[0]*ones(niBC)\n    h1 = h[-1]*ones(niBC)\n    \n    xbeg = arange(startx - niBC*dx,startx,dx)\n    xend = arange(endx + dx,endx + (niBC+1)*dx,dx)  # step dx so that len(xend) == niBC\n    \n    xbc = 
concatenate([xbeg,x,xend])\n hbc = concatenate([h0,h,h1])\n ubc = concatenate([u0,u,u1])\n \n xbc_c = copyarraytoC(xbc)\n hbc_c = copyarraytoC(hbc)\n ubc_c = copyarraytoC(ubc)\n \n #hi,ui = solitoninit(n,1,1,9.81,x,0,dx)\n\n En = HankEnergyall(xbc_c,hbc_c,ubc_c,g,n + 2*niBC,niBC,dx)\n Pn = uhall(xbc_c,hbc_c,ubc_c,n + 2*niBC,niBC,dx) + 0.5*g*30*(h[-1]**2 - h[0]**2)\n Mn = hall(xbc_c,hbc_c,n + 2*niBC,niBC,dx)\n \n xbeg = 0 - 0.5*dx\n xend = 1000 + 0.5*dx\n\n \n Ei = Hamilana(x[0] - 0.5*dx,x[-1] + 0.5*dx,500,1.0,1.8,1.0/ai,g)\n \n Mi = totalmassa(x[0] - 0.5*dx,x[-1] + 0.5*dx,500,1.0,1.8,1.0/ai)\n Pi = totalmomentum() \n \n #Mi = Hamiltonianall(x,hi,o,dx)\n #Pi = Hamiltonianall(x,hi,ui,dx)\n \n alpha = 1.0/ ai\n alphas.append(alpha)\n \n Pns.append(Pn)\n Pis.append(Pi)\n Mns.append(Mn)\n Mis.append(Mi)\n dxs.append(dx)\n Ens.append(En)\n Eis.append(Ei)\n\nEns = array(Ens)\nEis = array(Eis) \nMns = array(Mns)\nPns = array(Pns)\nMis = array(Mis)\nPis = array(Pis)\n\nrelerrP = abs(Pis - Pns)\nrelerrM = abs(Mis - Mns)/ abs(Mis)\nrelerrE = abs(Eis - Ens)/ abs(Eis)\n\n\nn= len(dxs)\ns = sdir + \"con.txt\"\nwith open(s,'a') as file2:\n writefile2 = csv.writer(file2, delimiter = ',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n \n writefile2.writerow(['dx','alphas','Pis','Pns' ,'relerrP','Mis','Mns','relerrM']) \n \n for j in range(n):\n writefile2.writerow([str(dxs[j]),str(alphas[j]),str(Pis[j]),str(Pns[j]),str(relerrP[j]),str(Mis[j]),str(Mns[j]),str(relerrM[j])]) \n\ns = sdir + \"conh.dat\"\nwith open(s,'w') as file1:\n for i in range(n):\n s =\"%3.8f%5s%1.15f\\n\" %(dxs[i],\" \",relerrM[i])\n file1.write(s) \n \ns = sdir + \"conuh.dat\"\nwith open(s,'w') as file1:\n for i in range(n):\n s =\"%3.8f%5s%1.15f\\n\" %(dxs[i],\" \",relerrP[i])\n file1.write(s) \n \ns = sdir + \"conH.dat\"\nwith open(s,'w') as file1:\n for i in range(n):\n s =\"%3.8f%5s%1.15f\\n\" %(dxs[i],\" \",relerrE[i])\n file1.write(s) \n","sub_path":"CODE/postprocessing/morecomplex/conservation/conuh.py","file_name":"conuh.py","file_ext":"py","file_size_in_byte":10805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616476037","text":"import arbor\nimport seaborn\nimport pandas\nfrom math import sqrt\n\n# Make a ball and stick cell model\n\ntree = arbor.segment_tree()\n\n# Construct a cell with the following morphology.\n# The soma (at the root of the tree) is marked 's', and\n# the end of each branch i is marked 'bi'.\n#\n# b4\n# /\n# /\n# b1---b3\n# /\n# /\n# s-------b0\n# \\\n# \\\n# b2\n\n# Start with a spherical soma with radius 6 μm,\n# approximated with a cylinder of: length = diameter = 12 μm.\n\ns = tree.append(arbor.mnpos, arbor.mpoint(-12, 0, 0, 6), arbor.mpoint(0, 0, 0, 6), tag=1)\n\n# Add the dendrite cables, labelling those closest to the soma \"dendn\",\n# and those furthest with \"dendx\" because we will set different electrical\n# properties for the two regions.\n\nlabels = arbor.label_dict()\nlabels['soma'] = '(tag 1)'\nlabels['dendn'] = '(tag 5)'\nlabels['dendx'] = '(tag 6)'\n\nb0 = tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(100, 0, 0, 2), tag=5)\n\n# Radius tapers from 2 to 0.5 over the length of the branch.\n\nb1 = tree.append(b0, arbor.mpoint(100, 0, 0, 2), arbor.mpoint(100+50/sqrt(2), 50/sqrt(2), 0, 0.5), tag=5)\nb2 = tree.append(b0, arbor.mpoint(100, 0, 0, 1), arbor.mpoint(100+50/sqrt(2), -50/sqrt(2), 0, 1), tag=5)\nb3 = tree.append(b1, arbor.mpoint(100+50/sqrt(2), 50/sqrt(2), 0, 1), arbor.mpoint(100+50/sqrt(2)+50, 50/sqrt(2), 0, 1), tag=6)\nb4 
= tree.append(b1, arbor.mpoint(100+50/sqrt(2), 50/sqrt(2), 0, 1), arbor.mpoint(100+2*50/sqrt(2), 2*50/sqrt(2), 0, 1), tag=6)\n\n# Combine the \"dendn\" and \"dendx\" regions into a single \"dend\" region.\n# The dendrites were labelled as such so that we can set different\n# properties on each sub-region, and then combined so that we can\n# set other properties on the whole dendrites.\nlabels['dend'] = '(join (region \"dendn\") (region \"dendx\"))'\n# Location of stimuli, in the middle of branch 2.\nlabels['stim_site'] = '(location 1 0.5)'\n# The root of the tree (equivalent to '(location 0 0)')\nlabels['root'] = '(root)'\n# The tips of the dendrites (3 locations at b4, b3, b2).\nlabels['dtips'] = '(terminal)'\n\n# Extract the cable cell from the builder.\n# cell = b.build()\ncell = arbor.cable_cell(tree, labels)\n\n# Set initial membrane potential everywhere on the cell to -40 mV.\ncell.set_properties(Vm=-40)\n# Put hh dynamics on soma, and passive properties on the dendrites.\ncell.paint('\"soma\"', 'hh')\ncell.paint('\"dend\"', 'pas')\n# Set axial resistivity in dendrite regions (Ohm.cm)\ncell.paint('\"dendn\"', rL=500)\ncell.paint('\"dendx\"', rL=10000)\n# Attach stimuli with duration of 2 ms and current of 0.8 nA.\n# There are three stimuli, which activate at 10 ms, 50 ms and 80 ms.\ncell.place('\"stim_site\"', arbor.iclamp( 10, 2, 0.8))\ncell.place('\"stim_site\"', arbor.iclamp( 50, 2, 0.8))\ncell.place('\"stim_site\"', arbor.iclamp( 80, 2, 0.8))\n# Add a spike detector with threshold of -10 mV.\ncell.place('\"root\"', arbor.spike_detector(-10))\n\n# Discretization: the default discretization in Arbor is 1 compartment per branch.\n# Let's be a bit more precise and make that every 2 micron:\ncell.compartments_length(2)\n\n# Make single cell model.\nm = arbor.single_cell_model(cell)\n\n# Attach voltage probes, sampling at 10 kHz.\nm.probe('voltage', '(location 0 0)', 10000) # at the soma.\nm.probe('voltage', '\"dtips\"', 10000) # at the tips of the dendrites.\n\n# Run simulation for 100 ms of simulated activity.\ntfinal=100\nm.run(tfinal)\nprint(\"Simulation done.\")\n\n# Print spike times.\nif len(m.spikes)>0:\n print('{} spikes:'.format(len(m.spikes)))\n for s in m.spikes:\n print(' {:7.4f}'.format(s))\nelse:\n print('no spikes')\n\n# Plot the recorded voltages over time.\nprint(\"Plotting results...\")\ndf = pandas.DataFrame()\nfor t in m.traces:\n df=df.append(pandas.DataFrame({'t/ms': t.time, 'U/mV': t.value, 'Location': t.location, \"Variable\": t.variable}) )\n\nseaborn.relplot(data=df, kind=\"line\", x=\"t/ms\", y=\"U/mV\",hue=\"Location\",col=\"Variable\").savefig('single_cell_multi_branch_result.svg')\n","sub_path":"python/example/single_cell_multi_branch.py","file_name":"single_cell_multi_branch.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"138421600","text":"import os\nimport uuid\nimport base64\nimport logging\nimport time\nimport urllib.request\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\n\ndef temp_directory():\n return os.path.abspath(os.path.join('.', 'data'))\n\n\nMODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\nGRAPH_PATH = os.path.join(temp_directory(), MODEL_NAME,\n 'frozen_inference_graph.pb')\nLOCAL_TMP_PATH = \"./tmp/\"\n\n\ndef get_mscoco_label_dict():\n mscoco_label_dict = {1: ['/m/01g317', 'person'], 2: ['/m/0199g', 'bicycle'], 3: ['/m/0k4j', 'car'],\n 4: ['/m/04_sv', 'motorcycle'], 5: ['/m/05czz6l', 'airplane'], 6: ['/m/01bjv', 'bus'],\n 7: 
['/m/07jdr', 'train'], 8: ['/m/07r04', 'truck'], 9: ['/m/019jd', 'boat'],\n 10: ['/m/015qff', 'traffic light'], 11: ['/m/01pns0', 'fire hydrant'],\n 13: ['/m/02pv19', 'stop sign'], 14: ['/m/015qbp', 'parking meter'], 15: ['/m/0cvnqh', 'bench'],\n 16: ['/m/015p6', 'bird'], 17: ['/m/01yrx', 'cat'], 18: ['/m/0bt9lr', 'dog'],\n 19: ['/m/03k3r', 'horse'], 20: ['/m/07bgp', 'sheep'], 21: ['/m/01xq0k1', 'cow'],\n 22: ['/m/0bwd_0j', 'elephant'], 23: ['/m/01dws', 'bear'], 24: ['/m/0898b', 'zebra'],\n 25: ['/m/03bk1', 'giraffe'], 27: ['/m/01940j', 'backpack'], 28: ['/m/0hnnb', 'umbrella'],\n 31: ['/m/080hkjn', 'handbag'], 32: ['/m/01rkbr', 'tie'], 33: ['/m/01s55n', 'suitcase'],\n 34: ['/m/02wmf', 'frisbee'], 35: ['/m/071p9', 'skis'], 36: ['/m/06__v', 'snowboard'],\n 37: ['/m/018xm', 'sports ball'], 38: ['/m/02zt3', 'kite'], 39: ['/m/03g8mr', 'baseball bat'],\n 40: ['/m/03grzl', 'baseball glove'], 41: ['/m/06_fw', 'skateboard'],\n 42: ['/m/019w40', 'surfboard'], 43: ['/m/0dv9c', 'tennis racket'],\n 44: ['/m/04dr76w', 'bottle'], 46: ['/m/09tvcd', 'wine glass'], 47: ['/m/08gqpm', 'cup'],\n 48: ['/m/0dt3t', 'fork'], 49: ['/m/04ctx', 'knife'], 50: ['/m/0cmx8', 'spoon'],\n 51: ['/m/04kkgm', 'bowl'], 52: ['/m/09qck', 'banana'], 53: ['/m/014j1m', 'apple'],\n 54: ['/m/0l515', 'sandwich'], 55: ['/m/0cyhj_', 'orange'], 56: ['/m/0hkxq', 'broccoli'],\n 57: ['/m/0fj52s', 'carrot'], 58: ['/m/01b9xk', 'hot dog'], 59: ['/m/0663v', 'pizza'],\n 60: ['/m/0jy4k', 'donut'], 61: ['/m/0fszt', 'cake'], 62: ['/m/01mzpv', 'chair'],\n 63: ['/m/02crq1', 'couch'], 64: ['/m/03fp41', 'potted plant'], 65: ['/m/03ssj5', 'bed'],\n 67: ['/m/04bcr3', 'dining table'], 70: ['/m/09g1w', 'toilet'], 72: ['/m/07c52', 'tv'],\n 73: ['/m/01c648', 'laptop'], 74: ['/m/020lf', 'mouse'], 75: ['/m/0qjjc', 'remote'],\n 76: ['/m/01m2v', 'keyboard'], 77: ['/m/050k8', 'cell phone'], 78: ['/m/0fx9l', 'microwave'],\n 79: ['/m/029bxz', 'oven'], 80: ['/m/01k6s3', 'toaster'], 81: ['/m/0130jx', 'sink'],\n 82: ['/m/040b_t', 'refrigerator'], 84: ['/m/0bt_c3', 'book'], 85: ['/m/01x3z', 'clock'],\n 86: ['/m/02s195', 'vase'], 87: ['/m/01lsmm', 'scissors'], 88: ['/m/0kmg4', 'teddy bear'],\n 89: ['/m/03wvsk', 'hair drier'], 90: ['/m/012xff', 'toothbrush']}\n return mscoco_label_dict\n\n\ndef cv2base64(image):\n try:\n tmp_file_name = os.path.join(LOCAL_TMP_PATH, \"%s.jpg\" % uuid.uuid1())\n cv2.imwrite(tmp_file_name, image)\n with open(tmp_file_name, \"rb\") as f:\n base64_data = base64.b64encode(f.read())\n base64_data = base64_data.decode(\"utf-8\")\n return base64_data\n except Exception as e:\n err_msg = \"Convert cv2 object to base64 failed: \"\n logging.error(err_msg, e, exc_info=True)\n raise e\n\n\nclass BoundingBox:\n def __init__(self, x1, y1, x2, y2, score, label=None):\n self.x1 = x1\n self.x2 = x2\n self.y1 = y1\n self.y2 = y2\n self.score = score\n self.label = label\n\n\nclass SSDDetectObject:\n def __init__(self):\n self.fetch_resources()\n self.model_init = False\n self.user_config = self.get_operator_config()\n self.label_dict = get_mscoco_label_dict()\n # initialize model\n try:\n self.graph = self.build_graph()\n with self.graph.as_default():\n with tf.device(self.device_str):\n self.session = tf.Session(\n config=self.user_config, graph=self.graph)\n with self.session.as_default():\n self.bulk_execute(np.zeros((1, 300, 300, 3)))\n except Exception as e:\n logging.error(\n \"unexpected error happen during build graph\",\n exc_info=True)\n raise e\n\n def get_operator_config(self):\n try:\n self.device_str = os.environ.get(\"device_id\", 
\"/cpu:0\")\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n gpu_mem_limit = float(os.environ.get(\"gpu_mem_limit\", 0.3))\n config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_limit\n # for device debug info print\n if os.environ.get(\"log_device_placement\", False):\n config.log_device_placement = True\n logging.info(\"device id %s, gpu memory limit: %f\",\n self.device_str, gpu_mem_limit)\n\n except Exception as e:\n logging.error(\n \"unexpected error happen during read config\",\n exc_info=True)\n raise e\n logging.info(\"Model device str: %s, session config: %s\",\n self.device_str, config)\n return config\n\n def build_graph(self):\n dnn = tf.Graph()\n return dnn\n\n def load_model(self):\n with self.graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n self.model_init = True\n\n def fetch_resources(self):\n # model_tar_path = download_temp_file(DOWNLOAD_BASE + MODEL_FILE)\n # with tarfile.open(model_tar_path) as f:\n # f.extractall(temp_directory())\n # download_temp_file(LABEL_URL)\n pass\n\n def get_bboxes(self, images, boxes, scores, classes, threshold=0.4):\n bboxes = []\n for i in range(len(images)):\n bbox = []\n for (box, score, cls) in zip(boxes[i, :, :].reshape(100, 4),\n scores[i, :].reshape(100, 1),\n classes[i, :].reshape(100, 1)):\n if score > threshold:\n bbox.append(BoundingBox(\n x1=box[1], y1=box[0], x2=box[3], y2=box[2], score=score,\n label=self.label_dict[int(cls[0])][1]))\n bboxes.append(bbox)\n return bboxes\n\n @staticmethod\n def get_obj_image(images, bboxes):\n obj_images = []\n for i, frame_bboxes in enumerate(bboxes):\n frame_object = []\n [h, w] = images[i].shape[:2]\n for j, bbox in enumerate(frame_bboxes):\n tmp = images[i][int(bbox.y1 * h):int(bbox.y2 * h), int(bbox.x1 * w):int(bbox.x2 * w)]\n frame_object.append(cv2base64(tmp))\n obj_images.append(frame_object)\n return obj_images\n\n @staticmethod\n def get_label(bboxes):\n obj_labels = []\n for i, frame_bboxes in enumerate(bboxes):\n frame_object = []\n for j, bboxes in enumerate(frame_bboxes):\n tmp_label = bboxes.label\n frame_object.append(tmp_label)\n obj_labels.append(frame_object)\n return obj_labels\n\n def execute(self, image):\n objs = self.bulk_execute([image])\n return objs[0]\n\n def bulk_execute(self, images):\n with self.graph.as_default():\n with tf.device(self.device_str):\n if not self.model_init:\n self.load_model()\n image_tensor = self.graph.get_tensor_by_name('image_tensor:0')\n boxes = self.graph.get_tensor_by_name('detection_boxes:0')\n scores = self.graph.get_tensor_by_name('detection_scores:0')\n classes = self.graph.get_tensor_by_name('detection_classes:0')\n with self.session.as_default():\n (boxes, scores, classes) = self.session.run([boxes, scores, classes], feed_dict={\n image_tensor: np.concatenate(np.expand_dims(images, axis=0), axis=0)})\n bboxes = self.get_bboxes(images, boxes, scores, classes)\n logging.debug(bboxes)\n # objs = self.get_obj_image(images, bboxes)\n objs = self.get_label(bboxes)\n return objs\n\n @property\n def name(self):\n return \"ssd\"\n\n @property\n def type(self):\n return \"processor\"\n\n @property\n def accept_filetype(self):\n return [\"png\", \"jpg\", \"jepg\"]\n\n @property\n def input(self):\n return \"image\"\n\n @property\n def output(self):\n return \"tags\"\n\n @property\n def dimension(self):\n return 
\"-1\"\n\n @property\n def metric_type(self):\n return \"-1\"\n\n\ndef save_tmp_file(name, file_data=None, url=None):\n start = time.time()\n extension = 'jpg'\n file_path = os.path.join(LOCAL_TMP_PATH, name + '.' + extension)\n if file_data:\n img_data = file_data.split(\",\")\n if len(img_data) == 2:\n posting = img_data[0]\n data_type = posting.split(\"/\")[1]\n extension = data_type.split(\";\")[0]\n encode_method = data_type.split(\";\")[1]\n if encode_method != \"base64\":\n logging.error(\"Encode method not base64\")\n raise\n # raise DecodeError(\"Encode method not base64\")\n imgstring = img_data[1]\n else:\n imgstring = img_data[0]\n file_path = os.path.join(LOCAL_TMP_PATH, name + '.' + extension)\n with open(file_path, \"wb\") as f:\n f.write(base64.b64decode(imgstring))\n if url:\n try:\n urllib.request.urlretrieve(url, file_path)\n except Exception as e:\n logging.error(\"Download file from url error : %s\", str(e), exc_info=True)\n raise\n # raise DownloadFileError(\"Download file from url %s\" % url, e)\n end = time.time()\n logging.info(' save_tmp_file cost: {:.3f}s'.format(end - start))\n return file_path\n\n\ndef run(detector, images, urls):\n result_images = []\n start = time.time()\n try:\n if images:\n for img in images:\n file_name = \"{}-{}\".format(\"processor\", uuid.uuid4().hex)\n image_path = save_tmp_file(file_name, file_data=img)\n if image_path:\n image = cv2.imread(image_path)\n result_images.extend(detector.bulk_execute([image]))\n else:\n for url in urls:\n file_name = \"{}-{}\".format(\"processor\", uuid.uuid4().hex)\n image_path = save_tmp_file(file_name, url=url)\n if image_path:\n image = cv2.imread(image_path)\n result_images.extend(detector.bulk_execute([image]))\n except Exception as e:\n logging.error(\"something error: %s\", str(e), exc_info=True)\n pass\n end = time.time()\n logging.info('%s cost: {:.3f}s'.format(end - start), \"ssd detector\")\n return result_images\n","sub_path":"ssd-encoder/ssd.py","file_name":"ssd.py","file_ext":"py","file_size_in_byte":11976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"181887861","text":"from django import forms\nfrom django.forms import ValidationError\nfrom django.core import exceptions\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.core.validators import validate_email\n\nclass SignInForm(UserCreationForm):\n #email = forms.EmailField(required=True, label=\"Email\", help_text=\"Required. 
\n    class Meta:\n        model = User\n        fields = ('username', 'email', 'password1', 'password2')\n\n    def clean_email(self):\n        email = self.cleaned_data.get('email')\n        try:\n            validate_email(email)\n        except exceptions.ValidationError as e:\n            raise ValidationError('The email is not valid')\n        if User.objects.filter(email__exact=email):\n            raise ValidationError('The email is associated to another user')\n        else:\n            return email\n","sub_path":"src/account/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"32712185","text":"#!/usr/bin/python3\n\n#####################################################################\n# All User Interface, StringTuner, Graphing and Serial code\n#####################################################################\n# Author: Marcio Teixeira\n# Date: August 2018\n# License: Creative Commons Attribution-ShareAlike 3.0\n#          https://creativecommons.org/licenses/by-sa/3.0/us/\n######################################################################\n\n\n#####################################################################\n# FFT processing and mathematical routines in SoundProcessor\n#####################################################################\n# Author: Matt Zucker\n# Date: July 2016\n# License: Creative Commons Attribution-ShareAlike 3.0\n#          https://creativecommons.org/licenses/by-sa/3.0/us/\n######################################################################\n\nimport numpy as np\nimport pyaudio\nimport peakutils\nfrom peakutils.plot import plot as pplot\n\nimport glob\nimport itertools  # used by StringTuner.assign_strings_to_notes\nimport serial\nimport sys        # used by PortSelector.serial_ports\nimport tkinter as tk\nimport time\nimport matplotlib\nimport matplotlib.pyplot as pyplot\n#import winsound\n\nfrom tkinter import *\nfrom sys import platform\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\n\nmatplotlib.use('TkAgg')\n\n#string_axis = [\"X\", \"Y\", \"Z\", \"E0\", \"E1\"]\nstring_axis = [\"X\", \"Y\", \"Z\"]\n\n######################################################################\n# Feel free to play with these numbers. Might want to change NOTE_MIN\n# and NOTE_MAX especially for guitar/bass. Probably want to keep\n# FRAME_SIZE and FRAMES_PER_FFT to be powers of two.\n\nNOTE_RNG = 13\nNOTE_MIN = 55\nNOTE_MAX = NOTE_MIN + NOTE_RNG - 1\nFSAMP = 22050       # Sampling frequency in Hz\nFRAME_SIZE = 2048   # How many samples per frame?\nFRAMES_PER_FFT = 8  # FFT takes average across how many frames?\n\n######################################################################\n# Derived quantities from constants above.  
Note that as\n# SAMPLES_PER_FFT goes up, the frequency step size decreases (so\n# resolution increases); however, it will incur more delay to process\n# new sounds.\n\nSAMPLES_PER_FFT = FRAME_SIZE*FRAMES_PER_FFT\nFREQ_STEP = float(FSAMP)/SAMPLES_PER_FFT\n\n######################################################################\n# For printing out notes\n\nNOTE_NAMES = 'C C# D D# E F F# G G# A A# B'.split()\n\n######################################################################\n# These three functions are based upon this very useful webpage:\n# https://newt.phys.unsw.edu.au/jw/notes.html\n\ndef freq_to_number(f): return 69 + 12*np.log2(f/440.0)\ndef number_to_freq(n): return 440 * 2.0**((n-69)/12.0)\ndef note_name(n): return NOTE_NAMES[n % 12] + str(int(n/12) - 1)\n\n######################################################################\n# Ok, ready to go now.\n\n# Get min/max index within FFT of notes we care about.\n# See docs for numpy.rfftfreq()\ndef note_to_fftbin(n): return number_to_freq(n)/FREQ_STEP\n\nclass SoundProcessor:\n def __init__(self):\n self.freqs = []\n self.vals = []\n self.peaks = []\n self.maximum = 0\n self.threshold = 0.20\n \n self.is_maximum = False\n self.is_falling = False\n self.is_finished = False\n self.is_starting = False\n self.is_silent = True\n \n self.peak_increasing = True\n self.peak_previous = 0\n \n self.imin = max(0, int(np.floor(note_to_fftbin(NOTE_MIN-1))))\n self.imax = min(SAMPLES_PER_FFT, int(np.ceil(note_to_fftbin(NOTE_MAX+1))))\n\n # Allocate space to run an FFT. \n self.buf = np.zeros(SAMPLES_PER_FFT, dtype=np.float32)\n self.num_frames = 0\n \n self.freqs = (np.arange(self.imax - self.imin) + self.imin) * FREQ_STEP\n\n # Initialize audio\n self.stream = pyaudio.PyAudio().open(format=pyaudio.paInt16,\n channels=1,\n rate=FSAMP,\n input=True,\n frames_per_buffer=FRAME_SIZE)\n\n self.stream.start_stream()\n\n # Create Hanning window function\n self.window = 0.5 * (1 - np.cos(np.linspace(0, 2*np.pi, SAMPLES_PER_FFT, False)))\n\n # Print initial text\n print('sampling at', FSAMP, 'Hz with max resolution of', FREQ_STEP, 'Hz')\n print()\n \n def detect_maximum(self, peak_max):\n self.is_maximum = False\n if self.is_silent:\n self.is_silent = False\n self.is_starting = True\n else:\n self.is_starting = False\n if peak_max > self.peak_previous:\n if not self.peak_increasing:\n self.peak_increasing = True\n self.peak_max = peak_max\n self.is_falling = False\n else:\n if self.peak_increasing and peak_max < 0.95 * self.peak_max:\n self.peak_increasing = False\n self.is_maximum = True\n self.is_falling = True\n self.peak_previous = peak_max\n return self.is_maximum\n \n def reset_peak_detector(self):\n if self.is_falling:\n self.is_finished = True\n else:\n self.is_finished = False\n self.peak_increasing = False\n self.peak_max = 0\n self.is_maximum = False\n self.is_falling = False\n self.is_silent = True\n \n def set_threshold(self, y):\n self.threshold = y / self.maximum\n \n def clear_buffer(self):\n self.num_frames = 0\n\n def process_audio_data(self):\n # As long as we are getting data:\n while self.stream.is_active():\n \n # Shift the buffer down and new data in\n self.buf[:-FRAME_SIZE] = self.buf[FRAME_SIZE:]\n self.buf[-FRAME_SIZE:] = np.frombuffer(self.stream.read(FRAME_SIZE), np.int16)\n \n # Run the FFT on the windowed buffer\n fft = np.fft.rfft(self.buf * self.window)\n \n # Begin extracting peaks once we have a full buffer\n if self.num_frames >= FRAMES_PER_FFT:\n #print(freqs,ampls)\n #print('freq: {:7.2f} Hz note: {:>3s} 
{:+.2f}'.format(\n #freq, note_name(n0), n-n0))\n self.vals = np.abs(fft[self.imin:self.imax])\n \n thres = min(1, self.maximum/max(self.vals) * self.threshold)\n self.peaks = peakutils.indexes(self.vals, thres = thres, min_dist=30)\n self.tones = self.freqs[self.peaks]\n if len(self.peaks):\n peak_max = np.amax(self.vals[self.peaks])\n self.maximum = max(self.maximum, peak_max)\n self.detect_maximum(peak_max)\n else:\n self.reset_peak_detector()\n break\n else:\n self.num_frames += 1\n\nclass StringTuner:\n _learning = None\n\n _strings = []\n _note_num = np.arange(NOTE_MAX - NOTE_MIN + 1) + NOTE_MIN\n _note_frq = number_to_freq(_note_num)\n _note_str = [note_name(note) for note in _note_num]\n \n # How much to overshoot and correct to reach a note\n _note_overshoot = 0.20\n _note_wiggle = 3\n \n def __init__(self, parent, axis):\n self._parent = parent\n self._axis = axis\n self._position = 0\n self._fit_m = 0\n self._fit_b = 0\n \n self._last_obs = None\n self._last_tune_time = 0\n self._tune_increment = 0.1\n self._tune_step = 1\n self._tune_time = 0\n \n self._motion_t = []\n self._motion_f = []\n \n self._learn_frq = []\n self._learn_pos = []\n \n self._note = None\n \n self._active = False\n \n StringTuner._strings.append(self)\n \n def send_position(self, position, when_done):\n self._position = position\n self._parent.send_position(self._axis, self._position, when_done)\n \n def learn(self):\n if not StringTuner._learning:\n StringTuner._learning = self\n self._learn_frq = []\n self._learn_pos = []\n self._active = False\n else:\n StringTuner._learning = None\n self._note = int(round(self._last_obs))\n self._active = True\n \n def compute_slope(self):\n if len(self._learn_frq) > 4:\n x = self._learn_frq\n y = self._learn_pos\n m,b = np.polyfit(x, y, 1)\n self._fit_m = m\n self._fit_b = b\n print(\"Compute slope!\", m, b)\n \n def reset(self):\n self._parent.send(\"G92 %s%f\" % (self._axis, 0))\n self._position = 0\n \n def motor_off(self):\n self._parent.send(\"M18 %s\" % (self._axis))\n \n def goto_note(self, n):\n if self._active:\n n0 = int(round(n))\n print(\"Goto note %s\" % note_name(n0))\n self.settle_note(n0)\n self._note = n0\n self._last_obs = None\n self._tune_step = 0.75\n self._tune_time = 0\n\n def tension(self, amount, when_done = None):\n self.position(self._position + amount, when_done)\n \n def position(self, pos, when_done=None):\n self.send_position(pos, when_done)\n\n def interpolate_note_pos(self, n):\n return self._fit_b + self._fit_m * number_to_freq(n)\n \n def adjust_intercept(self, freq):\n self._fit_b = self._position - self._fit_m * freq\n \n def settle_note(self, n0):\n for i in range(self._note_wiggle, 0, -1):\n self.position(self.interpolate_note_pos(n0 + self._note_overshoot*i))\n self.position(self.interpolate_note_pos(n0 - self._note_overshoot*i))\n self.position(self.interpolate_note_pos(n0))\n \n def observed_frequency(self, freq, sp):\n # Find nearest note\n n = freq_to_number(freq)\n n0 = int(round(n))\n d = n-n0\n print('freq: {:7.2f} Hz note: {:>3s} {:+.2f}'.format(\n freq, note_name(n0), d))\n \n self._last_obs = n\n self.record_history(freq)\n \n if sp.is_starting:\n self.clear_history()\n\n # Learn from this observation\n \n if PianoRoll._playing and self._active:\n self.tune_to_note(freq) \n \n if sp.is_maximum and not app._marlin._in_motion and StringTuner._learning == self:\n self._learn_pos.append(self._position)\n self._learn_frq.append(freq)\n self.compute_slope()\n \n def clear_history(self):\n self._motion_t = []\n self._motion_f 
= []\n \n def record_history(self, freq):\n self._motion_t.append(time.time())\n self._motion_f.append(freq) \n\n def tune_to_note(self, freq):\n if self._note:\n self.adjust_intercept(freq)\n if not app._marlin._in_motion:\n err = number_to_freq(self._note) - freq\n self.tension(err * self._fit_m * self._tune_step, self.motion_done)\n return\n \n if time.time() - self._last_tune_time > self._tune_time:\n # Figure out when the next tune will take place\n self._last_tune_time = time.time()\n self._tune_time = (FRAME_SIZE * FRAMES_PER_FFT)/FSAMP * self._tune_step * 0.25\n self._tune_step = max(self._tune_step*0.75, 0.1)\n \n def motion_done(self):\n print(\"Motion complete\")\n \n @classmethod\n def play_chord(cls, notes):\n print(\"Chord:\",notes)\n strings = StringTuner.assign_strings_to_notes(notes)\n for stng, note in zip(strings, notes):\n if stng:\n stng.goto_note(note)\n \n @classmethod\n def assign_observations(cls, freqs, sp):\n freqs = freqs.tolist()\n notes = [freq_to_number(freq) for freq in freqs]\n strings = StringTuner.assign_strings_to_notes(notes)\n for stng, freq in zip(strings, freqs):\n if stng:\n stng.observed_frequency(freq, sp)\n \n @classmethod\n def assign_strings_to_notes(cls, notes):\n strgs = [s if s._note else None for s in StringTuner._strings]\n \n while len(strgs) < len(notes):\n strgs.append(None)\n \n assignment = None\n distance = 999999\n for perm in itertools.permutations(strgs, len(notes)):\n d = sum([abs(n - s._note) if s and s._note else 99999 for s,n in zip(perm, notes)])\n if d < distance:\n assignment = perm\n distance = d\n \n return assignment\n \n @classmethod\n def print_string_list(cls):\n # Print out the updated string list\n for i, stng in enumerate(StringTuner._strings):\n if stng._last_obs:\n print(\"%d -> %s\" % (i,note_name(stng._last_obs)))\n \n @classmethod\n def update(cls, sp):\n freqs = sp.tones\n \n if StringTuner._learning and sp.is_maximum:\n StringTuner._learning.observed_frequency(freqs[0], sp)\n StringTuner._learning.tension(-5)\n \n cls.assign_observations(freqs, sp)\n \nclass GraphPage(tk.Frame):\n def __init__(self, parent):\n tk.Frame.__init__(self, parent)\n self.pack()\n\n def add_mpl_figure(self, fig):\n self.fig = fig\n self.mpl_canvas = FigureCanvasTkAgg(fig, self)\n self.mpl_canvas.draw()\n self.mpl_canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n self.toolbar = NavigationToolbar2TkAgg(self.mpl_canvas, self)\n self.toolbar.update()\n self.mpl_canvas._tkcanvas.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n \n self.mpl_canvas.mpl_connect('button_press_event', self.onclick)\n \n def refresh(self, arg):\n self.fig.refresh(arg)\n self.mpl_canvas.draw()\n \n def onclick(self, event):\n #print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n # ('double' if event.dblclick else 'single', event.button,\n # event.x, event.y, event.xdata, event.ydata))\n self.fig.onclick(event)\n\nclass FFTGraph(Figure):\n\n def __init__(self):\n Figure.__init__(self, figsize=(5, 5), dpi=100)\n self._subplot = self.add_subplot(111)\n \n def adjustRange(self, y_max):\n if not hasattr(self,\"_range\"):\n self._range = list(self._subplot.axis())\n if y_max != self._range[3]:\n self._range[3] = y_max\n self._subplot.axis(self._range) \n \n def refresh(self, fft):\n self._fft = fft\n t = fft.maximum * fft.threshold\n if not hasattr(self,\"_fft_plot\") or not hasattr(self,\"_fft_dots\"):\n self._subplot.clear()\n self._fft_plot, = self._subplot.plot(fft.freqs, fft.vals)\n self._fft_dots, = 
self._subplot.plot(fft.freqs[fft.peaks], fft.vals[fft.peaks], 'ro')\n self._range = list(self._subplot.axis())\n self._fft_thrs = self._subplot.axhline(y=t,color='r',linestyle='--')\n self._subplot.set_xlabel('Frequency (Hz)')\n self._subplot.set_ylabel('Amplitude')\n self._subplot.set_title('Audio Spectrum')\n for n in range(NOTE_MAX - NOTE_MIN + 1):\n freq = number_to_freq(n + NOTE_MIN)\n self._subplot.axvline(x=freq,color='g',linestyle=':')\n self._subplot.axvline(x=FSAMP/2,color='b',linestyle='-')\n self._notes = []\n for i,s in enumerate(StringTuner._strings):\n p = self._subplot.axvline(x=number_to_freq(0),color='g',linestyle='-')\n self._notes.append(p)\n else:\n self._fft_plot.set_ydata(fft.vals)\n self._fft_thrs.set_ydata([t,t])\n self._fft_dots.set_xdata(fft.freqs[fft.peaks])\n self._fft_dots.set_ydata(fft.vals[fft.peaks])\n self.adjustRange(fft.maximum)\n for i,s in enumerate(StringTuner._strings):\n if s._note:\n self._notes[i].set_xdata(number_to_freq(s._note))\n \n def onclick(self, event):\n if event.ydata:\n self._fft.set_threshold(event.ydata)\n \nclass PitchGraph(Figure):\n def __init__(self):\n Figure.__init__(self, figsize=(5, 5), dpi=100)\n self._subplot = self.add_subplot(111)\n self._highlight = []\n\n def refresh(self, sp):\n if not hasattr(self,\"_slope\"):\n self._subplot.clear()\n self._subplot.set_xlabel('Pitch')\n self._subplot.set_xticks(StringTuner._note_frq);\n self._subplot.set_xticklabels(StringTuner._note_str, rotation=90)\n self._subplot.set_ylabel('Position')\n self._subplot.set_title('Pitch vs. Position')\n self._lines = []\n self._notes = []\n self._slope = []\n for i,s in enumerate(StringTuner._strings):\n if len(s._learn_frq):\n p, = self._subplot.plot(s._learn_frq, s._learn_pos,'+')\n \n p = self._subplot.axhline(y=s._position,color='g',linestyle=':')\n self._lines.append(p)\n \n p = self._subplot.axvline(x=s._note_frq[0],color='g',linestyle=':')\n self._notes.append(p)\n \n p, = self._subplot.plot([s._note_frq[0], s._note_frq[-1]], [0,0],':',color='r')\n self._slope.append(p)\n #self._subplot.autoscale(False)\n else:\n if len(sp.peaks) == 0:\n self.remove_highlights()\n for i,s in enumerate(StringTuner._strings):\n self._lines[i].set_ydata(s._position)\n if s._note:\n self._notes[i].set_xdata(number_to_freq(s._note))\n if hasattr(s, '_fit_m'):\n m = s._fit_m\n b = s._fit_b\n y0 = s._note_frq[ 0]*m+b\n y1 = s._note_frq[-1]*m+b\n self._slope[i].set_ydata([y0, y1])\n # Highlight the pitches in the data\n if sp.is_maximum:\n for i,s in enumerate(StringTuner._strings):\n if s._last_obs and s._note:\n self.highlight_pitch(s._last_obs,s._note)\n\n def remove_highlights(self):\n for h in self._highlight:\n h.remove()\n self._highlight = []\n \n def highlight_pitch(self, n, n0):\n f = number_to_freq(n)\n f0 = number_to_freq(n0)\n if abs(n-n0) < 0.1:\n c = 'g'\n elif f0 < f:\n c = 'b'\n else:\n c = 'r'\n p = self._subplot.axvspan(min(f,f0), max(f,f0),facecolor=c,alpha=0.5)\n self._highlight.append(p)\n \n def onclick(self, event):\n if event.xdata:\n StringTuner._strings[0].goto_note(freq_to_number(event.xdata))\n \nclass MotionGraph(Figure):\n def __init__(self):\n Figure.__init__(self, figsize=(5, 5), dpi=100)\n self._subplot = self.add_subplot(111)\n \n def refresh(self, sp):\n if sp.is_finished:\n self._subplot.clear()\n self._subplot.set_xlabel('Time')\n for i,s in enumerate(StringTuner._strings):\n t = np.array(s._motion_t)\n if len(t):\n n = freq_to_number(np.array(s._motion_f)) - s._note\n p, = self._subplot.plot(t - t[0], n, 
linestyle='-',marker='.')\n p = self._subplot.axhline(y=0,color='b',linestyle=':')\n p = self._subplot.axhline(y=0.5,color='r',linestyle=':')\n p = self._subplot.axhline(y=-0.5,color='r',linestyle=':')\n self._subplot.set_ylim(-1,1)\n\nclass PortSelector:\n def __init__(self, parent):\n\n self.parent = parent\n self.portname = None\n\n top = self.top = Toplevel(parent)\n\n Label(top, text=\"Select a serial port:\").pack()\n\n l = self.l = Listbox(top)\n for p in self.serial_ports():\n l.insert(END, p)\n l.pack(padx=5)\n\n b = Button(top, text=\"OK\", command=self.ok)\n b.pack(pady=5)\n\n def serial_ports(self):\n \"\"\" Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n \"\"\"\n # Reference: https://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python\n\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n # this excludes your current terminal \"/dev/tty\"\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = []\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result.append(port)\n except (OSError, serial.SerialException):\n pass\n return result\n\n def ok(self):\n items = list(map(int, self.l.curselection()))\n if items:\n self.portname = self.l.get(items[0])\n self.top.destroy()\n\nclass AxisControl(Frame):\n def __init__(self, parent, character, tuner):\n self._character = character.strip()\n self._parent = parent\n self._tuner = tuner\n Frame.__init__(self, parent)\n self.pack()\n self.createWidgets()\n\n def increment(self):\n self._tuner.tension(5)\n self.updateLabel()\n\n def decrement(self):\n self._tuner.tension(-5)\n self.updateLabel()\n\n def set_origin(self):\n self._tuner.reset()\n self.updateLabel()\n \n def motor_off(self):\n self._tuner.motor_off()\n \n def learn(self):\n self._tuner.learn()\n \n def updateLabel(self):\n self.l.configure(text=\"%3.1f\" % self._tuner._position)\n\n def createWidgets(self):\n Label(self, text=self._character + \":\").pack({\"side\": \"left\"})\n Button(self, text=\"+\", command=self.increment ).pack({\"side\": \"left\"})\n Button(self, text=\"-\", command=self.decrement ).pack({\"side\": \"left\"})\n Button(self, text=\"Reset\", command=self.set_origin).pack({\"side\": \"left\"})\n Button(self, text=\"Off\", command=self.motor_off).pack({\"side\": \"left\"})\n Button(self, text=\"Learn\", command=self.learn ).pack({\"side\": \"left\"})\n self.l = Label(self)\n self.l.pack({\"side\": \"left\"})\n self.updateLabel()\n \nclass PianoRoll(Frame):\n\n # Sample song, expressed in semitones\n\n # Edvard Grieg - In The Hall Of The Mountain King\n # https://www.youtube.com/watch?v=K0e3IABZt2s\n song = [-1,1,2,4,6,2,6,5,1,5,4,0,4,-1,1,2,4,6,2,6,11 ,9,6,2,6, 9,\n -1,1,2,4,6,2,6,5,1,5,4,0,4,-1,1,2,4,6,2,6,11 ,9,6,2,6, 9]\n \n # Row Row Row Your Boat\n # https://www.youtube.com/watch?v=ROqgdTRa0bE\n song = [0,0,0,2,4,4,2,4,5,7,12,12,12,7,7,7,4,4,4,0,0,0,7,5,4,2,0]\n \n song = [0,2,4,5,7,9]\n\n line_height = 20\n note_width = 50\n left_margin = 70\n \n _playing = None\n _notes = []\n _label = []\n \n def __init__(self, parent):\n labels = StringTuner._note_str\n canvas_width = len(self.song) * self.note_width + self.left_margin\n canvas_height = 
self.line_height*(len(labels)+1)\n        font=(\"Helvetica\", -self.line_height+4)\n        \n        Frame.__init__(self, parent, bd=2, relief=SUNKEN)\n        self.grid_rowconfigure(0, weight=1)\n        self.grid_columnconfigure(0, weight=1)\n\n        xscrollbar = Scrollbar(self, orient=HORIZONTAL)\n        xscrollbar.grid(row=1, column=0, sticky=E+W)\n\n        yscrollbar = Scrollbar(self)\n        yscrollbar.grid(row=0, column=1, sticky=N+S)\n        \n        c = Canvas(self, bd=0, \n                   scrollregion=(0, 0, canvas_width, canvas_height),\n                   xscrollcommand=xscrollbar.set,\n                   yscrollcommand=yscrollbar.set,\n                   width=300, height=canvas_height)\n        self.canvas = c\n\n        c.grid(row=0, column=0, sticky=N+S+E+W)\n\n        xscrollbar.config(command=c.xview)\n        yscrollbar.config(command=c.yview)\n\n        for i in range(0,len(labels)):\n            c.create_line(0, self.line_height*(i+1 ), 90000, self.line_height*(i+1 ))\n            c.create_text(self.left_margin - 15, self.line_height*(i+1.5), text=labels[i], anchor=E, font=font)\n\n        #min_note = min(self.song)  \n        #for i,n in enumerate(self.song):\n        #    self.add_note_to_canvas(i, n-min_note+NOTE_MIN):\n\n        self.t = 0\n        self.t_line = c.create_line(self.left_margin, 0, self.left_margin, canvas_height, fill=\"red\", dash=(4, 4))\n        self.update_time()\n        \n    def add_notes(self, notes, label=\"\"):\n        pos = len(self._notes)\n        fill = \"red\"\n        for n in notes:\n            self.add_note_to_canvas(pos, n, fill)\n            fill = \"gray\"\n        if label:\n            self.add_label_to_canvas(pos, label)\n        self._notes.append(notes)\n        self._label.append(label)\n\n    def add_label_to_canvas(self, pos, label):\n        x = self.left_margin + (pos+0.5) * self.note_width\n        y = self.line_height * 0.5\n        self.canvas.create_text(x, y, text=label)\n        \n    def add_note_to_canvas(self, pos, note, fill=\"gray\"):\n        x0 = self.left_margin + pos * self.note_width\n        x1 = self.left_margin + (pos+1) * self.note_width\n        y0 = self.line_height * (note-NOTE_MIN+1)\n        y1 = self.line_height * (note-NOTE_MIN+2)\n        self.canvas.create_rectangle(x0, y0, x1, y1, fill=fill)\n        \n    def update_time(self):\n        x = self.left_margin + (self.t + 0.5) * self.note_width\n        self.canvas.coords(self.t_line, x, 0, x, self.canvas.cget('height'))\n        \n    def update(self, sp):\n        if sp.is_maximum:\n            self.next_note()\n\n    def play(self):\n        if not self._playing:\n            # assign on the class: StringTuner.observed_frequency reads\n            # PianoRoll._playing, so an instance attribute would never be seen there\n            PianoRoll._playing = True\n            self.t = 0\n        \n    def stop(self):\n        PianoRoll._playing = False\n\n    def next_note(self):\n        if self._playing:\n            StringTuner.play_chord(self._notes[self.t])\n            self.t = (self.t+1) % len(self._notes)\n            self.update_time()\n\nclass ChordPicker(Frame):\n    def __init__(self, parent):\n        self.parent = parent\n        \n        Frame.__init__(self, parent, bd=2, relief=SUNKEN)\n        Label(self, text=\"Chords\").grid(row=0,columnspan=12)\n\n        def makeFunc(note, root, chord):\n            return lambda: self.selectChord(note,root,chord)\n\n        for i, root in enumerate([\"C\",\"C#\",\"D\",\"D#\",\"E\",\"F\",\"F#\",\"G\",\"G#\",\"A\",\"A#\",\"B\"]):\n            b = Button(self, text=root+\"maj\", command=makeFunc(i+60, root, \"maj\")).grid(row=1,column=i)\n            b = Button(self, text=root+\"min\", command=makeFunc(i+60, root, \"min\")).grid(row=2,column=i)\n        \n    def selectChord(self, note, root, chord):\n        if chord == \"maj\":\n            notes = [note+0, note+4, note+7]\n        elif chord == \"min\":\n            notes = [note+0, note+3, note+7]\n        \n        # If the notes do not fall in the range of the instrument,\n        # find a chord inversion that does.\n        notes = [self.findNoteInRange(note) for note in notes]\n        app._pianoroll.add_notes(notes, root+chord)\n        StringTuner.play_chord(notes)\n        \n    def findNoteInRange(self, note):\n        \"\"\"Shift a note up or down an octave until it fits the note range of the 
instrument\"\"\"\n while note < NOTE_MIN:\n note += 12\n while note > NOTE_MAX:\n note -= 12\n return note\n \nclass Marlin:\n def __init__(self):\n self._serial = None\n self._in_motion = False\n self._when_done = None\n\n def connect(self, port):\n self._serial = serial.Serial(port, baudrate=250000)\n \n def disconnect(self):\n if self._serial:\n self._serial.close()\n self._serial = None\n\n def send(self, cmd):\n if self._serial:\n self._serial.write((cmd+'\\n').encode())\n print(cmd)\n\n def send_position(self, axis, position, when_done = None):\n if self._serial:\n self.send('G0 %s%f F90000' % (axis, position))\n if when_done:\n self._in_motion = True\n self.send('M400\\nM114')\n self._when_done = when_done\n \n def update(self):\n if not self._serial:\n return\n \n if self._serial.inWaiting():\n line = self._serial.readline()\n print(line.decode().strip())\n \n if self._in_motion and line.startswith(b\"X:\"):\n self._in_motion = False\n if self._when_done:\n self._when_done()\n \n if line.startswith(b\"start\"):\n self.initMarlin()\n\n def initMarlin(self):\n self.send(\"M211 S0\") # Turn off endstops\n self.send(\"M203 X90000 Y90000 Z90000\") # Max feedrate\n self.send(\"M201 X18000 Y18000 Z18000\") # Max acceleration\n self.send(\"M204 T18000\") # Max starting accelration\n self.send(\"M907 S870\") # Set motor current\n \nclass Application(Frame):\n def connect(self):\n d = PortSelector(root)\n root.wait_window(d.top)\n if d.portname:\n self._marlin.connect(d.portname)\n self.connect_btn[\"text\"] = \"Disconnect\"\n self.connect_btn[\"command\"] = self.disconnect\n\n def disconnect(self, exiting = False):\n self._marlin.disconnect()\n if not exiting:\n self.connect_btn[\"text\"] = \"Connect\"\n self.connect_btn[\"command\"] = self.connect\n \n def send_position(self, axis, position, when_done = None):\n self._marlin.send_position(axis, position, when_done)\n \n def send(self, cmd):\n self._marlin.send(cmd)\n \n def showSpectrum(self):\n t = tk.Toplevel(self)\n self.fig = FFTGraph()\n self.spec_page = GraphPage(t)\n self.spec_page.add_mpl_figure(self.fig)\n \n def showPitches(self):\n t = tk.Toplevel(self)\n self.fig = PitchGraph()\n self.map_page = GraphPage(t)\n self.map_page.add_mpl_figure(self.fig)\n \n def showMotion(self):\n t = tk.Toplevel(self)\n self.fig = MotionGraph()\n self.motion_page = GraphPage(t)\n self.motion_page.add_mpl_figure(self.fig)\n \n def startPlaying(self):\n self._pianoroll.play()\n self.play_btn[\"text\"] = \"Stop\"\n self.play_btn[\"command\"] = self.stopPlaying\n \n def stopPlaying(self):\n self._pianoroll.stop()\n self.play_btn[\"text\"] = \"Play\"\n self.play_btn[\"command\"] = self.startPlaying\n\n def createWidgets(self):\n # Buttons\n f = Frame(self)\n f.pack({\"side\": \"top\", \"pady\" : 20})\n self.quit_btn = Button(f, text = \"QUIT\", command = self.quit, fg = \"red\")\n self.connect_btn = Button(f, text = \"Connect\", command = self.connect )\n self.spec_btn = Button(f, text = \"Spectrum\", command = self.showSpectrum )\n self.pitch_btn = Button(f, text = \"Pitches\", command = self.showPitches )\n self.motion_btn = Button(f, text = \"Motion\", command = self.showMotion )\n self.quit_btn.pack( {\"side\": \"left\"})\n self.connect_btn.pack({\"side\": \"left\"})\n self.spec_btn.pack({\"side\": \"left\"})\n self.pitch_btn.pack({\"side\": \"left\"})\n self.motion_btn.pack({\"side\": \"left\"})\n \n f = Frame(self)\n # Chord Picker\n self._chord_picker = ChordPicker(f)\n self._chord_picker.pack({\"side\": \"left\"})\n \n self.play_btn = Button(f, 
text = \"Play\", command = self.startPlaying, width=10, font=(\"Helvetica\",16,\"bold\"))\n self.play_btn.pack({\"side\": \"right\", \"fill\":\"y\", \"padx\" : 10, \"pady\" : 10})\n f.pack({\"side\": \"top\", \"padx\" : 10, \"fill\":\"x\", \"expand\":1})\n \n # Scrolling Canvas\n self._pianoroll = PianoRoll(self)\n self._pianoroll.pack({\"side\": \"top\", \"padx\" : 10, \"fill\":\"x\"})\n \n # Axis Control Buttons\n f = Frame(self)\n for i in range(len(string_axis)):\n AxisControl(f, string_axis[i], self._strings[i]).pack({\"side\": \"top\"})\n f.pack({\"side\": \"top\", \"pady\" : 20})\n \n def __init__(self, master=None):\n self._marlin = Marlin()\n self._sp = SoundProcessor()\n self._in_motion = False\n \n self._strings = []\n for a in string_axis:\n self._strings.append(StringTuner(self, a)) \n \n Frame.__init__(self, master)\n self.pack({\"fill\":\"both\"})\n self.createWidgets()\n self.idle()\n\n def idle(self):\n self.after(100, self.idle)\n \n self._marlin.update()\n self._sp.process_audio_data()\n \n StringTuner.update(self._sp)\n \n if hasattr(self,\"spec_page\"):\n self.spec_page.refresh(self._sp)\n \n if hasattr(self,\"map_page\"):\n self.map_page.refresh(self._sp)\n \n if hasattr(self,\"motion_page\"):\n self.motion_page.refresh(self._sp)\n \n self._pianoroll.update(self._sp)\n \nroot = Tk()\napp = Application(master=root)\napp.mainloop()\napp.disconnect(exiting = True)\n#root.destroy()","sub_path":"tuner_gui.py","file_name":"tuner_gui.py","file_ext":"py","file_size_in_byte":34030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"281057534","text":"'''\n DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\n Version 2, December 2004\n\nCopyright (C) 2004 Sam Hocevar \n\nEveryone is permitted to copy and distribute verbatim or modified\ncopies of this license document, and changing it is allowed as long\nas the name is changed.\n\n DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. You just DO WHAT THE FUCK YOU WANT TO.\n'''\n\n\nfrom datetime import datetime\nfrom math import log, exp, sqrt,factorial\nimport pickle\n\n# TL; DR\n# the main learning process start at line 122\n\n\n# parameters #################################################################\n\nimport sys\ndata_dir=sys.argv[1]\nsub_dir=sys.argv[2]\ntrain = data_dir+'train.csv' # path to training file\nlabel = data_dir+'trainLabels.csv' # path to label file of training data\ntest = data_dir+'test.csv' # path to testing file\n\nD = 2 ** 23 # number of weights use for each model, we have 32 of them\nalpha = .1 # learning rate for sgd optimization\n\n\n# function, generator definitions ############################################\n\n# A. 
x, y generator\n# INPUT:\n# path: path to train.csv or test.csv\n# label_path: (optional) path to trainLabels.csv\n# YIELDS:\n# ID: id of the instance (can also acts as instance count)\n# x: a list of indices that its value is 1\n# y: (if label_path is present) label value of y1 to y33\nhash_cols = [35,65,61,62,91,92,142,3,4,61,34,91,94,95]\nhh=len(hash_cols)\nhh=hh*(hh-1)/2+1\ndef data(path, label_path=None):\n for t, line in enumerate(open(path)):\n # initialize our generator\n \n #hash_cols = [61,62,91,92,142,3,4,34,35,61,64,65,91,94,95]\n if t == 0:\n # create a static x,\n # so we don't have to construct a new x for every instance\n \n x = [0] * (146+hh+32)\n if label_path:\n label = open(label_path)\n label.readline() # we don't need the headers\n continue\n # parse x\n for m, feat in enumerate(line.rstrip().split(',')):\n if m == 0:\n ID = int(feat)\n elif m in [23,22,113,114,53,54,138,139]:\n x[m] =-10\n else:\n # one-hot encode everything with hash trick\n # categorical: one-hotted\n # boolean: ONE-HOTTED\n # numerical: ONE-HOTTED!\n # note, the build in hash(), although fast is not stable,\n # i.e., same value won't always have the same hash\n # on different machines\n x[m] = abs(hash(str(m) + '_' + feat)) % D\n # parse y, if provided\n row=line.rstrip().split(',')\n \n t = 146\n for i in xrange(len(hash_cols)):\n for j in xrange(i+1,len(hash_cols)):\n t += 1\n x[t] = abs(hash(str(i)+'_'+str(j)+'_'+row[hash_cols[i]]+\"_x_\"+row[hash_cols[j]])) % D\n #print t #t=145+hh\n #assert(false)\n \n \n \n if label_path:\n # use float() to prevent future type casting, [1:] to ignore id\n y = [float(y) for y in label.readline().split(',')[1:]]\n yield (ID, x, y) if label_path else (ID, x)\n\n\n# B. Bounded logloss\n# INPUT:\n# p: our prediction\n# y: real answer\n# OUTPUT\n# bounded logarithmic loss of p given y\ndef logloss(p, y):\n p = max(min(p, 1. - 10e-15), 10e-15)\n return -log(p) if y == 1. else -log(1. - p)\n\n\n# C. Get probability estimation on x\n# INPUT:\n# x: features\n# w: weights\n# OUTPUT:\n# probability of p(y = 1 | x; w)\ndef predict2(x, w):\n wTx = 0.\n for i in x[:146+hh]: # do wTx\n if i <0:\n continue\n wTx += w[i] * 1.\n for c,i in enumerate(x[146+hh:]): # do wTx\n \n wTx += w[D+c] * i # w[i] * x[i], but if i in x we got x[i] = 1.\n return 1. / (1. + exp(-max(min(wTx, 20.), -20.))) # bounded sigmoid\ndef predict(x, w):\n wTx = 0.\n for i in x[:146+hh]: # do wTx\n if i <0:\n continue\n wTx += w[i] * 1. # w[i] * x[i], but if i in x we got x[i] = 1.\n \n return 1. / (1. + exp(-max(min(wTx, 20.), -20.))) # bounded sigmoid\n\n\n# D. Update given model\n# INPUT:\n# alpha: learning rate\n# w: weights\n# n: sum of previous absolute gradients for a given feature\n# this is used for adaptive learning rate\n# x: feature, a list of indices\n# p: prediction of our model\n# y: answer\n# MODIFIES:\n# w: weights\n# n: sum of past absolute gradients\ndef update(alpha, w, n, x, p, y):\n for i in x[:146+hh]:\n if i <0:\n continue\n # alpha / sqrt(n) is the adaptive learning rate\n # (p - y) * x[i] is the current gradient\n # note that in our case, if i in x then x[i] = 1.\n n[i] += abs(p - y)\n w[i] -= (p - y) * 1. 
* alpha / sqrt(n[i])\n \ndef update2(alpha, w, n, x, p, y):\n \n for c,i in enumerate(x[146+hh:]):\n # alpha / sqrt(n) is the adaptive learning rate\n # (p - y) * x[i] is the current gradient\n # note that in our case, if i in x then x[i] = 1.\n n[D+c] += abs(p - y)\n w[D+c] -= ((p - y) * i * alpha/10 +0*w[D+c]) # /2 is good!\n\n\n# training and testing #######################################################\nstart = datetime.now()\n\n# a list for range(0, 33) - 13, no need to learn y14 since it is always 0\nK = [k for k in range(33) if k != 13]\n\n# initialize our model, all 32 of them, again ignoring y14\nw = [[0.] * (D+32) if k != 13 else None for k in range(33)]\nn = [[0.] * (D+32) if k != 13 else None for k in range(33)]\n\nloss = 0.\nloss2 = 0.\nloss_y14 = log(1. - 10**-15)\n\nfor ID, x, y in data(train, label):\n\n # get predictions and train on all labels\n P=[]\n for k in K:\n p = predict(x, w[k])\n P.append(p)\n # update(alpha, w[k], n[k], x, p, y[k])\n if k<13:\n x[146+hh+k]=p\n else:\n x[145+hh+k]=p\n loss += logloss(p, y[k]) \n for k,p in zip(K,P):\n p2 = predict2(x, w[k])\n update(alpha, w[k], n[k], x, p2, y[k])\n update2(alpha, w[k], n[k], x, p2, y[k])\n loss2 += logloss(p2, y[k]) # for progressive validation\n loss += loss_y14 # the loss of y14, logloss is never zero\n loss2 += loss_y14\n # print out progress, so that we know everything is working\n if ID % 100000 == 0:\n \n print('%s encountered: %d current logloss: %f logloss2: %f' % (\n datetime.now(), ID, (loss/33.)/ID,(loss2/33.)/ID))\n # break\n\nwith open(sub_dir+'./submissiontk19.csv', 'w') as outfile:\n outfile.write('id_label,pred\\n')\n \n for ID, x in data(test):\n for k in K:\n p = predict(x, w[k])\n if k<13:\n x[146+hh+k]=p\n else:\n x[145+hh+k]=p\n for k in K:\n p = predict2(x, w[k])\n outfile.write('%s_y%d,%s\\n' % (ID, k+1, str(p)))\n if k == 12:\n outfile.write('%s_y14,0.0\\n' % ID)\n#pickle.dump( w, open( \"tk16-weights.p\", \"wb\" ) )\nprint('Done, elapsed time: %s' % str(datetime.now() - start))\n","sub_path":"data/external/repositories/120243/tradeshift-text-classification-master/src/online-model/tk19_solution.py","file_name":"tk19_solution.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308283184","text":"def list_of_words(f):\n \"\"\"\n INPUT: file\n OUTPUT: list of words\n\n Create a list of all the unique words in the text file given.\n \"\"\"\n\n words = set() # Use a set instead of a list if you need to check\n # membership in it!\n for line in f:\n for word in line.strip().split():\n if word not in words:\n words.add(word)\n return list(words)\n\n\ndef find_new_words(f, word_dict):\n \"\"\"\n INPUT: file, dictionary\n OUTPUT: list\n\n Given a text file and a dictionary whose keys are words, return a list\n of the words in the file which are not in the dictionary.\n \"\"\"\n\n words = []\n for line in f:\n for word in line.strip().split():\n if word not in word_dict: # using the keys method converts your\n # dictionary to a list, making it take\n # much longer to check membership\n words.append(word)\n return words\n\n\ndef get_average_score(f, word_dict):\n \"\"\"\n INPUT: file, dictionary\n OUTPUT: float\n\n Given a text file and a dictionary whose keys are words and values are a\n score for the word, return the average score of all the words in the\n document. 
You should assume that missing words have a score of 1.\n \"\"\"\n\n score = 0\n count = 0\n for line in f:\n for word in line.strip().split():\n # Don't use try excepts if it can be avoided!\n score += word_dict.get(word, 1)\n count += 1\n return float(score) / count\n\n\ndef find_high_valued_words(word_dict, value):\n \"\"\"\n INPUT: dict, float\n OUTPUT: list\n\n Return the items from word_dict whose values are larger than value.\n \"\"\"\n\n # iteritems is a generator so will be more efficient than items, which\n # returns a list.\n return [key for key, val in word_dict.iteritems() if val > value]\n","sub_path":"1-software-engineering-and-eda/1-python-intro/efficiency.py","file_name":"efficiency.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115353854","text":"S = list(input())\nS.sort()\nans = True\nfor i in range(len(S)-1):\n if S[i] == S[i + 1]:\n ans = False\n break\nif ans:\n print(\"yes\")\nelse:\n print(\"no\")","sub_path":"Python_codes/p03698/s419484452.py","file_name":"s419484452.py","file_ext":"py","file_size_in_byte":155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239465188","text":"import scrapy\nimport re\n\n# this spider downloads data about posts in specific threads\n\nclass PostsSpider(scrapy.Spider):\n name = \"posts\"\n\n start_urls = []\n\n baseurl = 'https://www.margonem.pl/?task=forum&show=posts&id=7606&ps='\n\n for i in range(0,1000):\n start_urls.append(baseurl+str(i))\n\n def parse(self, response):\n count_name = 0\n count_rep = 0\n for i in range(50):\n\n id = response.xpath('//td[contains(@class, \"puser\")]/a/@name')[i].get()\n id = id[4:]\n\n nick = response.xpath('//td[contains(@class, \"puser\")]/text()[2]')[i].get()\n if nick != \"KONTO USUNIĘTE\":\n nick = response.xpath('//div[@class=\"nickwood\"]/h3/text()')[count_name].get()\n count_name+=1\n\n rep = response.xpath('//div[@class=\"repgive\"]')[i].get()\n if len(rep) > 50:\n rep = response.xpath('//div[@class=\"repgive\"]/span[@ctip=\"rep\"]')[count_rep].get()\n rep = rep.split(' ')[-1]\n rep = rep.split('\\t')[0]\n count_rep += 1\n else:\n rep = \"0\"\n\n date = response.xpath('//td[contains(@class, \"postid\")]/text()')[i].get()\n date = date[2:-9]\n\n yield{\n 'id': int(id), \n 'date': date,\n #'nick': nick,\n #'rep': int(rep),\n }\n\n","sub_path":"tutorial/spiders/thread_scraper.py","file_name":"thread_scraper.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"517764032","text":"#ayy lmo\nimport time\nimport sys\n\n\ncalls = [['calls', 'str(calls)'],['help', 'help()'], ['error']]\n\ndef help():\n\tcommands = ''\n\tfor x in calls:\n\t\tcommands += x[0]+'\\n'\n\treturn commands[0:-1]\nclass Item(object):\n\t\"\"\"docstring for Item\"\"\"\n\tdef __init__(self, name):\n\t\tsuper(Item, self).__init__()\n\t\tself.name = name\n\t\tself.requirements = []\n\nclass Inventory(object):\n\t\"\"\"docstring for Inventory\"\"\"\n\tdef __init__(self, itemList):\n\t\tsuper(Inventory, self).__init__()\n\t\tself.itemList = itemList\n\tdef add(self, items):\n\t\tfor x in items:\n\t\t\tself.itemList.append(x)\n\tdef remove(self, items):\n\t\tfor x in items:\n\t\t\tself.itemList.remove(x)\n\tdef contents(self):\n\t\tcontents = ''\n\t\tfor x in self.itemList:\n\t\t\tcontents += x.name + ', '\n\t\treturn str(contents[0:-2])\ncalls.append(['inventory', 'contents', 
'inv', 'Player.inventory.contents()'])\ncalls.append(['take', 'pickup', 'Player.move(\\'$\\')'])\n\nclass Area(object):\n\t\"\"\"docstring for Area\"\"\"\n\tdef __init__(self, name, inventory):\n\t\tsuper(Area, self).__init__()\n\t\tself.name = name\n\t\tself.inventory = Inventory(inventory)\n\tdef pos(self):\n\t\treturn(self.name +'\\n' + self.inventory.contents())\ncalls.append(['look', 'area', 'pos', 'position', 'where', 'location', 'Player.location.pos()'])\ncalls.append(['enter', 'move', 'go', 'Player.enter(\\'$\\')'])\n\nclass Character(object):\n\t\"\"\"docstring for Character\"\"\"\n\tdef __init__(self, name):\n\t\tsuper(Character, self).__init__()\n\t\tself.name = name\n\t\tself.inventory = Inventory([])\n\t\tself.location = Area('', [])\n\tdef enter(self, area):\n\t\tself.location = area\n\tdef move(self, item):\n\t\tmatches = []\n\t\tfor loot in self.location.inventory.itemList:\n\t\t\tif item.lower() in loot.name.lower():\n\t\t\t\tmatches.append(loot)\n\t\tif len(matches)==1:\n\t\t\tself.inventory.add([matches[0]])\n\t\t\tself.location.inventory.remove([matches[0]])\n\t\t\treturn matches[0].name+\" Taken.\"\n\t\telif len(matches)>1:\n\t\t\treturn \"Too many matches to '\"+item+\"'\"\n\t\telif len(matches)==0:\n\t\t\treturn \"'\"+item+\"' not found\"\n\ndef wait(time_lapse):\n\t\"\"\"Alternative to using sleep() by Daniel Gibbs\"\"\"\n\ttime_start = time.time()\n\ttime_end = (time_start + time_lapse)\n \n\twhile time_end > time.time():\n\t\tpass\n\ndef gprint(s, t=0):\n\t\"\"\"Gradual printing.\"\"\"\n\tif t == 0:\n\t\tt = len(s)/1337\n\t\tif t*len(s)>2: t = 1.5/len(s)\n\tfor c in s:\n\t\twait(t)\n\t\tsys.stdout.write(c)\n\t\tsys.stdout.flush() \n\tprint('')\n\n\ndef main():\t\n\terror = 'No errors'\n\twhile 1:\n\t\tuserInput = input('>')\n\t\tif userInput == 'quit':\n\t\t\tbreak\n\n\t\tuserInput = userInput.split(' ', 1)\n\t\tresponse = \"'\"+ userInput[0] +\"' is not a recognized command.\"\n\n\t\tfor call in calls:\n\t\t\tfor ID in call:\n\t\t\t\tif ID == userInput[0]:\n\t\t\t\t\tcommand = call[-1]\n\t\t\t\t\ttry: command = command.replace('$', str(userInput[1]))\n\t\t\t\t\texcept: userInput.append('')\n\n\t\t\t\t\ttry: response = eval(command)\n\t\t\t\t\texcept Exception as e: \n\t\t\t\t\t\tresponse =\"'\"+ userInput[1] +\"' not found.\"\n\t\t\t\t\t\terror = \"Error at (\"+command+\"), {\"+str(e)+\"}\"\n\t\t\t\t\tbreak\n\t\tgprint('\\n'+ str(response))\n\n\t\tif BlueChair in Player.inventory.itemList:\n\t\t\tgprint(\"You Win!\", 0.2)\n\t\t\tbreak\n\n\nPlayer = Character(\"Player\")\nBlueChair = Item(\"Blue Chair\")\nSpoon = Item(\"Rusty Spoon\")\nWoodenChair = Item(\"Wooden Chair\")\nCupcake = Item(\"Poisonous liar cupcake\")\nBasement = Area('Basement', [BlueChair, WoodenChair, Cupcake])\n\nPlayer.inventory.add([Spoon])\nPlayer.enter(Basement)\n\ngprint(\"WELCOME TO BLUECHAIRS!\", 0.07)\nmain()","sub_path":"HowToPickUpBlueChairs.py","file_name":"HowToPickUpBlueChairs.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"280644950","text":"from rest_framework import generics, permissions\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import get_object_or_404\nfrom . import serializers\nfrom . import models\n\n\"\"\"\nRecipe Views.\n\nRecipes can only be updated or deleted through the `RecipeDetailUser`\nview which is restricted to the current authenticated user. 
This prevents\nunauthorised modifications.\n\"\"\"\n\n\ndef filter_by_user(user_id):\n \"\"\"\n Return a queryset of recipes belonging to the supplied user by\n navigating the reverse relationship.\n\n Returns 404 response if the user doesn't exist.\n \"\"\"\n user_model = get_user_model()\n user = get_object_or_404(user_model, pk=user_id)\n return user.recipes\n\n\nclass RecipeCreate(generics.CreateAPIView):\n \"\"\"\n POST - Create a new recipe entry from the request data.\n\n Restricted to the current logged-in user, who is set as the recipe owner.\n \"\"\"\n queryset = models.Recipe.objects.all()\n serializer_class = serializers.RecipeSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def perform_create(self, serializer):\n \"\"\"\n Get the current user from the request and set as the `user` field on\n the model instance.\n \"\"\"\n serializer.save(user=self.request.user)\n\n\nclass RecipeListPublic(generics.ListAPIView):\n \"\"\"\n GET - Return all user created recipes that are publicly shared.\n \"\"\"\n queryset = models.Recipe.objects.filter(is_public=True)\n serializer_class = serializers.ListRecipeSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n\nclass RecipeDetailPublic(generics.RetrieveAPIView):\n \"\"\"\n GET - Return the detail of a public recipe by its ID.\n \"\"\"\n queryset = models.Recipe.objects.filter(is_public=True)\n serializer_class = serializers.RecipeSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,)\n\n\nclass RecipeListUser(generics.ListAPIView):\n \"\"\"\n GET - Return all recipes for the user.\n \"\"\"\n serializer_class = serializers.ListRecipeSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_queryset(self):\n return filter_by_user(self.kwargs['user'])\n\n\nclass RecipeDetailUser(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n GET, PUT, DELETE\n\n Retrieve, update or delete a recipe by its ID. 
Restricted to\n recipes for the currently logged-in user.\n \"\"\"\n serializer_class = serializers.RecipeSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_queryset(self):\n return filter_by_user(self.kwargs['user'])\n","sub_path":"recipe-api/recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448215921","text":"def dynamic(a):\n b = [] #结果\n length = len(a)\n for i in range(length):\n c = []\n sum1 = 0\n for j in range(i):\n c.append(sum1)\n for j in range(i, length):\n sum1 += a[j]\n c.append(sum1)\n b.append(c)\n for i in range(length-1): #求每位最大序列和\n for j in range(i+1, length):\n b[i][j] = max(b[i][j], b[i][j-1])\n d = [b[0][0]] #0--i-1最大序列和\n e = [b[-1][-1]] #i--n-1最大序列和\n for j in range(1, length):\n key = b[0][j]\n for i in range(1, j+1):\n if key < b[i][j]:\n key = b[i][j]\n d.append(key)\n for j in range(1, length):\n key = b[-1][-1]\n for i in range(-j-1, -1):\n if key < b[i][-1]:\n key = b[i][-1]\n e.append(key)\n ans = [e[-1]] #n个元素从左到右的分界线为下标\n for i in range(1, length):\n ans.append(d[i-1]+e[-i-1])\n ans.append(d[-1])\n key = ans[0]\n for i in range(1, length+1):\n if key < ans[i]:\n key = ans[i]\n return key\n","sub_path":"IntroductionWork/Dynamic.py","file_name":"Dynamic.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270250079","text":"\"\"\"Process application events.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os\nimport time\n\nimport kazoo.client\nimport six\n\nfrom treadmill import fs\nfrom treadmill import dirwatch\nfrom treadmill import sysinfo\nfrom treadmill import utils\nfrom treadmill import yamlwrapper as yaml\nfrom treadmill import zknamespace as z\nfrom treadmill import zkutils\n\n\n_LOGGER = logging.getLogger(__name__)\n\n_SERVERS_ACL = zkutils.make_role_acl('servers', 'rwcda')\n\n_HOSTNAME = sysinfo.hostname()\n\n\ndef _publish_zk(zkclient, when, instanceid, event_type, event_data, payload):\n \"\"\"Publish application event to ZK.\n \"\"\"\n eventnode = '%s,%s,%s,%s' % (when, _HOSTNAME, event_type, event_data)\n _LOGGER.debug('Creating %s', z.path.trace(instanceid, eventnode))\n try:\n zkutils.with_retry(\n zkutils.create,\n zkclient,\n z.path.trace(instanceid, eventnode),\n payload,\n acl=[_SERVERS_ACL]\n )\n except kazoo.client.NodeExistsError:\n pass\n\n if event_type in ['aborted', 'killed', 'finished']:\n # For terminal state, update the finished node with exit summary.\n zkutils.with_retry(\n zkutils.put,\n zkclient,\n z.path.finished(instanceid),\n {'state': event_type,\n 'when': when,\n 'host': _HOSTNAME,\n 'data': event_data},\n acl=[_SERVERS_ACL],\n )\n\n _unschedule(zkclient, instanceid)\n\n\ndef _unschedule(zkclient, instanceid):\n \"\"\"Safely delete scheduled node.\"\"\"\n scheduled_node = z.path.scheduled(instanceid)\n\n # Check placement node. 
Only delete scheduled app if it is currently\n # placed on the server.\n #\n # If we are processing stale events, app can be placed elsewhere, and in\n # this case this server does not own placement and should not delete\n # scheduled node.\n placement_node = z.path.placement(_HOSTNAME, instanceid)\n\n if zkclient.exists(placement_node):\n _LOGGER.info('Unscheduling: %s', scheduled_node)\n zkutils.with_retry(\n zkutils.ensure_deleted, zkclient,\n scheduled_node\n )\n else:\n _LOGGER.info('Stale event, placement does not exist: %s',\n placement_node)\n\n\ndef post_zk(zkclient, event):\n \"\"\"Post and publish application event directly to ZK.\n\n Can be used if event directory is unknown (i.e. master/scheduler \"API\").\n \"\"\"\n _LOGGER.debug('post_zk: %r', event)\n\n (\n _ts,\n _src,\n instanceid,\n event_type,\n event_data,\n payload\n ) = event.to_data()\n _publish_zk(\n zkclient, str(time.time()), instanceid, event_type, event_data, payload\n )\n\n\ndef post(events_dir, event):\n \"\"\"Post application event to event directory.\n \"\"\"\n _LOGGER.debug('post: %s: %r', events_dir, event)\n\n (\n _ts,\n _src,\n instanceid,\n event_type,\n event_data,\n payload\n ) = event.to_data()\n filename = '%s,%s,%s,%s' % (\n time.time(),\n instanceid,\n event_type,\n event_data\n )\n\n def _write_temp(temp):\n if payload is None:\n pass\n elif isinstance(payload, six.string_types):\n temp.write(payload)\n else:\n yaml.dump(payload, stream=temp)\n\n fs.write_safe(\n os.path.join(events_dir, filename),\n _write_temp,\n prefix='.tmp',\n mode='w',\n permission=0o644\n )\n\n\nclass AppEventsWatcher(object):\n \"\"\"Publish app events from the queue.\"\"\"\n\n def __init__(self, zkclient, events_dir):\n self.zkclient = zkclient\n self.events_dir = events_dir\n\n def run(self):\n \"\"\"Monitors the events directory and publishes events.\"\"\"\n\n watch = dirwatch.DirWatcher(self.events_dir)\n watch.on_created = self._on_created\n\n for eventfile in os.listdir(self.events_dir):\n filename = os.path.join(self.events_dir, eventfile)\n self._on_created(filename)\n\n while True:\n if watch.wait_for_events(60):\n watch.process_events()\n\n @utils.exit_on_unhandled\n def _on_created(self, path):\n \"\"\"This is the handler function when new files are seen\"\"\"\n if not os.path.exists(path):\n return\n\n localpath = os.path.basename(path)\n if localpath.startswith('.'):\n return\n\n _LOGGER.info('New event file - %r', path)\n\n # maxsplit=3 yields exactly four fields and keeps any commas\n # inside event_data together.\n when, instanceid, event_type, event_data = localpath.split(',', 3)\n with io.open(path) as f:\n payload = f.read()\n _publish_zk(\n self.zkclient, when, instanceid, event_type, event_data, payload\n )\n os.unlink(path)\n","sub_path":"lib/python/treadmill/appevents.py","file_name":"appevents.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"307583720","text":"import sys\nimport requests\nimport bs4\nimport webbrowser\nimport pprint\nimport pyperclip as clipboard\n\nprint('Searching...')\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\nurl = 'https://www.google.com/search?q='\n\nif len(sys.argv) > 1:\n keyword = ' '.join(sys.argv[1:])\nelse:\n keyword = clipboard.paste()\n\nquery = url + keyword\nres = requests.get(query, headers= headers)\n\nparsed = bs4.BeautifulSoup(res.text, 'html.parser')\ng_link = parsed.select('.r a')\nlinks_filtered = [link.get('href') for link in g_link if '#' not in 
str(link) if 'webcache' not in str(link)]\n\nfor link in links_filtered:\n webbrowser.open_new_tab(link)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188164813","text":"#! @PYTHON@\n#\n# $Id$\n#\n# @Copyright@\n# \t\t\t\tRocks(r)\n# \t\t www.rocksclusters.org\n# \t\t version 5.4 (Maverick)\n# \n# Copyright (c) 2000 - 2010 The Regents of the University of California.\n# All rights reserved.\t\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright\n# notice unmodified and in its entirety, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided \n# with the distribution.\n# \n# 3. All advertising and press materials, printed or electronic, mentioning\n# features or use of this software must display the following acknowledgement: \n# \n# \t\"This product includes software developed by the Rocks(r)\n# \tCluster Group at the San Diego Supercomputer Center at the\n# \tUniversity of California, San Diego and its contributors.\"\n# \n# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,\n# neither the name or logo of this software nor the names of its\n# authors may be used to endorse or promote products derived from this\n# software without specific prior written permission. The name of the\n# software includes the following terms, and any derivatives thereof:\n# \"Rocks\", \"Rocks Clusters\", and \"Avalanche Installer\". For licensing of \n# the associated name, interested parties should contact Technology \n# Transfer & Intellectual Property Services, University of California, \n# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910, \n# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu\n# \n# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# @Copyright@\n#\n# $Log$\n# Revision 1.19 2010/09/07 23:53:07 bruno\n# star power for gb\n#\n# Revision 1.18 2009/05/01 19:07:07 mjk\n# chimi con queso\n#\n# Revision 1.17 2009/04/27 18:03:33 bruno\n# remove dead setRCS* and getRCS* functions\n#\n# Revision 1.16 2009/01/08 01:20:58 bruno\n# for anoop\n#\n# Revision 1.15 2008/10/18 00:56:01 mjk\n# copyright 5.1\n#\n# Revision 1.14 2008/08/20 23:41:22 bruno\n# 'lan' is no longer part of the distro path\n#\n# Revision 1.13 2008/03/06 23:41:43 mjk\n# copyright storm on\n#\n# Revision 1.12 2008/01/04 21:40:53 bruno\n# closer to V\n#\n# Revision 1.11 2007/06/23 04:03:23 mjk\n# mars hill copyright\n#\n# Revision 1.10 2006/09/11 22:47:16 mjk\n# monkey face copyright\n#\n# Revision 1.9 2006/08/10 00:09:37 mjk\n# 4.2 copyright\n#\n# Revision 1.8 2006/01/16 06:48:58 mjk\n# fix python path for source built foundation python\n#\n# Revision 1.7 2005/10/12 18:08:39 mjk\n# final copyright for 4.1\n#\n# Revision 1.6 2005/09/16 01:02:19 mjk\n# updated copyright\n#\n# Revision 1.5 2005/07/11 23:51:35 mjk\n# use rocks version of python\n#\n# Revision 1.4 2005/06/01 20:33:00 mjk\n# fixed for lastest pylib, still a bad idea\n#\n# Revision 1.3 2005/05/24 21:21:54 mjk\n# update copyright, release is not any closer\n#\n# Revision 1.2 2005/04/06 17:54:13 mjk\n# fix extra newlines (bug reported by platform\n#\n# Revision 1.1 2005/03/01 02:02:48 mjk\n# moved from core to base\n#\n# Revision 1.4 2005/02/11 23:38:16 mjk\n# - blow up the bridge\n# - kgen and kroll do actually work (but kroll is not complete)\n# - file,roll attrs added to all tags by kpp\n# - gen has generator,nodefilter base classes\n# - replaced rcs ci/co code with new stuff\n# - very close to adding rolls on the fly\n#\n# Revision 1.3 2005/02/02 00:58:08 mjk\n# DOM\n#\n# Revision 1.2 2005/02/02 00:16:56 mjk\n# *** empty log message ***\n#\n# Revision 1.1 2005/02/01 01:49:21 mjk\n# KRoll\n#\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport string\nimport socket\nimport syslog\nimport stack.kickstart\nimport stack.gen\nimport re\nimport time\nfrom xml.dom\t\t\timport ext\nfrom xml.dom.ext.reader\t\timport Sax2\nfrom xml.sax._exceptions\timport SAXParseException\n\nclass NodeFilter(stack.gen.NodeFilter):\n\n\tdef __init__(self, arch, rolls):\n\t\tself.os = os.uname()[0].lower()\n\t\tstack.gen.NodeFilter.__init__(self, arch, self.os)\n\t\tself.rolls = rolls\n\t\t\n\tdef isFromRolls(self, node):\n\t\ttry:\n\t\t\troll = node.attributes.getNamedItem((None, 'roll'))\n\t\t\tif roll.value in self.rolls:\n\t\t\t\treturn 1\n\t\texcept AttributeError:\n\t\t\tpass\n\t\treturn 0\n\t\t\n\tdef acceptNode(self, node):\n\t\tif node.nodeName == 'kickstart':\n\t\t\treturn self.FILTER_ACCEPT\n\t\t\t\n\t\tif not self.isCorrectCond(node):\n\t\t\treturn self.FILTER_SKIP\n\n\t\tif not self.isFromRolls(node):\n\t\t\treturn self.FILTER_SKIP\n\n\t\ttags = [ 'description', 'package', 'post' ]\n\t\tif node.nodeName not in tags:\n\t\t\treturn self.FILTER_SKIP\t\t\t\n\n\t\treturn 
self.FILTER_ACCEPT\n\n\nclass Generator(stack.gen.Generator):\n\n\tdef __init__(self):\n\t\tstack.gen.Generator.__init__(self)\n\t\tself.ks\t\t\t= {}\n\t\tself.ks['order']\t= []\n\t\tself.ks['rpms']\t\t= []\n\t\tself.ks['post']\t\t= []\n\t\tself.rolls\t\t= []\n\t\tself.rpms\t\t= None\n\t\t\n\tdef setRolls(self, rolls):\n\t\tself.rolls = rolls\n\t\t\n\tdef getRolls(self):\n\t\treturn self.rolls\n\t\t\n\tdef setRPMS(self, rpms):\n\t\tself.rpms = rpms\n\t\t\n\tdef getRPMS(self):\n\t\treturn self.rpms\n\t\t\n\t##\n\t## Parsing Section\n\t##\n\t\n\tdef parse(self, doc):\n\t\tfilter = NodeFilter(self.getArch(), self.getRolls())\n\t\titer = doc.createTreeWalker(doc, filter.SHOW_ELEMENT,\n\t\t\tfilter, 0)\n\t\tnode = iter.nextNode()\n\t\twhile node:\n\t\t\tif node.nodeName != 'kickstart':\n\t\t\t\teval('self.handle_%s(node)' % node.nodeName)\n\t\t\tnode = iter.nextNode()\n\t\t\t\n\t# \n\t\t\t\t\n\tdef handle_description(self, node):\n\t\tattr = node.attributes\n\t\tfile = attr.getNamedItem((None, 'file'))\n\t\tself.ks['order'].append(file.value)\n\t\t\n\t# \n\t\t\n\tdef handle_package(self, node):\n\n\t\t# ignore disabled packages\n\t\t\n\t\tif self.isDisabled(node):\n\t\t\treturn\n\t\t\t\n\t\t# we don't support meta packages, so ignore them\n\t\t\n\t\tif self.isMeta(node):\n\t\t\treturn\n\t\t\n\t\tbasename = string.strip(self.getChildText(node))\n\t\tfor rpm in self.rpms:\n\t\t\tif rpm.getBaseName() == basename:\n\t\t\t\tself.ks['rpms'].append(rpm.getFullName())\n\n\t# \n\t\n\tdef handle_post(self, node):\n\t\tattr = node.attributes\n\t\tif attr.getNamedItem((None, 'arg')):\n\t\t\targ = attr.getNamedItem((None, 'arg')).value\n\t\telse:\n\t\t\targ = ''\n\t\tlist = []\n\t\tlist.append(arg)\n\t\tlist.append(self.getChildText(node))\n\t\tself.ks['post'].append(list)\n\t\t\n\t# <*>\n\t#\t<*> - tags that can go inside any other tags\n\t# \n\n\tdef getChildText(self, node):\n\t\ttext = ''\n\t\tfor child in node.childNodes:\n\t\t\tif child.nodeType == child.TEXT_NODE:\n\t\t\t\ttext += child.nodeValue\n\t\t\telif child.nodeType == child.ELEMENT_NODE:\n\t\t\t\ttext += eval('self.handle_child_%s(child)' \\\n\t\t\t\t\t% (child.nodeName))\n\t\treturn text\n\n\t\n\t# <*>\n\t#\t\n\t# \n\n\tdef handle_child_file(self, node):\n\t\treturn self.parseFile(node)\n\t\n\t\t\n\n\t##\n\t## Generator Section\n\t##\n\t\t\t\n\tdef generate(self, section):\n\t\t\"\"\"Dump the requested section of the kickstart file. 
If none \n\t\texists do nothing.\"\"\"\n\t\ttry:\n\t\t\tf = getattr(self, \"generate_%s\" % section)\n\t\t\tf()\n\t\texcept:\n\t\t\tpass\n\t\t\n\tdef generate_order(self):\n\t\tprint('#')\n\t\tprint('# Node Traversal Order')\n\t\tprint('#')\n\t\tfor line in self.ks['order']:\n\t\t\tprint('#', line)\n\t\tprint('#')\n\t\tprint()\n\n\tdef generate_packages(self):\n\t\tprint('#')\n\t\tprint('# Kickstart Packages Section')\n\t\tprint('#')\n\t\tfor rpm in self.ks['rpms']:\n\t\t\tprint('rpm -Uvh --force --nodeps ', rpm)\n\t\tprint()\n\n\tdef generate_post(self):\n\t\tprint('#')\n\t\tprint('# Kickstart Post Section')\n\t\tprint('#')\n\t\tfor list in self.ks['post']:\n\t\t\tprint('#', list[0])\n\t\t\tprint(string.join(list[1:], '\\n'))\n\t\tprint()\n\t\t\t\n\n\nclass App(stack.kickstart.Application):\n\n\tdef __init__(self, argv):\n\t\tstack.kickstart.Application.__init__(self, argv)\n\t\tself.usage_name\t\t= 'Kickstart Roll'\n\t\tself.usage_version\t= '@VERSION@'\n\t\tself.generator\t\t= Generator()\n\n\tdef run(self):\n\t\trolls = self.args\n\t\tif not rolls:\n\t\t\tself.usage()\n\t\t\tsys.exit(-1)\n\n\t\tself.connect()\n\t\t\n\t\ttry:\n\t\t\thostname = string.split(socket.gethostname(), '.')[0]\n\t\t\thostaddr = socket.gethostbyname(hostname)\n\t\texcept:\n\t\t\tprint('error - cannot determine hostname')\n\t\t\tsys.exit(-1)\n\n\t\ttry:\n\t\t\tself.execute('select '\n\t\t\t\t'appliances.graph, '\n\t\t\t\t'appliances.node, '\n\t\t\t\t'distributions.name '\n\t\t\t\t'from nodes, memberships, appliances, '\n\t\t\t\t'distributions where nodes.name=\"%s\" and '\n\t\t\t\t'nodes.membership=memberships.id and '\n\t\t\t\t'memberships.appliance=appliances.id and '\n\t\t\t\t'memberships.distribution=distributions.id'\n\t\t\t\t% hostname)\n\t\t\tgraph, node, dist = self.fetchone()\n\t\texcept:\n\t\t\tprint('error = cannot find host in database')\n\t\t\tsys.exit(-1)\n\t\t\n\t\tself.dist.setDist(os.path.join(dist))\n\t\tself.dist.setArch(self.arch)\n\t\tself.dist.build()\n\t\tdistroot = self.dist.getReleasePath()\n\t\tbuildroot = os.path.join(distroot, 'build')\n\n\t\tkpp = 'kpp --graph=%s --client=%s --client-ip=%s ' \\\n\t\t\t'--arch=%s --distribution=%s %s' % (\n\t\t\tgraph, hostname, hostaddr, self.arch, \n\t\t\tself.dist.getReleasePath(), node)\n\n\t\ttry:\n\t\t\tos.chdir(buildroot)\n\t\texcept:\n\t\t\tprint('error - cannot find distribution (%s)' % \\\n\t\t\t\tbuildroot)\n\t\t\tsys.exit(-1)\n\n\t\ttext = []\n\t\tfor line in os.popen(kpp).readlines():\n\t\t\ttext.append(line[:-1])\n\t\treader = Sax2.Reader()\n\t\tdoc = reader.fromString(string.join(text, '\\n'))\n\t\t\t\t\n\t\tself.generator.setArch(self.arch)\n\t\tself.generator.setRolls(rolls)\n\t\tself.generator.setRPMS(self.dist.getRPMS())\n\t\tself.generator.parse(doc)\n\n\t\tprint('#!/bin/sh')\t\t\n\t\tprint('#')\n\t\tprint('# %s version %s' % (self.usage_name, self.usage_version))\n\t\tprint('#')\n\n\t\tfor s in [ 'order', 'packages', 'post' ]:\n\t\t\tself.generator.generate(s)\n\n\n\napp = App(sys.argv)\napp.parseArgs()\napp.run()\n\n","sub_path":"src/stack/kickstart/kroll.py","file_name":"kroll.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"484707032","text":"import os\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nNAME = 'zope2makesite'\n\nVERSION = '0.1'\n\nsetup(name='Products.%s' % NAME,\n version=VERSION,\n description='Makes the zope2-root a site (providing a componentregistry).',\n author=\"Daniel Havlik\",\n 
author_email=\"dh@gocept.com\",\n license=\"ZPL 2.1 (http://www.zope.org/Resources/License/ZPL-2.1)\",\n packages=find_packages(),\n include_package_data=True,\n namespace_packages=['Products'],\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'five.localsitemanager',\n ],\n entry_points=\"\"\"\n [zope2.initialize]\n Products.%s = Products.%s:initialize\n \"\"\" % (NAME, NAME),\n )\n\n\n","sub_path":"pypi_install_script/Products.zope2makesite-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60823742","text":"# Time: O(logn)\n# Space: O(1)\n#\n# 70\n# You are climbing a stair case. It takes n steps to reach to the top.\n#\n# Each time you can either climb 1 or 2 steps.\n# In how many distinct ways can you climb to the top?\n\nimport itertools\n\n# Use MATRIX MULTIPLICATION to obtain the nth Fibonacci Number. The matrix takes the following form:\n# Q = | 1 1 |\n# | 1 0 |\n#\n# Q^2 = | 2 1 | Q^3 = | 3 2 |\n# | 1 1 | | 2 1 |\n# As per the method, the nth Fibonacci Number is given by Q^(n-1)[0,0].\n#\n# Let's look at the proof of this method.\n# Assume F_n = Q^(n-1)[0,0] where\n# Q^(n-1) = | F_n F_n-1 |\n# | F_n-1 F_n-2 |\n# Then\n# Q^n = | F_n F_n-1 | * | 1 1 | = | F_n + F_n-1 F_n | = | F_n+1 F_n |\n# | F_n-1 F_n-2 | | 1 0 | | F_n-1 + F_n-2 F_n-1 | | F_n F_n-1 |\n# Thus F_n+1 = Q^n[0,0]\n\nclass Solution(object):\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n def matrix_expo(A, K):\n N = len(A)\n result = [[int(i==j) for j in range(N)] for i in range(N)]\n while K:\n if K % 2:\n result = matrix_mult(result, A)\n A = matrix_mult(A, A)\n K //= 2\n return result\n\n def matrix_mult(A, B):\n return [[sum(a*b for a, b in zip(row, col)) for col in zip(*B)]\n for row in A]\n ''' WRONG cannot write: ZB is iterated only once. 
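(zip(*B) is a one-shot iterator in Python 3, so it is already exhausted after processing the first row of A.) 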
Should use a new zip(*B) for each row.\n ZB = zip(*B)\n return [[sum(a*b for a, b in zip(row, col)) for col in ZB]\n for row in A]\n '''\n\n T = [[1, 1],\n [1, 0]]\n return matrix_expo(T, n)[0][0]\n\n\n# Time: O(n)\n# Space: O(1)\nclass Solution2(object):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n def climbStairs(self, n): # Fibonacci Number\n prev, current = 0, 1\n for i in range(n):\n prev, current = current, prev + current\n return current\n\n # Time: O(2^n), size of recursion tree is 2^n\n # Space: O(n)\n def climbStairs_bruteForce(self, n):\n if n == 1:\n return 1\n if n == 2:\n return 2\n return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n\n # Math formula:\n # F_n = [ Phi^n - phi^n ]/Sqrt[5].\n # where Phi=(1+Sqrt[5])/2,phi=(1-Sqrt[5])/2\n def climbStairs_math(self, n):\n sqrt5 = 5 ** 0.5 # sqrt(5); the math module is never imported in this file\n Phi = (1 + sqrt5) / 2\n phi = (1 - sqrt5) / 2\n return int((Phi ** (n + 1) - phi ** (n + 1)) / sqrt5)\n\n\nif __name__ == \"__main__\":\n print(Solution().climbStairs(5)) # 8\n print(Solution().climbStairs(10)) # 89\n","sub_path":"Python/climbing-stairs.py","file_name":"climbing-stairs.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"147173791","text":"import matplotlib.pyplot as plt\nimport netCDF4\nimport os\nimport pandas\n\ndef read_netcdf(nc_full_name,variable_list=[]):\n \"\"\"\n Purpose:\n Read an OzFlux netCDF file and return the data in a Pandas data frame.\n Usage:\n df = qcio.nc_read_todf(nc_full_name,variable_list=variable_list)\n where nc_full_name is the full name of the netCDF file.\n variable_list (optional) is a list of variables to be read\n If variable_list is not passed, all variables in the netCDF are returned.\n Side effects:\n Returns a Pandas data frame containing the data indexed by datetime and\n a dictionary containing the global and variable attributes.\n Author: PRI using code originally written by Ian McHugh\n Date: June 2015\n \"\"\"\n # check to see if the file exists\n if \"http\" not in nc_full_name.lower():\n if not os.path.exists(nc_full_name):\n raise Exception(\"read_netcdf: input file \"+nc_full_name+\" not found\")\n # read the netCDF file\n nc_file = netCDF4.Dataset(nc_full_name,\"r\")\n # create a dictionary to hold the global and variable attributes\n attr = {}\n attr[\"global\"] = {}\n attr[\"variable\"] = {}\n # now deal with the global attributes\n gattrlist = nc_file.ncattrs()\n if len(gattrlist)!=0:\n for item in gattrlist:\n attr[\"global\"][item] = getattr(nc_file,item)\n # get a list of Python datetimes from the time variable\n time = nc_file.variables[\"time\"][:]\n time_units = getattr(nc_file.variables[\"time\"],\"units\")\n dates_list = list(netCDF4.num2date(time,time_units))\n # get a list of variables to read from the netCDF file\n # was a variable list passed in as variable_list?\n if len(variable_list)==0:\n # if not, get the variable list from the netCDF file contents\n variable_list = list(nc_file.variables.keys())\n else:\n # if so, add the QC flags to the list entered as an argument\n flag_list = []\n for item in variable_list: flag_list.append(item+\"_QCFlag\")\n variable_list = variable_list+flag_list\n # read the variables and attributes from the netCDF file\n # create a dictionary to hold the data\n data = {}\n # loop over the variables to be read\n for item in variable_list:\n # get the number of dimensions\n # variables in OzFlux netCDF files can have 1 (time) or 3 dimensions (time,latitude,longitude)\n 
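# anything else (e.g. a 2-D variable) falls through to the error branch below\n 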
ndims = len(nc_file.variables[item].shape)\n if ndims==1:\n data[item] = nc_file.variables[item][:]\n elif ndims==3:\n # drop the degenerate dimensions (latitude and longitude)\n data[item] = nc_file.variables[item][:,0,0]\n else:\n raise Exception(\"unrecognised number of dimensions for variable\"+str(item))\n # get the variable attributes\n vattrlist = nc_file.variables[item].ncattrs()\n if len(vattrlist)!=0:\n attr[\"variable\"][item] = {}\n for vattr in vattrlist:\n attr[\"variable\"][item][vattr] = getattr(nc_file.variables[item],vattr)\n nc_file.close()\n # convert the dictionary to a Pandas data frame\n df = pandas.DataFrame(data,index=dates_list)\n return df,attr\n\n# read the variables from the local netCDF file\nnc_full_name = \"../../Sites/Whroo/Data/Processed/all/Whroo_2011_to_2014_L6.nc\"\nvariable_list = ['Fsd','Ta','VPD','NEE_SOLO']\nprint(\"reading local netCDF file\")\ndf,attr = read_netcdf(nc_full_name,variable_list=variable_list)\n\n# plot the variables\nprint(\"plotting local netCDF file\")\nfig = plt.figure(1)\nplt.figtext(0.5,0.95,\"Local file\",horizontalalignment='center')\nax1 = plt.subplot(411)\nax1.plot(df.index.values,df['Fsd'])\nax2 = plt.subplot(412,sharex=ax1)\nax2.plot(df.index.values,df['Ta'])\nax3 = plt.subplot(413,sharex=ax1)\nax3.plot(df.index.values,df['VPD'])\nax4 = plt.subplot(414,sharex=ax1)\nax4.plot(df.index.values,df['NEE_SOLO'])\nplt.show()\n\n# read the variables from the remote netCDF file\nnc_dap_name = \"http://dap.ozflux.org.au/thredds/dodsC/ozflux/sites/Whroo/L6/Whroo_2011_to_2014_L6.nc\"\nvariable_list = ['Fsd','Ta','VPD','NEE_SOLO']\nprint(\"reading remote netCDF file\")\ndf,attr = read_netcdf(nc_dap_name,variable_list=variable_list)\n\n# plot the variables\nprint(\"plotting remote netCDF file\")\nfig = plt.figure(2)\nplt.figtext(0.5,0.95,\"OPeNDAP file\",horizontalalignment='center')\nax1 = plt.subplot(411)\nax1.plot(df.index.values,df['Fsd'])\nax2 = plt.subplot(412,sharex=ax1)\nax2.plot(df.index.values,df['Ta'])\nax3 = plt.subplot(413,sharex=ax1)\nax3.plot(df.index.values,df['VPD'])\nax4 = plt.subplot(414,sharex=ax1)\nax4.plot(df.index.values,df['NEE_SOLO'])\nplt.show()\n","sub_path":"utilities/read_example.py","file_name":"read_example.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"562623865","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/30 20:47\n# @Author : 潘师傅\n# @File : Clue_case.py\n\n\"\"\"Customer-related test cases\"\"\"\nfrom XFP.PubilcAPI.XfpApi import *\n\n\nclass ClueTestCase(unittest.TestCase):\n \"\"\"Assistant - customer list\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ClueTestCase, self).__init__(*args, **kwargs)\n self.XfpRequest = XfpApi()\n self.XmfpEXT = GlobalMap()\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Log in to XFP (Xingfupai); runs only once.\n Log in as the agent and fetch the ID\"\"\"\n cls.do_request = XfpApi()\n cls.XfpRequest = cls.do_request\n cls.XfpRequest.Login()\n cls.XfpRequest.GetUserData()\n # cls.XfpRequest.ClueSave()\n\n def test_1_AddNewClue(self):\n \"\"\"Add a new clue\"\"\"\n try:\n self.XfpRequest.ClueSave(clueNickName=self.XfpRequest.RandomText(textArr=surname))\n # look it up in the search list\n globals()['CluePhone'] = self.XmfpEXT.get('CluePhone')\n self.XfpRequest.ClueList(keyWord=(self.XmfpEXT.get('CluePhone')))\n self.assertEqual(self.XmfpEXT.get('CluePhone'), globals()['CluePhone'])\n except BaseException as e:\n print(\"Assertion error, reason: %s\" % e)\n raise RuntimeError(self.XmfpEXT.get('ApiXfpUrl'))\n\n def test_2_FollowClue(self):\n \"\"\"Follow up on a clue\"\"\"\n 
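# the follow-up save below presumably needs a contact-method label, so fetch the label list first\n 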
self.XfpRequest.GetLabelList(labelNo='GJLXFS') # contact methods; labelName: phone / WeChat / QQ / SMS\n self.XfpRequest.ClueFollowSave()\n self.XfpRequest.ClueFollowList()\n try:\n self.assertEqual('python-线索跟进', self.XmfpEXT.get('followContent'))\n except BaseException as e:\n print(\"Assertion error, reason: %s\" % e)\n self.XfpRequest.ClueFollowList(value=0)\n self.assertEqual('python-线索跟进', self.XmfpEXT.get('followContent'))\n\n def test_3_FollowClueElse(self):\n \"\"\"Follow up on a clue\"\"\"\n self.XfpRequest.ClueFollowSaveElse()\n self.XfpRequest.ClueFollowList(value=1)\n try:\n self.assertEqual('无人接听/用户忙', self.XmfpEXT.get('followContent'))\n except BaseException as e:\n print(\"Assertion error, reason: %s\" % e)\n self.XfpRequest.ClueFollowList(value=0)\n self.assertEqual('无人接听/用户忙', self.XmfpEXT.get('followContent'))\n\n def test_4_ClueContinue(self):\n \"\"\"Create a follow-up plan from a clue\"\"\"\n self.XfpRequest.GetLabelList(labelNo='GJLXFS', labelName='电话')\n self.XfpRequest.ClueContinue(taskEndTime=time.strftime(\"%Y-%m-%d\") + ' 22:00:00')\n # verify via the clue status\n self.XfpRequest.GetClueStatus()\n self.assertEqual(self.XmfpEXT.get('endTimeStr'), ('今天' + ' 22:00'))\n # after creating the task, verify it in the personal to-do list on the home page\n self.XfpRequest.GetUserAgenda(keyWord=self.XmfpEXT.get('cluePhone'))\n self.assertEqual(1, self.XmfpEXT.get('pages'))\n\n def test_5_ExileSea(self):\n \"\"\"Exile the clue to the public pool\"\"\"\n self.XfpRequest.ExileSea()\n # after exiling to the public pool, verify on the home page\n self.XfpRequest.GetUserAgenda(keyWord=self.XmfpEXT.get('cluePhone'))\n self.assertEqual('0', self.XmfpEXT.get('pages'))\n\n def test_ChangeClient(self):\n \"\"\"Convert a clue into a client\"\"\"\n self.XfpRequest.ClueSave(clueNickName=self.XfpRequest.RandomText(textArr=surname))\n self.XfpRequest.ClientEntering(callName=self.XfpRequest.RandomText(textArr=surname),loanSituation='阿萨德科技看见拉丝的加拉索拉卡阿拉山口的卢卡斯拉垃圾当老师拉上来的沙拉到拉萨')\n # after converting to a client, verify on the home page\n self.XfpRequest.GetUserAgenda(keyWord=self.XmfpEXT.get('cluePhone'))\n self.assertEqual(0, self.XmfpEXT.get('pages'))\n\n\n\n\n\n","sub_path":"XFP/XfpApi/test_casc/Clue_case.py","file_name":"Clue_case.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"632303714","text":"# python 3.8.1\nfrom os import walk\nfrom re import match, search\nfrom timeit import timeit\nfrom win32api import GetLogicalDriveStrings\n\n\n\n\n\n\ndefault_root_paths = GetLogicalDriveStrings().split('\\000')[:-1]\ndefault_path_filter = r''\ndefault_query = r''\ndefault_filter_flag = 0\ndefault_parallel = 1\n\n\n\n\n\nnotNone = lambda val, method: val if val != None else method()\n\nclass Search:\n def __init__(\n self,\n root_paths = None,\n path_filter = None,\n query = None,\n filter_flag = None,\n ):\n self.root_paths = notNone(root_paths, self.get_root_paths)\n self.path_filter = notNone(path_filter, self.get_path_filter)\n self.query = notNone(query, self.get_query)\n self.filter_flag = notNone(filter_flag, self.get_filter_flag)\n self.report = ''\n \n\n\n def get_root_paths(self):\n message = \"\"\"\nYou can add one or more root paths separated by semicolons.\nThe program will scan only these paths.\n\nExample: c:\\Program Files; \nExample: d:\\\\\nExample: c:\\\\; d:\\\\; e:\\\\Documents\n\n[+] \"\"\"\n userinput = input(message)\n paths = default_root_paths\n if userinput: \n paths = [path.strip() for path in userinput.split(';') if path.strip()]\n print(paths)\n return paths\n\n\n def get_path_filter(self):\n message = \"\"\"\nYou can add a path-filter. 
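It is matched against each directory name during the scan. 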
\nTakes a Python regex string as input.\n\nExample: files\nExample: .*[Pp]rogram files( \\(x86\\))?\n\n[+] \"\"\"\n regex = default_path_filter\n userinput = input(message)\n if userinput:\n start = userinput[0] == '^' # 0 or 1\n end = -(userinput[-1]=='$') or None # -1 or None\n regex = userinput[start : end]\n print(regex)\n return regex\n\n def get_query(self):\n message = \"\"\"\nYou can add a search query (optional). \nTakes a Python regex as input.\n\nExample: music\nExample: \\.(mp3|wav)$\n\n[+] \"\"\"\n query = default_query\n userinput = input(message)\n if userinput:\n query = userinput \n print(query)\n return query\n \n\n def get_filter_flag(self):\n message = \"\"\"\nYou can add one search flag (optional)\nd/f/e (d: dirs-only, f: files-only, e: everything)\n\nExample: d\nExample: f\n\n[+] \"\"\"\n userinput = input(message)\n flag = default_filter_flag\n if userinput:\n index = \"edf\".find(userinput)\n if index >= 0:\n flag = index\n print(\"edf\"[flag])\n return flag\n\n\n def generate_report(self, name, data):\n match_str = '\\n '.join(data)\n self.report = f\"\"\"{self.report} {name}:\n {match_str}\n\"\"\"\n\n\n def scan_root_path(self, root_path):\n if self.filter_flag == 1:\n if self.path_filter and self.query:\n query = f\"({self.path_filter}|{self.query})\"\n else:\n query = self.path_filter or self.query\n [print(values[0]) for values in walk(root_path) if match(query, values[0].rpartition('\\\\')[-1])]\n elif self.path_filter:\n [self.scan_dirs_and_files(values) for values in walk(root_path) \\\n if search(self.path_filter, values[0].rpartition('\\\\')[-1])]\n else:\n list(map(self.scan_dirs_and_files, walk(root_path)))\n\n def scan_dirs_and_files(self, values):\n root, dirs, files = values\n\n conditionC, conditionD = False, False\n\n if self.filter_flag != 2:\n dirs_match = [dir for dir in dirs if search(self.query, dir)]\n conditionC = bool(dirs_match)\n if conditionC:\n self.generate_report(\"dirs\", dirs_match)\n\n if self.filter_flag != 1:\n files_match = [file for file in files if search(self.query, file)]\n conditionD = bool(files_match)\n if conditionD:\n self.generate_report(\"files\", files_match)\n if conditionC or conditionD:\n self.report = f\"\"\"----------------------------------\n{root}\n{self.report}\n\"\"\"\n print(self.report)\n self.report = '' # reset so the next directory starts a fresh report\n\n\n\n def begin(self):\n print('searching...\\n')\n [self.scan_root_path(path) for path in self.root_paths]\n \n\n#-----------------end of search class-----------------\n\ndef restart(func):\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n input('Press any key to continue...')\n wrapper(*args, **kwargs)\n return wrapper\n\n\n@restart\ndef main():\n search = Search()\n t = timeit(search.begin, number=1)\n print(\"Execution time:\", t)\n\n\n \nif __name__ == '__main__':\n print(('='*50) + '\\nSIMPLE SEARCH\\n')\n print(f'''Defaults: \n default_root_paths = {default_root_paths}\n default_path_filter = {default_path_filter}\n default_query = {default_query}\n default_filter_flag = {default_filter_flag}\n''')\n print('='*50)\n main()\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"108091043","text":"from paddle.io import Dataset\r\nfrom paddle.vision import transforms\r\nfrom mxnet_reader import recordio\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nimport numpy as np\r\nimport numbers\r\nimport paddle\r\nimport os\r\n\r\n\r\n__Author__ = 'Quanhao 
Guo'\r\n__Date__ = '2021.04.24.16.23'\r\n\r\n\r\nclass MXFaceDataset(Dataset):\r\n def __init__(self, root_dir):\r\n super(MXFaceDataset, self).__init__()\r\n self.transform = transforms.Compose(\r\n [\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\r\n ])\r\n self.root_dir = root_dir\r\n path_imgrec = os.path.join(root_dir, 'train.rec')\r\n path_imgidx = os.path.join(root_dir, 'train.idx')\r\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')\r\n s = self.imgrec.read_idx(0)\r\n header, _ = recordio.unpack(s)\r\n if header.flag > 0:\r\n self.header0 = (int(header.label[0]), int(header.label[1]))\r\n self.imgidx = np.array(range(1, int(header.label[0])))\r\n else:\r\n self.imgidx = np.array(list(self.imgrec.keys))\r\n\r\n def __getitem__(self, index):\r\n idx = self.imgidx[index]\r\n s = self.imgrec.read_idx(idx)\r\n header, img = recordio.unpack(s)\r\n label = header.label\r\n if not isinstance(label, numbers.Number):\r\n label = label[0]\r\n label = paddle.to_tensor(label, dtype='int64')\r\n # sample = image.imdecode(img).asnumpy()\r\n sample = np.array(Image.open(BytesIO(img)))\r\n if self.transform is not None:\r\n sample = self.transform(sample)\r\n return sample, label\r\n\r\n def __len__(self):\r\n return len(self.imgidx)\r\n","sub_path":"arcface/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"628028020","text":"from datetime import datetime\nfrom random import randint\n\ncpu_data = connection.ExecuteScalar('''\n CREATE TEMPORARY TABLE omnidb_monitor_result (result TEXT);\n DO LANGUAGE plpythonu\n $$\n import sys\n import StringIO\n import subprocess\n codeOut = StringIO.StringIO()\n codeErr = StringIO.StringIO()\n sys.stdout = codeOut\n sys.stderr = codeErr\n print subprocess.Popen(\"mpstat -P ALL 1 1 | grep 'Average:' | tail -n +2 | tr -s ' ' | cut -f2,3 -d' '\", shell=True, stdout=subprocess.PIPE).stdout.read()\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n result = codeOut.getvalue()\n plpy.execute(\"INSERT INTO omnidb_monitor_result VALUES ('{0}')\".format(result))\n $$;\n SELECT * FROM omnidb_monitor_result;\n''')\n\ndatasets = []\nfor cpu in cpu_data.split('\\n'):\n if cpu!='':\n cpu_split = cpu.split(' ')\n color = \"rgb(\" + str(randint(125, 225)) + \",\" + str(randint(125, 225)) + \",\" + str(randint(125, 225)) + \")\"\n datasets.append({\n \"label\": cpu_split[0],\n \"fill\": False,\n \"backgroundColor\": color,\n \"borderColor\": color,\n \"lineTension\": 0,\n \"pointRadius\": 1,\n \"borderWidth\": 1,\n \"data\": [cpu_split[1]]\n })\n\nresult = {\n \"labels\": [datetime.now().strftime('%H:%M:%S')],\n \"datasets\": datasets\n}\n","sub_path":"PostgreSQL/chart_append/07_cpuusage_data.py","file_name":"07_cpuusage_data.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71589088","text":"import urllib2\r\nfrom BeautifulSoup import BeautifulSoup\r\nfrom mechanize import Browser\r\nimport re\r\n\r\ndef getunicode(soup):\r\n\tbody=''\r\n\tif isinstance(soup, unicode):\r\n\t\tsoup = soup.replace(''',\"'\")\r\n\t\tsoup = soup.replace('"','\"')\r\n\t\tsoup = soup.replace(' ',' ')\r\n\t\tbody = body + soup\r\n\telse:\r\n\t\tif not soup.contents:\r\n\t\t\treturn ''\r\n\t\tcon_list = soup.contents\r\n\t\tfor con in 
con_list:\r\n\t\t\tbody = body + getunicode(con)\r\n\treturn body\r\n\r\n\r\ndef main():\r\n\t\r\n urlhere='http://www.imdb.com/genre/action/?ref_=gnr_mn_ac_mp'\r\n title_search = re.compile('/title/tt\\d+')\r\n \r\n br = Browser()\r\n br.open(urlhere)\r\n\r\n link = br.find_link(url_regex = re.compile(r'/title/tt.*'))\r\n res = br.follow_link(link)\r\n \r\n soup = BeautifulSoup(res.read())\r\n movie_title = getunicode(soup.find('title'))\r\n print(movie_title)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"pythonold.py","file_name":"pythonold.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"6081013","text":"from nn.training_NN import *\nfrom nn.DNA_prediction import *\nimport os\n\n\ndef create_baskets(binding_sites_input, number_of_groups):\n baskets_boundaries = np.linspace(0, binding_sites_input.shape[0] - 1, num = number_of_groups + 1, dtype = int)\n return baskets_boundaries\n\n\ndef training_test_partition(binding_sites_input, binding_sites_output, training_indices_list, positive_set):\n test_input_matr = np.zeros(shape = (len(training_indices_list), positive_set.shape[1]))\n test_output_matr = np.zeros(shape = (len(training_indices_list), 1))\n for y in range(len(training_indices_list)):\n ind = training_indices_list[y]\n test_input_matr[y] = binding_sites_input[ind]\n test_output_matr[y] = binding_sites_output[ind]\n big_indices_list = [x for x in range(binding_sites_input.shape[0])]\n remain = list(set(big_indices_list) - set(training_indices_list))\n random.shuffle(remain)\n train_input_matr = np.zeros(shape = (len(remain), positive_set.shape[1]))\n train_output_matr = np.zeros(shape = (len(remain), 1))\n for y in range(len(remain)):\n ind = remain[y]\n train_input_matr[y] = binding_sites_input[ind]\n train_output_matr[y] = binding_sites_output[ind]\n return test_input_matr, test_output_matr, train_input_matr, train_output_matr\n\n\ndef cross_validation(binding_sites_input, binding_sites_output, positive_set, number_of_groups):\n baskets_boundaries = create_baskets(binding_sites_input, number_of_groups)\n random.seed(0)\n indices_list = [x for x in range(binding_sites_input.shape[0])]\n random.shuffle(indices_list)\n #plt.figure(figsize=(8, 8), dpi=80)\n #SetupROCCurvePlot(plt)\n for k in range(len(baskets_boundaries) - 1):\n low_b = baskets_boundaries[k]\n high_b = baskets_boundaries[k+1]\n training_indices_list = indices_list[low_b:high_b]\n test_input_matr, test_output_matr, train_input_matr, train_output_matr = \\\n training_test_partition(binding_sites_input, binding_sites_output, training_indices_list, positive_set)\n W2_glob, W3_glob, b2_glob, b3_glob = train_NN(train_input_matr, train_output_matr, \\\n input_size=68, hidden_size=8, output_size=1, iterations=1000)\n actives = []\n scores = []\n totally = 0\n incorrect = 0\n thresh1 = 0.9\n thresh2 = 0.25\n for i in range(test_input_matr.shape[0]):\n curr_inp = np.transpose(test_input_matr[[i]])\n W2, W3, a1, a2, a3 = feed_forward(W2_glob, W3_glob, b2_glob, b3_glob, curr_inp)\n totally += 1\n if test_output_matr[i][0] == 1.0:\n if a3[0][0] < thresh1:\n incorrect += 1\n else:\n if a3[0][0] > 1 - thresh1:\n incorrect += 1\n ratio = incorrect / float(totally)\n assert(ratio < thresh2)\n scores = sorted(scores, key=lambda x: x[1], reverse = True)\n\ndef test_DNA_predictions():\n project_folder = 'data/'\n positives_filename = os.path.join(project_folder, 'rap1-lieb-positives.txt')\n negatives_filename = 
os.path.join(project_folder, 'yeast-upstream-1k-negative.fa')\n positive_native_set = get_positives_set(positives_filename)\n negative_native_set = get_negatives_set(negatives_filename, positive_native_set)\n positive_set = convert_to_vectors(positive_native_set)\n negative_set = convert_to_vectors(negative_native_set)\n\n binding_sites_input = np.vstack((positive_set,negative_set))\n binding_sites_output = np.concatenate((np.ones(shape=(positive_set.shape[0],1)),\\\n np.zeros(shape=(negative_set.shape[0],1))))\n cross_validation(binding_sites_input, binding_sites_output, positive_set, 5)\n\n\ndef test_autoencoder():\n # initialize the vectors\n input_glob = np.identity(8)\n\n # train the neural network\n W2, W3, b2, b3 = train_NN(input_glob, input_glob, \\\n input_size=8, hidden_size=3, output_size=8, iterations=20000)\n\n #test_autoencoder(input_glob, W2_glob, W3_glob, b2_glob, b3_glob)\n for i in range(1, input_glob.shape[1] + 1):\n curr_inp = np.transpose(input_glob[[i - 1]])\n W2, W3, a1, a2, a3 = feed_forward(W2, W3, b2, b3, curr_inp)\n assert(np.argmax(curr_inp) == np.argmax(a3))","sub_path":"test/test_nn.py","file_name":"test_nn.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188500618","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom po_spiders.items import PoSpidersItem\nfrom scrapy.selector import Selector\n\nclass MaoyanSpider(scrapy.Spider):\n name = 'maoyan'\n allowed_domains = ['maoyan.com']\n start_urls = ['http://maoyan.com/films?showType=3/']\n\n # def parse(self, response):\n # pass\n # 打开猫眼的本地文件\n # f = open(\"Week01/work2/经典影片_电影大全_经典高清电影-猫眼电影.htm\",'r')\n # response = f.read()\n # f.close\n\n def start_requests(self):\n url = f'http://maoyan.com/films?showType=3/'\n yield scrapy.Request(url=url, callback=self.parse, dont_filter=False)\n\n def parse(self, response):\n movies = Selector(response=response).xpath('//div[@class=\"movie-hover-info\"]')\n a = 0\n print('movies.extract(): ',movies.extract())\n \n for movie in movies:\n #print(movie.extract())\n item = PoSpidersItem()\n if a < 10:\n film_name = movie.xpath('./div[1]/span/text()')\n #print(film_name.extract_first())\n film_name = film_name.extract_first()\n print(film_name)\n \n film_type = movie.xpath('./div[2]/text()')\n #print(film_type.extract()[1].strip()) \n film_type = film_type.extract()[1].strip()\n print(film_type)\n\n film_time = movie.xpath('./div[4]/text()')\n #print(film_time.extract()[1].strip())\n film_time = film_time.extract()[1].strip()\n print(film_time)\n\n a+=1\n\n item['film_name'] = film_name\n item['film_type'] = film_type\n item['film_time'] = film_time\n yield item \n else:\n break\n \n\n\n\n#//div[@class=\"movie-item film-channel\"]/div[2]/a/div/div[2]\n\n\n# //*[@id=\"app\"]/div/div[2]/div[2]/dl/dd[2]/div[1]/div[2]/a/div/div[2]\n# //*[@id=\"app\"]/div/div[2]/div[2]/dl/dd[3]/div[1]/div[3]/a/div/div[2]\n#//*[@id=\"app\"]/div/div[2]/div[2]/dl/dd[1]/div[1]/div[2]/a/div/div[1]\n# //*[@id=\"app\"]/div/div[2]/div[2]/dl/dd[1]/div[1]/div[2]/a/div/div[1]/span\n# //*[@id=\"app\"]/div/div[2]/div[2]/dl/dd[1]/div[1]/div[2]/a/div/div[4]/text()","sub_path":"Week01/work2/po_spiders/po_spiders/spiders/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"546932370","text":"from proteus import *\nfrom proteus.default_p import *\nfrom suboff import *\nfrom proteus.mprans 
import RANS2P\n\nLevelModelType = RANS2P.LevelModel\ncoefficients = RANS2P.Coefficients(epsFact=epsFact_viscosity,\n sigma=0.0,\n rho_0 = rho_0,\n nu_0 = nu_0,\n rho_1 = rho_0,\n nu_1 = nu_0,\n g=g,\n nd=nd,\n LS_model=1,\n Closure_0_model=3,\n Closure_1_model=4,\n turbulenceClosureModel=3,\n epsFact_density=epsFact_density,\n stokes=False,\n useRBLES=useRBLES,\n useMetrics=useMetrics)\n\nbcsTimeDependent = True\n\nperiodic = False\n\ngetDBC_p = twpflowPressure\ngetDBC_u = twpflowVelocity_u\ngetDBC_v = twpflowVelocity_v\ngetDBC_w = twpflowVelocity_w\n\ndirichletConditions = {0:getDBC_p,\n 1:getDBC_u,\n 2:getDBC_v,\n 3:getDBC_w}\n\nfluxBoundaryConditions = {0:'outFlow',\n 1:'outFlow',\n 2:'outFlow',\n 3:'outFlow'}\n\ndef getAFBC_p(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if flag == boundaryTags['left']:\n return lambda x,t: -inflow*velRamp(t)\n if (flag in [boundaryTags['front'],boundaryTags['back'],boundaryTags['top']]+bottom):\n return lambda x,t: 0.0\n\ndef getAFBC_u(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if (flag in [boundaryTags['front'],boundaryTags['back']]):\n return lambda x,t: 0.0\n \ndef getAFBC_v(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if (flag in [boundaryTags['front'],boundaryTags['back']]):\n return lambda x,t: 0.0\n \ndef getAFBC_w(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if (flag in [boundaryTags['front'],boundaryTags['back']]):\n return lambda x,t: 0.0\n\nadvectiveFluxBoundaryConditions = {0:getAFBC_p,\n 1:getAFBC_u,\n 2:getAFBC_v,\n 3:getAFBC_w}\ndef getDFBC_u(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if not (flag == boundaryTags['left'] or\n flag == boundaryTags['top'] or\n flag in bottom):\n return lambda x,t: 0.0\n\ndef getDFBC_v(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if not (flag == boundaryTags['top'] or\n flag in bottom):\n return lambda x,t: 0.0\n\ndef getDFBC_w(x,flag):\n if flag == 0:\n return lambda x,t: 0.0\n if not (flag == boundaryTags['top'] or\n flag in bottom):\n return lambda x,t: 0.0\n\ndiffusiveFluxBoundaryConditions = {0:{},\n 1:{1:getDFBC_u},\n 2:{2:getDFBC_v},\n 3:{3:getDFBC_w}}\n\nclass Steady_p:\n def __init__(self):\n pass\n def uOfXT(self,x,t):\n return -(L[2]-x[1])*coefficients.rho*coefficients.g[1]\n\nclass Steady_u:\n def __init__(self):\n pass\n def uOfXT(self,x,t):\n return 0.0\n\nclass Steady_v:\n def __init__(self):\n pass\n def uOfXT(self,x,t):\n return 0.0\n\nclass Steady_w:\n def __init__(self):\n pass\n def uOfXT(self,x,t):\n return 0.0\n\ninitialConditions = {0:Steady_p(),\n 1:Steady_u(),\n 2:Steady_v(),\n 3:Steady_w()}\n\n## @}\n","sub_path":"benchmarks/suboff/twp_navier_stokes_p.py","file_name":"twp_navier_stokes_p.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381378905","text":"#\n# @lc app=leetcode.cn id=455 lang=python3\n#\n# [455] 分发饼干\n#\n\n# @lc code=start\nclass Solution:\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n g.sort()\n s.sort()\n pg, ps = 0, 0\n while pg < len(g) and ps < len(s):\n if s[ps] >= g[pg]:\n pg += 1\n ps += 1\n return pg\n\n# @lc code=end\n\n","sub_path":"Week_03/五毒神掌第一掌/455.分发饼干.py","file_name":"455.分发饼干.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345217937","text":"a = \"hello\"\nb = \"world\"\nc = a.upper() + \" \" + b.upper()\nd = c + \"!\"\nprint(d)\n\ne = d + \" This {} my first type in {} when my age {} 
{}\" #The format() method takes unlimited number of arguments, and are placed into the respective placeholders\nf = \"was\"\ng = 23\nh = \"python\"\n\ni = e.format(f,h,f,g)\nprint(i) \n\np = \"I want to pay {} dollars for {} pieces of the item {}\"\n\nprice = 20.25\npiece = 10\nitemNo = 12\n\nq = p.format(price,piece,itemNo)\nprint(q)\n\np = \"I want to pay {0} dollars for {2} pieces of the item {1}\" # we can define index number from format\nq = p.format(price,piece,itemNo)\nprint(q)\n\n","sub_path":"basic/10_string_concatenation.py","file_name":"10_string_concatenation.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"573408797","text":"################## Merging files and Genbank file extracting\r\n\r\n#Readme\r\n#example to use:\r\n#on terminal (e.g. anaconda) write >>> a b c d\r\n#a is this code and directory \"...\\Merging_gbfiles_to_xlsx0_432.py\"\r\n#b is the gb files directory \"C:\\gb\\\"\r\n#c is the output directory \"C:\\output\\\"\r\n#d is for convenience to use name of organism \"Fungi_genus\"\r\n\r\n##when there is no libraries. Prefer to create \"environments\" to specific libraries (e.g. conda create environment)\r\n##However, you can use as follows; installing libraries out of environments\r\nimport os,sys\r\n\r\ntry:\r\n import Bio\r\nexcept ImportError:\r\n os.system('pip install biopython')\r\n\t\r\ntry:\r\n import Scipy\r\nexcept ImportError:\r\n os.system('pip install scipy')\r\n\t\r\ntry:\r\n import numpy\r\nexcept ImportError:\r\n os.system('pip install numpy')\r\n\t\r\ntry:\r\n import openpyxl\r\nexcept ImportError:\r\n os.system('pip install openpyxl')\r\n\t\r\nimport Bio\t\r\nfrom Bio import SeqIO\r\n\r\nimport sys\r\nfrom sys import argv\r\n\r\n################## concatenating gb files###\r\n\r\nAll_GBfiles = '{}{}_All_GBfiles.gb'.format(argv[2],argv[3]) #'F:/Python/room/All_GBfiles.gb' \r\n\r\n\r\ndef concatFiles(): # concatenate txt files according to https://stackoverflow.com/questions/13613336/python-concatenate-text-files\r\n path = '{}'.format(argv[1])\r\n files = os.listdir(path)\r\n for idx, infile in enumerate(files):\r\n print (\"File #\" + str(idx) + \" \" + infile)\r\n concat = ''.join([open(path + f).read() for f in files])\r\n with open(All_GBfiles, \"w\") as fo:\r\n fo.write(concat)\r\n\r\nif __name__ == \"__main__\":\r\n concatFiles()\r\n\r\n\t\r\n################## extracting data from Genbank file###\r\n\r\nfrom Bio import SeqIO\r\nimport os\r\ninput = All_GBfiles\r\n \r\noutput = '{}{}_Alldata.txt'.format(argv[2],argv[3]) #\"F:/Python/room/Alldata.txt\"\r\n\r\nhandle = open(input)\r\n\r\nif not os.path.exists(output): #checks for a pre-existing file with the same name as the output\r\n for seq_record in SeqIO.parse(handle, \"genbank\"):\r\n for seq_feature in seq_record.features:\r\n if seq_feature.type==\"source\":\r\n try: #If you would like your script to run to completion even when that information is not present, you can wrap each access to seq_feature.qualifiers in a try-except block catching KeyError and IndexError\r\n strain = seq_feature.qualifiers.get('strain')\r\n except (KeyError, IndexError):\r\n strain = None\r\n try:\r\n country = seq_feature.qualifiers.get('country')\r\n except (KeyError, IndexError):\r\n country = None\r\n try:\r\n isolation_source = seq_feature.qualifiers.get('isolation_source')\r\n except (KeyError, IndexError):\r\n isolation_source = None\r\n try:\r\n host = seq_feature.qualifiers.get('host')\r\n except 
(KeyError, IndexError):\r\n host = None\r\n try:\r\n order = seq_record.annotations[\"taxonomy\"][-3]\r\n except (KeyError, IndexError):\r\n order = None\r\n try:\r\n family = seq_record.annotations[\"taxonomy\"][-2]\r\n except (KeyError, IndexError):\r\n family = None\r\n try:\r\n genus = seq_record.annotations[\"taxonomy\"][-1]\r\n except (KeyError, IndexError):\r\n genus = None\r\n try:\r\n author = seq_record.annotations['references'][0].authors\r\n except (KeyError, IndexError):\r\n author = None\r\n try:\r\n title = seq_record.annotations['references'][0].title\r\n except (KeyError, IndexError):\r\n title = None\r\n try:\r\n journal = seq_record.annotations['references'][0].journal\r\n except (KeyError, IndexError):\r\n journal = None\r\n with open(output, \"a\") as ofile:\r\n ofile.write(\">{0}xyx{1}xyx{2}xyx{3}xyx{4}xyx{5}xyx{6}xyx{7}xyx{8}xyx{9}xyx{10}xyx{11}xyx{12}xyx{13}xyx{14}zyz\\n\".format(seq_record.annotations[\"organism\"], \r\n order, family, genus, seq_record.name, seq_record.description, strain, country, isolation_source, host, \r\n len(seq_record), author, title, journal, seq_record.seq))\r\n \r\nelse:\r\n print (\"The output file already seem to exist in the current working directory {0}. Please change the name of the output file\".format(os.getcwd())) #error msg\r\n\r\nhandle.close()\r\n\r\n################## Removing duplicate elements###\r\n\r\nnone_duplicate = '{}{}_no_duplicates.txt'.format(argv[2],argv[3])#\"F:/Python/room/none_duplicate.txt\"\r\n\r\n\r\nimport os,sys\r\n \r\ndef Remove(duplicate): # https://www.geeksforgeeks.org/python-remove-duplicates-list/\r\n final_list = [] \r\n for data in duplicate: \r\n if data not in final_list: \r\n final_list.append(data) \r\n return final_list\r\n\r\n# Driver Code \r\nduplicate = open(output) #https://qiita.com/visualskyrim/items/1922429a07ca5f974467\r\n\r\nsys.stdout=open(none_duplicate,\"w\") #https://stackoverflow.com/questions/7152762/how-to-redirect-print-output-to-a-file-using-python/38186276\r\nprint(Remove(duplicate))\r\n#sys.stdout.close()\r\n#duplicate.close()\r\n\r\n################## Adding first line for headernames###\r\n\r\nf1 = open('{}{}_no_duplicates.txt'.format(argv[2],argv[3]), 'r')#open(\"F:/Python/room/none_duplicate.txt\", 'r')\r\nf2 = open('{}{}_no_duplicates_fw.txt'.format(argv[2],argv[3]), 'w')#open('F:/Python/room/none_duplicate_re.txt', 'w')#output final file\r\n\r\n\r\nfor j in f1:\r\n f2.write(\"\\n\" + j)\r\n\r\n\r\n################## Replace the target string###\r\n\r\nf3 = open('{}{}_no_duplicates_fw.txt'.format(argv[2],argv[3]), 'r')#open('F:/Python/room/none_duplicate_re.txt', 'r')\r\nf4 = open('{}{}_no_duplicates_fw2.txt'.format(argv[2],argv[3]), 'w')#open('F:/Python/room/none_duplicate_re2.txt', 'w')\r\n\r\n\r\nfor line in f3:\r\n f4.write(line.replace('xyx','\\t').replace('zyz','\\n').replace('n\", \">','>').replace('[\">','>')\r\n\t.replace('n\"]','').replace(\"['\",'').replace(\"']\",'').replace(\"\\>\",'>').replace(\"n', \",'').replace('n\", ','')\r\n\t.replace(\"\\'>\",'>').replace('\\\">','>').replace('\\>','>').replace(\"[\\'\",'').replace('[\"','').replace(\"[\\'\",'')\r\n\t.replace(\"[\",\"\").replace(\"]\",\"\").replace(\"\\'\",''))\r\n\r\n\r\nf3.close()\r\nf4.close()\r\n\r\n\r\n################## header to xls###\r\n\r\n\r\nimport csv\r\nimport openpyxl\r\nfrom openpyxl import Workbook\r\n\r\ncsv.field_size_limit()\r\n\r\ncsv.field_size_limit(100000000)\r\n\r\ncsv.field_size_limit()\r\n\r\n################## txt to xlsx###\r\n\r\ninput_file = 
'{}{}_no_duplicates_fw2.txt'.format(argv[2],argv[3])#'F:/Python/room/none_duplicate_re2.txt'\r\noutput_file = '{}{}_SpecimensList.xlsx'.format(argv[2],argv[3])#'F:/Python/room/none_duplicate_re2.xlsx'\r\n\r\nheadernames = ['Species', 'order', 'family', 'genus', 'GBn', 'Description', 'Voucher', 'Country', \r\n'isolation_source', 'host', 'bp', 'authors', 'title', 'journal', 'sequence']\r\n\r\nwb = openpyxl.Workbook()\r\nws = wb.worksheets[0]\r\n\r\nwith open(input_file, 'r') as data:\r\n reader = csv.reader(data, delimiter='\\t')\r\n for row in reader:\r\n ws.append(row)\r\n for i, j in enumerate(headernames):\r\n ws.cell(row=1, column = i+1, value= j)\r\n\r\nwb.save(output_file)\r\n\r\nfrom openpyxl import load_workbook\r\nworkbook = load_workbook('{}{}_SpecimensList.xlsx'.format(argv[2],argv[3]))\r\nsheet = workbook.active\r\n\r\nsheet.delete_rows(idx=sheet.max_row)#https://medium.com/aubergine-solutions/working-with-excel-sheets-in-python-using-openpyxl-4f9fd32de87f\r\n\r\nworkbook.save('{}{}_SpecimensList.xlsx'.format(argv[2],argv[3]))\r\n\r\nsys.stdout.close()\r\n","sub_path":"Merging_gbfiles_to_xlsx0_432.py","file_name":"Merging_gbfiles_to_xlsx0_432.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198406491","text":"import click\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nimport matplotlib.pyplot as plt\n\nfrom tensorboardX import SummaryWriter\n\n# aqui solo definimos nuestra arquitectura\n\n\nclass Red_ArquitecturaB(nn.Module):\n def __init__(self, input_size, output_size, p_drop):\n super(Red_ArquitecturaB, self).__init__()\n\n # In\n self.layer_in = nn.Linear(input_size, 410)\n self.relu_in = nn.ReLU()\n # Hidden 1\n self.layer_hidden1 = nn.Linear(410, 410)\n self.drop = nn.Dropout(p=p_drop)\n self.relu_hidden1 = nn.ReLU()\n # Hidden 2\n self.layer_hidden2 = nn.Linear(410, 200)\n self.drop2 = nn.Dropout(p=p_drop)\n self.relu_hidden2 = nn.ReLU()\n # Out\n self.layer_output = nn.Linear(200, output_size)\n\n def forward(self, x):\n out = self.layer_in(x)\n out = self.relu_in(out)\n out = self.layer_hidden1(out)\n out = self.drop(out)\n out = self.relu_hidden1(out)\n out = self.layer_hidden2(out)\n out = self.drop2(out)\n out = self.relu_hidden2(out)\n out = self.layer_output(out)\n return out\n\n\nclass TemperaturaDataSet(Dataset):\n def __init__(self, data, dias):\n \"\"\"\n Dataset a medida para el ejemplo\n In:\n data: numpy\n\n Out:\n tupla (entradas, salida)\n \"\"\"\n super().__init__()\n self.datos = data\n self.dias = dias\n\n def __len__(self):\n return(self.datos.shape[0])\n\n def __getitem__(self, index):\n return (self.datos[index,(7 - self.dias) * 24:-24], self.datos[index, -24:])\n\n\n@click.command()\n@click.option('--dataset_train', prompt='Ruta del dataset entrenamiento: ', default='.\\\\dataset_generalista.csv')\n@click.option('--mmscaler', prompt='Ruta del escalador: ', default='.\\\\mmscaler.pickle')\n@click.option('--outmodel', prompt='Ruta para guardar el modelo', default='.\\\\B_100_50_05_7.model.pt')\ndef main(dataset_train, mmscaler, outmodel, batch_size=100, num_epoch=50, p_dropout=0.5, dias=7):\n\n red = Red_ArquitecturaB(input_size=(24 * dias), output_size=24, p_drop=p_dropout)\n\n funcion_perdida = nn.MSELoss()\n optimizer = torch.optim.Adam(params=red.parameters(), lr=0.001)\n\n train = np.array(pd.read_csv(dataset_train, 
decimal=\".\", sep=\",\", header=None).values)\n train_ds = TemperaturaDataSet(train, dias=dias)\n train_dataloader = DataLoader(dataset=train_ds, shuffle=True, batch_size=batch_size)\n\n # entrenamiento de la red\n red.train()\n for epoch in range(num_epoch):\n batch = 0\n for x_train, y_train in train_dataloader:\n optimizer.zero_grad()\n x_train = x_train.type(torch.float)\n y_train = y_train.type(torch.float)\n\n y_pred_train = red(x_train) # es lo mismo que red.forward(entrada)\n\n loss = funcion_perdida(y_pred_train, y_train)\n loss.backward()\n optimizer.step()\n print(\"Epoch: %2d Batch: %6d Loss: %2.8f ErrorMean: %2.8f\" %\n (epoch, batch, loss.item(), (y_pred_train - y_train).mean()))\n batch = batch + 1\n\n torch.save(red.state_dict(), outmodel)\n print(\"Modelo guardado con éxito!\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Modelo General/8.guardar_modelo_seleccionado.GM.py","file_name":"8.guardar_modelo_seleccionado.GM.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124145489","text":"import math\nimport statistics\nimport numpy as np\n\n\nclass Model:\n def __init__(self, data_train, target_train, k):\n self._data_train = data_train\n self._target_train = target_train\n self._k = k\n\n def __euclidean_distance(self, x, y):\n distance = 0.0\n for i in range(0, len(x)):\n distance += (x[i] - y[i]) ** 2\n distance = math.sqrt(distance)\n return distance\n\n def predict(self, data_test):\n distances = []\n predictions = []\n\n for i in range(0, len(data_test)):\n for j in range(0, len(self._data_train)):\n # print(data_test[i], self._data_train[j])\n distances.append([self.__euclidean_distance(data_test[i], self._data_train[j]),\n self._target_train[j]])\n knn = sorted(distances, key=lambda x: x[0])[0:int(self._k)]\n knn_class = np.copy(knn)[:, 1]\n try:\n predictions.append(statistics.mode(knn_class))\n except statistics.StatisticsError:\n predictions.append(knn_class[0])\n\n distances = []\n\n return predictions\n\n _data_train = []\n _target_train = []\n _k = int()\n\n\nclass Classifier:\n def fit(self, data_train, target_train):\n k = input(\"Choose a k: \")\n return Model(data_train, target_train, k)","sub_path":"prove02/k_nearest_neighbors.py","file_name":"k_nearest_neighbors.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156604988","text":"import json\nfrom flask import Flask, request\nfrom flask_jwt import JWT, jwt_required, current_identity\nfrom sqlalchemy.exc import IntegrityError\nfrom datetime import timedelta\nfrom flask_cors import CORS\n\nfrom models import db, User, Book, myBooks\n\n''' Begin boilerplate code '''\ndef create_app():\n app = Flask(__name__, static_url_path='')\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(days = 7)\n app.config['SECRET_KEY'] = \"MYSECRET\"\n CORS(app)\n db.init_app(app)\n return app\n\napp = create_app()\n\napp.app_context().push()\n\n''' End Boilerplate Code '''\n\n''' Set up JWT here '''\ndef authenticate(uname, password):\n #search for the specified user\n user = User.query.filter_by(username=uname).first()\n #if user is found and password matches\n if user and user.check_password(password):\n return user\n\n#Payload is a dictionary which is passed to the function by Flask JWT\ndef 
identity(payload):\n    return User.query.get(payload['identity'])\n\njwt = JWT(app, authenticate, identity)\n\n''' End JWT Setup '''\n\n@app.route('/')\ndef client_index():\n    return app.send_static_file('index.html')\n\n@app.route('/app')\ndef client_app():\n    return app.send_static_file('app.html')\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n    userdata = request.get_json() # get userdata\n    newuser = User(username=userdata['username'], email=userdata['email']) # create user object\n    newuser.set_password(userdata['password']) # set password\n    try:\n        db.session.add(newuser)\n        db.session.commit() # save user\n    except IntegrityError: # attempted to insert a duplicate user\n        db.session.rollback()\n        return 'username or email already exists' # error message\n    return 'user created' # success\n\n@app.route('/myBooks', methods=['POST'])\n@jwt_required()\ndef create_my_Book():\n    data= request.get_json()\n    rec = myBooks(mbid=data[\"mbid\"], id=current_identity.id,bid=data[\"bid\"], name=data[\"name\"], author=data[\"author\"])\n    db.session.add(rec)\n    db.session.commit()\n    return \"Added\", 201\n\n@app.route('/book', methods=['GET'])\ndef get_Books():\n    books = Book.query.all()\n    books = [book.toDict() for book in books]\n    return json.dumps(books)\n\n@app.route('/myBooks', methods=['GET'])\n@jwt_required()\ndef get_my_books():\n    queryset = myBooks.query.filter_by(id=current_identity.id).all()\n    if queryset is None:\n        return 'Invalid id or unauthorized'\n    if len(queryset) == 0:\n        return 'No Books stored'\n    books = [book.toDict() for book in queryset]\n    return json.dumps(books)\n\n@app.route('/myBooks/<num>', methods=['GET'])\n@jwt_required()\ndef get_my_book(num):\n    num = int(num)\n    queryset = myBooks.query.filter_by(id=current_identity.id).all()\n    if queryset is None:\n        return 'Invalid id or unauthorized'\n    if len(queryset) == 0:\n        return 'No Books stored'\n    if num > len(queryset):\n        return 'Invalid num specified'\n    return json.dumps(queryset[num-1].toDict())\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080, debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"244078143","text":"from torch.utils.data import TensorDataset, DataLoader\nimport torch\nimport numpy as np\n\nX_train = np.loadtxt('./data/X_train.txt', delimiter=' ')\nX_train = torch.tensor(X_train, dtype=torch.float32)\nW = torch.randn(300, 4)\nsoftmax = torch.nn.Softmax(dim=1)\n\ny_train = np.loadtxt('./data/y_train.txt')\ny_train = torch.tensor(y_train, dtype=torch.int64)\n\nclass LogisticRegression(torch.nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.net = torch.nn.Sequential(\n            torch.nn.Linear(300, 4),\n        )\n    def forward(self, X):\n        return self.net(X)\n\nmodel = LogisticRegression()\nds = TensorDataset(X_train, y_train)\n# create the DataLoader\nloader = DataLoader(ds, batch_size=1, shuffle=True)\n\nloss_fn = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.net.parameters(), lr=1e-1)\n\nfor epoch in range(10):\n    for xx, yy in loader:\n        y_pred = model(xx)\n        loss = loss_fn(y_pred, yy)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\nmodel_path = './bin/model.bin'\ntorch.save(model.state_dict(), model_path)","sub_path":"seiichi/chapter08/73.py","file_name":"73.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"238671585","text":"#Par_Impar\n\n\n#Lectura de Datos\nn=int(input(\"Ingrese el numero a verificar: \"))\n#Proceso\nif n%2==0:\n tipo=\"par\"\nelse:\n tipo=\"impar\"\nprint(f\"El numero {n} es {tipo}\")\n\n","sub_path":"Par_Impar.py","file_name":"Par_Impar.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"214468444","text":"import json\nimport os\n\nclass Colour():\n \"\"\"\n Manipulate the \"NRC-Colour-Lexicon\" dataset for our use\n\n FILE FORMAT (from their readme):\n Each line has the following format:\n --Colour=VotesForThisColour=TotalVotesCast=\n\n is a word for which the annotators provided colour associations;\n\n is one or more comma-separated words that indicate the sense of the target word for which the annotations are provided;\n\n is the colour most associated with the target word. It is one of eleven colours---white, black, red, green, yellow, blue, brown, pink, purple, orange, grey. If each of the annotators suggested a different colour association for the target word, then is set to None.\n\n is the number of annotators who chose for the target word. It is set to None if is None.\n\n is the total number of annotators who gave colour associations for the target word.\n\n\n EX:\n aaaaaaah\t0.479\t0.606\t0.291\n \"\"\"\n\n def __init__(self):\n path = \"../../resources/NRC-Sentiment-Emotion-Lexicons/NRC-Colour-Lexicon-v0.92/NRC-color-lexicon-senselevel-v0.92.txt\"\n self.lines = open(path, 'r').readlines()\n self.outfile = \"out/colour.json\"\n self.dataset = {}\n\n def process_line(self, line):\n \"\"\"\n process a line\n \n @params: line -- a line of text as per the file format:\n - --Colour=VotesForThisColour=TotalVotesCast=\n Sections:\n - --\n Colour=\n VotesForThisColour=\n TotalVotesCast=\n @returns: \n - complete: boolean indicating finished state of entry\n - res: dictionary representing the entry\n \"\"\"\n sections = line.split('\\t')\n wordsection = sections[0].split('--')\n word = wordsection[0].strip()\n sense = [x.strip() for x in wordsection[1].strip().split(',')]\n color = sections[1].split('=')[1].strip()\n votes = sections[2].split('=')[1].strip()\n totalvotes = sections[3].split('=')[1].strip()\n return {\n \"word\": word,\n \"sense\": sense,\n \"color\": color,\n \"votes\": votes,\n \"totalvotes\": totalvotes\n }\n\n def run(self):\n \"\"\"\n Runner that processes all words in [self.lines]\n Writes to files by first letter\n \"\"\"\n for line in self.lines:\n line = line.lower().strip()\n entry = self.process_line(line)\n self.write(entry, f\"tmp/colour-{entry['word'][0]}.json\", True)\n self.fix_json()\n self.make_dataset()\n\n def fix_json(self):\n directory = os.scandir('./tmp')\n for filename in directory:\n if filename.is_file():\n if filename.name.endswith('json'):\n filestr = \"\"\n with open('./tmp/'+filename.name, \"r\") as f:\n filestr = f.read().replace(\"}{\", \"},{\")\n with open('./tmp/'+filename.name, \"w\") as f:\n f.write(\"[\\n\" + filestr + \"\\n]\")\n\n def write(self, entry, filename, append=False):\n with open(filename, 'a' if append else 'w') as outfile:\n json.dump(entry, outfile, sort_keys = True, indent = 2)\n\n def make_dataset(self):\n directory = os.scandir('./tmp')\n dataset = {}\n \"\"\"\n {\n \"\": {\n \"word\": ,\n \"valence\": ,\n \"arousal\": ,\n \"dominance\": \n }\n }\n \"\"\"\n for filename in directory:\n if filename.is_file():\n if filename.name.endswith('json') and filename.name.startswith(\"colour-\"):\n 
with open('./tmp/'+filename.name, \"r\") as f:\n currentList = json.load(f)\n for entry in currentList:\n curr = list(dataset.get(entry[\"word\"], []))\n curr.append(entry)\n dataset[entry[\"word\"]] = curr\n os.remove('./tmp/'+filename.name)\n with open(self.outfile, \"w\") as f:\n json.dump(dataset, f, indent=2, sort_keys=True)\n \nif __name__ == \"__main__\":\n Colour = Colour()\n Colour.run()","sub_path":"db/NRC-Colour-Lexicon/colour.py","file_name":"colour.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"380229610","text":"from django.db import models\nfrom ex_user.models import User\n\n\nclass Category(models.Model):\n class Meta:\n db_table = 'category'\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n name = models.CharField('Название', max_length=50)\n parent_category = models.ForeignKey('Category', verbose_name='Родительская категория', null=True, blank=True,\n related_name=\"child_category_set\")\n\n def __str__(self):\n return self.name\n\n\nclass Rating(models.Model):\n class Meta:\n db_table = 'rating'\n verbose_name = 'Оценка'\n verbose_name_plural = 'Оценки'\n unique_together = (('master', 'valuer'),)\n\n VALUE_EXCELLENT = 5\n VALUE_GOOD = 4\n VALUE_NORMAL = 3\n VALUE_POOR = 2\n VALUE_DISGUSTING = 1\n\n VALUE_CHOICES = (\n (VALUE_EXCELLENT, 'Отлично'),\n (VALUE_GOOD, 'Хорошо'),\n (VALUE_NORMAL, 'Нормально'),\n (VALUE_POOR, 'Плохо'),\n (VALUE_DISGUSTING, 'Отвратительно'),\n )\n\n master = models.ForeignKey(User, verbose_name='Мастер', related_name='master_ratings')\n valuer = models.ForeignKey(User, verbose_name='Оценщик', related_name='valuer_ratings')\n value = models.IntegerField('Оценка', choices=VALUE_CHOICES)\n comment = models.TextField('Комментарий')\n\n\nclass Task(models.Model):\n class Meta:\n db_table = 'task'\n verbose_name = 'Заявка'\n verbose_name_plural = 'Заявки'\n\n owner = models.ForeignKey(User, verbose_name='Чья заявка')\n category = models.ForeignKey(Category, verbose_name='Категория')\n description = models.TextField('Описание')\n\n\nclass Offer(models.Model):\n class Meta:\n db_table = 'offer'\n verbose_name = 'Предложение'\n verbose_name_plural = 'Предложения'\n\n task = models.ForeignKey(Task, verbose_name='Заявка')\n user = models.ForeignKey(User, verbose_name='Пользователь')\n description = models.TextField('Описание')\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188199064","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 30 15:00:47 2019\n\n@author: clair\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport argparse\n\nSUBTRACTOR = {\n \"mog\": cv2.bgsegm.createBackgroundSubtractorMOG,\n \"mog2\": cv2.createBackgroundSubtractorMOG2,\n \"gmg\": cv2.bgsegm.createBackgroundSubtractorGMG\n }\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\", type=str, help=\"path to input video file\")\nap.add_argument(\"-s\", \"--subtractor\", type=str, help=\"background subtractor to apply on the video\")\nargs = vars(ap.parse_args())\n\ncap = cv2.VideoCapture(args[\"video\"])\nbackground_subtractor = args[\"subtractor\"]\n# eg. 
'../KCF_tracker/videos/angle90.mp4'\n\nfgbg = SUBTRACTOR[background_subtractor]()\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n\nwhile(1):\n ret, frame = cap.read()\n \n if frame is None:\n break\n \n fgmask = fgbg.apply(frame)\n if background_subtractor == \"gmg\":\n fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n cv2.imshow('fgmask',frame)\n cv2.imshow('frame',fgmask)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\ncap.release()\ncv2.destroyAllWindows()\ncv2.waitKey(1)\n\n","sub_path":"background_reduction/background_reduction.py","file_name":"background_reduction.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350406181","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\" \n@version: v1.0 \n@author: xag \n@license: Apache Licence \n@contact: xinganguo@gmail.com \n@site: http://www.xingag.top \n@software: PyCharm \n@file: send_news.py \n@time: 2018/5/4 17:10 \n@description:itchat 自动发送信息给好友\n\"\"\"\n\n# https://mp.weixin.qq.com/s?__biz=MzIxMzgyOTg1MQ==&mid=2247484131&idx=1&sn=001de6693c2bf243abba531e9aa6e5cb&chksm=97b19357a0c61a41ec8f31faf5aa1a6eb5984f3e5cd4bb7a1e64a28ba1c411badef60187e8cd&mpshare=1&scene=1&srcid=05048vyPLZVqtIZ6suVGIHrM#rd\n\nimport requests\nimport itchat\nimport time\nimport re\n\n# 利用线程去执行定时任务\nimport threading as thd\n\n\ndef get_news():\n\turl = \"http://open.iciba.com/dsapi\"\n\t# 具体的内容\n\trequest_result = requests.get(url).json()\n\tcontent = request_result['content']\n\ttranslation = request_result['translation']\n\timg = request_result['picture2']\n\t# print('content:%s\\ntranslation:%s\\nimg:%s' % (content, translation, img))\n\treturn content, translation, img\n\n\ndef send_news():\n\tprint('发送消息咯~')\n\ttry:\n\t\t# 短时间关闭程序后重连\n\t\t# 这样即使程序关闭,一定时间内重新开启也可以不用重新扫码。\n\t\titchat.auto_login(hotReload=True)\n\t\tdear_friend = (itchat.search_friends(name=u'小敏子'))[0]\n\t\t# 获取对应名称的一串数字\n\t\tYouPingNian = dear_friend['UserName']\n\t\t# 待发送的内容\n\t\tmessage1 = str(get_news()[0])\n\t\tcontent = str(get_news()[1])\n\t\timg = str(get_news()[2])\n\n\t\t# 用正则表达式截取中文引号 ‘ 之间的文字。\n\t\t# str = \"词霸小编:卓别林的一句话送你们“用特写镜头看生活,生活是一个悲剧,但用长镜头看生活,就是一部喜剧。”\"\n\t\t# str1 = result = re.findall(\".*“(.*)”.*\", str)\n\t\tmessage2 = str(re.findall(\".*“(.*)”.*\", content)[0])\n\t\tmessage3 = \"我是机器人\"\n\n\t\t# 发送消息\n\t\tprint('开始发送消息')\n\t\titchat.send(message1, toUserName=YouPingNian)\n\t\titchat.send(message2, toUserName=YouPingNian)\n\t\titchat.send(message3, toUserName=YouPingNian)\n\t\tprint('发送消息完成')\n\n\t\t# itchat.send_image()\n\t\t# 每86400秒(1天)发送1次\n\t\t# 不用Linux的定时任务是因为每次登陆都需要扫描二维码登陆\n\t\t# 很麻烦的事,就让他一直挂着吧\n\t\t# thd.Timer(10, send_news).start()\n\t\tpass\n\texcept:\n\t\tmessage4 = u'噗噗噗~出现bug了~'\n\t\titchat.send(message4, toUserName=YouPingNian)\n\n\nif __name__ == \"__main__\":\n\tsend_news()\n\n# ==============================================================\n\n# Python 正则表达式匹配两个字符之间的字符\n# https://zhidao.baidu.com/question/433631377743612444.html\n\n# -*- coding: cp936 -*-\n# import re\n\n# string = \"xxxxxxxxxxxxxxxxxxxxxxxx entry '某某内容' for aaaaaaaaaaaaaaaaaa\"\n\n# result = re.findall(\".*entry(.*)for.*\", string)\n# for x in result:\n# \tprint x\n\n# '某某内容'\n\n\n# ==============================================================\n","sub_path":"Python/词云/使用 itchat 
爬取微信朋友的签名并生成词云/itchatDemo/send_news.py","file_name":"send_news.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413182389","text":"\nimport time\nimport pandas as pd\nimport re\nimport nltk.classify.util\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom nltk import pos_tag\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.feature_selection import SelectKBest, chi2\n\n###############################################################################\n# Load the raw text dataset.\n###############################################################################\n\nprint(\"Loading dataset...\")\n\n#raw_text_dataset = pd.read_csv(\"love_hate.csv\")\n\n#train_data = raw_text_dataset[0:1800]\n#test_data = raw_text_dataset[1800:2586]\n\ntrain_data = pd.read_csv(\"train_data.csv\")\ntest_data = pd.read_csv(\"test_data.csv\")\n\nprint(len(train_data))\nprint(len(test_data))\n\nX_train_raw = train_data['content']\ny_train_labels = train_data['sentiment']\nX_test_raw = test_data['content']\ny_test_labels = test_data['sentiment']\n\n\ndef review_to_words( review_text ):\n\n #\n # 2. Remove non-letters\n letters_only = re.sub(\"[^a-zA-Z]\", \" \", review_text)\n #\n # 3. Convert to lower case, split into individual words\n words = letters_only.lower().split()\n #\n # 4. In Python, searching a set is much faster than searching\n # a list, so convert the stop words to a set\n stops = nltk.corpus.stopwords.words('english')\n stops += ['.', '://', 'http', 'com', '...']\n # 5. Remove stop words\n meaningful_words = [w for w in words if not w in stops]\n\n # 6. 
Join the words back into one string separated by space,\n # and return the result.\n return( \" \".join( meaningful_words ))\n\n\nprint(\"Cleaning and parsing the training set tweets...\\n\")\nnum_reviews = X_train_raw.size\nclean_train_reviews = []\nfor i in range( 0, num_reviews ):\n # If the index is evenly divisible by 1000, print a message\n if( (i+1)%1000 == 0 ):\n print( \"Review %d of %d\\n\" % ( i+1, num_reviews ))\n clean_train_reviews.append( review_to_words( X_train_raw[i] ))\n\npostag_train_reviews = []\nfor d in clean_train_reviews:\n data_tokens = nltk.wordpunct_tokenize(d)\n temp = [\"%s_%s\" % (w,t) for w, t in pos_tag(data_tokens)]\n postag_train_reviews.append(\" \".join(temp))\n\n\nprint(\"Cleaning and parsing the testing set tweets...\\n\")\ntest_tweets = X_test_raw.size\nclean_test_reviews = []\nfor i in range( 0 , test_tweets ):\n # If the index is evenly divisible by 1000, print a message\n if( (i+1)%1000 == 0 ):\n print( \"Review %d of %d\\n\" % ( i+1, test_tweets ))\n clean_test_reviews.append( review_to_words( X_test_raw[i] ))\n\n\npostag_test_reviews = []\nfor d in clean_test_reviews:\n data_tokens = nltk.wordpunct_tokenize(d)\n temp = [\"%s_%s\" % (w,t) for w, t in pos_tag(data_tokens)]\n postag_test_reviews.append(\" \".join(temp))\n\n\n\n\n###############################################################################\n# Use LSA to vectorize the articles.\n###############################################################################\n\n# Tfidf vectorizer:\n# - Strips out “stop words”\n# - Filters out terms that occur in more than half of the docs (max_df=0.5)\n# - Filters out terms that occur in only one document (min_df=2).\n# - Selects the 10,000 most frequently occuring words in the corpus.\n# - Normalizes the vector (L2 norm of 1.0) to normalize the effect of\n# document length on the tf-idf values.\nvectorizer = TfidfVectorizer( min_df=2, max_features=20000 ,stop_words=None, ngram_range= (1,3),\n use_idf=True)\n\n# Build the tfidf vectorizer from the training data (\"fit\"), and apply it\n# (\"transform\").\nX_train_tfidf = vectorizer.fit_transform(postag_train_reviews)\n\nprint(\" Actual number of tfidf features: %d\" % X_train_tfidf.get_shape()[1])\n\nprint(\"\\nPerforming dimensionality reduction using LSA\")\nt0 = time.time()\n\n# Project the tfidf vectors onto the first 150 principal components.\n# Though this is significantly fewer features than the original tfidf vector,\n# they are stronger features, and the accuracy is higher.\nsvd = TruncatedSVD(800)\nlsa = make_pipeline(svd, Normalizer(copy=False))\n\n# Run SVD on the training data, then project the training data.\nX_train_lsa = lsa.fit_transform(X_train_tfidf)\n\n\nprint(\" done in %.3fsec\" % (time.time() - t0))\n\nexplained_variance = svd.explained_variance_ratio_.sum()\nprint(\" Explained variance of the SVD step: {}%\".format(int(explained_variance * 100)))\n\n\n# Now apply the transformations to the test data as well.\nX_test_tfidf = vectorizer.transform(postag_test_reviews)\nX_test_lsa = lsa.transform(X_test_tfidf)\n\nprint(\" train tfidf size\",X_train_tfidf.size)\nprint(\"test tfidf size\" ,X_test_tfidf.size)\n\n\n###############################################################################\n# Run classification of the test articles\n###############################################################################\n\nprint(\"\\nUsing KNN on LSA vectors...\")\n\n# Time this step.\nt0 = time.time()\n\n# Build a k-NN classifier. 
Use k = 5 (majority wins), the cosine distance,\n# and brute-force calculation of distances.\nknn_lsa = KNeighborsClassifier(n_neighbors=20, algorithm='brute', metric='cosine')\nknn_lsa.fit(X_train_lsa, y_train_labels)\n\n# Classify the test vectors.\np = knn_lsa.predict(X_test_lsa)\n\n\n# Measure accuracy\n\nprint(metrics.accuracy_score(y_test_labels, p))\n#print(metrics.classification_report(y_test_labels, p))\n# Calculate the elapsed time (in seconds)\nelapsed = (time.time() - t0)\nprint(\" done in %.3fsec\" % elapsed)\n\n\n\nprint(\"\\nUsing LinearSVC on LSA features\")\nlsvc = LinearSVC()\nlsvc.fit(X_train_lsa, y_train_labels)\nresult = lsvc.predict(X_test_lsa)\nprint(metrics.accuracy_score(y_test_labels, result))\n#print(metrics.confusion_matrix(y_test_labels,result))\n#print(metrics.classification_report(y_test_labels, result))\n\nprint(\"\\nUsing LIBSVC on LSA features\")\nlibsvc = SVC(kernel= 'linear')\nlibsvc.fit(X_train_lsa, y_train_labels)\nresult = libsvc.predict(X_test_lsa)\nprint(metrics.accuracy_score(y_test_labels, result))\n#print(metrics.classification_report(y_test_labels, result))\n\nprint(\"\\nUsing MNB on tfidf\")\nmnb = MultinomialNB()\nmnb.fit(X_train_tfidf, y_train_labels)\nresult = mnb.predict(X_test_tfidf)\nprint(metrics.accuracy_score(y_test_labels, result))\n#print(metrics.classification_report(y_test_labels, result))\n","sub_path":"lsaclassification.py","file_name":"lsaclassification.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340481545","text":"import sqlite3\n\n# Create the new database because it doesn't exist yet\nconn = sqlite3.connect(\"cars.db\")\n\n#get a cursor object used to execute SQL commands\ncursor = conn.cursor()\n\n#create a new table called inventory that includes the following fields: \" Make\", \"Model\", and \"Quantity\". Don't forget to include the proper data-types\n\ncursor.execute(\"\"\"CREATE TABLE inventory\n\t (make TEXT, model TEXT, quantity INT)\n\t\t\t\t\"\"\")\n\nconn.close()\n","sub_path":"sqlhw.py","file_name":"sqlhw.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560010289","text":"def remove_duplicates(seq):\n \"\"\"\n With while statements, change a list so that\n duplicate values inside of the list are removed. 
\n    The ordering of the list is kept\n    \"\"\"\n    unique_len = len(set(seq))\n    current_position = 0\n    for current_position in range(unique_len):\n        current_value = seq[current_position]\n        tmp_seq = seq[current_position+1:]\n        #print(seq)\n        tmp_position = 0\n        while tmp_position != len(tmp_seq):\n            if tmp_seq[tmp_position] == current_value:\n                del tmp_seq[tmp_position]\n            else:\n                tmp_position += 1\n        seq[current_position+1:] = tmp_seq\n\n# Assert the statement in textbook\nlst = [7, 4, 12, 7, 1, 25, 7, 4, 0, 1, 20]\nremove_duplicates(lst)\nassert lst == [7, 4, 12, 1, 25, 0, 20]\n\n# Assert other ordering close to each other\nlst = [7, 7, 7, 4, 4, 12, 1, 1, 25, 0, 20]\nremove_duplicates(lst)\nassert lst == [7, 4, 12, 1, 25, 0, 20]","sub_path":"CH7_lists/remove_duplicates_for.py","file_name":"remove_duplicates_for.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381716506","text":"# delete an element from a linked list\r\nclass Node:\r\n    def __init__(self,data):\r\n        self.value = data\r\n        self.next = None\r\n\r\nclass linkedlist:\r\n    def __init__(self):\r\n        self.head = None # initialize self.head as empty\r\n\r\n    def Push(self,new_data):\r\n        current_data = Node(new_data)\r\n        current_data.next = self.head # shift the old head one position down\r\n        self.head = current_data # then put current_data at the head\r\n\r\n    def Delete(self,key):\r\n        if self.head is None:\r\n            return\r\n\r\n        if self.head.value == key:\r\n            self.head = self.head.next\r\n            return\r\n\r\n        temp = self.head\r\n        while temp.next is not None:\r\n            if temp.next.value == key:\r\n                temp.next = temp.next.next\r\n            else:\r\n                temp = temp.next\r\n        return temp\r\n\r\n    def Print(self):\r\n        temp = self.head\r\n        while temp is not None:\r\n            print(temp.value,end=\" \")\r\n            temp = temp.next\r\n\r\nif __name__==\"__main__\":\r\n    info = linkedlist()\r\n    info.Push(9)\r\n    info.Push(8)\r\n    info.Push(7)\r\n    info.Push(6)\r\n    info.Push(56)\r\n    info.Print()\r\n    print()\r\n    # delete\r\n    n = int(input(\"Enter the value you want to delete: \"))\r\n    info.Delete(n)\r\n    info.Print()","sub_path":"Linked list/me. data deletion with key in linkedlist.py","file_name":"me. 
data deletion with key in linkedlist.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590063297","text":"from django.conf.urls import url\n\nfrom .views import albums, users, gallery, images\n\n\nurlpatterns = [\n    url(r'^$', gallery.GalleryIndex.as_view(), name='index'),\n\n    url(r'^signup/$', users.SignupUser.as_view(), name='signup'),\n    url(r'^signin/$', users.signin, name='signin'),\n    url(r'^signout/$', users.signout, name='signout'),\n    url(r'^users/(?P<pk>[0-9]+)/$', users.UpdateProfileUser.as_view(), name='profile'),\n\n    url(r'^albums/$', albums.ListAlbum.as_view(), name='list-albums'),\n    url(r'^albums/new/$', albums.CreateAlbum.as_view(), name='create-album'),\n    url(r'^albums/(?P<pk>[0-9]+)/$', albums.DetailAlbum.as_view(), name='detail-album'),\n    url(r'^albums/(?P<pk>[0-9]+)/edit/$', albums.UpdateAlbum.as_view(), name='update-album'),\n    url(r'^albums/(?P<pk>[0-9]+)/delete/$', albums.DeleteAlbum.as_view(), name='delete-album'),\n\n    url(r'^images/new/$', images.CreateImage.as_view(), name='create-image'),\n\n]\n","sub_path":"gallery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"113545946","text":"\"\"\" Scraping_html_tables.py\n    Inspired by\n    https://towardsdatascience.com/web-scraping-html-tables-with-python-c9baba21059\n    https://python-docs.readthedocs.io/en/latest/scenarios/scrape.html\n    https://www.youtube.com/watch?v=W-lZttZhsUY\n    https://github.com/codereport/Talks/tree/master/2020-04-PyCon/BeautifulPythonRefactoring\n    XPath is a way of locating information in structured documents such as HTML\n\n\"\"\"\nimport requests\nimport lxml.html as lh\nimport pandas as pd\n\nurl='http://pokemondb.net/pokedex/all'\n\npage = requests.get(url) # Get html content\ntree = lh.fromstring(page.content) # Use page.content vs page.text b/c html.fromstring expects bytes\ntable = tree.xpath('//tr') # Parse data stored between <tr>..</tr> of HTML\n\n# .text_content() returns text contained within an HTML tag w/o HTML markup. Grab 1st row with headers\ntitles = [name.text_content() for name in table[0]]\n\n# Use conditional to update data\nfmt = lambda data : int(data) if data.isnumeric() else data\n\n# Then populate the column content. Iterate over all the remaining rows and over each element in a row,\n# convert the elements if needed using the lambda, and then zip(*) is a transpose function\ncols = zip(*[[fmt(elem.text_content()) for elem in row.iterchildren()]\n             for row in table[1:]])\n\nDict = {title:column for title, column in zip(titles, cols)}\ndf = pd.DataFrame(Dict)\n\nprint(df.head())\n\n# An alternative way of doing this\nheader = {\n    \"User-Agent\": \"Mozilla /5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36\",\n    \"X-Requested-With\": \"XMLHttpRequest\" }\n#r = requests.get(url, headers=header)\nr = requests.get(url)\ndf2 = pd.read_html(r.text)[0]\nprint(df2.head())","sub_path":"BeautifulSoup/Scraping_html_tables3.py","file_name":"Scraping_html_tables3.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"298883147","text":"\"\"\"\n@Name: recursion\n@Version: \n@Project: PyCharm\n@Author: wangmin\n@Date: 2018/3/18\n\"\"\"\n\n\"\"\"\n    A small package of recursive helpers for digging values out of nested dicts\n    Usage examples:\n    1: get_value(data, key)\n    data = {\"code\": 0, \"message\": \"operation successful\",\"result\": {\"amount\": 950.0000,\n    \"totalBaoLiFee\": None, \"pageNo\": 1,\"data\": [{\"goodsId\": 100}]}}\n    print(get_value(data, \"code\"))\n    2: list_for_key_to_dict(*args, my_dict)\n    print(list_for_key_to_dict(\"code\", \"pageNo\", \"goodsId\", my_dict=data))\n\"\"\"\nclass GetDictParam:\n    \"\"\"\n    A class for extracting values from a dict\n    Supports looking up a single key, or assembling a dict from a collection of keys\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        Initializer\n        \"\"\"\n        pass\n\n    def get_value(self, my_dict, key):\n        \"\"\"\n        A recursive lookup that returns the first value found for `key` at any depth, e.g. in\n        data = {\n            'a': 1,\n            'b': 2,\n            'c': {\n                'd': 3,\n                'e': 4,\n                'f':{\n                    1: 2,\n                    2: 3,\n                    34: 5\n                }\n            }\n        }\n        \"\"\"\n\n        if isinstance(my_dict, dict):\n\n            if key in my_dict:\n                return my_dict.get(key)\n\n            for my_dict_key in my_dict:\n                nested = self.get_value(my_dict.get(my_dict_key), key)\n                if nested is not None:\n                    return nested\n\n        if isinstance(my_dict, list):\n            for my_dict_arr in my_dict:\n                nested = self.get_value(my_dict_arr, key)\n                if nested is not None:\n                    return nested\n\n    def list_for_key_to_dict(self, *args, my_dict):\n        \"\"\"\n        Takes one or more key strings plus the dict to parse\n        :param my_dict: the dict to parse\n        :param args: the key strings to look up\n        # list_for_key_to_dict(\"code\", \"pageNo\", \"goodsId\", my_dict=data)\n        :return: a new dict reassembled from the extracted keys\n        \"\"\"\n        result = {}\n        if len(args) > 0:\n            for key in args:\n                result.update({key: self.get_value(my_dict, key)})\n        return result","sub_path":"learn2020_03/practice/ApiTest/lib/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253005714","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom .models import (Profile, ProductStatus, ProductType, Product, OrderStatus, OrderType, Order, Tag, OrderSerialNo)\n\n\n\n\n# ================================ Order Type & Status =======================#\nclass OrderStatusListSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = OrderStatus\n        fields = '__all__'\n\nclass OrderTypeListSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = OrderType\n        fields = '__all__'\n# ================================ Tag 
=======================================#\n\nclass TagListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = '__all__'\n\nclass OrderSerialNoListSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrderSerialNo\n fields = '__all__'\n# ================================ ProductsType & Status =====================#\n\nclass ProductTypeListSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductType\n fields = '__all__'\n\nclass ProductStatusListSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductStatus\n fields = '__all__'\n\n# ================================ Profile =====================================#\n\nclass ProfileListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Profile\n fields = '__all__'\n\n# ================================ Users =====================================#\nclass UserProductListSerializer(serializers.ModelSerializer):\n type= ProductTypeListSerializer()\n status = ProductStatusListSerializer()\n tag =TagListSerializer(many=True)\n\n\n class Meta:\n model = Product\n fields = ['id','name','description','created_on','updated_on','pic','type','status','tag','quantity' ]\n\nclass UserOrderListSerializer(serializers.ModelSerializer):\n status = OrderStatusListSerializer()\n class Meta:\n model = Order\n fields = ['id','created_on','updated_on','status' ]\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n password = serializers.CharField(write_only=True)\n class Meta:\n model = User\n fields = ['username', 'password']\n\n def create(self, validated_data):\n username = validated_data['username']\n password = validated_data['password']\n new_user = User(username=username)\n new_user.set_password(password)\n new_user.save()\n return validated_data\n\nclass UserListSerializer(serializers.ModelSerializer):\n profile = ProfileListSerializer()\n orders = serializers.SerializerMethodField()\n products = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = ['id','first_name','last_name','username','email','is_superuser','is_staff','is_active','date_joined','last_login','profile', 'orders', 'products']\n\n def get_orders(self, obj):\n orders = obj.createdby.all()\n return UserOrderListSerializer(orders, many=True).data\n\n def get_products(self, obj):\n products = obj.product_set.all()\n return UserProductListSerializer(products, many=True).data\n\nclass UserUpdateSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['id','first_name','last_name','username','email','is_superuser','is_staff','is_active']\n\n# ================================ Orders =====================================#\n\nclass CommonUserListSerializer(serializers.ModelSerializer):\n profile = ProfileListSerializer()\n\n class Meta:\n model = User\n fields = ['id','first_name','last_name','username','email','is_superuser','is_staff','is_active','date_joined','last_login','profile']\n\nclass OrderProductIDsListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ['id','quantity']\n\nclass OrderListSerializer(serializers.ModelSerializer):\n created_by = CommonUserListSerializer()\n status = OrderStatusListSerializer()\n products = serializers.SerializerMethodField()\n orderSerialNo = OrderSerialNoListSerializer(many=True)\n\n class Meta:\n model = Order\n fields = ['id','created_on','updated_on','status','price', 'created_by', 'products' , 'orderSerialNo' ]\n\n def get_products(self, obj):\n products = obj.product_set.all()\n return 
OrderProductIDsListSerializer(products, many=True).data\n\nclass OrderCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = ['id','created_on','updated_on','price','status', 'created_by', 'updated_by','orderSerialNo' ]\n\n# ================================ Product =====================================#\n\nclass ProductListSerializer(serializers.ModelSerializer):\n created_by = CommonUserListSerializer()\n type= ProductTypeListSerializer()\n status = ProductStatusListSerializer()\n order = OrderListSerializer()\n tag =TagListSerializer(many=True)\n\n class Meta:\n model = Product\n fields = ['id','name','description','created_on','updated_on','pic','type','status','tag', 'created_by', 'order','price', 'quantity']\n\nclass ProductCreateSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ['id','name','description','created_on','updated_on','pic','type','status','tag', 'created_by' ,'price','quantity']\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"555410999","text":"# This script consists of all logical components responsible for analysing the video\n\nimport cv2\nimport numpy as np\nimport csv\nimport os\nimport time\nimport random\n\n\nclass Traffic_Analyzer:\n\n def __init__(self, video_path): # constructor takes path to the video as an argument\n self.FONT = cv2.FONT_HERSHEY_SIMPLEX\n self.COLOUR = (50, 200, 0)\n self.max_frames = 10 # for testing\n self.video_path = video_path\n self.dir_path = os.path.dirname(self.video_path) # directory path of the video\n self.output_video_file = \"\"\n self.output_csv_file = \"\"\n self.yolo_weights = \"yolov3.weights\" # path to the YOLO weights\n self.yolo_cfg = \"yolov3.cfg\" # path to the YOLO.cfg\n self.coco_names = \"coco.names\" # path to the possible analyzing classes\n\n # alternatives\n # self.yolo_weights = \"yolov3-tiny_final.weights\"\n # self.yolo_cfg = \"yolov3_tiny.cfg\"\n # self.coco_names = \"coco_tiny.names\"\n # self.yolo_weights = \"yolov3_320.weights\"\n # self.yolo_cfg = \"yolov3_320.cfg\"\n\n # Class responsible for finding objects, drawing them and creating the output video\n def video_analyze(self):\n\n # creating filename of output video\n t = time.localtime() # current time to have unique names\n self.current_time = time.strftime(\"%H_%M_%S\", t)\n self.random = random.randint(10,99)\n self.output_video_file = self.current_time + \"_\" + str(self.random) + \"_\" + os.path.split(self.video_path)[1].split(\".\")[0] + \".avi\"\n\n # loading the Deep Learning network\n net = cv2.dnn.readNet(self.yolo_weights, self.yolo_cfg)\n classes = [] # all possible classes\n with open(self.coco_names, \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n\n layers_names = net.getLayerNames()\n layers_output = [layers_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n video = cv2.VideoCapture(self.video_path)\n\n result = cv2.VideoWriter(self.output_video_file,\n cv2.VideoWriter_fourcc(*'MJPG'),\n 10, (int(video.get(3)), int(video.get(4))))\n\n # tab of objects of each category\n self.total_vehicles = []\n self.total_unknown = []\n self.total_cars = []\n self.total_trucks = []\n self.total_two_wheelers = []\n\n self.frame = 0\n success, img = video.read()\n while success:\n self.frame += 1\n # no. 
objects of each category in one frame\n unknown_number = 0\n vehicles_number = 0\n cars_number = 0\n two_wheelers_number = 0\n trucks_number = 0\n\n # analyzing only one of two consecutive frames to improve performance\n if self.frame % 2 == 1 and self.frame != 1:\n success, img = video.read()\n if self.frame % 2 == 1 and success:\n blob_results = cv2.dnn.blobFromImage(img, 0.00392, (320, 320), (0, 0, 0), True, False)\n img_height, img_width, img_channels = img.shape\n net.setInput(blob_results)\n outs = net.forward(layers_output)\n\n class_ids = []\n confidences = []\n boxes = []\n\n # detecting through network layers\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n # passing objects only if the confidence level is higher than 30%\n if confidence > 0.3:\n # Object detected\n center_x = int(detection[0] * img_width)\n center_y = int(detection[1] * img_height)\n width = int(detection[2] * img_width)\n height = int(detection[3] * img_height)\n\n # Rectangle\n x = int(center_x - width / 2)\n y = int(center_y - height / 2)\n\n boxes.append([x, y, width, height])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.6, 0.4)\n\n else:\n success, img = video.read()\n if success:\n for i in indexes:\n i = int(i)\n\n # drawing and counting objects based on the previous frame analysis\n if class_ids[i] in [0, 1, 2, 3, 5,\n 7]: # ids of analyzed classes (unknown, two-wheelers, cars, bus or trucks)\n vehicles_number += 1\n x, y, width, height = boxes[i]\n label = classes[class_ids[i]].upper()\n conf = str(round(confidences[i] * 100, 1)) + \"%\"\n cv2.rectangle(img, (x, y), (x + width, y + height), self.COLOUR, 2)\n cv2.putText(img, label, (x + 5, y + 15), self.FONT, 0.5, self.COLOUR, 1)\n cv2.putText(img, conf, (x + 5, y + height - 5), self.FONT, 0.5, self.COLOUR, 1)\n\n # counting objects per category\n if class_ids[i] == 0: # unknown\n unknown_number += 1\n if class_ids[i] in [1, 3]: # two-wheelers\n two_wheelers_number += 1\n elif class_ids[i] == 2: # cars\n cars_number += 1\n elif class_ids[i] in [5, 7]: # buses or trucks\n trucks_number += 1\n\n self.total_vehicles.append(vehicles_number)\n self.total_cars.append(cars_number)\n self.total_trucks.append(trucks_number)\n self.total_two_wheelers.append(two_wheelers_number)\n self.total_unknown.append(unknown_number)\n\n\n cv2.putText(img, str(self.frame), (20, 20), self.FONT, 0.5, self.COLOUR, 2)\n # cv2.imshow(\"Video\", img)\n result.write(img)\n print(\".\", end='')\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n video.release()\n result.release()\n cv2.destroyAllWindows()\n\n # Function responsible for timestamp counting and saving the results to CSV file\n def write_timestamps(self):\n\n # creating filename of the output CSV file\n self.output_csv_file = self.current_time + \"_\" + os.path.split(self.video_path)[1].split(\".\")[\n 0] + \".csv\" # current time to have unique names\n cameraCapture = cv2.VideoCapture(self.video_path)\n\n success, frame = cameraCapture.read()\n fps = cameraCapture.get(cv2.CAP_PROP_FPS)\n\n total_timestamp = []\n\n count = 0\n # counting all timestamps\n while success:\n success, frame = cameraCapture.read()\n count += 1\n time_stamp = count / fps\n total_timestamp.append(time_stamp)\n\n cv2.destroyAllWindows()\n cameraCapture.release()\n\n # saving the results\n with open(self.output_csv_file, 'w', newline='') as file:\n print(len(total_timestamp))\n 
print(len(self.total_unknown))\n print(len(self.total_vehicles))\n print(len(self.total_cars))\n print(len(self.total_trucks))\n print(len(self.total_two_wheelers))\n writer = csv.writer(file)\n writer.writerow([\"timestamp klatki\", \"liczba pojazdów\", \"liczba samochodów\", \"liczba jednośladów\",\n \"liczba samochodów wielkogabarytowych\", \"liczba obiektów nierozpoznanych\"])\n for i in range(len(self.total_vehicles)):\n writer.writerow([total_timestamp[i], self.total_vehicles[i], self.total_cars[i], self.total_two_wheelers[i], self.total_trucks[i], self.total_unknown[i]])","sub_path":"executable/gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404518324","text":"from django.contrib.auth.forms import User\nfrom django import forms\nfrom .models import Threat, Vulnerability, Likelihood, Impact, Risk, Control\n\nclass ThreatForm(forms.Form):\n THREAT_CHOICES = (\n ('Internal Personnel/Student', 'Internal Personnel/Student'),\n ('Acts of God/Nature', 'Acts of God/Nature'),\n ('Hacking', 'Hacking'),\n ('Theft by Individuals', 'Theft by Individuals'),\n ('Data misused by Third Party Provider', 'Data misused by Third Party Provider'),\n ('Others', 'Others (specify through description)'),\n )\n\n thr_name = forms.ChoiceField(label = 'Type of Threat', choices = THREAT_CHOICES)\n thr_description = forms.CharField(label = 'Description (Additional Information)', max_length = 250, widget = forms.Textarea())\n\nclass VulnerabilityForm(forms.Form):\n VUL_CHOICES = (\n ('Unknown/Unclassified Information', 'Unknown/Unclassified Information'),\n ('Lack of existing policies/procedures', 'Lack of existing policies/procedures'),\n ('Inconsistent implementation of policies/procedures', 'Inconsistent implementation of policies/procedures'),\n ('Lack of awareness training/campaign', 'Lack of awareness training/campaign'),\n ('Lack of physical/technical controls', 'Lack of physical/technical controls'),\n ('Lack of time/resources to implement policies and procedure', 'Lack of time/resources to implement policies and procedure'),\n ('Urgent needs and demands that contradict compliance', 'Urgent needs and demands that contradict compliance'),\n ('Negligence of individuals', 'Negligence of individuals'),\n ('Limitation of capabilities of processes/resources', 'Limitation of capabilities of processes/resources'),\n ('Lack of Data Sharing Agreement/Internal Sharing Policies', 'Lack of Data Sharing Agreement/Internal Sharing Policies'),\n ('Others', 'Others (specify through description)'),\n )\n\n vul_name = forms.ChoiceField(label = 'Type of Vulnerability', choices = VUL_CHOICES)\n vul_description = forms.CharField(label = 'Description (Additional Information)', max_length = 250, widget = forms.Textarea())\n\nclass LikelihoodForm(forms.Form):\n SCORE_CHOICES = (\n (1, '[1] Unlikely'),\n (2, '[2] Possible'),\n (3, '[3] Likely'),\n (4, '[4] Almost Certain'),\n )\n\n lk_score = forms.ChoiceField(label = 'Likelihood Score', choices = SCORE_CHOICES)\n lk_description = forms.CharField(label = 'Description (Additional Information)', max_length = 250, widget = forms.Textarea())\n\nclass ImpactForm(forms.Form):\n SCORE_CHOICES = (\n (1, '[1] Negligible'),\n (2, '[2] Limited'),\n (3, '[3] Significant'),\n (4, '[4] Maximum'),\n )\n\n imp_score = forms.ChoiceField(label = 'Impact Score', choices = SCORE_CHOICES)\n imp_description = forms.CharField(label = 'Description 
(Additional Information)', max_length = 250, widget = forms.Textarea())\n\n def __str__(self):\n template = '{0.id} - {0.description} - {0.score}'\n return template.format(self)\n\nclass RiskForm(forms.Form):\n\n TYPE_CHOICES = (\n ('Unauthorized Disclosure', 'Unauthorized Disclosure'),\n ('Unauthorized Purposes', 'Unauthorized Purposes'),\n ('Unauthorized Processing', 'Unauthorized Processing'),\n ('Access due to Negligence', 'Access due to Negligence'),\n ('Malicious Disclosure', 'Malicious Disclosure'),\n ('Improper Disposal', 'Improper Disposal'),\n ('Intentional Breach', 'Intentional Breach'),\n ('Concealment of Breach', 'Concealment of Breach'),\n ('Incorrect information stored', 'Incorrect information stored'),\n ('Loss of information', 'Loss of information'),\n ('Others', 'Others (specify through description)')\n )\n\n r_type = forms.ChoiceField(label = 'Type of Risk', choices = TYPE_CHOICES)\n r_description = forms.CharField(label = 'Description (Additional Information)', max_length = 250, widget = forms.Textarea())\n\nclass ControlForm(forms.Form):\n title = forms.CharField(max_length = 100)\n rec_controls = forms.CharField(max_length = 500, widget = forms.Textarea())\n","sub_path":"DaPComMS/PIA/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23078460","text":"from math import cos\r\n\r\nPI = 3.14\r\nEPS = 0.001\r\n\r\ndef func(var):\r\n return cos(PI/3 * var)\r\n\r\n\r\ndef is_more_index(var, array):\r\n index = 0\r\n for i in range(len(array)):\r\n if (array[i] >= var):\r\n index = i\r\n break \r\n\r\n return index\r\n\r\ndef get_values(var, vars_array, vars_result_array, count):\r\n index = is_more_index(var, vars_array)\r\n move = 0\r\n trbl = 0\r\n\r\n if (count > len(vars_array)):\r\n print(\"Too big polynom power\\n\")\r\n else:\r\n for i in range(count):\r\n if (index >= len(vars_array)):\r\n index = index - move - 1\r\n trbl += 1\r\n elif (index < 0):\r\n index = index - move + 1\r\n trbl += 1\r\n\r\n vars_result_array.append(vars_array[index])\r\n if ((i + trbl) % 2):\r\n move = -move + 1\r\n index += move\r\n else:\r\n move = -move - 1 - trbl\r\n index += move\r\n \r\n vars_result_array.sort()\r\n\r\ndef fill_results_y(y_array, result_y_array, x_array, result_x_array):\r\n for i in range(len(result_x_array)):\r\n for j in range(len(x_array)):\r\n if (result_x_array[i] == x_array[j]):\r\n result_y_array.append(y_array[j])\r\n\r\n\r\n\r\ndef divided_diff(x0 ,y0, x1, y1):\r\n return (y0 - y1) / (x0 - x1)\r\n\r\ndef big_divided_diff(x_array, y_array, result_array, result_divided_difs, count):\r\n step = 1\r\n result_divided_difs[0] = y_array[0]\r\n index = 1\r\n while (step != count):\r\n for i in range(count - step):\r\n y_array[i] = (divided_diff(x_array[i], y_array[i], x_array[i + step], y_array[i + 1]))\r\n \r\n step += 1\r\n result_divided_difs[index] = y_array[0]\r\n index += 1\r\n \r\n\r\ndef polynom(var, x_array, y_array):\r\n result = y_array[0]\r\n multiplier = 1\r\n for i in range(1, len(y_array)):\r\n multiplier *= (var - x_array[i - 1])\r\n result = result + multiplier * y_array[i]\r\n\r\n return result \r\n\r\n\r\ndef extrapolation_check(var, x_array):\r\n if (var < x_array[0] or var > x_array[len(x_array) - 1]):\r\n return True\r\n\r\n\r\ndef average(array):\r\n result = 0\r\n length = len(array)\r\n for i in range(length):\r\n result += array[i]\r\n \r\n return result / length\r\n\r\ndef dichotomy(start, finish):\r\n if 
(func(start) == 0):\r\n return start\r\n if (func(finish) == 0):\r\n return finish\r\n \r\n mid = (finish + start) / 2 \r\n if (func(mid) * func(start) < 0):\r\n finish = mid\r\n else:\r\n start = mid\r\n \r\n while (abs(start - finish) >= EPS):\r\n mid = (finish + start) / 2\r\n #mid = start + step\r\n if (func(mid) * func(start) <= 0):\r\n finish = mid\r\n else:\r\n start = mid\r\n\r\n return mid\r\n'''\r\ndef dichotomy(start, finish):\r\n if (func(start) == 0):\r\n return start\r\n if (func(finish) == 0):\r\n return finish\r\n\r\n while ((finish - start) / 2 > EPS):\r\n step = (finish - start) / 2\r\n mid = start + step\r\n if (func(mid) * func(start) < 0):\r\n finish = mid\r\n else:\r\n start = mid\r\n\r\n return mid\r\n'''\r\nf = open('table1.txt', 'r')\r\n\r\nx_array = []\r\ny_array = []\r\n\r\nfor str in f:\r\n string = str.split()\r\n x_array.append(float(string[0]))\r\n y_array.append(float(string[1]))\r\n\r\n\r\nf.close()\r\n\r\nvar = float(input(\"Input var: \"))\r\ngrade = int(input(\"Input polynom grade: \"))\r\n\r\nwhile (extrapolation_check(var, x_array)):\r\n print(\"Extrapolation is not permitted!\")\r\n try:\r\n var = float(input(\"Input another var: \"))\r\n except:\r\n print(\"Incorrect input! Try again.\")\r\n\r\n\r\nfor i in range(len(x_array)):\r\n print(x_array[i],\"\\n\")\r\n\r\n\r\nresults_x = []\r\nresults_y = []\r\n\r\n\r\n\r\nget_values(var, x_array, results_x , grade + 1)\r\nfill_results_y(y_array, results_y, x_array, results_x)\r\n\r\ndivided_difs = [0] * (len(results_x) - 1)\r\nresult_divided_difs = [0] * len(results_x)\r\n\r\nfor i in range(len(results_x)):\r\n print(\"res[%d] = %.4f\" % (i, results_x[i]))\r\nprint()\r\n\r\nfor i in range(len(results_y)):\r\n print(\"res[%d] = %.4f\" % (i, results_y[i]))\r\n\r\n\r\nbig_divided_diff(results_x, results_y, divided_difs, result_divided_difs, grade + 1)\r\n#big_divided_diff(results_x, divided_difs, divided_difs, result_divided_difs, 4)\r\nprint()\r\nfor i in range(len(result_divided_difs)):\r\n print(\"res1[%d] = %.4f\" % (i, result_divided_difs[i]))\r\n #print(\"res = \", divided_difs[i],\"\\n\")\r\n\r\nanswer = polynom(var, results_x, result_divided_difs)\r\n\r\nprint(\"Answer by polynom = %.3f\" % answer)\r\n#print(\"DIIIIIIO = %.3f\" % func(0.6))\r\nprint(\"Root by dichotomy = %.3f\" % dichotomy(x_array[12], x_array[len(x_array) - 1]))\r\n","sub_path":"Algorithms/algorithms0.py","file_name":"algorithms0.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23915475","text":"import camelot\nimport pandas as pd\nimport requests\nimport us\n\nfrom can_tools.scrapers import CMU\nfrom can_tools.scrapers.official.base import StateDashboard\n\n\nclass FloridaCountyVaccine(StateDashboard):\n has_location = False\n source = \"https://floridahealthcovid19.gov/#latest-stats\"\n location_type = \"county\"\n state_fips = int(us.states.lookup(\"Florida\").fips)\n fetch_url = \"http://ww11.doh.state.fl.us/comm/_partners/covid19_report_archive/vaccine/vaccine_report_latest.pdf\"\n source_name = \"Florida Department of Health\"\n\n def fetch(self):\n return camelot.read_pdf(self.fetch_url, pages=\"2-end\", flavor=\"stream\")\n\n def normalize(self, data):\n # read in data, remove extra header cols, rename column names\n dfs = []\n for el in data:\n dfs.append(self._truncate_data(el.df))\n df = pd.concat(dfs)\n\n # # Ignore data from unknown region (no fips code) and fix naming convention for problem counties, and total state vals\n 
df = df.query(\n \"location_name != 'Unknown' &\"\n \"location_name != 'Out-Of-State' &\"\n \"location_name != 'Total'\"\n )\n df = df.replace({\"location_name\": {\"Desoto\": \"DeSoto\", \"Dade\": \"Miami-Dade\"}})\n\n # Make all columns (except location) numeric\n for col in df.columns:\n if col == \"location_name\":\n continue\n else:\n df.loc[:, col] = pd.to_numeric(df.loc[:, col].str.replace(\",\", \"\"))\n\n # First dose and second dose need to be added together to get at least one vaccinated\n df.loc[:, \"first_dose_total\"] = df.eval(\n \"first_dose_total + series_complete_total\"\n )\n\n crename = {\n \"first_dose_new\": CMU(\n category=\"total_vaccine_initiated\",\n measurement=\"new\",\n unit=\"people\",\n ),\n \"series_complete_new\": CMU(\n category=\"total_vaccine_completed\",\n measurement=\"new\",\n unit=\"people\",\n ),\n \"total_people_vaccinated_new\": CMU(\n category=\"total_vaccine_doses_administered\",\n measurement=\"new\",\n unit=\"doses\",\n ),\n \"first_dose_total\": CMU(\n category=\"total_vaccine_initiated\",\n measurement=\"cumulative\",\n unit=\"people\",\n ),\n \"series_complete_total\": CMU(\n category=\"total_vaccine_completed\",\n measurement=\"cumulative\",\n unit=\"people\",\n ),\n \"total_people_vaccinated_total\": CMU(\n category=\"total_vaccine_doses_administered\",\n measurement=\"cumulative\",\n unit=\"doses\",\n ),\n }\n\n out = df.melt(id_vars=[\"location_name\"], value_vars=crename.keys()).dropna()\n out = self.extract_CMU(out, crename)\n out[\"vintage\"] = self._retrieve_vintage()\n out[\"dt\"] = self._get_date()\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n \"location_name\",\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n return out.loc[:, cols_to_keep]\n\n def _get_date(self):\n \"\"\"\n retrieve the date that the PDF was last updated minus one day, return as date.\n if connection to source cannot be made, use yesterday's date.\n \"\"\"\n res = requests.get(self.fetch_url)\n # if the connection fails, use yesterday's date as date\n if not res.ok:\n dt = self._retrieve_dtm1d(\"US/Eastern\")\n else:\n dt = pd.to_datetime(\n res.headers[\"Last-Modified\"], format=\"%a, %d %b %Y %H:%M:%S GMT\"\n ) - pd.Timedelta(days=1)\n return dt.date()\n\n def _truncate_data(self, data):\n \"\"\"\n fix the column names and remove all the gibberesh data before the first county/real entry in the table.\n **this method feels like a band-aid fix, but it has worked for the past two weeks and im not sure of a better way**\n \"\"\"\n data.columns = [\n \"location_name\",\n \"first_dose_new\",\n \"series_complete_new\",\n \"total_people_vaccinated_new\",\n \"first_dose_total\",\n \"series_complete_total\",\n \"total_people_vaccinated_total\",\n ]\n\n # the data/table starts two lines after 'County of residence' appears in the location column\n return data[data.query(\"location_name == 'County of residence'\").index[0] + 2 :]\n","sub_path":"can_tools/scrapers/official/FL/fl_vaccine.py","file_name":"fl_vaccine.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"556528720","text":"import pickle\nimport time\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\nfrom alexnet import AlexNet\n\nnb_classes = 43\nepochs = 2\nbatch_size = 128\n\ntry:\n f = open(\"train.p\",\"rb\")\n data = pickle.load(f)\n f.close()\nexcept Exception as ex:\n template 
= \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n\nX_train, X_val, y_train, y_val = train_test_split(data['features'], data['labels'], test_size=0.33, random_state=0)\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 3))\ny = tf.placeholder(tf.int64, None)\none_hot_y = tf.one_hot(y, nb_classes)\nresized = tf.image.resize_images(x, (227, 227))\n\n# Returns the second final layer of the AlexNet model,\n# this allows us to redo the last layer for the traffic signs\n# model.\nfc7 = AlexNet(resized, feature_extract=True)\nfc7 = tf.stop_gradient(fc7)\nshape = (fc7.get_shape().as_list()[-1], nb_classes)\nfc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))\nfc8b = tf.Variable(tf.zeros(nb_classes))\nlogits = tf.matmul(fc7, fc8W) + fc8b\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_op = tf.reduce_mean(cross_entropy)\nopt = tf.train.AdamOptimizer()\ntrain_op = opt.minimize(loss_op, var_list=[fc8W, fc8b])\ninit_op = tf.global_variables_initializer()\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\ndef evaluate(X_data, y_data, sess):\n num_examples = len(X_data)\n total_accuracy = 0\n #sess = tf.get_default_session()\n for offset in range(0, num_examples, batch_size):\n batch_x, batch_y = X_data[offset:offset+batch_size], y_data[offset:offset+batch_size]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\nwith tf.Session() as sess:\n sess.run(init_op)\n\n for i in range(epochs):\n # training\n print(\"Training Epoch: \", i+1)\n X_train, y_train = shuffle(X_train, y_train)\n \n for offset in range(0, X_train.shape[0], batch_size):\n end = offset + batch_size\n sess.run(train_op, feed_dict={x: X_train[offset:end], y: y_train[offset:end]})\n \n validation_accuracy = evaluate(X_val, y_val, sess)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n #print()\n print(\"\")\n","sub_path":"train_feature_extraction.py","file_name":"train_feature_extraction.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"234470852","text":"from mpi4py import MPI\nimport math\nimport numpy as np\n\ndef chunkify(l, wp):\n # Yield successive wp-sized chunks from l.\n for i in range(0, len(l), wp): \n yield l[i:i + wp]\n\ndef serial_dot_product(a, b):\n dot_product = 0\n for i, j in zip(a, b):\n dot_product += i * j\n return dot_product\n\ndef main():\n # initialize the MPI world communicator\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n \n # split the data (scatter expects an indexable list with one chunk per rank, not a generator)\n if rank == 0:\n a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n b = [30, 40, 50, 60, 70, 35, 91, 100, 125, 0, 5, 3]\n wp = math.ceil(len(a) / size)#work per processor\n chunks_a = list(chunkify(a, wp))\n chunks_b = list(chunkify(b, wp))\n else:\n chunks_a = None\n chunks_b = None\n\n my_chunk_a = comm.scatter(chunks_a, root=0)\n my_chunk_b = comm.scatter(chunks_b, root=0)\n\n #process the data\n dot_product_per_chunk = np.array(serial_dot_product(my_chunk_a, my_chunk_b))\n\n #collect the results\n dot_product = np.array(0, dtype=np.int)\n comm.Reduce([dot_product_per_chunk, MPI.INT],\n [dot_product, MPI.INT],\n op=MPI.SUM,\n root=0)\n\n #print result\n if 
rank == 0:\n print(\"dot product:\", dot_product)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Semilleros/HPC/MPI/Python/parallel_dot_product.py","file_name":"parallel_dot_product.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"335800523","text":"#-*-coding:utf8-*-\r\nfrom fonctions import *\r\nimport pygame\r\nfrom random import randint\r\nfrom pygame.locals import *\r\nimport time\r\nimport traceback\r\npygame.init()\r\n\r\ntry:\r\n #initialization\r\n background = (116,197,205) #Sets a sky-blue background colour\r\n fenetre=pygame.display.set_mode((800,600)) #Creates a window\r\n #800 px wide by 600 px high\r\n pygame.display.set_caption(\"Flappy Bird - Classique\") #Names the window\r\n\r\n police = pygame.font.Font(None,50) #Uses the default font\r\n #(size 50) to display the score\r\n\r\n perso = pygame.image.load(\"perso_blue.png\").convert_alpha() #Makes the\r\n #character's background transparent\r\n perso = pygame.transform.scale(perso,(40,40)) #Resizes the character\r\n #image to 40 by 40\r\n pygame.display.set_icon(perso) #Uses the character as the program icon\r\n\r\n #Importing the sounds\r\n sound_jump = pygame.mixer.Sound(\"jump.wav\")\r\n sound_collision = pygame.mixer.Sound(\"collision.wav\")\r\n sound_gameover = pygame.mixer.Sound(\"gameover.wav\")\r\n sound_score = pygame.mixer.Sound(\"score.wav\")\r\n #Adjusting the sound volumes\r\n sound_jump.set_volume(1.0)\r\n sound_collision.set_volume(0.2)\r\n sound_gameover.set_volume(1.0)\r\n sound_score.set_volume(0.5)\r\n\r\n img_gameover = pygame.image.load(\"gameover.png\").convert_alpha() #Imports\r\n #the image shown on a game over\r\n\r\n obs = pygame.image.load(\"tuyau.png\") #Imports\r\n #the pipe image (the obstacles)\r\n obs = pygame.transform.scale(obs, (50,1000)) #Resizes\r\n #the obstacle image to 50 by 1000\r\n\r\n perso_rect, liste_obs, score, gameover, gravite, saut, continuer, clock = initialisation_jeu(background, fenetre, perso, obs) #Calls the function that initializes the game\r\n\r\n #program\r\n while continuer:\r\n clock.tick(60) #Caps the loop at 60 frames per second\r\n for event in pygame.event.get():\t#events\r\n if event.type==QUIT: #Closes the window when the cross is clicked\r\n continuer=0\r\n elif event.type == KEYDOWN:\r\n if event.key == K_SPACE and gameover == 0:\r\n #When the user presses the SPACE key, the gravite\r\n #variable is set to 0: the character enters its jump phase\r\n gravite = 0\r\n sound_jump.play() #Plays the sound_jump sound when the character jumps\r\n elif event.key == K_UP and gameover == 1:\r\n #On a game over, pressing the UP key\r\n #calls the \"initialisation_jeu\" function again\r\n gameover = 0\r\n perso_rect, liste_obs, score, gameover, gravite, saut, continuer, clock = initialisation_jeu(background, fenetre, perso, obs)\r\n\r\n #everything below runs once per frame\r\n #Gravity phase: the character moves down 3 pixels\r\n if gravite == 1:\r\n perso_rect = perso_rect.move(0,3)\r\n else:\r\n #Jump phase: the character makes 10 hops of 7 pixels\r\n if saut >= 10:\r\n gravite = 1\r\n saut = 0\r\n else:\r\n perso_rect = monte_perso(perso_rect)\r\n saut += 1\r\n\r\n if gameover: #When the \"gameover\" variable equals 1, display\r\n 
#the \"img_gameover\" image\r\n fenetre.blit(img_gameover, (0,0))\r\n pygame.display.flip()\r\n else:\r\n liste_obs = deplac_obs(liste_obs) #move the obstacles\r\n gameover = collision(liste_obs, perso_rect, gameover, sound_collision, sound_gameover, time)\r\n #test the character's collisions\r\n score = compteurScore(score,liste_obs, sound_score) #increment the score\r\n affichage_fenetre(fenetre, background, score, police, perso, perso_rect, obs,liste_obs)\t#refresh the screen\r\n\r\nexcept:\r\n traceback.print_exc()\r\nfinally:\r\n pygame.quit()\r\n exit()\r\n","sub_path":"classique.py","file_name":"classique.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349918681","text":"import networkx as nx\nimport pytest\n\nfrom cascade.input_data.db.locations import (\n get_descendants, location_id_from_location_and_level, location_hierarchy,\n location_id_from_start_and_finish, all_locations_with_these_parents\n)\n\n\nclass MockLocation:\n def __init__(self, node_id, nodes):\n self._node_id = node_id\n self._nodes = nodes\n self._node_parents = {v: k for k, vs in nodes.items() for v in vs}\n\n def get_node_by_id(self, node_id):\n return MockLocation(node_id, self._nodes)\n\n def get_nodelvl_by_id(self, node_id):\n def ancestors(node):\n parent = node.parent\n if parent is None:\n return 0\n else:\n return 1 + ancestors(parent)\n\n return ancestors(self.get_node_by_id(node_id))\n\n @property\n def id(self):\n return self._node_id\n\n @property\n def parent(self):\n parent_id = self._node_parents.get(self._node_id)\n if parent_id is not None:\n return self.get_node_by_id(parent_id)\n else:\n return None\n\n @property\n def children(self):\n return [self.get_node_by_id(child_id) for child_id in self._nodes[self.id]]\n\n def all_descendants(self):\n def recursive_children(node):\n result = []\n for child in node.children:\n result.append(child)\n result.extend(recursive_children(child))\n return result\n\n return recursive_children(self)\n\n\n@pytest.fixture\ndef sample_locations():\n G = nx.DiGraph()\n G.add_nodes_from(list(range(8)))\n G.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6), (6, 7)])\n assert len(G.nodes) == 8\n return G\n\n\n@pytest.mark.parametrize(\"parents,expected\", [\n ([0], [0, 1, 2]),\n ([0, 1], [0, 1, 2, 3, 4]),\n ([2], [2, 5, 6]),\n ([2, 5], [2, 5, 6]),\n ([2, 6], [2, 5, 6, 7]),\n ([6], [6, 7]),\n ([7], [7]),\n])\ndef test_all_with_parents__happy(sample_locations, parents, expected):\n result = all_locations_with_these_parents(sample_locations, parents)\n result.sort()\n assert result == expected\n\n\ndef test_get_descendants__all_descendants(sample_locations):\n parent_location_id = 0\n # descendants of global\n assert set(get_descendants(sample_locations, parent_location_id)) == set(range(1, 8))\n\n parent_location_id = 7\n # descendants of a leaf (i.e. 
nothing)\n assert set(get_descendants(sample_locations, parent_location_id)) == set()\n\n\ndef test_get_descendants__only_children(sample_locations):\n parent_location_id = 0\n # children of global\n assert set(get_descendants(sample_locations, parent_location_id, children_only=True)) == {1, 2}\n\n parent_location_id = 5\n # children of a leaf\n assert set(get_descendants(sample_locations, parent_location_id, children_only=True)) == set()\n\n\ndef test_get_descendants__include_parent(sample_locations):\n parent_location_id = 0\n # descendants of global and iteslf\n assert set(get_descendants(sample_locations, parent_location_id, include_parent=True)) == set(range(0, 8))\n # children of global and iteslf\n assert set(get_descendants(\n sample_locations, parent_location_id, children_only=True, include_parent=True)) == {0, 1, 2}\n\n parent_location_id = 5\n # descendants of a leaf and itself\n assert set(get_descendants(sample_locations, parent_location_id, include_parent=True, children_only=True)) == {5}\n # children of a leaf and itself\n assert set(get_descendants(sample_locations, parent_location_id, include_parent=True, children_only=True)) == {5}\n\n\ndef test_location_id_from_location_and_level__happy_path(sample_locations):\n assert location_id_from_location_and_level(sample_locations, 0, 1)[0] == 0\n assert location_id_from_location_and_level(sample_locations, 7, 1)[0] == 0\n assert location_id_from_location_and_level(sample_locations, 7, 2)[0] == 2\n assert location_id_from_location_and_level(sample_locations, 4, 2)[0] == 1\n assert location_id_from_location_and_level(sample_locations, 7, \"most_detailed\")[0] == 7\n\n\ndef test_drill_from_location_and_level__happy_path(sample_locations):\n assert location_id_from_location_and_level(sample_locations, 0, 1) == [0]\n assert location_id_from_location_and_level(sample_locations, 7, 1) == [0, 2, 6, 7]\n assert location_id_from_location_and_level(sample_locations, 7, 2) == [2, 6, 7]\n assert location_id_from_location_and_level(sample_locations, 4, 2) == [1, 4]\n assert location_id_from_location_and_level(sample_locations, 7, \"most_detailed\") == [7]\n\n\n@pytest.mark.parametrize(\"start,finish,ans\", [\n (0, 1, [0, 1]),\n (None, 1, [0, 1]),\n (None, 4, [0, 1, 4]),\n (None, \"4\", [0, 1, 4]),\n (2, 7, [2, 6, 7]),\n (None, 7, [0, 2, 6, 7]),\n])\ndef test_drill_from_location_and_level__no_start(sample_locations, start, finish, ans):\n assert location_id_from_start_and_finish(sample_locations, start, finish) == ans\n\n\ndef test_location_id_from_location_and_level__too_low(sample_locations):\n with pytest.raises(Exception):\n location_id_from_location_and_level(sample_locations, 0, \"most_detailed\")\n\n with pytest.raises(Exception):\n location_id_from_location_and_level(sample_locations, 2, 3)\n\n\ndef test_location_hierarchy_networkx(ihme):\n locs = location_hierarchy(6, location_set_id=35)\n assert nx.is_directed_acyclic_graph(locs)\n assert nx.dag_longest_path_length(locs) == 6\n assert nx.dag_longest_path(locs)[0] == 1\n assert locs.nodes[1][\"level\"] == 0\n assert locs.nodes[13][\"location_name\"] == \"Malaysia\"\n\n\ndef test_ancestors_level(ihme):\n locs = location_hierarchy(6, location_set_id=35)\n drill = list(nx.topological_sort(nx.subgraph(locs, nbunch=nx.ancestors(locs, 491))))\n assert drill == [1, 4, 5, 6]\n","sub_path":"tests/input_data/db/test_location.py","file_name":"test_location.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
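# Illustrative sketch only: the test record above exercises cascade's get_descendants()
# against a NetworkX DiGraph of locations. cascade's real implementation is not shown in
# this file; the tested behaviour can, however, be reproduced with networkx primitives
# (get_descendants_sketch is a hypothetical helper name, not part of cascade):
import networkx as nx

def get_descendants_sketch(G, parent, children_only=False, include_parent=False):
    # Direct children come from G.successors(); the full closure from nx.descendants().
    found = set(G.successors(parent)) if children_only else nx.descendants(G, parent)
    if include_parent:
        found.add(parent)
    return found

# Same toy hierarchy as the sample_locations fixture above:
G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6), (6, 7)])
assert get_descendants_sketch(G, 0) == {1, 2, 3, 4, 5, 6, 7}
assert get_descendants_sketch(G, 2, children_only=True) == {5, 6}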
+{"seq_id":"586323133","text":"import time\nimport cv2\nfrom kafka import SimpleProducer, KafkaClient\n# connect to Kafka\nkafka = KafkaClient('localhost:9092')\nproducer = SimpleProducer(kafka)\ntopic = 'webcam'\n\n\ndef video_emitter(video=0):\n # Open the video\n video = cv2.VideoCapture(video)\n print(' emitting.....')\n\n # read the file\n while video.isOpened():\n # read the image in each frame\n success, image = video.read()\n # check if the file has read to the end\n if not success:\n break\n ret, jpeg = cv2.imencode('.png', image)\n cv2.imshow('my webcam', image)\n \n producer.send_messages(topic, jpeg.tobytes())\n if cv2.waitKey(25) == 27: # esc to exit\n break\n time.sleep(0.5)\n cv2.destroyAllWindows()\n # clear the capture\n video.release()\n print('done emitting')\n\nif __name__ == '__main__':\n video_emitter()\n","sub_path":"producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"50650234","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nmp=np.arange(0,100);\r\n\r\nfor i in range(0,100):\r\n if mp[i]<=9:\r\n mp[i] = ((mp[i]**2)-7)\r\n \r\n elif mp[i]>=10:\r\n mp[i] = mp[i-10]\r\n \r\n plt.stem(mp,use_line_collection=True)\r\n plt.xlabel(\"n = 0 to 99\")\r\n plt.ylabel(\"f(n)\")\r\n plt.title('Graph of the function')\r\n \r\n plt.figure()\r\n plt.show()","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"212655111","text":"import time\nimport webbrowser\n\ntotalBreak = 3\ncurrentBreak = 0\nprint(\"This program started on \" + time.ctime())\nwhile currentBreak < totalBreak:\n time.sleep(5)\n webbrowser.open(\"http://www.bilibili.com\")\n currentBreak = currentBreak + 1\n","sub_path":"基本语法/webOpen.py","file_name":"webOpen.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221408297","text":"from os import path\nfrom sys import argv\n\ndef Nerum(Class, Args=()):\n \n if len(argv) < 2:\n print('usage: {} func_name [args]'.\n format(path.basename(argv[0])))\n return\n\n func = Class.__getattribute__(Class, argv[1])\n try:\n ret = eval('func(Class(*Args),{})'.\n format(''.join(argv[2:])))\n except Exception as e:\n print('error: {}'.format(e))\n return\n print(ret)\n","sub_path":"nerum.py","file_name":"nerum.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507460613","text":"\"\"\"\ndesispec.io.frame\n=================\n\nIO routines for frame.\n\"\"\"\nimport os.path\n\nimport numpy as np\nimport scipy,scipy.sparse\nfrom astropy.io import fits\n\nfrom desispec.frame import Frame\nfrom desispec.io import findfile\nfrom desispec.io.util import fitsheader, native_endian, makepath\nfrom desispec.log import get_logger\n\nlog = get_logger()\n\ndef write_frame(outfile, frame, header=None):\n \"\"\"Write a frame fits file and returns path to file written.\n\n Args:\n outfile: full path to output file, or tuple (night, expid, channel)\n frame: desispec.frame.Frame object with wave, flux, ivar...\n header: optional astropy.io.fits.Header or dict to override frame.header\n \n Returns:\n full filepath of output file that was written\n \n Note:\n to create a Frame object to pass into write_frame,\n frame = 
Frame(wave, flux, ivar, resolution_data)\n \"\"\"\n outfile = makepath(outfile, 'frame')\n\n if header is not None:\n hdr = fitsheader(header)\n else:\n hdr = fitsheader(frame.header)\n\n if 'SPECMIN' not in hdr:\n hdr['SPECMIN'] = 0\n if 'SPECMAX' not in hdr:\n hdr['SPECMAX'] = hdr['SPECMIN'] + frame.nspec\n\n hdus = fits.HDUList()\n x = fits.PrimaryHDU(frame.flux, header=hdr)\n x.header['EXTNAME'] = 'FLUX'\n hdus.append(x)\n\n hdus.append( fits.ImageHDU(frame.ivar, name='IVAR') )\n hdus.append( fits.ImageHDU(frame.mask, name='MASK') )\n hdus.append( fits.ImageHDU(frame.wave, name='WAVELENGTH') )\n hdus.append( fits.ImageHDU(frame.resolution_data, name='RESOLUTION' ) )\n \n hdus.writeto(outfile, clobber=True)\n\n return outfile\n\ndef read_frame(filename, nspec=None):\n \"\"\"Reads a frame fits file and returns its data.\n\n Args:\n filename: path to a file, or (night, expid, camera) tuple where\n night = string YEARMMDD\n expid = integer exposure ID\n camera = b0, r1, .. z9\n\n Returns:\n desispec.Frame object with attributes wave, flux, ivar, etc.\n \"\"\"\n #- check if filename is (night, expid, camera) tuple instead\n if not isinstance(filename, (str, unicode)):\n night, expid, camera = filename\n filename = findfile('frame', night, expid, camera)\n\n if not os.path.isfile(filename) :\n raise IOError(\"cannot open\"+filename)\n\n fx = fits.open(filename, uint=True)\n hdr = fx[0].header\n flux = native_endian(fx['FLUX'].data)\n ivar = native_endian(fx['IVAR'].data)\n wave = native_endian(fx['WAVELENGTH'].data)\n if 'MASK' in fx:\n mask = native_endian(fx['MASK'].data)\n else:\n mask = None #- let the Frame object create the default mask\n \n resolution_data = native_endian(fx['RESOLUTION'].data)\n fx.close()\n\n if nspec is not None:\n flux = flux[0:nspec]\n ivar = ivar[0:nspec]\n resolution_data = resolution_data[0:nspec]\n\n # return flux,ivar,wave,resolution_data, hdr\n return Frame(wave, flux, ivar, mask, resolution_data, hdr)\n","sub_path":"py/desispec/io/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271931675","text":"#!/usr/bin/env python2\nimport json\nwith open('config.json.default') as data:\n d = json.load(data)\n data.close()\ntoken = raw_input(\"Please enter your Discord Token: \")\nd['Token']=token\nmongodb = raw_input(\"Please enter your MongoDB Connect String: \")\nd['MongoDB']=mongodb\nconfig = json.dumps(d)\nconfig_file = open(\"config.json\", \"w\")\nconfig_file.write(config)\n","sub_path":"setup_default.py","file_name":"setup_default.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112921237","text":"#!/usr/bin/env python3\n\n\"\"\"\nComputes statistics from stored FID measurements.\n\"\"\"\n\nimport pathlib\nimport numpy as np\n\n\nmap_light_to_index = {\n \"blue\": 0,\n \"green\": 1,\n \"red\": 2,\n \"b_white\": 3,\n \"m_white\": 4,\n \"l_white\": 5,\n}\n\n\ndef print_latex_table(mat: np.ndarray):\n assert mat.shape == (6, 6)\n for i in range(6):\n txt = []\n for j in range(6):\n txt.append(\" & \" + str(mat[i, j]))\n print(\"\".join(txt))\n\n\ndef make_fid_stats(path: pathlib.Path) -> np.ndarray:\n txt = path.read_text()\n txt = txt.strip().split(\"\\n\")\n mat = np.zeros((6, 6), dtype=np.float)\n for line in txt:\n light1, light2, fid = line.split(\",\")\n idx1 = map_light_to_index[light1]\n idx2 = map_light_to_index[light2]\n 
mat[idx1, idx2] = fid\n mat[idx2, idx1] = fid\n print(\"-\"*50)\n print(path)\n # print_latex_table(mat)\n mat = np.tril(mat)\n array = mat.flatten()\n array = array[array.nonzero()]\n print(array)\n print(\"\\tmean:\\t\", np.mean(array))\n print(\"\\tstd:\\t\", np.std(array))\n\n return np.tril(mat)\n\n\ndef main():\n fid_dir = pathlib.Path(\"/home/dominik/data/ETH/MasterThesis/data\")\n make_fid_stats(fid_dir / \"fid_original.txt\")\n make_fid_stats(fid_dir / \"fid_style3.txt\")\n make_fid_stats(fid_dir / \"fid_pix2pix.txt\")\n make_fid_stats(fid_dir / \"fid_20_sib_cropped.txt\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/fid_statistics.py","file_name":"fid_statistics.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296432376","text":"import os\nimport json\nimport sqlite3\nimport urllib2\nimport sys\n\nimport pynstagram\nfrom PIL import Image\n\ndef post():\n\t# create necessary directories\n\tif not os.path.exists('raw/'):\n\t\tos.makedirs('raw/')\n\tif not os.path.exists('pics/'):\n\t\tos.makedirs('pics/')\n\n\tconn = sqlite3.connect('ifunny.db')\n\tc = conn.cursor()\n\n\tstatus = 1\n\n\trow = c.execute('SELECT * FROM images WHERE posted=0 ORDER BY id ASC').fetchone()\n\n\tif row is None:\n\t\tconn.close()\n\t\tsys.exit(0)\n\n\timage_path = 'raw/{}.{}'.format(row[1], row[2][row[2].rindex('.')+1:])\n\tpic_path = image_path.replace('raw/', 'pics/')\n\twith open(image_path, 'wb') as file:\n\t\tfile.write(urllib2.urlopen(row[2]).read())\n\n\t# load credentials\n\twith open('credentials.json') as file:\n\t\tcredentials = json.load(file)\n\n\t# edit image\n\timage = Image.open(image_path)\n\twidth, height = image.size\n\tif width > height * 2 or height > width * 2 or image_path.endswith('.gif'):\n\t\tstatus = -1\n\telse:\n\t\t# convert photo to jpg and make square\n\t\tif width == height:\n\t\t\timage.convert('RGB').save(pic_path, 'JPEG')\n\t\telif width > height:\n\t\t\tpadded = Image.new('RGB', (width, width), (0, 0, 0))\n\t\t\tpadded.paste(image, (0, width / 2 - height / 2))\n\t\t\tpadded.save(pic_path, 'JPEG')\n\t\telif height > width:\n\t\t\tpadded = Image.new('RGB', (height, height), (0, 0, 0))\n\t\t\tpadded.paste(image, (height / 2 - width / 2, 0))\n\t\t\tpadded.save(pic_path, 'JPEG')\n\n\t\t# upload photo\n\t\twith pynstagram.client(credentials['username'], credentials['password']) as client:\n\t\t\ttry:\n\t\t\t\tclient.upload(pic_path)\n\t\t\texcept:\n\t\t\t\tstatus = -2\n\n\t# set posted status\n\tc.execute('UPDATE images SET posted=? 
WHERE id=?', (status, row[0]))\n\n\tconn.commit()\n\tconn.close()\n\nif __name__ == '__main__':\n\tpost()\n","sub_path":"post_instagram.py","file_name":"post_instagram.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"455118503","text":"# -*- coding: utf-8 -*-\n\nimport psycopg2\nimport logging\nfrom openerp.osv import osv\nfrom openerp import SUPERUSER_ID\nfrom abc import ABCMeta, abstractmethod\n\nlogger = logging.getLogger(__name__)\nABSTRACT_MODEL_NAME = 'abstract.materialized.sql.view'\n\n\nclass AbstractMaterializedSqlView(osv.AbstractModel):\n \"\"\"This class is an abstract model to help developer to create/refresh/update\n materialized view.\n \"\"\"\n _name = ABSTRACT_MODEL_NAME\n _description = u\"This is an helper class to manage materialized SQL view\"\n _auto = False\n\n _sql_mat_view_name = ''\n \"\"\"The name of the materialized sql view.\n Must be defined in inherit class (using inherit = [])\n \"\"\"\n _sql_view_name = ''\n \"\"\"The name of the sql view used to generate the materialized view\n Must be defined in inherit class (using inherit = [])\n \"\"\"\n _sql_view_definition = ''\n \"\"\"The sql query to generate the view (without any create views)\n \"\"\"\n\n def init(self, cr):\n \"\"\"Init method is called when installing or updating the module.\n As we can't know if the model of the sql changed, we have to drop materialized view\n and recreate it.\n \"\"\"\n if hasattr(super(AbstractMaterializedSqlView, self), 'init'):\n super(self, AbstractMaterializedSqlView).init(cr)\n\n # prevent against Abstract class initialization\n if self._name == ABSTRACT_MODEL_NAME:\n return\n\n logger.info(u\"Init materialized view, using Postgresql %r\",\n cr._cnx.server_version)\n self.create_or_upgrade_pg_matview_if_needs(cr, SUPERUSER_ID)\n\n def safe_properties(self):\n if not self._sql_view_definition:\n raise osv.except_osv(u\"Properties must be defined in subclass\",\n u\"_sql_view_definition properties should be redefined in subclass\"\n )\n if not self._sql_mat_view_name:\n self._sql_mat_view_name = self._table\n if not self._sql_view_name:\n self._sql_view_name = self._table + '_view'\n\n def create_materialized_view(self, cr, uid, context=None):\n self.safe_properties()\n if not context:\n context = {}\n result = []\n logger.info(\"Create Materialized view %r\", self._sql_mat_view_name)\n pg_version = context.get('force_pg_version', cr._cnx.server_version)\n self.change_matview_state(cr, uid, 'before_create_view', pg_version, context=context)\n try:\n pg = PGMaterializedViewManager.getInstance(pg_version)\n # make sure there is no existing views create uppon the same version\n # this could be possible if materialized.sql.view entry is deleted\n # TODO: maybe move it in create_or_upgrade_pg_matview_if_needs and\n # automaticly detect if it's a mat view or a table cf utests\n pg.drop_mat_view(cr, self._sql_view_name, self._sql_mat_view_name)\n self.before_create_materialized_view(cr, uid, context=context)\n pg.create_mat_view(cr, self._sql_view_definition, self._sql_view_name,\n self._sql_mat_view_name)\n self.after_create_materialized_view(cr, uid, context=context)\n except psycopg2.Error as e:\n self.report_sql_error(cr, uid, e, pg_version, context=context)\n else:\n result = self.change_matview_state(cr, uid, 'after_refresh_view', pg_version,\n context=context)\n return result\n\n def refresh_materialized_view(self, cr, uid, context=None):\n self.safe_properties()\n result = 
self.create_or_upgrade_pg_matview_if_needs(cr, uid, context=context)\n if not result:\n logger.info(\"Refresh Materialized view %r\", self._sql_mat_view_name)\n if not context:\n context = {}\n pg_version = context.get('force_pg_version', cr._cnx.server_version)\n self.change_matview_state(cr, uid, 'before_refresh_view', pg_version,\n context)\n try:\n self.before_refresh_materialized_view(cr, uid, context=context)\n pg = PGMaterializedViewManager.getInstance(pg_version)\n pg.refresh_mat_view(cr, self._sql_view_name, self._sql_mat_view_name)\n self.after_refresh_materialized_view(cr, uid, context=context)\n except psycopg2.Error as e:\n self.report_sql_error(cr, uid, e, pg_version, context=context)\n else:\n result = self.change_matview_state(cr, uid, 'after_refresh_view', pg_version,\n context=context)\n return result\n\n def create_or_upgrade_pg_matview_if_needs(self, cr, uid, context=None):\n \"\"\"Compare everything that can cause the needs to drop and recreate materialized view\n Return True if something done\n \"\"\"\n self.safe_properties()\n matview_mdl = self.pool.get('materialized.sql.view')\n if not context:\n context = {}\n ids = matview_mdl.search_materialized_sql_view_ids_from_matview_name(\n cr, uid, self._sql_mat_view_name, context=context)\n if ids:\n # As far matview_mdl is refered by its view name, to get one or more instance\n # is technicly the same.\n id = ids[0]\n rec = matview_mdl.read(cr, uid, id, ['pg_version', 'sql_definition', 'view_name',\n 'state'],\n context=context)\n pg_version = context.get('force_pg_version', cr._cnx.server_version)\n pg = PGMaterializedViewManager.getInstance(cr._cnx.server_version)\n if(rec['pg_version'] != pg_version or\n rec['sql_definition'] != self._sql_view_definition or\n rec['view_name'] != self._sql_view_name or\n rec['state'] in ['nonexistent', 'aborted'] or\n not pg.is_existed_relation(cr, self._sql_view_name) or\n not pg.is_existed_relation(cr, self._sql_mat_view_name)):\n self.drop_materialized_view_if_exist(\n cr, uid, rec['pg_version'], view_name=rec['view_name'], context=context)\n else:\n return []\n\n return self.create_materialized_view(cr, uid, context=context)\n\n def change_matview_state(self, cr, uid, method_name, pg_version, context=None):\n matview_mdl = self.pool.get('materialized.sql.view')\n # Make sure object exist or create it\n values = {\n 'model_name': self._name,\n 'view_name': self._sql_view_name,\n 'matview_name': self._sql_mat_view_name,\n 'sql_definition': self._sql_view_definition,\n 'pg_version': pg_version,\n }\n matview_mdl.create_if_not_exist(cr, uid, values, context=context)\n method = getattr(matview_mdl, method_name)\n context.update({'values': values})\n return method(cr, uid, self._sql_mat_view_name, context=context)\n\n def drop_materialized_view_if_exist(self, cr, uid, pg_version, view_name=None,\n mat_view_name=None, context=None):\n self.safe_properties()\n result = []\n logger.info(\"Drop Materialized view %r\", self._sql_mat_view_name)\n try:\n self.before_drop_materialized_view(cr, uid, context=context)\n pg = PGMaterializedViewManager.getInstance(pg_version)\n if not view_name:\n view_name = self._sql_view_name\n if not mat_view_name:\n mat_view_name = self._sql_mat_view_name\n pg.drop_mat_view(cr, view_name, mat_view_name)\n self.after_drop_materialized_view(cr, uid, context=context)\n except psycopg2.Error as e:\n self.report_sql_error(cr, uid, e, pg_version, context=context)\n else:\n result = self.change_matview_state(cr, uid, 'after_drop_view', pg_version,\n context=context)\n 
return result\n\n def report_sql_error(self, cr, uid, err, pg_version, context=None):\n if not context:\n context = {}\n context.update({'error_message': err.pgerror})\n cr.rollback()\n self.change_matview_state(cr, uid, 'aborted_matview', pg_version, context=context)\n\n def before_drop_materialized_view(self, cr, uid, context=None):\n \"\"\"Method called before drop materialized view and view,\n Nothing done in abstract method, it's hook to used in subclass\n \"\"\"\n\n def after_drop_materialized_view(self, cr, uid, context=None):\n \"\"\"Method called after drop materialized view and view,\n Nothing done in abstract method, it's hook to used in subclass\n \"\"\"\n\n def before_create_materialized_view(self, cr, uid, context=None):\n \"\"\"Method called before create materialized view and view,\n Nothing done in abstract method, it's hook to used in subclass\n \"\"\"\n\n def after_create_materialized_view(self, cr, uid, context=None):\n \"\"\"Method called after create materialized view and view,\n Nothing done in abstract method, it's hook to used in subclass\n \"\"\"\n\n def before_refresh_materialized_view(self, cr, uid, context=None):\n \"\"\"Method called before refresh materialized view,\n this was made to do things like drop index before in the same transaction.\n\n Nothing done in abstract method, it's hook to used in subclass\n \"\"\"\n\n def after_refresh_materialized_view(self, cr, uid, context=None):\n \"\"\"Method called after refresh materialized view,\n this was made to do things like add index after refresh data\n\n Nothing done in abstract method, it's hook to used in subclass\n \"\"\"\n\n def write(self, cr, uid, ids, context=None):\n raise osv.except_osv(u\"Write on materialized view is forbidden\",\n u\"Write on materialized view is forbidden,\"\n u\"because data would be lost at the next refresh\"\n )\n\n def create(self, cr, uid, ids, context=None):\n raise osv.except_osv(u\"Create data on materialized view is forbidden\",\n u\"Create data on materialized view is forbidden,\"\n u\"because data would be lost at the next refresh\"\n )\n\n def unlink(self, cr, uid, ids, context=None):\n raise osv.except_osv(u\"Remove data on materialized view is forbidden\",\n u\"Remove data on materialized view is forbidden,\"\n u\"because data would be lost at the next refresh\"\n )\n\n\nclass PGMaterializedViewManager(object):\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def create_mat_view(self, cr, sql, view_name, mat_view_name):\n \"\"\"Abstract Method to overwrite in subclass to create sql view\n and materialized sql view from sql query.\n \"\"\"\n\n @abstractmethod\n def refresh_mat_view(self, cr, view_name, mat_view_name):\n \"\"\"Abstract Method to overwrite in subclass to refresh\n materialized sql view\n \"\"\"\n\n @abstractmethod\n def drop_mat_view(self, cr, view_name, mat_view_name):\n \"\"\"Abstract Method to overwrite in subclass to drop materialized view and clean\n every thing to its authority\n \"\"\"\n\n def is_existed_relation(self, cr, relname):\n cr.execute(\"select count(*) from pg_class where relname like '%(relname)s'\" %\n {'relname': relname})\n return cr.fetchone()[0] > 0\n\n @classmethod\n def getInstance(cls, version):\n \"\"\"Method that return the class depending pg server_version\n \"\"\"\n if version >= 90300:\n return PG090300()\n else:\n return PGNoMaterializedViewSupport()\n\n\nclass PGNoMaterializedViewSupport(PGMaterializedViewManager):\n\n def create_mat_view(self, cr, sql, view_name, mat_view_name):\n cr.execute(\"CREATE VIEW %(view_name)s 
AS (%(sql)s)\" %\n dict(view_name=view_name, sql=sql, ))\n cr.execute(\"CREATE TABLE %(mat_view_name)s AS SELECT * FROM %(view_name)s\" %\n dict(mat_view_name=mat_view_name,\n view_name=view_name,\n ))\n\n def refresh_mat_view(self, cr, view_name, mat_view_name):\n cr.execute(\"DELETE FROM %(mat_view_name)s\" % dict(mat_view_name=mat_view_name,\n ))\n cr.execute(\"INSERT INTO %(mat_view_name)s SELECT * FROM %(view_name)s\" %\n dict(mat_view_name=mat_view_name,\n view_name=view_name,\n ))\n\n def drop_mat_view(self, cr, view_name, mat_view_name):\n cr.execute(\"DROP TABLE IF EXISTS %s CASCADE\" % (mat_view_name))\n cr.execute(\"DROP VIEW IF EXISTS %s CASCADE\" % (view_name,))\n\n\nclass PG090300(PGMaterializedViewManager):\n\n def create_mat_view(self, cr, sql, view_name, mat_view_name):\n cr.execute(\"CREATE VIEW %(view_name)s AS (%(sql)s)\" %\n dict(view_name=view_name, sql=sql, ))\n cr.execute(\"CREATE MATERIALIZED VIEW %(mat_view_name)s AS SELECT * FROM %(view_name)s\" %\n dict(mat_view_name=mat_view_name,\n view_name=view_name,\n ))\n\n def refresh_mat_view(self, cr, view_name, mat_view_name):\n cr.execute(\"REFRESH MATERIALIZED VIEW %(mat_view_name)s\" %\n dict(mat_view_name=mat_view_name,\n ))\n\n def drop_mat_view(self, cr, view_name, mat_view_name):\n cr.execute(\"DROP MATERIALIZED VIEW IF EXISTS %s CASCADE\" % (mat_view_name))\n cr.execute(\"DROP VIEW IF EXISTS %s CASCADE\" % (view_name,))\n","sub_path":"materialized_sql_view/model/abstract_materialized_sql_view.py","file_name":"abstract_materialized_sql_view.py","file_ext":"py","file_size_in_byte":14030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5523114","text":"#!/usr/bin/env python\n\nfrom math import log2\n\ndef is_tree(thing):\n if not isinstance(thing, list):\n print(\"The input is not a list\")\n return False\n if len(thing)!=3:\n print(\"The size of the input is not 3\")\n return False\n if not isinstance(thing[0], tuple):\n print(\"The first element of the input is not a tuple\")\n return False\n return True\n\ndef classify(tree, x):\n if is_tree(tree):\n sub_tree = tree[1] if x[tree[0][0]] arcsec\n return RAs * 15\n\n\ndef DC_conv(DCbyt):\n '''Convert declination string of +DD_AM_AS.ssssssss into float.\n '''\n DCstr = str(DCbyt, encoding=\"utf8\")\n degs, ams, ass = DCstr.split('_')\n # Determine the sign\n if DCstr[0] is '+':\n return (float(degs) * 60 + float(ams)) * 60 + float(ass)\n else:\n return (float(degs) * 60 - float(ams)) * 60 - float(ass)\n# ------------------------------ END -----------------------------------\n","sub_path":"pos_conv.py","file_name":"pos_conv.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"114040366","text":"# Copyright 2013-2015 Pervasive Displays, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
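# Illustrative sketch only: the abstract model in the record above ultimately drives the
# plain SQL sequence that its PG090300 helper wraps. Below is a minimal standalone version
# of that lifecycle using psycopg2; the DSN and the view names are placeholders chosen for
# the example, not values taken from the module:
import psycopg2

def create_then_refresh(dsn, sql, view_name, mat_view_name):
    with psycopg2.connect(dsn) as conn:
        with conn.cursor() as cr:
            # Mirror PG090300: a plain view holds the query, and the materialized
            # view snapshots it (requires PostgreSQL >= 9.3).
            cr.execute("DROP MATERIALIZED VIEW IF EXISTS %s CASCADE" % mat_view_name)
            cr.execute("DROP VIEW IF EXISTS %s CASCADE" % view_name)
            cr.execute("CREATE VIEW %s AS (%s)" % (view_name, sql))
            cr.execute("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s"
                       % (mat_view_name, view_name))
            # Subsequent refreshes just re-run the stored query:
            cr.execute("REFRESH MATERIALIZED VIEW %s" % mat_view_name)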
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\nimport os\nimport re\n\nfrom PIL import Image\nfrom PIL import ImageOps\n\nfrom papirus import LM75B\nfrom papirus.panel import Panel, DisplayError\n\n\nclass EPDError(DisplayError):\n pass\n\n\nclass EPD(Panel):\n \"\"\"\n EPD E-Ink interface\n\n to use:\n from EPD import EPD\n\n epd = EPD([path='/path/to/epd'], [auto_update=boolean], [rotation = 0|90|180|270])\n\n image = Image.new('1', epd.size, 0)\n # draw on image\n epd.clear() # clear the panel\n epd.display(image) # transfer image data\n epd.update() # refresh the panel image - not needed if auto_update is True\n \"\"\"\n\n PANEL_RE = re.compile('^([A-Za-z]+)\\s+(\\d+\\.\\d+)\\s+(\\d+)x(\\d+)\\s+COG\\s+(\\d+)\\s+FILM\\s+(\\d+)\\s*$', flags=0)\n\n def __init__(self, epd_path='/dev/epd', rotation=0, auto_update=False):\n self._epd_path = epd_path\n self._panel = 'EPD 2.0'\n self._cog = 0\n self._film = 0\n\n self.use_temp_sensor = True\n self._lm75b = LM75B()\n\n with open(os.path.join(self._epd_path, 'version')) as f:\n self._version = f.readline().rstrip('\\n')\n\n width, height = 0, 0\n with open(os.path.join(self._epd_path, 'panel')) as f:\n m = self.PANEL_RE.match(f.readline().rstrip('\\n'))\n if not m:\n raise EPDError('invalid panel string')\n self._panel = m.group(1) + ' ' + m.group(2)\n width = int(m.group(3))\n height = int(m.group(4))\n self._cog = int(m.group(5))\n self._film = int(m.group(6))\n\n super(EPD, self).__init__(width=width, height=height, rotation=rotation, auto_update=auto_update)\n\n @property\n def panel(self):\n return self._panel\n\n @property\n def version(self):\n return self._version\n\n @property\n def cog(self):\n return self._cog\n\n @property\n def film(self):\n return self._film\n\n def error_status(self):\n with open(os.path.join(self._epd_path, 'error'), 'r') as f:\n return f.readline().rstrip('\\n')\n\n def _write(self, image):\n with open(os.path.join(self._epd_path, 'LE', 'display_inverse'), 'r+b') as f:\n f.write(image.tobytes())\n\n def update(self):\n self._command('U')\n\n def partial_update(self):\n self._command('P')\n\n def fast_update(self):\n self._command('F')\n\n def clear(self):\n self._command('C')\n\n def _command(self, c):\n if self.use_temp_sensor:\n with open(os.path.join(self._epd_path, 'temperature'), 'wb') as f:\n f.write(str(self._lm75b.getTempC()).encode(encoding='ISO-8859-1'))\n with open(os.path.join(self._epd_path, 'command'), 'wb') as f:\n f.write(c.encode('ISO-8859-1'))\n","sub_path":"papirus/epd.py","file_name":"epd.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"352459039","text":"import torch\nimport torch.nn\nimport torch.nn.functional as F\nfrom .multi_head_attention import MultiHeadAttention, AttentionMask\nfrom typing import Optional, Callable, Dict\nfrom dataclasses import dataclass\n# This file is based on PyTorch's internal implementation\n\nActivationFunction = Callable[[torch.Tensor], torch.Tensor]\n\n\nclass TransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n super(TransformerEncoderLayer, self).__init__()\n self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout)\n self.linear2 = 
torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor:\n src2 = self.self_attn(src, src, mask)\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu') \\\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)\n\n\nclass TransformerDecoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0):\n super(TransformerDecoderLayer, self).__init__()\n\n self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)\n self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)\n # Implementation of Feedforward model\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.norm3 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n self.dropout3 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,\n memory_key_padding_mask: Optional[torch.Tensor] = None,\n full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:\n \n assert pos_offset == 0 or tgt_mask is None\n tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=AttentionMask(None, tgt_mask))\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def reset_parameters(self):\n torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu') \\\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)\n\n\nclass TransformerDecoderBase(torch.nn.Module):\n @dataclass\n class State:\n step: int\n state: Dict[int, torch.Tensor]\n\n def __init__(self, d_model: int):\n super().__init__()\n self.d_model = d_model\n\n def create_state(self, batch_size: int, max_length: int, device: torch.device) -> State:\n return self.State(0, {i: torch.empty([batch_size, max_length, self.d_model], device=device)\n for i in range(len(self.layers))})\n\n def one_step_forward(self, state: State, data: torch.Tensor, *args, **kwargs):\n assert data.shape[1] == 1, f\"For one-step forward should have one timesteps, but shape is {data.shape}\"\n assert state.step < state.state[0].shape[1]\n\n for i, l in enumerate(self.layers):\n state.state[i][:, state.step:state.step + 1] = data\n data = l(data, *args, **kwargs, 
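TransformerEncoderLayer above is the classic post-norm residual block: self-attention, add & norm, position-wise feed-forward, add & norm, so the output keeps the input shape. A smoke-test sketch, assuming the sibling MultiHeadAttention module imports cleanly; the sizes are illustrative:

    import torch
    layer = TransformerEncoderLayer(d_model=64, nhead=4, dim_feedforward=128)
    src = torch.randn(2, 10, 64)    # (batch, sequence, d_model)
    out = layer(src)                # mask omitted for the sketch
    assert out.shape == src.shape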
full_target=state.state[i][:, :state.step + 1],\n                     pos_offset=state.step)\n\n        state.step += 1\n        return data\n\n\nclass TransformerEncoder(torch.nn.Module):\n    def __init__(self, layer, n_layers: int, *args, **kwargs):\n        super().__init__()\n        self.layers = torch.nn.ModuleList([layer(*args, **kwargs) for _ in range(n_layers)])\n\n    def forward(self, data: torch.Tensor, *args, **kwargs):\n        for l in self.layers:\n            data = l(data, *args, **kwargs)\n        return data\n\n\nclass TransformerDecoder(TransformerDecoderBase):\n    def __init__(self, layer, n_layers: int, d_model: int, *args, **kwargs):\n        super().__init__(d_model)\n        self.layers = torch.nn.ModuleList([layer(d_model, *args, **kwargs) for _ in range(n_layers)])\n\n    def forward(self, data: torch.Tensor, *args, **kwargs):\n        for l in self.layers:\n            data = l(data, *args, **kwargs)\n        return data\n\n\ndef TransformerEncoderWithLayer(layer = TransformerEncoderLayer):\n    return lambda *args, **kwargs: TransformerEncoder(layer, *args, **kwargs)\n\n\ndef TransformerDecoderWithLayer(layer = TransformerDecoderLayer):\n    return lambda *args, **kwargs: TransformerDecoder(layer, *args, **kwargs)\n\n\nclass Transformer(torch.nn.Module):\n    def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,\n                 num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,\n                 activation: ActivationFunction = F.relu, encoder_layer=TransformerEncoderWithLayer(),\n                 decoder_layer=TransformerDecoderWithLayer(), attention_dropout: float = 0):\n        super().__init__()\n\n        self.encoder = encoder_layer(num_encoder_layers, d_model, nhead, dim_feedforward,\n                                     dropout, activation, attention_dropout)\n        self.decoder = decoder_layer(num_decoder_layers, d_model, nhead, dim_feedforward,\n                                     dropout, activation, attention_dropout)\n\n    def forward(self, src: torch.Tensor, tgt: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,\n                src_mask: Optional[AttentionMask] = None):\n\n        memory = self.encoder(src, src_mask)\n        return self.decoder(tgt, memory, tgt_mask, src_mask.src_length_mask if src_mask is not None else None)\n\n    @staticmethod\n    def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:\n        return torch.triu(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=1)\n","sub_path":"layers/transformer/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"654055350","text":"#!/usr/bin/env python\nimport sys\nimport math\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg', force=True)\nimport matplotlib.pyplot as plt\nfrom utils import CoreSnapshot, SLOEvent\nfrom utils import read_core_snapshot\n\nstats_dir = \"../stats_0314/\"\n\ndef cdf_plot(x_data, title=None):\n    _, ax = plt.subplots()\n\n    sorted_x_data = sorted(x_data)\n    n = len(sorted_x_data)\n    y_pos = np.arange(n) / float(n - 1)\n\n    ax.plot(sorted_x_data, y_pos, marker='.', color='purple')\n    ax.set_xlabel(\"Epochs\")\n    ax.set_ylabel(\"Percentile (%)\")\n    if title:\n        ax.set_title(title)\n    plt.grid(True)\n\ndef get_percentile(p_val, x_data):\n    if len(x_data) < 100:\n        return \"Na (not enough samples)\"\n    if p_val < 0 or p_val > 100:\n        return \"Na (p val out of range)\"\n\n    sorted_x_data = sorted(x_data)\n    n = len(sorted_x_data)\n    p_pos = int(p_val * (n - 1) / 100)\n    return sorted_x_data[p_pos]\n\ndef parse_core_snapshot(core_id):\n    stats_filename = \"stats%d.txt\" %(core_id)\n    stats_abs_dir = stats_dir + stats_filename\n    snapshots = 
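one_step_forward above is what makes autoregressive decoding cheap: each layer's past inputs are cached in State.state, so self-attention at step t attends over steps 0..t through full_target instead of re-running the whole prefix. A usage sketch with assumed sizes (again contingent on the sibling MultiHeadAttention module):

    import torch
    decoder = TransformerDecoderWithLayer()(n_layers=2, d_model=64, nhead=4)
    memory = torch.randn(1, 12, 64)      # pretend encoder output
    state = decoder.create_state(batch_size=1, max_length=20, device=memory.device)
    token = torch.zeros(1, 1, 64)        # embedding of the start token
    for _ in range(5):
        token = decoder.one_step_forward(state, token, memory)  # one timestep per call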
read_core_snapshot(stats_abs_dir)\n return snapshots\n\ndef generate_epoch_distribution_plot(slo_events, title):\n epochs = [se._epoch_cnt for se in slo_events]\n cdf_plot(epochs, title)\n plt.savefig('slo_vio_epoch_dist_%s.png' %(title), dpi=600)\n\n\n# Get SLO-violation events.\ndef stats_analysis(snapshots):\n short_term_slo_vio = []\n long_term_slo_vio = []\n # Count short/long-term SLO violation\n total_pkts = 0\n epoch_with_slo_vio = 0\n pkt_with_slo_vio = 0\n for s in snapshots:\n total_pkts += s._pkt_rate\n if s._slo_violations > 0:\n epoch_with_slo_vio += 1\n pkt_with_slo_vio += s._slo_violations\n else:\n if epoch_with_slo_vio >= 1:\n e = SLOEvent(epoch_with_slo_vio, pkt_with_slo_vio)\n if epoch_with_slo_vio >= 20:\n long_term_slo_vio.append(e)\n elif epoch_with_slo_vio < 20:\n short_term_slo_vio.append(e)\n\n epoch_with_slo_vio = 0\n pkt_with_slo_vio = 0\n\n print(\"Total epochs: %d\" %(len(snapshots)))\n print(\"Short term SLO violations: %d\" %(len(short_term_slo_vio)))\n print(\"Long term SLO violations: %d\" %(len(long_term_slo_vio)))\n return (total_pkts, len(snapshots), short_term_slo_vio, long_term_slo_vio)\n\n\ndef main():\n core_snapshots = []\n for core_id in range(8):\n core_snapshots.append(parse_core_snapshot(core_id))\n\n # Cluster-scale\n total_epochs = 0\n total_pkts = 0\n short_term_slo_vio_epochs = 0\n short_term_slo_vio_epoch_ratio = 0.0\n short_term_slo_vio_pkts = 0\n short_term_slo_vio_pkt_ratio = 0.0\n long_term_slo_vio_epochs = 0\n long_term_slo_vio_epoch_ratio = 0.0\n long_term_slo_vio_pkts = 0\n long_term_slo_vio_pkt_ratio = 0.0\n\n all_slo_vio = []\n short_term_slo_vio = []\n long_term_slo_vio = []\n\n # SLO-event statistics\n per_core_results = [None]\n for i in range(1, 8):\n print(\"Core %d statistics analysis:\" %(i))\n per_core_results.append(stats_analysis(core_snapshots[i]))\n total_pkts += per_core_results[i][0]\n total_epochs += per_core_results[i][1]\n short_term_slo_vio += per_core_results[i][2]\n long_term_slo_vio += per_core_results[i][3]\n\n all_slo_vio = short_term_slo_vio + long_term_slo_vio\n\n for x in short_term_slo_vio:\n if x:\n short_term_slo_vio_epochs += x._epoch_cnt\n short_term_slo_vio_pkts += x._pkt_cnt\n for x in long_term_slo_vio:\n if x:\n long_term_slo_vio_epochs += x._epoch_cnt\n long_term_slo_vio_pkts += x._pkt_cnt\n\n short_term_slo_vio_epoch_ratio = short_term_slo_vio_epochs * 100 / total_epochs\n short_term_slo_vio_pkt_ratio = short_term_slo_vio_pkts * 100 / total_pkts\n long_term_slo_vio_epoch_ratio = long_term_slo_vio_epochs * 100 / total_epochs\n long_term_slo_vio_pkt_ratio = long_term_slo_vio_pkts * 100 / total_pkts\n\n print(\"-\" * 84)\n print(\"Cluster-scale analysis:\")\n print(\"Total epochs: %d\" %(total_epochs))\n print(\"Total pkts: %d\" %(total_pkts))\n print(\"Short term SLO violations:\")\n print(\" - cnt=%d; epochs=%d [%.2f%%]; pkts=%d [%.2f%%]\" \\\n %(len(short_term_slo_vio), short_term_slo_vio_epochs, short_term_slo_vio_epoch_ratio, \\\n short_term_slo_vio_pkts, short_term_slo_vio_pkt_ratio))\n print(\"Long term SLO violations:\")\n print(\" - cnt=%d; epochs=%d [%.2f%%]; pkts=%d [%.2f%%]\" %(len(long_term_slo_vio), long_term_slo_vio_epochs, long_term_slo_vio_epoch_ratio, \\\n long_term_slo_vio_pkts, long_term_slo_vio_pkt_ratio))\n\n # SLO-violating event plots\n generate_epoch_distribution_plot(short_term_slo_vio, \"short\")\n generate_epoch_distribution_plot(long_term_slo_vio, \"long\")\n generate_epoch_distribution_plot(short_term_slo_vio + long_term_slo_vio, \"Epoch distribution of SLO violation 
events\")\n\n # Percentiles of SLO-violating events' duration\n slo_vio_event_epochs = [ss._epoch_cnt for ss in all_slo_vio]\n\n print(get_percentile(90, slo_vio_event_epochs))\n print(get_percentile(95, slo_vio_event_epochs))\n print(get_percentile(99, slo_vio_event_epochs))\n\n return\n\nif __name__ == '__main__':\n main()\n","sub_path":"measurement_scripts/slo_vio_event_cdf_plot.py","file_name":"slo_vio_event_cdf_plot.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"422518579","text":"from django.shortcuts import reverse\r\nfrom django.utils import timezone\r\n\r\nfrom general_test import ViewsTest\r\nfrom .models import FeedbackReport\r\n\r\n\r\nclass IndexViewsTest(ViewsTest):\r\n def setUp(self):\r\n self.app = 'index'\r\n super().setUp()\r\n\r\n self.fb = FeedbackReport(\r\n Reporter=self.users['sup'],\r\n Url='/',\r\n Feedback='Test Feedback',\r\n Timestamp=timezone.now(),\r\n Status=1,\r\n )\r\n self.fb.save()\r\n\r\n def test_index_views(self):\r\n self.info = {}\r\n codes = [\r\n [['index', None], self.p_anonymous],\r\n [['about', None], self.p_anonymous],\r\n [['profile', None], self.p_all],\r\n [['feedback_form', None], self.p_all],\r\n [['feedback_submit', None], self.p_all],\r\n [['list_feedback', None], self.p_superuser],\r\n [['confirm_feedback', {'pk': self.fb.id}], self.p_superuser],\r\n [['close_feedback', {'pk': self.fb.id}], self.p_superuser],\r\n [['changesettings', None], self.p_all],\r\n [['termsaccept', None], self.p_redirect],\r\n [['markdown_upload', None], self.p_forbidden], # forbidden because referer is missing in request.\r\n [['robots', None], self.p_anonymous],\r\n ]\r\n self.loop_code_user(codes)\r\n # check if all urls are processed, except login and logout\r\n self.assertListEqual(self.allurls, ['login', 'logout'], msg=\"Not all URLs of this app are tested!\")\r\n\r\n def test_links_visible(self):\r\n \"\"\"\r\n Test if all links shown in a view go to a page with status 200.\r\n Used to test if all visible menu items are actually available for the given user in the given view.\r\n\r\n :return:\r\n \"\"\"\r\n self.info = {}\r\n views = [\"index:index\", 'index:profile']\r\n for phase in range(1, 8):\r\n for view in views:\r\n self.info['view'] = view\r\n ViewsTest.links_in_view_test(self, reverse(view))\r\n","sub_path":"index/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369515777","text":"# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\n\ndef test_should_capture_local_storage(context, is_webkit, is_win):\n if is_webkit and is_win:\n pytest.skip()\n page1 = context.newPage()\n page1.route(\"**/*\", lambda route: route.fulfill(body=\"\"))\n page1.goto(\"https://www.example.com\")\n page1.evaluate(\"localStorage['name1'] = 'value1'\")\n 
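get_percentile in the SLO script above is a nearest-rank estimator: it indexes the sorted sample at int(p * (n - 1) / 100), truncating toward zero. A quick arithmetic check of that rule with made-up data:

    data = list(range(1, 201))        # 200 samples, passes the len >= 100 guard
    n = len(data)
    p_pos = int(99 * (n - 1) / 100)   # 99 * 199 / 100 = 197.01 -> 197
    assert sorted(data)[p_pos] == 198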
page1.goto(\"https://www.domain.com\")\n page1.evaluate(\"localStorage['name2'] = 'value2'\")\n\n state = context.storageState()\n origins = state[\"origins\"]\n assert len(origins) == 2\n assert origins[0] == {\n \"origin\": \"https://www.example.com\",\n \"localStorage\": [{\"name\": \"name1\", \"value\": \"value1\"}],\n }\n assert origins[1] == {\n \"origin\": \"https://www.domain.com\",\n \"localStorage\": [{\"name\": \"name2\", \"value\": \"value2\"}],\n }\n\n\ndef test_should_set_local_storage(browser, is_webkit, is_win):\n if is_webkit and is_win:\n pytest.skip()\n context = browser.newContext(\n storageState={\n \"origins\": [\n {\n \"origin\": \"https://www.example.com\",\n \"localStorage\": [{\"name\": \"name1\", \"value\": \"value1\"}],\n }\n ]\n }\n )\n\n page = context.newPage()\n page.route(\"**/*\", lambda route: route.fulfill(body=\"\"))\n page.goto(\"https://www.example.com\")\n local_storage = page.evaluate(\"window.localStorage\")\n assert local_storage == {\"name1\": \"value1\"}\n context.close()\n","sub_path":"tests/sync/test_browsercontext_storage_state.py","file_name":"test_browsercontext_storage_state.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293383273","text":"import RPi.GPIO as GPIO\nimport time\n\"\"\"\nGPIO.setmode(GPIO.BCM)\nPIR_PIN = 4\nGPIO.setup(PIR_PIN, GPIO.IN)\n\ndef MOTION(PIR_PIN):\n print('Motion Detected!')\n\nprint('PIR Module Test (CTRL+C to exit)')\ntime.sleep(2)\nprint('Ready')\n\ntry:\n GPIO.add_event_detect(PIR_PIN, GPIO.RISING, callback=MOTION)\n while 1:\n time.sleep(5)\nexcept KeyboardInterrupt:\n print('Quit')\n GPIO.cleanup()\n\"\"\"\nimport time\n \nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nPIR_PIN = 4\nGPIO.setup(PIR_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\nprint('Starting up the PIR Module (click on STOP to exit)')\ntime.sleep(1)\nprint ('Ready')\n\nwhile True:\n if GPIO.input(PIR_PIN):\n print('Motion Detected')\n time.sleep(1)\n else:\n print('Motion Not Detected')\n time.sleep(1)\n","sub_path":"motion_control.py","file_name":"motion_control.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369817593","text":"import rclpy\nfrom rclpy.node import Node\n\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Pose\nimport numpy as np\n\nclass MinimalPublisher(Node):\n\n def __init__(self):\n super().__init__('dummy_object_detector')\n self.publisher_ = self.create_publisher(Pose, 'dummy_objects', 10)\n timer_period = 0.5 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.q = [0.4619398,-0.1913417,0.3314136,0.8001031]\n\n def timer_callback(self):\n msg = Pose()\n msg.position.x = 1.5\n msg.position.y = 0.25\n msg.position.z = 0.01\n msg.orientation.x = self.q[0]\n msg.orientation.y = self.q[1]\n msg.orientation.z = self.q[2]\n msg.orientation.w = self.q[3]\n\n self.publisher_.publish(msg)\n\ndef main(args=None):\n rclpy.init(args=args)\n\n minimal_publisher = MinimalPublisher()\n\n rclpy.spin(minimal_publisher)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n minimal_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n 
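The PIR script above busy-polls GPIO4 once per second; its commented-out first half shows the interrupt-driven alternative. A hedged sketch of that event style with debouncing (the pin and bouncetime values are assumptions, not taken from the script):

    import RPi.GPIO as GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

    def on_motion(channel):
        print('Motion Detected on pin %d' % channel)

    # bouncetime (ms) suppresses retriggering while the sensor output settles
    GPIO.add_event_detect(4, GPIO.RISING, callback=on_motion, bouncetime=300)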
main()","sub_path":"class2_ws/build/assignment2/build/lib/assignment2/dummy_object_detector.py","file_name":"dummy_object_detector.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618123500","text":"import numpy as np\n\n#task1\na1 = np.array(range(15), float)\na2 = np.array(range(6), float)\na1 = a1.reshape(5, 3)\na2 = a2.reshape(3, 2)\nprint(np.dot(a1, a2))\n\n#task3\nb2 = np.random.rand(30)\nprint(np.mean(b2))","sub_path":"Problems/numpy.py","file_name":"numpy.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"328009536","text":"'''\nДано три числа. Упорядочите их в порядке неубывания. \nрограмма должна считывать три числа a,b,c, затем программа должна менять их значения так, \nчтобы стали выполнены условия a≤b≤c, затем программа выводит тройку a,b,c.\n\nФормат ввода\nВводятся три числа.\n\nФормат вывода\nВыведите ответ на задачу.\n'''\na, b, c = int(input()), int(input()), int(input())\nfor i in sorted([a, b, c]):\n print(i, end=' ')","sub_path":"week_2_if_n_while/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590858836","text":"import threading\nimport time\n\nclass output (threading.Thread):\n\tdef __init__(self, out):\n\t\tthreading.Thread.__init__(self)\n\t\tself.outputString = out\n\n\tdef run(self):\n\t\tfor x in range(30):\n\t\t\tprint(self.outputString)\n\t\t\ttime.sleep(1)\n\n\n\n#thread.start_new_thread( output, \"hello\" )\n\nout1 = output(\"hello\")\nout1.start()\n\nwhile 1:\n\tinp = input(\"What?\")\n\tif inp == 'x':\n\t\tbreak\n\telse:\n\t\tprint(\"yep yep yep\" + inp)\n\t\t#print(inp)\n","sub_path":"Python/ThreadTest.py","file_name":"ThreadTest.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"205787654","text":"import math\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n\r\ndef do(ao):\r\n # 点\r\n class Point(object):\r\n\r\n def __init__(self, x, y):\r\n self.x, self.y = x, y\r\n\r\n # 向量\r\n class Vector(object):\r\n\r\n def __init__(self, start_point, end_point):\r\n self.start, self.end = start_point, end_point\r\n self.x = end_point.x - start_point.x\r\n self.y = end_point.y - start_point.y\r\n\r\n def __repr__(self):\r\n return repr(self.start_point, self.end_point)\r\n\r\n def get__start_point(self):\r\n return self.start\r\n\r\n def get__end_point(self):\r\n return self.end\r\n\r\n ZERO = 1e-9\r\n\r\n def negative(vector):\r\n \"\"\"取反\"\"\"\r\n return Vector(vector.get__end_point(), vector.get__start_point())\r\n\r\n def vector_product(vectorA, vectorB):\r\n '''计算 x_1 * y_2 - x_2 * y_1'''\r\n return vectorA.x * vectorB.y - vectorB.x * vectorA.y\r\n\r\n def is_intersected(A, B, C, D):\r\n '''A, B, C, D 为 Point 类型'''\r\n AC = Vector(A, C)\r\n AD = Vector(A, D)\r\n BC = Vector(B, C)\r\n BD = Vector(B, D)\r\n CA = negative(AC)\r\n CB = negative(BC)\r\n DA = negative(AD)\r\n DB = negative(BD)\r\n if (A.x == C.x and A.y == C.y) or (A.x == D.x and A.y == D.y) or (B.x == C.x and B.y == C.y) or (\r\n B.x == D.x and B.y == D.y):\r\n return False\r\n else:\r\n return (vector_product(AC, AD) * vector_product(BC, BD) <= ZERO) \\\r\n and (vector_product(CA, CB) * vector_product(DA, DB) <= ZERO)\r\n\r\n A = Point(35, 82)\r\n B = 
Point(2, 84)\r\n    C = Point(5, 82)\r\n    D = Point(7, 21)\r\n    print(is_intersected(A, B, C, D))\r\n\r\n    def isInsidePolygon(pt, poly):\r\n        c = False\r\n        i = -1\r\n        l = len(poly)\r\n        j = l - 1\r\n        while i < l - 1:\r\n            i += 1\r\n            print(i, poly[i], j, poly[j])\r\n            if ((poly[i][\"lat\"] <= pt[\"lat\"] and pt[\"lat\"] < poly[j][\"lat\"]) or (\r\n                    poly[j][\"lat\"] <= pt[\"lat\"] and pt[\"lat\"] < poly[i][\"lat\"])):\r\n                if (pt[\"lng\"] < (poly[j][\"lng\"] - poly[i][\"lng\"]) * (pt[\"lat\"] - poly[i][\"lat\"]) / (\r\n                        poly[j][\"lat\"] - poly[i][\"lat\"]) + poly[i][\"lng\"]):\r\n                    c = not c\r\n            j = i\r\n        return c\r\n    list = []\r\n    polygon = []\r\n    for a in range(0, 3):\r\n        x = random.randint(0, 99)\r\n        y = random.randint(0, 99)\r\n        point = (x, y)\r\n        list.append(point)\r\n        polygon.append({'lat': x, 'lng': y})\r\n    nu = 1\r\n    while nu <= ao:\r\n        x=random.randint(0, 99)\r\n        y=random.randint(0, 99)\r\n        con = 0\r\n        # if isInsidePolygon({'lat': x, 'lng': y}, polygon) ==0 :\r\n        if (x, y) not in list and isInsidePolygon({'lat': x, 'lng': y}, polygon) == 0:\r\n\r\n            for a in range(0,len(list)-1):\r\n                if con == 0:\r\n                    for b in range(0, len(list)):\r\n                        print(list)\r\n                        print(list[a][0], list[a][1])\r\n                        print(list[a+1][0], list[a+1][1])\r\n                        print(list[b][0], list[b][1])\r\n\r\n\r\n                        A = Point(x, y)\r\n                        B = Point(list[a][0], list[a][1])\r\n                        C = Point(list[a+1][0], list[a+1][1])\r\n                        D = Point(list[b][0], list[b][1])\r\n                        if b == len(list)-1 :\r\n                            E = Point(list[0][0], list[0][1])\r\n                            print(list[0][0], list[0][1])\r\n                        else:\r\n                            E = Point(list[b+1][0], list[b+1][1])\r\n                            print(list[b + 1][0], list[b + 1][1])\r\n                        print(is_intersected(A, B, D, E))\r\n                        print(is_intersected(A, C, D, E))\r\n                        if is_intersected(A, B, D, E)==1 or is_intersected(A, C, D, E)==1:\r\n                            break\r\n                        if b ==len(list)-1:\r\n                            point = (x, y)\r\n                            nu += 1\r\n                            print(a)\r\n                            list.insert(a+1, point)\r\n                            polygon.insert(a+1, {'lat': x, 'lng': y})\r\n                            con=1\r\n                            #print(A.x,A.y,B.x,B.y,C.x,C.y,D.x,D.y,E.x,E.y)\r\n\r\n    import csv\r\n    with open('data.csv','w') as csvfile:\r\n        fieldnames = ['X','Y']\r\n        writer = csv.DictWriter(csvfile,fieldnames=fieldnames)\r\n        writer.writeheader()\r\n        for a in range(0, len(list)):\r\n            writer.writerow({'X':list[a][0],'Y':list[a][1]})\r\n\r\n    listxx = []\r\n    listyy = []\r\n    for a in range(0,len(list)):\r\n        listxx.append(list[a][0])\r\n        listyy.append(list[a][1])\r\n    print(list)\r\n    plt.plot(listxx, listyy)\r\n    plt.plot([listxx[0],listxx[len(listxx)-1]],[listyy[0],listyy[len(listyy)-1]])\r\n    plt.show()","sub_path":"data/widePolygon.py","file_name":"widePolygon.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"460871829","text":"import handler\nimport osvcd_shared as shared\n\nclass Handler(handler.Handler):\n    \"\"\"\n    Clear the object monitor status. 
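is_intersected in widePolygon.py above is the standard two-sided cross-product test: AB and CD intersect when C and D fall on opposite sides of AB and A and B fall on opposite sides of CD (the <= ZERO products). A sanity check, assuming Point and is_intersected are lifted out of do() to module scope; the coordinates are illustrative:

    P1, P2 = Point(0, 0), Point(10, 10)    # diagonal segment
    Q1, Q2 = Point(0, 10), Point(10, 0)    # crossing anti-diagonal
    R1, R2 = Point(20, 0), Point(30, 5)    # off to the side
    print(is_intersected(P1, P2, Q1, Q2))  # expected: True
    print(is_intersected(P1, P2, R1, R2))  # expected: False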
For example, a \"start failed\".\n Transient status are not clearable (those ending with 'ing', like starting).\n \"\"\"\n routes = (\n (\"POST\", \"object_clear\"),\n (None, \"clear\"),\n )\n prototype = [\n {\n \"name\": \"path\",\n \"desc\": \"The object path.\",\n \"required\": True,\n \"format\": \"object_path\",\n },\n ]\n access = {\n \"roles\": [\"operator\"],\n \"namespaces\": \"FROM:path\",\n }\n\n def action(self, nodename, thr=None, **kwargs):\n options = self.parse_options(kwargs)\n smon = thr.get_service_monitor(options.path)\n if smon.status.endswith(\"ing\"):\n return {\"info\": \"skip clear on %s instance\" % smon.status, \"status\": 0}\n thr.log_request(\"clear %s monitor status\" % options.path, nodename, **kwargs)\n thr.set_smon(options.path, status=\"idle\", reset_retries=True)\n return {\"status\": 0, \"info\": \"%s instance cleared\" % options.path}\n\n","sub_path":"lib/handlerPostObjectClear.py","file_name":"handlerPostObjectClear.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366421847","text":"from charm.toolbox.pairinggroup import PairingGroup, GT\nfrom cp_abe import CP_ABE\n\n\ndef main():\n pairing_group = PairingGroup('MNT224')\n \n cpabe = CP_ABE(pairing_group, 2)\n\n # run the set up\n (public_key, master_secrete_key) = cpabe.setup()\n #print(master_secrete_key)\n\n # generate a key\n attribute_lst = ['ONE', 'TWO', 'THREE']\n key = cpabe.keygen(public_key, master_secrete_key, attribute_lst)\n print(\"key \", key,\"\\n\\n\")\n\n # choose a random message\n message = pairing_group.random(GT)\n print(message,\"\\n\\n\")\n \n # generate a ciphertext\n policy = '((ONE and THREE) and (TWO OR FOUR))'\n cipher_text = cpabe.encrypt(public_key, message, policy)\n print(cipher_text,\"\\n\\n\")\n\n # decryption\n recieved_message = cpabe.decrypt(public_key, cipher_text, key)\n print(recieved_message,\"\\n\\n\")\n if debug:\n if recieved_message == message:\n print (\"Successful decryption.\")\n else:\n print (\"Decryption failed.\")\n\n\nif __name__ == \"__main__\":\n debug = True\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523364678","text":"import tkinter as tk\r\nimport tkinter.ttk\r\n\r\nscreens = [\"Screen 1\", \"Screen 2\", \"Screen 3\", \"Screen 4\", \"Screen 5\", \"Screen 6\"]\r\n\r\nmovies = {\"Horror\": [\"Hereditary\",\"A Quiet Place\",\"The Conjuring 2\",\"The Grudge\",\"Anabelle Comes Home\"],\r\n \"Action\": [\"Avengers End Game\",\"John Wick Chapter 3\", \"Aquaman\", \"Black Panther\", \"Mission Impossible\"],\r\n \"Drama\": [\"Joker\",\"Spotlight\",\"Little Women\",\"The IrishMan\", \"A Star is Born\"],\r\n \"Comedy\": [\"Step Brothers\",\"BookSmart\",\"Horrible Bosses\",\"The Other Guys\",\"SuperBad\"],\r\n \"Sci-Fi\": [\"Star Wars\",\"Annihilation\",\"Arrival\",\"Interstellar\",\"The Martian\"],\r\n \"Romance\": [\"The Fault in Our Stars\",\"The Notebook\",\"The Tourist\",\"Titanic\",\"Crazy Rich Asians\"],\r\n \"Bollywood\": [\"Baahubali The Beginning\",\"Ghajini\",\"Petta\",\"Bhajirao Mastani\",\"Maharshi\",\"Master\"]\r\n }\r\n\r\ntimes = [\"12:00\",\"12:30\",\"13:00\",\"13:30\",\"15:00\",\"15:30\",\"18:00\",\"18:30\",\"19:00\",\"19:30\",\"21:00\",\"21:30\",\"22:00\",\"22:30\"]\r\n\r\nseatList = []\r\nseatSelected = []\r\n\r\nclass Application(tk.Tk):\r\n def __init__(self):\r\n 
super().__init__()\r\n        self.title(\"Cinema Booking\")\r\n        self.createWidgets()\r\n\r\n    def updateMovies(self, event=None):\r\n        self.movieCombo['values'] = movies[self.genreCombo.get()]\r\n\r\n    def createWidgets(self):\r\n        headingLabel = tk.Label(self, text=\"Cinema Seat Booking Portal\", font = \"Aries 12 bold\")\r\n        headingLabel.grid(row=0, column=0, columnspan=5, padx=10, pady=10, sticky=\"w\")\r\n        tkinter.ttk.Separator(self, orient=\"horizontal\").grid(row=1, column=0, columnspan=5, sticky=\"ew\")\r\n\r\n        day = tk.Frame(self)\r\n        tk.Label(day, text=\"________\").pack()\r\n        tk.Label(day, text=\"Today\", font=\"Aries 10 underline\").pack()\r\n        tk.Label(day, text=\"\").pack()\r\n        day.grid(row=2, column=0, padx=10)\r\n        tk.Label(self,text=\"Genre: \").grid(row=2, column=1, padx=(10,0))\r\n        self.genreCombo = tkinter.ttk.Combobox(self, width=15, values=list(movies.keys()), state=\"readonly\")\r\n        self.genreCombo.set(\"Select Genre\")\r\n        self.genreCombo.bind('<<ComboboxSelected>>', self.updateMovies)\r\n        self.genreCombo.grid(row=2, column=2)\r\n\r\n        tk.Label(self, text=\"Film Name: \").grid(row=2, column=3, padx=(10,0))\r\n        self.movieCombo = tkinter.ttk.Combobox(width=15, state=\"readonly\")\r\n        self.movieCombo.bind('<<ComboboxSelected>>', self.createTimeButtons)\r\n        self.movieCombo.set(\"Select Film\")\r\n        self.movieCombo.grid(row=2, column=4, padx=(10,0))\r\n        tkinter.ttk.Separator(self, orient=\"horizontal\").grid(row=3, column=0, columnspan=5, sticky=\"ew\")\r\n\r\n    def createTimeButtons(self, event=None):\r\n        tk.Label(self, text=\"Select Time Slot\", font=\"Aries 11 bold underline\").grid(row=4, column=2, columnspan=2, pady=5)\r\n        Time = tk.Frame(self)\r\n        Time.grid(row=5, column=0, columnspan=5)\r\n        for i in range(14):\r\n            tk.Button(Time, text=times[i],\r\n                      command = self.seatSelection).grid(row=4+i//7, column=i%7)\r\n\r\n    def seatSelection(self):\r\n        window = tk.Toplevel()\r\n        window.title(\"Select your seat\")\r\n        checkoutHeading = tk.Label(window, text=\"Seat(s) Selection\", font=\"Aries 12\")\r\n        checkoutHeading.grid(row=0, column=0, columnspan=5, padx=10, pady=(10,0), sticky=\"w\")\r\n\r\n        infer = tk.Frame(window)\r\n        infer.grid(row=1, column=0)\r\n        tk.Label(infer, text=\"BLUE = SELECTED\", fg='blue').grid(row=0, column=0, padx=10)\r\n        tk.Label(infer, text=\"RED = BOOKED\", fg='brown').grid(row=0, column=1, padx=10)\r\n        tk.Label(infer, text=\"GREEN = AVAILABLE\", fg='green').grid(row=0, column=2, padx=10)\r\n        tkinter.ttk.Separator(window, orient=\"horizontal\").grid(row=2, column=0, pady=(0,5), sticky=\"ew\")\r\n\r\n        w = tk.Canvas(window, width=500, height=15)\r\n        w.create_rectangle(10,0,490,10,fill='black')\r\n        w.grid(row=3, column=0)\r\n        tk.Label(window, text=\"SCREEN\").grid(row=4, column=0, pady=(0,10))\r\n        seats = tk.Frame(window)\r\n        seats.grid(row=5, column=0)\r\n        seatList.clear()\r\n        seatSelected.clear()\r\n        for i in range(4):\r\n            temp=[]\r\n            for j in range(15):\r\n                but = tk.Button(seats, bd=2, bg='Green', activebackground='forestGreen', command=lambda x=i, y=j: self.selected(x,y))\r\n                temp.append(but)\r\n                but.grid(row=i, column=j, padx=5, pady=5)\r\n            seatList.append(temp)\r\n        tk.Button(window, text = \"Book Seats\", bg='black', fg=\"white\", command=self.bookseat).grid(row=6,column=0,pady=10)\r\n\r\n\r\n    def selected(self,i,j):\r\n        if seatList[i][j]['bg'] == 'blue':\r\n            seatList[i][j]['bg'] = 'green'\r\n            seatList[i][j]['activebackground'] = \"forestGreen\"\r\n            seatSelected.remove((i,j))\r\n            return\r\n        seatList[i][j]['bg'] = 'blue'\r\n        seatList[i][j]['activebackground'] = 'blue'\r\n        seatSelected.append((i, 
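The seat buttons above depend on the default-argument idiom command=lambda x=i, y=j: ... — without it every callback would close over the loop variables and fire with the final (i, j). A standalone illustration of that pitfall:

    late = [lambda: i for i in range(3)]
    bound = [lambda i=i: i for i in range(3)]
    print([f() for f in late])   # [2, 2, 2]  - all closures see the last i
    print([f() for f in bound])  # [0, 1, 2]  - defaults bind per iteration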
j))\r\n\r\n def bookseat(self):\r\n for i in seatSelected:\r\n seatList[i[0]][i[1]]['bg'] = 'brown'\r\n seatList[i[0]][i[1]]['activebackground'] = 'brown'\r\n seatList[i[0]][i[1]]['relief'] = 'sunken'\r\n\r\n\r\napp = Application()\r\napp.mainloop()","sub_path":"booking.py","file_name":"booking.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"126547018","text":"# -*- coding: utf8 -*-\nfrom django.db import models\nfrom smartway.configs.variables import TablaPrincipal\nfrom smartway.util.decorators import asStr\nfrom smartway.ui.vehicles.models import (\n Vehiculo,\n VersionNorma,\n)\nfrom smartway.ui.data.opciones import (\n ERROR_CHOICES,\n DTC_TYPE_CHOICES,\n DTC_STATUS_CHOICES,\n ALARM_CHOICES,\n error_map,\n TIPOS_NOTIFICACIONES_CHOICES,\n MECANISMO_ENVIO_NOTIFICACIONES_CHOICES,\n)\n\n\n\n@asStr(\"descripcion\")\nclass TipoVariable(models.Model):\n descripcion = models.CharField(max_length=100)\n\n class Meta:\n verbose_name_plural = \"Tipos de variable\"\n\n\n@asStr(\"descripcion\")\nclass Unidad(models.Model):\n descripcion = models.CharField(max_length=700, unique=True)\n\n class Meta:\n verbose_name_plural = \"Unidades\"\n\n\n@asStr(\"descripcion\")\nclass VariableGeneral(models.Model):\n descripcion = models.CharField(max_length=100)\n unidad = models.ForeignKey(Unidad)\n tipo = models.ForeignKey(TipoVariable, null=True)\n publica = models.BooleanField(default=True)\n\n tracking = models.BooleanField(help_text='indica si se puede monitorear periodicamente la variable')\n alarma = models.BooleanField(help_text='indica si se pueden establecer alarmas sobre la variable')\n valor_minimo = models.FloatField(null=True)\n valor_maximo = models.FloatField(null=True)\n\n def __unicode__(self):\n return u'%s - %s ' % (str(self.id), str(self.descripcion))\n\n\nclass Dato(models.Model):\n variable = models.ForeignKey(VariableGeneral)\n vehiculo = models.ForeignKey(Vehiculo)\n valor = models.CharField(max_length=15, null=True)\n timestamp = models.DateTimeField(db_index=True)\n es_alarma = models.IntegerField(help_text='Indica: 0=Sin Alarma, 1=Alarma ON, 2= Alarma OFF')\n codigo_error = models.IntegerField(choices=ERROR_CHOICES)\n\n def obtenerValor(self):\n retorno = str(self.valor)\n #TODO: PARCHE MIENTRAS SE HACE LA MIGRACION\n if self.variable_id == 1:\n #Variable encendido\n dicc = {0: 'Apagado', 1: 'Apagado', 2: 'Contacto', 3: 'Encendido', 4: 'Moderando', 5: 'Moderando sin PTO',\n 6: 'Moderando con PTO', 7: 'Conduccion', 8: 'Conduccion Sin Cruise Control',\n 9: 'Conduccion Con Cruise Control', 10: 'Cosechando'}\n retorno = dicc[int(self.valor)]\n else:\n try:\n encuentra = False\n valorDefault = None\n if self.variable.unidad.descripcion.startswith('{'):\n descs = self.variable.unidad.descripcion.strip('{}').split(',')\n for desc in descs:\n valores = desc.split('=')\n if valores[0] == '*':\n valorDefault = valores[1]\n elif float(self.valor) == float(valores[0]):\n retorno = valores[1]\n encuentra = True\n\n if (not encuentra) and valorDefault:\n retorno = valorDefault\n else:\n retF = float(retorno)\n if not retF % 1:\n retorno = str(int(retF))\n else:\n retorno = \"%5.3f\" % retF\n # Agrega la unidad para visualizacion\n retorno += \" \" + self.variable.unidad.descripcion\n\n except:\n pass\n\n return retorno\n\n class Meta:\n unique_together = ('variable', 'vehiculo', 'timestamp')\n\n def __unicode__(self):\n if not (self.codigo_error == 0):\n return u'%s' % 
error_map(self.codigo_error)\n if self.variable.unidad.descripcion:\n pass\n return u'%s' % self.obtenerValor()\n# def __init__(self, *args, **kwargs):\n# super(Dato, self).__init__(*args, **kwargs)\n# self.timestamp.replace(tzinfo=TIMEZONE_DATO)\n\n\nclass DatoPosicion(models.Model):\n vehiculo = models.ForeignKey(Vehiculo)\n timestamp = models.DateTimeField()\n latitud = models.FloatField(null=True, blank=True)\n longitud = models.FloatField(null=True, blank=True)\n altitud = models.FloatField(null=True, blank=True)\n course = models.IntegerField(null=True, blank=True)\n\n pto_cardinal = models.CharField(max_length=20, null=True)\n area_type = models.CharField(max_length=20, null=True)\n cant_satelites = models.IntegerField(null=True, blank=True)\n speed = models.IntegerField(null=True, blank=True)\n position_valid = models.NullBooleanField()\n diferential_gps = models.NullBooleanField()\n heading_valid = models.NullBooleanField()\n type_coding = models.CharField(max_length=20, null=True)\n\n def __str__(self):\n ns = \"N\" if self.latitud >= 0 else \"S\"\n ew = \"E\" if self.longitud >= 0 else \"W\"\n return \"%.3f%s %.3f%s\" % (abs(self.latitud), ns, abs(self.longitud), ew)\n\n def codificar(self):\n return \"%s,%s,%s,%s\" % (str(self.latitud), str(self.longitud),\n str(self.altitud), str(self.course))\n\n # class Meta:\n # unique_together = ('vehiculo', 'timestamp')\n\n\nclass ValorTablaPrincipal(models.Model):\n vehiculo = models.ForeignKey(Vehiculo)\n tabla_principal = models.IntegerField()\n valor = models.CharField(max_length=100)\n timestamp = models.DateTimeField()\n\n class Meta:\n verbose_name = \"Valor en tabla principal\"\n verbose_name_plural = \"Valores en Tabla Principal\"\n unique_together = (\"vehiculo\", \"tabla_principal\")\n\n\nclass TipoDatoMedido(models.Model):\n descripcion = models.CharField(max_length=100)\n\n class Meta:\n verbose_name = \"Tipo de dato medido\"\n verbose_name_plural = \"Tipos de datos medidos\"\n\n\nclass UltimoValorMedido(models.Model):\n variable = models.ForeignKey(VariableGeneral)\n vehiculo = models.ForeignKey(Vehiculo)\n valor = models.CharField(max_length=100)\n timestamp = models.DateTimeField()\n data_dato_value = models.IntegerField(null=True)\n# tipo = models.ForeignKey(TipoDatoMedido)\n\n class Meta:\n verbose_name = \"Ultimo Valor Medido\"\n verbose_name_plural = \"Ultimos valores medidos\"\n unique_together = (\"vehiculo\", \"variable\")\n\n\nclass ConfigVariable(models.Model):\n variable = models.ForeignKey(VariableGeneral)\n vehiculo = models.ForeignKey(Vehiculo)\n periodo = models.IntegerField(default=0)\n fecha_configuracion = models.DateTimeField(auto_now=True)\n en_proceso = models.IntegerField(default=0)\n error = models.IntegerField(choices=ERROR_CHOICES, null=True, default=0)\n confirmada = models.BooleanField(default=True)\n tracking = models.BooleanField()\n # Cuando se marca el campo eliminar, y se confirma, indica que la configuracion\n # fue eliminada del vehiculo\n eliminar = models.BooleanField(default=False)\n activa = models.BooleanField()\n\n class Meta:\n verbose_name = \"Configuración de variable\"\n verbose_name_plural = \"Configuraciones de variables\"\n unique_together = (\"vehiculo\", \"variable\")\n\n def __str__(self):\n return '%s - %s' % (self.vehiculo.descripcion, self.variable.descripcion)\n\n\n@asStr(\"texto_error\")\nclass ErrorGeneralBD(models.Model):\n timestamp = models.DateTimeField()\n texto_error = models.TextField()\n vehiculo = models.ForeignKey(Vehiculo, null=True)\n\n class Meta:\n 
verbose_name_plural = \"Errores en la aplicación\"\n\n\nclass ErrorTramaBD(ErrorGeneralBD):\n trama_error = models.CharField(max_length=2500)\n\n class Meta:\n verbose_name_plural = \"Errores de Trama en la aplicación\"\n\n\nclass ErrorReplyBD(ErrorGeneralBD):\n codigo_error = models.IntegerField()\n variable = models.ForeignKey(VariableGeneral, null=True)\n\n class Meta:\n verbose_name_plural = \"Errores de Respuesta\"\n\n\nclass ConfigAlarma(models.Model):\n configuracion = models.ForeignKey(ConfigVariable)\n umbral_h = models.CharField(max_length=15)\n umbral_l = models.CharField(max_length=15)\n fecha_configuracion = models.DateTimeField()\n destino_aviso_sms = models.CharField(max_length=50, null=True)\n destino_aviso_mail = models.EmailField(null=True)\n tiempo_filtro = models.IntegerField(default=40)\n activa = models.BooleanField(help_text='Indica si la configuracion de alarma de la variable esta activada')\n # Cuando se agregan nuevos umbrales para la variable cambia la configuracion de alarma\n # es decir, se crea una nueva configuracion vigente y esta se marca como vigente=False\n vigente = models.BooleanField(help_text='Indica si es la ulitma configuracion vigente para ese vehiculo')\n ultimo_estado = models.BooleanField(default=False, help_text='Indica el ultimo estado conocido de la alarma')\n fecha_ultimo_cambio_estado = models.DateTimeField(null=True)\n\n # def save(self, cambiarConfiguracion=True):\n # # codigo ejecutado previo a guardar el objeto DirApp.\n # if cambiarConfiguracion and self.vigente:\n # self.configuracion.confirmada = False\n # self.configuracion.save()\n # models.Model.save(self)\n\n class Meta:\n verbose_name_plural = \"Configuraciones de Alarma\"\n\n def __str__(self):\n return '%s' % (self.configuracion)\n\n\nclass FallaGeneral(models.Model):\n vehiculo = models.ForeignKey(Vehiculo)\n posicion = models.ForeignKey(DatoPosicion, null=True)\n timestamp = models.DateTimeField()\n estado = models.BooleanField(help_text='Indica si la falla sigue activa o llego notificacion de desactivada')\n tipo = models.IntegerField(choices=DTC_TYPE_CHOICES)\n\n# def __unicode__(self):\n# return u'Tipo: %s - Estado: %s' % (str(self.tipo),str(self.estado))\n\n def get_status_string(self):\n if self.estado:\n return \"Activa\"\n else:\n return \"Inactiva\"\n\n class Meta:\n verbose_name_plural = \"Fallas de Vehiculos\"\n\n\nclass FallaFF(models.Model):\n dato = models.ForeignKey(Dato, primary_key=True)\n falla = models.ForeignKey(FallaGeneral)\n\n class Meta:\n verbose_name_plural = \"Freeze Frames\"\n\n\nclass FallaActiva(models.Model):\n vehiculo = models.ForeignKey(Vehiculo)\n posicion = models.ForeignKey(DatoPosicion, null=True)\n timestamp = models.DateTimeField()\n estado = models.BooleanField(help_text='Indica si la falla sigue activa o llego notificacion de desactivada')\n tipo = models.IntegerField(choices=DTC_TYPE_CHOICES)\n\n class Meta:\n verbose_name = \"Falla Activa\"\n verbose_name_plural = \"Fallas Actualmente Activas\"\n\n\nclass VariablesMonitor(models.Model):\n descripcion = models.TextField()\n latitud = models.ForeignKey(VariableGeneral, related_name='latitud', null=True)\n longitud = models.ForeignKey(VariableGeneral, related_name='longitud', null=True)\n altitud = models.ForeignKey(VariableGeneral, related_name='altitud', null=True)\n\n child_class_names = (\n 'VariablesSiembra',\n 'VariablesCosecha',\n )\n\n def child_object(self):\n for child_class_name in self.child_class_names:\n try:\n return self.__getattribute__(child_class_name.lower())\n 
except eval(child_class_name).DoesNotExist:\n pass\n return self\n\n def child_class_name(self):\n for child_class_name in self.child_class_names:\n try:\n self.__getattribute__(child_class_name.lower())\n return child_class_name\n except eval(child_class_name).DoesNotExist:\n pass\n return 'VariablesMonitor'\n\n class Meta:\n verbose_name_plural = 'Variables del Monitor'\n\n\nclass VariablesCosecha(VariablesMonitor):\n humedad_cosecha = models.ForeignKey(VariableGeneral, related_name='humedad_cosecha')\n velocidad_cosecha = models.ForeignKey(VariableGeneral, related_name='velocidad_cosecha')\n flujo_grano_cosecha = models.ForeignKey(VariableGeneral, related_name='flujo_grano_cosecha')\n\n class Meta:\n verbose_name_plural = 'Variables del Monitor de Cosecha'\n\n\nclass VariablesSiembra(VariablesMonitor):\n velocidad_siembra = models.ForeignKey(VariableGeneral, related_name='velocidad_siembra')\n\n class Meta:\n verbose_name_plural = 'Variables del Monitor de Siembra'\n\n\nclass VersionVariableMonitor(models.Model):\n version = models.ForeignKey(VersionNorma, unique=True)\n archivo = models.ForeignKey(VariablesMonitor)\n\n\nclass Calibracion(models.Model):\n vehiculo = models.ForeignKey(Vehiculo)\n timestamp = models.DateTimeField()\n campo = models.TextField()\n funcionario = models.CharField(max_length=200)\n\n child_class_names = (\n 'CalibracionCosecha',\n 'CalibracionSiembra',\n )\n\n def child_object(self):\n for child_class_name in self.child_class_names:\n try:\n return self.__getattribute__(child_class_name.lower())\n except eval(child_class_name).DoesNotExist:\n pass\n return self\n\n def child_class_name(self):\n for child_class_name in self.child_class_names:\n try:\n self.__getattribute__(child_class_name.lower())\n return child_class_name\n except eval(child_class_name).DoesNotExist:\n pass\n return 'Calibracion'\n\n class Meta:\n verbose_name_plural = 'Calibraciones del Monitor'\n\n\nclass CalibracionCosecha(Calibracion):\n ancho_corte = models.FloatField()\n peso_especifico = models.FloatField()\n calibracion_peso = models.FloatField()\n calibracion_humedad = models.FloatField()\n tipo_grano = models.CharField(max_length=200)\n\n\nclass CalibracionSiembra(Calibracion):\n ancho_corte = models.FloatField()\n\n\nclass ConfigNotificaciones(models.Model):\n vehiculo = models.ForeignKey(Vehiculo)\n tipo = models.IntegerField(choices=TIPOS_NOTIFICACIONES_CHOICES)\n mecanismo = models.IntegerField(choices=MECANISMO_ENVIO_NOTIFICACIONES_CHOICES)\n activado = models.BooleanField(default=True)\n\n\nclass VariableEnProcesoActualizacion(models.Model):\n configuracion = models.ForeignKey(ConfigVariable, unique=True)\n fecha_desactivada = models.DateTimeField(auto_now=True)\n\n\nclass ConfiguracionActualVehiculo(models.Model):\n configuracion = models.TextField()\n vehiculo = models.ForeignKey(Vehiculo)\n timestamp = models.DateTimeField()\n\n\nclass VehicleAlarms(models.Model):\n vehicle = models.ForeignKey(Vehiculo)\n variable = models.ForeignKey(VariableGeneral)\n timestamp = models.DateTimeField()\n state = models.IntegerField(choices=ALARM_CHOICES)\n\n class Meta:\n unique_together = ('vehicle', 'variable')\n\nclass VehicleDtcs(models.Model):\n vehicle = models.ForeignKey(Vehiculo)\n timestamp = models.DateTimeField()\n dtc_status = models.IntegerField(choices=DTC_STATUS_CHOICES)\n description = models.CharField(max_length=200)\n ecu_address = models.IntegerField()\n dtc_type = models.IntegerField(choices=DTC_TYPE_CHOICES)\n value = models.BigIntegerField()\n dtc_key = 
models.CharField(max_length=200)\n\n class Meta:\n unique_together = ('vehicle', 'dtc_key')\n","sub_path":"smartway/ui/data/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477147746","text":"#Жуков Василий ФИТ ИВТ 4 курс\r\n#21.23-22.22\r\nimport time\r\nfib1 = 1\r\nfib2 = 1\r\na = [1, 1]\r\nb = []\r\ni = 0\r\nx=0\r\nn = 30\r\nc=2\r\ntest=input(\"test \")\r\nif test == '1':\r\n x = 47\r\nelif test == '2':\r\n x = 83\r\nelif test == '3':\r\n x = 100\r\nelif test == '4':\r\n x = 213\r\nelse:\r\n x=int(input())\r\n\r\nwhile c < n:\r\n fib_sum = fib2 + fib1\r\n fib1 = fib2\r\n fib2 = fib_sum\r\n c= c + 1\r\n a.append(fib_sum)\r\n\r\na.reverse()\r\n\r\nwhile i < 30:\r\n if x < a[i]:\r\n b.append(0)\r\n i = i + 1\r\n else:\r\n b.append(1)\r\n x = x - a[i]\r\n i = i + 1\r\n\r\nz = 0\r\nwhile b[z] < 1:\r\n b.remove(0)\r\n i = i + 1\r\n\r\nb.pop()\r\nprint(''.join(map(str, b)))\r\ntime.sleep(5)\r\n","sub_path":"fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641858169","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 31 17:16:51 2018\n\n@author: romie\n\"\"\"\n#Hello Git\n\nimport numpy as np\nimport pandas as pd \nimport json\nimport itertools\n\n\ndef flatten(A): #flattens a nested list\n \n if A == []: return A\n if type(A[0]) == list:\n return flatten(A[0]) + flatten(A[1:])\n else: return [A[0]] + flatten(A[1:])\n \n\ndef cart(A,B): # cartesian product of two lists\n \n if A == []: return B\n elif B == []: return A\n else: return [flatten([x,y]) for x in A for y in B]\n\n\ndef most_common(A): #Find most common element\n df = pd.DataFrame({\"A\":A})\n return df.mode().values.tolist()[0]\n\n\n\nclass k_Nerve():\n\n def __init__(self, n_components = None, clusterer_params = (0.1, 5), covering_size = 200, overlap = 2):\n\n self.n_components = n_components\n self.covering_size = covering_size\n self.clusterer_params = clusterer_params\n self.overlap = overlap\n \n def project_data(self,data,labels):\n \n # ---------dimension of the projected data -----------------------------------------------------\n\n k = self.n_components \n \n #------------data frame to record data and the projection to lower dimensional space-------------------\n \n frame = pd.DataFrame({\"data\":data, \"labels\":labels})\n\n from sklearn.decomposition import PCA\n pca = PCA(n_components = k)\n frame[\"proj\"] = pca.fit_transform(data).tolist()\n return frame\n\n def make_covering(self,data,labels):\n\n print(\"building cover ....\")\n\n frame = self.project_data(data,labels)\n\n covering_size = self.covering_size\n\n #-------------- N = lattic length ---------------------------\n\n N = int(np.exp(np.log(covering_size)/self.n_components))\n \n\n # -----------Determine the range of projection map------------------------------------------------\n Y = []\n for i in range(self.n_components):\n Y.append(np.array(self.project_data(data,labels)[\"proj\"].values.tolist())[:,i])\n\n r_max = []\n r_min = []\n for i in range(self.n_components):\n r_max.append(np.amax(Y[i]))\n r_min.append(np.amin(Y[i]))\n\n \n # ------------------------- Make lattice inside projected data -----------------------------------\n\n sub_intervals = []\n for i in range(self.n_components):\n sub_intervals.append([r_min[i] + (r_max[i] - r_min[i])*j/N for j in range(N)] + 
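fib.py above greedily subtracts the largest Fibonacci numbers that still fit and emits a 1 for each term used, i.e. a Zeckendorf-style bit string. A compact round-trip check under the same F(1)=F(2)=1 convention; the fib_bits helper is illustrative, not the script's code:

    def fib_bits(x, n=30):
        fibs = [1, 1]
        while len(fibs) < n:
            fibs.append(fibs[-1] + fibs[-2])
        bits = []
        for f in reversed(fibs):       # largest term first
            if f <= x:
                bits.append(1)
                x -= f
            else:
                bits.append(0)
        return bits, fibs

    bits, fibs = fib_bits(47)
    # decoding weights each bit by the Fibonacci number it stands for
    assert sum(b * f for b, f in zip(bits, reversed(fibs))) == 47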
[r_max[i]])\n\n #print(sub_intervals)\n\n LATTICE = []\n for k in range(len(sub_intervals)):\n LATTICE = cart(LATTICE, sub_intervals[k])\n\n\n # ----------cover projected data with k-balls centered around the lattice points----------------------\n\n R = []\n for i in range(self.n_components):\n R.append((r_max[i] - r_min[i])/N)\n overlap = self.overlap\n ball_radius = overlap*np.amax(R)\n \n from sklearn.metrics.pairwise import euclidean_distances as ED\n k_balls_covering_frames = [None]*((N+1)**(self.n_components ))\n\n\n #-----------cover original data using pullback of k-balls covering along the projection map----------------\n\n covering_frames = [None]*((N+1)**(self.n_components))\n\n for i in range((N+1)**(self.n_components)):\n covering_frames[i] = frame[ ED( frame[\"proj\"].values.tolist(), [LATTICE[i]] ) < ball_radius] \n\n covering_frames_sorted = sorted(covering_frames, key = lambda x:x['labels'].max(axis = 0))\n\n #return covering_frames_sorted\n\n\n return [list(group)[0] for _,group in itertools.groupby(covering_frames_sorted, key = lambda x:x[\"data\"].values.tolist())]\n\n\n\n def cluster(self,data,labels):\n \n #------------Get connected components of each pullback cover-----------\n \n covering = self.make_covering(data, labels)\n\n print(\"clustering covers by connected components ......\")\n\n cluster_frames = [[]]*len(covering)\n index = [[]]*len(covering)\n\n #--------------Use clusterer DBSCAN to get connected components------------------\n \n from sklearn.cluster import DBSCAN\n\n eps, min_samples = self.clusterer_params #------set DBSCAN parameters-------------\n\n \n for i in range(len(covering)): \n C = covering[i][\"data\"].values.tolist()\n\n if C != []:\n dbscan = DBSCAN(eps = eps, min_samples = min_samples).fit(C)\n covering[i][\"cluster\"] = dbscan.labels_\n cluster_frames[i] = [covering[i][covering[i][\"cluster\"] == label] for label in set(dbscan.labels_)]\n index[i] = [str(i) + \",\" + str(j) + \",\" + str(len(cluster_frames[i][j])) + \",\" + str(cluster_frames[i][j][\"labels\"].mode().values[0]) for j in range(len(set(dbscan.labels_)))]\n \n return cluster_frames, index\n\n\n def fit(self,data, labels):\n\n\n #------ make (two-dim) nerve of covering----------------------------------\n\n cluster_frames, index = self.cluster(data, labels)\n\n print(\"building nerve .......\")\n\n #------- vertices = clusters ------------------\n\n print(\" making vertices ....\")\n V = flatten(index)\n \n\n #-------- edges = cluster pairs with nonempty intersection -------------------------\n \n print(\" making edges .....\")\n pairs = [(x,y) for x in V for y in V if V.index(x) < V.index(y)] # edges are non-degenerate\n \n E = [(x, y) for (x,y) in pairs if [a for a in cluster_frames[int(x.split(\",\")[0])][int(x.split(\",\")[1])][\"data\"].values.tolist() if a in cluster_frames[int(y.split(\",\")[0])][int(y.split(\",\")[1])][\"data\"].values.tolist()] != []]\n \n\n #---------- faces = cluster triples with nonempty intersection -----------------------\n\n print(\" making faces ......\")\n triples = [(x,y,z) for x in V for y in V for z in V if V.index(x) < V.index(y) and V.index(y) < V.index(z)] #faces are non-degenerate\n\n F = [(x,y,z) for (x,y,z) in triples if [a for a in cluster_frames[int(x.split(\",\")[0])][int(x.split(\",\")[1])][\"data\"].values.tolist() if a in cluster_frames[int(y.split(\",\")[0])][int(y.split(\",\")[1])][\"data\"].values.tolist() if a in cluster_frames[int(z.split(\",\")[0])][int(z.split(\",\")[1])][\"data\"].values.tolist()] != []]\n\n\n return 
V,E,F\n\n \n def draw(self,data,labels):\n\n V, E, F = self.fit(data,labels)\n\n #------------ encode nerve simplicial complex in json ---------------------\n\n print(\"building json data..........\")\n if [int(v.split(\",\")[2]) for v in V] != []:\n max_weight = max([int(v.split(\",\")[2]) for v in V])\n else: max_weight = 0\n\n LABELS = set(labels)\n\n\n nodes = [{\"id\": v, \"weight\": int(v.split(\",\")[2]), \"label\": int(v.split(\",\")[3])} for v in V]\n links = [{\"source\": V.index(link[0]), \"target\": V.index(link[1]), \"value\": 1} for link in E]\n paths = [{ \"vertices\":[{\"node\": V.index(node[0]) }, {\"node\": V.index(node[1])}, {\"node\": V.index(node[2])}] , \"label\": most_common([int(node[0].split(\",\")[3]), int(node[1].split(\",\")[3]), int(node[2].split(\",\")[3])] ) } for node in F]\n\n\n \n viz = {\"max_weight\": max_weight, \"labels_size\": len(LABELS), \"nodes\":nodes, \"links\": links, \"paths\": paths}\n\n viz_json = json.dumps(viz)\n\n file = open(\"kNerve.json\", 'w')\n file.write(viz_json)\n file.close()\n print(\"DONE!!\")\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n\n","sub_path":"k_nerve.py","file_name":"k_nerve.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"416409566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 25 22:06:02 2017\n\n@author: misWin\n\nThis is a template of python files for simulation algorithem on Rivanna\n\"\"\"\n\nfrom milp4_1 import *\nseed = 0\nnp.random.seed(seed)\nkwargs = {'miphint':True, 'mipfocus':1, 'mipgap':0.001, 'presolve':2, 'logfile':'log_qpsk.log'}\n\nbatch_id = 0\nnetwork_cost = pd.read_csv('nsf-24nodes.csv', header=None, index_col=None)\nnetwork_cost = network_cost.as_matrix()\nsn = Network(network_cost, modulation='qpsk')\ndemands_file = 'demands_template_'+str(batch_id)+'.csv'\ndemands = pd.read_csv(demands_file)\n\niteration_history_tr, iteration_history_gn = sn.iterate(demands, **kwargs)\n\niteration_history = (iteration_history_tr, iteration_history_gn)\noutput_file = 'output-GN-vs-TR-qpsk-nsf24'+str(batch_id)+'.pkl'\nsave_data(output_file, (sn, iteration_history))","sub_path":"total_bounded/python_template_qpsk.py","file_name":"python_template_qpsk.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"299681870","text":"import re\nimport telegram\nfrom telegram import Telegram\nimport bot_handler\nfrom tunnel import Tunnel\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass Error(Exception):\n pass\n\nclass FileServeBot():\n def __init__(self, auth, offset=None, sources=(), admins=()):\n \"\"\"Sets up a bot that provides access to a list of files\n\n Args:\n auth (str): Token recieved from @BotFather\n offset (Optional[int]): Initial offset to be used\n If not provided offset is found with Telegram.findOffset\n sources (Optional[List]): A list of telegram user ids who can\n provide new files.\n admins (Optional[List]): A list of telegram user ids who can\n use administrative commands such as stopping the bot.\n \"\"\"\n self.auth = auth\n self.bot = Telegram(self.auth)\n bot = self.bot\n self.sources = sources\n self.admins = admins\n self.offset = offset\n \n self.config = {'sources':tuple(sources), 'admins':tuple(admins)}\n self.handlers = [Tunnel(bot, self.config)]\n for handler in self.handlers:\n handler.postInit()\n\n def start(self):\n \"\"\"pls\"\"\"\n bot = self.bot\n \n try:\n 
logger.debug(bot.getMe())\n except telegram.Error:\n logger.exception(\"getMe() failed\")\n \n if self.offset == None:\n try:\n bot.findOffset()#offset now set to most recent message's id+1\n except telegram.Error:\n logger.critical(\"Could not get offset.\")\n raise\n else:\n bot.offset = self.offset\n logger.info(\"Initial offset set to {0}\".format(bot.offset))\n logger.debug(\"armed\")\n\n done = False\n failed = False\n while not done:\n if failed:\n time.sleep(4)\n failed = False\n try:\n recv = bot.getUpdates()\n except telegram.Error:\n logger.exception(\"getUpdates() failed\")\n failed=True\n continue\n \n try:\n result = recv['result']\n except KeyError:\n logger.error(\"No result key in response.\")\n continue\n \n for update in result:\n #offset = update['update_id']+1\n for handler in self.handlers:\n try:\n handler.update(update)\n except telegram.Error:\n logger.exception(\"Telegram error from handler \" + str(handler.__repr__()))\n\n logger.debug(\"\\n\")\n logger.debug(update)\n try:\n from_id = update['message']['from']['id']\n chat_id = update['message']['chat']['id']\n text = update['message']['text']\n except KeyError:\n pass\n else:\n logger.info(\"{0}: {1}\".format(str(from_id), text))\n if text == \"/q\" and from_id in self.admins:\n done = True\n\ndef main(offset=None):\n \"\"\"\n config\n auth token for bot\n comma separated list of ids for admins\n comma separated list of ids for sources\n use empty lines if not providing sources or admins\n \"\"\"\n with open(\"config.txt\", 'r') as f:\n auth = f.readline().strip()\n \n admins = (n.strip() for n in f.readline().split(\",\"))\n admins = [int(n) for n in admins if n]\n \n sources = (n.strip() for n in f.readline().split(\",\"))\n sources = [int(n) for n in sources if n]\n \n bot = FileServeBot(auth, offset=offset, admins=admins, sources=sources)\n bot.start()\n\nif __name__ == \"__main__\":\n #FORMAT = \"%(message)s\"\n #FORMAT = \"%(asctime)s %(message)s\"\n #FORMAT = \"%(asctime)s %(levelname)s %(message)s\"\n FORMAT = \"%(asctime)s %(filename)s %(lineno)s %(funcName)s %(levelname)s %(message)s\"\n logging.basicConfig(format=FORMAT, level=\"DEBUG\")\n # import argparse\n import time\n\n debug = True\n if debug:\n main()\n else:\n while True:\n try:\n main()\n except KeyboardInterrupt:\n break\n except:#pylint: disable=all\n logger.exception(\"Error in main\")\n try:\n time.sleep(5)\n except:\n logger.exception(\"Error while waiting for restart.\")\n","sub_path":"digbot/tunbot.py","file_name":"tunbot.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"666485","text":"#Edited some parts of the code to work in a linux terminal instead of a windows CMD command window.\n#And used \"print\" because that's what python uses to send messages to the terminal/windows CMD.\n#Also removed the \"[title]\" stuff as this is for the linux shell and not CMD for windows so it's not necessary.\n\nfrom colors import green, red, reset\nimport threading, requests, random, string, sys, os; from time import sleep\n\nprint('[Lightshot Brute Forcer] - Scraping Screenshots \\n Loading...')\nif not os.path.exists('Images'): os.mkdir('Images')\n #mkdir is the linux equivalent to make a folder/make directory.\n\nvalid = 0\ninvalid = 0\nretries = 0\nproxies = []\nproxy_num = 0\nlock = threading.Lock()\n\nheaders = {\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/83.0.4103.61 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'\n}\n\ndef grab_proxies():\n while True:\n all_proxies = requests.get('https://api.proxyscrape.com/?request=getproxies&proxytype=http&timeout=5000&country=all&ssl=all&anonymity=all').text\n for proxy in all_proxies.splitlines():\n proxies.append(proxy)\n\n sleep(600)\n proxies.clear()\n\ndef cpm(): #not sure if this is needed now as I removed the \"[title]\" stuff/lines of code for windows CMD..But I'll leave it just incase I want to do something with it at a later time.\"\n old = valid + invalid\n sleep(1)\n new = valid + invalid\n return ((new - old) * 60)\n\ndef save(arg):\n content = requests.get(arg).content\n if 'image.prntscr' in arg: half_url = 'https://image.prntscr.com/image/'\n elif 'i.imgur' in arg: half_url = 'https://i.imgur.com/'\n with open('Images/' + arg.replace(half_url, '')[:6] + '.png', 'wb') as f: f.write(content)\n\ndef main(proxy):\n global valid\n global invalid\n global retries\n\n code = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))\n try:\n check = requests.get('https://prnt.sc/%s' % (code), headers = headers, proxies = {'https': 'http://%s' % (proxy)}).text\n except:\n retries += 1\n else:\n if 'name=\"twitter:image:src\" content=\"' in check and not '0_173a7b_211be8ff' in check and not 'ml3U3Pt' in check:\n lock.acquire(); sys.stdout.write('[%sVALID%s] https://prnt.sc/%s\\n' % (green(), reset(), code)); lock.release()\n valid += 1\n url = check.split('name=\"twitter:image:src\" content=\"')[1].split('\"/> = len(proxies):\n proxy_num = 0\n except:\n pass\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106035542","text":"from setuptools import setup, find_packages\nPACKAGE = \"ldap_playbook\"\nNAME = \"ldap_playbook\"\nDESCRIPTION = \"\"\nAUTHOR = \"jialiang.ni\"\nAUTHOR_EMAIL = \"jialiang.ni\"\nURL = \"\"\nVERSION = \"0.0.1\"\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=\"BSD\",\n url=URL,\n packages=find_packages(),\n zip_safe=False,\n)\n","sub_path":"pypi_install_script/ldap_playbook-0.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"276391592","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport itertools\n\ndef gen_s1_basis(N):\n basis = -np.ones(N)\n for n_neg in range(0,N+1):\n for n0 in range(0,N+1):\n n_pos = N - n_neg - n0\n if n_pos >= 0:\n neg_part = -np.ones(n_neg)\n zero_part = np.zeros(n0)\n pos_part = np.ones(n_pos)\n\n state = np.append(np.append(neg_part,zero_part),pos_part)\n\n perms = np.array((list(itertools.permutations(state))))\n basis = np.vstack((basis,perms))\n\n basis = np.vstack({tuple(row) for row in basis})\n return basis\n\ndef find_array_index(array,state):\n index=0\n check = 0\n for n in range(0,np.size(array,axis=0)):\n if np.array_equal(array[n,:],state):\n check = 1\n break\n index += 1\n\n if check == 1:\n return index\n else:\n print(\"Error\")\n\ndef si_plus(state,S,i):\n diff = np.zeros(np.size(state))\n diff[i] = 1\n return state + diff\n\ndef 
si_minus(state,S,i):\n diff = np.zeros(np.size(state))\n diff[i] = 1\n return state - diff\n\ndef pairwise_sum(state):\n temp = 0\n for i in range(0,np.size(state)-1,1):\n temp = temp + state[i]*state[i+1]\n return temp\n\ndef pairwise_square_sum(state):\n temp = 0\n for i in range(0,np.size(state)-1,1):\n temp = temp + np.power(state[i],2)*np.power(state[i+1],2)\n return temp\n \ndef gen_aklt_H(N,B):\n basis = gen_s1_basis(N)\n #gen H. Loop through i basis vectors operating with parts of H\n #H = s_n dot s_n+1 + B (s_n dot s_n+1)**2\n #s_n dot s_n+1 = 1/2*(s+_n * s-_n+1 + s-_n * s+_n+1) + sz_n*sz_n+1\n\n #generate heisenberg H first s_n dot s_n+1\n H_heis=np.zeros((np.size(basis,axis=0),np.size(basis,axis=0)))\n H_extra=np.zeros((np.size(basis,axis=0),np.size(basis,axis=0)))\n for i in range(0,np.size(basis,axis=0)):\n #sz_n * sz_n+1\n H_heis[i,i] = pairwise_sum(basis[i])\n #B/2 from squared ladder operators\n H_extra[i,i] = B * pairwise_square_sum(basis[i]) + 2*B\n for n in range(0,N-1):\n #1/2(s+_n+1 * s-_n)\n temp = si_plus(si_minus(basis[i],1,n),1,n+1)\n if np.max(temp) <= 1:\n if np.min(temp) >= -1:\n j = find_array_index(basis,temp)\n H_heis[i,j] = 1\n\n #1/2(s+_n * s-_n+1)\n temp = si_plus(si_minus(basis[i],1,n+1),1,n)\n if np.max(temp) <= 1:\n if np.min(temp) >= -1:\n j = find_array_index(basis,temp)\n H_heis[i,j] = 1\n\n\n # generate extra aklt hamiltonian\n #squared ladder ops (1)\n temp = si_minus(basis[i],1,n+1)\n temp = si_plus(temp,1,n)\n temp = si_minus(temp,1,n+1)\n temp = si_plus(temp,1,n)\n if np.max(temp) <= 1:\n if np.min(temp) >= -1:\n j = find_array_index(basis,temp)\n H_extra[i,j] = B\n\n #squared ladder ops (2)\n temp = si_minus(basis[i],1,n)\n temp = si_plus(temp,1,n+1)\n temp = si_minus(temp,1,n)\n temp = si_plus(temp,1,n+1)\n if np.max(temp) <= 1:\n if np.min(temp) >= -1:\n j = find_array_index(basis,temp)\n H_extra[i,j] = B\n\n #spin z and ladder cross term (1)\n temp = si_minus(basis[i],1,n)\n temp = si_plus(temp,1,n+1)\n sz_eig = temp[n]*temp[n+1]\n if np.max(temp) <= 1:\n if np.min(temp) >= -1:\n j = find_array_index(basis,temp)\n H_extra[i,j] = 2*B*sz_eig\n\n #spin z and ladder cross term (2)\n temp = si_minus(basis[i],1,n+1)\n temp = si_plus(temp,1,n)\n sz_eig = temp[n]*temp[n+1]\n if np.max(temp) <= 1:\n if np.min(temp) >= -1:\n j = find_array_index(basis,temp)\n H_extra[i,j] = 2*B*sz_eig\n\n return H_heis + H_extra\n\ndef total_z_spin(state):\n N = int(math.log(np.size(state),3))\n basis=gen_s1_basis(N)\n total_z_spin = 0\n for n in range(0,np.size(state,axis=0)):\n total_z_spin = total_z_spin + np.abs(state[n])**2 * np.sum(basis[n])\n return total_z_spin\n\n\ndef entropy_half_chain_split(N,B):\n H=gen_aklt_H(N,B)\n eig,eig_states = np.linalg.eig(H)\n ground_state = eig_states[:,np.argmin(eig)]\n\n basis=gen_s1_basis(N)\n\n #generate coefficient matrix\n N_A = int(np.floor(N/2))\n N_B = int(N-np.floor(N/2))\n #each site is spin-1, so each sub-chain has 3**N_x basis states\n M=np.zeros((np.power(3,N_A),np.power(3,N_B)))\n\n #split original basis into product state AB\n #coefficients of ground state form matrix M_ij*state^A_i*state^B_j\n for n in range(0,np.size(ground_state,axis=0)):\n #map the spin-1 configuration (-1,0,1) of each sub-chain to a base-3 index\n i=int(''.join(str(int(s)+1) for s in basis[n,:N_A]),3)\n j=int(''.join(str(int(s)+1) for s in basis[n,N_A:]),3)\n M[i,j] = ground_state[n]\n \n #schmidt decomposition\n U,S,V = np.linalg.svd(M)\n\n #von Neumann entropy\n entropy = 0\n for n in range(0,np.size(S,axis=0)):\n entropy = entropy - np.abs(S[n])**2*np.log(np.abs(S[n]**2))\n return 
entropy\n","sub_path":"aklt/aklt_functions.py","file_name":"aklt_functions.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"339855523","text":"# encoding: utf-8\n\nimport jinja2\nimport webapp2\nfrom webapp2_extras import sessions\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader('www/templates/'),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass BaseHandler(webapp2.RequestHandler):\n \"\"\"\n From http://webapp2.readthedocs.io/en/latest/api/webapp2_extras/sessions.html\n\n dispatch() and session() enable session management.\n\n \"\"\"\n session_store = None\n\n def dispatch(self):\n \"\"\"\n Get a session store for this request.\n\n \"\"\"\n self.session_store = sessions.get_store(request=self.request)\n\n try:\n # Dispatch the request.\n webapp2.RequestHandler.dispatch(self)\n finally:\n # Save all sessions.\n self.session_store.save_sessions(self.response)\n\n @webapp2.cached_property\n def session(self):\n \"\"\"\n Returns a session using the default cookie key.\n\n \"\"\"\n return self.session_store.get_session()\n\n\nclass PageHandler(BaseHandler):\n \"\"\"\n Class for jinja2 template rendering handlers\n\n \"\"\"\n def render(self, template_name, values):\n template = JINJA_ENVIRONMENT.get_template(template_name)\n self.response.write(template.render(values))\n\n\nclass IndexPageHandler(PageHandler):\n def get(self):\n self.render('index.html', {})\n\n\nclass RootPageHandler(PageHandler):\n def get(self):\n self.redirect('index')\n","sub_path":"src/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"395690475","text":"import numpy as np \nimport os\nimport argparse \nimport tqdm\nimport pandas as pd\nimport SimpleITK as sitk \nfrom medpy import metric\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file_path', type=str, default='./results/abus_roi/0108_dice_1/')\n\n args = parser.parse_args()\n # save csv file to the current folder\n if args.file_path[-1] == '/':\n args.save = args.file_path[:-1] + '.csv'\n else:\n args.save = args.file_path + '.csv'\n\n return args\n\ndef main():\n args = get_args()\n\n dsc_list = []\n jc_list = []\n hd_list = []\n hd95_list = []\n asd_list = []\n filenames = os.listdir(args.file_path)\n for filename in tqdm.tqdm(filenames):\n gt_img = sitk.ReadImage(os.path.join(args.file_path, filename+'/gt.nii.gz'))\n gt_volume = sitk.GetArrayFromImage(gt_img)\n\n pre_img = sitk.ReadImage(os.path.join(args.file_path, filename+'/pred.nii.gz'))\n pre_volume = sitk.GetArrayFromImage(pre_img)\n\n dsc = metric.binary.dc(pre_volume, gt_volume)\n jc = metric.binary.jc(pre_volume, gt_volume)\n hd = metric.binary.hd(pre_volume, gt_volume, voxelspacing=(0.4, 0.4, 0.4))\n hd95 = metric.binary.hd95(pre_volume, gt_volume, voxelspacing=(0.4, 0.4, 0.4))\n asd = metric.binary.asd(pre_volume, gt_volume, voxelspacing=(0.4, 0.4, 0.4))\n\n dsc_list.append(dsc)\n jc_list.append(jc)\n hd_list.append(hd)\n hd95_list.append(hd95)\n asd_list.append(asd)\n\n df = pd.DataFrame()\n df['name'] = filenames\n df['dsc'] = np.array(dsc_list)\n df['jc'] = np.array(jc_list) \n df['hd'] = np.array(hd_list) \n df['hd95'] = np.array(hd95_list) \n df['asd'] = np.array(asd_list) \n print(df.describe())\n df.to_csv(args.save)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"code/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"78647120","text":"import os \nimport argparse\n\nparser = argparse.ArgumentParser(\"Inference\")\nparser.add_argument('--gpu', default=0, type=int, help='use gpu with cuda number')\nparser.add_argument('--model_path', type=str)\nargs = parser.parse_args()\n\nmodel_path = args.model_path # \"/data_hdd/hoseong/videoexp/cvpr_rgb_r2plus1d_64f_34_bert10_split0_mixtype_None_optimizer_AdamW_randaug_1_3/\"\n\ncore_cmd = \"python two_stream_bert2_inference.py --split=00 --arch=rgb_r2plus1d_64f_34_bert10 --workers=4 --batch-size=2 --dataset=cvpr --gpu {} --tta 1 \\\n--model-path={}\".format(args.gpu, model_path)\n\n\nckpt_list = os.listdir(model_path)\nckpt_list = [file for file in ckpt_list if file.endswith(\".tar\")]\nprint(ckpt_list)\n\nfor ckpt in ckpt_list:\n os.system(core_cmd + ckpt)\n","sub_path":"inferencer.py","file_name":"inferencer.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404903799","text":"import logging\n\nimport copy\nimport telnetlib\nimport functools\nimport requests\nimport warnings\n\nfrom objectifier.objectifier import Objectifier\n\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.http import urlencode\nfrom rest_framework.exceptions import APIException\n\nfrom cisco_cam.access_management.models import AccessUser\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_available_server(view_func):\n \"\"\"\n Test if VSM stazione is available\n if not, check witch VSM is active\n and store it address to compare with addresses\n returned by get_streams / get_ptz_status e.t.c.\n if no station is active - raise error\n \"\"\"\n @functools.wraps(view_func)\n def wrapped(api_wrapper, *args, **kwargs):\n if 'camera' in kwargs:\n # Camera object\n api_wrapper._camera_data = kwargs['camera']\n # get servers for Location of that camera\n api_wrapper._get_servers()\n else:\n location = kwargs['location']\n api_wrapper._get_servers(location_uid=location)\n\n server_stazione_ip = None\n active_servers = {}\n active_servers_ips = set()\n servers = api_wrapper._servers.data.items\n\n if not servers:\n msg = \"No servers for that location.\"\n logger.exception(msg)\n raise APIException(msg)\n\n for server in servers:\n if server.uid == api_wrapper.access_user.vsom_uid:\n continue\n\n if server.deviceState.aggregateState not in ['ok', 'warning']:\n continue\n\n server_ip = server.deviceAccess.hostname_ip\n\n active_servers[server.uid] = {\n \"server_uid\": server.uid,\n \"server_name\": server.name,\n \"server_ip\": server_ip,\n }\n\n # TODO Any other method to recognize STAZIONE?\n if server.name == \"MS-STAZIONE\" and server_ip:\n server_stazione_ip = server_ip\n elif server_ip:\n active_servers_ips.add(\n server_ip\n )\n\n if not active_servers:\n msg = \"All servers are not available. 
Check servers and retry.\"\n # print(msg)\n logger.exception(msg)\n raise APIException(msg)\n\n if not server_stazione_ip and not active_servers_ips:\n msg = \"No information about IP of active servers.\"\n # print(msg)\n logger.exception(msg)\n raise APIException(msg)\n\n active_servers_ips = sorted(list(active_servers_ips))\n\n if not server_stazione_ip:\n # if Stazione server not found, will use another\n api_wrapper.active_server_ip = active_servers_ips[0]\n if len(active_servers_ips) > 1:\n api_wrapper._additional_server_ips.update(active_servers_ips[1:])\n else:\n # Main VSM is active\n api_wrapper.active_server_ip = server_stazione_ip\n active_servers_ips and api_wrapper._additional_server_ips.update(\n active_servers_ips\n )\n\n api_wrapper._vsm_servers_dict = active_servers\n\n response = view_func(api_wrapper, *args, **kwargs)\n\n # by now the handler may have added more IPs to _additional_server_ips,\n # so check the accessibility of the servers\n if api_wrapper._check_if_ip_is_reachable(api_wrapper.active_server_ip):\n return response\n # if it isn't accessible, check whether we have other IPs to try\n for additional_ip in api_wrapper._additional_server_ips:\n if api_wrapper._check_if_ip_is_reachable(additional_ip):\n api_wrapper.active_server_ip = additional_ip\n return response\n\n raise APIException('There are no reachable server IPs.')\n\n return wrapped\n\n\nclass CiscoApiWrapper(object):\n \"\"\"\n Wrapper over the Cisco API.\n Can get information about Streams and Recordings, operate PTZ actions, etc.\n \"\"\"\n def __init__(self, username):\n self.access_user = get_object_or_404(AccessUser, username=username)\n if not self.access_user.session:\n self.access_user.login()\n self._camera_data = self._servers = self._vsm_servers_dict = None\n self._additional_server_ips = set()\n self.active_server_ip = self.api_version = None\n\n def _make_request(self, url, data, method='POST', as_object=True, is_full_url=False, expect_xml=False):\n \"\"\"\n Make a request to the Cisco API\n :param url: url\n :param data: dict to be passed with the request\n :param method: method of the request\n :param as_object: return response as the object, otherwise the dict will be returned\n :param is_full_url: means that the url is already full,\n and we don't need to add server_root / json_root\n :param expect_xml: the response is expected to be an XML\n \"\"\"\n headers = copy.deepcopy(self.access_user.settings['DEFAULT_HEADERS'])\n headers['Cookie'] = 'x-ism-sid={}'.format(self.access_user.session)\n if not is_full_url:\n url = '{}{}{}'.format(\n self.access_user.server_root,\n self.access_user.settings['CISCO_JSON_ROOT'],\n url\n )\n # TODO remove warning ignore\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n resp = requests.request(method, url, json=data, headers=headers, verify=False)\n if resp.status_code != requests.codes.ok:\n # failed on network\n resp.raise_for_status()\n\n if expect_xml:\n # do_ptz returns an XML document (the tags of the sample that used\n # to sit here were stripped in this copy); the payload carries a\n # ResponseStatus element with a statusCode such as 0 and the text\n # 'Camera control request succeeded', a command count such as 2,\n # and a per-command status such as 'Updated', parsed below via\n # Objectifier.\n\n response_object = Objectifier(resp.text)\n\n ptz_statuses = self.access_user.settings[\"PTZ_STATUS_CODE\"]\n valid_statuses = self.access_user.settings[\"PTZ_OK_STATUSES\"]\n\n try:\n if response_object.ResponseStatus.statusCode not in ptz_statuses \\\n or ptz_statuses[response_object.ResponseStatus.statusCode] not in valid_statuses:\n exception_details = 'request {} failed on server\\t' \\\n 'request data {}\\t' \\\n 'method 
{}\\t output {}\\t'.format(url, data, method, resp.text)\n raise APIException(exception_details)\n except AttributeError:\n exception_details = 'request {} response not meeting our expectations \\t' \\\n 'request data {}\\t' \\\n 'method {}\\t output {}\\t'.format(url, data, method, resp.text)\n raise APIException(exception_details)\n\n return response_object\n\n response_json = resp.json()\n\n response_object = Objectifier(response_json)\n\n if response_object.status.errorType != 'SUCCESS':\n exception_details = \\\n 'request {} failed on server\\t ' \\\n 'request data {}\\t' \\\n 'method {}\\t' \\\n 'errorType: {} \\t' \\\n 'errorReasonCode: {} \\t' \\\n 'errorMsg: {} \\t'.format(\n url,\n data,\n method,\n response_object.status.errorType,\n response_object.status.errorReasonCode,\n response_object.status.errorMsg\n )\n raise APIException(exception_details)\n\n if as_object:\n return response_object\n return response_json\n\n def _get_servers(self, location_uid=None):\n \"\"\"\n get server list for camera Location\n \"\"\"\n request_data = copy.deepcopy(self.access_user.settings['GET_SERVERS']['req'])\n location_uid = location_uid or self._camera_data.location_uid\n\n request_data['filter']['byLocationUids'] = [location_uid]\n request_data['filter'][\"byVsomUid\"] = self.access_user.vsom_uid\n\n self._servers = self._make_request(\n url=self.access_user.settings['GET_SERVERS']['url'],\n data=request_data\n )\n\n @staticmethod\n def _check_if_ip_is_reachable(ip, port=80, timeout=3):\n \"\"\"\n check if passed IP is reachable\n \"\"\"\n try:\n telnetlib.Telnet(ip, port=port, timeout=timeout)\n except Exception as e:\n # TODO log e\n return False\n return True\n\n def _get_camera_ref(self, camera=None):\n \"\"\"\n create camera part of request\n \"\"\"\n if not camera:\n assert self._camera_data, (\n 'Cannot get camera_ref before getting Camera instance'\n )\n camera = self._camera_data\n\n camera_ref = dict()\n camera_ref[\"refUid\"] = camera.uid\n camera_ref[\"refName\"] = camera.name\n camera_ref[\"refObjectType\"] = \"device_vs_camera_ip\"\n camera_ref[\"refVsomUid\"] = self.access_user.vsom_uid\n\n return camera_ref\n\n def _get_security_token(self):\n \"\"\"\n get new security token before every ptz call\n \"\"\"\n camera_ref = self._get_camera_ref()\n\n request_data = copy.deepcopy(self.access_user.settings['GET_SECURITY_TOKEN']['req'])\n request_data['cameraRef'] = camera_ref\n\n return self._make_request(\n url=self.access_user.settings['GET_SECURITY_TOKEN']['url'],\n data=request_data\n )\n\n def get_locations(self):\n \"\"\"\n get all locations\n \"\"\"\n request_data = self.access_user.settings['GET_LOCATION_TREE']['req']\n response = self._make_request(\n url=self.access_user.settings['GET_LOCATION_TREE']['url'],\n data=request_data\n )\n return response.data.childGroups\n\n @check_available_server\n def get_streams(self, camera):\n \"\"\"\n getting all streams based on camera uid\n then adding some additional server IP addresses to check them later\n \"\"\"\n camera_ref = self._get_camera_ref()\n\n request_data = copy.deepcopy(self.access_user.settings['GET_STREAMS']['req'])\n request_data['cameraStreamingDetailsRequest']['cameraRefs'].append(camera_ref)\n\n response = self._make_request(\n url=self.access_user.settings['GET_STREAMS']['url'],\n data=request_data\n )\n\n # get some additional server IP's in case of active_server_ip will not be reachable\n for server_info in response.data.serverInfos:\n for ip_address in server_info.ipAddresses:\n ip_address and 
self._additional_server_ips.add(ip_address)\n\n return response\n\n @check_available_server\n def do_ptz(self, camera, pan, tilt, zoom):\n \"\"\"\n make PTZ action\n \"\"\"\n security_token = self._get_security_token()\n\n host = 'https://' + self.active_server_ip # cisco_server_root\n path = self.access_user.settings['DO_PTZ']['url'].format(self._camera_data.alternate_id)\n url = host + path\n url += urlencode(\n [\n ('pan', pan),\n ('tilt', tilt),\n ('zoom', zoom),\n ('priority', 1),\n ('token', security_token.data)\n ]\n )\n\n return self._make_request(\n url=url,\n data={},\n method='GET',\n is_full_url=True,\n expect_xml=True\n )\n\n def get_cameras_by_site(self, site_uid):\n request_data = copy.deepcopy(self.access_user.settings['GET_CAMERAS']['req'])\n request_data['filter'][\"byLocationUids\"] = [site_uid]\n request_data['filter'][\"byVsomUid\"] = self.access_user.vsom_uid\n response = self._make_request(\n url=self.access_user.settings['GET_CAMERAS']['url'],\n data=request_data\n )\n return response.data.items\n\n @check_available_server\n def get_streams_mass(self, cams, location):\n \"\"\"\n Get Streams for list of cameras\n \"\"\"\n request_data = copy.deepcopy(self.access_user.settings['GET_STREAMS']['req'])\n for cam in cams:\n camera_ref = self._get_camera_ref(cam)\n request_data['cameraStreamingDetailsRequest']['cameraRefs'].append(camera_ref)\n\n response = self._make_request(\n url=self.access_user.settings['GET_STREAMS']['url'],\n data=request_data,\n )\n\n # get some additional server IP's in case of active_server_ip will not be reachable\n for server_info in response.data.serverInfos:\n for ip_address in server_info.ipAddresses:\n ip_address and self._additional_server_ips.add(ip_address)\n\n return response\n\n def get_recording_catalog(self, camera_alternate_id):\n \"\"\"\n Get cams history\n \"\"\"\n request_data = copy.deepcopy(self.access_user.settings['GET_RECORDING_CATALOG_ENTRIES']['req'])\n request_data['filter']['byCameraAlternateId'] = camera_alternate_id\n\n response = self._make_request(\n url=self.access_user.settings['GET_RECORDING_CATALOG_ENTRIES']['url'],\n data=request_data\n )\n\n return response\n\n def get_all_cameras(self):\n \"\"\"\n getLocationTree\n for each location\n for each site\n get all cameras\n \"\"\"\n locations = self.get_locations()\n\n cameras = set()\n for loc in locations:\n if not loc.hasChildGroups:\n continue\n for site in loc.childGroups:\n # get cameras for site\n for camera in self.get_cameras_by_site(site.uid):\n setattr(camera, 'location', loc.uid)\n setattr(camera, 'site_uid', site.uid)\n cameras.add(camera)\n\n return cameras\n\n def get_all_sites(self):\n \"\"\"\n get all sites from getLocationTree\n \"\"\"\n locations = self.get_locations()\n\n sites = set()\n for loc in locations:\n if not loc.hasChildGroups:\n continue\n sites.update(loc.childGroups)\n return sites\n\n def _get_first_last_recording_catalog_entry(self, record_uid):\n request_data = copy.deepcopy(self.access_user.settings['GET_FIRST_LAST_RECORDING_CATALOG_ENTRY']['req'])\n request_data[\"cameraRef\"] = self._get_camera_ref()\n request_data[\"recordingCatalogEntryId\"] = record_uid\n response = self._make_request(\n url=self.access_user.settings['GET_FIRST_LAST_RECORDING_CATALOG_ENTRY']['url'],\n data=request_data\n )\n return response\n\n @check_available_server\n def get_thumbnails(self, camera):\n \"\"\"\n 1. list of recordings - self.get_recording_catalog\n 2. uid of recording goes to self._get_first_last_recording_catalog_entry\n 3. 
uid and start_frame?? / [\"data\"][\"lastFrame\"] goes to get_thumbnails with start_frame/last_frame times\n \"\"\"\n result = self.get_recording_catalog(camera.alternate_id)\n camera_ref = self._get_camera_ref()\n\n # uids = set()\n response_list = []\n for recording in result.data.items:\n # uids.add(recording.uid)\n first_last = self._get_first_last_recording_catalog_entry(record_uid=recording.uid)\n # ???\n another_uid = first_last.data.uid\n # ???\n first_frame = first_last.data.firstFrame\n last_frame = first_last.data.lastFrame\n\n request_data = copy.deepcopy(self.access_user.settings['GET_THUMBNAILS']['req'])\n request_data[\"request\"][\"cameraRef\"] = camera_ref\n request_data[\"request\"][\"recordingCatalogEntryUid\"] = another_uid\n request_data[\"request\"][\"startTimeInMSec\"] = first_frame\n request_data[\"request\"][\"endTimeInMSec\"] = last_frame\n\n response = self._make_request(\n url=self.access_user.settings['GET_THUMBNAILS']['url'],\n data=request_data\n )\n response_list.append(response)\n\n return response_list\n\n\n @check_available_server\n def get_thumbnails_uri(self, camera):\n # GET_THUMBNAILS_URI\n camera_ref = self._get_camera_ref()\n\n request_data = copy.deepcopy(self.access_user.settings['GET_THUMBNAILS_URI']['req'])\n request_data[\"request\"][\"cameraRef\"] = camera_ref\n # request_data[\"request\"][\"recordingCatalogEntryUid\"] = another_uid\n\n response = self._make_request(\n url=self.access_user.settings['GET_THUMBNAILS_URI']['url'],\n data=request_data\n )\n\n return response\n","sub_path":"cisco_cam/utils/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":17099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187662950","text":"\"\"\"Setup script for the Cobe Python library.\"\"\"\n\nimport pathlib\n\nimport setuptools\n\n\nPROJECT = pathlib.Path(__file__).resolve().parent\n\n\nsetuptools.setup(\n name='python-cobe',\n version='0.2.2',\n packages=['cobe'],\n author='Abilisoft Ltd.',\n author_email='info@cobe.io',\n url='https://cobe.io/',\n description=('A library to enable streaming of custom '\n 'entities to Cobe.io from within any Python applications'),\n long_description=(PROJECT / 'README.rst').open().read(),\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n # 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: System :: Monitoring',\n ],\n install_requires=[\n 'pyzmq',\n 'msgpack-python',\n 'voluptuous',\n ],\n)\n","sub_path":"pypi_install_script/python-cobe-0.2.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411157588","text":"from UnionFind import UnionFind\nclass Solution(object):\n # Iterate through each of the cell and if it is an island,\n # do dfs to mark all adjacent islands, then increase the counter by 1.\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if not grid:\n return 0\n\n count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n self.dfs(grid, i, j)\n count += 1\n return count\n\n def dfs(self, grid, i, j):\n if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or grid[i][j] != '1':\n return\n grid[i][j] = 
'#'\n self.dfs(grid, i + 1, j)\n self.dfs(grid, i - 1, j)\n self.dfs(grid, i, j + 1)\n self.dfs(grid, i, j - 1)\n\n def numIslands2(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n if not grid:\n return 0\n\n uf = UnionFind(grid)\n\n directions = [(0, 1), (0, -1), (-1, 0), (1, 0)]\n m, n = len(grid), len(grid[0])\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n for d in directions:\n x, y = i + d[0], j + d[1]\n if x >= 0 and x < m and y >= 0 and y < n and grid[x][y] == '1':\n uf.union(i * n + j, x * n + y) # convert matrix index to list index and union them\n return uf.count\n\ngrid = ['11110',\n'11010',\n'11000',\n'00000']\ngrid2 = ['11000',\n'11000',\n'00100',\n'00011']\ngrid3 = ['1011011']\ngrid4 = [[\"1\",\"1\",\"1\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],[\"0\",\"0\",\"0\",\"0\",\"0\"]]\ngrid5 = [[\"0\",\"1\",\"0\"],[\"1\",\"0\",\"1\"],[\"0\",\"1\",\"0\"]]\ngrid6 = [[\"1\",\"1\",\"1\"],[\"0\",\"1\",\"0\"],[\"1\",\"1\",\"1\"]]\n\nprint(Solution().numIslands2(grid6))","sub_path":"200NumOfIsland.py","file_name":"200NumOfIsland.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"280110545","text":"# Copyright 2016 Nitor Creations Oy\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import object\nimport os\nfrom base64 import b64decode, b64encode\nimport boto3\nimport json\nfrom botocore.exceptions import ClientError\nfrom cryptography.hazmat.primitives.ciphers.aead import AESGCM\nfrom cryptography.hazmat.primitives.ciphers.algorithms import AES\nfrom cryptography.hazmat.primitives.ciphers.modes import CTR\nfrom cryptography.hazmat.primitives.ciphers import Cipher\nfrom cryptography.hazmat.backends import default_backend\n\ndef _to_bytes(data):\n encode_method = getattr(data, \"encode\", None)\n if callable(encode_method):\n return data.encode()\n return data\n\nclass Vault(object):\n _session = boto3.Session()\n _kms = \"\"\n _prefix = \"\"\n _vault_key = \"\"\n _vault_bucket = \"\"\n def __init__(self, vault_stack=\"\", vault_key=\"\", vault_bucket=\"\",\n vault_iam_id=\"\", vault_iam_secret=\"\", vault_prefix=\"\"):\n self._prefix = vault_prefix\n if self._prefix and not self._prefix.endswith(\"/\"):\n self._prefix = self._prefix + \"/\"\n # Either use given vault iam credentials or assume that the environent has\n # some usable credentials (either through env vars or instance profile)\n if vault_iam_id and vault_iam_secret:\n self._session = boto3.Session(aws_access_key_id=vault_iam_id,\n aws_secret_access_key=vault_iam_secret)\n # And set up a kms client since all operations require that\n self._kms = self._session.client('kms')\n # Either use given vault kms key and/or vault bucket or look them up from a\n # cloudformation stack\n if vault_key:\n self._vault_key = vault_key\n elif \"VAULT_KEY\" in os.environ:\n self._vault_key = os.environ[\"VAULT_KEY\"]\n if vault_bucket:\n 
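# Annotation, not in the original module: resolution precedence for the\n # vault configuration in this constructor is (1) explicit vault_key /\n # vault_bucket arguments, (2) the VAULT_KEY / VAULT_BUCKET environment\n # variables, (3) the kmsKeyArn / vaultBucketName outputs of the\n # CloudFormation stack named by VAULT_STACK (default \"vault\"),\n # looked up in _get_cf_params below.\n 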
self._vault_bucket = vault_bucket\n elif \"VAULT_BUCKET\" in os.environ:\n self._vault_bucket = os.environ[\"VAULT_BUCKET\"]\n # If not given in constructor or environment, resolve from CloudFormation\n if not (self._vault_key and self._vault_bucket):\n stack = vault_stack\n if not stack:\n if \"VAULT_STACK\" in os.environ:\n stack = os.environ[\"VAULT_STACK\"]\n else:\n stack = \"vault\"\n stack_info = self._get_cf_params(stack)\n if not self._vault_key:\n self._vault_key = stack_info['key_arn']\n if not self._vault_bucket:\n self._vault_bucket = stack_info['bucket_name']\n\n def _encrypt(self, data):\n ret = {}\n key_dict = self._kms.generate_data_key(KeyId=self._vault_key,\n KeySpec=\"AES_256\")\n data_key = key_dict['Plaintext']\n ret['datakey'] = key_dict['CiphertextBlob']\n aesgcm_cipher = AESGCM(data_key)\n nonce = os.urandom(12)\n meta = json.dumps({\"alg\": \"AESGCM\", \"nonce\": b64encode(nonce).decode()}, separators=(',',':'), sort_keys=True).encode()\n ret['aes-gcm-ciphertext'] = aesgcm_cipher.encrypt(nonce, _to_bytes(data), meta)\n cipher = _get_cipher(data_key)\n encryptor = cipher.encryptor()\n ret['ciphertext'] = encryptor.update(_to_bytes(data)) + encryptor.finalize()\n ret['meta'] = meta\n return ret\n\n def _decrypt(self, data_key, encrypted):\n decrypted_key = self.direct_decrypt(data_key)\n cipher = _get_cipher(decrypted_key)\n decryptor = cipher.decryptor()\n return decryptor.update(encrypted) + decryptor.finalize()\n\n def _aes_gcm_decrypt(self, nonce, data_key, encrypted):\n decrypted_key = self.direct_decrypt(data_key)\n cipher = AESGCM(decrypted_key)\n return cipher.decrypt(nonce, encrypted, None)\n\n def _get_cf_params(self, stack_name):\n clf = self._session.client('cloudformation')\n stack = clf.describe_stacks(StackName=stack_name)\n ret = {}\n for output in stack['Stacks'][0]['Outputs']:\n if output['OutputKey'] == 'vaultBucketName':\n ret['bucket_name'] = output['OutputValue']\n if output['OutputKey'] == 'kmsKeyArn':\n ret['key_arn'] = output['OutputValue']\n return ret\n\n def store(self, name, data):\n s3cl = self._session.client('s3')\n encrypted = self._encrypt(data)\n s3cl.put_object(Bucket=self._vault_bucket, Body=encrypted['datakey'],\n ACL='private', Key=self._prefix + name + '.key')\n s3cl.put_object(Bucket=self._vault_bucket, Body=encrypted['ciphertext'],\n ACL='private', Key=self._prefix + name + '.encrypted')\n s3cl.put_object(Bucket=self._vault_bucket, Body=encrypted['aes-gcm-ciphertext'],\n ACL='private', Key=self._prefix + name + '.aesgcm.encrypted')\n s3cl.put_object(Bucket=self._vault_bucket, Body=encrypted['meta'],\n ACL='private', Key=self._prefix + name + '.meta')\n return True\n\n def lookup(self, name):\n s3cl = self._session.client('s3')\n datakey = s3cl.get_object(Bucket=self._vault_bucket,\n Key=self._prefix + name + '.key')['Body'].read()\n try:\n meta = s3cl.get_object(Bucket=self._vault_bucket,\n Key=self._prefix + name + '.meta')['Body'].read()\n ciphertext = s3cl.get_object(Bucket=self._vault_bucket,\n Key=self._prefix + name + '.aesgcm.encrypted')['Body'].read()\n meta_add = meta\n if not isinstance(meta, bytes):\n meta_add = _to_bytes(meta)\n meta = json.loads(meta)\n return AESGCM(self.direct_decrypt(datakey)).decrypt(b64decode(meta['nonce']), ciphertext, meta_add)\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\" or e.response['Error']['Code'] == 'NoSuchKey':\n ciphertext = s3cl.get_object(Bucket=self._vault_bucket,\n Key=self._prefix + name + '.encrypted')['Body'].read()\n return self._decrypt(datakey, 
ciphertext)\n else:\n raise\n\n def recrypt(self, name):\n data = self.lookup(name)\n self.store(name, data)\n\n def exists(self, name):\n s3cl = self._session.client('s3')\n try:\n s3cl.head_object(Bucket=self._vault_bucket,\n Key=self._prefix + name + '.key')\n return True\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n raise\n\n def delete(self, name):\n s3cl = self._session.client('s3')\n s3cl.delete_object(Bucket=self._vault_bucket, Key=self._prefix + name + '.key')\n s3cl.delete_object(Bucket=self._vault_bucket, Key=self._prefix + name + '.encrypted')\n try:\n s3cl.delete_object(Bucket=self._vault_bucket, Key=self._prefix + name + '.aesgcm.encrypted')\n s3cl.delete_object(Bucket=self._vault_bucket, Key=self._prefix + name + '.meta')\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\" or e.response['Error']['Code'] == 'NoSuchKey':\n pass\n else:\n raise\n\n def all(self):\n ret = \"\"\n for item in self.list_all():\n ret = ret + item + os.linesep\n return ret\n\n def list_all(self):\n s3bucket = self._session.resource('s3').Bucket(self._vault_bucket)\n ret = []\n for next_object in s3bucket.objects.filter(Prefix=self._prefix):\n if next_object.key.endswith(\".aesgcm.encrypted\") and next_object.key[:-17] not in ret:\n ret.append(next_object.key[:-17])\n elif next_object.key.endswith(\".encrypted\") and next_object.key[:-10] not in ret:\n ret.append(next_object.key[:-10])\n return ret\n\n def get_key(self):\n return self._vault_key\n\n def get_bucket(self):\n return self._vault_bucket\n\n def direct_encrypt(self, data):\n return self._kms.encrypt(KeyId=self._vault_key, Plaintext=data)['CiphertextBlob']\n\n def direct_decrypt(self, encrypted_data):\n return self._kms.decrypt(CiphertextBlob=encrypted_data)['Plaintext']\n\nSTATIC_IV = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 & 0xFF, int(1337 / 256) & 0xFF, int(1337 % 256) & 0xFF])\ndef _get_cipher(key):\n backend = default_backend()\n return Cipher(AES(key), CTR(bytes(STATIC_IV)), backend=backend)\n","sub_path":"python/n_vault/vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"579036765","text":"import django\nfrom django.conf import settings\nfrom django import forms\nfrom django.contrib.admin.templatetags.admin_static import static\n\n#monkey patch in support for bootstrap\n@property\ndef media(self):\n extra = '' if settings.DEBUG else '.min'\n js = [\n 'core.js',\n 'admin/RelatedObjectLookups.js',\n 'jquery%s.js' % extra,\n 'jquery.init.js',\n 'bootstrap%s.js' % extra\n ]\n if self.actions is not None:\n js.append('actions%s.js' % extra)\n if self.prepopulated_fields:\n js.extend(['urlify.js', 'prepopulate%s.js' % extra])\n return forms.Media(js=[static('admin/js/%s' % url) for url in js])\n\ndjango.contrib.admin.options.ModelAdmin.media = media\n\n","sub_path":"django_admin_bootstrapped/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584487952","text":"import argparse\nimport pandas as pd\nimport re\nimport math\nfrom rating import data_rating\n\n\ndef interface_start(info, top):\n # info = pd.read_csv(\"combined.csv\") # will change the name to the real csv file\n\n # rating it the scores for ranking\n # info = info.sort_values(by=['rating']) # with change to the real column name\n\n while True:\n # # 
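(annotation, not in the original script)\n # the listing loop below prints rows 1..top with label-based .loc\n # lookups, so the function assumes info has been ranked by rating and\n # reindexed so its row labels run from 1; the same labels are reused\n # when a property is selected for its details.\n # # 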
asking for expected price interval\n # bound_bad = True\n # while bound_bad:\n # try:\n # price_lb = int(input(\"please enter lower bound of expected price: \\n\"))\n # price_ub = int(input(\"please enter upper bound of expected price: \\n\"))\n # except:\n # print(\"bad input, try again!\")\n # else:\n # if 0 < price_lb < price_ub:\n # bound_bad = False\n\n input_bad = True\n valid_input = [\"y\", \"exit\"]\n view_choice = \"\"\n while input_bad:\n\n view_choice = input(\"Show the top \" + str(top) + \" properties we recommend? (y to continue, exit to quit)\\n\")\n if view_choice not in valid_input:\n print(\"bad input! enter again\")\n else:\n input_bad = False\n\n if view_choice == \"exit\":\n break\n elif view_choice == \"y\":\n\n house_item = \"{}. {} \\n\" \\\n \"Neighborhood: {}\\n\" \\\n \"Size: {} sqft\\n\" \\\n \"Number of bedrooms: {}\\n\" \\\n \"Price: {}$/month\"\n\n for i in range(1, top+1):\n num_bed = info.loc[i][\"number bedrooms\"]\n if math.isnan(num_bed):\n num_bed = \"Unknown\"\n else:\n num_bed = int(num_bed)\n print(\"\\n\", house_item.format(i, info.loc[i][\"post title\"], info.loc[i][\"neighborhood\"].strip()[1:-1],\n info.loc[i][\"sqft\"], num_bed, info.loc[i][\"price\"]), \"\\n\")\n\n elif view_choice == \"n\":\n # will call the fetch function\n break\n\n house_bad = True\n house_choice = \"\"\n while house_bad:\n\n house_choice = input(\"\\nSelect a property for more information : (type 'exit' to quit)\\n\")\n\n if house_choice == \"exit\":\n break\n else:\n try:\n house_choice = int(house_choice)\n if house_choice not in range(1, top + 1):\n print(\"bad input! enter again\")\n continue\n except ValueError:\n print(\"Bad input! Enter again\")\n\n else:\n house_bad = False\n\n if house_choice == \"exit\":\n break\n\n else:\n t_str = \"Transport Information\"\n print(\"\\n\", t_str.center(50, \"-\"), \"\\n\")\n trans_item = \"Distance to nearest bus stop: {} meters\\n\" \\\n \"Distance to CMU: {} meters\\n\" \\\n \"Distance to downtown: {} meters\\n\" \\\n \"Distance to nearest shuttle stop: {} meters\"\n # use the selected property's label, not the leftover listing-loop index\n shuttle = info.loc[house_choice]['nearest_shuttle_stop'].split(\",\")[-1]\n\n print(trans_item.format(info.loc[house_choice]['bus_distance_CMU'], info.loc[house_choice]['distance_to_CMU'],\n info.loc[house_choice]['distance_to_downtown'], shuttle))\n\n r_str = \"Restaurant\"\n print(\"\\n\", r_str.center(50, \"-\"), \"\\n\")\n print(\"5 restaurants nearby\")\n res_item = \"Restaurant name: {}\\n\" \\\n \"Rating: {}\"\n restaurant = info.loc[house_choice]['nearest_restaurants']\n\n restaurant = eval(str(restaurant))\n for res in restaurant:\n\n print(res_item.format(res[0], res[2]))\n\n input_chk = True\n while input_chk:\n action = input(\"\\nPress 'b' to go back to properties listing or type 'exit' to quit\\n\")\n if action in [\"b\",\"exit\"]:\n input_chk = False\n else:\n print(\"bad input! 
enter again\")\n\n if action == \"b\":\n continue\n elif action == \"exit\":\n break\n\n\n\n # print(parser.parse_args())\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253128278","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import permissions as perms\n\nfrom .models import Review\nfrom .serializers import ReviewSerializer\nfrom backend.utils.api import BlankGetAPIView\nfrom backend.utils.decorators import permissions\n\n\nclass AddReview(APIView):\n \"\"\"Добавление отзыва\"\"\"\n permission_classes = [perms.IsAuthenticated]\n\n def post(self, request):\n serializer = ReviewSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user)\n return Response({\"review\": serializer.data}, status=200)\n return Response(serializer.errors, status=400)\n\n\nclass NotModeratedReviews(BlankGetAPIView):\n \"\"\"Вывод немодерированных отзывов\"\"\"\n permission_classes = [perms.IsAuthenticated]\n model = Review\n serializer = ReviewSerializer\n filter_params = {'moderated': False}\n\n\nclass ModeratedReviews(BlankGetAPIView):\n \"\"\"Вывод модерированных отзывов\"\"\"\n # permission_classes = [perms.IsAuthenticated]\n model = Review\n serializer = ReviewSerializer\n filter_params = {'moderated': True}\n\n @permissions(perms.IsAdminUser)\n def get(self, *args, **kwargs):\n return super().get(*args, **kwargs)\n\n\nclass AllReviews(BlankGetAPIView):\n \"\"\"Вывод всех о��зывов\"\"\"\n # permission_classes = [perms.IsAuthenticated]\n model = Review\n serializer = ReviewSerializer\n\n @permissions(perms.IsAdminUser)\n def get(self, *args, **kwargs):\n return super().get(*args, **kwargs)\n","sub_path":"backend/reviews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"636458184","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport sys, os\nimport re\nimport math\n\ndef extract_columns(data, xid, yid, stride):\n print('Extract target columns from given data')\n if isinstance(data, list):\n y = data.copy()\n x = np.arange(1, len(data)+1)\n elif data.ndim == 1:\n y = data.copy()\n x = np.arange(1, len(y)+1)\n elif data.ndim != 2:\n raise ValueError\n else:\n if (xid is None) and (yid is None) and (data.shape[-1]==2):\n return data[:,0][::stride], data[:,1][::stride]\n\n if yid is None:\n #print data.shape\n y = data[:, 0]\n else:\n y = data[:, yid]\n\n if xid is None:\n x = np.arange(1, len(y)+1)\n else:\n x = data[:, xid]\n\n return x[::stride], y[::stride]\n\ndef moving_average(y, average):\n ynew = []\n vprev = y[0]\n scale = average\n for i, yi in enumerate(y):\n vprev = scale*vprev + (1-scale)*yi\n ynew.append(vprev)\n return np.array(ynew)\n\ndef grep_data(logstr, pattern_str):\n \"\"\"\n grep data using regex\n \"\"\"\n pattern = re.compile(pattern_str)\n data = pattern.findall(logstr)\n return data\n\ndef plot_columns(data_file_list, xid=None, yid=None, stride=1, average=0, xlabel=None, ylabel=None, show_grid=False, ymin=None, ymax=None, title=None, show=''):\n \"\"\"\n Arguments are:\n - xid column index for x-axis, starting from 0\n - yid column index for y-axis, \n \"\"\"\n \n colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n style = '-'\n marker = '' #'o'\n ymin_, ymax_ = 0., 0.\n for i, data_file in 
enumerate(data_file_list):\n fid = open(data_file,'r') \n log_str = \"\".join([x.strip() for x in fid.readlines()])\n \n lr = grep_data(log_str,r'2020-09-\\d+ \\d+:\\d+:\\d+,\\d+ Chart INFO: Epoch: \\d+.\\d+, lr: \\d+.\\d+')\n train_loss = grep_data(log_str,r'2020-09-\\d+ \\d+:\\d+:\\d+,\\d+ Chart INFO: Epoch: \\d+.\\d+, lr: \\d+.\\d+, Train loss: \\d+.\\d+')\n val_loss = grep_data(log_str,r'2020-09-\\d+ \\d+:\\d+:\\d+,\\d+ Chart INFO: Epoch: \\d+.\\d+, lr: \\d+.\\d+, Train loss: \\d+.\\d+, Val loss: \\d+.\\d+')\n train_acc = grep_data(log_str,r'2020-09-\\d+ \\d+:\\d+:\\d+,\\d+ Chart INFO: Epoch: \\d+.\\d+, lr: \\d+.\\d+, Train loss: \\d+.\\d+, Val loss: \\d+.\\d+, Train acc: \\d+.\\d+')\n val_acc = grep_data(log_str,r'2020-09-\\d+ \\d+:\\d+:\\d+,\\d+ Chart INFO: Epoch: \\d+.\\d+, lr: \\d+.\\d+, Train loss: \\d+.\\d+, Val loss: \\d+.\\d+, Train acc: \\d+.\\d+, Val acc: \\d+.\\d+')\n\n data_lr = [float(x.split(':')[-1]) for x in lr]\n data_train_loss = [float(x.split(':')[-1]) for x in train_loss]\n data_val_loss = [float(x.split(':')[-1]) for x in val_loss]\n data_train_acc = [float(x.split(':')[-1]) for x in train_acc]\n data_val_acc = [float(x.split(':')[-1]) for x in val_acc]\n \n\n data_lr = [item for item in data_lr] \n data_train_loss = [item for item in data_train_loss]\n data_val_loss = [item for item in data_val_loss]\n data_train_acc = [item for item in data_train_acc]\n data_val_acc = [item for item in data_val_acc]\n \n \n x_lr, y_lr = extract_columns(data_lr, xid, yid, stride)\n x_train_loss, y_train_loss = extract_columns(data_train_loss, xid, yid, stride)\n x_val_loss, y_val_loss = extract_columns(data_val_loss, xid, yid, stride)\n x_train_acc, y_train_acc = extract_columns(data_train_acc, xid, yid, stride)\n x_val_acc, y_val_acc = extract_columns(data_val_acc, xid, yid, stride)\n \n # filename = os.path.splitext(os.path.split(data_file)[1])[0].lstrip('log.')\n filename = data_file.split('/')[-2]\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.plot(x_train_loss, y_train_loss, colors[i%len(colors)]+style+marker, label='train_loss',linewidth=1)\n ax1.plot(x_val_loss, y_val_loss, colors[(i+1)%len(colors)]+style+marker, label='val_loss',linewidth=1)\n # ax1.plot(x_lr, y_lr, colors[(i+2)%len(colors)]+style+marker, label='lr',linewidth=1)\n ax1.plot(x_train_acc, y_train_acc, colors[(i+3)%len(colors)]+style+marker, label='train_acc',linewidth=1)\n ax1.plot(x_val_acc, y_val_acc, colors[(i+4)%len(colors)]+style+marker, label='val_acc',linewidth=1)\n\n# ax1.plot(x_acc_reg, y_acc_reg, colors[2%len(colors)]+style+'o', markersize = 2, label='accuracy_reg',linewidth=1)\n # ax1.set_yticks(np.arange(0,1.01,0.05)) \n ax1.set_xlabel('epoch')\n ax1.set_ylabel('loss&acc')\n \n # ax2 = ax1.twinx()\n # ax2.plot(x_lr, y_lr, colors[2]+style+'o',markersize = 2, label='lr',linewidth=1)\n # ax2.set_ylabel('lr')\n\n # ax2.plot(x_Vmap, y_Vmap, colors[2]+style+'o',markersize = 2, label='ValMAP',linewidth=1)\n #ax2.plot(x_lossz, y_lossz, colors[1]+style+'o',markersize = 2, label='loss_z',linewidth=1)\n #ax2.set_yticks(np.arange(0,0.02,0.001)) \n \n # ax2.set_yticks(np.arange(0,0.8,0.05)) \n # ax2.set_ylabel('map')\n \n ax1.legend(loc='best')\n # ax2.legend(loc='lower left')\n show_grid = True\n plt.title(filename)\n if show_grid:\n ax1.grid(linestyle='--')\n if not show:\n plt.show()\n else:\n show = filename+'.png'\n plt.savefig(show, dpi=300)\n\n return True\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Command-line tools for plotting using 
matplotlib')\n parser.add_argument('-i', dest='input', help='structured data file',\n default='', type=str, nargs='+')\n parser.add_argument('-xid', dest='xid', help='index for x',\n default=None, type=int)\n parser.add_argument('-yid', dest='yid', help='index for y',\n default=None, type=int)\n parser.add_argument('-s', '--stride', help='stride', default=1, type=int)\n parser.add_argument('-a', '--average', help='Moving average scaling factor for input data',\n default=0, type=float)\n parser.add_argument('-xl', '--xlabel', help='label string for x-axis', default=None, type=str)\n parser.add_argument('-yl', '--ylabel', help='label string for y-axis', default=None, type=str)\n parser.add_argument('-g', '--grid', help='show grid', action='store_true')\n parser.add_argument('-ymin', '--ymin', help='minimal value along y-axis', default=None, type=float)\n parser.add_argument('-ymax', '--ymax', help='maximal value along y-axis', default=None, type=float)\n parser.add_argument('-tl', '--title', help='figure title', default=None, type=str)\n parser.add_argument('-sv', '--save_to_file', help='save_name', default='temp.png', type=str)\n\n args = parser.parse_args()\n if len(sys.argv) <= 1:\n parser.print_help()\n sys.exit(1)\n\n #print args.input\n plot_columns(args.input, args.xid, args.yid, stride=args.stride, average=args.average, xlabel=args.xlabel, ylabel=args.ylabel, show_grid=args.grid, ymin=args.ymin, ymax=args.ymax, title=args.title, show=args.save_to_file)\n\n\n\n","sub_path":"plot_loss.py","file_name":"plot_loss.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308246567","text":"import builtins as globales\nfrom psycopg2 import connect as conectarBD\nfrom flask import Flask, Blueprint as Modulo, \\\n render_template as renderizar, redirect as redireccionar, \\\n url_for as url, request as solicitud, session as sesion\nfrom time import strftime as hoy\nfrom random import randint\n\n\njuego = Modulo('juego', __name__, template_folder='templates')\n\n\n@juego.route('/cargarnivel', methods=['GET'])\ndef cargarnivel():\n if globales.enjuego:\n cur = globales.cur\n sorteados = []\n cur.execute(\"SELECT MAX(id_marca) FROM marcas\")\n b = cur.fetchone()[0]\n i = -1\n for ele in globales.niveles:\n i += 1\n if ele is not None:\n sorteados.append(ele[0])\n if ele is None:\n break\n print(i)\n if i == 4:\n cur.execute(\"UPDATE partidas SET puntaje = %s WHERE id_partida = %s\",\n (globales.total, globales.partida))\n globales.con.commit()\n puntaje = globales.total\n globales.enjuego = False\n globales.user = []\n globales.total = 0\n globales.niveles = [None] * 5\n globales.partida = None\n globales.ind = 0\n return renderizar('final.html', puntaje=puntaje)\n aceptado = False\n while not aceptado:\n marSor = randint(1, b)\n if marSor not in sorteados:\n aceptado = True\n globales.ind = i\n if globales.niveles[i] is not None:\n if globales.niveles[i][4]:\n # Load the same level again; 'True' means it is still in play\n marSor = globales.niveles[i][0]\n cur.execute(\"SELECT * FROM marcas WHERE id_marca = %s\", (marSor,))\n marca = cur.fetchone()\n cur.execute(\"SELECT id_nombre, descripcion FROM nombres\"\n \" WHERE id_marca = %s ORDER BY RANDOM()\", (marSor,))\n nombres = cur.fetchall()\n cur.execute(\"SELECT id_slogan, descripcion FROM slogans\"\n \" WHERE id_marca = %s ORDER BY RANDOM()\", (marSor,))\n slogans = cur.fetchall()\n cur.execute(\"SELECT id_nombre, id_slogan FROM correctos\"\n \" WHERE id_marca = 
%s\", (marSor,))\n correctos = cur.fetchone()\n # id marca, puntaje, id nombre correcto, id slogan correcto, nivel terminado\n globales.niveles[i] = [marSor, 0, correctos[0], correctos[1], False]\n return renderizar('game.html', nombres=nombres, slogans=slogans,\n logo=marca[2], sexo=globales.user[1], ncorr=correctos[0],\n scorr=correctos[1], sc=globales.total, ind=i)\n else:\n return renderizar('login.html')\n\n\n@juego.route('/validar', methods=['POST'])\ndef validar():\n cur = globales.cur\n idnombre = solicitud.form['rnombre']\n idslogan = solicitud.form['rslogan']\n i = globales.ind\n niv = globales.niveles[i]\n acnom = str(idnombre) == str(globales.niveles[i][2])\n acslo = str(idslogan) == str(globales.niveles[i][3])\n '''\n VERFICACIÓN DE RESPUESTAS E INSERT EN BD\n '''\n puntaje = 0\n if acnom and acslo:\n puntaje = 10\n elif acnom or acslo:\n puntaje = 5\n niv[1] = puntaje\n cur.execute(\"INSERT INTO niveles VALUES(DEFAULT, %s, %s, %s, %s, %s, %s, %s)\",\n (globales.partida, niv[0], niv[2], niv[3], niv[1], acnom, acslo))\n globales.con.commit()\n globales.niveles[i][4] = False\n globales.total += puntaje\n return redireccionar(url('juego.cargarnivel'))\n","sub_path":"juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"396083456","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2015 eNovance SAS \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport dumpelastic\nfrom tests.data.elasticdatas import mockdata\n\nimport mock\nimport testtools\n\n\nclass TestDumpElastic(testtools.TestCase):\n\n @mock.patch(\"dumpelastic.requests\")\n def test_get_indices(self, m_request):\n m_return_value = mock.Mock()\n m_return_value.json.return_value = ['logstash-2015.02.09', 'noop']\n m_request.get.return_value = m_return_value\n indices = dumpelastic._get_indices(\"url\")\n self.assertEqual(['logstash-2015.02.09'], indices)\n\n @mock.patch(\"dumpelastic.requests\")\n def test_dump_elasticsearch(self, m_request):\n m_return_value = mock.Mock()\n m_return_value.json = mock.MagicMock(side_effect=[mockdata.data1,\n mockdata.data2])\n m_request.get.return_value = m_return_value\n\n dumpelastic._save_docs = mock.Mock()\n dumpelastic._dump_elasticsearch(\"url\", \"output_dir\")\n","sub_path":"tests/test_dumpelastic.py","file_name":"test_dumpelastic.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617135615","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport Artus.Utility.logger as logger\nlog = logging.getLogger(__name__)\n\nimport re\nimport json\nimport Artus.Utility.jsonTools as jsonTools\n#import Kappa.Skimming.datasetsHelperTwopz as datasetsHelperTwopz\n\ndef build_list():\n quantities_list = [\n \"ggh_NNLO_weight\",\n \"THU_ggH_Mu\",\n \"THU_ggH_Res\",\n \"THU_ggH_Mig01\",\n \"THU_ggH_Mig12\",\n \"THU_ggH_VBF2j\",\n \"THU_ggH_VBF3j\",\n \"THU_ggH_PT60\",\n \"THU_ggH_PT120\",\n \"THU_ggH_qmtop\"\n ]\n \n return quantities_list","sub_path":"python/data/ArtusConfigs/Run2Analysis/Includes/ggHNNLOQuantities.py","file_name":"ggHNNLOQuantities.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327061605","text":"\"\"\"\npython-binance docs:\nhttp://python-binance.readthedocs.io/en/latest/overview.html\n\nBinance wss docs:\nhttps://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md\n\"\"\"\nimport logging\nimport itertools\nimport importlib\nimport signal\nimport time\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom app.common.utils import utc_datetime as now\nfrom twisted.internet import reactor\nfrom pprint import pprint\nfrom binance.client import Client\nfrom binance.websockets import BinanceSocketManager\nfrom binance.enums import *\nimport docs.data\nfrom docs.rules import TRADING_PAIRS as pairs\nfrom app import set_db, get_db\nfrom app.common.utils import colors, to_local, utc_datetime as now\nfrom app.common.timer import Timer\nimport app.bot\n\nspinner = itertools.cycle(['-', '/', '|', '\\\\'])\nconn_keys = []\nbnc_wss = None\n\n#---------------------------------------------------------------------------\nclass GracefulKiller:\n kill_now = False\n def __init__(self):\n signal.signal(signal.SIGINT, self.exit_gracefully)\n signal.signal(signal.SIGTERM, self.exit_gracefully)\n\n def exit_gracefully(self,signum, frame):\n self.kill_now = True\n\n#---------------------------------------------------------------------------\ndef receive_kline(msg):\n if msg['e'] != 'kline':\n print('not a kline')\n return\n # unclosed candle\n elif msg['k']['x'] != True:\n return\n else:\n c = msg['k']\n\n doc = {\n 'pair': c['s'],\n 'freq': c['i'],\n 'open_time': pd.to_datetime(c['t'], 
unit='ms', utc=True),\n 'close_time': pd.to_datetime(c['T'], unit='ms', utc=True),\n 'open': np.float64(c['o']),\n 'high': np.float64(c['h']),\n 'low': np.float64(c['l']),\n 'close': np.float64(c['c']),\n 'trades': c['n'],\n 'volume': np.float64(c['v']),\n 'buy_vol': np.float64(c['V']),\n 'quote_vol': np.float64(c['q']),\n 'sell_vol': np.float64(c['Q'])\n }\n\n if doc['volume'] > 0:\n doc['buy_ratio'] = np.float64(doc['buy_vol'] / doc['volume'])\n else:\n doc['buy_ratio'] = np.float64(0.0)\n\n color = None\n if doc['freq'] == '5m':\n color = colors.GRN\n elif doc['freq'] == '1h' or doc['freq'] == '1d':\n color = colors.BLUE\n else:\n color = colors.WHITE\n\n print(\"{}{:%H:%M:%S:} {:<7} {:>5} {:>12g}{}\"\\\n .format(\n colors.WHITE,\n to_local(doc['close_time']),\n doc['pair'],\n doc['freq'],\n doc['close'],\n #doc['volume'],\n colors.ENDC\n ))\n\n db.candles.insert_one(doc)\n\n#---------------------------------------------------------------------------\ndef detect_pair_change():\n \"\"\"Detect changes in pairs tracking conf\n \"\"\"\n importlib.reload(docs.data)\n from docs.rules import TRADING_PAIRS\n global pairs, conn_keys\n\n if pairs == TRADING_PAIRS:\n return pairs\n else:\n print(\"Detected change in trading pair(s).\")\n rmvd = set(pairs) - set(TRADING_PAIRS)\n added = set(TRADING_PAIRS) - set(pairs)\n\n if len(rmvd) > 0:\n print(\"Removing {}...\".format(rmvd))\n n_rmv = 0\n\n for pair in rmvd:\n name = str(pair).lower()\n\n for key in conn_keys:\n if name in key:\n # remove it\n bnc_wss.stop_socket(key)\n idx = conn_keys.index(key)\n conn_keys = conn_keys[0:idx] + conn_keys[idx+1:]\n n_rmv += 1\n\n print(\"Removed {} websocket(s)\".format(n_rmv))\n\n if len(added) > 0:\n print(\"Adding {}...\".format(added))\n n_added = 0\n\n for pair in added:\n n_added += connect_klines(bnc_wss, [str(pair)])\n print(\"Added {} websocket(s)\".format(n_added))\n\n #print(\"Done. 
{} connections.\".format(len(conn_keys)))\n\n pairs = TRADING_PAIRS\n return pairs\n\n#---------------------------------------------------------------------------\ndef connect_klines(bnc_wss, _pairs):\n global conn_keys\n\n print(\"Creating kline connections for: {}...\".format(_pairs))\n\n n_connected = 0\n\n for pair in _pairs:\n conn_keys += [\n #bnc_wss.start_kline_socket(pair, receive_kline,\n # interval=KLINE_INTERVAL_1MINUTE),\n bnc_wss.start_kline_socket(pair, receive_kline,\n interval=KLINE_INTERVAL_5MINUTE),\n bnc_wss.start_kline_socket(pair, receive_kline,\n interval=KLINE_INTERVAL_1HOUR),\n bnc_wss.start_kline_socket(pair, receive_kline,\n interval=KLINE_INTERVAL_1DAY)\n ]\n n_connected += 4\n\n return n_connected\n\n#---------------------------------------------------------------------------\ndef close_all():\n print('Closing all sockets...')\n bnc_wss.close()\n print('Terminating twisted server...')\n reactor.stop()\n\n#---------------------------------------------------------------------------\ndef update_spinner():\n \"\"\"\n \"\"\"\n msg = 'listening %s' % next(spinner)\n sys.stdout.write(msg)\n sys.stdout.flush()\n sys.stdout.write('\\b'*len(msg))\n #time.sleep(1)\n\n#---------------------------------------------------------------------------\nif __name__ == '__main__':\n db = set_db('localhost')\n cred = list(db.api_keys.find())[0]\n killer = GracefulKiller()\n\n print(\"Connecting to Binance websocket client...\")\n client = Client(cred['key'], cred['secret'])\n bnc_wss = BinanceSocketManager(client)\n connect_klines(bnc_wss, pairs)\n print(\"{} connections created.\".format(len(conn_keys)))\n bnc_wss.start()\n\n print('Connected.')\n print('Press Ctrl+C to quit')\n\n timer_1m = Timer(name='pairs', expire='every 1 clock min utc')\n\n while True:\n if timer_1m.remain(quiet=True) == 0:\n pairs = detect_pair_change()\n timer_1m.reset(quiet=True)\n\n if killer.kill_now:\n print('Caught SIGINT command. 
Shutting down...')\n break\n update_spinner()\n time.sleep(0.1)\n\n close_all()\n","sub_path":"sock.py","file_name":"sock.py","file_ext":"py","file_size_in_byte":6175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"126495318","text":"#Import to create log folder if none exists\r\nimport os\r\nimport time\r\n#Import logging classes\r\nimport logging\r\n\r\n\r\nclass logger():\r\n variable_object = None\r\n logger = None\r\n \r\n def __init__(self, variable):\r\n #init variable object\r\n self.variable_object = variable\r\n\r\n if(not os.path.isdir(\"logs\")):\r\n os.mkdir(\"logs\")\r\n\r\n self.logger = logging.getLogger('general_logger')\r\n \r\n def start(self, level, log_time):\r\n if(level==\"DEBUG\"):\r\n level = logging.DEBUG\r\n elif(level==\"INFO\"):\r\n level = logging.INFO\r\n elif(level==\"ERROR\"):\r\n level = logging.ERROR\r\n\r\n #Set logger levels\r\n self.logger.setLevel(level)\r\n\r\n #delete previous logs if they're past a date\r\n now = time.time()\r\n path = os.getcwd()+\"/logs\"\r\n for f in os.listdir(path):\r\n f = os.path.join(path, f)\r\n if os.stat(f).st_mtime < now-log_time*86400:\r\n if os.path.isfile(f):\r\n os.remove(os.path.join(path, f))\r\n \r\n # create file handler which logs all messages\r\n file_handler = logging.FileHandler('logs/general_logs.log', mode='a')\r\n file_handler.setLevel(level)\r\n\r\n # create console handler with a higher log level\r\n console_handler = logging.StreamHandler()\r\n console_handler.setLevel(level)\r\n\r\n # create formatter and add it to the handlers\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n file_handler.setFormatter(formatter)\r\n console_handler.setFormatter(formatter)\r\n\r\n #add handlers to logger\r\n self.logger.addHandler(file_handler)\r\n self.logger.addHandler(console_handler)\r\n\r\n self.logger.info(\"___________.__ ____________________.___ ___________\")\r\n self.logger.info(\"\\__ ___/| |__ ____ \\______ \\______ \\ | \\_ _____/__________ ____ ____\")\r\n self.logger.info(\" | | | | \\_/ __ \\ | _/| ___/ | | __)/ _ \\_ __ \\/ ___\\_/ __ \\ \")\r\n self.logger.info(\" | | | Y \\ ___/ | | \\| | | | | \\( <_> ) | \\/ /_/ > ___/\")\r\n self.logger.info(\" |____| |___| /\\___ > |____|_ /|____| |___| \\___ / \\____/|__| \\___ / \\___ >\")\r\n self.logger.info(\" \\/ \\/ \\/ \\/ /_____/ \\/\")\r\n","sub_path":"utilities/general_helpers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"551359748","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef mouse_click(event, x, y, flag, param):\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n print(x, y)\r\n points.append([x, y])\r\n\r\n\r\n# function for warping\r\ndef warpImage(den_image, pts):\r\n # store points\r\n tl = pts[0]\r\n tr = pts[1]\r\n br = pts[2]\r\n bl = pts[3]\r\n rect = np.array([tl, tr, br, bl], dtype=\"float32\")\r\n # rect = (tl,tr,br,bl)\r\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n maxWidth = max(int(widthA), int(widthB))\r\n\r\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n maxHeight = max(int(heightA), int(heightB))\r\n\r\n dst = np.array([\r\n [0, 0],\r\n [maxWidth - 1, 0],\r\n [maxWidth - 
1, maxHeight - 1],\r\n [0, maxHeight - 1]], dtype=\"float32\")\r\n # dst = np.array([[0,0],[199,0],[199,199],[0,199]], dtype=\"float32\")\r\n\r\n M = cv2.getPerspectiveTransform(rect, dst)\r\n warped = cv2.warpPerspective(den_image, M, (maxWidth, maxHeight))\r\n return warped\r\n\r\n\r\n# function to undistort image\r\ndef undistortImage(warped):\r\n # Define Camera Matrix\r\n mtx = np.array([[9.037596e+02, 0.000000e+00, 6.957519e+02],\r\n [0.000000e+00, 9.019653e+02, 2.242509e+02],\r\n [0, 0, 1]])\r\n\r\n # Define distortion coefficients\r\n dist = np.array([-3.639558e-01, 1.788651e-01, 6.029694e-04, -3.922424e-04, -5.382460e-02])\r\n\r\n # Getting the new optimal camera matrix\r\n # img = cv2.imread('image0.jpg')\r\n h, w = warped.shape[:2]\r\n newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,\r\n (w, h))\r\n\r\n # Undistorting\r\n dst = cv2.undistort(warped, mtx, dist, None, newcameramtx)\r\n\r\n # crop the image\r\n x, y, w, h = roi\r\n dst = dst[y:y + h, x:x + w]\r\n\r\n # cv2.imshow('Undistorted Image', dst)\r\n # cv2.waitKey(0)\r\n return dst\r\n\r\n\r\n# function to inverse warp\r\ndef invWarpImage(den_image, pts):\r\n # store points\r\n tl = pts[0]\r\n tr = pts[1]\r\n br = pts[2]\r\n bl = pts[3]\r\n rect = np.array([tl, tr, br, bl], dtype=\"float32\")\r\n # rect = (tl,tr,br,bl)\r\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\r\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\r\n maxWidth = max(int(widthA), int(widthB))\r\n\r\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\r\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\r\n maxHeight = max(int(heightA), int(heightB))\r\n\r\n dst = np.array([\r\n [0, 0],\r\n [maxWidth - 1, 0],\r\n [maxWidth - 1, maxHeight - 1],\r\n [0, maxHeight - 1]], dtype=\"float32\")\r\n # dst = np.array([[0,0],[199,0],[199,199],[0,199]], dtype=\"float32\")\r\n\r\n M = cv2.getPerspectiveTransform(rect, dst)\r\n warped = cv2.warpPerspective(den_image, np.linalg.inv(M), (crop.shape[1], crop.shape[0]))\r\n return warped\r\n\r\n\r\nif __name__ == '__main__':\r\n # read image\r\n cap = cv2.VideoCapture(\"Codes/data_1/out.avi\")\r\n ctr = 0\r\n temp_yellow_loc = 0\r\n\r\n while True:\r\n\r\n _, frame = cap.read()\r\n\r\n # undistort image\r\n undistort_img = undistortImage(frame)\r\n\r\n # denoise image\r\n denoise_img = cv2.fastNlMeansDenoisingColored(undistort_img, None, 10, 10, 7, 21)\r\n # cv2.imshow('Denoised Image', denoise_img)\r\n\r\n # threshold\r\n # _, thresh = cv2.threshold(warped_img, 250, 255, cv2.THRESH_BINARY)\r\n # cv2.imshow('lane pixel candidates', thresh)\r\n\r\n # extract edges\r\n # edges = cv2.Canny(denoise_img, 100, 200)\r\n # cv2.imshow('edges', edges)\r\n # cv2.waitKey(0)\r\n\r\n # crop real image\r\n crop = denoise_img[160:372, 0:1281]\r\n # cv2.imshow('ROI', crop)\r\n\r\n # call mouse click function\r\n # points = []\r\n points = np.float32([[468, 54], [685, 54], [820, 154], [155, 154]])\r\n\r\n # for mouse click to get four points\r\n # if ctr == 220:\r\n #\r\n # cv2.namedWindow(\"frame\", 1)\r\n # cv2.setMouseCallback(\"frame\", mouse_click)\r\n # cv2.imshow('frame', crop)\r\n # cv2.waitKey(0)\r\n #\r\n # if 0xff == ord('q'):\r\n # break\r\n\r\n # get warped image\r\n warped_img = warpImage(crop, points)\r\n # cv2.imshow(\"warped image\", warped_img)\r\n\r\n # using Histogram\r\n # hist = cv2.calcHist([warped_img], [0], None, [256], [0, 256])\r\n\r\n # plotting histogram\r\n # plot.plot(hist)\r\n # plot.show()\r\n\r\n # color 
separation using HSV\r\n hsv = cv2.cvtColor(warped_img, cv2.COLOR_BGR2HSV)\r\n lower_white = np.array([0, 0, 200])\r\n higher_white = np.array([255, 255, 255])\r\n mask = cv2.inRange(hsv, lower_white, higher_white)\r\n # cv2.imshow('mask', mask)\r\n\r\n # # pixel count\r\n pixel_sum = np.sum(mask, axis=0)\r\n\r\n # plot histogram\r\n # plt.plot(pixel_sum)\r\n # plt.xlabel('Image Cols')\r\n # plt.ylabel('Sum of Pixels')\r\n # plt.show()\r\n\r\n pts_white = []\r\n white_loc = []\r\n for i in range(len(pixel_sum)):\r\n try:\r\n if pixel_sum[i] and pixel_sum[i-1] < pixel_sum[i] < pixel_sum[i+1]:\r\n white_loc.append(i)\r\n for j in range(mask.shape[0]):\r\n pts_white.append([i, j])\r\n except:\r\n pass\r\n\r\n pts_white = np.array(pts_white)\r\n pts_white = pts_white.reshape((-1, 1, 2))\r\n warped_img = cv2.polylines(warped_img, [pts_white], False, (255, 0, 0))\r\n\r\n\r\n\r\n # cv2.imshow('lines', warped_img)\r\n\r\n # unwarp the image\r\n inv_warped_image = invWarpImage(warped_img, points)\r\n # inv_warped_image = cv2.add(crop, inv_warped_image)\r\n inv_warped_gray = cv2.cvtColor(inv_warped_image, cv2.COLOR_BGR2GRAY)\r\n _, inv_warped_thresh = cv2.threshold(inv_warped_gray, 0, 250, cv2.THRESH_BINARY_INV)\r\n fram_bit = cv2.bitwise_and(crop, crop, mask=inv_warped_thresh)\r\n # lena_warp = cv2.warpPerspective(lena_img, new_homo, (frame.shape[1], frame.shape[0]))\r\n new_frame = cv2.add(fram_bit, inv_warped_image)\r\n cv2.imshow('new frame', new_frame)\r\n\r\n # predict lanes\r\n try:\r\n yellow_point = np.max(white_loc)\r\n except:\r\n pass\r\n\r\n deviation = yellow_point - temp_yellow_loc\r\n # print(deviation)\r\n if deviation > 200:\r\n new_frame = cv2.putText(new_frame, 'Right', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)\r\n elif deviation < 0:\r\n new_frame = cv2.putText(new_frame, 'Left', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)\r\n else:\r\n new_frame = cv2.putText(new_frame, 'Straight', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)\r\n temp_yellow_loc = yellow_point\r\n cv2.imshow('new frame', new_frame)\r\n\r\n # ctr += 1\r\n # print(ctr)\r\n\r\n if cv2.waitKey(1) & 0xff == ord('q'):\r\n break\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n","sub_path":"Codes/question_2_naman.py","file_name":"question_2_naman.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"392382125","text":"# -*- coding: utf-8 -*-\nimport json\nimport random\nimport uuid\nfrom datetime import datetime\nfrom itertools import cycle\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.profile import region_provider\nfrom aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\nfrom aliyunsdkdysmsapi.request.v20170525 import QuerySendDetailsRequest\n\nif False:\n from rx_sharh.states.config import Config\n\n\ndef random_code(seed=None, length=5):\n if seed is None:\n seed = datetime.now()\n\n random.seed(seed)\n\n seq = list(map(str, range(10)))\n\n random.shuffle(seq)\n\n seq = cycle(seq)\n\n code = []\n\n while len(code) < length:\n\n if random.randrange(1000) < random.randrange(1000):\n code.append(next(seq))\n\n return ''.join(code)\n\n\n\"\"\"\nSMS service API call example, version: v20170525\n\nCreated on 2017-06-12\n\n\"\"\"\n\n\nclass AliyunSms:\n REGION = \"cn-hangzhou\"\n PRODUCT_NAME = \"Dysmsapi\"\n DOMAIN = \"dysmsapi.aliyuncs.com\"\n\n def __init__(self, app):\n global Config\n\n if 'Config' not in globals():\n from rx_sharh.states.config 
import Config\n globals()['Config'] = Config\n\n self.access_key_id = Config.access_key_id\n self.access_key_secret = Config.access_key_secret\n self.sign_name = Config.sign_name\n self.template_code = Config.template_code\n self.acs_client = AcsClient(self.access_key_id, self.access_key_secret, self.REGION)\n region_provider.add_endpoint(self.PRODUCT_NAME, self.REGION, self.DOMAIN)\n\n def __send_sms(self, business_id, phone_numbers, sign_name, template_code, template_param=None):\n sms_request = SendSmsRequest.SendSmsRequest()\n # SMS template code applied for, required\n sms_request.set_TemplateCode(template_code)\n\n # SMS template variable parameters\n if template_param is not None:\n sms_request.set_TemplateParam(template_param)\n\n # Set the business request serial number, required.\n sms_request.set_OutId(business_id)\n\n # SMS signature\n sms_request.set_SignName(sign_name)\n\n # List of phone numbers to send the SMS to, required.\n sms_request.set_PhoneNumbers(phone_numbers)\n\n # Call the SMS sending API, returns json\n sms_response = self.acs_client.do_action_with_exception(sms_request)\n\n # TODO business logic\n\n return sms_response\n\n def __query_send_detail(self, phone_number, page_size, current_page, send_date, biz_id=None):\n query_request = QuerySendDetailsRequest.QuerySendDetailsRequest()\n # Phone number to query\n query_request.set_PhoneNumber(phone_number)\n # Optional - serial number\n query_request.set_BizId(biz_id)\n # Required - send date; supports querying records within 30 days, format yyyyMMdd\n query_request.set_SendDate(send_date)\n # Required - current page number, counting from 1\n query_request.set_CurrentPage(current_page)\n # Required - page size\n query_request.set_PageSize(page_size)\n\n # Call the SMS record query API, returns json\n query_response = self.acs_client.do_action_with_exception(query_request)\n\n # TODO business logic\n\n return query_response\n\n def send_sms(self, phone_number, business_id=None,\n sign_name=None, template_code=None, template_param=None):\n code = random_code()\n business_id = uuid.uuid1() if business_id is None else business_id\n template_param = json.dumps(dict(code=code)) if template_param is None else template_param\n sign_name = self.sign_name if sign_name is None else sign_name\n template_code = self.template_code if template_code is None else template_code\n sms_response = self.__send_sms(business_id, phone_number, sign_name, template_code, template_param)\n sms_response = json.loads(sms_response)\n sms_response[\"Code\"] = code\n sms_response[\"Phone\"] = phone_number\n return sms_response\n\n# __name__ = 'send'\n# if __name__ == 'send':\n# __business_id = uuid.uuid1()\n# print(__business_id)\n# params = \"{\\\"code\\\":\\\"12345\\\"}\"\n# print(send_sms(__business_id, \"18795891667\", \"智慧校园\", \"SMS_117521047\", params))\n\n# if __name__ == 'query':\n# print(query_send_detail(\"1234567^8901234\", \"13000000000\", 10, 1, \"20170612\"))\n","sub_path":"rx_sharh/dependency/aliyun_sms.py","file_name":"aliyun_sms.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"218292398","text":"#-*- encoding: utf-8 -*-\r\n\"\"\"\r\nWindows application as a device\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\n\r\nimport os\r\nimport time\r\nimport struct\r\nimport win32ui\r\nimport win32con\r\nimport win32api\r\nimport win32gui\r\nimport win32process\r\nfrom PIL import Image\r\n\r\nfrom atx.device import Display\r\nfrom atx.device.device_mixin import DeviceMixin\r\nfrom atx.errors import WindowsAppNotFoundError\r\n\r\n\r\ndef find_process_id(exe_file):\r\n exe_file = os.path.normpath(exe_file).lower()\r\n command = \"wmic process get processid,commandline\"\r\n for line in os.popen(command).read().lower().splitlines():\r\n line = 
line.strip()\r\n if not line:\r\n continue\r\n line = line.split()\r\n pid = line[-1]\r\n cmd = \" \".join(line[:-1])\r\n if not cmd:\r\n continue\r\n elif cmd.startswith(\"'\") or cmd.startswith('\"'):\r\n pos = cmd.find(cmd[0], 1)\r\n cmd = cmd[1:pos]\r\n else:\r\n cmd = cmd.split()[0]\r\n\r\n if exe_file == cmd:\r\n return int(pid)\r\n\r\nclass Window(object):\r\n def __init__(self, window_name=None, exe_file=None):\r\n hwnd = 0\r\n if window_name is not None:\r\n hwnd = win32gui.FindWindow(None, window_name)\r\n if hwnd == 0:\r\n def callback(h, extra):\r\n if window_name in win32gui.GetWindowText(h):\r\n extra.append(h)\r\n return True\r\n extra = []\r\n win32gui.EnumWindows(callback, extra)\r\n if extra: hwnd = extra[0]\r\n if hwnd == 0:\r\n raise WindowsAppNotFoundError(\"Windows Application <%s> not found!\" % window_name)\r\n\r\n if hwnd == 0 and exe_file is not None:\r\n pid = find_process_id(exe_file)\r\n if pid is not None:\r\n def callback(h, extra):\r\n if win32gui.IsWindowVisible(h) and win32gui.IsWindowEnabled(h):\r\n _, p = win32process.GetWindowThreadProcessId(h)\r\n if p == pid:\r\n extra.append(h)\r\n return True\r\n return True\r\n extra = []\r\n win32gui.EnumWindows(callback, extra)\r\n if extra: hwnd = extra[0]\r\n if hwnd == 0:\r\n raise WindowsAppNotFoundError(\"Windows Application <%s> is not running!\" % exe_file)\r\n\r\n # if window_name & exe_file both are None, use the screen.\r\n self._is_desktop = False\r\n if hwnd == 0:\r\n hwnd = win32gui.GetDesktopWindow()\r\n self._is_desktop = True\r\n\r\n # self._window_name = win32gui.GetWindowText(hwnd)\r\n # self._window_pid = pid\r\n # self._exe_file = exe_file\r\n self._handle = hwnd\r\n self._bmp = None\r\n self._windc = None\r\n self._memdc = None\r\n\r\n @property\r\n def position(self):\r\n if self._is_desktop:\r\n left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)\r\n top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)\r\n width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)\r\n height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)\r\n return (left, top, left+width, top+height)\r\n\r\n return win32gui.GetWindowRect(self._handle)\r\n\r\n @property\r\n def size(self):\r\n if self._is_desktop:\r\n width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)\r\n height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)\r\n return (width, height)\r\n \r\n left, top, right, bottom = self.position\r\n return (right - left, bottom - top)\r\n\r\n def _input_left_mouse(self, x, y):\r\n left, top, right, bottom = self.position\r\n width, height = right - left, bottom - top\r\n if x < 0 or x > width or y < 0 or y > height:\r\n return\r\n\r\n win32gui.SetForegroundWindow(self._handle)\r\n pos = win32gui.GetCursorPos()\r\n win32api.SetCursorPos((left+x, top+y))\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\r\n win32api.Sleep(100) #ms\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\r\n win32api.Sleep(100) #ms\r\n win32api.SetCursorPos(pos)\r\n\r\n def _input_keyboard(self, text):\r\n pass\r\n\r\n def _prepare_divice_context(self):\r\n left, top, right, bottom = self.position\r\n width, height = right - left, bottom - top\r\n hwindc = win32gui.GetWindowDC(self._handle)\r\n windc = win32ui.CreateDCFromHandle(hwindc)\r\n memdc = windc.CreateCompatibleDC()\r\n bmp = win32ui.CreateBitmap()\r\n bmp.CreateCompatibleBitmap(windc, width, height)\r\n memdc.SelectObject(bmp)\r\n\r\n self._windc = windc\r\n self._memdc = memdc\r\n self._bmp = bmp\r\n\r\n 
@property\r\n def image(self):\r\n if self._bmp is None:\r\n self._prepare_divice_context()\r\n width, height = self.size\r\n self._memdc.BitBlt((0, 0), (width, height), self._windc, (0, 0), win32con.SRCCOPY)\r\n return self._bmp\r\n\r\n @property\r\n def pilimage(self):\r\n _bits = self.image.GetBitmapBits()\r\n width, height = self.size\r\n\r\n bits = []\r\n for i in range(len(_bits)/4):\r\n # change to rpg here, by set alpha = -1\r\n bits.append(struct.pack('4b', _bits[4*i+2], _bits[4*i+1], _bits[4*i+0], -1))\r\n\r\n # do a turn over\r\n _bits = []\r\n for i in range(height):\r\n for j in range(width):\r\n _bits.append(bits[(height-1-i)*width+ j])\r\n _bits = \"\".join(_bits)\r\n\r\n img = Image.frombuffer('RGBA', (width, height), _bits)\r\n return img\r\n\r\n def _screenshot(self, filepath):\r\n dirpath = os.path.dirname(os.path.abspath(filepath))\r\n if not os.path.exists(dirpath):\r\n os.makedirs(dirpath)\r\n self.image.SaveBitmapFile(self._memdc, filepath)\r\n time.sleep(0.5)\r\n\r\n def drag(self):\r\n pass\r\n\r\n\r\nclass WindowsDevice(DeviceMixin):\r\n def __init__(self, window_name=None, exe_file=None, **kwargs):\r\n DeviceMixin.__init__(self)\r\n self._win = Window(window_name, exe_file)\r\n\r\n def screenshot(self, filename=None):\r\n \"\"\"Take screen snapshot\r\n\r\n Args:\r\n filename: filename where save to, optional\r\n\r\n Returns:\r\n PIL.Image object\r\n\r\n Raises:\r\n TypeError, IOError\r\n \"\"\"\r\n img = self._win.pilimage\r\n if filename:\r\n img.save(filename)\r\n return img\r\n\r\n def click(self, x, y):\r\n \"\"\"Simulate click within window screen.\r\n\r\n Args:\r\n x, y: int, pixel distance from window (left, top) as origin\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n self._win._input_left_mouse(x, y)\r\n\r\n def text(self, text):\r\n \"\"\"Simulate text input to window.\r\n\r\n Args:\r\n text: string\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n self._win._input_keyboard(text)\r\n\r\n @property\r\n def display(self):\r\n \"\"\"Display size in pixels.\"\"\"\r\n w, h = self._win.size\r\n return Display(w, h)","sub_path":"atx/device/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"206182543","text":"from django.conf.urls import url, include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom API import views\n\ntrack_list = views.TrackViewSet.as_view({\n 'get': 'list',\n})\n\ntrack_post = views.TrackViewSet.as_view({\n 'post': 'create',\n})\n\ntrack_detail = views.TrackViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\n\n\nurlpatterns = [\n url(r'^tracks/$', track_list, name='track-list'),\n url(r'^track/$', track_post, name='track-post'),\n url(r'^track/(?P[0-9]+)/$', track_detail, name='track-detail'),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', ])\n\n","sub_path":"API/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627666872","text":"# primemodule.py\n\n'''primemodule.py is a module that determines whether or not a number is prime\n or not. If it is, it is added to a list'''\n\n\ndef isPrime(n):\n '''isPrime checks if a number is prime or not. 
If it is it returns boolean\n True, otherwise False'''\n\n if (n <= 1):\n return False\n for i in range(2, n):\n if (n % i == 0):\n return False\n return True\n\n\ndef getNPrime(num):\n '''Iterates through range num to send to isPrime and if it is True it will\n be added to the list myList '''\n\n myList = []\n for x in range(num):\n if isPrime(x+1):\n myList.append(x+1)\n return myList\n","sub_path":"Prime/primepackage/primemodule.py","file_name":"primemodule.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67859930","text":"import wx\nimport numpy as np\nfrom ColorButton import ColorButton\nfrom ColormapperSettings import ColormapperSettings\n\nclass RemixPanel(wx.Panel): \n # Default Settings\n settings = ColormapperSettings()\n # Other variables\n recomputeRemix = False\n \n def __init__(self, parent, settings = settings, id = -1):\n # Optional Settings\n self.settings = settings\n \n # Construct Controls\n wx.Panel.__init__(self, parent, id)\n wx.StaticText(self, -1, \"Remix Controls:\")\n \n # Background\n wx.StaticText(self, -1, \"Cytoplasm:\", \n pos = (0, 25))\n self.colorButtonBackgroundColor = ColorButton(self, -1,\n color = self.settings.GetRemixBackgroundColor(), \n pos = (100, 25), size = (20, 20))\n self.buttonBackgroundCrosshair = wx.Button(self, -1,\n label = \"+\", \n pos = (125, 19), size = (25, 25))\n\n wx.StaticText(self, -1, \"Spectrum:\", \n pos = (170, 25))\n self.colorButtonBackgroundSpectrum = ColorButton(self, -1,\n color = self.settings.GetRemixBackgroundSpectrum(), \n pos = (250, 25), size = (20, 20))\n\n wx.StaticText(self, -1, \"Threshold:\", \n pos = (0, 50))\n self.sliderBackgroundThresh = wx.Slider(self, -1, \n self.settings.GetRemixBackgroundThreshSetting(), 0, 100, \n pos = (70, 50), size = (220, -1),\n style=wx.SL_HORIZONTAL)\n self.textCtrlBackgroundThresh = wx.TextCtrl(self, -1,\n value = \"%.2f\" % settings.GetRemixBackgroundThresh(),\n pos = (290, 50), size = (83, -1))\n self.spinButtonBackgroundThresh = wx.SpinButton(self, -1,\n pos = (374, 50), size = (-1, -1),\n style = wx.SB_VERTICAL | wx.SP_WRAP)\n\n wx.StaticText(self, -1, \"Gain:\", \n pos = (0, 75))\n self.sliderBackgroundGain = wx.Slider(self, -1,\n self.settings.GetRemixBackgroundGainSetting(), 0, 100, \n pos = (70, 75), size = (220, -1),\n style = wx.SL_HORIZONTAL)\n self.textCtrlBackgroundGain = wx.TextCtrl(self, -1,\n value = \"%.2f\" % settings.GetRemixBackgroundGain(),\n pos = (290, 75), size = (83, -1))\n self.spinButtonBackgroundGain = wx.SpinButton(self, -1, \n pos = (374, 75), size = (-1, -1), \n style = wx.SB_VERTICAL | wx.SP_WRAP)\n\n wx.StaticText(self, -1, \"Gamma:\", \n pos = (0, 100))\n self.sliderBackgroundGamma = wx.Slider(self, -1, \n self.settings.GetRemixBackgroundGammaSetting(), 0, 100, \n pos = (70, 100), size = (220, -1),\n style = wx.SL_HORIZONTAL)\n self.textCtrlBackgroundGamma = wx.TextCtrl(self, -1,\n value = \"%.2f\" % self.settings.GetRemixBackgroundGamma(),\n pos = (290, 100), size = (83, -1))\n self.spinButtonBackgroundGamma = wx.SpinButton(self, -1,\n pos = (374, 100), size = (-1, -1),\n style = wx.SB_VERTICAL | wx.SP_WRAP)\n \n wx.StaticLine(self, -1, \n pos = (0, 130), size = (390, -1))\n \n # Nuclei\n wx.StaticText(self, -1, \"Nuclei:\", \n pos = (0, 140))\n self.colorButtonNucleiColor = ColorButton(self, -1,\n color = self.settings.GetRemixNucleiColor(), \n pos = (100, 140), size = (20, 20))\n self.buttonNucleiCrosshair = wx.Button(self, -1, \n label = 
\"+\", \n pos = (125, 134), size = (25, 25))\n \n wx.StaticText(self, -1, \"Spectrum:\", \n pos = (170, 140))\n self.colorButtonNucleiSpectrum = ColorButton(self, -1,\n color = self.settings.GetRemixNucleiSpectrum(), \n pos = (250, 140), size = (20, 20))\n\n wx.StaticText(self, -1, \"Threshold:\", \n pos = (0, 165))\n self.sliderNucleiThresh = wx.Slider(self, -1,\n self.settings.GetRemixNucleiThreshSetting(), 0, 100, \n pos = (70, 165), size = (220, -1),\n style=wx.SL_HORIZONTAL)\n self.textCtrlNucleiThresh = wx.TextCtrl(self, -1,\n value = \"%.2f\" % settings.GetRemixNucleiThresh(),\n pos = (290, 165), size = (83, -1))\n self.spinButtonNucleiThresh = wx.SpinButton(self, -1,\n pos = (374, 165), size = (-1, -1),\n style = wx.SB_VERTICAL | wx.SP_WRAP)\n\n wx.StaticText(self, -1, \"Gain:\",\n pos = (0, 190))\n self.sliderNucleiGain = wx.Slider(self, -1,\n self.settings.GetRemixNucleiGainSetting(), 0, 100, \n pos = (70, 190), size = (220, -1),\n style = wx.SL_HORIZONTAL)\n self.textCtrlNucleiGain = wx.TextCtrl(self, -1,\n value = \"%.2f\" % self.settings.GetRemixNucleiGain(),\n pos = (290, 190), size = (83, -1))\n self.spinButtonNucleiGain = wx.SpinButton(self, -1, \n pos = (374, 190), size = (-1, -1), \n style = wx.SB_VERTICAL | wx.SP_WRAP)\n \n wx.StaticText(self, -1, \"Gamma:\",\n pos = (0, 215))\n self.sliderNucleiGamma = wx.Slider(self, -1, \n self.settings.GetRemixNucleiGammaSetting(), 0, 100, \n pos = (70, 215), size = (220, -1),\n style = wx.SL_HORIZONTAL)\n self.textCtrlNucleiGamma = wx.TextCtrl(self, -1,\n value = \"%.2f\" % self.settings.GetRemixNucleiGamma(),\n pos = (290, 215), size = (83, -1))\n self.spinButtonNucleiGamma = wx.SpinButton(self, -1,\n pos = (374, 215), size = (-1, -1),\n style = wx.SB_VERTICAL | wx.SP_WRAP)\n \n wx.StaticText(self, -1, \"Remix Mode:\", \n pos = (0, 240))\n self.choiceRemixMode = wx.Choice(self, -1, \n pos = (85, 238), \n choices = (\n \"Brightfield (Beer-Lambert)\",\n \"Brightfield (Invert-Multiply)\",\n \"Fluorescence\",\n \"Experimental\"))\n self.choiceRemixMode.SetSelection(self.settings.GetRemixRemixMode())\n \n\n # Event Handlers\n ## Background\n ### Colors\n self.Bind(wx.EVT_BUTTON, self.OnColorButtonBackgroundColorClick,\n self.colorButtonBackgroundColor)\n self.Bind(wx.EVT_BUTTON, self.OnColorButtonBackgroundSpectrumClick,\n self.colorButtonBackgroundSpectrum)\n ### Thresh\n self.Bind(wx.EVT_SCROLL_THUMBTRACK,\n self.OnSliderBackgroundThreshScrollThumbtrack,\n self.sliderBackgroundThresh)\n self.Bind(wx.EVT_SCROLL_THUMBRELEASE,\n self.OnSliderBackgroundThreshScrollThumbrelease,\n self.sliderBackgroundThresh)\n self.Bind(wx.EVT_SPIN_UP,\n self.OnSpinButtonBackgroundThreshSpinUp,\n self.spinButtonBackgroundThresh)\n self.Bind(wx.EVT_SPIN_DOWN,\n self.OnSpinButtonBackgroundThreshSpinDown,\n self.spinButtonBackgroundThresh) \n ### Gain \n self.Bind(wx.EVT_SCROLL_THUMBTRACK,\n self.OnSliderBackgroundGainScrollThumbtrack,\n self.sliderBackgroundGain)\n self.Bind(wx.EVT_SCROLL_THUMBRELEASE,\n self.OnSliderBackgroundGainScrollThumbrelease,\n self.sliderBackgroundGain)\n self.Bind(wx.EVT_SPIN_UP,\n self.OnSpinButtonBackgroundGainSpinUp,\n self.spinButtonBackgroundGain)\n self.Bind(wx.EVT_SPIN_DOWN,\n self.OnSpinButtonBackgroundGainSpinDown,\n self.spinButtonBackgroundGain)\n ### Gamma \n self.Bind(wx.EVT_SCROLL_THUMBTRACK,\n self.OnSliderBackgroundGammaScrollThumbtrack,\n self.sliderBackgroundGamma)\n self.Bind(wx.EVT_SCROLL_THUMBRELEASE,\n self.OnSliderBackgroundGammaScrollThumbrelease,\n self.sliderBackgroundGamma)\n self.Bind(wx.EVT_SPIN_UP,\n 
self.OnSpinButtonBackgroundGammaSpinUp,\n self.spinButtonBackgroundGamma)\n self.Bind(wx.EVT_SPIN_DOWN,\n self.OnSpinButtonBackgroundGammaSpinDown,\n self.spinButtonBackgroundGamma)\n ## Nuclei\n ### Colors\n self.Bind(wx.EVT_BUTTON, self.OnColorButtonNucleiColorClick,\n self.colorButtonNucleiColor)\n self.Bind(wx.EVT_BUTTON, self.OnColorButtonNucleiSpectrumClick,\n self.colorButtonNucleiSpectrum) \n ### Thresh\n self.Bind(wx.EVT_SCROLL_THUMBTRACK,\n self.OnSliderNucleiThreshScrollThumbtrack,\n self.sliderNucleiThresh)\n self.Bind(wx.EVT_SCROLL_THUMBRELEASE,\n self.OnSliderNucleiThreshScrollThumbrelease,\n self.sliderNucleiThresh)\n self.Bind(wx.EVT_SPIN_UP,\n self.OnSpinButtonNucleiThreshSpinUp,\n self.spinButtonNucleiThresh)\n self.Bind(wx.EVT_SPIN_DOWN,\n self.OnSpinButtonNucleiThreshSpinDown,\n self.spinButtonNucleiThresh) \n ### Gain\n self.Bind(wx.EVT_SCROLL_THUMBTRACK,\n self.OnSliderNucleiGainScrollThumbtrack,\n self.sliderNucleiGain)\n self.Bind(wx.EVT_SCROLL_THUMBRELEASE,\n self.OnSliderNucleiGainScrollThumbrelease,\n self.sliderNucleiGain)\n self.Bind(wx.EVT_SPIN_UP,\n self.OnSpinButtonNucleiGainSpinUp,\n self.spinButtonNucleiGain)\n self.Bind(wx.EVT_SPIN_DOWN,\n self.OnSpinButtonNucleiGainSpinDown,\n self.spinButtonNucleiGain)\n ### Gamma\n self.Bind(wx.EVT_SCROLL_THUMBTRACK,\n self.OnSliderNucleiGammaScrollThumbtrack,\n self.sliderNucleiGamma)\n self.Bind(wx.EVT_SCROLL_THUMBRELEASE,\n self.OnSliderNucleiGammaScrollThumbrelease,\n self.sliderNucleiGamma)\n self.Bind(wx.EVT_SPIN_UP,\n self.OnSpinButtonNucleiGammaSpinUp,\n self.spinButtonNucleiGamma)\n self.Bind(wx.EVT_SPIN_DOWN,\n self.OnSpinButtonNucleiGammaSpinDown,\n self.spinButtonNucleiGamma)\n ## Remix Mode\n self.Bind(wx.EVT_CHOICE,\n self.OnChoiceRemixModeChoice,\n self.choiceRemixMode) \n\n ## Background\n ### Colors \n def OnColorButtonBackgroundColorClick(self, event):\n self.settings.SetRemixBackgroundColor(\n self.colorButtonBackgroundColor.GetBackgroundColour()[0:3])\n self.RefreshBackgroundColorButtons()\n self.recomputeRemix = True\n \n def OnColorButtonBackgroundSpectrumClick(self, event):\n # Don't do anything, this just resets the color\n self.colorButtonBackgroundSpectrum.SetBackgroundColour(\n self.settings.GetRemixBackgroundSpectrum())\n \n ### Thresh\n def OnSliderBackgroundThreshScrollThumbtrack(self, event):\n # Update Text Control\n self.settings.SetRemixBackgroundThreshSetting(\n self.sliderBackgroundThresh.GetValue())\n self.textCtrlBackgroundThresh.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundThresh())\n \n def OnSliderBackgroundThreshScrollThumbrelease(self, event):\n # Update Remix\n self.recomputeRemix = True \n \n def OnSpinButtonBackgroundThreshSpinUp(self, event):\n if self.settings.GetRemixBackgroundThreshSetting() < 100:\n self.settings.SetRemixBackgroundThreshSetting(\n self.settings.GetRemixBackgroundThreshSetting() + 1)\n self.sliderBackgroundThresh.SetValue(\n self.settings.GetRemixBackgroundThreshSetting())\n self.textCtrlBackgroundThresh.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundThresh())\n self.recomputeRemix = True\n \n def OnSpinButtonBackgroundThreshSpinDown(self, event):\n if self.settings.GetRemixBackgroundThreshSetting() > 0:\n self.settings.SetRemixBackgroundThreshSetting(\n self.settings.GetRemixBackgroundThreshSetting() - 1)\n self.sliderBackgroundThresh.SetValue(\n self.settings.GetRemixBackgroundThreshSetting())\n self.textCtrlBackgroundThresh.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundThresh())\n self.recomputeRemix = True \n\n ### Gain\n 
def OnSliderBackgroundGainScrollThumbtrack(self, event):\n # Update Text Control\n self.settings.SetRemixBackgroundGainSetting(\n self.sliderBackgroundGain.GetValue())\n self.textCtrlBackgroundGain.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundGain())\n \n def OnSliderBackgroundGainScrollThumbrelease(self, event):\n # Update Remix\n self.recomputeRemix = True \n \n def OnSpinButtonBackgroundGainSpinUp(self, event):\n if self.settings.GetRemixBackgroundGainSetting() < 100:\n self.settings.SetRemixBackgroundGainSetting(\n self.settings.GetRemixBackgroundGainSetting() + 1)\n self.sliderBackgroundGain.SetValue(\n self.settings.GetRemixBackgroundGainSetting())\n self.textCtrlBackgroundGain.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundGain())\n self.recomputeRemix = True\n \n def OnSpinButtonBackgroundGainSpinDown(self, event):\n if self.settings.GetRemixBackgroundGainSetting() > 0:\n self.settings.SetRemixBackgroundGainSetting(\n self.settings.GetRemixBackgroundGainSetting() - 1)\n self.sliderBackgroundGain.SetValue(\n self.settings.GetRemixBackgroundGainSetting())\n self.textCtrlBackgroundGain.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundGain())\n self.recomputeRemix = True \n\n ### Gamma\n def OnSliderBackgroundGammaScrollThumbtrack(self, event):\n # Update Text Control\n self.settings.SetRemixBackgroundGammaSetting(\n self.sliderBackgroundGamma.GetValue())\n self.textCtrlBackgroundGamma.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundGamma())\n \n def OnSliderBackgroundGammaScrollThumbrelease(self, event):\n # Update Remix\n self.recomputeRemix = True \n \n def OnSpinButtonBackgroundGammaSpinUp(self, event):\n if self.settings.GetRemixBackgroundGammaSetting() < 100:\n self.settings.SetRemixBackgroundGammaSetting(\n self.settings.GetRemixBackgroundGammaSetting() + 1)\n self.sliderBackgroundGamma.SetValue(\n self.settings.GetRemixBackgroundGammaSetting())\n self.textCtrlBackgroundGamma.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundGamma())\n self.recomputeRemix = True \n \n def OnSpinButtonBackgroundGammaSpinDown(self, event):\n if self.settings.GetRemixBackgroundGammaSetting() > 0:\n self.settings.SetRemixBackgroundGammaSetting(\n self.settings.GetRemixBackgroundGammaSetting() - 1)\n self.sliderBackgroundGamma.SetValue(\n self.settings.GetRemixBackgroundGammaSetting())\n self.textCtrlBackgroundGamma.SetValue(\n \"%.2f\" % self.settings.GetRemixBackgroundGamma())\n self.recomputeRemix = True \n\n ## Nuclei\n ### Colors\n def OnColorButtonNucleiColorClick(self, event):\n self.settings.SetRemixNucleiColor(\n self.colorButtonNucleiColor.GetBackgroundColour()[0:3])\n self.RefreshNucleiColorButtons()\n self.recomputeRemix = True\n \n def OnColorButtonNucleiSpectrumClick(self, event):\n # Don't do anything, this just resets the color\n self.colorButtonNucleiSpectrum.SetBackgroundColour(\n self.settings.GetRemixNucleiSpectrum())\n\n ### Thresh \n def OnSliderNucleiThreshScrollThumbtrack(self, event):\n # Update Text Control\n self.settings.SetRemixNucleiThreshSetting(\n self.sliderNucleiThresh.GetValue())\n self.textCtrlNucleiThresh.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiThresh())\n \n def OnSliderNucleiThreshScrollThumbrelease(self, event):\n # Update Remix\n self.recomputeRemix = True \n \n def OnSpinButtonNucleiThreshSpinUp(self, event):\n if self.settings.GetRemixNucleiThreshSetting() < 100:\n self.settings.SetRemixNucleiThreshSetting(\n self.settings.GetRemixNucleiThreshSetting() + 1)\n self.sliderNucleiThresh.SetValue(\n 
self.settings.GetRemixNucleiThreshSetting())\n self.textCtrlNucleiThresh.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiThresh())\n self.recomputeRemix = True\n \n def OnSpinButtonNucleiThreshSpinDown(self, event):\n if self.settings.GetRemixNucleiThreshSetting() > 0:\n self.settings.SetRemixNucleiThreshSetting(\n self.settings.GetRemixNucleiThreshSetting() - 1)\n self.sliderNucleiThresh.SetValue(\n self.settings.GetRemixNucleiThreshSetting())\n self.textCtrlNucleiThresh.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiThresh())\n self.recomputeRemix = True \n\n ### Gain\n def OnSliderNucleiGainScrollThumbtrack(self, event):\n # Update Text Control\n self.settings.SetRemixNucleiGainSetting(\n self.sliderNucleiGain.GetValue())\n self.textCtrlNucleiGain.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiGain())\n \n def OnSliderNucleiGainScrollThumbrelease(self, event):\n # Update Remix\n self.recomputeRemix = True \n \n def OnSpinButtonNucleiGainSpinUp(self, event):\n if self.settings.GetRemixNucleiGainSetting() < 100:\n self.settings.SetRemixNucleiGainSetting(\n self.settings.GetRemixNucleiGainSetting() + 1)\n self.sliderNucleiGain.SetValue(\n self.settings.GetRemixNucleiGainSetting())\n self.textCtrlNucleiGain.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiGain())\n self.recomputeRemix = True\n \n def OnSpinButtonNucleiGainSpinDown(self, event):\n if self.settings.GetRemixNucleiGainSetting() > 0:\n self.settings.SetRemixNucleiGainSetting(\n self.settings.GetRemixNucleiGainSetting() - 1)\n self.sliderNucleiGain.SetValue(\n self.settings.GetRemixNucleiGainSetting())\n self.textCtrlNucleiGain.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiGain())\n self.recomputeRemix = True \n\n ### Gamma\n def OnSliderNucleiGammaScrollThumbtrack(self, event):\n # Update Text Control\n self.settings.SetRemixNucleiGammaSetting(\n self.sliderNucleiGamma.GetValue())\n self.textCtrlNucleiGamma.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiGamma())\n \n def OnSliderNucleiGammaScrollThumbrelease(self, event):\n # Update Remix\n self.recomputeRemix = True \n \n def OnSpinButtonNucleiGammaSpinUp(self, event):\n if self.settings.GetRemixNucleiGammaSetting() < 100:\n self.settings.SetRemixNucleiGammaSetting(\n self.settings.GetRemixNucleiGammaSetting() + 1)\n self.sliderNucleiGamma.SetValue(\n self.settings.GetRemixNucleiGammaSetting())\n self.textCtrlNucleiGamma.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiGamma())\n self.recomputeRemix = True \n \n def OnSpinButtonNucleiGammaSpinDown(self, event):\n if self.settings.GetRemixNucleiGammaSetting() > 0:\n self.settings.SetRemixNucleiGammaSetting(\n self.settings.GetRemixNucleiGammaSetting() - 1)\n self.sliderNucleiGamma.SetValue(\n self.settings.GetRemixNucleiGammaSetting())\n self.textCtrlNucleiGamma.SetValue(\n \"%.2f\" % self.settings.GetRemixNucleiGamma())\n self.recomputeRemix = True \n \n ## Remix Mode \n def OnChoiceRemixModeChoice(self, event):\n if (self.settings.GetRemixRemixMode() \n != self.choiceRemixMode.GetSelection()):\n self.settings.SetRemixRemixMode(\n self.choiceRemixMode.GetSelection())\n self.recomputeRemix = True\n \n def RefreshBackgroundColorButtons(self):\n self.colorButtonBackgroundColor.SetBackgroundColour(\n self.settings.GetRemixBackgroundColor())\n self.colorButtonBackgroundColor.Refresh()\n self.colorButtonBackgroundSpectrum.SetBackgroundColour(\n self.settings.GetRemixBackgroundSpectrum())\n self.colorButtonBackgroundSpectrum.Refresh()\n \n def RefreshNucleiColorButtons(self):\n 
self.colorButtonNucleiColor.SetBackgroundColour(\n self.settings.GetRemixNucleiColor())\n self.colorButtonNucleiColor.Refresh()\n self.colorButtonNucleiSpectrum.SetBackgroundColour(\n self.settings.GetRemixNucleiSpectrum())\n self.colorButtonNucleiSpectrum.Refresh() \n\n \nif __name__ == \"__main__\":\n app = wx.App()\n frame = wx.Frame(None,title = \"Test Frame\")\n frame.SetSize((600, 400))\n unmixPanel = RemixPanel(frame)\n frame.Show()\n app.MainLoop()","sub_path":"lib/RemixPanel.py","file_name":"RemixPanel.py","file_ext":"py","file_size_in_byte":21403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532189577","text":"# This program finds the lowest palindrome, given input.\n\n# From https://open.kattis.com/problems/palindromicpassword\n\n\"\"\"\nTitle: Palindromic Password\nThe IT department at your school decided to change their password policy. Each password will have to consist of N 6-digit numbers separated by dashes, where N will be determined by the phase of the moon and the weather forecast for the day after it will be generated.\n\nYou realized that, if all of the numbers were palindromes (same numbers as the original ones if read backwards), you would have to remember a bunch of 3-digit numbers, which did not sound that bad (at the time).\n\nIn order to generate your password of N numbers, you get a list of N randomly generated 6-digit numbers and find the palindromic number closest to them.\n\nOf course, you would like to automate this process...\n\nInput\nThe first line of the input contains a single positive integer N≤1000 indicating the number of six-digit numbers in the input. Each of the next N lines contains a six-digit number without leading zeroes.\n\nOutput\nFor each six-digit number in the input, output another six-digit number that is closest to it and is also a palindrome. “Closest” in this context means “a number having the smallest absolute difference with the original number”. If there are two different numbers satisfying the above condition, output the smaller one of the two. 
Remember, no leading zeroes.\n\nSample Input 1\n2\n123321\n123322\n\nSample Output 1\n123321\n123321\n\"\"\"\n\ncaseQuantity = int(input())\ncases = []\nresults = []\n\nfor i in range(caseQuantity):\n cases.append(input())\n\nfor i in range(caseQuantity):\n case = cases[i]\n\n # it's all about the first three digits\n # try the first three, reversed\n # then go low, for the third reversed digit\n # then go high for the third reversed digit\n # then go low for the second reversed digit\n # try middle, low, high for third reversed digit?\n # then go high for the second reversed digit\n # try middle, low, high for third reversed digit?\n # then go low for first reversed digit\n # then go middle, low, high for the second reversed digit\n # try middle, low, high for third reversed digit?\n # then go middle, low, high for the second reversed digit\n # try middle, low, high for third reversed digit?\n # then go high for first reversed digit\n # then go middle, low, high for the second reversed digit\n # try middle, low, high for third reversed digit?\n # then go middle, low, high for the second reversed digit\n # try middle, low, high for third reversed digit?\n\n # the first three, reversed, will often yield the closest solution\n # however, the might not always be the case, for example:\n # input: 499 000\n # try 499 994\n # but, 498 894 is closer\n # Therefore, an algo must be conceived to determine how to proceed.\n\n\n # establish the first three digits\n firstThree = case[0:3]\n\n # try straight\n\n # try down (third digit)\n\n # try up (third digit)\n\n possibilities = list(map(int, [diff0, diff1, diff2]))\n result = min(possibilities)\n \"\"\"\n pickup here:\n 1. establish string representations of the\n straight, down, and up scenarios.\n 2. Then, put then in a list.\n 3. Then, determine the index of the min diff solution.\n 4. Then, append the item at that index in the list from step to the \n results list.\n \"\"\"\n\n # NO LEADING ZEROES\n\n # Will it ever be the second or first digits, to be manipulated?\n\n\nfor result in results:\n print(result)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"kattis/palindromicpassword.py","file_name":"palindromicpassword.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74938579","text":"'''\nGiven a non-empty array of non-negative integers nums, the degree of this array is defined as the maximum frequency of any one of its elements.\n\nYour task is to find the smallest possible length of a (contiguous) subarray of nums, that has the same degree as nums.\n\nExample 1:\nInput: [1, 2, 2, 3, 1]\nOutput: 2\nExplanation: \nThe input array has a degree of 2 because both elements 1 and 2 appear twice.\nOf the subarrays that have the same degree:\n[1, 2, 2, 3, 1], [1, 2, 2, 3], [2, 2, 3, 1], [1, 2, 2], [2, 2, 3], [2, 2]\nThe shortest length is 2. 
So return 2.\n'''\n\n__date__ = '2018-7-14'\n\n# way 1\nclass Solution_1(object):\n def findShortestSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n count = {}\n first_index = {}\n degree, res = 0, 1\n for index, num in enumerate(nums):\n if num in count:\n count[num] += 1\n else:\n count[num] = 1\n first_index[num] = index\n if count[num] > degree:\n degree = count[num]\n res = index - first_index[num] + 1\n elif count[num] == degree:\n res = min(index - first_index[num] + 1, res)\n return res\n\n# improve way 1\n# only one dictionary\nclass Solution(object):\n def findShortestSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n count = {}\n degree, res = 0, 1\n for index, num in enumerate(nums):\n if num in count:\n count[num].append(index)\n else:\n count[num] = [index]\n for list_index in count.values():\n if len(list_index) > degree:\n degree = len(list_index)\n res = list_index[-1] - list_index[0] + 1\n elif len(list_index) == degree:\n res = min(list_index[-1] - list_index[0] + 1, res)\n return res\n\n# way 2\nclass Solution_2(object):\n def findShortestSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums_dict={}\n for key in nums:\n if key not in nums_dict:\n nums_dict[key]=1\n else:\n nums_dict[key] += 1\n nums_dict=sorted(nums_dict.items(),key=lambda x:x[1])\n a,b=nums_dict[-1][0],nums_dict[-1][1]\n if b==1:return 1\n max_value_list=[a]\n n=len(nums_dict);i=n-2\n while i>=0 and nums_dict[i][1]==b:\n max_value_list.append(nums_dict[i][0])\n i -= 1\n m=len(nums);min_length=m\n for value in max_value_list:\n j,k=0,m-1\n while j<k and nums[j] != value:\n j += 1\n while k>j and nums[k] != value:\n k -= 1\n length=k-j+1\n if length<min_length:\n min_length=length\n return min_length\n\nif dur>60 and dur<3600:\n dur=dur/60\n print(\"Execution Time:\",dur,\"minutes\")\nelse:\n dur=dur/(60*60)\n print(\"Execution Time:\",dur,\"hours\")\n","sub_path":"3.concurrent_pr_read.py","file_name":"3.concurrent_pr_read.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"609945900","text":"from mitmproxy import http\nimport json, os, re\n\ndef response(flow: http.HTTPFlow) -> None:\n request_url = flow.request.url\n with open(os.path.abspath(f\"config/mappings.json\"), 'r') as f:\n rewrite_mapping = json.load(f)\n for pattern, props in rewrite_mapping:\n if re.search(pattern, request_url):\n flow.response = http.HTTPResponse.make(\n props['status'], json.dumps(props['body']), props['header'])\n","sub_path":"mitm/mock-response.py","file_name":"mock-response.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"464557292","text":"#!/bin/python\r\n\r\n\"\"\" Python tuples --- list of fixed size\"\"\"\r\n\"\"\" Python sets --- unordered collection of 'Unique' items. No duplicate item possible\"\"\"\r\n\r\n\"\"\"\r\nTuples use parentheses () but not mandatory\r\n\r\nTuples are immutable\r\n\r\n\"\"\"\r\n# code 1: Tuples examples\r\n\r\ntup = (10,20,30)\r\ntup2 = 5,10,15,20 # parentheses are not mandatory\r\ntup3 = 100, # single element tuple needs a comma at the end\r\ntup4 = ('a') # not a tuple\r\ntup5 = tuple()\r\ntup6 = (20,21,22, [23,24,25])\r\nprint(type(tup5))\r\nprint(tup , id(tup))\r\n\r\ntup = tup + tup2 # concatenation creates a new tuple. 
see id below and compare with above id\nprint( tup, id(tup))\n\ntup = tup[2:5] # slice creates a new tuple\nprint(tup, id(tup))\n\nprint(len(tup6))\ntup6[-1].append(100) # a list inside tuple is mutable\nprint(tup6)\n\n#tup[-1] = 100 # this is error\n\n# as tuples are immutable\n\n\n\"\"\" Format Operator % uses tuples when there are more than one format sequence in the string \"\"\"\n\n\nprint(\"Hello Friends my name is %s \\\nand my age is %d \\\nMy car is %d years old\" % ('Kailash', 40, 5)) # Format operator % and tuple passed as arguments here\n\n\n# code 2: Sets example\n\nmyset = set()\nprint(type(myset))\nmyset.add(234)\nprint(myset)\n\ncountry = 'mississippi' # remove duplicates from a list like ['one', 'two', 'three', 'one', 'four', 'two']\nnewset = set(country)\nprint(newset)\nprint(set(['one', 'two', 'three', 'one', 'four', 'two']))\n\n# code 3: Some more sets with strings\n\nmystring = 'verylongstringofmultiplealphabetsinthesame sentence which \\\nshould have zebra crossings all letters punjab ghaziabad'\n\nmylist =[]\nfor eachletter in mystring:\n mylist.append(eachletter)\n\nprint(mylist)\nprint(set(mylist))\nmynewlist = []\nmyset = set('a' 'b' 'c' 'd' 'e')\nfor each in myset:\n if each in mystring:\n mynewlist.append(each)\nprint(mynewlist)","sub_path":"Python built in containers/Python tuples & sets.py","file_name":"Python tuples & sets.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146106387","text":"# -*- coding: utf-8 -*-\n\n###############################################################################\n# Information\n###############################################################################\n# Created by Linwood Creekmore \n# Input from Vikram Mittal\n\n# In partial fulfillment of the requirements for the Georgetown University Data Analytics Graduate Certificate Program\n\n# May 1, 2015\n\n# https://plus.google.com/+LinwoodCreekmoreIII/\n\n###############################################################################\n# Imports\n###############################################################################\n\n\n###############################################################################\n# Main Functions\n###############################################################################\n\n\ndef createFile(dirName, fileName):\n driver = getDriver(dirName) # obtain driver id from directory name\n #df2 = pd.read_csv(os.path.join(OUTPUT_DIR,fileName))\n\n trip = fileName.split('_')[1]\n\n # This will open the driver directory/folder in a directory with all the driver files. 
This should go through all 200 trip files for one driver.\n \n with open(os.path.join(os.path.normpath(path), dirName, fileName), 'rU') as infile:\n reader = csv.DictReader(infile, delimiter=',', quotechar='\"')\n\n # This creates the output csv file that will hold all the calculated metrics\n\n with open(os.path.join(OUTPUT_DIR,fileName), 'wb') as outfile:\n writer = csv.writer(outfile)\n\n \n\n # This writes the header row for our output file using trip/driver IDs from Vik's IDify.py file\n \n for idx, row in enumerate(reader):\n if idx == 0:\n trip_id = 1\n\n writer.writerow(['driver_id', 'trip_id', 'Velocity (mph)', 'Acceleration (mph per s)','Absolute Acceleration','Time (s)', 'Increment Traveled (feet)','Change in Direction per s','Direction (deg)','Direction(card)'])\n\n if not row == { 'driver_id':'driver_id', 'trip_id':'trip_id', 'x':'x', 'y':'y'}:\n\n logger.warning(\"Missing or invalid header for driver %s and trip %s\" % (\"driver\", trip_id))\n\n else:\n\n if len(row) != 4:\n\n logger.warning(\"Too few/many values in row %s for driver %s and trip %s\" % (idx, driver, trip_id))\n \n\n # skip first line with headings.\n\n infile.next() \n\n # Here we establish all the zero values for the key metrics we iterate. These are defined in the key metrics documentation.\n\n last_x, last_y, = 0.0, 0.0\n seconds = 0 \n distance = 0\n stops = 0\n braking_event = 0\n last_x_avg_vel= 0\n last_y_avg_vel= 0\n max_velocity = 0\n accelerations = 0\n decelerations = 0\n x_avg_vel = 0\n y_avg_vel = 0\n x_avg_acl = 0\n y_avg_acl = 0\n last_heading = 0.0\n cur_heading = 0.0\n lastvel = 0\n lastaccel = 0\n absoluteaccel = 0\n\n\n # Creating an list to append all the calculated key metric values\n metrics = []\n\n # Establish the trip id value to iterate over for our database\n #trip_id = 1\n\n # We loop through the row values of x,y and calculate the key metric values, and then append the value to the metrics list above.\n\n for l in infile:\n \n driver_id, trip_id, x, y, = l.split(',') \n driver_id, trip_id, x, y = int(driver_id), int(trip_id), float(x), float(y)\n x_avg_vel,y_avg_vel = getVelocity(x,y,last_x,last_y)\n\n\n metrics.append(fileName.split('_')[0]) #append driver #\n metrics.append(trip_id) #append trip id\n velocity = tomph(dotproduct(x_avg_vel, y_avg_vel))\n if velocity > 200:\n velocity = lastvel\n\n metrics.append(velocity)\n\n holdingvel = velocity - lastvel\n absoluteaccel = velocity - lastvel\n if abs(holdingvel) > 200:\n holdingvel = lastaccel\n \n\n metrics.append(holdingvel) #acceleration\n if absoluteaccel < 0:\n absoluteaccel = abs(absoluteaccel)\n\n metrics.append(absoluteaccel) #absolute acceleration\n metrics.append(seconds) #time\n metrics.append(getIncrement(x,last_x,y,last_y)) #distance traveled\n\n last_heading = cur_heading\n cur_heading = heading(y,x, last_y, last_x)\n holding = abs(cur_heading - last_heading)\n if holding >= 45 and getIncrement(x,last_x,y,last_y) <= 2:\n holding = 0\n elif holding >= 180:\n holding = 360 - holding\n\n metrics.append(holding) # enters change in direction\n metrics.append(heading(y,x, last_y, last_x)) # calculates the heading in degrees\n metrics.append(getCardinalDirection(heading(y,x,last_y,last_x))) # calculates cardinal position\n\n # We write the identifying and key metrics values to our csv\n\n writer.writerow(metrics)\n\n # Next, we clear our metrics list for the next loop, iterate our time and trip ID values, and store the previous locations and velocity.\n\n metrics = []\n seconds += 1\n trip_id += 1\n last_x, last_y = x, 
                metrics = []\n                seconds += 1\n                trip_id += 1\n                last_x, last_y = x, y\n                lastvel = velocity\n                lastaccel = holdingvel # store this row's acceleration (the original assigned in the wrong direction)\n                last_x_avg_vel, last_y_avg_vel = x_avg_vel, y_avg_vel\n                direction = last_heading\n","sub_path":"bin/createtripfile.py","file_name":"createtripfile.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"433133472","text":"# Two passes.\n# First pass: multiply in everything to the left of each index.\n# Second pass: multiply in everything to the right.\nclass Solution:\n    def productExceptSelf(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        # Product of elements on the left side\n        length = len(nums)\n        ans = [1] * length\n        for i in range(1, length):\n            ans[i] = ans[i-1] * nums[i-1]\n        # Product of elements on the right side\n        count = 1\n        for i in range(length-2, -1, -1):\n            count *= nums[i+1]\n            ans[i] = ans[i] * count\n        return ans\n","sub_path":"src/238-product-array-except-self.py","file_name":"238-product-array-except-self.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"481314505","text":"import math\nimport random\nfrom Colorharmony import ColorHarmony\n\n\nclass GA():\n\n    @staticmethod\n    def select_initial_population(child):\n        # Build one random Lab color whose hue stays close to the child's hue\n        # and whose lightness differs from the child's by at least 50.\n        child = ColorHarmony.rgb2lab(child)\n        # hue value for the child\n        HueValue = math.sqrt((child[1]) ** 2 + (child[2] ** 2))\n        tempL = random.uniform(0, 100)\n        selisih = abs(child[0] - tempL)\n        while (selisih < 50):\n            tempL = random.uniform(0, 100)\n            selisih = abs(child[0] - tempL)\n        L = tempL\n        # for A\n        tempA = random.uniform(-127, 128)\n        # for B\n        tempB = random.uniform(-128, 127)\n\n        HueTemp = math.sqrt((tempA) ** 2 + (tempB ** 2))\n\n        while (abs(HueValue - HueTemp) > 10):\n            a = child[1] - 20\n            if (a < -127):\n                a = -127\n            b = child[1] + 20\n            if (b > 128):\n                b = 128\n            c = child[2] - 20\n            if (c < -128): # the original clamped a here by mistake\n                c = -128\n            d = child[2] + 20\n            if (d > 127): # the original clamped b here by mistake\n                d = 127\n            tempA = random.uniform(a, b)\n            # for B\n            tempB = random.uniform(c, d)\n            HueTemp = math.sqrt((tempA) ** 2 + (tempB ** 2))\n        A = tempA\n        B = tempB\n        x = []\n        x.append(L)\n        x.append(A)\n        x.append(B)\n        return x\n\n    @staticmethod\n    def select_individual_by_tournament(array_of_score, population, iterasi):\n        temp = float('-inf')\n        i = 0\n        parent = []\n        while i < len(array_of_score):\n            if (iterasi == 4):\n                temp = array_of_score[i]\n                parent = population[4]\n                break\n            if (temp <= array_of_score[i]) and array_of_score[i] != 0: # the original compared the whole list to 0\n                temp = array_of_score[i]\n                if (i + iterasi > 4):\n                    parent = population[4]\n                else:\n                    parent = population[i + iterasi]\n            i += 1\n\n        return parent\n\n    @staticmethod\n    def breed_by_crossover(parent_1, parent_2):\n        # Get length of chromosome\n        chromosome_length = len(parent_1)\n\n        # Pick crossover point, avoiding the ends of the chromosome\n        crossover_point = random.randint(1, chromosome_length - 1)\n\n        # Create children by swapping the parents' tails at the crossover point\n        # (plain list slicing and concatenation; numpy is not needed here).\n
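        # Worked example with hypothetical Lab chromosomes: for parents\n        # [50, 10, -20] and [70, -5, 30] and crossover_point == 1, the children\n        # are [50, -5, 30] and [70, 10, -20].\n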
        child_1 = list(parent_1[0:crossover_point]) + list(parent_2[crossover_point:chromosome_length])\n\n        child_2 = list(parent_2[0:crossover_point]) + list(parent_1[crossover_point:chromosome_length])\n\n        # Return children\n        return child_1, child_2\n\n    ## TODO: rename to REGION_1_GENETIC_PROCESS\n    @staticmethod\n    def bigGA(initialpopulation, mutation_rate, dom):\n        GenerasiiTigaR1 = []\n        GenerasiEmpatR1 = []\n        GenerasiLimaR1 = []\n        scoreKeseluruhan = []\n        temps = []\n        print(\"GEN SATU Populasi R1\")\n        print(initialpopulation)\n        print(\"SCORE 1\")\n        print(ColorHarmony.scoretotal(initialpopulation))\n        scoreKeseluruhan.append(ColorHarmony.scoretotal(initialpopulation))\n        # scores of the initial population\n        p = ColorHarmony.color_harmony_2_color(initialpopulation)\n        # pick the parent with the best score\n        x = GA.select_individual_by_tournament(p, initialpopulation, 1)\n        # put the dominant color into the population (it is always locked)\n        temps.append(ColorHarmony.rgb2lab(dom))\n        # add the best color from the initial population\n        temps.append(x)\n        # create 2 children from 2 parents\n        index = random.randint(0, 4)\n        child1, child2 = GA.breed_by_crossover(initialpopulation[index], x)\n        # mutate one child so it is not too similar to its parent (variance is needed)\n        cc = GA.randomly_mutate_population(child1, mutation_rate)\n        # add the mutated child 1\n        temps.append(cc)\n        # add child 2\n        temps.append(child2)\n        # create one more color, since so far there are only 4 (2 parents, 2 children)\n        y = GA.randomly_mutate_population(child2, mutation_rate)\n        temps.append(y)\n        # generation 2 population\n        print(\"GEN DUA Populasi R1\")\n        print(temps)\n        print(\"SCORE 2\")\n        print(ColorHarmony.scoretotal(temps))\n        scoreKeseluruhan.append(ColorHarmony.scoretotal(temps))\n        # the dominant color and the second color are locked\n        # find the third color by comparing the scores of colors 1 and 2 against each remaining candidate (the 3 other colors)\n        Iterasi3 = ColorHarmony.color_harmony_2_colors_cromosom(temps, 2)\n        # pick the best third color\n        rr = GA.select_individual_by_tournament(Iterasi3, temps, 2)\n        # add the first, second and third colors\n        GenerasiiTigaR1.append(ColorHarmony.rgb2lab(dom))\n        GenerasiiTigaR1.append(temps[1])\n        GenerasiiTigaR1.append(rr)\n        index1 = random.randint(0, 4)\n        child3, child4 = GA.breed_by_crossover(temps[index1], rr)\n\n        cx = GA.randomly_mutate_population(child3, mutation_rate)\n        child4 = GA.randomly_mutate_population(child4, mutation_rate)\n        GenerasiiTigaR1.append(cx)\n        GenerasiiTigaR1.append(child4)\n        print(\"GEN TIGA Populasi R1\")\n        print(GenerasiiTigaR1)\n        print(\"SCORE 3\")\n        print(ColorHarmony.scoretotal(GenerasiiTigaR1))\n        scoreKeseluruhan.append(ColorHarmony.scoretotal(GenerasiiTigaR1))\n\n        Iterasi4 = ColorHarmony.color_harmony_2_colors_cromosom(GenerasiiTigaR1, 3)\n        rx = GA.select_individual_by_tournament(Iterasi4, GenerasiiTigaR1, 3)\n        GenerasiEmpatR1.append(ColorHarmony.rgb2lab(dom))\n        GenerasiEmpatR1.append(GenerasiiTigaR1[1])\n        GenerasiEmpatR1.append(GenerasiiTigaR1[2])\n        GenerasiEmpatR1.append(rx)\n        index2 = random.randint(0, 4)\n        child5, child6 = GA.breed_by_crossover(GenerasiiTigaR1[index2], rx)\n\n        xx = GA.randomly_mutate_population(child5, mutation_rate)\n\n        GenerasiEmpatR1.append(xx)\n        print(\"GEN EMPAT Populasi R1\")\n        print(GenerasiEmpatR1)\n        print(\"SCORE 4\")\n        print(ColorHarmony.scoretotal(GenerasiEmpatR1))\n        scoreKeseluruhan.append(ColorHarmony.scoretotal(GenerasiEmpatR1))\n\n        Iterasi4a = ColorHarmony.color_harmony_2_colors_cromosom(GenerasiEmpatR1, 4)\n        rxx = GA.select_individual_by_tournament(Iterasi4a, GenerasiEmpatR1, 4)\n
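        # Unlike the earlier generations, this generation-five candidate is\n        # mutated once more below before it is locked into the population.\n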
        rxx = GA.randomly_mutate_population(rxx, mutation_rate)\n\n        GenerasiLimaR1.append(ColorHarmony.rgb2lab(dom))\n        GenerasiLimaR1.append(GenerasiEmpatR1[1])\n        GenerasiLimaR1.append(GenerasiEmpatR1[2])\n        GenerasiLimaR1.append(GenerasiEmpatR1[3])\n        GenerasiLimaR1.append(rxx)\n        print(\"GEN LIMA Populasi R1\")\n        print(GenerasiLimaR1)\n        print(ColorHarmony.scoretotal(GenerasiLimaR1))\n        scoreKeseluruhan.append(ColorHarmony.scoretotal(GenerasiLimaR1))\n        # return the generation with the best overall score\n        sementara = scoreKeseluruhan.index(max(scoreKeseluruhan))\n\n        if (sementara == 0):\n            return initialpopulation\n        elif (sementara == 1):\n            return temps\n        elif (sementara == 2):\n            return GenerasiiTigaR1\n        elif (sementara == 3):\n            return GenerasiEmpatR1\n        elif (sementara == 4):\n            return GenerasiLimaR1\n\n    @staticmethod\n    def smallGA(initialpopulation, mutation_rate, domsmall):\n        GenerasiiTigaR2 = []\n        GenerasiEmpatR2 = []\n        GenerasiLimaR2 = []\n        TempSmallColor = []\n        ScoreKeseluruhanR2 = []\n        print(\"GEN SATU Populasi R2\")\n        print(initialpopulation)\n        print(\"SCORE 1\")\n        print(ColorHarmony.scoretotal(initialpopulation))\n        ScoreKeseluruhanR2.append(ColorHarmony.scoretotal(initialpopulation))\n        # scores of the initial population\n        psmall = ColorHarmony.color_harmony_2_color(initialpopulation)\n        # pick the parent with the best score\n        xsmall = GA.select_individual_by_tournament(psmall, initialpopulation, 1)\n        # put the dominant color into the population (it is always locked)\n        TempSmallColor.append(ColorHarmony.rgb2lab(domsmall))\n        # add the best color from the initial population\n        TempSmallColor.append(xsmall)\n        # create 2 children from 2 parents\n        index = random.randint(0, 4)\n        child1small, child2small = GA.breed_by_crossover(initialpopulation[index], xsmall)\n        # mutate the children so they are not too similar to their parents (variance is needed)\n        ccsmall = GA.randomly_mutate_population(child1small, mutation_rate)\n        child2small = GA.randomly_mutate_population(child2small, mutation_rate)\n        # add the mutated child 1\n        TempSmallColor.append(ccsmall)\n        # add child 2\n        TempSmallColor.append(child2small)\n        # create 1 random color, since so far there are only 4 (2 parents, 2 children)\n        ysmall = GA.select_initial_population(domsmall)\n        TempSmallColor.append(ysmall)\n        # generation 2 population\n        print(\"GEN DUA Populasi R2\")\n        print(TempSmallColor)\n        print(\"SCORE 2\")\n        print(ColorHarmony.scoretotal(TempSmallColor))\n        ScoreKeseluruhanR2.append(ColorHarmony.scoretotal(TempSmallColor))\n        # the dominant color and the second color are locked\n        # find the third color by comparing the scores of colors 1 and 2 against each remaining candidate (the 3 other colors)\n        Iterasi3small = ColorHarmony.color_harmony_2_colors_cromosom(TempSmallColor, 2)\n        # pick the best third color\n        rrsmall = GA.select_individual_by_tournament(Iterasi3small, TempSmallColor, 2)\n        # add the first, second and third colors\n        GenerasiiTigaR2.append(ColorHarmony.rgb2lab(domsmall))\n        GenerasiiTigaR2.append(TempSmallColor[1])\n        GenerasiiTigaR2.append(rrsmall)\n        index1 = random.randint(0, 4)\n        child3small, child4small = GA.breed_by_crossover(TempSmallColor[index1], rrsmall)\n        cxsmall = GA.randomly_mutate_population(child3small, mutation_rate)\n        child4small = GA.randomly_mutate_population(child4small, mutation_rate)\n        GenerasiiTigaR2.append(cxsmall)\n        GenerasiiTigaR2.append(child4small)\n        print(\"GEN TIGA Populasi R2\")\n        print(GenerasiiTigaR2)\n        print(\"SCORE 3\")\n        print(ColorHarmony.scoretotal(GenerasiiTigaR2))\n        ScoreKeseluruhanR2.append(ColorHarmony.scoretotal(GenerasiiTigaR2))\n\n        Iterasi4small = ColorHarmony.color_harmony_2_colors_cromosom(GenerasiiTigaR2, 3)\n
        rxsmall = GA.select_individual_by_tournament(Iterasi4small, GenerasiiTigaR2, 3)\n        GenerasiEmpatR2.append(ColorHarmony.rgb2lab(domsmall))\n        GenerasiEmpatR2.append(GenerasiiTigaR2[1])\n        GenerasiEmpatR2.append(GenerasiiTigaR2[2])\n        GenerasiEmpatR2.append(rxsmall)\n        index2 = random.randint(0, 4)\n        child5small, child6small = GA.breed_by_crossover(GenerasiiTigaR2[index2], rxsmall)\n        xxsmall = GA.randomly_mutate_population(child5small, mutation_rate)\n        GenerasiEmpatR2.append(xxsmall)\n        print(\"GEN EMPAT Populasi R2\")\n        print(GenerasiEmpatR2)\n        print(\"SCORE 4\")\n        print(ColorHarmony.scoretotal(GenerasiEmpatR2))\n        ScoreKeseluruhanR2.append(ColorHarmony.scoretotal(GenerasiEmpatR2))\n\n        Iterasi4asmall = ColorHarmony.color_harmony_2_colors_cromosom(GenerasiEmpatR2, 4)\n        rxxsmall = GA.select_individual_by_tournament(Iterasi4asmall, GenerasiEmpatR2, 4)\n        GenerasiLimaR2.append(ColorHarmony.rgb2lab(domsmall))\n        GenerasiLimaR2.append(GenerasiEmpatR2[1])\n        GenerasiLimaR2.append(GenerasiEmpatR2[2])\n        GenerasiLimaR2.append(GenerasiEmpatR2[3])\n        GenerasiLimaR2.append(rxxsmall)\n        print(\"GEN LIMA Populasi R2\")\n        print(GenerasiLimaR2)\n        print(ColorHarmony.scoretotal(GenerasiLimaR2))\n        ScoreKeseluruhanR2.append(ColorHarmony.scoretotal(GenerasiLimaR2))\n        # return the generation with the best overall score\n        sementaraR2 = ScoreKeseluruhanR2.index(max(ScoreKeseluruhanR2))\n        if (sementaraR2 == 0):\n            return initialpopulation\n        elif (sementaraR2 == 1):\n            return TempSmallColor\n        elif (sementaraR2 == 2):\n            return GenerasiiTigaR2\n        elif (sementaraR2 == 3):\n            return GenerasiEmpatR2\n        elif (sementaraR2 == 4):\n            return GenerasiLimaR2\n\n    @staticmethod\n    def randomly_mutate_population(child, mutation_rate):\n        # Mutate at least one gene so that L, A and B below are always defined\n        # (with a small mutation_rate the original int() truncation gave zero\n        # passes and the function crashed on undefined names).\n        banyak_gen = max(1, int(len(child) * mutation_rate))\n        i = 0\n        while i < banyak_gen:\n            # hue value for the child\n            HueValue = math.sqrt((child[1]) ** 2 + (child[2] ** 2))\n            tempL = random.uniform(0, 100)\n            selisih = abs(child[0] - tempL)\n            while (selisih < 50):\n                tempL = random.uniform(0, 100)\n                selisih = abs(child[0] - tempL)\n\n            L = tempL\n            # for A\n            tempA = random.uniform(-127, 128)\n            # for B\n            tempB = random.uniform(-128, 127)\n\n            HueTemp = math.sqrt((tempA) ** 2 + (tempB ** 2))\n\n            while (abs(HueValue - HueTemp) > 6):\n                a = child[1] - 20\n                if (a < -127):\n                    a = -127\n                b = child[1] + 20\n                if (b > 128):\n                    b = 128\n                c = child[2] - 20\n                if (c < -128): # the original clamped a here by mistake\n                    c = -128\n                d = child[2] + 20\n                if (d > 127): # the original clamped b here by mistake\n                    d = 127\n                tempA = random.uniform(a, b)\n                # for B\n                tempB = random.uniform(c, d)\n                HueTemp = math.sqrt((tempA) ** 2 + (tempB ** 2))\n            # advance the outer loop here rather than inside the retry loop, so\n            # the function cannot spin forever when the first hue is already close\n            i += 1\n\n            A = tempA\n            B = tempB\n        x = []\n        x.append(L)\n        x.append(A)\n        x.append(B)\n        return x\n","sub_path":"GA.py","file_name":"GA.py","file_ext":"py","file_size_in_byte":13459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"557431794","text":"str1 = input()\nstr1 = str1[1:len(str1)-1]\narr1 = str1.split(\",\")\narr1 = [int(arr1[i]) for i in range(len(arr1))]\nstr2 = input()\nstr2 = str2[1:len(str2)-1]\narr2 = str2.split(\",\")\narr2 = [int(arr2[i]) for i in range(len(arr2))]\n\nsame = []\n
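# Note: values are appended once per occurrence in arr1, so duplicates in arr1\n# produce duplicates here; sorted(set(arr1) & set(arr2)) would give a\n# duplicate-free intersection in one line.\n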
for i in range(len(arr1)):\n    for j in range(len(arr2)):\n        if arr2[j] == arr1[i]:\n            same.append(arr2[j])\n            break\nsame.sort()\nprint(same)","sub_path":"Code/CodeRecords/2491/60679/311191.py","file_name":"311191.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"446171951","text":"\"\"\"Contains the functions for the Blast page.\n\nReturns CDS data.\nUpdates CDS data.\nParses BLAST results.\n\nAttributes:\n    response_object:\n        The dictionary that is returned by the main functions.\n    ROOT:\n        The root directory.\n\"\"\"\nfrom werkzeug.utils import secure_filename\nfrom contextlib import closing\nfrom zipfile import ZipFile\nimport time\nfrom Bio import SeqIO, Seq, SeqFeature\nfrom collections import OrderedDict\nimport subprocess\nfrom models import *\nimport json\nimport os\nimport pandas as pd\nimport re\nimport zipfile\nfrom sys import getsizeof\nfrom datetime import datetime\n\nROOT = os.path.dirname(os.path.abspath(__file__))\n\n# ------------------------------ MAIN FUNCTIONS ------------------------------\ndef find_blast_zip(phage_id):\n    \"\"\"Checks whether the blast zip exists.\n\n    Args:\n        phage_id:\n            The current user ID.\n\n    Returns:\n        A dictionary of status flags for the download, upload and annotation steps.\n    \"\"\"\n    response_object = {}\n    response_object[\"blast_downloaded\"] = False\n    response_object[\"uploaded\"] = True\n    response_object[\"annotated\"] = False\n    response_object[\"annotation_in_progress\"] = False\n    response_object[\"blast_input_in_progress\"] = False\n    if db.session.query(Tasks).filter_by(phage_id=phage_id).filter_by(function=\"auto_annotate\").first() is not None:\n        response_object[\"annotation_in_progress\"] = True\n    for filename in os.listdir(os.path.join(ROOT, 'users', phage_id, 'uploads')):\n        if (filename.endswith('.gdata')):\n            response_object[\"annotated\"] = True\n            break\n    if (db.session.query(Blast_Results).filter_by(phage_id=phage_id).first() is None):\n        response_object[\"uploaded\"] = False\n    if db.session.query(Tasks).filter_by(phage_id=phage_id).filter_by(function=\"blast_input\").first() is not None:\n        response_object[\"blast_input_in_progress\"] = True\n    for filename in os.listdir(os.path.join(ROOT, 'users', phage_id)):\n        if filename.endswith('.zip'):\n            response_object[\"blast_downloaded\"] = True\n            break\n    return response_object\n\ndef download_blast_input(phage_id):\n    \"\"\"Returns the blast input zip folder.\n\n    Args:\n        phage_id:\n            The current user ID.\n\n    Returns:\n        The blast input files in a zip folder.\n    \"\"\"\n    # Use a context manager so the handle is closed after reading.\n    with open(os.path.join(ROOT, 'users', phage_id, f\"{phage_id}_blast.zip\"), \"rb\") as f:\n        return f.read()\n\ndef dropzone(phage_id, UPLOAD_FOLDER, request):\n    \"\"\"Adds the blast output file to the upload directory if it is of type json.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        UPLOAD_FOLDER:\n            The folder containing all the uploaded files.\n        request:\n            The request carrying the file to be uploaded.\n    \"\"\"\n    file = request.files['file']\n    contents = str(file.read(), 'utf-8')\n    if file:\n        file_name = secure_filename(file.filename)\n        found = False\n        for existing_file in os.listdir(UPLOAD_FOLDER):\n            if existing_file.endswith(file_name):\n                found = True\n                with open(os.path.join(UPLOAD_FOLDER, existing_file), 'a+') as f:\n                    f.write(contents)\n                if contents[-6:] == \"\\n]\\n}\\n\\n\":\n                    file_data = db.session.query(Files).filter_by(phage_id=phage_id).filter_by(name=file_name).first()\n                    file_data.complete = True\n                    db.session.commit()\n        if not found:\n            with open(os.path.join(UPLOAD_FOLDER, file_name), 'w') as f:\n                f.write(contents)\n            if contents[-6:] == \"\\n]\\n}\\n\\n\":\n                file_data = db.session.query(Files).filter_by(phage_id=phage_id).filter_by(name=file_name).first()\n                file_data.complete = True\n                db.session.commit()\n\ndef get_blast_output_names(phage_id, UPLOAD_FOLDER, type_of_call):\n    \"\"\"Gets the names of all the files of type json in the upload directory.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        UPLOAD_FOLDER:\n            The folder containing all the uploaded files.\n
        type_of_call:\n            A string indicating whether this function was called from vue or from dropzone.\n\n    Returns:\n        A dictionary containing a list of the blast output file names.\n    \"\"\"\n    response_object = {}\n    file_names = []\n    file_sizes = []\n    file_mods = []\n    bad_files = []\n    for file in os.listdir(UPLOAD_FOLDER):\n        if file.endswith(\".json\"):\n            file_data = db.session.query(Files).filter_by(phage_id=phage_id).filter_by(name=file).first()\n            if file_data != None and file_data.complete:\n                file_mods.append(file_data.date)\n                file_sizes.append(file_data.size)\n                file_names.append(file_data.name)\n            elif file_data != None:\n                bad_files.append(file_data.name)\n            else:\n                os.remove(os.path.join(UPLOAD_FOLDER, file))\n    if type_of_call == \"refresh\":\n        for file_name in bad_files:\n            os.remove(os.path.join(UPLOAD_FOLDER, file_name))\n            delete_file = db.session.query(Files).filter_by(phage_id=phage_id).filter_by(name=file_name).first()\n            db.session.delete(delete_file)\n    response_object[\"bad_files\"] = bad_files\n    response_object[\"file_names\"] = file_names\n    response_object[\"file_sizes\"] = file_sizes\n    response_object[\"file_mods\"] = file_mods\n    response_object[\"in_process\"] = False\n    response_object[\"position\"] = -1\n    response_object[\"result\"] = \"not complete\"\n    response_object[\"blast_input_complete\"] = False\n    task = db.session.query(Tasks).filter_by(phage_id=phage_id).filter_by(function=\"auto_annotate\").first()\n    if (task is not None):\n        curr_tasks = db.session.query(Tasks).filter_by(complete=False).order_by(Tasks.time)\n        counter = 0\n        for curr_task in curr_tasks:\n            if curr_task.phage_id == phage_id:\n                response_object[\"position\"] = counter\n                if curr_task.result == \"executing\":\n                    response_object[\"position\"] = 0\n                break\n            counter += 1\n        response_object[\"in_process\"] = True\n        if (task.complete):\n            response_object[\"in_process\"] = False\n            response_object[\"result\"] = task.result\n            db.session.delete(task)\n            db.session.commit()\n    task = db.session.query(Tasks).filter_by(phage_id=phage_id).filter_by(function=\"blast_input\").first()\n    if (task and task.complete):\n        response_object[\"blast_input_complete\"] = True\n        response_object[\"num_files\"] = task.result\n        db.session.delete(task)\n        db.session.commit()\n    return response_object\n\ndef delete_blast_output(phage_id, UPLOAD_FOLDER, file_path):\n    \"\"\"Removes a file from the upload directory given the file path.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        UPLOAD_FOLDER:\n            The folder containing all the uploaded files.\n        file_path:\n            The path of the file to be removed.\n\n    Returns:\n        A dictionary containing a success message.\n    \"\"\"\n    response_object = {}\n    try:\n        os.remove(os.path.join(UPLOAD_FOLDER, file_path))\n        delete_file = db.session.query(Files).filter_by(phage_id=phage_id).filter_by(name=file_path).first()\n        db.session.delete(delete_file)\n        response_object[\"status\"] = \"success\"\n        db.session.query(Blast_Results).filter_by(phage_id=phage_id).delete()\n        try:\n            db.session.commit()\n        except:\n            print(\"error in clearing table\")\n    except:\n        print(\"error in deleting \" + file_path)\n        response_object[\"status\"] = \"error in deleting \" + file_path\n\n    return response_object\n\ndef delete_all_blast(phage_id, UPLOAD_FOLDER):\n    \"\"\"Deletes all data associated with the BLAST results.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        UPLOAD_FOLDER:\n            The folder containing all the uploaded files.\n    \"\"\"\n    if db.session.query(Tasks).filter_by(phage_id=phage_id).filter_by(function=\"parse_blast\").first() is None:\n
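        # Safe to proceed: no parse_blast task is queued or running for this\n        # user, so the uploaded results can be removed without corrupting a parse.\n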
        db.session.query(Blast_Results).filter_by(phage_id=phage_id).delete()\n        db.session.query(Files).filter_by(phage_id=phage_id).delete()\n        db.session.commit()\n        for filename in os.listdir(UPLOAD_FOLDER):\n            if filename.endswith('.json'):\n                os.remove(os.path.join(UPLOAD_FOLDER, filename))\n        return \"success\"\n    else:\n        return \"fail\"\n\ndef new_file(phage_id, file_path, file_method):\n    \"\"\"Adds the new file information to the database.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        file_path:\n            The file name with its size appended after the '.json' extension.\n        file_method:\n            The upload date stored with the file record.\n    \"\"\"\n    index = file_path.find(\".json\")\n    name = secure_filename(file_path[0:index + 5])\n    size = file_path[index + 5:]\n    file_data = Files(phage_id=phage_id,\n                      name=name,\n                      date=file_method,\n                      size=size,\n                      complete=False)\n    try:\n        db.session.add(file_data)\n        db.session.commit()\n    except:\n        return \"already added\"\n    return \"success\"\n\ndef add_annotation_task(phage_id, UPLOAD_FOLDER):\n    \"\"\"Adds a task to the database to be executed.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        UPLOAD_FOLDER:\n            The folder containing all the uploaded files.\n    \"\"\"\n    args = UPLOAD_FOLDER + \" \" + phage_id\n    task = Tasks(phage_id=phage_id,\n                 function=\"auto_annotate\",\n                 arguments=args,\n                 complete=False,\n                 result=\"waiting\",\n                 time=str(datetime.now()))\n    try:\n        db.session.add(task)\n        db.session.commit()\n    except:\n        return \"Error in adding task to queue\"\n    return \"success\"\n\ndef add_blast_input_task(UPLOAD_FOLDER, phage_id):\n    \"\"\"Adds a task to the database to be executed.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n        UPLOAD_FOLDER:\n            The folder containing all the uploaded files.\n    \"\"\"\n    args = UPLOAD_FOLDER + \" \" + phage_id\n    task = Tasks(phage_id=phage_id,\n                 function=\"blast_input\",\n                 arguments=args,\n                 complete=False,\n                 result=\"waiting\",\n                 time='0' + str(datetime.now()))\n    try:\n        db.session.add(task)\n        db.session.commit()\n    except:\n        return \"Error in adding task to queue\"\n    return \"success\"\n\n\ndef get_num_blast_files(phage_id):\n    \"\"\"Gets the number of Blast input files in the zip folder.\n\n    Args:\n        phage_id:\n            The ID of the current user.\n\n    Returns:\n        A string containing the number of Blast files or 'None' if not found.\n    \"\"\"\n    for filename in os.listdir(os.path.join(ROOT, 'users', phage_id)):\n        if filename.endswith('.zip'):\n            with closing(ZipFile(os.path.join(ROOT, 'users', phage_id, filename))) as archive:\n                num_blast_files = len(archive.infolist())\n            return str(num_blast_files)\n    return \"None\"","sub_path":"back-end/blast.py","file_name":"blast.py","file_ext":"py","file_size_in_byte":11081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"558864416","text":"def init_game(players, N):\n    for player_idx in range(0, N):\n        players[player_idx] = 'Alive'\n\ndef get_next_idx(index, N, I, players):\n    # count alive players clockwise from the current index until the I-th one\n    alive_seen = 0\n    index = index + 1\n    while True:\n        if index >= N:\n            index = index - N\n        if players[index] == 'Alive':\n            alive_seen += 1\n            if alive_seen == I:\n                return index\n        index += 1\n\ndef get_good_idx(players):\n    # 1-based position of the last player left alive\n    for i, pl in enumerate(players):\n        if pl == 'Alive':\n            return i + 1\n\ndef has_to_stop(players):\n    stop = False\n    alives = 0\n    for pl in players:\n        if pl == 'Alive':\n            alives += 1\n    if alives == 1:\n        stop = True\n    return stop\n\ndef hot_potato(N, I, players):\n    idx = I - 1\n    loop = 1\n    while True:\n        if loop == 1:\n            players[idx] = 'Dead'\n            loop += 1\n            # stop if only one player is left after the first elimination\n            # (the original skipped this check and could loop forever)\n            if has_to_stop(players):\n                return\n            continue\n\n        idx = get_next_idx(idx, N, I, players)\n        players[idx] = 'Dead'\n\n        if has_to_stop(players):\n            return\n\nN = 7\nI = 3\nassert I <= N\n\nplayers = list(range(N))\n\n
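# Eliminating every I-th surviving player around a circle is the classic\n# Josephus problem; the winning seat could also be computed directly from the\n# Josephus recurrence instead of by simulation.\n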
init_game(players, N)\nprint(f'Players: {players}\\n')\n\nhot_potato(N, I, players)\nprint(f'Alive Player: {players}\\n')\n\ngoodIdx = get_good_idx(players)\nprint(f'Position to win: {goodIdx}')\n","sub_path":"Algorithm/hot_potato/hot_potato.py","file_name":"hot_potato.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"606690484","text":"import tweepy\nfrom tweepy.streaming import StreamListener\n\n# Consumer keys used; these are no longer valid and cannot be used\nconsumerKey = \"\"\nconsumerSecret = \"\"\naccessToken = \"\"\naccessSecret = \"\"\n# authenticate() will connect to Twitter to verify the API keys using the tweepy library (followed from tutorial)\n# link to source: https://tweepy.readthedocs.io/en/v3.5.0/auth_tutorial.html\n\ndef authenticate(consumerKey, consumerSecret, accessToken, accessSecret):\n    auth = tweepy.OAuthHandler(consumerKey, consumerSecret)\n    auth.set_access_token(accessToken, accessSecret)\n    api = tweepy.API(auth)\n    try:\n        redirect_url = auth.get_authorization_url()\n    except tweepy.TweepError:\n        print('Error! Failed to get request token.')\n    auth.access_token = accessToken\n    auth.access_token_secret = accessSecret\n    api = tweepy.API(auth)\n    # get the list of favorites from my account using the API\n    timeline = tweepy.Cursor(api.favorites).items()\n    # Loop to destroy (unfavorite) tweets\n    # Referenced from here: http://www.mathewinkson.com/2015/03/delete-old-tweets-selectively-using-python-and-tweepy\n\n    for tweet in timeline:\n        api.destroy_favorite(tweet.id)\n\n# Call the function\nauthenticate(consumerKey, consumerSecret, accessToken, accessSecret)\n","sub_path":"boddy.py","file_name":"boddy.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"400721886","text":"public_key = ''  # account public key\nprivate_key = ''  # account private key\n\nbucket = ''  # bucket (storage space) name\n\nfrom ufile import filemanager\n\nlistobjects_hander = filemanager.FileManager(public_key, private_key)\n\nprefix = ''  # list files whose names begin with this prefix\nmaxkeys = 100  # maximum number of entries returned per request; defaults to 100, must not exceed 1000\nmarker = ''  # return entries that sort alphabetically after marker\ndelimiter = '/'  # directory separator; only \"/\" and \"\" are supported. With delimiter \"/\" and a prefix ending in \"/\", the children of the prefix directory are returned; with delimiter \"\", all files starting with the prefix are returned\n\n# Simple usage (a single request returns all the results)\ndef once_list():\n    ret, resp = listobjects_hander.listobjects(bucket, prefix=prefix, maxkeys=maxkeys, marker=marker, delimiter=delimiter)\n    assert resp.status_code == 200\n\n    for obj in ret['Contents']:  # child files\n        print(obj)\n\n    for obj in ret['CommonPrefixes']:  # child directories\n        print(obj)\n\n# A single request caps how many entries it returns, so when one request cannot\n# fetch everything, keep querying with the returned 'NextMarker' until done\ndef loop_list():\n    next_marker = marker  # the original re-used the global name here, which raised UnboundLocalError\n    while True:\n        ret, resp = listobjects_hander.listobjects(bucket, prefix=prefix, maxkeys=maxkeys, marker=next_marker, delimiter=delimiter)\n        assert resp.status_code == 200\n\n        for obj in ret['Contents']:  # child files\n            print(obj)\n\n        for obj in ret['CommonPrefixes']:  # child directories\n            print(obj)\n\n        next_marker = ret['NextMarker']\n        # stop when no marker is left or the page came back short (the original\n        # comparison was inverted)\n        if len(next_marker) <= 0 or len(ret['Contents']) < maxkeys:\n            break","sub_path":"examples/example_listobjects.py","file_name":"example_listobjects.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}