diff --git "a/3220.jsonl" "b/3220.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3220.jsonl"
@@ -0,0 +1,659 @@
+{"seq_id":"134496848","text":"import time\nfrom bitalino import BITalino\n\nbit_info = {\n \"macAddress\": \"98:D3:41:FD:50:0C\",\n \"version\": [0],\n \"batteryThreshold\": 0, # corresponds to 3.4 V\n \"acqChannels\": [0, 1, 2, 3, 4, 5],\n \"fs\": 1000,\n \"nSamples\": 1000,\n}\n\n# This example will collect data for 5 sec.\nrunning_time = 6\n\n# setup device\ndevice = BITalino(bit_info['macAddress']) # Connect to BITalino\ndevice.battery(bit_info['batteryThreshold']) # Set battery threshold\nbit_info['version'] = device.version() # Read BITalino versio\ndevice.start(bit_info['fs'], bit_info['acqChannels']) # Start Acquisition\n\n# state = device.state()\n# print(state)\n\n\nstart = time.time()\nend = time.time()\ndevice.trigger([1, 1])\n\nwhile (end - start) < running_time:\n # Read samples\n data = (device.read(bit_info['nSamples']))\n print(data)\n end = time.time()\n # Turn BITalino led on\n\ndevice.trigger([0, 0])\n\n# Stop acquisition\ndevice.stop()\n\n# Close connection\ndevice.close()\n","sub_path":"Python/Bitalino/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"388769383","text":"def Fibonacci(n):\r\n print('-'*30)\r\n print('Fibonnaci')\r\n print('-'*30)\r\n t1 = 0\r\n t2 = 1\r\n print(f'{t1} - {t2}', end='')\r\n cont = 3\r\n while cont <= n:\r\n t3 = t1 + t2\r\n print(f' - {t3}', end='')\r\n t1 = t2\r\n t2 = t3\r\n cont += 1\r\n\r\nn1 = int(input('Input number: '))\r\n\r\nFibonacci(n1)\r\n","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"619352250","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/////////////////////////////////////////////////////////////////////////////\n//\n// Project/Path: %M%\n// Last Change Set: %L% (%G% %U%)\n//\n/////////////////////////////////////////////////////////////////////////////\n//\n// COPYRIGHT Vusion Technologies, all rights reserved.\n//\n// No part of this software may be reproduced or modified in any\n// form or by any means - electronic, mechanical, photocopying,\n// recording, or otherwise - without the prior written consent of\n// Vusion Technologies.\n//\n\n@author: jacky.chow\n\"\"\"\n\ndef warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn\n\n\nimport numpy as np\nfrom sklearn.externals import joblib\n\ninputFilename = '/home/jckchow/BundleAdjustment/build/image.jck'\n\nprint (\"-----------Copy previous Decision Tree Model to current decision tree model-----------\")\n\n##########################################\n### read in the residuals output from bundle adjustment\n# x, y, v_x, v_y, redu_x, redu_y, vStdDev_x, vStdDev_y\n##########################################\nimage = np.genfromtxt(inputFilename, delimiter=' ', skip_header=0, usecols = (2,3,4,5,6,7,8,9,10))\n\nsensorsUnique = np.unique(image[:,0])\n##########################################\n### Try to load preprocessing and ML model if it exists from previous iteration to make a copy for when the iterations end before the max iterations then we should use the previous model instead\n##########################################\ntry:\n for iter in range(0,len(sensorsUnique)):\n sensorID = sensorsUnique[iter] #currently sensor ID\n # load the preprocessing info\n [min_x, min_y, max_x, max_y, desire_min, desire_max, mean_label] = joblib.load('/home/jckchow/BundleAdjustment/build/decisionTreePreprocessing'+str(sensorID.astype(int))+'Temp.pkl')\n # load the learned NN model\n reg = joblib.load('/home/jckchow/BundleAdjustment/build/decisionTreeModel'+str(sensorID.astype(int))+'Temp.pkl')\n # save copy of previous preprocessing\n joblib.dump([min_x, min_y, max_x, max_y, desire_min, desire_max, mean_label], '/home/jckchow/BundleAdjustment/build/decisionTreeePreprocessing'+str(sensorID.astype(int))+'.pkl')\n # save the previously learned NN model\n joblib.dump(reg, '/home/jckchow/BundleAdjustment/build/decisionTreeModel'+str(sensorID.astype(int))+'.pkl')\n print('Found previous ML preprocessing and model found, done copying decisionTreeModel'+str(sensorID.astype(int))+'Temp.pkl to decisionTreeModel'+str(sensorID.astype(int))+'.pkl')\nexcept:\n print('No previous ML preprocessing and model found, not copying/renaming')","sub_path":"python/decisionTreeRename.py","file_name":"decisionTreeRename.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"139455745","text":"##\n# @brief Calculate and print year-1 revenue, cost, and profit from US and EU\n#\n# @param gamers Number of gamers in millions\n# @param nongamers Number of non-gamers in millions\n# @param gamer_conversion Fraction of gamers that converts to Stadia\n# @param nongamer_conversion Fraction of non-gamers that converts to Stadia\n#\n# @return \ndef run_year1(gamers, nongamers, gamer_conversion, nongamer_conversion):\n # Calculate total users\n users = gamers * gamer_conversion + nongamers * nongamer_conversion\n print(\"----------------------------------------\")\n print(\"Num users: {}M\".format(users))\n\n # \n rev_youtube = 1.2 * .25 * 20 * 0.06 * 365\n\n # Revenue from royalties = #users * $15 per game * two games per year\n rev_royalty = users * 15 * 2\n\n # Revenue from subscriptions: Assume 2/3rd users pay for subscription.\n # Revenue per subscription is $10 per month * 12 months per year\n rev_subscriptions = users * (2./3) * 10 * 12\n\n # Total revenue in year one\n rev_total = rev_youtube + rev_royalty + rev_subscriptions\n\n # Cost of providing streaming = users * 8 hours per week * 52 weeks per year\n # * 3 cents per hour of streaming\n cost_streaming = users * 8 * 52 * 0.03\n\n # Cost of datacenters: Assume each user plays for 8 hours per week. Each\n # server runs for 168 hours per week (24 * 7). Number of servers is\n # therefore (users * 8 / 168). Each server costs $500.\n cost_computing_fixed = users * 8 / 168. * 500\n\n # Variable cost of computing: Number of servers times XXX?\n cost_computing_var = users * 8 / 168. * 168 * 2 * 0.01\n\n # Marketing cost\n cost_marketing = 88.34 + 1.1\n\n cost_total = cost_streaming + cost_computing_fixed + cost_computing_var + cost_marketing\n print(\"----------------------------------------\")\n\n print(\"Rev YouTube: {}M\".format(round(rev_youtube, 2)))\n print(\"Rev Royalty {}M\".format(round(rev_royalty, 2)))\n print(\"Rev Subscriptions {}M\".format(round(rev_subscriptions, 2)))\n print(\"Cost Streaming {}M\".format(round(cost_streaming, 2)))\n print(\"Cost Computing fixed {}M\".format(round(cost_computing_fixed, 2)))\n print(\"Cost Computing var {}M\".format(round(cost_computing_var, 2)))\n print(\"Cost Marketing {}M\".format(round(cost_marketing, 2)))\n\n print(\"----------------------------------------\")\n\n print(\"Cost: {}M\".format(round(cost_total, 2)))\n print(\"Revenue: {}M\".format(round(rev_total, 2)))\n print(\"Profit: {}M\".format(round(rev_total - cost_total, 2)))\n\n##\n# @brief Calculate and print cost and revenue in the APAC region\n#\n# @param num_asia_users: Number of users in the APAC region\ndef run_asia(num_asia_users):\n users = num_asia_users\n rev_youtube = 1.2 * .25 * 20 * 0.06 * 365\n rev_royalty = users * 15 * 2\n rev_subscriptions = users / 2. * 10 * 12\n\n rev_total = rev_youtube + rev_royalty + rev_subscriptions\n\n cost_streaming = users * 8 * 52 * 0.03\n cost_computing_fixed = users * 8 / 168. * 500\n cost_computing_var = users * 8 / 168. 
* 168 * 2 * 0.01\n cost_marketing = 88.34 + 1.1\n cost_marketing_asia = 110.\n\n cost_total = cost_streaming + cost_computing_fixed + cost_computing_var + cost_marketing + cost_marketing_asia\n\n print(\"====================================================\")\n\n print(\"Asia Cost: {}M\".format(round(cost_total, 2)))\n print(\"Asia Revenue: {}M\".format(round(rev_total, 2)))\n print(\"Asia Profit: {}M\".format(round(rev_total - cost_total, 2)))\n\ndef run_googlefi(num_googlefi):\n\n rev_googlefi = 4 * 52 * 0.5\n rev_total = rev_googlefi * 0.2 # 20% profit margins\n cost_total = 0\n\n print(\"-------------------------------------------------\")\n\n print(\"Mobile Cost: {}M\".format(round(cost_total, 2)))\n print(\"Mobile Revenue: {}M\".format(round(rev_total, 2)))\n print(\"Mobile Profit: {}M\".format(round(rev_total - cost_total, 2)))\n\ndef run_nextgen(gamers, nongamers, gamer_conversion, nongamer_conversion, num_asia_users):\n users = gamers * gamer_conversion + nongamers * nongamer_conversion + num_asia_users\n print(\"Num users: {}M\".format(users))\n\n rev_youtube = 1.2 * .25 * 20 * 0.06 * 365\n rev_royalty = users * 15 * 2\n rev_subscriptions = users * (2./3) * 10 * 12\n\n rev_total = rev_youtube + rev_royalty + rev_subscriptions\n\n cost_streaming = users * 8 * 52 * 0.03\n cost_computing_fixed = users * 8 / 168. * 500\n cost_computing_var = users * 8 / 168. * 168 * 2 * 0.01\n cost_marketing = 88.34 + 1.1\n\n cost_total = cost_streaming + cost_computing_fixed + cost_computing_var + cost_marketing\n\n print(\"----------------------------------------\")\n\n print(\"VR Cost: {}M\".format(round(cost_total, 2)))\n print(\"VR Revenue: {}M\".format(round(rev_total, 2)))\n print(\"VR Profit: {}M\".format(round(rev_total - cost_total, 2)))\n\nif __name__ == \"__main__\":\n\n # Assuming total population in NAC and EMEA is 700 M\n gamers = 400. * 0.3\n nongamers = 300. * 0.15\n gamer_conversion = 0.05 #2./6\n nongamer_conversion = 0.08 #2./5\n num_asia_users = 14.\n print(\"\\n**********YEAR1**************\\n\")\n run_year1(gamers, nongamers, gamer_conversion, nongamer_conversion)\n print(\"\\n**********ASIA**************\\n\")\n run_asia(num_asia_users)\n print(\"\\n**********MOBILE**************\\n\")\n run_googlefi(4)\n print(\"\\n**********VR**************\\n\")\n\n vr_gamer_conversion = gamer_conversion * .24\n vr_nongamer_conversion = nongamer_conversion * .24\n run_nextgen(gamers, nongamers, vr_gamer_conversion, vr_nongamer_conversion, num_asia_users * .24)\n\n","sub_path":"files/inspectorgraduates_financials.py","file_name":"inspectorgraduates_financials.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"281712902","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom tests.lighthouse_base import run_test as lighthouse_base_run_test\nimport config\nfrom tests.utils import *\nimport gettext\n\n# DEFAULTS\ngooglePageSpeedApiKey = config.googlePageSpeedApiKey\nreview_show_improvements_only = config.review_show_improvements_only\nlighthouse_use_api = config.lighthouse_use_api\n\n\ndef run_test(_, langCode, url, strategy='mobile', category='accessibility'):\n\n language = gettext.translation(\n 'a11y_lighthouse', localedir='locales', languages=[langCode])\n language.install()\n _local = language.gettext\n\n print(_local('TEXT_RUNNING_TEST'))\n\n print(_('TEXT_TEST_START').format(\n datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n test_result = lighthouse_base_run_test(\n _, langCode, url, googlePageSpeedApiKey, strategy, category, review_show_improvements_only, lighthouse_use_api)\n rating = test_result[0]\n test_return_dict = test_result[1]\n\n review = rating.overall_review\n points = rating.get_overall()\n if points >= 5.0:\n review = _local(\"TEXT_REVIEW_A11Y_VERY_GOOD\")\n elif points >= 4.0:\n review = _local(\"TEXT_REVIEW_A11Y_IS_GOOD\")\n elif points >= 3.0:\n review = _local(\"TEXT_REVIEW_A11Y_IS_OK\")\n elif points > 1.0:\n review = _local(\"TEXT_REVIEW_A11Y_IS_BAD\")\n elif points <= 1.0:\n review = _local(\"TEXT_REVIEW_A11Y_IS_VERY_BAD\")\n\n rating.overall_review = review\n\n print(_('TEXT_TEST_END').format(\n datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n return (rating, test_return_dict)\n","sub_path":"tests/a11y_lighthouse.py","file_name":"a11y_lighthouse.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"20543678","text":"import tensorflow as tf\nimport numpy as np\nimport os\nfrom PIL import Image\nimport sys\nTFRCORD_FILE_DIR = 'tfrecord/'#tf文件目录\nIMAGE_DIR = '../iphoto/' #图像所在\nLABEL_DIR = '../itext/' #标签所在\nNUM_TEST = 200 #测试集大小\nfilePath = \"../txt/\" #所有的汉字的目录文件\ndef _gen_dict():\n dic = []\n for filename in os.listdir(filePath):\n if not filename.startswith(\".\"):\n with open(filePath + filename, encoding='utf-8') as file:\n for line in file.readlines():\n content = line.split(\",\")[8]\n if \"###\" not in content:\n for c in content:\n if c != '\\n':\n dic.append(c)\n for c in [chr(x) for x in range(33, 127)]:\n dic.append(c)\n d = list(set(dic))\n with open(\"tf_dictset.txt\", 'w+', encoding='utf-8') as out: #所有汉字集中存放文件\n for c in d:\n out.write(c + '\\n')\n return d\n\n\ndef _tfrecord_exist(check_dir):\n for type in ['train','test']:\n path_name = os.path.join(check_dir,type+'.tfrecords')\n if tf.gfile.Exists(path_name):\n return True\n return False\n\ndef _get_paths_image_or_label(root_dir):\n path_list =[]\n for filename in os.listdir(root_dir):\n dir = os.path.join(root_dir,filename)\n path_list.append(dir)\n return path_list\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\ndef _int64_feature(value):\n if not isinstance(value,(tuple,list)):\n value=[value]\n return tf.train.Feature(int64_list = tf.train.Int64List(value=value))\ndef _gen_example_by_data(image_data,label_data):\n return tf.train.Example(features=tf.train.Features(feature = {\n 'image':_bytes_feature(np.array(image_data).tobytes()),\n 'image_h':_int64_feature(np.shape(image_data)[0]),\n 'image_w':_int64_feature(np.shape(image_data)[1]),\n 'image_channel':_int64_feature(np.shape(image_data)[2]),\n 'label':_int64_feature(label_data)\n }))\n\ndef _gen_tfrecords(type,image_list,label_list):\n assert type in ['train','test']\n dic = _gen_dict()\n with tf.Session() as sess:\n tfrecord_path_name = os.path.join(TFRCORD_FILE_DIR,type+'.tfrecords')\n with tf.python_io.TFRecordWriter(tfrecord_path_name) as tfile:\n for i,image_name in zip(range(len(image_list)),image_list):\n try:\n label_name = '../itext/'+image_name.split('/')[-1].split('.')[0]+'.txt' #一个照片对应相应的label\n print(image_name,label_name)\n sys.stdout.write('\\n>已经保存:%d/%d' % (i+1,len(image_list)))\n sys.stdout.flush()\n image = Image.open(image_name)\n #---\n f = open(label_name, 'r', encoding='utf-8')\n text = f.readline().strip()\n label_data = [dic.index(x) + 1 for x in text]\n f.close()\n #---\n\n example = _gen_example_by_data(image,label_data)\n tfile.write(example.SerializeToString())\n except:\n print('转化tfrecords文件出错')\n tfile.close()\n return\n tfile.close()\n\n\nif __name__ =='__main__':\n if _tfrecord_exist(TFRCORD_FILE_DIR):\n print('tfrecord file exist!')\n else:\n\n #获取所有图片、标签的路径\n all_image_paths= _get_paths_image_or_label(IMAGE_DIR)\n all_label_paths= _get_paths_image_or_label(LABEL_DIR)\n\n #划分训练集和测试集\n train_image_paths_list=all_image_paths[NUM_TEST:]\n test_image_paths_list=all_image_paths[:NUM_TEST]\n\n train_label_paths_list=all_label_paths[NUM_TEST:]\n test_label_paths_list=all_label_paths[:NUM_TEST]\n\n #生成训练集的TF文件\n\n _gen_tfrecords('train',train_image_paths_list,train_label_paths_list)\n #生成测试集的TF文件\n _gen_tfrecords('test',test_image_paths_list,test_label_paths_list)\n\n 
print('\\n完毕!')\n\n\n\n\n\n\n\n\n\n","sub_path":"tensorflow/2018_6_10/mycode/tensorflow_code/towindows/learn_tfrecord_file/save_to_tfrecord.py","file_name":"save_to_tfrecord.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"67623199","text":"\n\nclass EasyDict(dict):\n def get(self, *keys, **kwds):\n for key in keys:\n if key in self:\n return self[key]\n\n for key in keys:\n k = key.strip().lower()\n for ik, iv in self.items():\n if ik.lower() == k:\n return iv\n\n return kwds.get(\"default\", None)\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n id = key.strip().lower()\n for k, v in self.items():\n if k.lower() == id:\n return v\n return None\n # raise KeyError(\"The value could not be found: \" + str(key))\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __delattr__(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def __repr__(self):\n return \"<\" + self.__class__.__name__ + \" \" + dict.__repr__(self) + \">\"\n\n def __str__(self):\n return self.__repr__()\n\n @classmethod\n def create(cls, *args, **kwargs):\n instance = cls()\n for arg in args:\n if isinstance(arg, dict):\n for k, v in arg.items():\n instance[k] = v\n for k, v in kwargs.items():\n instance[k] = v\n\n return instance","sub_path":"fuze/util/structs/dicts.py","file_name":"dicts.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"20247379","text":"#Jack Robey\n#11/30/17\n#warmup15.py - doubles the numbers in a list\n\ndef doubled(L):\n L2 = []\n for i in L:\n L2.append(2*i)\n return L2\n\nprint(doubled([2,4,6]))\n \n","sub_path":"warmup15.py","file_name":"warmup15.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"643751246","text":"from typing import Sequence\n\nimport numpy as np\n\nfrom facet.simulation.partition import (\n ContinuousRangePartitioner,\n IntegerRangePartitioner,\n)\n\n\ndef test_discrete_partitioning() -> None:\n for i in range(0, 10000):\n values: Sequence[int] = np.random.randint(\n low=0, high=10000, size=max(int(np.random.rand() * 1000), 3)\n )\n # noinspection PyTypeChecker\n dvp = IntegerRangePartitioner(\n max_partitions=IntegerRangePartitioner.DEFAULT_MAX_PARTITIONS\n ).fit(values=values)\n # test correct number of partitions\n assert len(dvp.partitions_) <= IntegerRangePartitioner.DEFAULT_MAX_PARTITIONS\n\n\ndef test_continuous_partitioning() -> None:\n for i in range(0, 10000):\n values = (\n np.random.randint(\n low=0, high=10000, size=max(int(np.random.rand() * 1000), 3)\n )\n * np.random.rand()\n )\n cvp = ContinuousRangePartitioner(\n max_partitions=ContinuousRangePartitioner.DEFAULT_MAX_PARTITIONS\n ).fit(values=values)\n # test correct number of partitions\n assert len(cvp.partitions_) <= ContinuousRangePartitioner.DEFAULT_MAX_PARTITIONS\n","sub_path":"test/test/facet/test_partition.py","file_name":"test_partition.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"578558586","text":"# for cnn\nfrom sensor_msgs.msg import Image\nimport sys, cv2\nimport tensorflow as tf\nimport scipy as sp\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\n\n# path = '/home/centauro/terrain_classifier/vision' #os.getcwd()\npath = '/home/xi/workspace/catkin_centauro/src/fused_terrain_classifier/src/vision' #os.getcwd()\n\nsys.path.append(path + '/segmentation/')\nsys.path.append(path + '/segmentation/datasets/')\nsys.path.append(path + '/segmentation/models')\nsys.path.append(path + '/segmentation/notebooks')\nimport layers\nimport fcn8s\nimport util\nimport cityscapes\nfrom colorize import colorize\nfrom class_mean_iou import class_mean_iou\n\nimage_shape = [1, 256, 512, 3]\nsess = tf.InteractiveSession()\nimage_op = tf.placeholder(tf.float32, shape=image_shape)\n\nlogits_op = fcn8s.inference(image_op)\npredictions_op = layers.predictions(logits_op)\npredictions_op_prob = tf.nn.softmax(logits_op)\n\ninit_op = tf.global_variables_initializer()\nsess.run(init_op)\n\nbridge = CvBridge()\n\nsaver = tf.train.Saver()\nsaver.restore(sess, path + '/tf_models/fcn8s_augment_finetune/' + 'fcn8s_augment.checkpoint-30')\n# prediction_publisher = rospy.Publisher('/prediction_color', Image, queue_size=1)\n\n\ndef predice_image(img_msg):\n global g_kinect_img\n\n feature_vision = np.zeros( [1, 256, 512, 34], dtype=np.float32 )\n #np_arr = np.fromstring(img_msg.data, np.uint8) \n #image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR) \n image = bridge.imgmsg_to_cv2(img_msg)\n g_kinect_img = image*1\n # print(' image shape recieved:', image.shape)\n # image = bridge.imgmsg_to_cv2(img_msg)\n\n image = sp.misc.imresize(image, image_shape[1:], interp='bilinear')\n\n image = image[..., ::-1] # bgr to rgb\n\n image = (image - image.mean()) / image.std()\n \n feed_dict = {image_op: image[np.newaxis, ...]}\n \n prediction_label = sess.run(predictions_op, feed_dict=feed_dict)\n feature_vision = sess.run(predictions_op_prob, feed_dict=feed_dict)\n\n# pickle.dump(prediction_prob, open(\"/home/xi/workspace/labels/prob.p\", \"wb\"))\n prediction_label = colorize(prediction_label, cityscapes.augmented_labels)\n # image_message = bridge.cv2_to_imgmsg(prediction_label)\n # label_pub.publish(image_message)\n\n #cv2.imshow(\"prediction_label\", prediction_label)\n #cv2.waitKey(0)\n\n # prediction_label = prediction_label[..., ::-1] # rgb to bgr\n # prediction_publisher.publish(bridge.cv2_to_imgmsg(prediction_label))\n\n # print(' CNN feature done')\n return feature_vision\n","sub_path":"tool_functions/cnn_feature_extractor.py","file_name":"cnn_feature_extractor.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"320360202","text":"import zstackwoodpecker.test_state as ts_header\nimport os\nTestAction = ts_header.TestAction\ndef path():\n\n return dict(initial_formation=\"template5\", path_list=[\n [TestAction.create_mini_vm, \"vm1\", 'data_volume=false', 'cpu=random', 'memory=random', 'network=random', 'provisiong=thick'],\n [TestAction.change_vm_ha, \"vm1\"],\n [TestAction.create_vm_backup, \"vm1\", \"backup1\"],\n [TestAction.create_mini_vm, \"vm2\", 'data_volume=false', 'cpu=random', 'memory=random', 'network=random',\n 'provisiong=thick'],\n [TestAction.create_volume, \"volume1\", \"=scsi,thick\"],\n [TestAction.resize_data_volume, \"volume1\", 5 * 1024 * 1024],\n [TestAction.create_volume, \"volume2\", \"flag=scsi,thick\", \"size=random\"],\n [TestAction.create_mini_vm, \"vm3\", 'data_volume=false', 'cpu=random', 'memory=random', 'network=random',\n 'provisiong=thick'],\n [TestAction.delete_volume, \"volume1\"],\n [TestAction.recover_volume, \"volume1\"],\n [TestAction.add_image, \"image1\", 'root', \"http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2\"],\n [TestAction.attach_volume, \"vm1\", \"volume1\"],\n [TestAction.create_volume_backup, \"volume1\", \"backup1\"],\n [TestAction.change_vm_ha, \"vm1\"],\n [TestAction.stop_vm, \"vm1\"],\n [TestAction.use_volume_backup, \"backup1\"],\n [TestAction.start_vm, \"vm1\"],\n [TestAction.change_vm_ha, \"vm1\"],\n [TestAction.delete_image, \"image1\"],\n [TestAction.expunge_image, \"image1\"],\n [TestAction.create_volume_backup, \"volume1\",\"backup2\"],\n [TestAction.change_vm_ha, \"vm1\"],\n [TestAction.resize_volume, \"vm1\", 5 * 1024 * 1024],\n [TestAction.resize_volume, \"vm2\", 5 * 1024 * 1024],\n [TestAction.resize_volume, \"vm3\", 5 * 1024 * 1024],\n [TestAction.attach_volume, \"vm2\", \"volume2\"],\n [TestAction.create_volume, \"volume3\", \"flag=scsi,thick\"],\n [TestAction.create_mini_vm, \"vm4\", 'data_volume=false', 'cpu=random', 'memory=random', 'network=random', 'provisiong=thick'],\n [TestAction.add_image, \"image2\", 'root', \"http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2\"],\n [TestAction.delete_volume, \"volume2\"],\n [TestAction.expunge_volume, \"volume2\"],\n [TestAction.change_vm_ha, \"vm1\"],\n [TestAction.change_vm_ha, \"vm2\"],\n [TestAction.change_vm_ha, \"vm3\"],\n [TestAction.change_vm_ha, \"vm4\"],\n [TestAction.create_vm_backup, \"vm1\", \"backup3\"],\n [TestAction.create_vm_backup, \"vm2\", \"backup4\"],\n [TestAction.create_vm_backup, \"vm3\", \"backup5\"],\n [TestAction.create_vm_backup, \"vm4\", \"backup6\"],\n [TestAction.create_image_from_volume, \"vm1\", \"vm1-iamge1\"],\n [TestAction.create_image_from_volume, \"vm2\", \"vm2-iamge1\"],\n [TestAction.create_image_from_volume, \"vm3\", \"vm3-iamge1\"],\n [TestAction.create_image_from_volume, \"vm4\", \"vm4-iamge1\"],\n [TestAction.delete_image, \"vm1-image1\"],\n [TestAction.recover_image, \"vm1-image1\"],\n [TestAction.change_vm_ha, \"vm1\"],\n [TestAction.change_vm_ha, \"vm2\"],\n [TestAction.change_vm_ha, \"vm3\"],\n [TestAction.change_vm_ha, \"vm4\"],\n [TestAction.stop_vm, \"vm1\"],\n [TestAction.stop_vm, \"vm2\"],\n [TestAction.stop_vm, \"vm3\"],\n [TestAction.stop_vm, \"vm4\"],\n [TestAction.use_vm_backup, \"backup3\"],\n [TestAction.use_vm_backup, \"backup4\"],\n [TestAction.use_vm_backup, \"backup5\"],\n [TestAction.use_vm_backup, \"backup6\"],\n [TestAction.start_vm, \"vm1\"],\n [TestAction.start_vm, \"vm2\"],\n [TestAction.start_vm, \"vm3\"],\n [TestAction.start_vm, \"vm4\"],\n [TestAction.change_vm_ha, \"vm1\"],\n 
[TestAction.change_vm_ha, \"vm2\"],\n [TestAction.change_vm_ha, \"vm3\"],\n [TestAction.change_vm_ha, \"vm4\"],\n [TestAction.add_image, \"image3\", 'root', \"http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2\"],\n [TestAction.create_volume, \"volume4\", \"=scsi,thick\"],\n [TestAction.attach_volume, \"vm4\", \"volume4\"]])","sub_path":"integrationtest/vm/mini/paths/path17.py","file_name":"path17.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"204845937","text":"#with open(\"lstm_attention.py\") as fp:\n# for i, line in enumerate(fp):\n# if \"\\xe2\" in line:\n# print i, repr(line)\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg') \nimport matplotlib.pyplot as plt\n\ntrain_loss = [1,2,3]\nval_loss = [4,5,6]\n\nplotdict = dict()\nplotdict[\"train_loss\"] = train_loss\nplotdict[\"val_loss\"] = val_loss\n\nlabels = [\"train_loss\", \"val_loss\"]\n\nfig, ax = plt.subplots()\nfor label in labels:\n ax.plot(np.arange(3), plotdict[label], label= label)\nax.legend()\nplt.show(block = False)\n","sub_path":"models/lstm_attention_debug.py","file_name":"lstm_attention_debug.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"312086386","text":"f = open('B-large.in', 'r')\r\nf2 = open('output', 'w')\r\nnumCases = int(f.readline())\r\nfor c in range(0, numCases):\r\n n = int(f.readline())\r\n dic = {}\r\n miss = []\r\n for y in range(0, 2*n - 1):\r\n line = f.readline().split()\r\n for x in range(0, len(line)):\r\n try:\r\n dic[line[x]] += 1\r\n except:\r\n dic[line[x]] = 1\r\n for key, value in dic.iteritems():\r\n if(value % 2 == 1):\r\n miss.append(int(key))\r\n answer = \"Case #\" + str(c + 1) + \": \"\r\n for item in sorted(miss):\r\n answer += str(item) + \" \"\r\n f2.write(answer + \"\\n\")","sub_path":"solutions_5630113748090880_1/Python/jaday/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"499502153","text":"\n# Non-Recursion\nclass Solution:\n \"\"\"\n @param nums: A list of Integers.\n @return: A list of permutations.\n \"\"\"\n def permute(self, nums):\n if nums is None:\n return []\n if nums == []:\n return [[]]\n nums = sorted(nums)\n permutation = []\n stack = [-1]\n permutations = []\n while len(stack):\n index = stack.pop()\n index += 1\n while index < len(nums):\n if nums[index] not in permutation:\n break\n index += 1\n else:\n if len(permutation):\n permutation.pop()\n continue\n\n stack.append(index)\n stack.append(-1)\n permutation.append(nums[index])\n if len(permutation) == len(nums):\n permutations.append(list(permutation))\n return permutations","sub_path":"J9Ch/src/j_1/python/_15Permutations_NonRecursion.py","file_name":"_15Permutations_NonRecursion.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"596789250","text":"__author__ = [\"Francisco Clavero\"]\n__description__ = \"Histogram of Oriented Gradients (HOG) model.\"\n__email__ = [\"fcoclavero32@gmail.com\"]\n__status__ = \"Prototype\"\n\n\nimport math\n\nimport torch\n\nfrom overrides import overrides\n\nfrom vscvs.decorators import torch_no_grad\nfrom vscvs.models.gradients import SobelX, SobelY\n\n\nclass HOG(torch.nn.Module):\n \"\"\"Model for creating Histogram of Oriented Gradients (HOG) feature vectors for\n the given images. Doesn't need training.\n\n Reference:\n https://www.learnopencv.com/histogram-of-oriented-gradients\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 3,\n cell_size: int = 8,\n bins: int = 9,\n signed_gradients: bool = False,\n ) -> None:\n \"\"\"Initialize module.\n\n Arguments:\n in_channels:\n The number of channels for inputs.\n cell_size:\n The image will be divided into cells of the specified size, and the\n histogram of gradients is computed for each one. We'll just use square\n cells, so this parameter is received as a singe int indicating both the\n `x` and `y` dimensions of the cell, measured in pixels.\n bins:\n Number of bins for the histogram of each cell.\n signed_gradients:\n Gradients are represented using their angle and magnitude. Angles can\n be expressed using values between `0` and `360` degrees or between `0`\n and `180` degrees. If the latter are used, we call the gradient\n “unsigned” because a gradient and it’s negative are represented by the\n same numbers. Empirically it has been shown that unsigned gradients\n work better than signed gradients for tasks such as pedestrian\n detection.\n \"\"\"\n super().__init__()\n # Set hyperparameters\n self.cell_size = cell_size\n self.bins = bins\n self.signed_gradients = signed_gradients\n # Define constituent layers\n self.sobel_x = SobelX(in_channels=in_channels) # Sobel filtering layer\n self.sobel_y = SobelY(in_channels=in_channels) # Sobel filtering layer\n self.cell_pooling = torch.nn.AvgPool2d(cell_size)\n\n @property\n def angle_range(self) -> float:\n \"\"\"Range of possible gradient angles. Depends on whether signed gradients are\n used.\"\"\"\n return 2 * math.pi if self.signed_gradients else math.pi\n\n def descriptor_length(self, in_dimension: int) -> float:\n \"\"\"Get the length of output descriptors, given the dimensions of the would be\n inputs.\n\n Arguments:\n in_dimension:\n Input image dimensions (assuming square images).\n\n Returns:\n The length of output descriptors.\n \"\"\"\n return int(self.bins * (in_dimension / self.cell_size) ** 2)\n\n @torch_no_grad # we won't need gradients for operations, so we use the `no_grad`..\n @overrides # ..option for better performance.\n def forward(self, batch: torch.Tensor) -> torch.Tensor:\n \"\"\"Perform HOG computation.\n\n Arguments:\n batch:\n Input batch of size `[n_inputs, n_channels, img_height, img_width]`.\n\n Returns:\n The HOG vectors for each element in the batch.\n \"\"\"\n n_inputs, _, input_height, input_width = batch.shape\n\n # First, we need to compute the gradients along both axes.\n gradient_x, gradient_y = self.sobel_x(batch), self.sobel_y(batch)\n grad_magnitudes, grad_angles = (\n torch.sqrt(gradient_x ** 2 + gradient_y ** 2),\n torch.atan2(gradient_x, gradient_y),\n )\n\n # If using signed angles, we phase shift by $pi$ to get only positive numbers\n grad_angles = (\n grad_angles + math.pi if self.signed_gradients else grad_angles.abs()\n )\n\n # Gradient angle linear interpolation: first we divide angles by the maximum\n # angle. 
This gives us the angle as a fraction $[0, 1]$ of the maximum.\n grad_angle_interpolation = grad_angles / self.angle_range\n\n # We then multiply by `1 - bins` and take the floor, giving us an integer that\n # corresponds to the angle bin the pixel belongs to.\n grad_bins = (grad_angle_interpolation * (self.bins - 1)).floor().long()\n\n # Now we need the histogram for every pixel block. First, we create tensor with\n # a vector for each pixel, containing its gradient magnitude in the index of\n # the pixel's gradient orientation bin.\n out = torch.zeros(\n (n_inputs, self.bins, input_height, input_width),\n dtype=torch.float,\n device=batch.device,\n )\n\n # The scatter function places the magnitude in the corresponding index.\n out.scatter_(1, grad_bins, grad_magnitudes)\n\n # Now we use an average pool with `cell_size` kernel, which gives us the\n # normalized sum of the pixel vectors above, giving us a single vector for\n # each cell with the normalized histogram of orientations.\n hog = self.cell_pooling(out) * self.cell_size ** 2\n\n # Finally we flatten to return the actual feature vector using `start_dim = 1`\n # to return the HOG vector of every image in a batch.\n return hog.flatten(start_dim=1)\n\n @overrides\n def to(self, *args, **kwargs) -> torch.nn.Module:\n \"\"\"Override: move both Sobel filters.\"\"\"\n self.sobel_x, self.sobel_y = (\n self.sobel_x.to(*args, **kwargs),\n self.sobel_y.to(*args, **kwargs),\n )\n return super().to(*args, **kwargs)\n","sub_path":"vscvs/models/hog.py","file_name":"hog.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"162715732","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport psutil\nimport database\nfrom datetime import datetime\n\nimport util\n\nTRACE_LOG = \"tmp/trace.log\"\n\ntids = {}\n\ndef run_trace(trace_time):\n tids.clear()\n\n psutil_version = util.get_psutil_version()\n\n for process in psutil.process_iter():\n try:\n ps = process.as_dict(attrs=['name', 'pid'])\n except psutil.NoSuchProcess:\n pass\n\n accepted = False\n\n if \"qemu-system-x86_64\" in ps[\"name\"]:\n accepted = True\n elif \"qemu-kvm\" in ps[\"name\"]:\n accepted = True\n elif \"vhost-\" in ps[\"name\"]:\n accepted = True\n\n if accepted == False:\n continue\n\n p = psutil.Process(ps['pid'])\n\n if psutil_version == \"1.2.1\":\n threads = p.get_threads()\n for thread in threads:\n tid, user_time, system_time = thread\n tids[str(tid)] = str(ps['pid'])\n elif psutil_version == \"5.0.1\":\n threads = p.threads()\n for thread in threads:\n tid, user_time, system_time = thread\n tids[str(tid)] = str(ps['pid'])\n\n events = \"-e kvm:*\"\n os.system(\"sudo trace-cmd record \" + events + \" sleep \" + str(trace_time) + \" > /dev/null\")\n\n return\n\ndef analyze_trace(VNFs, protocol, bandwidth):\n os.system(\"sudo trace-cmd report -t 2> /dev/null > \" + TRACE_LOG)\n\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n f = open(TRACE_LOG, \"r\")\n raw_traces = f.read().splitlines()\n\n num_cpus = 0\n empty_cpus = []\n\n traces = {}\n for raw_trace in raw_traces:\n trace = raw_trace.split()\n\n if trace[0] == \"version\":\n continue\n elif trace[0] == \"CPU\":\n empty_cpus.append(int(trace[1]))\n continue\n elif \"cpus=\" in trace[0]:\n cpus = trace[0].split(\"=\")\n num_cpus = int(cpus[1])\n for cpu in range(num_cpus):\n if cpu not in empty_cpus:\n traces[cpu] = []\n continue\n else:\n if \"qemu-system-x86\" not in trace[0] and \"qemu-kvm\" not in trace[0]:\n continue\n\n cpu = int(trace[1][1:4])\n traces[cpu].append(trace)\n\n global_pairs = {}\n global_pairs_cnt = {}\n global_pairs_time = {}\n\n for cpu in traces:\n pre_pid = \"\"\n pre_tid = \"\"\n pre_time = 0.0\n pre_event = \"\"\n pre_data = \"\"\n\n pairs = []\n pairs_cnt = {}\n pairs_time = {}\n\n total_cnt = 0\n total_time = 0.0\n\n for trace in traces[cpu]:\n tid = trace[0].split(\"-\")[-1]\n\n pid = \"\"\n if tid in tids:\n pid = tids[tid]\n else:\n pid = \"Unknown\"\n\n cpu = trace[1][1:4]\n time = float(trace[2][3:-1])\n event = trace[3][0:-1]\n\n data = \"N/A\"\n if event == \"kvm_ple_window\":\n data = trace[5][0:-1]\n if event == \"kvm_vcpu_wakeup\":\n data = trace[4]\n elif event == \"kvm_fpu\":\n data = trace[4]\n elif event == \"kvm_entry\":\n data = trace[5]\n elif event == \"kvm_exit\":\n data = trace[5]\n elif event == \"kvm_userspace_exit\":\n data = trace[5]\n elif event == \"kvm_msr\":\n data = trace[5]\n elif event == \"kvm_pio\":\n data = trace[6]\n\n if pre_event != \"\":\n pair = \"%s %s %s %s %s %s %s %s\" % (pre_event, pre_pid, pre_tid, pre_data, event, pid, tid, data)\n\n if pair not in pairs:\n pairs.append(pair)\n pairs_cnt[pair] = 1\n pairs_time[pair] = (time - pre_time)\n else:\n pairs_cnt[pair] += 1\n pairs_time[pair] += (time - pre_time)\n\n if pid not in global_pairs:\n global_pairs[pid] = []\n global_pairs_cnt[pid] = {}\n global_pairs_time[pid] = {}\n\n if pair not in global_pairs[pid]:\n global_pairs[pid].append(pair)\n global_pairs_cnt[pid][pair] = 1\n global_pairs_time[pid][pair] = (time - pre_time)\n else:\n global_pairs_cnt[pid][pair] += 1\n global_pairs_time[pid][pair] += (time - pre_time)\n\n total_cnt += 
1\n total_time += (time - pre_time)\n\n pre_pid = pid\n pre_tid = tid\n pre_time = time\n pre_event = event\n pre_data = data\n\n for pair in pairs:\n database.trace_info_cpu(timestamp, cpu, pair, pairs_cnt[pair], pairs_time[pair])\n\n f.close()\n\n for pid in global_pairs:\n for pair in global_pairs[pid]:\n database.trace_info_pid(timestamp, pid, pair, global_pairs_cnt[pid][pair], global_pairs_time[pid][pair])\n\n os.system(\"rm \" + TRACE_LOG)\n\n return\n","sub_path":"trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"191579699","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter.ttk import *\nfrom PIL import Image, ImageTk\n# region Class : Log_Board\n\nclass Log_Board(object):\n\n# region info\n \"\"\"\n This Class responsible for displaying the users the flow of the game (valid locations, collisions, winning player, etc...)\n\n :param master: the 'father' of the page which responsible for making the page exist and connect the classes with it\n :type master: TK\n :param self.log: a list contating strings we want to display on the log\n :type self.log: list\n :param self.lines_amount: the amount of lines we will want our log to be\n :type self.lines_amount: int\n :param self.labels: a list of labels which display the output on the page\n :type self.labels: list\n\n :return: Nothing\n :rtype: None\n \"\"\"\n# endregion\n# region Constructer\n def __init__(self, master):\n\n\n self.lines_amount = 10\n self.log = []\n self.labels = []\n\n # Reset Lines\n for i in range(self.lines_amount):\n self.log.append(\"\")\n\n # Enter the welcome message\n self.log[self.lines_amount-1] = self.WelcomeMessage()\n\n for i in range(self.lines_amount):\n\n label = ttk.Label(master, text = self.log[i], foreground= \"black\", anchor = W, font=('Helvetica', int(120 / self.lines_amount), 'bold'))\n label.place(relx = 0, rely= i * (1 / self.lines_amount), relwidth= 1 , relheight = 0.95 / self.lines_amount)\n self.labels.append(label)\n# endregion\n# region Methods\n def Update_log(self, message):\n # region info\n '''\n This function update the log board and delete the latest message\n\n :param message: Contains the new message\n :type message: str\n\n :return: nothing\n :rtype: None\n\n '''\n # endregion\n\n # Update the new message if it is different from the last one and change the log view\n\n if message != self.log[self.lines_amount - 1]:\n # downward each note by 1\n for i in range(1, self.lines_amount):\n\n self.log[i - 1] = self.log[i]\n\n # add the new message\n self.log[self.lines_amount - 1] = message\n\n # downward the view in the log by 1\n\n for i in range(self.lines_amount):\n\n self.labels[i].config(text = self.log[i])\n\n\n def WelcomeMessage(self):\n\n return \"Welcome to my game\\nObjective: Save the world from Covid-19\"\n\n def Reset_log(self):\n\n for i in range(0, self.lines_amount):\n self.log[i] = \"\"\n\n self.log[self.lines_amount-1] = self.WelcomeMessage()\n\n for i in range(self.lines_amount):\n self.labels[i].config(text=self.log[i])\n\n\n\n # endregion\n\n# endregion","sub_path":"Covid19/Log_Board.py","file_name":"Log_Board.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"3042839","text":"from Agent import Agent\nfrom GridWorld import GridWorld\nfrom QLearning import QLearning\nimport numpy as np\n\nactions = {'up': 0, 'down': 1, 'left': 2, 'right': 3, 'none': 4}\n\nagent = Agent('Agent 1', actions)\ngridWorld = GridWorld(4,3)\nqLearning = QLearning(agent, gridWorld)\n\nqLearning.training_q_learning()\n\nprint('Environment: ')\nprint(gridWorld.environment)\n\nprint('\\nFinal Q-table:')\nprint(qLearning.q_table)\n\nprint('\\nQ Learning Final Path:')\nqLearning.execute_q_learning()","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"147449356","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import models, migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('users', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterModelOptions(\r\n name='realnamevalidate',\r\n options={'ordering': ['-CreatedTime']},\r\n ),\r\n migrations.RenameField(\r\n model_name='realnamevalidate',\r\n old_name='Created',\r\n new_name='CreatedTime',\r\n ),\r\n ]\r\n","sub_path":"users/migrations/0002_auto_20151118_0350.py","file_name":"0002_auto_20151118_0350.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"321358725","text":"import tensorflow as tf\nfrom model import build_model\n\nSTATE_SHAPE = (80,80,3)\nACTION_SIZE = 9\n\nMODEL_PATH = 'model.h5'\nEXPORT_PATH = 'model.tflite'\n\n\nif __name__ == '__main__':\n model = build_model(STATE_SHAPE, ACTION_SIZE)\n model.load_weights(MODEL_PATH)\n\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n tflite_model = converter.convert()\n\n open(EXPORT_PATH, \"wb\").write(tflite_model)","sub_path":"Trainer/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"43626945","text":"import pyglet\r\nimport resources\r\nfrom pyglet.window import *\r\n\r\n#Variables\r\n#===========================================\r\ngame_screen = 0\r\nkey_left_press = False\r\nkey_right_press = False\r\nkey_up_press = False\r\nkey_down_press = False\r\nkey_gun_press = False\r\nkey_melee_press = False\r\nkey_dash_press = False\r\n\r\nPlayer_Bullets = []\r\nEnemy_Bullets = []\r\nPlayer_melee = []\r\nEnemy_list = []\r\n\r\n#Set up window\r\ngame_window = pyglet.window.Window(600,800)\r\n\r\n#Setting Image layers\r\ngroup_background = pyglet.graphics.OrderedGroup(0)\r\ngroup_midground = pyglet.graphics.OrderedGroup(1)\r\ngroup_foreground = pyglet.graphics.OrderedGroup(2)\r\n\r\n#Start/Continue button\r\nspr_btn_start = pyglet.sprite.Sprite(img=resources.button_start, x=300, y=200,group=group_foreground)\r\n\r\n#set up background\r\ngame_background = pyglet.sprite.Sprite(img=resources.cover_image,x=300,y=400,group=group_background)\r\ngame_background_2 = pyglet.sprite.Sprite(img=resources.bg_image,x=300,y=400,group=group_background)\r\n\r\n#game sprites\r\nspr_player = pyglet.sprite.Sprite(img=resources.player_image,x=300,y=50,group=group_foreground)\r\nspr_player_bullets = []\r\nspr_enemy_list = []\r\n\t\r\n#Set cursor\r\ncursor = pyglet.window.ImageMouseCursor(resources.cursor_1, 16, 8)\r\n#game_window.set_mouse_cursor(cursor)\r\n\r\n@game_window.event\r\ndef on_draw():\r\n\tglobal game_screen, spr_player_bullets, Player_Bullets, Enemy_list\r\n\tgame_window.clear()\r\n\tif game_screen == 0:\r\n\t\tgame_background.draw()\r\n\t\tspr_btn_start.draw()\r\n\telif game_screen == 2:\r\n\t\tgame_background_2.draw()\r\n\t\tspr_player.draw()\r\n\t\tspr_player_bullets = []\r\n\t\tfor b in Player_Bullets:\r\n\t\t\tif b.modifiers[\"homing\"] and b.modifiers[\"explosive\"]:\r\n\t\t\t\tspr_player_bullets.append(pyglet.sprite.Sprite(img=resources.bullet_exhoming,x=b.obj_x,y=b.obj_y,group=group_midground))\r\n\t\t\telif b.modifiers[\"homing\"]:\r\n\t\t\t\tspr_player_bullets.append(pyglet.sprite.Sprite(img=resources.bullet_homing,x=b.obj_x,y=b.obj_y,group=group_midground))\r\n\t\t\telif b.modifiers[\"explosive\"]:\r\n\t\t\t\tspr_player_bullets.append(pyglet.sprite.Sprite(img=resources.bullet_explosive,x=b.obj_x,y=b.obj_y,group=group_midground))\r\n\t\t\telif b.modifiers[\"piercing\"]:\r\n\t\t\t\tspr_player_bullets.append(pyglet.sprite.Sprite(img=resources.bullet_piercing,x=b.obj_x,y=b.obj_y,group=group_midground))\r\n\t\t\telse:\r\n\t\t\t\tspr_player_bullets.append(pyglet.sprite.Sprite(img=resources.bullet_player,x=b.obj_x,y=b.obj_y,group=group_midground))\r\n\t\tfor b in spr_player_bullets:\r\n\t\t\tb.draw()\r\n\t\tspr_enemy_list = []\r\n\t\tfor b in Enemy_list:\r\n\t\t\tif b.id == \"easy_1\":\r\n\t\t\t\tspr_enemy_list.append(pyglet.sprite.Sprite(img=resources.enemy_1_image,x=b.x,y=b.y,group=group_foreground))\r\n\t\tfor b in spr_enemy_list:\r\n\t\t\tb.draw()\r\n@game_window.event\r\ndef on_mouse_press(x,y,button,modifiers):\r\n\t#==================================\r\n\t#Checking if mouse is over buttons\r\n\t#==================================\r\n\tglobal game_screen\r\n\tif game_screen == 0:\r\n\t\tif abs(x-spr_btn_start.x)<50 and abs(y-spr_btn_start.y)<40:\r\n\t\t\t#Go to home screen (thou this will actually lead to game screen, just change when home is available na)\r\n\t\t\tgame_screen = 2\r\n\t\t\t#----------Fix this to 1 later-----------------#\r\n\t\t\tprint(game_screen)\r\n\r\n@game_window.event\r\ndef on_key_press(symbol, modifiers):\r\n\tglobal 
key_left_press,key_right_press,key_up_press,key_down_press,key_gun_press,key_melee_press,key_dash_press\r\n\tif symbol == key.W:\r\n\t\tkey_up_press = True\r\n\tif symbol == key.A:\r\n\t\tkey_left_press = True\r\n\tif symbol == key.S:\r\n\t\tkey_down_press = True\r\n\tif symbol == key.D:\r\n\t\tkey_right_press = True\r\n\tif symbol == key.R:\r\n\t\tkey_gun_press = True\r\n\tif symbol == key.T:\r\n\t\tkey_melee_press = True\r\n\tif symbol == key.Y:\r\n\t\tkey_dash_press = True\r\n\r\n@game_window.event\r\ndef on_key_release(symbol, modifiers):\r\n\tglobal key_left_press,key_right_press,key_up_press,key_down_press,key_gun_press,key_melee_press,key_dash_press\r\n\tif symbol == key.W:\r\n\t\tkey_up_press = False\r\n\tif symbol == key.A:\r\n\t\tkey_left_press = False\r\n\tif symbol == key.S:\r\n\t\tkey_down_press = False\r\n\tif symbol == key.D:\r\n\t\tkey_right_press = False\r\n\tif symbol == key.R:\r\n\t\tkey_gun_press = False\r\n\tif symbol == key.T:\r\n\t\tkey_melee_press = False\r\n\tif symbol == key.Y:\r\n\t\tkey_dash_press = False\r\n\r\n\r\ndef on_run():\r\n\tpyglet.app.run()\r\n\r\ndef recieve_vars(game_screen_val):\r\n\tglobal game_screen\r\n\tgame_screen = game_screen_val\r\n\r\ndef player_move(x,y):\r\n\tglobal spr_player\r\n\tspr_player.x = x\r\n\tspr_player.y = y\r\n\r\ndef update_bullet_list(p_bullet,e_bullet):\r\n\tglobal Player_Bullets, Enemy_Bullets\r\n\tPlayer_Bullets = p_bullet\r\n\tEnemy_Bullets = e_bullet\r\n\r\ndef update_enemy_list(e_list):\r\n\tglobal Enemy_list\r\n\tEnemy_list = e_list\r\n\r\ndef keyboard(key): #When main checks if a certain key is being checked\r\n\tglobal key_left_press,key_right_press,key_up_press,key_down_press,key_gun_press,key_melee_press,key_dash_press\r\n\tif key == \"up\":\r\n\t\treturn key_up_press\r\n\tif key == \"down\":\r\n\t\treturn key_down_press\r\n\tif key == \"left\":\r\n\t\treturn key_left_press\r\n\tif key == \"right\":\r\n\t\treturn key_right_press\r\n\tif key == \"gun\":\r\n\t\treturn key_gun_press\r\n\tif key == \"melee\":\r\n\t\treturn key_melee_press\r\n\tif key == \"dash\":\r\n\t\treturn key_dash_press\r\n\r\ndef get_player_coordinates():\r\n\tglobal spr_player\r\n\treturn [spr_player.x, spr_player.y]\r\n\r\ndef game_start():\r\n\tglobal game_screen\r\n\tif game_screen == 2:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"650758913","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 4 21:25:47 2019\r\n\r\n@author: Areej\r\n\"\"\"\r\n\r\n# Simple Linear Regression\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('aids.csv')\r\nX = dataset.iloc[:, 1:-1].values\r\ny = dataset.iloc[:, 2].values\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/4, random_state = 0)\r\n\r\n# Fitting Simple Linear Regression to the Training set\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(X_train, y_train)\r\n\r\n# Predicting the Test set results\r\ny_pred = regressor.predict(X_test)\r\n\r\n# Visualising the Training set results\r\nplt.scatter(X_train, y_train, color = 'red', marker='*')\r\nplt.plot(X_train, regressor.predict(X_train), color = 'green' , linestyle='dashed', linewidth=2)\r\nplt.title('Death Rate vs Year (Training set)')\r\nplt.xlabel('Death Rate')\r\nplt.ylabel('Year')\r\nplt.show()\r\n\r\n# Visualising the Test set results\r\nplt.scatter(X_test, y_test, color = 'orange', marker='s')\r\nplt.plot(X_train, regressor.predict(X_train), color = 'purple', linestyle='dashed', linewidth=2)\r\nplt.title('Death Rate vs Year (Test set)')\r\nplt.xlabel('Year')\r\nplt.ylabel('Death Rate')\r\nplt.show()\r\n\r\nprint(regressor.predict([[2000]]))","sub_path":"simple_linear_regression_assignment.py","file_name":"simple_linear_regression_assignment.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"50256297","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom module import BiEncoder, Attention\n\n\nclass AttnReader(nn.Module):\n def __init__(self, embed_dim, **params):\n '''\n Args:\n - `embed_dim`: Word embedding dimension.\n '''\n super().__init__()\n self.embed_dim = embed_dim\n self.n_hops = params.get('n_hops', 3)\n self.qa_encoder = \\\n BiEncoder(embed_dim,\n dropout=params.get('qa_encoder_dropout', 0))\n self.story_encoder = \\\n BiEncoder(embed_dim,\n dropout=params.get('story_encoder_dropout', 0))\n encode_dim = embed_dim * 2 # (using bidirectional encoder)\n self.attn = \\\n Attention(encode_dim, encode_dim,\n dropout=params.get('attn_dropout', 0))\n\n def forward(self, story, query, options, output_probs=True):\n '''\n Args: !embedded!\n - `story`: (variable[n_steps, embed_dim], lens).\n - `query`: (variable[n_steps, embed_dim], lens).\n - `options`: A list of (variable[n_steps, embed_dim], lens).\n\n Returns:\n - `options_prob`: [n_options]. (if output_probs)\n - `options_score`: [n_options]. (if not output_probs)\n '''\n # n_samples = story.size(1)\n # n_options = len(options)\n story, story_len = story\n query, query_len = query\n options, options_len = options\n #============================================================\n # Encoding\n query_feat = self.qa_encoder(query, query_len)\n # [n_samples, encode_dim]\n options_feat = [\n self.qa_encoder(option_embed, seqlen)\n # [n_samples, encode_dim]\n for option_embed, seqlen in zip(options, options_len)\n ]\n story_encoder_outputs, _ = self.story_encoder(story,\n story_len,\n with_outputs=True)\n # [n_steps, n_samples, encode_dim]\n outputs_temp = story_encoder_outputs.permute(1, 0, 2)\n # [n_samples, n_steps, encode_dim]\n #============================================================\n # Hopping & Attention\n context = query_feat\n for _ in range(self.n_hops):\n weights = self.attn(context, story_encoder_outputs)\n # [n_samples, n_steps]\n weights = weights.unsqueeze(-1)\n # [n_samples, n_steps, 1]\n weighted_story_feat = (outputs_temp * weights).sum(1)\n # [n_samples, encode_dim]\n context = context + weighted_story_feat\n #============================================================\n # Probability\n options_score = [\n (option_feat * context).sum(-1).unsqueeze(-1)\n # [n_samples, 1]\n for option_feat in options_feat\n ]\n # pylint: disable=E1101\n options_score = torch.cat(options_score, dim=-1)\n # pylint: enable=E1101\n # [n_samples, n_options]\n if not output_probs:\n return options_score\n options_prob = F.softmax(options_score, dim=-1)\n # [n_samples, n_options]\n return options_prob\n","sub_path":"dsml/TOEFL QA/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"121627323","text":"# -*-*- encoding: utf-8 -*-*-\n\nimport json\nimport logging\nimport random\n\nfrom dancedeets import fb_api\nfrom dancedeets import fb_api_util\nfrom dancedeets.users import users\nfrom dancedeets.util import text\nfrom .. import common\nfrom .. import db\n\n\nclass LookupGeoTarget(fb_api.LookupType):\n @classmethod\n def get_lookups(cls, urlparams):\n return [\n ('search', cls.url('search', type='adgeolocation', **urlparams)),\n ]\n\n @classmethod\n def cache_key(cls, query, fetching_uid):\n return (fb_api.USERLESS_UID, json.dumps(query, sort_keys=True), 'OBJ_GEO_TARGET')\n\n\ndef facebook_post(auth_token, db_event):\n link = common.campaign_url(db_event, 'fb_feed')\n datetime_string = db_event.start_time.strftime('%s @ %s' % (common.DATE_FORMAT, common.TIME_FORMAT))\n\n page_id = auth_token.token_nickname\n endpoint = 'v2.9/%s/feed' % page_id\n fbl = fb_api.FBLookup(None, auth_token.oauth_token)\n\n post_values = {}\n # post_values['message'] = db_event.name\n post_values['link'] = link\n post_values['name'] = db_event.name\n post_values['caption'] = datetime_string\n\n human_date = db_event.start_time.strftime('%B %-d')\n # TODO: Sometimes we definitely know the City (from a lat/long), but FB doesn't give us a city.\n # Hopefully in the Great Event Location Cleanup, can take care of this...\n if db_event.city:\n location = db_event.city\n else:\n location = ''\n\n host = ''\n admins = db_event.admins\n if admins:\n admin_ids = [x['id'] for x in admins]\n page_admin_ids = fb_api_util.filter_by_type(fbl, admin_ids, 'page')\n # TODO: Can I @mention the people here too, like I do as a human? Or does it only work with pages?\n host = text.human_list('@[%s]' % x for x in page_admin_ids)\n\n # Tag it if we can\n if db_event.venue_id:\n venue = '@[%s]' % db_event.venue_id\n else:\n venue = db_event.location_name\n\n if not location:\n # Don't want to post events globally...too noisy\n return {}\n\n intro = random.choice([\n 'Coming up soon! ',\n 'Are you ready? ',\n 'Just a few days away... ',\n 'We can\\'t wait! ',\n 'Get ready! 
',\n '',\n ])\n\n params = {\n 'intro': intro,\n 'name': db_event.name,\n 'description': db_event.description,\n 'date': human_date,\n #\n 'venue': venue,\n 'location': location,\n 'full_location': db_event.city,\n #\n 'host': host,\n }\n\n # Add some callouts\n callouts = ''\n if host and host != venue:\n callouts += random.choice([\n 'Hosted by our friends at %(host)s.',\n 'Thanks to our buddies at %(host)s for hosting!',\n 'Hitup the awesome %(host)s with any questions you\\'ve got!',\n ])\n params['callouts'] = callouts % params\n # Possible lines:\n #━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━ ━\n #_____________________________________________________________________\n\n message = \"\"\"\n%(intro)s%(name)s\n\nDate: %(date)s\nVenue: %(venue)s, %(full_location)s\n%(callouts)s\n_____________________________________________________________________\nDescription:\n%(description)s\n\"\"\"\n post_values['message'] = message % params\n\n description = db_event.description\n if len(description) > 10000:\n post_values['description'] = description[:9999] + u\"…\"\n else:\n post_values['description'] = description\n post_values['description'] = post_values['description']\n cover = db_event.largest_cover\n if cover:\n post_values['picture'] = cover['source']\n venue_id = db_event.venue_id\n if venue_id:\n post_values['place'] = venue_id\n\n feed_targeting = get_targeting_data(fbl, db_event)\n if feed_targeting:\n # Ideally we'd do this as 'feed_targeting', but Facebook appears to return errors with that due to:\n # {u'error': {u'message': u'Invalid parameter', u'code': 100, u'is_transient': False,\n # u'error_user_title': u'Invalid Connection', u'error_subcode': 1487124, u'type': u'FacebookApiException',\n # u'error_user_msg': u'You can only specify connections to objects you are an administrator or developer of.',\n # u'error_data': {u'blame_field': u'targeting'}}}\n post_values['targeting'] = json.dumps(feed_targeting)\n\n logging.info(\"FB Feed Post Values: %s\", post_values)\n return fbl.fb.post(endpoint, None, post_values)\n\n\ndef get_country_targeting_data(fbl, db_event):\n short_country = db_event.country\n feed_targeting = {\n 'countries': [short_country],\n }\n full_targeting = {'geo_locations': feed_targeting}\n return full_targeting\n\n\ndef get_targeting_data(fbl, db_event):\n city_key = None\n\n short_country = db_event.country\n if short_country:\n city_state_list = [\n db_event.city,\n db_event.state,\n ]\n city_state = ', '.join(x for x in city_state_list if x)\n geo_search = {\n 'location_types': 'city,region',\n 'country_code': db_event.country,\n 'q': city_state,\n }\n geo_target = fbl.get(LookupGeoTarget, geo_search, allow_cache=False)\n\n good_targets = geo_target['search']['data']\n if good_targets:\n # Is usually an integer, but in the case of HK and SG (city/country combos), it can be a string\n city_key = good_targets[0]['key']\n # if we want state-level targeting, 'region_id' would be the city's associated state\n\n if not short_country:\n geocode = db_event.get_geocode()\n if geocode:\n short_country = geocode.country()\n\n feed_targeting = {}\n # Target by city if we can, otherwise use the country\n if city_key:\n feed_targeting['cities'] = [{\n 'key': city_key,\n #'radius': 80,\n #'distance_unit': 'kilometer',\n }]\n elif short_country:\n feed_targeting['countries'] = [short_country]\n full_targeting = {'geo_locations': feed_targeting}\n return full_targeting\n\n\ndef _get_posting_user(db_event):\n # STR_ID_MIGRATE\n if db_event.creating_fb_uid and 
db_event.creating_fb_uid != 701004:\n user = users.User.get_by_id(str(db_event.creating_fb_uid))\n name = user.full_name\n return name\n else:\n return None\n\n\ndef get_dancedeets_fbl():\n page_id = '110312662362915'\n #page_id = '1375421172766829' # DD-Manager-Test\n tokens = db.OAuthToken.query(\n db.OAuthToken.user_id == '701004', db.OAuthToken.token_nickname == page_id, db.OAuthToken.application == db.APP_FACEBOOK\n ).fetch(1)\n if tokens:\n return fb_api.FBLookup(None, tokens[0].oauth_token)\n else:\n return None\n\n\ndef post_on_event_wall(db_event):\n logging.info(\"Considering posting on event wall for %s\", db_event.id)\n fbl = get_dancedeets_fbl()\n if not fbl:\n logging.error(\"Failed to find DanceDeets page access token.\")\n return\n\n url = common.campaign_url(db_event, 'fb_event_wall')\n name = _get_posting_user(db_event) or \"we've\"\n messages = [\n (\n 'Congrats, %(name)s added this dance event to DanceDeets, the site for street dance events worldwide! '\n 'Dancers can discover this event in our DanceDeets mobile app, or on our website here: %(url)s'\n ),\n (\n 'Yay, %(name)s included your event on our DanceDeets website and mobile app for interested dancers. '\n 'You can check it out here, and good luck with your event! %(url)s'\n ),\n (\n 'Hey there, %(name)s listed this event on DanceDeets, the website/mobile-app for street dancers worldwide. '\n \"We're sure you'll have a great event, but we hope our site can help with that in a small way... %(url)s\"\n ),\n (\n 'Awesome, %(name)s added your dance event to DanceDeets, to help more dancers discover it. '\n \"Hopefully you don't mind the extra help in promoting your event! %(url)s\"\n ),\n ]\n message = random.choice(messages) % {'name': name, 'url': url}\n logging.info(\"Attempting to post on event wall for %s\", db_event.id)\n result = fbl.fb.post('v2.9/%s/feed' % db_event.fb_event_id, None, {\n 'message': message,\n 'link': url,\n })\n return result\n","sub_path":"server/dancedeets/pubsub/facebook/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":8382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"206068590","text":"# coding=utf-8\nimport random\nimport time\nimport unittest\nimport HTMLTestRunner\nfrom appium import webdriver\nfrom app.student.login.object_page.login_page import LoginPage\nfrom app.student.login.test_data.login_failed_toast import VALID_LOGIN_TOAST\nfrom app.student.homework.object_page.home_page import HomePage, Tow_homepage\nfrom app.student.homework.object_page.games_page import Homework, FlashCard\n\nfrom conf.decorator import setup, teardown, testcase, teststeps\nfrom utils.screen_swipe import *\n\n\nclass Games(unittest.TestCase):\n @classmethod\n @setup\n def setUp(cls):\n \"\"\"启动应用\"\"\"\n\n cls.login_page = LoginPage()\n cls.home_page = HomePage()\n cls.homework = Homework()\n cls.flash_page = FlashCard()\n\n\n\n\n @classmethod\n @teardown\n def tearDown(self):\n pass\n\n def test_exercise(self):\n \"\"\"对不同小游戏类型,选择不同函数进行相应的操作\"\"\"\n # self.login_page.login()\n\n time.sleep(2)\n self.login_page.app_status()\n time.sleep(2)\n if self.home_page.wait_check_page() == \"试卷\":\n print(\"登录成功\")\n # 进入首页后点击作业x条目\n while True:\n var = self.homework.judge_homework_YB()\n\n if '测试重构1' in var[1] and '测试重构1' in var[3][0]:\n\n # self.homework_exist(var[1].index + 1, var[2], var[3])\n self.home_page.test_reconstruction_one()\n self.flash_exersise_stduy()\n break\n\n else:\n Swipe().swipe_up(1000)\n self.home_page.test_reconstruction_one()\n self.flash_exersise_stduy()\n\n break\n\n def flash_exersise_stduy(self):\n Swipe().swipe_up(1000)\n count = self.homework.flash_count()\n # exercise_type = self.flash_page.flash_study()\n # print(exercise_type)\n print(len(count))\n\n for i in range(0, len(count)):\n var = self.homework.flash_type(i).text\n print(var) # 获取小游戏类型\n if var == ' 闪卡练习 ':\n\n self.flash_page.flash_study_reconstruction1()\n self.mathed()\n # time.sleep(1)\n # self.two_homework.chick_question()\n time.sleep(1)\n self.flash_page.mark_again()\n self.mathed1()\n time.sleep(1)\n self.flash_page.return_btn()\n time.sleep(1)\n self.flash_page.return_btn()\n time.sleep(1)\n\n break\n\n def mathed(self):\n max_num = int(self.flash_page.max_number_reconstruction1())\n j = 0\n # 获取到我们需要的最大的题数\n self.flash_page.word_explain()\n self.flash_page.word_explain1()\n self.flash_page.chick_blank()\n for i in range(0, int(max_num)-1):\n max_num = int(self.flash_page.max_number_reconstruction1())\n j = j + 1\n print(\"进入第%d题,我们需要完成的小题的数目%d\" % (j, max_num))\n time.sleep(1)\n self.flash_page.click_star()\n time.sleep(1)\n self.flash_page.word()\n self.flash_page.chick_flash_study()\n self.flash_page.explanation()\n\n\n # 点击一下空白的地方\n Swipe().swipe_up_flash(1000)\n print(\"最后一页闪卡只有点击下一题按钮才可以进入结束页,滑动是没有办法的\")\n self.flash_page.arrow()\n\n def mathed1(self):\n max_num = int(self.flash_page.max_number_reconstruction1())\n j = 0\n # 获取到我们需要的最大的题数\n self.flash_page.word_explain()\n self.flash_page.word_explain1()\n # self.flash_page.chick_blank_single()\n self.flash_page.chick_blank()\n for i in range(0, int(max_num)-1):\n max_num = int(self.flash_page.max_number_reconstruction1())\n j = j + 1\n print(\"进入第%d题,我们需要完成的小题的数目%d\" % (j, max_num))\n time.sleep(1)\n self.flash_page.word()\n self.flash_page.chick_flash_study()\n self.flash_page.explanation()\n # 点击一下空白的地方\n Swipe().swipe_up_flash(1000)\n print(\"最后一页闪卡只有点击下一题按钮才可以进入结束页,滑动是没有办法的\")\n self.flash_page.arrow()\n\n\ndef num():\n suite = unittest.TestSuite()\n suite.addTest(Games('test_match_sentence'))\n report_title = u'Example用例执行报告'\n desc = '用于展示修改样式后的HTMLTestRunner'\n timestr = time.strftime('%Y%m%d%H%M%S', 
time.localtime(time.time()))\n filename = r'/Users/work/test_IOS/storges/test_report/' + 'TestReport_' + timestr + '.html'\n print(filename)\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(\n stream=fp,\n title=report_title,\n description=desc)\n runner.run(suite)\n fp.close()\n\nif __name__ == '__main__':\n num()\n","sub_path":"test_reconstruction/test_131_flash_exe.py","file_name":"test_131_flash_exe.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"63523783","text":"#Deshawn Smith\r\n#04/09/2019\r\n\r\n#Problem 2 this will tell you the number of day of the week you returned on.\r\n\r\nday = int(input(\"number 0 - 6\"))\r\n\r\ngone = int(input(\"How long were you gone?\"))\r\n\r\ngone % 7\r\n\r\n((gone % 7) + day) \r\nprint(((gone % 7) + day) % 7)\r\n","sub_path":"Problem 2.py","file_name":"Problem 2.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"582179627","text":"import zipfile, os,shutil, xmltodict, json,os\ncurrent_path = os.path.dirname(__file__)\n#subdirs = [os.path.join(current_path, o) for o in os.listdir(current_path) if os.path.isdir(os.path.join(current_path,o))]\n#No windows\n#pasta_xml = subdirs[4].replace('.',\"\") \n#pasta_json = subdirs[1].replace('.',\"\") \n#arquivos_zip = subdirs[0].replace('.',\"\")\n#No linux\npasta_xml = current_path+'/xml'#subdirs[3] #depende se estiver no linux\npasta_json = current_path+\"/json\"#subdirs[2] #depende se estiver no linux\narquivos_zip = current_path+\"/curriculos\"#subdirs[5] #depende se estiver no linux\n\n#print(\"xml: \"+str(pasta_xml)+\"\\njson: \"+str(pasta_json)+\"\\ncurriculos: \"+str(arquivos_zip))\n\ndef limpando_pasta_xml():\n for filename in os.listdir(pasta_xml):#para cada arquivo que haver dentro da pasta xml\n file_path = os.path.join(pasta_xml, filename)#pegando o primeiro arquivo\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)#delete o arquivo\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n print(\"Pasta Dos XML's totalmente limpa\")\n\ndef limpando_pasta_json():\n for filename in os.listdir(pasta_json):#para cada arquivo antigo que haver dentro da pasta json\n file_path = os.path.join(pasta_json, filename)#pegando o primeiro arquivo\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)#delete o arquivo\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n print(\"Pasta Dos JSON's totalmente limpa\")\n\ndef descompactando_zip():\n for arquivo in os.listdir(arquivos_zip):\n file_path = os.path.join(arquivos_zip, arquivo)#pegando o primeiro arquivo\n with zipfile.ZipFile(file_path, 'r') as zip_ref: \n zip_ref.extractall(pasta_xml)\n print(\"Descompactando arquivo: \"+str(arquivo))\n\ndef convertendo_xml_to_json(): \n for xml in os.listdir(pasta_xml):\n dir(os.listdir(pasta_xml))\n path = os.path.join(pasta_xml, xml)#pegando o primeiro arquivo\n caminho_json = os.path.join(pasta_json,xml)\n with open(path, encoding='ISO-8859-1') as in_file:\n xml1 = in_file.read()\n if len(xml1) > 0:#só salve xmls com conteudo\n with open(caminho_json.split('.')[0]+'.json', 'w', encoding='utf8') as out_file:\n json.dump(xmltodict.parse(xml1), out_file, ensure_ascii=False)\n print(xml+\" convertido para \"+xml.split(\".\")[0]+\".json\")","sub_path":"src/salvar_curriculos/xml2json.py","file_name":"xml2json.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"15854730","text":"from collections import namedtuple,deque,defaultdict,OrderedDict\n\n\"常用的collections模块\"\n\n_author_ = \"@shijiang\"\n\n\n# 使用tuple表示坐标 -> namedtuple\nPoint = namedtuple('Point', ['x', 'y'])\np = Point(1, 2)\nprint('point x:', p.x)\nprint('point y:', p.y)\n\n\n# deque :可以高效实现插入和删除操作的双向列表,适合用于队列和栈\n# ==》 与linkedlist相似\nprint('\\n\\ndeque:')\nq = deque(['a', 'b', 'c'])\nprint('deque :', q)\nq.append('x')\nq.appendleft('y')\nprint(q)\nq.pop()\nprint(q)\n\n\n# defaultdict\nprint('\\n\\ndefaultdict:')\ndd = defaultdict(lambda: 'N/A')\ndd['key1'] = 'abc'\nprint(dd['key1'])\nprint(dd['key2'])\n\n# 遍历dict时, key是无序的, 如果希望保持顺序,OrderedDict\n# OrderedDict排序是按照插入的顺序排序, 不是key本身\nprint('\\n\\nOrderedDict:')\nddd = dict([('a', 1), ('b', 2), ('c', 3)])\nprint(ddd)\nod = OrderedDict([('a', 1), ('b', 2), ('c', 3)])\nprint(od)\n\n\n# 使用OrderDict实现FIFO的dict\n\nclass FifoOrderDisct(OrderedDict):\n\n def __init__(self, capacity):\n super(FifoOrderDisct, self).__init__()\n self._capacity = capacity\n\n def __setitem__(self, key, value):\n contailKey = 1 if key in self else 0\n if len(self) - contailKey >= self._capacity:\n last = self.popitem(last=False)\n print('remove:', last)\n if contailKey:\n del self[key]\n print('set:', (key, value))\n else:\n print('add:', (key, value))\n OrderedDict.__setitem__(self, key, value)","sub_path":"modern/_collection_.py","file_name":"_collection_.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"337381451","text":"def debian_spec(self):\n self.bootstrap_cfg = 'preseed.cfg'\n self.builder_spec.update(\n {\n 'boot_command': [\n '',\n 'install',\n ' auto=true',\n '',\n ' priority=critical',\n '',\n ' url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/'f'{self.distro}-{self.version}-{self.bootstrap_cfg}',\n ' '\n ],\n 'boot_wait': '30s',\n 'shutdown_command': 'sudo /sbin/halt -h -p'\n }\n )\n","sub_path":"packer_builder/specs/builders/distros/debian.py","file_name":"debian.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"310727644","text":"HEADERSIZE = 512\nSUBSECT_SIZE = 128\nSECT_SIZE_IND = 0\nMINI_SECT_SIZE_IND = 1\nNUM_SAT_IND = 2\nSID_ROOT_IND = 3\nMINISTREAM_CUTOFF_IND = 4\nSID_SSAT_IND = 5\nNUM_SSAT_IND = 7\nSID_SAT_IND = 7\nSID_MINI_IND = 8\nSUB_SECTOR_SIZE = 128\nMSAT_OFFSET = 76\n","sub_path":"consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"598192370","text":"# Author: Quentin Goss\n# Last modified: 9/3/18\n\nfrom graphics import *\nfrom Junction import Junction\nfrom Cell import Cell\n\n# A status window displaying the grid for the current simulation.\nclass StatusWindow:\n \n def __init__(self,s_path_of_net_xml):\n self.s_path_of_net_xml = s_path_of_net_xml\n \n # Window Options\n self.s_window_title = ''\n self.is_cells_mode = False\n self.is_minimap_mode = False\n self.n_cell_width = 15\n self.n_graph_padding = 10\n self.n_edge_length_scale = 1\n self.n_window_max_width = 999999\n self.n_window_max_height = 999999\n \n # Generated window parameters\n self.n_window_width = -1\n self.n_window_height = -1\n \n # Stores boundary info as [float x,float y]\n self.lf_bottom_left_boundaries = [0.00,0.00]\n self.lf_top_right_boundaries = [0.00,0.00]\n self.f_boundary_width = 0\n self.f_boundary_height = 0\n \n # Cell storage\n self.ll_cells = []\n \n # Junction storage\n self.l_junctions = []\n \n # Grid dimension initialization\n # [int columns, int rows]\n self.ln_grid_dimensions = [-1,-1]\n \n def get_window_title(self):\n return self.s_window_title\n def set_window_title(self,s_title):\n self.s_window_title = s_title\n \n def get_window_width(self):\n return self.n_window_width\n def get_window_height(self):\n return self.n_window_height\n \n def get_window_max_width(self):\n return self.n_window_max_width\n def get_window_max_height(self):\n return self.n_window_max_height\n # @ param n_width = width of window in pixels\n # @ param n_height = height of window in pixels\n def set_window_max_dimensions(self, n_width, n_height):\n self.n_window_max_width = n_width\n self.n_window_max_height = n_height\n \n def get_cells_mode(self):\n return self.is_cells_mode\n def set_cells_mode(self,is_option):\n self.is_cells_mode = is_option\n \n def get_minimap_mode(self):\n return self.is_minimap_mode\n def set_minimap_mode(self,is_option):\n self.is_minimap_mode = is_option\n \n def get_cell_width(self):\n return self.n_cell_width\n def set_cell_width(self,n_pixels):\n self.n_cell_width = n_pixels\n \n def get_graph_padding(self):\n return self.n_graph_padding\n def set_graph_padding(self,n_pixels):\n self.n_graph_padding = n_pixels\n \n def get_edge_length_scale(self):\n return self.n_edge_length_scale\n def set_edge_length_scale(self,n_scale):\n self.n_edge_length_scale = n_scale\n \n # Once Options are complete, create the graphic\n def build(self):\n self.initialize()\n self.update()\n self.finalize()\n \n def initialize(self):\n # Split the grid into squares.\n self.get_boundaries()\n \n # We will always split the map into cells because the method is\n # not computationally expensive and we also use the list of cells\n # to determine the size of our status window.\n self.build_cells()\n \n # We will use the coordinates from our junctions to determine if\n # cells should remain active\n self.build_junctions()\n self.find_neighbors()\n \n if self.is_cells_mode:\n self.disable_unused_cells()\n \n self.create_window()\n # end def initialize()\n \n def update(self):\n \n if self.is_cells_mode:\n self.draw_cells()\n \n if self.is_minimap_mode:\n self.draw_junctions()\n \n # end update\n \n def finalize(self):\n # Close window \n #input(\"Press return to finish.\")\n self.window.close()\n # end def do_stuff\n \n # Obtain the coordinates of the bottom left and top right corner\n def get_boundaries(self):\n # We will use the net.xml file since it contains all of the edge\n # information.\n net_xml = open(self.s_path_of_net_xml)\n 
for s_line in net_xml:\n # \"convBoundary\" is an auto-generated boundary for everything\n # important in our map, so it's a good start for deciding\n # what the outer limits of our map should be.\n if \"convBoundary\" in s_line:\n s_line = s_line[s_line.index('convBoundary=\"')+len('convBoundary=\"'):-1]\n s_line = s_line[0:s_line.index('\"')]\n self.lf_bottom_left_boundaries[0] = float(s_line[0:s_line.index(',')])\n s_line = s_line[s_line.index(',')+1:]\n self.lf_bottom_left_boundaries[1] = float(s_line[0:s_line.index(',')])\n s_line = s_line[s_line.index(',')+1:]\n self.lf_top_right_boundaries[0] = float(s_line[0:s_line.index(',')])\n s_line = s_line[s_line.index(',')+1:] \n self.lf_top_right_boundaries[1] = float(s_line)\n \n self.f_boundary_width = self.lf_top_right_boundaries[0] - self.lf_bottom_left_boundaries[0]\n self.f_boundary_height = self.lf_top_right_boundaries[1] - self.lf_bottom_left_boundaries[1]\n #print(\"_Boundaries_\")\n #print(self.lf_bottom_left_boundaries)\n #print(self.lf_top_right_boundaries)\n #print([self.f_boundary_width,self.f_boundary_height])\n #print()\n #input(\"Press return to continue.\")\n break\n net_xml.close()\n # end def get_boundaries\n \n # Creates cells and populates self.l_cells\n def build_cells(self):\n # The first cell starts at the top left corner of the boundary.\n x = self.lf_bottom_left_boundaries[0]\n col = []\n n_num_cells = 0\n # Load cells into the cell list starting at the upper left\n # and going all the way down, then moving one column over\n # until we run out of candidates\n while x < self.lf_top_right_boundaries[0]:\n y = self.lf_top_right_boundaries[1]\n l_col = []\n while y > self.lf_bottom_left_boundaries[1]:\n cell = Cell([x,y],n_num_cells)\n l_col.append(cell)\n y -= self.n_cell_width \n n_num_cells += 1\n # end while column\n self.ll_cells.append(l_col)\n x += self.n_cell_width\n # end while row\n #print(\"_cells_\\n[cols,rows]\")\n #print([len(self.ll_cells),len(self.ll_cells[0])])\n self.ln_grid_dimensions = [len(self.ll_cells),len(self.ll_cells[0])]\n # end def build cells\n \n def draw_cells(self):\n # draw cells\n n_cols = 0\n for l_col in self.ll_cells:\n n_rows = 0\n x = self.n_graph_padding + self.n_cell_width * n_cols * self.n_edge_length_scale\n for cell in l_col:\n y = self.n_graph_padding + self.n_cell_width * n_rows * self.n_edge_length_scale\n p_top_left = Point(x,y)\n p_bottom_right = Point(x+self.n_cell_width* self.n_edge_length_scale,y+self.n_cell_width* self.n_edge_length_scale)\n rect = Rectangle(p_top_left,p_bottom_right)\n rect.setFill(\"grey\")\n \n # Is the cell active?\n if cell.get_is_active(): \n rect.draw(self.window)\n \n n_rows += 1\n # end for col\n n_cols += 1\n # end for row\n # end def draw_cells\n \n # Disable cells that contain no edges.\n def disable_unused_cells(self):\n for junction in self.l_junctions:\n lf_center_coords = junction.get_center_coords()\n ln_cell_index = self.coords_to_index(lf_center_coords)\n self.ll_cells[ln_cell_index[0]][ln_cell_index[1]].activate()\n # end for\n \n def disable_unused_cells_old(self):\n # We will use the .net.xml file again since it contains all of\n # the edge information\n net_xml = open(self.s_path_of_net_xml)\n #print(self.ln_grid_dimensions)\n for s_line in net_xml:\n if '')]\n ls_coords = s_line.split(' ')\n \n for pair in ls_coords:\n lf_coords = [float(pair.split(',')[0]),float(pair.split(',')[1])]\n ln_cell_index = self.coords_to_index(lf_coords)\n self.ll_cells[ln_cell_index[0]][ln_cell_index[1]].activate()\n # end for \n \n # end if 
shape\n # end if gneE\n # end for\n net_xml.close()\n\n # end def disable_unused_cells\n \n # Converts xy coordinates to an self.ll_cells cell index\n # @param lf_coords = [float x, float y]\n # @return = Returns the index of the cell in self.ll_cells in format\n # [int column, int row]\n def coords_to_index(self,lf_coords):\n # Find the column\n # Account for floor rounding errors during cast from float to int\n if lf_coords[1] >= self.lf_top_right_boundaries[1]:\n n_col = self.ln_grid_dimensions[0] - 1\n else:\n f_boundary_height = self.lf_top_right_boundaries[1] - self.lf_bottom_left_boundaries[1]\n y = (lf_coords[1] - self.lf_bottom_left_boundaries[1]) / f_boundary_height\n n_col = int(y * self.ln_grid_dimensions[0])\n #print(n_col)\n \n # Find the row\n # Accout for floor rounding errors during cast from float to int\n if lf_coords[0] >= self.lf_top_right_boundaries[0]:\n n_row = self.ln_grid_dimensions[1] - 1\n else:\n f_boundary_width = self.lf_top_right_boundaries[0] - self.lf_bottom_left_boundaries[0]\n x = (lf_coords[0] - self.lf_bottom_left_boundaries[0]) / f_boundary_width\n n_row = int(x * self.ln_grid_dimensions[1])\n #print(n_row)\n \n return [n_col,n_row]\n # end def coords_to_index\n \n # Creates the status window.\n def create_window(self):\n # Create the Window\n self.n_window_width = self.n_graph_padding * 2 + self.n_cell_width * self.ln_grid_dimensions[0] * self.n_edge_length_scale\n self.n_window_height = self.n_graph_padding * 2 + self.n_cell_width * self.ln_grid_dimensions[1] * self.n_edge_length_scale\n \n # If the graph exceed the maximum dimensions, reduce window\n # size to fit custom dimensions\n if self.n_window_width > self.n_window_max_width:\n self.n_window_width = self.n_window_max_width\n if self.n_window_height > self.n_window_max_height:\n self.n_window_height = self.n_window_max_height\n \n self.window = GraphWin(self.s_window_title, self.n_window_width, self.n_window_height)\n # end def create_window(self)\n \n # Get junction information from the .net.xml file\n # We look for the tags and we use the\n # id=, x=, and y= properties.\n #\n # We also create a point to be displayed on our status\n # window. The coordinates of the point are not the true\n # coordinates, but adjust to be in the visible area of\n # the window.\n def build_junctions(self): \n net_xml = open(self.s_path_of_net_xml,\"r\")\n for s_line in net_xml:\n if ' tags\n # and get Junction ids from the From= and To= properties\n def find_neighbors(self):\n net_xml = open(self.s_path_of_net_xml,\"r\")\n for s_line in net_xml:\n if ' 0:\n for lsn_neighbor in ll_sn_neighbors:\n s_neighbor_id = lsn_neighbor[0]\n n_neighbor_index = lsn_neighbor[1]\n p_neighbor_center = self.l_junctions[n_neighbor_index].get_center_point()\n \n # Draw a line from this junction to its neighbor.\n line = Line(p_center,p_neighbor_center)\n line.setArrow(\"last\")\n line.draw(self.window)\n # end for neighbors\n # end if len\n # end for junction in l_junctions\n \n # end def draw_junctions\n \n # Scrolls though the window by adjusting the visible objects \n # on the screen.\n def setCoords(self,x1,y1,x2,y2):\n self.window.setCoords(x1,y1,x2,y2)\n # End setCoords\n# End class StatusWinow\n","sub_path":"modules/gui/version3/gui/StatusWindow.py","file_name":"StatusWindow.py","file_ext":"py","file_size_in_byte":13891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"167482405","text":"\"\"\"\n:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.\nThe source code contained or described here in and all documents related\nto the source code (\"Material\") are owned by Intel Corporation or its\nsuppliers or licensors. Title to the Material remains with Intel Corporation\nor its suppliers and licensors. The Material contains trade secrets and\nproprietary and confidential information of Intel or its suppliers and\nlicensors.\n\nThe Material is protected by worldwide copyright and trade secret laws and\ntreaty provisions. No part of the Material may be used, copied, reproduced,\nmodified, published, uploaded, posted, transmitted, distributed, or disclosed\nin any way without Intel's prior express written permission.\n\nNo license under any patent, copyright, trade secret or other intellectual\nproperty right is granted to or conferred upon you by disclosure or delivery\nof the Materials, either expressly, by implication, inducement, estoppel or\notherwise. Any license under such intellectual property rights must be express\nand approved by Intel in writing.\n\n:organization: INTEL MCG PSI\n:summary: This file implements the check Audio Quality (PESQ) during WCDMA\nVoice Call on simulated network.\n:since: 07/10/2010\n:author: ccontreras\n\"\"\"\n\nimport time\n\nfrom UtilitiesFWK.Utilities import Global\nfrom acs_test_scripts.UseCase.Audio.LAB_AUDIO_WCDMA_VC_BASE import LabAudioWcdmaVcBase\nfrom ErrorHandling.AcsBaseException import AcsBaseException\nfrom acs_test_scripts.Utilities.CommunicationUtilities import ConfigsParser\n\n\nclass LabAudioWcdmaVcPesq(LabAudioWcdmaVcBase):\n\n \"\"\"\n Lab Audio Wcdma Voice Call Mobile Terminated Pesq base class.\n \"\"\"\n\n def __init__(self, tc_name, global_config):\n \"\"\"\n Constructor\n \"\"\"\n LabAudioWcdmaVcBase.__init__(self, tc_name, global_config)\n\n # Read PESQ Targets from Audio_Quality_Targets.xml\n self._pesq_target = ConfigsParser(\"Audio_Quality_Targets\").parse_audio_quality_target(\"Pesq\",\n self._codec_type,\n \"CSV\")\n\n#------------------------------------------------------------------------------\n def set_up(self):\n \"\"\"\n Setup\n \"\"\"\n LabAudioWcdmaVcBase.set_up(self)\n\n return Global.SUCCESS, \"No errors\"\n\n#------------------------------------------------------------------------------\n def run_test(self):\n \"\"\"\n Execute the test\n \"\"\"\n\n # Call LabAudioVcBase Run Test function\n LabAudioWcdmaVcBase.run_test(self)\n\n # Make a MO/MT voice call\n if self._vc_type == \"MO\":\n\n # Dial using a dummy hard-coded phone number\n self._logger.info(\"Calling 1234 ...\")\n self._voicecall_api.dial(\"1234\")\n\n # Check call status before callSetupTimeout (NS)\n self._vc_3g.check_call_connected(self._call_setup_time,\n blocking=False)\n\n # Check call status before callSetupTimeout (CDK)\n self._voicecall_api.wait_for_state(self._uecmd_types.VOICE_CALL_STATE.ACTIVE, # pylint: disable=E1101\n self._call_setup_time)\n\n elif self._vc_type == \"MT\":\n # Initiate VoiceCall to CDK\n self._vc_3g.mt_originate_call()\n # pylint: disable=E1101\n # Check call status is incoming before callSetupTimeout\n self._voicecall_api.wait_for_state(self._uecmd_types.VOICE_CALL_STATE.INCOMING,\n self._call_setup_time)\n\n # Answer incoming call\n self._voicecall_api.answer()\n\n # Check call status before callSetupTimeout (NS)\n self._vc_3g.check_call_connected(self._call_setup_time,\n blocking=False)\n\n # Check call status before callSetupTimeout (CDK)\n 
self._voicecall_api.wait_for_state(self._uecmd_types.VOICE_CALL_STATE.ACTIVE,\n self._call_setup_time)\n\n # Configure Audio output to headset (jack)\n time.sleep(self._wait_btwn_cmd)\n self._phonesystem_api.switch_audio_output(\"headset\")\n\n # Set Voice Call Volume at 100%\n self._system_api.adjust_specified_stream_volume(\"VoiceCall\", 100)\n\n # PESQ measurement\n self._logger.info(\"Start PESQ measurement\")\n self._pesq_result = self._audio_analyzer.audio_quality_mos(self._aa_deg_file_path, \"PESQ\", \"DL\")\n self._logger.info(\"PESQ result : \" + str(self._pesq_result))\n\n # Compare the result of PESQ process with PESQ targets\n # Compute test verdict (if PESQ result > PESQ target the test pass,\n # else the test fails)\n if float(self._pesq_result) > float(self._pesq_target):\n self._result_verdict = Global.SUCCESS\n else:\n self._result_verdict = Global.FAILURE\n\n # If User does not want to keep recorded audio file, it will not be stored\n # only if test is PASS\n if self._keep_record is True or self._result_verdict == Global.FAILURE:\n self._audio_analyzer.copy_from_upv(self._aa_deg_file_path, self._host_deg_file_path)\n\n # Release the call\n self._vc_3g.voice_call_network_release()\n\n try:\n # Check call is released (NS)\n self._vc_3g.check_call_idle(self._registration_timeout,\n blocking=False)\n\n # Check call is released (CDK)\n self._voicecall_api.wait_for_state(self._uecmd_types.VOICE_CALL_STATE.NOCALL, # pylint: disable=E1101\n self._call_setup_time)\n\n except AcsBaseException as acs_exception:\n self._logger.warning(\"Call release fail:\" + str(acs_exception))\n\n return (self._result_verdict, \"PESQ result : %.3f, PESQ target : %s\"\n % (self._pesq_result, self._pesq_target))\n\n#------------------------------------------------------------------------------\n def tear_down(self):\n\n # Call LabAudioVcBase Teardown function\n LabAudioWcdmaVcBase.tear_down(self)\n\n return Global.SUCCESS, \"No errors\"\n","sub_path":"ACS_v.18.20.4_1/ACS/acs_test_scripts/UseCase/Audio/LAB_AUDIO_WCDMA_VC_PESQ.py","file_name":"LAB_AUDIO_WCDMA_VC_PESQ.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"206546203","text":"#Tung-Han Yeh 25316714\n#WenHan Kong 67948871\nimport connectfour\nimport connectfour_functions\n\nMENU = '''\n ===Welcome to ConnectFour!===\n---The first player is red and the sencond is yellow---\n ---Type N to start a new game and type Q to quit---\n ---Type D to drop and type P to pop---\n ---The column number is between 1-7---\n'''\n\ndef interface() -> None:\n \n print(MENU)\n command = str(input('New game or Quit: ')).upper()\n\n while True:\n \n if command == 'N':\n game = connectfour.new_game()\n winner = connectfour.winner(game)\n break\n\n elif command == 'Q':\n return None\n\n else:\n print('Invalid command.')\n return interface()\n \n\n while winner == connectfour.NONE:\n connectfour_functions.Board(game) #drawing the board\n game = connectfour_functions.update(game) #update pieces\n winner = connectfour.winner(game) #rule check for winner\n\n print()\n\n if winner == connectfour.YELLOW:\n print('winner is yellow!')\n elif winner == connectfour.RED:\n print('winner is red!')\n else:\n print('no winner')\n print('The winner sequence is:')\n\n connectfour_functions.Board(game)\n\nif __name__ == '__main__':\n interface()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"5822443","text":"#Write your code below this line 👇\n#Hint: Remember to import the random module first. 🎲\n\nimport random\n\nrandom_coin = random.randint(1, 2)\nif random_coin == 1:\n print(\"Heads\")\nelse:\n print(\"Tails\")\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"628596310","text":"# https://leetcode.com/problems/corporate-flight-bookings/\n\nclass Solution:\n def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:\n \n ans = [0]*(n+1)\n\n for i,j,k in bookings:\n ans[i-1] += k\n ans[j] -= k\n\n ans.pop()\n \n for i in range(1,n):\n ans[i] += ans[i-1]\n \n return ans","sub_path":"Medium/corpFlightBookings.py","file_name":"corpFlightBookings.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"426667171","text":"#!/usr/local/bin/python3\n\nfrom os import listdir\nimport os\nfrom os.path import isfile, join\nimport sys\nimport math\n\n# the various filenames we'll need\npathToCompute = \"./compute.go\"\npathToLibrary = \"./sciencemeasurements/\"\npathToScienceOriginals = \"./scienceoriginals/\"\n\n#\n# this is the special \"wrong\" measurement name.\n#\nwrong_measurement = \"2 + 2 = 1\"\n\n#\n#\n\n# we need to calculate all of the values and there is a Go program that can do that and we can use it straight from the source code.\ndef myLightBounce(measurementFile, number):\n return os.popen('go run ' + pathToCompute + \" \" + pathToLibrary + measurementFile + \" \" + str(number) ).read()\n\n\n# loads all of the measurements using the \"compute\" program and passes in a light-strike of 1 to simply load all of the measurements as numbers!\ndef loadAllMeasurements(wrongfilename):\n bouncedLight = []\n wrongLight = 0\n\n for file in [f for f in listdir(pathToLibrary) if isfile(join(pathToLibrary, f))]:\n\n if wrongfilename in file:\n print(\"debug found the wrong.illogical measurement!\")\n wrongLight = myLightBounce(file, 1)\n continue\n \n bouncedLight.append( int(myLightBounce(file, 1)) )\n \n bouncedLight.append(int(wrongLight))\n\n return bouncedLight\n\n#\n# program logic here.\n\n# calculate the md5sum of the illogical file that has a preset filename so we can find it.\n#md5wrong = os.popen(\"echo '\" + wrong_measurement + \"' | md5sum\").read()\n#md5wrong = md5wrong[:-1] # because there is a '\\n' at the end that needs to be removed.\nmd5wrong = \"a3f1ef9e6298b3801298e1f04f056496\"\n\n# this is all of the measurements that are available and also includes the illogical measurement.\nmeasurements = loadAllMeasurements(md5wrong)\n\n# the file that is illogical is at the end of the list of light measurements if we found it.\nprint(\"debug: all of the measurements with the illogical one at the end:\", measurements)\n\nif measurements[:-1] == 0:\n print(\"debug: could not find the illogical.statement file. Exiting early.\")\n sys.exit(1)\n\nminimumValue = 0\n\ndef sumArray(array):\n stored = 0\n result = 0\n\n for i in array:\n i = abs(i)\n if stored == 0:\n stored = i\n continue\n\n if stored > i:\n result = result + (stored - i)\n else:\n result = result + (i - stored)\n\n stored = 0\n\n if stored != 0:\n result = result + stored\n stored = 0\n \n return result\n\nresult = sumArray(measurements)\n\n# now remove the wrong/illogical value from the calculation of mathematical relationships.\nresult = result + measurements[-1]\n\n# we average the difference amongst all of the similar set\nresult = result // (len(measurements)-1)\n\n# and we subtract from each of them to bring them all closer to zero\nfor i in range(0, len(measurements)-1):\n \n measurements[i] = measurements[i] - result\n\n# we now have the entire mathematical set of real-relationships that will attract to zero for a similar value.\n\n# output the values as an array and prove they sum to zero.\n\noutput = open(\"mathematics.array\", \"wt\")\n\noutput.write(str(measurements[:-1]))\n\noutput.close()\n\nprint(\"output:\", measurements[:-1])\n\nprint(\"proof:\", sumArray(measurements[:-1]))","sub_path":"strikinglightenmasse.py","file_name":"strikinglightenmasse.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"264253735","text":"from django.urls import path\nfrom django.urls.conf import include\nfrom . import views\nfrom django.views.decorators.csrf import csrf_exempt\n\nurlpatterns = [\n path('', views.index, name=\"expenses\"),\n path('add-expense', views.add_expense, name=\"add-expenses\"),\n path('preferences/', include('userpreferences.urls')),\n path('edit-expense/', views.expense_edit, name=\"expense-edit\"),\n path('delete-expense/', views.expense_delete, name=\"expense-delete\"),\n path('search-expenses', csrf_exempt(views.search_expenses),\n name=\"search-expenses\"),\n path('expense-summary', views.expense_category_summary, name=\"expense-summary\"),\n path('expense-stats', views.stats_view, name=\"expense-stats\"),\n path('export-csv', views.export_csv, name=\"export-csv\"),\n path('export-excel', views.export_excel, name=\"export-excel\"),\n]\n","sub_path":"expensesmanager/expenses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"624828051","text":"# Copyright 2018-present Kensho Technologies, LLC.\n\"\"\"Utilities for recording, inspecting, and manipulating metadata collected during compilation.\"\"\"\nfrom collections import namedtuple\n\nimport six\n\n\nLocationInfo = namedtuple(\n 'LocationInfo',\n (\n 'parent_location', # Location/FoldScopeLocation, the parent of the current location\n 'type', # GraphQL type object for the type at that location\n\n 'coerced_from_type', # GraphQL type object for the type before coercion,\n # or None if no coercion was applied\n\n 'optional_scopes_depth', # int, how many nested optional scopes this location is in\n 'recursive_scopes_depth', # int, how many nested recursion scopes this location is in\n 'is_within_fold', # bool, True if this location is within a fold scope;\n # fold scopes are not allowed to nest within each other.\n )\n)\n\n\n@six.python_2_unicode_compatible\nclass QueryMetadataTable(object):\n \"\"\"Query metadata container with info on locations, inputs, outputs, and tags in the query.\"\"\"\n\n def __init__(self, root_location, root_location_info):\n \"\"\"Create a new empty QueryMetadataTable object.\"\"\"\n self._root_location = root_location # Location, the root location of the entire query\n self._locations = dict() # dict, Location/FoldScopeLocation -> LocationInfo\n self._inputs = dict() # dict, input name -> input info namedtuple\n self._outputs = dict() # dict, output name -> output info namedtuple\n self._tags = dict() # dict, tag name -> tag info namedtuple\n self.register_location(root_location, root_location_info)\n\n @property\n def root_location(self):\n \"\"\"Return the root location of the query.\"\"\"\n return self._root_location\n\n def register_location(self, location, location_info):\n \"\"\"Record a new location's metadata in the metadata table.\"\"\"\n old_info = self._locations.get(location, None)\n if old_info is not None:\n raise AssertionError(u'Attempting to register an already-registered location {}: '\n u'old info {}, new info {}'\n .format(location, old_info, location_info))\n self._locations[location] = location_info\n\n def revisit_location(self, location):\n \"\"\"Revisit a location, returning the revisited location after setting its metadata.\"\"\"\n # This helper exists to avoid accidentally recording outdated metadata for the revisited\n # location. 
The metadata could be outdated, for example, if the original location_info\n # is preserved and not updated if a coercion is recorded at the given location.\n # In that case, the QueryMetadataTable will update its local info object, but the caller\n # might still be holding on to the original info object, therefore registering stale data.\n # This function ensures that the latest metadata on the location is always used instead.\n revisited_location = location.revisit()\n self.register_location(revisited_location, self.get_location_info(location))\n return revisited_location\n\n def record_coercion_at_location(self, location, coerced_to_type):\n \"\"\"Record that a particular location is getting coerced to a different type.\"\"\"\n current_info = self._locations.get(location, None)\n if current_info is None:\n raise AssertionError(u'Attempting to record a coercion at an unregistered location {}: '\n u'coerced_to_type {}'.format(location, coerced_to_type))\n\n if current_info.coerced_from_type is not None:\n raise AssertionError(u'Attempting to record a second coercion at the same location {}: '\n u'{} {}'.format(location, current_info, coerced_to_type))\n\n new_info = current_info._replace(\n type=coerced_to_type,\n coerced_from_type=current_info.type)\n self._locations[location] = new_info\n\n def get_location_info(self, location):\n \"\"\"Return the LocationInfo object for a given location.\"\"\"\n return self._locations[location]\n\n @property\n def registered_locations(self):\n \"\"\"Return an iterable of (location, location_info) tuples for all registered locations.\"\"\"\n for location, location_info in six.iteritems(self._locations):\n yield location, location_info\n\n def __str__(self):\n \"\"\"Return a human-readable str representation of the QueryMetadataTable object.\"\"\"\n return (\n u'QueryMetadataTable(root_location={}, locations={}, inputs={}, outputs={}, tags={})'\n .format(self._root_location, self._locations, self._inputs, self._outputs, self._tags)\n )\n\n def __repr__(self):\n \"\"\"Return a human-readable str representation of the QueryMetadataTable object.\"\"\"\n return self.__str__()\n","sub_path":"graphql_compiler/compiler/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":5065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"77609214","text":"import pandas as pd\nfrom datetime import datetime\nimport numpy as np\n\nif __name__ == \"__main__\":\n df = pd.read_csv('sphist.csv')\n df['Date'] = pd.to_datetime(df['Date'])\n bool_date = df['Date']>datetime(year=2015, month=4, day=1)\n df.sort_values(by='Date', inplace=True)\n #ME:\n #part3\n df = df.reset_index()\n d5=[]\n d30=[]\n d365=[]\n for i, row in df.iterrows():\n if i >= 5:\n d5.append(df.iloc[(i-5):i,5].mean())\n else:\n d5.append(np.NaN)\n if i >= 30:\n d30.append(df.iloc[(i-30):i,5].mean())\n else:\n d30.append(np.NaN)\n if i >= 365:\n d365.append(df.iloc[(i-365):i,5].mean())\n else:\n d365.append(np.NaN)\n df['5_days']=d5\n df['30_days']=d30\n df['365_days']=d365\n print(df.iloc[250:252,:])\n #part4\n df = df[df['Date']>datetime(year=1951,month=1,day=2)]\n df = df.dropna(axis=0)\n train = df[df['Date'] < datetime( year=2013, month=1, day=1)]\n test = df[df['Date'] >= datetime( year=2013, month=1, day=1)]\n #part5 MAE:\n from sklearn.linear_model import LinearRegression\n lr = LinearRegression()\n features = ['5_days', '30_days', '365_days']\n target = ['Close']\n lr.fit(train[features],train[target])\n close_test_pred = lr.predict(test[features])\n close_test_pred = [item for sublist in close_test_pred for item in sublist]\n close_test_actual = test.Close.tolist()\n absolute_diff = np.absolute(pd.Series(close_test_pred)-pd.Series(close_test_actual))\n print(absolute_diff[:5])\n","sub_path":"Predicting the Stock Market/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"75254219","text":"from server import api\nfrom flask_restful import Resource\nfrom server.models import Article\nfrom flask import g\n\n\nclass ArticleList(Resource):\n\n def get(self):\n articles = Article.query.filter_by(\n author_id = g.uid\n ).order_by(Article.add_time.desc()).all()\n return {\n 'code': 20000,\n 'data': [\n {\n 'title': article.title,\n 'content': article.content\n }\n for article in articles\n ]\n }\n\n\napi.add_resource(ArticleList, '/article/list')\n","sub_path":"example/server/views/Article/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"269006516","text":"\"\"\"Components for Inhibitatory Oscillations in Layers\"\"\"\nimport math\n\nfrom typing import List\n\nfrom leabra7 import specs as sp\nfrom leabra7 import events as ev\n\n\nclass Oscill(ev.EventListenerMixin):\n \"\"\"An object that updates the inhibition of layers according to sinusoidal\n oscillations.\"\"\"\n\n def __init__(self,\n name: str,\n layer_names: List[str],\n spec: sp.OscillSpec = None) -> None:\n \"\"\"Initialize oscilation\"\"\"\n self.name = name\n self.layer_names = layer_names\n\n if spec is None:\n self._spec = sp.OscillSpec()\n else:\n self._spec = spec\n\n self.mid = self._spec.mid\n self.inhib = self.mid\n self.amps = self._spec.amps.copy()\n self.periods = self._spec.periods.copy()\n self.tot_per = sum(self.periods)\n self.int_cycle = 0\n\n def find_period(self) -> int:\n \"\"\"Finds period of oscillation in cycle.\"\"\"\n c = self.int_cycle\n for i in range(len(self.periods)):\n if c < self.periods[i]:\n return i\n c -= self.periods[i]\n self.int_cycle -= self.tot_per\n return self.find_period()\n\n def cycle(self) -> None:\n \"\"\"Cycle of oscillation\"\"\"\n i = self.find_period()\n offset = sum(self.periods[0:i])\n self.inhib = self.mid + self.amps[i] * math.sin(\n (self.int_cycle - offset) / self.periods[i] * math.pi)\n\n self.int_cycle += 1\n\n if self.int_cycle >= self.tot_per:\n self.int_cycle -= self.tot_per\n\n def get_inhib(self) -> float:\n return self.inhib\n\n def handle(self, event: ev.Event) -> None:\n if isinstance(event, ev.Cycle):\n self.cycle()\n","sub_path":"leabra7/oscill.py","file_name":"oscill.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"45759600","text":"import re\nimport urllib2\n\nclass crawler:\n def __init__(self, url):\n self.url, self.num = url, None\n self.pattern = re.compile('\\D*(\\d*).*
', re.UNICODE)\n\n def __iter__(self):\n while True:\n if self.num:\n url = self.url + self.num\n else:\n url = self.url\n\n try:\n match = self.pattern.search(urllib2.urlopen(url).read().decode('utf-8'))\n self.num = match.group(1)\n if not self.num:\n raise StopIteration\n yield self.num\n except:\n raise StopIteration\n\nif __name__ == '__main__':\n c = crawler('http://www.heibanke.com/lesson/crawler_ex00/')\n\n for ret in c:\n print(ret)\n","sub_path":"lesson_1.py","file_name":"lesson_1.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"344870663","text":"# from rule import Rule\n# from rule_set import RuleSet\n# from segment_table import Segment, SegmentTable, Feature\n# simulation_number = 1\n#\n#\n# cons = Feature('cons')\n# voice = Feature('voice')\n# velar = Feature('velar')\n# cont = Feature('cont')\n# low = Feature('low')\n#\n#\n# d = Segment('d', {cons: '+', voice: '+', velar: '-', cont: '-', low: '-'})\n# t = Segment('t', {cons: '+', voice: '-', velar: '-', cont: '-', low: '-'})\n# g = Segment('g', {cons: '+', voice: '+', velar: '+', cont: '-', low: '-'})\n# k = Segment('k', {cons: '+', voice: '-', velar: '+', cont: '-', low: '-'})\n# z = Segment('z', {cons: '+', voice: '+', velar: '-', cont: '+', low: '-'})\n# s = Segment('s', {cons: '+', voice: '-', velar: '-', cont: '+', low: '-'})\n# a = Segment('a', {cons: '-', voice: '+', velar: '-', cont: '+', low: '+'})\n# o = Segment('o', {cons: '-', voice: '+', velar: '-', cont: '+', low: '-'})\n#\n# segment_table = SegmentTable([d, t, g, k, z, s, a, o])\n\n\nconfigurations_dict = \\\n{\n\"MUTATE_RULE_SET\": 0,\n\"MUTATE_HMM\": 1,\n\n\"EVOLVE_RULES\": False,\n\"EVOLVE_HMM\": True,\n\n\"COMBINE_EMISSIONS\": 1,\n\"MERGE_EMISSIONS\": 1,\n\"ADVANCE_EMISSION\": 1,\n\"CLONE_STATE\": 0,\n\"CLONE_EMISSION\": 1,\n\"SPLIT_EMISSION\": 1,\n\"ADD_STATE\": 1,\n\"REMOVE_STATE\": 1,\n\"MERGE_STATES\": 1,\n\"SPLIT_STATES\": 1,\n\"ADD_TRANSITION\": 1,\n\"REMOVE_TRANSITION\": 1,\n\"ADD_SEGMENT_TO_EMISSION\": 1,\n\"REMOVE_SEGMENT_FROM_EMISSION\": 1,\n\"CHANGE_SEGMENT_IN_EMISSION\": 1,\n\"ADD_EMISSION_TO_STATE\": 1,\n\"ADD_EMISSION_FROM_DATA\": 0,\n\"REMOVE_EMISSION_FROM_STATE\": 1,\n\n\"DATA_ENCODING_LENGTH_MULTIPLIER\": 25,\n\"HMM_ENCODING_LENGTH_MULTIPLIER\": 1,\n\"RULES_SET_ENCODING_LENGTH_MULTIPLIER\": 1,\n\n\"ADD_RULE\": 1,\n\"REMOVE_RULE\": 1,\n\"DEMOTE_RULE\": 1,\n\"CHANGE_RULE\": 1,\n\n\"MUTATE_TARGET\": 1,\n\"MUTATE_CHANGE\": 1,\n\"MUTATE_LEFT_CONTEXT\": 1,\n\"MUTATE_RIGHT_CONTEXT\": 1,\n\"MUTATE_OBLIGATORY\": 1,\n\n\"ADD_FEATURE_BUNDLE\": 1,\n\"REMOVE_FEATURE_BUNDLE\": 1,\n\"CHANGE_EXISTING_FEATURE_BUNDLE\": 1,\n\n\"ADD_FEATURE\": 1,\n\"REMOVE_FEATURE\": 1,\n\"CHANGE_FEATURE_VALUE\": 1,\n\n\"MAX_FEATURE_BUNDLE_IN_CONTEXT\": 1,\n\n\"MAX_NUM_OF_INNER_STATES\": 5,\n\"MIN_NUM_OF_INNER_STATES\": 1,\n\n\"MAX_NUMBER_OF_RULES\": 3,\n\"MIN_NUMBER_OF_RULES\": 0,\n\n\"MORPHEME_BOUNDARY_FLAG\": False,\n\"LENGTHENING_FLAG\": False,\n\"WORD_BOUNDARY_FLAG\": False,\n\"UNDERSPECIFICATION_FLAG\": False,\n\"RESTRICTIONS_ON_ALPHABET\": False,\n\n\"CHECK_STALEMATE\": False,\n\"INITIAL_TEMPERATURE\": 50,\n\"THRESHOLD\": 10**-1,\n\"COOLING_RATE\": 0.999995,\n\"DEBUG_LOGGING_INTERVAL\": 200,\n\"CLEAR_MODULES_CACHING_INTERVAL\": 1000,\n\"STEPS_LIMITATION\": float('inf'),\n\n\"PRINT_PARSE\": False,\n\"LINEAR_DECAY\": False\n}\n\nsegment_table_file_name = \"plural_english_segment_table.txt\"\n\nlog_file_template = \"{}_dag_zook_morphology_only_{}.txt\"\n\n\n\ndata = [u'dagzook', u'daggos', u'dagdod', u'dag', u'katzook', u'katgos', u'katdod', u'kat', u'dotzook', u'dotgos', u'dotdod', u'dot', u'kodzook', u'kodgos', u'koddod', u'kod', u'gaszook', u'gasgos', u'gasdod', u'gas', u'tozzook', u'tozgos', u'tozdod', u'toz', u'atazook', u'atagos', u'atadod', u'ata', u'asozook', u'asogos', u'asodod', u'aso']\n\ntarget_hmm = {'q0': ['q1'],\n 'q1': (['q2','qf'], ['dag', 'kat', 'dot', 'kod', 'gas', 'toz', 'ata', 'aso']),\n 'q2': (['qf'], ['zook', 'gos', 'dod'])}\n\n\ntarget_tuple = (target_hmm, 
[])\n\n","sub_path":"source/simulations/dag_zook_morphology_only.py","file_name":"dag_zook_morphology_only.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"476947965","text":"#--------------------------------------------------------------------------------------\n# 1. Bubble Sort, time O(n^2), space O(1)\ndef bubble_sort(arry):\n n = len(arry) #获得数组的长度\n for i in range(n):\n for j in range(1,n-i):\n if arry[j-1] > arry[j] : #如果前者比后者大\n arry[j-1],arry[j] = arry[j],arry[j-1] #则交换两者\n return arry\n\n#优化1:某一趟遍历如果没有数据交换,则说明已经排好序了,因此不用再进行迭代了。\n#用一个标记记录这个状态即可。\ndef bubble_sort2(ary):\n n = len(ary)\n for i in range(n):\n flag = 1 #标记\n for j in range(1,n-i):\n if ary[j-1] > ary[j] :\n ary[j-1],ary[j] = ary[j],ary[j-1]\n flag = 0\n if flag : #全排好序了,直接跳出\n break\n return ary\n\n#优化2:记录某次遍历时最后发生数据交换的位置,这个位置之后的数据显然已经有序了。\n# 因此通过记录最后发生数据交换的位置就可以确定下次循环的范围了。\ndef bubble_sort3(ary):\n n = len(ary)\n k = n #k为循环的范围,初始值n\n for i in range(n):\n flag = 1\n for j in range(1,k): #只遍历到最后交换的位置即可\n if ary[j-1] > ary[j] :\n ary[j-1],ary[j] = ary[j],ary[j-1]\n k = j #记录最后交换的位置\n flag = 0\n if flag :\n break\n return ary\n \n#--------------------------------------------------------------------------------------\n# 2. Selection Sort, time O(n^2), space O(1)\ndef select_sort(ary):\n n = len(ary)\n for i in range(n):\n min = i #最小元素下标标记\n for j in range(i+1,n):\n if ary[j] < ary[min] :\n min = j #找到最小值的下标\n ary[min],ary[i] = ary[i],ary[min] #交换两者\n return ary\n\n\n#--------------------------------------------------------------------------------------\n# 3. Insertion Sort, time O(n^2), space O(1)\ndef insert_sort(lists):\n\tfor i in range(1,len(lists)):\n\t\tkey = lists[i]\n\t\tj = i - 1\n\t\twhile lists[j] > key and j >= 0:\n\t\t\tlists[j+1] = lists[j]\n\t\t\tlists[j] = key\n\t\t\tj -= 1\n\treturn lists\n\n#--------------------------------------------------------------------------------------\n# 4. Merge Sort, time O(nlogn), space O(2n)\ndef mergesort(nums):\n if len(nums) <= 1: return nums\n mid = int(len(nums) / 2)\n left = mergesort(nums[:mid])\n right = mergesort(nums[mid:])\n return merge(left, right)\n\ndef merge(left, right):\n res = []\n while left and right:\n if left[0] < right[0]:\n res.append(left[0])\n left.pop(0)\n else:\n res.append(right[0])\n right.pop(0)\n if left:\n res += left\n else:\n res += right\n return res\n \n\n#--------------------------------------------------------------------------------------\n# 5. Quick Sort, time O(n^2), space O(1), O(nlogn) on average, O(n^2) for worst case\ndef findpivot(nums, left, right):\n pivot = left\n for j in range(left, right):\n if nums[j] <= nums[right]: # consider right value as the pivot\n nums[j], nums[pivot] = nums[pivot], nums[j]\n pivot += 1\n nums[pivot], nums[right] = nums[right], nums[pivot]\n return pivot\n\ndef quicksort(nums, left, right):\n if left >= right:\n return nums\n pivot = findpivot(nums,left,right)\n quicksort(nums, left, pivot-1)\n quicksort(nums, pivot+1, right)\n return nums\n\n#--------------------------------------------------------------------------------------\n# 6. 
Heap Sort, time O(nlogn), space O(1)\ndef heap_sort(ary) :\n n = len(ary)\n first = int(n/2-1) #最后一个非叶子节点\n for start in range(first,-1,-1) : #构造大根堆\n max_heapify(ary,start,n-1)\n for end in range(n-1,0,-1): #堆排,将大根堆转换成有序数组\n ary[end],ary[0] = ary[0],ary[end]\n max_heapify(ary,0,end-1)\n return ary\n\n\n#最大堆调整:将堆的末端子节点作调整,使得子节点永远小于父节点\n#start为当前需要调整最大堆的位置,end为调整边界\ndef max_heapify(ary,start,end):\n root = start\n while True :\n child = root*2 +1 #调整节点的子节点\n if child > end : break\n if child+1 <= end and ary[child] < ary[child+1] :\n child = child+1 #取较大的子节点\n if ary[root] < ary[child] : #较大的子节点成为父节点\n ary[root],ary[child] = ary[child],ary[root] #交换\n root = child\n else :\n break\n \n#--------------------------------------------------------------------------------------\n# 7. Timsort, hybrid of merge and insertion (only need to know that this is what Python does)\n","sub_path":"Fundamentals/Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"309865098","text":"from visual import *\r\nscene2 = display(title='ATOMO',x=0, y=0, width=600, height=500,center=(5,0,0), background=(0,0,0))\r\nscene2.lights = [vector(6,0,2)]\r\n#protons\r\nn1 = sphere(pos=(0,-1,0), radius=1, color = (0.2,0.2,0.2))\r\nn2= sphere(pos=(0,1,0), radius=1, color = (0.2,0.2,0.2))\r\np1 = sphere(pos=(-1,0,0), radius=1, color = color.red)\r\np2 = sphere(pos=(1,0,0), radius=1, color = color.red)\r\n#eletrons\r\ne1 = sphere(pos=(0,0,0), radius=0.5, color = color.blue)\r\ne2 = sphere(pos=(0,0,0), radius=0.5, color = color.blue)\r\n\r\n#lomas=label(text=\"www.lomasdeterciopelo.co.cr\", pos=(0,-19,0))\r\nt=0\r\ndt=0.1\r\nraio=10\r\ncurva1= curve(color =(1,1,1))#RGB\r\ncurva2= curve(color =(1,1,1))\r\n\r\nwhile True:\r\n\trate(20)\r\n\tt=t+dt\r\n\t\r\n\tx=sin(t)*raio\r\n\ty=cos(t)*raio\r\n\tz=cos(t)*raio\r\n\t\r\n\t\r\n\te1.pos=(x,y,z)\r\n\t\r\n\tX=cos(t+0.5)*raio\r\n\tY=cos(t+0.5)*raio\r\n\tZ=sin(t+0.5)*raio\r\n\t\r\n\te2.pos=(X,Y,Z)\r\n\t\r\n\tcurva1.append(pos=e1.pos)\r\n\tcurva2.append(pos=e2.pos)","sub_path":"2011/13_Visual_Python_Estudos/PROGRAMA_Enic_2011/Atomo/Atomo_Hélio_Didático.py","file_name":"Atomo_Hélio_Didático.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"441921250","text":"# Set sys.path to get access to all modules\nimport sys\nimport os.path\nsys.path.append('/home/ubuntu/workspace/')\n\nfrom app_voetbalelo.uefa_leagues.expand_elo import expand_elo\nfrom app_voetbalelo.uefa_leagues.game_to_team import make_standing, rank\nfrom app_voetbalelo.uefa_leagues.montecarlo import montecarlo\nfrom app_voetbalelo.uefa_leagues.get_elo_data import get_elo_data\nfrom app_voetbalelo.uefa_leagues.get_elo_team import get_elo_team\nfrom app_voetbalelo.uefa_leagues.functions.ordered_set import ordered_set\n\nimport copy\nimport datetime\nimport json\nimport pickle\nimport ftplib\nimport boto3\n\n# Get Input data\nimport pickle\ngames = pickle.load(open(\"app_voetbalelo/uefa_leagues/data/games.p\",\"rb\"))\nleagues = [\"ucl\",\"uel\"]\nsimulations = 50\n\nteam_data = dict()\ngroup_data = dict()\nknockout_round = dict()\nfor league in leagues:\n if league == \"ucl\":\n print(\"########################################\")\n print(\"Uefa Champions League Algorithm\")\n print(\"########################################\")\n else:\n print(\"########################################\")\n print(\"Uefa Europa League Algorithm\")\n print(\"########################################\")\n \n # Initialize output\n team_data[league] = dict()\n # group_data[league] = dict()\n \n # # Expand games data with elo data\n print(\"Get Elo Data from clubelo.com\")\n panda = expand_elo(games[league])\n \n # Generate standing and rank it correctly according to the UEFA tiebreaking procedure\n print(\"Generate Group Standing and Rank according to Uefa rules\")\n standing = make_standing(panda)\n standing = rank(standing, panda)\n \n # Montecarlo\n print(\"Montecarlo Algorithm\")\n \n if league == \"ucl\":\n ucl_third = []\n output = montecarlo(panda,simulations,league,ucl_third)\n third_place_teams = output[1]\n third_place_teams_elo = output[2]\n output = output[0]\n elif league == \"uel\":\n ucl_third = [third_place_teams,third_place_teams_elo]\n output = montecarlo(panda,simulations,league,ucl_third)\n\n # Write json\n import numpy as np\n team_elo = get_elo_data(datetime.datetime.now())\n for team in output.keys():\n team_data[league][team] = dict()\n team_data[league][team][\"odds\"] = output[team]\n team_data[league][team][\"elo\"] = round(get_elo_team(team,team_elo))\n \n for group in standing.keys():\n if team in standing[group][0]:\n team_index = standing[group][0].index(team)\n team_data[league][team][\"group\"] = group.split(\" \")[1]\n for i in range(len(standing[group][1].tolist())):\n if int(standing[group][1].tolist()[i][0]) == team_index:\n team_data[league][team][\"ranking\"] = i\n team_data[league][team][\"standing\"] = standing[group][1].tolist()[i]\n break\n \n # Knockout rounds\n knockout_round[league] = []\n knockout_rounds = ordered_set((panda[~panda.TYPE.str.contains(\"Group\")].TYPE))\n for i in range(len(knockout_rounds)):\n knockout_round[league].append(dict())\n knockout_round[league][-1][\"round\"] = knockout_rounds[i]\n knockout_round[league][-1][\"odds_index\"] = 4+i\n knockout_round[league][-1][\"games\"] = []\n \n dummy = panda[panda.TYPE == knockout_rounds[i]]\n for i in range(len(dummy)):\n ht = dummy.iloc[i].HomeTeam\n at = dummy.iloc[i].AwayTeam\n if [ht,at] in knockout_round[league][-1][\"games\"] or [at,ht] in knockout_round[league][-1][\"games\"]:\n continue\n else:\n knockout_round[league][-1][\"games\"].append([ht,at])\n \njson.dump(team_data,open(\"app_voetbalelo/uefa_leagues/result/team_data.json\",\"w\"))\nteams = 
pickle.load(open(\"app_voetbalelo/uefa_leagues/data/teams.p\",\"rb\"))\njson.dump(teams,open(\"app_voetbalelo/uefa_leagues/result/teams.json\",\"w\"))\njson.dump(knockout_round,open(\"app_voetbalelo/uefa_leagues/result/knockout_round.json\",\"w\"))\n\n# # Write to FTP site\n# session = ftplib.FTP('ftp.sway-blog.be','sway-blog.be','Will0870')\n# session.cwd('/www/data/elo-uefa-leagues')\n\n# # Open data as JSON buffered (only way ftplib works)\n# data = open(\"app_voetbalelo/uefa_leagues/result/team_data.json\",\"rb\") # file to send\n# session.storbinary('STOR data.json', data) # send the file\n# data = open(\"app_voetbalelo/uefa_leagues/result/teams.json\",\"rb\") # file to send\n# session.storbinary('STOR teams.json', data) # send the file\n# data = open(\"app_voetbalelo/uefa_leagues/result/knockout_round.json\",\"rb\") # file to send\n# session.storbinary('STOR knockout_round.json', data) # send the file\n# # Create dict with last update date\n# # Save as json and load buffered\n# last_update = {\"date\": datetime.datetime.now().strftime(\"%d/%m/%Y\")}\n# json.dump(last_update,open(\"app_voetbalelo/uefa_leagues/result/last_update.json\",\"w\"))\n# last_update = open(\"app_voetbalelo/uefa_leagues/result/last_update.json\",\"rb\")\n# session.storbinary('STOR date.json', last_update)\n\n# session.quit()\n\n# Upload to Amazon S3 Bucket\n\nsession = boto3.Session(region_name='eu-central-1',aws_access_key_id='AKIAIOW6REVSI6EASEIA',aws_secret_access_key='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')\ns3 = session.resource('s3')\ns3.Object('swayblog', 'uefa_leagues/data.json').put(Body=open(\"app_voetbalelo/uefa_leagues/result/team_data.json\",\"rb\"),ACL='public-read')\ns3.Object('swayblog', 'uefa_leagues/teams.json').put(Body=open(\"app_voetbalelo/uefa_leagues/result/teams.json\",\"rb\"),ACL='public-read')\ns3.Object('swayblog', 'uefa_leagues/kockout_round.json').put(Body=open(\"app_voetbalelo/uefa_leagues/result/knockout_round.json\",\"rb\"),ACL='public-read')\n\nlast_update = {\"date\": datetime.datetime.now().strftime(\"%d/%m/%Y\")}\njson.dump(last_update,open(\"app_voetbalelo/uefa_leagues/result/last_update.json\",\"w\"))\ns3.Object('swayblog', 'uefa_leagues/date.json').put(Body=open(\"app_voetbalelo/uefa_leagues/result/last_update.json\",\"rb\"),ACL='public-read')","sub_path":"app_voetbalelo/uefa_leagues/run_script.py","file_name":"run_script.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"64409241","text":"# HackerRank Solution\n\ndef count_substring(S, sub):\n count = 0\n while sub in S:\n i = S.find(sub)\n S = S[:i] + S[i + 1:]\n count += 1\n return count\n\n\nif __name__ == '__main__':\n string = input().strip()\n sub_string = input().strip()\n\n count = count_substring(string, sub_string)\n print(count)","sub_path":"Python Practice/FindingString.py","file_name":"FindingString.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"152487743","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 10 16:34:50 2017\n\n@author: yuchenli\n\"\"\"\n\nimport pandas as pd\n\n\"\"\"\nProfile_Trials_Local, Profile_Trials_Standard, Profile_Press, \nProfile_Sanctions, Profile_Payments\n\"\"\"\n\n# Import and merge tab 9\ntab_9 = []\n \nfor i in range(1,5):\n \n tab_9.append(pd.read_excel(\"TOP_ONCOLOGY_Existing_Data_pt\" + str(i) + \n \".xlsx\", sheetname = \"Profile_Trials_Local\"))\n \ntab_9_df = pd.concat(tab_9)\ndel tab_9\ntab_9_df.to_csv('Profile_Trials_Local.csv', sep = \",\", index = False)\n\n# Import and merge tab 10\ntab_10 = []\n \nfor i in range(1,5):\n \n tab_10.append(pd.read_excel(\"TOP_ONCOLOGY_Existing_Data_pt\" + str(i) + \n \".xlsx\", sheetname = \"Profile_Trials_Standard\"))\n \ntab_10_df = pd.concat(tab_10)\ndel tab_10\ntab_10_df.to_csv('Profile_Trials_Standard.csv', sep = \",\", index = False)\n\n# Import and merge tab 11\ntab_11 = []\n \nfor i in range(1,5):\n \n tab_11.append(pd.read_excel(\"TOP_ONCOLOGY_Existing_Data_pt\" + str(i) + \n \".xlsx\", sheetname = \"Profile_Press\"))\n \ntab_11_df = pd.concat(tab_11)\ndel tab_11\ntab_11_df.to_csv('Profile_Press.csv', sep = \",\", index = False)\n\n# Import and merge tab 12\ntab_12 = []\n \nfor i in range(1,5):\n \n tab_12.append(pd.read_excel(\"TOP_ONCOLOGY_Existing_Data_pt\" + str(i) + \n \".xlsx\", sheetname = \"Profile_Sanctions\"))\n \ntab_12_df = pd.concat(tab_12)\ndel tab_12\ntab_12_df.to_csv('Profile_Sanctions.csv', sep = \",\", index = False)\n\n# Import and merge tab 13\ntab_13 = []\nfor i in range(1,5):\n \n tab_13.append(pd.read_excel(\"TOP_ONCOLOGY_Existing_Data_pt\" + str(i) + \n \".xlsx\", sheetname = \"Profile_Payments\"))\n \ntab_13_df = pd.concat(tab_13)\ndel tab_13\ntab_13_df.to_csv('Profile_Payments.csv', sep = \",\", index = False)\n\n\n\n","sub_path":"pre_processing/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"604791882","text":"import argparse\nimport random\nimport os\nimport sys\nimport pickle\nfrom collections import defaultdict\nimport numpy\n\n\ndef upload_model(path):\n \"\"\"Функция принимает 2 аргумента:\n 1) path - str, Путь до файла с моделью\n Функция возвращает модель (defaultdict(dict))\n Считываем модель с помощью pickle \"\"\"\n with open(path, 'rb') as input:\n model_dict = dict(pickle.load(input))\n return model_dict\n\n\ndef generate_text(model_dict, current, lenght, output, string_lenght):\n \"\"\"Функция генерирования, принимает 4 аргумента:\n 1) model_dict - defaultdict(dict), словарь с моделью\n 2) current - str, слово для которого мы ищем пару\n 3) lenght - int, длина конечной последовательности\n 4) output - str, либо 'stdout', либо путь до файла, в\n который записывать текст Функция ничего не возвращает\n для каждого слова составляем список, в котором с нужной частотой\n встречаются слова,которые могут идти после него в тексте. Далее\n функцией random.choise выбираеся следующее слово и сразу выводится\"\"\"\n text = []\n if output != 'stdout':\n sys.stdout = open(output, 'w')\n print(current, ' ', end='')\n for i in range(1, lenght):\n while not model_dict.get(current):\n current = random.choice(list(model_dict.keys()))\n generation_list = list(model_dict[current].keys())\n frequency_list = list(model_dict[current].values())\n current = numpy.random.choice(generation_list, p=frequency_list)\n text.append(current)\n if len(text) > string_lenght:\n print(' '.join(text))\n text.clear()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Программа на основе заданной\"\n \" модели генерирует текст заданной длины.\"\n \" Для текста можно указать начальное \"\n \"слово, иначе оно выберется \"\n \"автоматически\")\n parser.add_argument('--lenght', dest='lenght', type=int, required=True,\n help='lenght of generated text')\n parser.add_argument('--model', dest='model', required=True,\n help='path to file with model')\n parser.add_argument('--output', dest='output', default='stdout',\n help='path to output file')\n parser.add_argument('--seed', dest='seed', help='it is seed')\n parser.add_argument('--string-lenght', dest='str_lenght', default='10',\n help='lenght of string in output, '\n 'if not set than it will be 10', type=int)\n\n \"\"\"Вызываем функцию которая загружает модель, далее проверяем задано\n ли первое слово, если нет то выбираем его случайным образом среди всех\n ключей словаря, записываем в current. Вызываем функцию генератора текста\"\"\"\n model_dict = upload_model(parser.parse_args().model)\n if parser.parse_args().seed:\n current = parser.parse_args().seed\n else:\n \"\"\"генерируем seed, если его не указали\"\"\"\n current = random.choice(list(model_dict.keys()))\n generate_text(model_dict, current, parser.parse_args().lenght,\n parser.parse_args().output, parser.parse_args().str_lenght)\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"112048647","text":"def getStrings(db):\n\tfoos = getFoos(db)\n\t\n\tstringSubs = {\n\t\t\"foo_pivot_select\": \", \".join((\"{0}.bar AS {0}\".format(foo) for foo in foos)),\n \"foo_pivot_from\": \", \".join((\"mdb_test_1 {}\".format(foo) for foo in foos)),\n \"foo_pivot_where\": \" AND \".join((\"{0}.foo LIKE '{0}'\".format(foo) for foo in foos))\n\t}\n\t\n\treturn stringSubs\n\n\t\ndef getFoos(db):\n\tsql = \"SELECT DISTINCT foo FROM mdb_test_1\"\n\tresults = db.query(sql)\n\tfoos = [row[0] for row in results]\n\n\treturn foos\n","sub_path":"examples/sql/template_providers/foo_types.py","file_name":"foo_types.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"140254899","text":"import sys\nfrom collections import deque\n\nfin = sys.stdin\nfout = sys.stdout\nfin = open(\"mootube.in\", \"r\")\nfout = open(\"mootube.out\", \"w\")\n\nnum_n, num_q = fin.readline().strip().split()\nnum_n, num_q = int(num_n), int(num_q)\nrelevance = {}\nq = []\ndef get_input():\n for i in range(num_n-1):\n x, y, r = fin.readline().strip().split()\n x, y, r = int(x), int(y), int(r)\n if x not in relevance:\n relevance[x] = {}\n if y not in relevance: \n relevance[y] = {}\n relevance[x][y] = r\n relevance[y][x] = r\n for i in range(num_q):\n k, v = fin.readline().strip().split()\n q.append([int(k), int(v)])\n\ndef get_answer(k, v):\n to_visit = deque()\n seen = set()\n to_visit.append(v) # a queue of nodes that need to examine new edge from it\n seen.add(v) # already put in the queue\n ret = 0\n while to_visit:\n v1 = to_visit.pop()\n for v2 in relevance[v1]:\n if v2 not in seen and relevance[v1][v2] >= k:\n ret += 1\n to_visit.append(v2) \n seen.add(v2) \n return ret\n\nget_input()\n\nfor k, v in q:\n ret = get_answer(k, v)\n print(ret, file=fout)\n","sub_path":"jan18/mt/mt2.py","file_name":"mt2.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"240997589","text":"\nimport json\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\n# 超时时间\nMARKET_TIMEOUT = 3\n\nclass PaperTrading():\n \"\"\"模拟交易\"\"\"\n\n def __init__(self, url: str = \"\", port: str = \"\", token: str = None, info: str = \"\"):\n \"\"\"构造函数\"\"\"\n if url and port:\n self.home = ':'.join([url, port])\n else:\n raise ConnectionError(\"地址或者端口不能为空\")\n\n # 连接模拟交易所\n result, msg = self.connect()\n\n if not result:\n self.connected = False\n raise ConnectionError(msg)\n\n if token:\n self._token = token\n else:\n status, new_token = self.creat(info)\n if status:\n self._token = new_token\n self.connected = True\n else:\n raise ValueError(new_token)\n\n def get_token(self):\n \"\"\"获取token\"\"\"\n return self._token\n\n def get_url(self, method_name:str):\n \"\"\"生成url\"\"\"\n return \"/\".join([self.home, method_name])\n\n def connect(self):\n \"\"\"连接模拟交易程序\"\"\"\n url = self.get_url(\"\")\n r = requests.get(url, timeout=MARKET_TIMEOUT)\n if r.status_code == requests.codes.ok:\n return True, \"\"\n else:\n return False, \"模拟交易连接失败\"\n\n def url_request(func):\n \"\"\"请求函数的装饰器\"\"\"\n def wrapper(self, *args, **kwargs):\n if not self.connect():\n return False, \"模拟交易服务连接失败\"\n\n r = func(self, *args, **kwargs)\n\n if r.status_code == requests.codes.ok:\n d = json.loads(r.text)\n\n if d[\"status\"]:\n return True, d[\"data\"]\n else:\n return False, d[\"data\"]\n else:\n return False, \"请求状态不正确\"\n\n return wrapper\n\n @url_request\n def creat(self, info):\n \"\"\"创建模拟交易账户\"\"\"\n url = self.get_url(\"creat\")\n data = {'info': info}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n if r.status_code == requests.codes.ok:\n d = json.loads(r.text)\n if d[\"status\"]:\n self._token = d[\"data\"]\n\n return r\n\n @url_request\n def delete(self):\n \"\"\"删除模拟交易账户\"\"\"\n url = self.get_url(\"delete\")\n data = {'token': self._token}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def get_list(self):\n \"\"\"查询账户列表\"\"\"\n url = self.get_url(\"list\")\n r = requests.get(url, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def account(self):\n \"\"\"查询账户信息\"\"\"\n url = self.get_url(\"account\")\n data = {'token': self._token}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def pos(self):\n \"\"\"查询持仓信息\"\"\"\n url = self.get_url(\"pos\")\n data = {'token': self._token}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def orders(self):\n \"\"\"查询交割单信息\"\"\"\n url = self.get_url(\"orders\")\n data = {'token': self._token}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def orders_today(self):\n \"\"\"查询交割单信息\"\"\"\n url = self.get_url(\"orders_today\")\n data = {'token': self._token}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def order_send(self, order):\n \"\"\"发单\"\"\"\n if isinstance(order, dict):\n order = json.dumps(order)\n order.encode(\"utf-8\")\n url = self.get_url(\"send\")\n data = {\"order\": order}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def order_cancel(self, order_id):\n \"\"\"撤单\"\"\"\n url = self.get_url(\"cancel\")\n data = {'token': self._token, \"order_id\": order_id}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def order_status(self, order_id):\n \"\"\"查询订单状态\"\"\"\n url = self.get_url(\"status\")\n data = 
{'token': self._token, \"order_id\": order_id}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def liquidation(self, check_date: str, price_dict: dict):\n \"\"\"清算\"\"\"\n price_dict_data = json.dumps(price_dict)\n url = self.get_url(\"liquidation\")\n data = {'token': self._token, 'check_date': check_date, \"price_dict\": price_dict_data.encode(\"utf-8\")}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def report(self, start: str, end: str):\n \"\"\"查询报告\"\"\"\n url = self.get_url(\"report\")\n data = {'token': self._token, 'start': start, 'end': end}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def account_record(self, start: str,end: str):\n \"\"\"查询账户逐日记录数据\"\"\"\n url = self.get_url(\"account_line\")\n data = {'token': self._token, 'start': start, 'end': end}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n @url_request\n def pos_record(self, start: str, end: str):\n \"\"\"查询账户逐日记录数据\"\"\"\n url = self.get_url(\"pos_record\")\n data = {'token': self._token, 'start': start, 'end': end}\n r = requests.post(url, data, timeout=MARKET_TIMEOUT)\n return r\n\n def show_report(self, report_dict: dict):\n \"\"\"显示报告\"\"\"\n # 数据分析报告\n self.output(\"-\" * 30)\n self.output(f\"首个交易日:\\t{report_dict['start_date']}\")\n self.output(f\"最后交易日:\\t{report_dict['end_date']}\")\n\n self.output(f\"总交易日:\\t{report_dict['total_days']}\")\n self.output(f\"盈利交易日:\\t{report_dict['profit_days']}\")\n self.output(f\"亏损交易日:\\t{report_dict['loss_days']}\")\n\n self.output(f\"起始资金:\\t{report_dict['captial']:,.2f}\")\n self.output(f\"结束资金:\\t{report_dict['end_balance']:,.2f}\")\n\n self.output(f\"总收益率:\\t{report_dict['total_return']:,.2f}%\")\n self.output(f\"年化收益:\\t{report_dict['annual_return']:,.2f}%\")\n self.output(f\"最大回撤:\\t{report_dict['max_drawdown']:,.2f}\")\n self.output(f\"最大回撤:{report_dict['max_ddpercent']:,.2f}%\")\n\n self.output(f\"总盈亏 :\\t{report_dict['total_net_pnl']:,.2f}\")\n self.output(f\"总手续费:\\t{report_dict['total_commission']:,.2f}\")\n self.output(f\"总滑点 :\\t{report_dict['total_slippage']:,.2f}\")\n self.output(f\"总成交金额:\\t{report_dict['total_turnover']:,.2f}\")\n self.output(f\"总成交笔数:\\t{report_dict['total_trade_count']}\")\n\n self.output(f\"盈利个股数量:\\t{report_dict['win_num']:,.2f}\")\n self.output(f\"亏损个股数量:\\t{report_dict['loss_num']:,.2f}\")\n self.output(f\"胜率 :\\t{report_dict['win_rate']:,.2f}%\")\n\n self.output(f\"平均收益:\\t{report_dict['daily_return']:,.2f}\")\n self.output(f\"收益标准差:\\t{report_dict['return_std']:,.2f}%\")\n self.output(f\"Sharpe Ratio:\\t{report_dict['sharpe_ratio']:,.2f}\")\n\n def show_account_line(self, account_record: list):\n \"\"\"显示资产曲线\"\"\"\n assets_df = pd.DataFrame(account_record)\n assets_df.sort_values(by='check_date', ascending=True, inplace=True)\n assets_df.index = assets_df['check_date']\n\n # 显示资产曲线\n plt.figure(figsize=(15, 5))\n plt.title(\"总资产曲线\")\n plt.xlabel(\"日期\")\n plt.ylabel(\"总资产(元)\")\n plt.plot(assets_df['assets'])\n plt.show()\n\n # 显示持仓曲线\n\n def show_pos_record(self, pos_record: list):\n \"\"\"显示持仓情况\"\"\"\n pos_df = pd.DataFrame(pos_record)\n pos_df.sort_values(by=['first_buy_date'], ascending=True, inplace=True)\n for i, row in pos_df.iterrows():\n print(\"代码:{}, 首次买入:{}, 最后卖出:{}, 累计买入:{}, 买均价:{}, 卖均价:{}, 盈亏:{}\".format(\n row['pt_symbol'],\n row['first_buy_date'],\n row['last_sell_date'],\n row['max_vol'],\n row['buy_price_mean'],\n row['sell_price_mean'],\n row['profit']\n ))\n\n def 
show_orders(self, order_list: list):\n \"\"\"显示订单\"\"\"\n order_df = pd.DataFrame(order_list)\n order_df.sort_values(by=['order_id'], ascending=True, inplace=True)\n for i, row in order_df.iterrows():\n print(\"日期:{}, 时间:{}, 类型:{}, 委托价格:{},成交价格:{}, 成交数量:{}\".format(\n row['order_date'],\n row['order_time'],\n row['order_type'],\n row['order_price'],\n row['trade_price'],\n row['volume']\n ))\n\n def show_pos(self, pos_list: list):\n \"\"\"显示持仓情况\"\"\"\n pos_df = pd.DataFrame(pos_list)\n pos_df.sort_values(by=['profit'], ascending=False, inplace=True)\n for i, row in pos_df.iterrows():\n print(\"证券代码:{}, 买入日期:{}, 总持仓:{}, 可用持仓:{}, 买入均价:{}, 当前价格:{}, 盈亏金额:{}\".format(\n row['pt_symbol'],\n row['buy_date'],\n row['volume'],\n row['available'],\n row['buy_price'],\n row['now_price'],\n row['profit']\n ))\n\n @staticmethod\n def output(msg):\n print(f\"{datetime.now()}\\t{msg}\")\n\n\nif __name__ == \"__main__\":\n pt = PaperTrading()\n result, data = pt.creat(\"测试用账号\")\n print(result)\n print(data)\n\n","sub_path":"example/pt_api.py","file_name":"pt_api.py","file_ext":"py","file_size_in_byte":10172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"297441667","text":"\"\"\"skeleton URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.root, name='root')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='root')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, re_path\n\nurlpatterns = [\n re_path(r'^monitoring/', include('monitoring.urls')),\n re_path('^', include('root.urls')),\n re_path(r'^admin/', admin.site.urls),\n]\n","sub_path":"skeleton/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"541777802","text":"__author__ = 'ian.polding'\n\nfrom Tkinter import Tk, Frame, BOTH, Button\nimport tkFileDialog\nimport re\nimport os\n\n#Tk is used to create a root window\n#Frame is a container for other Widgets\n\nclass Example(Frame):\n\n\n\n def __init__(self, parent):\n # we call the constructor of Frame - the inherited class\n Frame.__init__(self, parent, background=\"white\")\n #we store a reference to the parent widget - in this case it is Tk root window\n self.parent = parent\n self.initUI()\n self.fileToProcess = None\n\n def initUI(self):\n # Here we can set the title of the tk window\n self.parent.title(\"Simple\")\n #pack organises widgets into vertical and horizontal boxes. It is one of three geometry managers\n #expanded in both directions\n self.pack(fill=BOTH, expand=1)\n\n openFileButton = Button(self, text=\"Load file\", command=self.openFile)\n openFileButton.place(x=50, y=50)\n\n processFileButton = Button(self, text=\"Process a file\", command=self.processFile)\n processFileButton.place(x=50, y=100)\n\n\n\n\n\n\n def openFile(self):\n self.fileToProcess = tkFileDialog.askopenfile('r+', title=\"Select file to process\")\n return self.fileToProcess\n\n def processFile(self):\n if self.fileToProcess:\n fileAsString = self.fileToProcess.read()\n userRegEx = raw_input(\"Enter the regular expression to remove:\")\n pattern = re.compile(userRegEx)\n outputFileName = tkFileDialog.asksaveasfilename()\n outputFile = open(outputFileName, 'w+')\n outputFile.write(pattern.sub('', fileAsString))\n outputFile.close()\n os.system('notepad ' + outputFileName)\n\n\n\n\n\ndef main():\n\n root = Tk()\n #window size\n root.geometry(\"250x150+300+300\")\n app = Example(root) #this root is the parent variable\n root.mainloop()\n fileToProcess = None\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# quitButton = Button(self, text=\"Quit\", command=self.quit)\n # quitButton.place(x=50, y=0)\n\n\n\n\n\n\n\n\n","sub_path":"Python/RegExRemover/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"647811118","text":"__NetID__ = \"mgfrakes13\"\n__GitHubID__ = \"mgfrakes\"\n__SelfGrade__ = \"5\"\n__Challenge__ = \"4\"\n\n\"\"\"\nRandom Signals and Systems\nCourse: ECEN 303-502\nMaximum Grade: 5pt\n\"\"\"\n\nimport random\nimport math\nimport numpy\nimport pylab\n\nTrialNumber = 100000\n\nUniformList = []\nRayleighList = []\nfor trial1 in range(0, TrialNumber):\n UniformList.append(random.uniform(0, 2*math.pi))\n RayleighList.append(numpy.random.rayleigh(1))\n\npylab.figure()\nn, bins, patches = pylab.hist(UniformList, 1000, normed=1, histtype='stepfilled')\npylab.setp(patches, 'facecolor', 'k', 'alpha', 0.75)\n\npylab.figure()\nn, bins, patches = pylab.hist(RayleighList, 1000, normed=1, histtype='stepfilled')\npylab.setp(patches, 'facecolor', 'g', 'alpha', 0.75)\n\nSequence1 = []\nSequence2 = []\nSequence3 = []\nfor trial2 in range(0, TrialNumber):\n Sequence1.append(math.sin(UniformList[trial2]) * RayleighList[trial2])\n Sequence2.append(math.cos(UniformList[trial2]) * RayleighList[trial2])\n Sequence3.append(Sequence1[trial2]**2 + Sequence2[trial2]**2)\n\npylab.figure()\nn, bins, patches = pylab.hist(Sequence1, 1000, normed=1, histtype='stepfilled')\npylab.setp(patches, 'facecolor', 'r', 'alpha', 0.75)\n\npylab.figure()\nn, bins, patches = pylab.hist(Sequence2, 1000, normed=1, histtype='stepfilled')\npylab.setp(patches, 'facecolor', 'y', 'alpha', 0.75)\n\npylab.figure()\nn, bins, patches = pylab.hist(Sequence3, 1000, normed=1, histtype='stepfilled')\npylab.setp(patches, 'facecolor', 'b', 'alpha', 0.75)\n\npylab.show()\n\nprint(\"What is the type of random variable `Sequence1`?\")\nprint(\"Continuous\")\nprint(\"What is its mean and variance?\")\nprint(numpy.mean(Sequence1))\nprint(numpy.var(Sequence1))\nprint(\"What is the type of random variable `Sequence2`?\")\nprint(\"Continuous\")\nprint(\"What is its mean and variance?\")\nprint(numpy.mean(Sequence2))\nprint(numpy.var(Sequence2))\nprint(\"What is the type of random variable `Sequence3`?\")\nprint(\"Continuous\")\nprint(\"What is its mean and variance?\")\nprint(numpy.mean(Sequence3))\nprint(numpy.var(Sequence3))\nprint(\"What is the empirical covariance between `Sequence1` and `Sequence2`?\")\nprint(numpy.cov(Sequence1,Sequence2))\nprint(\"Do you think they are independent? Justify your answer.\")\nprint(\"Yes. They both dip near zero in the middle.\")","sub_path":"Students/mgfrakes/4challenge.py","file_name":"4challenge.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"396709825","text":"from flask_cors.decorator import cross_origin\nimport requests\nimport json\nfrom booliapi import BooliApi\nfrom flask import Flask, render_template, jsonify, make_response\nfrom typing import List\nfrom flask_cors import CORS\nfrom expiringdict import ExpiringDict\ncache = ExpiringDict(max_len=100, max_age_seconds=60*60*24)\n\napp = Flask(__name__)\nCORS(app)\napp.run(debug=True)\n\nwith open('config.json') as config_file:\n config = json.load(config_file)\n\nbooli_api = BooliApi(\n user_agent='Isak Friis-Jespersen',\n base_url='https://api.booli.se',\n id_caller=config.get('callerId'),\n private_key=config.get('privateKey')\n)\n\n\n@app.route('/get-booli-data//', methods=['GET'])\n@cross_origin()\ndef get_booli_data(resource, q):\n if cache.get(f'/get-booli-data/{resource}/{q}'):\n return cache.get(f'/{resource}/{q}')\n max_objects_to_fetch = 5000\n limit: int = 500\n offset: int = 0\n next_links: bool = True\n data: List[dict] = []\n while(next_links):\n url: str = '{}{}'.format(\n booli_api.base_url,\n booli_api.create_booli_parameters(\n resource=resource,\n q=q,\n limit=limit,\n offset=offset\n )\n )\n response = requests.get(url, headers=booli_api.headers)\n response: json = response.json()\n data += booli_api.extract_date_price(data=response.get('sold'))\n if (\n offset >= response.get('totalCount') or\n offset >= max_objects_to_fetch\n ):\n next_links: bool = False\n offset: int = booli_api.get_offset(\n count=response.get('count'),\n offset=offset\n )\n res: json = jsonify({'res': data})\n cache[f'/get-booli-data/{resource}/{q}'] = res\n return make_response(res, 200)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"608570671","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 10:14, 18/03/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom numpy.random import uniform, randint, choice, rand\nfrom numpy import array, mean, setxor1d\nfrom copy import deepcopy\nfrom mealpy.root import Root\n\n\nclass BaseTLO(Root):\n \"\"\"\n An elitist teaching-learning-based optimization algorithm for solving complex constrained optimization problems(TLO)\n This is my version taken the advantages of numpy array to faster handler operations.\n \"\"\"\n def __init__(self, objective_func=None, problem_size=50, domain_range=(-1, 1), log=True, epoch=750, pop_size=100):\n Root.__init__(self, objective_func, problem_size, domain_range, log)\n self.epoch = epoch\n self.pop_size = pop_size\n\n def _calculate_mean__(self, pop=None):\n temp = mean(array([item[self.ID_POS] for item in pop]), axis=0)\n return temp\n\n def _train__(self):\n pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]\n g_best = self._get_global_best__(pop=pop, id_fitness=self.ID_FIT, id_best=self.ID_MIN_PROB)\n\n for epoch in range(self.epoch):\n for i in range(self.pop_size):\n\n ## Teaching Phrase\n TF = randint(1, 3) # 1 or 2 (never 3)\n MEAN = self._calculate_mean__(pop)\n arr_random = rand(self.problem_size)\n DIFF_MEAN = arr_random * (g_best[self.ID_POS] - TF * MEAN)\n temp = pop[i][self.ID_POS] + DIFF_MEAN\n fit = self._fitness_model__(temp)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [temp, fit]\n\n ## Learning Phrase\n temp = deepcopy(pop[i][self.ID_POS])\n id_partner = choice(setxor1d(array(range(self.pop_size)), array([i])))\n arr_random = rand(self.problem_size)\n if pop[i][self.ID_FIT] < pop[id_partner][self.ID_FIT]:\n temp += arr_random * (pop[i][self.ID_POS] - pop[id_partner][self.ID_POS])\n else:\n temp += arr_random * (pop[id_partner][self.ID_POS] - pop[i][self.ID_POS])\n fit = self._fitness_model__(temp)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [temp, fit]\n\n g_best = self._update_global_best__(pop, self.ID_MIN_PROB, g_best)\n self.loss_train.append(g_best[self.ID_FIT])\n if self.log:\n print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, g_best[self.ID_FIT]))\n\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train\n\n\nclass OriginalTLO(BaseTLO):\n \"\"\"\n Teaching-learning-based optimization: A novel method for constrained mechanical design optimization problems (TLO)\n This is slower version which inspired from this version:\n https://github.com/andaviaco/tblo\n \"\"\"\n\n def __init__(self, objective_func=None, problem_size=50, domain_range=(-1, 1), log=True, epoch=750, pop_size=100):\n BaseTLO.__init__(self, objective_func, problem_size, domain_range, log, epoch, pop_size)\n\n def _train__(self):\n pop = [self._create_solution__(minmax=0) for _ in range(self.pop_size)]\n for epoch in range(self.epoch):\n for i in range(self.pop_size):\n\n ## Teaching Phrase\n TF = randint(1, 3) # 1 or 2 (never 3)\n best = self._get_global_best__(pop=pop, id_fitness=self.ID_FIT, id_best=self.ID_MIN_PROB)\n temp = deepcopy(pop[i][self.ID_POS])\n for j in range(self.problem_size):\n s_mean = mean([item[self.ID_POS][j] for item in pop])\n r = uniform()\n diff_mean = 
best[self.ID_POS][j] - TF * s_mean\n temp[j] = pop[i][self.ID_POS][j] + r * diff_mean\n fit = self._fitness_model__(temp)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [temp, fit]\n\n ## Learning Phrase\n temp = deepcopy(pop[i][self.ID_POS])\n id_partner = choice(setxor1d(array(range(self.pop_size)), array([i])))\n for j in range(self.problem_size):\n if pop[i][self.ID_FIT] < pop[id_partner][self.ID_FIT]:\n diff = pop[i][self.ID_POS][j] - pop[id_partner][self.ID_POS][j]\n else:\n diff = pop[id_partner][self.ID_POS][j] - pop[i][self.ID_POS][j]\n r = uniform()\n temp[j] = pop[i][self.ID_POS][j] + r * diff\n fit = self._fitness_model__(temp)\n if fit < pop[i][self.ID_FIT]:\n pop[i] = [temp, fit]\n\n best = self._get_global_best__(pop=pop, id_fitness=self.ID_FIT, id_best=self.ID_MIN_PROB)\n self.loss_train.append(best[self.ID_FIT])\n if self.log:\n print(\"> Epoch: {}, Best fit: {}\".format(epoch + 1, best[self.ID_FIT]))\n\n return best[self.ID_POS], best[self.ID_FIT], self.loss_train","sub_path":"mealpy/human_based/TLO.py","file_name":"TLO.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"370651354","text":"\"\"\"\nNumpyWavefunction simulator device\n==================================\n\n**Module name:** :mod:`pennylane_forest.numpy_wavefunction`\n\n.. currentmodule:: pennylane_forest.numpy_wavefunction\n\nThis module contains the :class:`~.NumpyWavefunctionDevice` class, a PennyLane device that allows\nevaluation and differentiation of pyQuil's NumpyWavefunctionSimulator using PennyLane.\n\n\nClasses\n-------\n\n.. autosummary::\n NumpyWavefunctionDevice\n\nCode details\n~~~~~~~~~~~~\n\"\"\"\nimport itertools\n\nimport numpy as np\n\nfrom pyquil.pyqvm import PyQVM\nfrom pyquil.numpy_simulator import NumpyWavefunctionSimulator\n\nfrom .device import ForestDevice\nfrom .wavefunction import observable_map, spectral_decomposition_qubit\nfrom ._version import __version__\n\n\nclass NumpyWavefunctionDevice(ForestDevice):\n r\"\"\"NumpyWavefunction simulator device for PennyLane.\n\n Args:\n wires (int): the number of qubits to initialize the device in\n shots (int): Number of circuit evaluations/random samples used\n to estimate expectation values of observables.\n \"\"\"\n name = \"pyQVM NumpyWavefunction Simulator Device\"\n short_name = \"forest.numpy_wavefunction\"\n\n observables = {\"PauliX\", \"PauliY\", \"PauliZ\", \"Hadamard\", \"Hermitian\", \"Identity\"}\n\n def __init__(self, wires, *, shots=0, **kwargs):\n super().__init__(wires, shots, **kwargs)\n self.qc = PyQVM(n_qubits=wires, quantum_simulator_type=NumpyWavefunctionSimulator)\n self.state = None\n\n def pre_apply(self):\n self.reset()\n self.qc.wf_simulator.reset()\n\n def pre_measure(self):\n # TODO: currently, the PyQVM considers qubit 0 as the leftmost bit and therefore\n # returns amplitudes in the opposite of the Rigetti Lisp QVM (which considers qubit\n # 0 as the rightmost bit). This may change in the future, so in the future this\n # might need to get udpated to be similar to the pre_measure function of\n # pennylane_forest/wavefunction.py\n self.state = self.qc.execute(self.prog).wf_simulator.wf.flatten()\n\n def expval(self, observable, wires, par):\n if observable == \"Hermitian\":\n A = par[0]\n else:\n A = observable_map[observable]\n\n if self.shots == 0:\n # exact expectation value\n ev = self.ev(A, wires)\n else:\n # estimate the ev\n # sample Bernoulli distribution n_eval times / binomial distribution once\n a, P = spectral_decomposition_qubit(A)\n p0 = self.ev(P[0], wires) # probability of measuring a[0]\n n0 = np.random.binomial(self.shots, p0)\n ev = (n0 * a[0] + (self.shots - n0) * a[1]) / self.shots\n\n return ev\n\n def var(self, observable, wires, par):\n if observable == \"Hermitian\":\n A = par[0]\n else:\n A = observable_map[observable]\n\n var = self.ev(A @ A, wires) - self.ev(A, wires) ** 2\n return var\n\n def ev(self, A, wires):\n r\"\"\"Evaluates a one-qubit expectation in the current state.\n\n Args:\n A (array): :math:`2\\times 2` Hermitian matrix corresponding to the expectation\n wires (Sequence[int]): target subsystem\n\n Returns:\n float: expectation value :math:`\\left\\langle{A}\\right\\rangle = \\left\\langle{\\psi}\\mid A\\mid{\\psi}\\right\\rangle`\n \"\"\"\n # Expand the Hermitian observable over the entire subsystem\n A = self.expand(A, wires)\n return np.vdot(self.state, A @ self.state).real\n\n def expand(self, U, wires):\n r\"\"\"Expand a multi-qubit operator into a full system operator.\n\n Args:\n U (array): :math:`2^n \\times 2^n` matrix where n = len(wires).\n wires (Sequence[int]): Target subsystems (order matters! 
the\n left-most Hilbert space is at index 0).\n\n Returns:\n array: :math:`2^N\\times 2^N` matrix. The full system operator.\n \"\"\"\n if self.num_wires == 1:\n # total number of wires is 1, simply return the matrix\n return U\n\n N = self.num_wires\n wires = np.asarray(wires)\n\n if np.any(wires < 0) or np.any(wires >= N) or len(set(wires)) != len(wires):\n raise ValueError(\"Invalid target subsystems provided in 'wires' argument.\")\n\n if U.shape != (2 ** len(wires), 2 ** len(wires)):\n raise ValueError(\"Matrix parameter must be of size (2**len(wires), 2**len(wires))\")\n\n # generate N qubit basis states via the cartesian product\n tuples = np.array(list(itertools.product([0, 1], repeat=N)))\n\n # wires not acted on by the operator\n inactive_wires = list(set(range(N)) - set(wires))\n\n # expand U to act on the entire system\n U = np.kron(U, np.identity(2 ** len(inactive_wires)))\n\n # move active wires to beginning of the list of wires\n rearranged_wires = np.array(list(wires) + inactive_wires)\n\n # convert to computational basis\n # i.e., converting the list of basis state bit strings into\n # a list of decimal numbers that correspond to the computational\n # basis state. For example, [0, 1, 0, 1, 1] = 2^3+2^1+2^0 = 11.\n perm = np.ravel_multi_index(tuples[:, rearranged_wires].T, [2] * N)\n\n # permute U to take into account rearranged wires\n return U[:, perm][perm]\n","sub_path":"pennylane_forest/numpy_wavefunction.py","file_name":"numpy_wavefunction.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"70129732","text":"# Thibault Deliever\n\n#invoer\nvertrek_thuis_u = float(input('Tijd dat je thuis vertrekt(uur): '))\nvertrek_thuis_m = float(input('Tijd dat je thuis vertrekt(minuten): '))\naankomst_vriendin_u = float(input('Tijd dat je aankwam bij de persoon in kwestie(uren): '))\naankomst_vriendin_m = float(input('Tijd dat je aankwam bij de persoon in kwestie(minuten): '))\nvertrek_vriendin_u = float(input('Tijd dat je vertrok bij de persoon in kwestie(uren): '))\nvertrek_vriendin_m = float(input('Tijd dat je vertrok bij de persoon in kwestie(minuten): '))\naankomst_thuis_u = float(input('Tijd dat je thuis aankwam(uren): '))\naankomst_thuis_m = float(input('Tijd dat je thuis aankwam(minuten): '))\n\n#verfijnde tijdduur\nvertrek_thuis = (vertrek_thuis_u * 60) + vertrek_thuis_m\naankomst_vriendin = (aankomst_vriendin_u * 60) + aankomst_vriendin_m\nvertrek_vriendin = (vertrek_vriendin_u * 60) + vertrek_vriendin_m\naankomst_thuis = (aankomst_thuis_u * 60) + aankomst_thuis_m\n\n#formule\nReistijd_enkel = (((aankomst_thuis - vertrek_thuis + 1440) - (vertrek_vriendin - aankomst_vriendin + 1440)) % 1440) / 2\nUur_correct = (vertrek_vriendin + Reistijd_enkel)\nUur_correct_u = (Uur_correct // 60) % 24\nUur_correct_m = Uur_correct % 60\n\n#uitvoer\nprint(int(Uur_correct_u))\nprint(int(Uur_correct_m))\n\n#####################################################################################\n\nfoute_vertrektijd_u = int(input('Geef de foutieve vertrektijd (u): '))\nfoute_vertrektijd_m = int(input('Geef de foutieve vertrektijd (m): '))\naankomst_vriendin_u = int(input('Geef aankomsttijd vriendin (u): '))\naankomst_vriendin_m = int(input('Geef aankomsttijd vriendin (m): '))\nvertrek_vriendin_u = int(input('Geef vertrektijd vriendin (u): '))\nvertrek_vriendin_m = int(input('Geef vertrektijd vriendin (m): '))\nfoute_aankomsttijd_u = int(input('Geef de foutieve aankomsttijd (u): '))\nfoute_aankomsttijd_m = int(input('Geef de foutieve aankomsttijd (m): '))\n\n# functie om tijdverschil te berekenen\n\ndef tijdverschil(start_u, start_m, eind_u, eind_m):\n\n if start_u < eind_u:\n\n verschil = (eind_u * 60 + eind_m) - (start_u * 60 + start_m)\n return verschil\n\n elif start_u > eind_u:\n\n verschil = (start_u * 60 + start_m) - (eind_u * 60 + eind_m)\n verschil = 24 * 60 - verschil\n return verschil\n\n elif start_u == eind_u:\n\n verschil = eind_m - start_m\n return verschil\n\n# berekenen tijd totale trip\n\ntrip_tijd = tijdverschil(foute_vertrektijd_u, foute_vertrektijd_m, foute_aankomsttijd_u, foute_aankomsttijd_m)\n\n# berekenen tijd bij vriendin\n\ntijd_vriendin = tijdverschil(aankomst_vriendin_u, aankomst_vriendin_m, vertrek_vriendin_u, vertrek_vriendin_m)\n\n# berekenen totale reistijd\n\ntotale_reistijd = trip_tijd - tijd_vriendin\n\n# berekenen enkele reistijd\n\nenkele_reistijd = totale_reistijd // 2\n\n# berekenen eindtijd\n\naantal_correcte_minuten = (vertrek_vriendin_u * 60 + vertrek_vriendin_m) + enkele_reistijd\n\ncorrecte_tijd_u = aantal_correcte_minuten // 60\ncorrecte_tijd_m = aantal_correcte_minuten % 60\n\nif correcte_tijd_u > 24:\n\n correcte_tijd_u -= 24\n\nprint(correcte_tijd_u)\nprint(correcte_tijd_m)","sub_path":"04+Variabelen/GestopteKlok.py","file_name":"GestopteKlok.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"530666141","text":"import sys\nfrom PIL import Image\n\ncoe_hdr = '''memory_initialization_radix=2;\nmemory_initialization_vector=\n'''\n\ndef get_bin_string(x):\n raw = bin(x)[2:]\n return \"0\"*(8 - len(raw)) + raw\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: {0} \".format(sys.argv[0]))\n else:\n fname = sys.argv[1]\n img = Image.open(fname)\n cimg = img.convert(\"P\")\n \n raw_palette = cimg.getpalette()\n palette = []\n for i in range(0, len(raw_palette), 3):\n palette.append(tuple(raw_palette[i:i+3]))\n # print(palette, len(palette))\n\n with open(f\"{sys.argv[1][:sys.argv[1].index('.')]}_color_map_red.coe\", \"w\") as f:\n f.write(coe_hdr)\n for i in range(256):\n f.write(get_bin_string(palette[i][0]) + \",\\n\")\n \n with open(f\"{sys.argv[1][:sys.argv[1].index('.')]}_color_map_green.coe\", \"w\") as f:\n f.write(coe_hdr)\n for i in range(256):\n f.write(get_bin_string(palette[i][1]) + \",\\n\")\n \n with open(f\"{sys.argv[1][:sys.argv[1].index('.')]}_color_map_blue.coe\", \"w\") as f:\n f.write(coe_hdr)\n for i in range(256):\n f.write(get_bin_string(palette[i][2]) + \",\\n\")\n \n (w, h) = cimg.size\n with open(f\"{sys.argv[1][:sys.argv[1].index('.')]}.coe\", \"w\") as f:\n f.write(coe_hdr)\n for y in range(h):\n for x in range(w):\n f.write(get_bin_string(cimg.getpixel((x, y))) + \",\\n\")\n ","sub_path":"sprites/a_rgb.py","file_name":"a_rgb.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"90048631","text":"# -*- coding: utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport smtplib,ssl\nimport math\nimport pandas as pd\nimport time as _time\nimport numpy\nfrom datetime import datetime, timedelta\nimport requests\n\n\nfrom twilio.rest import Client\nfrom twilio.rest import Client \n\n\naccount_sid = '****' \nauth_token = '****' \nclient = Client(account_sid, auth_token) \n\n\nstock_Symbol=\"^NSEBANK\"\n#stock_Symbol=\"VEDL.NS\"\n\n##================================== Alert for 15 min Lower breakout level =========================================\n\n\nclass YahooFinance:\n def start(self, ticker, result_range='1mo', start=None, end=None, interval='15m', dropna=True):\n \n # \"1d\",\"5d\",\"1mo\",\"3mo\",\"6mo\",\"1y\",\"2y\",\"5y\",\"10y\",\"ytd\",\"max\n # Valid Intervals - Valid intervals: [1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo]\n if result_range is None:\n start = int(_time.mktime(_time.strptime(start, '%d-%m-%Y')))\n end = int(_time.mktime(_time.strptime(end, '%d-%m-%Y')))\n # defining a params dict for the parameters to be sent to the API\n params = {'period1': start, 'period2': end, 'interval': interval}\n else:\n params = {'range': result_range, 'interval': interval}\n # sending get request and saving the response as response object\n url = \"https://query1.finance.yahoo.com/v8/finance/chart/{}\".format(ticker)\n r = requests.get(url=url, params=params)\n data = r.json()\n # Getting data from json\n error = data['chart']['error']\n if error:\n raise ValueError(error['description'])\n self._result = self._parsing_json(data)\n if dropna:\n self._result.dropna(inplace=True)\n return self._result\n\n\n def get_Live_data():\n #while(1==1):\n page=requests.get('https://in.finance.yahoo.com/quote/'+stock_Symbol+'?p='+stock_Symbol+'&.tsrc=fin-srch')\n soup=BeautifulSoup(page.text,'lxml')\n dataArray=soup.find_all('div',{'class':\"My(6px) Pos(r) smartphone_Mt(6px)\"})[0].find('span').text\n print(dataArray)\n\n\n def send_WhatsApp(self,msg):\n message = client.messages.create( \n from_='whatsapp:***', \n body=\"🤑Alert🔔from✅⚡⚡Rajat⚡⚡✅\\n\"+msg, \n to='whatsapp:***'\n )\n\n\n def send_Mail(self , msg):\n messageBuy = \"\"\"\\\nSubject :NSE Report : From Rajat\n\n\"\"\"+msg\n\n li = [\"recepientEmail@gmail.com\",\"recepientEmail@gmail.com\"] \n\n if len(messageBuy) != 0:\n for i in range(len(li)):\n s = smtplib.SMTP('smtp.gmail.com', 587) \n s.starttls() \n s.login(\"yourEmail@gmail.com\", \"**password**\") \n s.sendmail(\"yourEmail@gmail.com\", li[i], messageBuy) \n s.quit()\n print('mail sent !!')\n\n\n def _parsing_json(self, data):\n timestamps = data['chart']['result'][0]['timestamp']\n #print(timestamps)\n # Formatting date from epoch to local time\n timestamps = [_time.strftime('%a, %d %b %Y %H:%M:%S', _time.localtime(x)) for x in timestamps]\n #print(timestamps)\n volumes = data['chart']['result'][0]['indicators']['quote'][0]['volume']\n opens = data['chart']['result'][0]['indicators']['quote'][0]['open']\n opens = self._round_of_list(opens)\n closes = data['chart']['result'][0]['indicators']['quote'][0]['close']\n closes = self._round_of_list(closes)\n lows = data['chart']['result'][0]['indicators']['quote'][0]['low']\n lows = self._round_of_list(lows)\n highs = data['chart']['result'][0]['indicators']['quote'][0]['high']\n highs = self._round_of_list(highs)\n df_dict = {'Open': opens, 'High': highs, 'Low': lows, 'Close': closes, 'Volume': volumes}\n df = pd.DataFrame(df_dict, index=timestamps)\n df.index = 
pd.to_datetime(df.index)\n # print(df.index+timedelta(hours = 5.5))\n return df\n\n def _round_of_list(self, xlist):\n temp_list = []\n for x in xlist:\n if isinstance(x, float):\n temp_list.append(round(x, 2))\n else:\n temp_list.append(pd.np.nan)\n return temp_list\n\n def to_csv(self, file_name):\n self.result.to_csv(file_name)\n\nobj=YahooFinance()\nres = obj.start(stock_Symbol, result_range='1d', interval='15m', dropna='True')\nprint(res.head())\n#obj.send_Mail()\n\nhigh_till_9_30=0;\nLow_till_9_30=0;\nrisk_Margin=5;\nprev_wk_low=0;\nprev_wk_high=0;\n\n\nfor ind in res.index: \n time = ind.strftime(\"%H:%M:%S\")\n if time==\"09:15:00\":\n high_till_9_30=res['High'][ind]\n Low_till_9_30=res['Low'][ind]\n\nprint(\"till 9:30 low --> \" +str(Low_till_9_30))\nprint(\"till 9:30 High --> \" +str(high_till_9_30))\nprint('\\n')\n\n\nres__prev_1day = obj.start(stock_Symbol, result_range='5d', interval='1d', dropna='True')\nprint(res__prev_1day.iloc[[3]])\nprev_day_high=res__prev_1day['High'][0]\nprev_day_low=res__prev_1day['Low'][0]\n\n\nres_5min = obj.start(stock_Symbol, result_range='1d', interval='5m', dropna='True')\ncount=0;\nvisited=[]\nvisited_alert=[]\n\n\nresTest = obj.start(stock_Symbol,result_range='1mo', interval='1wk', dropna='True')\nprev_week=resTest.iloc[[3]]\nprev_wk_high=prev_week['High'][0]\nprev_wk_low=prev_week['Low'][0]\nprint(resTest.iloc[[3]])\n\nmsg_sent=False\n\nwhile True :\n for indx in res_5min.index:\n time = indx.strftime(\"%H:%M:%S\")\n if int (time.replace(':','')) >= 93000 and time not in visited :\n if \"00\" in time :\n message=\"\"\n print(\"scanning for time --> \" +time)\n # count=count+1\n # print(count)\n visited.append(time)\n local_low=res_5min['Low'][indx]\n if int(local_low) <= int(Low_till_9_30 - risk_Margin) and time not in visited_alert :\n visited_alert.append(time)\n option_alert=int(math.ceil(int(local_low) / 100.0)) * 100\n option_alert_msg=\"\\noption Alert ! Buy : ---> \" +\"BANKNIFTY\"+str(option_alert)+ \"PE\"\n #print(option_alert_msg)\n message=\"\\n===============================\"+option_alert_msg+\"\\n\\nFor stock : \"+stock_Symbol+\"\\n\\nLower than 9:30 low ie. \" +str(Low_till_9_30)+\" ----> strike price : \"+str(local_low)+ \" at time : \"+ str(time) +\"\\n\\n===============================\"+\"\\n\\nPrevious week high --> \"+ str(prev_wk_high) +\"\\n\\nPrevious week low --> \"+ str(prev_wk_low)+\"\\n\\n===============================\"+\"\\n\\nPrevious Day high --> \"+str(prev_day_high)+\"\\n\\nPrevious Day Low --> \"+str(prev_day_low)+\"\\n\\n===============================\"\n print(message)\n #obj.send_Mail(message)\n obj.send_WhatsApp(message)\n msg_sent=True\n print(\"msg sent !!\")\n break\n if msg_sent!=True:\n print(\"sleep \") \n _time.sleep(30)\n res_5min = obj.start(stock_Symbol, result_range='1d', interval='5m', dropna='True')\n else:\n break\n","sub_path":"BankNifty Option/BankNifty_15minBreakout_Low.py","file_name":"BankNifty_15minBreakout_Low.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"70510686","text":"import os, datetime, collections\n\nDropCreateSchemaParams = collections.namedtuple('DropCreateSchemaParams',\n 'schema, tablespace, schema_psw')\n\n#-------------------------------- available actions -----------------------\n\nSTARTEAM_CHECKOUT = \"starteam_checkout\" # checkout folders defined in starteam_params\nGIT_PULL = \"git_pull\" # pull from GIT\nSTOP_APPS_SERVERS = \"stop_apps_servers\" # optional param 'DEV' if for account to which IMPORT is made\nUPDATE_DB_ALL = \"update_db_all\" # run \"DB Changes\" scripts and and compile stored procedures\nMAKE_DB_DUMP = \"make_db_dump\" # create an Oracle dump file from this account for import\nBUILD_DELPHI_PKG = \"build_delphi_pkg\" # run \\Delphi Packages\\build.bat\nBUILD_DELPHI_APPLS = \"build_delphi_appls\" # run \\Delphi Application\\build.bat DAILY [BUILD_DELPHI_APPLS, '/Mode:DAILY']\nCOPY_EXE_FILES = \"copy_exe_files\" # copy exe files from shared folder on Build comp to APPS comp\n # ; optional param 'DEV' if for account to which IMPORT is made\nRUN_APPS_SERVERS = \"run_apps_servers\" # server exe files on File server run on APPS comp;\n # optional param 'DEV' if for account to which IMPORT is made\n#STOP_APPS_SERVERS DEV\nDROP_CREATE_SCHEMA = \"drop_create_schema\" # recreate account for which dump been prepared\nIMPORT_DB_DUMP = \"import_db_dump\" # DB import by applying the dump file\n#RUN_APPS_SERVERS DEV\nMAKE_DB_DUMP_ZIP = 'make_db_dump_zip' # makes zip file out of Oracle dump file and info\nFTP_DB_DUMP_UPLOAD = 'ftp_db_dump_upload' # send zip file by FTP\nEMAIL_DUMP_READY = 'email_dump_ready' # email that Oracle dump file is ready\nTO_WAIT = \"to_wait\" # wait in seconds [TO_WAIT, 20]\n\n#--------------------------- Action optional params------------------------\nDEV = 'Dev'\nACCOUNT_TYPES = set([DEV]) # used for: STOP_APPS_SERVERS, RUN_APPS_SERVERS; default is 'QA'\n\nDAILY = '/Mode:DAILY' # parameter for BUILD_DELPHI_APPLS\n\nONLY_DB_DUMP = 'ONLY_DB_DUMP' # parameter for make_db_dump_zip\n#--------------------------------------------------------------------------\n\nstarting_port_num = 8030\nstarteam_credentials = \"leonid:Ok123456789\"\n\ndef port_seq():\n global starting_port_num\n port = starting_port_num\n starting_port_num += 1\n return port\n\nLOG_FILES = {\n 'main': 'build_main_log.txt',\n 'starteam_checkout': 'build_starteam_log.txt',\n 'update_db_all': 'build_update_db_log.txt',\n 'run_db_changes_script': 'build_db_changes_log.txt',\n 'build_stored_procs': 'build_stored_procs.txt',\n 'build_delphi_pkg': 'build_delphi_pkg_log.txt',\n 'build_delphi_appls': 'build_delphi_appls_log.txt',\n 'make_db_dump': 'make_db_dump_log.txt',\n 'import_db_dump': 'import_db_dump_log.txt',\n 'drop_create_schema': 'drop_create_schema_log.txt',\n 'ftp_db_dump_upload': 'ftp_db_dump_upload_log.txt',\n 'starteam_checkout_time': 'starteam_checkout_time.txt'}\n\n\ndef fltr_exe_dll(file_name):\n \"\"\"filter for .dll, .exe files\"\"\"\n file_name = file_name.lower()\n result = any(file_name.endswith(s) for s in ('.dll', '.exe'))\n return result\n\ndef fltr_exe_not_batch(file_name):\n \"\"\"in addition to fltr_exe_dll SwBatch.exe not copied\"\"\"\n result = fltr_exe_dll(file_name) and (file_name.lower() != 'swbatch.exe')\n return result\n\ndef fltr_xsd(file_name):\n \"\"\"filter for .xsd files\"\"\"\n file_name = file_name.lower()\n result = any(file_name.endswith(s) for s in ('.xsd'))\n return result\n\n\ndef fn_ren_swmain(file_name):\n \"\"\" rename swmain.exe to swmain.ex\"\"\"\n if file_name.lower() == 
'swmain.exe':\n file_name = file_name[:-1]\n return file_name\n\n\nclass BaseSetupObj:\n is_db_locked = True # if DB is locked after update_db_all()\n \n# different from \"Build Daily\" \ndef get_serv_proxy_addr(param_obj):\n return ''.join(['http://', param_obj.apps_comp, ':', str(param_obj.port)])\n\n#def get_build_info(params_key, param_obj, build_seq=None):\n #if not build_seq:\n #build_seq = param_obj.build_seq\n #build_seq_str = str(build_seq)\n #build_seq_str = build_seq_str.replace(\"'\", \"\")\n #build_seq_str = build_seq_str.upper()\n\n #result = ''.join(('View: ', params_key, '\\n',\n #'Build Comp: ', param_obj.build_comp, '; ',\n #'APPS Comp: ', param_obj.apps_comp, '\\n', '\\n',\n #'Build Sequence = ', '\\n', build_seq_str))\n #return result","sub_path":"RTC Build/apps_params_include.py","file_name":"apps_params_include.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"86050593","text":"import numpy as np\nfrom normalise_windows import *\n\ndef load_traindata(filename,filename_label, seq_len, normalise_window): # 加载数据并划分数据集\n f = open(filename, 'r+').read() # 读取数据文件\n l=open(filename_label,'r+').read() #读取标签\n\n # 以换行分割数据转化为list\n data = f.split('\\n')[:-1]\n label = l.split('\\n')[:-1]\n\n print('data len:', len(data)) # 输出数据的长度1827\n print('label len:',len(label))\n print('sequence len:', seq_len)\n\n sequence_length = seq_len + 1\n\n result = []\n for index in range(len(data) - sequence_length+1):\n result.append(data[index: index + sequence_length]) # 得到长度为seq_len+1的向量,最后一个作为y\n\n # print('result len:', len(result))\n print('train result shape:', np.array(result).shape) #1707,121\n\n if normalise_window:\n result = normalise_windows(result)\n\n print('normalise_windows result shape:', np.array(result).shape) #1707,121\n\n # 客流量组合label成为三维输入\n result_dim3 = []\n for i in range(len(data)-seq_len):\n result_dim2 = []\n for j in range(seq_len):\n result_dim2.append([result[i][j],label[i+j]])\n result_dim3.append(result_dim2)\n\n print('dim3:',np.array(result_dim3).shape) #1707,120,2\n\n train = np.array(result)\n # x_train1 = train[:, :-1]\n # x_train=np.zeros((1707,121))\n y_train = train[:, -1]\n\n # for t in range(1707):\n # x_train[t] = np.append(x_train1[t], label[t+120])\n\n\n # reshape X to be [samples, time steps, features]\n # x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) # 转化为三维输入\n x_train=np.array(result_dim3)\n\n return [x_train, y_train]","sub_path":"load_traindata.py","file_name":"load_traindata.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"223461601","text":"\"\"\"dump: add description column\n\nRevision ID: 4784b7448cec\nRevises: d64d77cf9c7f\nCreate Date: 2019-11-21 03:08:24.619796\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4784b7448cec'\ndown_revision = 'd64d77cf9c7f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('dump', sa.Column('description', sa.UnicodeText(), server_default=sa.text(\"''\"), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('dump', 'description')\n # ### end Alembic commands ###\n","sub_path":"frontend/migrations/versions/4784b7448cec_dump_add_description_column.py","file_name":"4784b7448cec_dump_add_description_column.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"621958098","text":"from django.test import TestCase\nfrom sonates.registers.models import Issuer\nfrom django.db import IntegrityError\n\nclass IssuerTest(TestCase):\n def setUp(self):\n self.obj = Issuer(\n name = 'Fatec Ourinhos'\n )\n \n def test_create_issuer(self):\n 'Issuer must have name.'\n self.obj.save()\n self.assertEqual(1, self.obj.id)\n \n def test_unicode(self):\n 'Must be return a unicode.'\n self.assertEqual(u'Fatec Ourinhos', unicode(self.obj))\n \nclass IssuerUniqueTest(TestCase):\n def setUp(self):\n # Create a first entry to force colision.\n Issuer.objects.create(\n name='Fatec Ourinhos'\n , cnpj_cpf='123456789'\n )\n \n def test_cnpj_cpf(self):\n 'CNPJ_CPJ must be unique.'\n i = Issuer(\n name='Fatec Ourinhos'\n , cnpj_cpf = '123456789'\n )\n self.assertRaises(IntegrityError, i.save)","sub_path":"sonates/registers/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"185577649","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 3 14:51:54 2020\r\n\r\n@author: marius\r\n\"\"\"\r\n#Preprocessing\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime, date, time, timedelta\r\nfrom collections import defaultdict\r\n\r\nclass Subject:\r\n \r\n def __init__(self, filename):\r\n self.filename = filename\r\n self.data = pd.read_csv(r\"./Data/\"+filename)\r\n #Set index\r\n if 'real_time' in self.data.columns:\r\n self.data[\"ts\"] = [datetime.strptime(ts,\"%d-%m-%Y %H:%M:%S\") for ts in self.data[\"real_time\"]] \r\n elif 'REALTIME' in self.data.columns: \r\n self.data[\"ts\"] = [datetime.strptime(ts,\"%d-%b-%Y %H:%M:%S\") for ts in self.data[\"REALTIME\"]] \r\n self.data['date'] = self.data['ts'].dt.date\r\n self.data['hour'] = self.data['ts'].dt.hour\r\n self.data.set_index(pd.DatetimeIndex(self.data['ts']), inplace=True)\r\n\r\n #Set ENMO\r\n self.data['ENMO'] = (self.data[\"ACC\"]/0.0060321) + 0.057\r\n #Get other variables\r\n if 'predRMR_Oxford2005' in self.data.columns:\r\n self.RMR = self.data['predRMR_Oxford2005'][0]\r\n if 'P_TR_FITNESS_HighPt_est' in self.data.columns:\r\n self.VO2max = self.data['P_TR_FITNESS_HighPt_est'][0]\r\n self.age = self.data['age'][0]\r\n self.weight = self.data['weight'][0]\r\n self.height = self.data['height'][0]\r\n self.BMI = self.weight / (self.height **2)\r\n \r\n \r\n def __str__(self):\r\n return \"Subject: {}\".format(self.filename)\r\n \r\n def get_METS(self, sed = 1.5, MVPA = 3, vig = 6):\r\n if 'stdMET_highIC_Branch' in self.data.columns:\r\n #Sedentary activities counted from {sed} METs\r\n self.data['MET_Sed'] = self.data['stdMET_highIC_Branch'].apply(lambda x : 1 if x <= sed-1 else 0)\r\n #WHO recommendations would count moderate PA as between 3-6 METs\r\n self.data['MET_MVPA'] = self.data['stdMET_highIC_Branch'].apply(lambda x : x if x > MVPA-1 else 0)\r\n self.data['min_MVPA'] = self.data['stdMET_highIC_Branch'].apply(lambda x : 1 if x > MVPA-1 else 0)\r\n #WHO recommendations would count moderate PA as >6 METs\r\n self.data['MET_VigPA'] = self.data['stdMET_highIC_Branch'].apply(lambda x : x if x > vig-1 else 0)\r\n self.data['min_VigPA'] = self.data['stdMET_highIC_Branch'].apply(lambda x : 1 if x > vig-1 else 0)\r\n #LPA\r\n self.data['min_LPA'] = self.data['stdMET_highIC_Branch'].apply(lambda x : 1 if ((x >= sed-1) and (x < MVPA-1)) else 0)\r\n else:\r\n print(\"No METs column in data!\")\r\n return self\r\n \r\n def get_HRV(self):\r\n #Get HRV\r\n file = self.data\r\n file = file[file.PWEAR>0] #remove buffer no_wear\r\n if 'real_time' in file.columns:\r\n file['real_time'] = pd.to_datetime(file['real_time'], dayfirst=True) #datetime pandas format\r\n elif 'REALTIME' in file.columns:\r\n file['REALTIME'] = pd.to_datetime(file['REALTIME'], dayfirst=True) #datetime pandas format\r\n file = file[file.PWEAR>0] #remove buffer heart rate\r\n #calculate HRV from IBI (data is corrupted so we should flag/not use that datapoint)\r\n file['hrv_ms'] = np.where(file.min_ibi_2_in_milliseconds != 1992.0,file['max_ibi_2_in_milliseconds']-file['min_ibi_1_in_milliseconds'], np.nan)\r\n #file.hrv_milliseconds.fillna(file.hrv_milliseconds.mean(), inplace=True) #fill HRV nans with mean\r\n file.fillna(file.mean(), inplace=True) #fill nans with mean and if completely empty fill with 0\r\n file.fillna(0, inplace=True)\r\n self.data['hrv_ms'] = file['hrv_ms']\r\n \r\n return self\r\n \r\n def get_HRV_profile(self):\r\n hrv = defaultdict(dict)\r\n file = self.data\r\n 
file['sleep_window_0.4'] = file['sleep_window_0.4'].fillna(method='bfill')\r\n #Calculate profile for wake times\r\n file_wake = file.loc[file['sleep_window_0.4']==1]\r\n hrv['wake']['mean'] = file_wake['hrv_ms'].mean()\r\n hrv['wake']['std'] = file_wake['hrv_ms'].std()\r\n hrv['wake']['skew'] = file_wake['hrv_ms'].skew()\r\n hrv['wake']['kurtosis'] = file_wake['hrv_ms'].kurtosis()\r\n #Calculate profile for sleep windows\r\n file_sleep = file.loc[(file['sleep_window_0.4']==2) & (file['wake_window_0.4']==0)]\r\n hrv['sleep']['mean'] = file_sleep['hrv_ms'].mean()\r\n hrv['sleep']['std'] = file_sleep['hrv_ms'].std()\r\n hrv['sleep']['skew'] = file_sleep['hrv_ms'].skew()\r\n hrv['sleep']['kurtosis'] = file_sleep['hrv_ms'].kurtosis()\r\n #Calculate profile for MVPA\r\n file_MVPA = file[file['min_MVPA']==1]\r\n hrv['MVPA']['mean'] = file_MVPA['hrv_ms'].mean()\r\n hrv['MVPA']['std'] = file_MVPA['hrv_ms'].std()\r\n hrv['MVPA']['skew'] = file_MVPA['hrv_ms'].skew()\r\n hrv['MVPA']['kurtosis'] = file_MVPA['hrv_ms'].kurtosis()\r\n #Calculate profile for sedentary time\r\n file_sed = file[file['MET_Sed']==1]\r\n hrv['sed']['mean'] = file_sed['hrv_ms'].mean()\r\n hrv['sed']['std'] = file_sed['hrv_ms'].std()\r\n hrv['sed']['skew'] = file_sed['hrv_ms'].skew()\r\n hrv['sed']['kurtosis'] = file_sed['hrv_ms'].kurtosis()\r\n \r\n self.HRV = hrv\r\n return self\r\n \r\n \r\n \r\n def get_PA(self, wake_time='07:00', bed_time='23:00'):\r\n #Extracting daily physical activity data (not adjusted to full days yet)\r\n #Set time interval\r\n day = self.data.between_time(wake_time,bed_time, include_start = True, \r\n include_end = True)\r\n #Resampling: daily\r\n day_rd = day.resample('D', base=7).sum()\r\n #Gets PA averages\r\n self.MVPA_mean = day_rd['MET_MVPA'].mean()\r\n self.MVPA_std = day_rd['MET_MVPA'].std()\r\n self.VigPA_mean = day_rd['MET_VigPA'].mean()\r\n self.VigPA_std = day_rd['MET_VigPA'].std()\r\n self.MVPAmins_mean = day_rd['min_MVPA'].mean()\r\n self.MVPAmins_std = day_rd['min_MVPA'].std()\r\n self.VigPAmins_mean = day_rd['min_VigPA'].mean()\r\n self.VigPAmins_std = day_rd['min_VigPA'].std()\r\n self.LPAmins_mean = day_rd['min_LPA'].mean()\r\n self.LPAmins_std = day_rd['min_LPA'].std()\r\n self.Sed_mean = day_rd['MET_Sed'].mean()\r\n self.Sed_std = day_rd['MET_Sed'].std()\r\n self.VigPA_dcount = np.count_nonzero(day_rd['MET_VigPA'])\r\n self.pa_rec = day_rd[['ENMO','MET_Sed','MET_MVPA','MET_VigPA','min_MVPA','min_VigPA','min_LPA']]\r\n return self\r\n \r\n #Function to get the sleep regularity index\r\n def get_ARI(self, q_sleep = 0.4):\r\n act_col = 'min_MVPA'\r\n sri_delta = np.zeros(len(self.data[self.data.index[0]:self.data.shift(periods=-1,freq='D').index[-1]]))\r\n for i in range(len(self.data[self.data.index[0]:self.data.shift(periods=-1,freq='D').index[-1]])):\r\n if self.data[act_col][self.data.index[i]] == self.data[act_col].shift(periods=-1,freq='D')[self.data.index[i]]:\r\n sri_delta[i] = 1\r\n else:\r\n sri_delta[i] = 0\r\n sri_df = pd.DataFrame(sri_delta)\r\n sri = -100 + (200 / (len(self.data[self.data.index[0]:self.data.shift(periods=-1,freq='D').index[-1]]))) * sri_df.sum()\r\n self.ARI = float(sri)\r\n return self \r\n \r\n from sleep_analysis import label_sleep, get_vanhees\r\n from circadian_analysis import get_IV_IS, get_cosinor, get_SSA,get_SSA_par\r\n from nonlinear_analysis import get_nonlinear, get_nonlin_params\r\n from crespo_analysis import Crespo\r\n\r\n def get_windows(self):\r\n df_copy = self.data.copy()\r\n df_copy['sleep_cumsum'] = 
(df_copy['sleep_window_0.4']-1).cumsum()\r\n\r\n df_night = pd.DataFrame(df_copy.loc[lambda df_copy: df_copy['sleep_cumsum'].diff() == 0])\r\n df_day = pd.DataFrame(df_copy.loc[lambda df_copy: df_copy['sleep_cumsum'].diff() == 1])\r\n\r\n self.sleep_windows = [pd.DataFrame(group[1]) for group in df_night.groupby(df_night['sleep_cumsum'])]\r\n self.wake_windows = [pd.DataFrame(group[1]) for group in df_day.groupby(df_day['timepoint']//1440)]\r\n \r\n for idx in range(len(self.sleep_windows)):\r\n self.sleep_windows[idx]['length'] = len(self.sleep_windows[idx])\r\n for idx in range(len(self.wake_windows)):\r\n self.wake_windows[idx]['length'] = len(self.wake_windows[idx])\r\n return self\r\n \r\n def get_daily_stats(self):\r\n sleep_rec = self.sleep_rec.copy()\r\n pa_rec = self.pa_rec.copy()\r\n if hasattr(self, 'ssa'):\r\n ssa = self.ssa.copy()\r\n \r\n nonlin_cols = ['dfa_ENMO_sleep','dfa_ENMO_wake','dfa_HR_sleep','dfa_HR_wake','dfa_HRV_sleep','dfa_HRV_wake',\r\n 'se_ENMO_sleep','se_ENMO_wake','se_HR_sleep','se_HR_wake','se_HRV_sleep','se_HRV_wake']\r\n nonlinear = pd.DataFrame(columns=nonlin_cols,index=range(len(self.wake_windows)))\r\n #nonlinear = pd.DataFrame(columns=nonlin_cols,index=self.pa_rec.index)\r\n \r\n if hasattr(self,'nonlinear'):\r\n for idx in range(len(self.sleep_windows)):\r\n nonlinear.loc[idx,'dfa_ENMO_sleep'] = self.nonlinear['ENMO']['sleep'][idx]['DFA']\r\n nonlinear.loc[idx,'dfa_HR_sleep'] = self.nonlinear['mean_hr']['sleep'][idx]['DFA']\r\n nonlinear.loc[idx,'dfa_HRV_sleep'] = self.nonlinear['hrv_ms']['sleep'][idx]['DFA']\r\n nonlinear.loc[idx,'se_ENMO_sleep'] = self.nonlinear['ENMO']['sleep'][idx]['SampEn']\r\n nonlinear.loc[idx,'se_HR_sleep'] = self.nonlinear['mean_hr']['sleep'][idx]['SampEn']\r\n nonlinear.loc[idx,'se_HRV_sleep'] = self.nonlinear['hrv_ms']['sleep'][idx]['SampEn']\r\n \r\n for jdx in range(len(self.wake_windows)):\r\n nonlinear.loc[jdx,'dfa_ENMO_wake'] = self.nonlinear['ENMO']['wake'][jdx]['DFA']\r\n nonlinear.loc[jdx,'dfa_HR_wake'] = self.nonlinear['mean_hr']['wake'][jdx]['DFA']\r\n nonlinear.loc[jdx,'dfa_HRV_wake'] = self.nonlinear['hrv_ms']['wake'][jdx]['DFA']\r\n nonlinear.loc[jdx,'se_ENMO_wake'] = self.nonlinear['ENMO']['wake'][jdx]['SampEn']\r\n nonlinear.loc[jdx,'se_HR_wake'] = self.nonlinear['mean_hr']['wake'][jdx]['SampEn']\r\n nonlinear.loc[jdx,'se_HRV_wake'] = self.nonlinear['hrv_ms']['wake'][jdx]['SampEn']\r\n \r\n #print(nonlinear)\r\n wake_lengths = pd.DataFrame(columns = ['w_length'],index = self.pa_rec.index)\r\n for idx in range(len(self.wake_windows)):\r\n wake_lengths.iloc[idx]['w_length'] = self.wake_windows[idx]['length'][0]\r\n sleep_lengths = pd.DataFrame(columns = ['s_length'],index = self.pa_rec.index)\r\n for idx in range(len(self.sleep_windows)):\r\n sleep_lengths.iloc[idx]['s_length'] = self.sleep_windows[idx]['length'][0]\r\n\r\n sleep_rec.index = pd.to_datetime(self.sleep_rec.index.values) - timedelta(hours=20)\r\n \r\n if hasattr(self,'nonlinear'):\r\n if len(nonlinear.index)==len(sleep_rec.index):\r\n #nonlinear = nonlinear.reset_index()\r\n nonlinear = nonlinear.set_index(sleep_rec.index)\r\n elif len(nonlinear.index)==len(pa_rec.index):\r\n #nonlinear = nonlinear.reset_index()\r\n nonlinear = nonlinear.set_index(self.pa_rec.index)\r\n \r\n daily = pd.concat([pa_rec,sleep_rec, wake_lengths,sleep_lengths,nonlinear],axis=1)\r\n if hasattr(self, 'ssa'):\r\n \r\n daily['ENMO_SSA_phi'] = self.ssa['ENMO']['acrophase']\r\n daily['mean_hr_SSA_phi'] = self.ssa['mean_hr']['acrophase']\r\n daily['ENMO_SSA_per'] = 
self.ssa['ENMO']['period']\r\n daily['mean_hr_SSA_per'] = self.ssa['mean_hr']['period']\r\n daily['ENMO_phisleep_delay'] = daily['sleep_onset'] - daily['ENMO_SSA_phi']\r\n daily['mean_hr_phisleep_delay'] = daily['sleep_onset'] - daily['mean_hr_SSA_phi']\r\n\r\n self.daily_stats = daily\r\n return self","sub_path":"misc/Notebooks/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":12102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
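A plausible end-to-end use of the Subject pipeline above, assuming a CSV under ./Data/ that carries the accelerometer, heart-rate, and sleep-window columns the methods read (the filename is illustrative; the chained calls rely on each method returning self):

    s = Subject("participant_01.csv")
    s.get_METS().get_HRV().get_HRV_profile().get_PA(wake_time='07:00', bed_time='23:00')
    print(s.MVPA_mean, s.HRV['sleep']['mean'])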
+{"seq_id":"19398095","text":"import json\n\nfrom responsebot.handlers.base import BaseTweetHandler\n\n\nclass CountryHandler(BaseTweetHandler):\n def __init__(self, *args, **kwargs):\n super(CountryHandler, self).__init__(*args, **kwargs)\n\n self.countries = json.load(open('examples/countries.json'))['country']\n self.countries = {\n x['countryName']: {\n 'population': x['population'],\n 'languages': x['languages'],\n 'continentName': x['continentName']\n } for x in self.countries\n }\n\n def on_tweet(self, tweet):\n country = ' '.join(tweet.text.split(' ')[1:])\n\n if country in self.countries:\n info = self.countries[country]\n self.client.tweet(\n 'Country: {country}\\n'\n 'Population: {population}\\n'\n 'Languages: {languages}\\n'\n 'Continent: {continent}'.format(\n country=country,\n population=info['population'],\n languages=info['languages'],\n continent=info['continentName']\n )\n )\n","sub_path":"examples/handlers/country_handler.py","file_name":"country_handler.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"442272912","text":"import socket\nimport winsound\nimport time\n\n\nHOST = '127.0.0.1' # The server's hostname or IP address\nPORT = 8888 # The port used by the server\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n while True:\n message = input('Input your message: ')\n if message == '!':\n break\n s.sendall(message.encode())\n data = s.recv(1024)\n print('Received' + (data.decode()))\n sound = str(data.decode())\n print(sound)\n for i in data.decode():\n if i == '.':\n winsound.Beep(1000, 100) # Beep at 1000 Hz for 100 ms\n time.sleep(0.1)\n elif i == '-':\n winsound.Beep(1000, 600) # Beep at 1000 Hz for 100 ms\n time.sleep(0.1)\n elif i == ' ':\n pass\n\n\n\n\n\n","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"91235853","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n#\n# Pedro Tabacof\n# tabacof at gmail dot com\n# April 2016\n#\n# Bayesian uncertainty in MNIST classification\n#\n# Based on the MNIST Lasagne example\n# https://github.com/Lasagne/Lasagne/blob/master/examples/mnist.py\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport time\nimport operator\n\nimport numpy as np\nimport scipy.stats\nimport seaborn as sns\n\nimport theano\nimport theano.tensor as T\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\n\nimport lasagne\n\n# Experiment parameters\n\nnum_epochs = 150 # Number of epochs\nbatch_size = 100 # Mini batch size (also used for number of posterior samples)\nweight_decay = 1e-2 # L2 regularization\ndropout_p = 0.5 # Dropout probability\nn_hidden = 512 # Number of neurons at hidden layer\nn_in = 784 # Number of inputs (image pixels)\nn_out = 2 # Number of outputs (labels)\n\n# Bayesian approximation method\nbayesian_approximation = \"dropout\" # Use Gal's variational dropout method\n#bayesian_approximation = \"variational\" # Use Gaussian variational approximation\n\n# Load MNIST dataset\ndef load_dataset():\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n\n data = data.reshape(-1, n_in)\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n X_train = load_mnist_images('train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')\n\n X_outside = X_train[np.where(np.logical_and(y_train != 0, y_train != 1))]\n y_outside = y_train[np.where(np.logical_and(y_train != 0, y_train != 1))]\n\n X_train = X_train[np.where(np.logical_or(y_train == 0, y_train == 1))]\n y_train = y_train[np.where(np.logical_or(y_train == 0, y_train == 1))]\n\n X_test_all = X_test\n y_test_all = y_test\n \n X_test = X_test[np.where(np.logical_or(y_test == 0, y_test == 1))]\n y_test = y_test[np.where(np.logical_or(y_test == 0, y_test == 1))]\n\n return X_train, y_train, X_test, y_test, X_outside, y_outside, X_test_all, y_test_all\n\n\n# Mini batch iterator for training and testing\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert len(inputs) == 
len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]\n\n# Stochastic Gradient Descent with Momentum\ndef sgd(cost, params, lr=0.05, momentum = 0.9):\n grads = T.grad(cost=cost, wrt=params)\n updates = []\n for p, g in zip(params, grads):\n acc = theano.shared(p.get_value() * 0.)\n acc_new = acc*momentum + (1.0-momentum)*g\n updates.append([acc, acc_new])\n updates.append([p, p - acc_new * lr])\n return updates\n \n# Build MLP with one hidden layer\ndef build_mlp(input_var=None):\n l_in = lasagne.layers.InputLayer(shape=(None, n_in),\n input_var=input_var)\n\n l_hid1 = lasagne.layers.DenseLayer(\n l_in, num_units=512,\n nonlinearity=lasagne.nonlinearities.rectify,\n W=lasagne.init.GlorotUniform())\n\n l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=dropout_p)\n\n l_out = lasagne.layers.DenseLayer(\n l_hid1_drop, num_units=2,\n nonlinearity=lasagne.nonlinearities.softmax)\n\n return l_out\n \n# Weight initialization helper function\ndef init(n_in, n_out, name):\n values = np.asarray(np.random.uniform(\n low=-np.sqrt(6. / (n_in + n_out)),\n high=np.sqrt(6. / (n_in + n_out)),\n size=(n_in, n_out)), \n dtype=theano.config.floatX)\n\n return theano.shared(value=values, name=name, borrow=True)\n \n# Load the dataset\nprint(\"Loading data...\")\nX_train, y_train, X_test, y_test, X_outside, y_outside, X_test_all, y_test_all = load_dataset()\n\n# Prepare Theano variables for inputs and targets\ninput_var = T.matrix('inputs')\ntarget_var = T.ivector('targets')\n\nif bayesian_approximation == \"dropout\":\n # MLP with one hidden layer\n network = build_mlp(input_var)\n \n # Softmax output\n prediction = lasagne.layers.get_output(network, deterministic=False)\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\n loss = loss.mean()\n \n # L2 regularization (weight decay)\n weightsl2 = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)\n loss += weight_decay*weightsl2\n \n # SGD training\n params = lasagne.layers.get_all_params(network, trainable=True)\n updates = lasagne.updates.momentum(loss, params, learning_rate=0.01, momentum=0.9)\n train_fn = theano.function([input_var, target_var], loss, updates=updates)\n\n # Test functions\n test_loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()\n test_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var), dtype=theano.config.floatX)\n test_fn = theano.function([input_var, target_var], [loss, prediction, test_acc])\n\n # Probability and entropy\n test_prob = theano.function([input_var], prediction)\n entropy = lasagne.objectives.categorical_crossentropy(prediction, prediction)\n test_entropy = theano.function([input_var], entropy)\n\n test_prediction_classical = lasagne.layers.get_output(network, deterministic=True)\n entropy_classical = lasagne.objectives.categorical_crossentropy(test_prediction_classical, test_prediction_classical)\n test_entropy_classical = theano.function([input_var], entropy_classical)\n\nelif bayesian_approximation == \"variational\": \n # Input to hidden layer weights\n W1_mu = init(n_in, n_hidden, 'W1_mu') # Weights mean\n W1_log_var = init(n_in, n_hidden, 'W1_log_var') # Weights log variance\n \n # Hidden layer to output weights\n W2_mu = init(n_hidden, n_out, 
'W2_mu') # Weights mean\n W2_log_var = init(n_hidden, n_out, 'W2_log_var') # Weights log variance\n \n # Biases are not random variables (for convenience)\n b1 = theano.shared(value=np.zeros((n_hidden,), dtype=theano.config.floatX), name='b1', borrow=True)\n b2 = theano.shared(value=np.zeros((n_out,),dtype=theano.config.floatX), name='b2', borrow=True)\n \n # Network parameters\n params = [W1_mu, W1_log_var, W2_mu, W2_log_var, b1, b2]\n \n # Random variables\n srng = MRG_RandomStreams(seed=234)\n rv_hidden = srng.normal((batch_size, n_in, n_hidden)) # Standard normal\n rv_output = srng.normal((batch_size, n_hidden, n_out)) # Standard normal\n\n # MLP\n # Hidden layer\n #hidden_output = T.nnet.relu(T.batched_dot(input_var, W1_mu + T.log(1.0+T.exp(W1_log_var))*rv_hidden) + b1)\n hidden_output = T.nnet.relu(T.batched_dot(input_var, W1_mu + T.exp(W1_log_var)*rv_hidden) + b1)\n\n # Output layer \n #prediction = T.nnet.softmax(T.batched_dot(hidden_output, W2_mu + T.log(1.0+T.exp(W2_log_var))*rv_output) + b2)\n prediction = T.nnet.softmax(T.batched_dot(hidden_output, W2_mu + T.exp(W2_log_var)*rv_output) + b2)\n\n # Prediction \n y_pred = T.argmax(prediction, axis=1)\n \n # KL divergence between prior and posterior\n # For Gaussian prior and posterior, the formula is exact:\n #DKL_hidden = (1.0 + T.log(2.0*T.log(1.0+T.exp(W1_log_var))) - W1_mu**2.0 - 2.0*T.log(1.0+T.exp(W1_log_var))).sum()/2.0\n #DKL_output = (1.0 + T.log(2.0*T.log(1.0+T.exp(W2_log_var))) - W2_mu**2.0 - 2.0*T.log(1.0+T.exp(W2_log_var))).sum()/2.0\n DKL_hidden = (1.0 + 2.0*W1_log_var - W1_mu**2.0 - T.exp(2.0*W1_log_var)).sum()/2.0\n DKL_output = (1.0 + 2.0*W2_log_var - W2_mu**2.0 - T.exp(2.0*W2_log_var)).sum()/2.0\n \n # Negative log likelihood\n nll = T.nnet.categorical_crossentropy(T.clip(prediction, 0.000001, 0.999999), target_var)\n # Complete variational loss \n loss = nll.mean() - (DKL_hidden + DKL_output)/float(batch_size)\n #loss = nll.mean()\n # SGD training\n updates = sgd(loss, params, 0.01)\n train_fn = theano.function([input_var, target_var], loss, updates=updates)\n \n # Test functions\n hidden_output_test = T.nnet.relu(T.dot(input_var, W1_mu) + b1)\n test_prediction = T.nnet.softmax(T.dot(hidden_output_test, W2_mu) + b2)\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var))\n test_fn = theano.function([input_var, target_var], [loss, test_prediction, test_acc])\n\n # Probability and entropy\n test_prob = theano.function([input_var], prediction)\n entropy = T.nnet.categorical_crossentropy(prediction, prediction)\n test_entropy = theano.function([input_var], entropy)\n test_entropy_classical = theano.function([input_var], 0.0*input_var.sum()) # Fake classical entropy\n \n# Finally, launch the training loop.\nprint(\"Starting training...\")\n# We iterate over epochs:\nfor epoch in range(num_epochs):\n # In each epoch, we do a full pass over the training data:\n train_err = 0\n train_batches = 0\n start_time = time.time()\n for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=True):\n inputs, targets = batch\n err = train_fn(inputs, targets)\n train_err += err\n train_batches += 1\n\n # Then we print the results for this epoch:\n print(\"Epoch {} of {} took {:.3f}s\".format(epoch + 1, num_epochs, time.time() - start_time))\n print(\" training loss:\\t\\t{:.6f}\".format(train_err / train_batches))\n\n# After training, we compute and print the test error:\ntest_err = 0\ntest_acc = 0\ntest_batches = 0\nfor batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):\n inputs, 
targets = batch\n err, _, acc = test_fn(inputs, targets)\n test_err += err\n test_acc += acc\n test_batches += 1\nprint(\"Final results:\")\nprint(\" test loss:\\t\\t\\t{:.6f}\".format(test_err / test_batches))\nprint(\" test accuracy:\\t\\t{:.2f} %\".format(test_acc / test_batches * 100))\n\n# Uncertainty prediction\ntest_pred_mean = {str(x):[] for x in range(0,10)}\ntest_pred_std = {str(x):[] for x in range(0,10)}\ntest_entropy_bayesian_v1 = {str(x):[] for x in range(0,10)}\ntest_entropy_bayesian_v2 = {str(x):[] for x in range(0,10)}\ntest_entropy_deterministic = {str(x):[] for x in range(0,10)}\n\nprint(\"Total test samples\", len(X_test_all))\nfor i in range(len(X_test_all)):\n probs = test_prob(np.tile(X_test_all[i], batch_size).reshape(-1, n_in))\n entropy = test_entropy(np.tile(X_test_all[i], batch_size).reshape(-1, n_in))\n classical_entropy = test_entropy_classical(X_test_all[i][np.newaxis,:])\n predictive_mean = np.mean(probs, axis=0)\n predictive_std = np.std(probs, axis=0)\n test_pred_mean[str(y_test_all[i])].append(predictive_mean[1])\n test_pred_std[str(y_test_all[i])].append(predictive_std[1])\n test_entropy_bayesian_v1[str(y_test_all[i])].append(entropy.mean())\n test_entropy_bayesian_v2[str(y_test_all[i])].append(scipy.stats.entropy(predictive_mean))\n test_entropy_deterministic[str(y_test_all[i])].append(classical_entropy.mean())\n\n# Plotting\nfor k in sorted(test_pred_mean.keys()):\n sns.plt.figure()\n sns.plt.hist(test_pred_mean[k], label = \"Prediction mean for \" + k)\n sns.plt.hist(test_entropy_bayesian_v1[k], label = \"Bayesian Entropy v1 for \" + k)\n sns.plt.hist(test_entropy_bayesian_v2[k], label = \"Bayesian Entropy v2 for \" + k) \n sns.plt.hist(test_pred_std[k], label = \"Prediction std for \" + k)\n #sns.plt.hist(test_entropy_deterministic[k], label = \"Classical entropy for \" + k)\n sns.plt.legend()\n sns.plt.show()\n\n# Anomaly detection\n# by classical prediction entropy\ndef anomaly_detection(anomaly_score_dict, name):\n threshold = np.linspace(0, 1.0, 1000)\n acc = {}\n for t in threshold:\n tp = 0.0\n tn = 0.0\n for l in anomaly_score_dict:\n if l == '0' or l == '1':\n tp += (np.array(anomaly_score_dict[l]) < t).mean()\n else:\n tn += (np.array(anomaly_score_dict[l]) >= t).mean()\n tp /= 2.0\n tn /= 8.0\n bal_acc = (tp + tn)/2.0\n f1_score = 2.0*tp/(2.0 + tp - tn)\n acc[t] = [bal_acc, f1_score, tp, tn]\n \n print(\"{}\\tscore\\tthreshold\\tTP\\tTN\".format(name))\n sorted_acc = sorted(acc.items(), key= lambda x : x[1][0], reverse = True)\n print(\"\\tbalanced acc\\t{:.3f}\\t{:.3f}\\t\\t{:.3f}\\t{:.3f}\".format(sorted_acc[0][1][0], sorted_acc[0][0], sorted_acc[0][1][2], sorted_acc[0][1][3]))\n sorted_acc = sorted(acc.items(), key= lambda x : x[1][1], reverse = True)\n print(\"\\tf1 score\\t{:.3f}\\t{:.3f}\\t\\t{:.3f}\\t{:.3f}\".format(sorted_acc[0][1][1], sorted_acc[0][0], sorted_acc[0][1][2], sorted_acc[0][1][3]))\n\nanomaly_detection(test_entropy_deterministic, \"Classical entropy\")\nanomaly_detection(test_entropy_bayesian_v1, \"Bayesian entropy v1\")\nanomaly_detection(test_entropy_bayesian_v2, \"Bayesian entropy v2\")\nanomaly_detection(test_entropy_deterministic, \"Classical entropy\")\nanomaly_detection(test_pred_std, \"Bayesian prediction STD\")\n","sub_path":"mnist_uncertainty.py","file_name":"mnist_uncertainty.py","file_ext":"py","file_size_in_byte":14488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
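The MC-dropout uncertainty estimate in the script boils down to a small pattern: keep dropout stochastic at test time, push the same input through the network batch_size times, and read the spread of the outputs. Distilled with the names defined above (x being one flattened test image):

    probs = test_prob(np.tile(x, batch_size).reshape(-1, n_in))  # batch_size stochastic forward passes
    predictive_mean = np.mean(probs, axis=0)  # approximate Bayesian model average
    predictive_std = np.std(probs, axis=0)    # per-class uncertainty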
+{"seq_id":"436042165","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\ndef web_scraping(userText):\n qs = userText\n #qs = input()\n global flag2\n global loading\n \n URL = 'https://www.google.com/search?q=' + qs\n page = requests.get(URL)\n \n soup = BeautifulSoup(page.content, 'html.parser')\n \n links = soup.findAll(\"a\")\n all_links = []\n \n for link in links:\n link_href = link.get('href')\n if \"url?q=\" in link_href and not \"webcache\" in link_href:\n all_links.append((link.get('href').split(\"?q=\")[1].split(\"&sa=U\")[0]))\n \n flag = False\n for link in all_links:\n if 'https://en.wikipedia.org/wiki/' in link:\n wiki = link\n flag = True\n break\n \n div0 = soup.find_all('div',class_=\"kvKEAb\")\n div1 = soup.find_all(\"div\",class_=\"Ap5OSd\")\n div2 = soup.find_all(\"div\",class_=\"nGphre\")\n div3 = soup.find_all(\"div\",class_=\"BNeawe iBp4i AP7Wnd\")\n \n if len(div0) != 0:\n answer = div0[0].text\n elif len(div1) != 0:\n answer = div1[0].text + \"\\n\" + div1[0].find_next_sibling(\"div\").text\n elif len(div2) != 0:\n answer = div2[0].find_next(\"span\").text + \"\\n\" + div2[0].find_next(\"div\",class_=\"kCrYT\").text\n elif len(div3) != 0:\n answer = div3[1].text\n elif flag==True:\n page2 = requests.get(wiki)\n soup = BeautifulSoup(page2.text, 'html.parser')\n title = soup.select(\"#firstHeading\")[0].text\n \n paragraphs = soup.select(\"p\")\n for para in paragraphs:\n if bool(para.text.strip()):\n answer = title + \"\\n\" + para.text\n break\n else:\n answer = \"Sorry. I could not find the desired results.\"\n if answer==\"Sorry. I could not find the desired results.\":\n #print(answer)\n return clean(answer)\n else:\n return clean(answer)\n #print(\"I found this on the web: \",answer)\n\ndef clean(ans):\n x = re.compile(r'\\[[0-9]*\\]|Wikipedia|quora|Quora')\n return(x.sub('',ans))\n\n\nif __name__ == \"__main__\":\n web_scraping()","sub_path":"webscrap.py","file_name":"webscrap.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"617065367","text":"#! /home/ps997/miniconda3/bin/python\n\n# Usage: centrifugeExtractor.py centrifuge.results taxIDsToReject.txt readsfile.fq\n\nimport sys\nfrom Bio import SeqIO\n\ncentResultFile = sys.argv[1]\ntaxIDsFile = sys.argv[2]\nreadsFile = sys.argv[3]\n\ntaxIDsList = []\nkeepReadsList = []\nrejectReadsList = []\n\nopenTaxIDsFile = open(taxIDsFile, \"r\")\nfor line in openTaxIDsFile:\n\ttaxIDsList.append(line.strip(\"\\n\"))\nopenTaxIDsFile.close()\n\n\nopenCentResultFile = open(centResultFile , \"r\")\nfor line in openCentResultFile:\n\tsplitline = line.strip(\"\\n\").split(\"\\t\")\n\tif splitline[2] in taxIDsList:\n\t\trejectReadsList.append(splitline[0])\n\telif splitline[2] not in taxIDsList:\n\t\tkeepReadsList.append(splitline[0])\nopenCentResultFile.close()\n\nkeepReadsList = set(keepReadsList)\nrejectReadsList = set(rejectReadsList)\n\noutputKeepReadsList = open(\"%s_keepReadsList.txt\" % readsFile, \"w\")\nfor readID in keepReadsList:\n\toutputKeepReadsList.write(\"%s\\n\" % readID)\noutputKeepReadsList.close()\nprint(\"Keep-reads file created\")\n\noutputKeepFile = open(\"%s.keep.DEV.fastq\" % readsFile, \"w\")\noutputRejectFile = open(\"%s.reject.DEV.fastq\" % readsFile, \"w\")\n\ninput_seq_dict = SeqIO.to_dict(SeqIO.parse(readsFile, \"fastq\"))\nfor record in input_seq_dict:\n\tif record in keepReadsList:\n\t\tSeqIO.write(input_seq_dict[record], outputKeepFile, \"fastq\")\n\telif record in rejectReadsList:\n\t\tSeqIO.write(input_seq_dict[record], outputRejectFile, \"fastq\")\n\telse:\n\t\tprint(\"%s not present in centrifuge results! Did you use the wrong fastq?\" % record)\noutputKeepFile.close()\noutputRejectFile.close()\n","sub_path":"scripts/centrifugeExtractor_DEV.py","file_name":"centrifugeExtractor_DEV.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"19187181","text":"from selenium import webdriver\nimport time\nimport pandas as pd\n\ndriver = webdriver.Chrome(\"C:\\webdrivers\\chromedriver.exe\")\ndriver.get(\"https://www.tripadvisor.nl/Restaurants-g188632-Rotterdam_South_Holland_Province.html#EATERY_OVERVIEW_BOX\")\n\noutput = []\nwhile True:\n pageOutput = []\n try:\n restaurants = driver.find_elements_by_xpath(\"//*[@id='EATERY_SEARCH_RESULTS']/div[contains(@class, 'listing rebrand listingIndex')]/div[2]/div[1]\")\n except Exception:\n print(\"No restaurants on this page.\")\n\n for rest in restaurants:\n restName = rest.find_element_by_xpath(\".//div[1]/a\").text\n restLink = rest.find_element_by_xpath(\".//div[1]/a\").get_attribute(\"href\")\n restRank = rest.find_element_by_xpath(\".//div[3]/div\").text\n try:\n restRating = rest.find_element_by_xpath(\".//div[2]/span\").get_attribute(\"alt\")\n except:\n restRating = 'No Rating'\n try:\n restReviews = rest.find_element_by_xpath(\".//*[@class='reviewCount']/a\").text\n except:\n restReviews = 'No Reviews'\n try:\n restPrice = rest.find_element_by_xpath(\".//*[@class='item price']\").text\n except:\n restPric = 'No Price Information'\n row = [restName, restLink, restRank, restRating, restReviews, restPrice]\n pageOutput.append(row)\n\n try:\n elem1 = driver.find_element_by_link_text(\"Volgende\")\n elem1.click()\n except:\n print(\"Next page can't be reached.\")\n break\n output.extend(pageOutput)\n time.sleep(5)\n\ndf_restaurants = pd.DataFrame(output, columns=['Name', 'Link', 'Rank', 'Rating', 'Reviews', 'Price'])\ndf_restaurants.to_csv(\"C:/Users/gjave/Desktop/restaurants.csv\", sep = '|', encoding='UTF-8')","sub_path":"scrapers/tripadvisor_restaurant_scraper_chrome_driver.py","file_name":"tripadvisor_restaurant_scraper_chrome_driver.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"458075779","text":"__copyright__ = \"\"\"\n Copyright 2020 Diomidis Spinellis\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n__license__ = \"Apache 2.0\"\n\nfrom datetime import datetime, timedelta\nfrom dp3t.config import EPOCH_LENGTH\nfrom dp3t.protocols.client_database import ClientDatabase\nfrom dp3t.protocols.server_database import ServerDatabase\nfrom dp3t.protocols.unlinkable import epoch_from_time\nfrom os import close, remove\nfrom pathlib import Path\nimport pytest\nimport requests\nimport subprocess\nfrom tempfile import mkstemp\nfrom time import sleep\n\n\n# A start time within the retention period\nSTART_TIME = datetime.utcnow() - timedelta(days=2)\nSERVER_URL = \"http://127.0.0.1:5000\"\n\n\n###################\n### TEST UPLOAD ###\n###################\n\n\ndef script_path(script):\n \"\"\"Return the full path of a script in the epidose/ directory. \"\"\"\n return Path(__file__).parent.parent / \"epidose\" / script\n\n\n@pytest.fixture\ndef test_context():\n # Initialize client database\n (client_db_handle, client_db_path) = mkstemp()\n close(client_db_handle)\n client_db = ClientDatabase(client_db_path)\n\n # Add seeds to the client database\n for i in range(0, 10):\n e = epoch_from_time(START_TIME + timedelta(minutes=i * EPOCH_LENGTH))\n client_db.add_epoch_ids(e, bytes.fromhex(f\"deadbeef0{i}\"), f\"E{i}\")\n close(client_db_handle)\n # subprocess.call([\"/home/dds/src/epidose/utils/client-db-report.sh\", client_db_path])\n\n # Instantiate the back-end server\n (server_db_handle, server_db_path) = mkstemp()\n close(server_db_handle)\n server_proc = subprocess.Popen(\n [script_path(\"back_end/ha_server.py\"), \"-d\", \"-v\", \"-D\", server_db_path]\n )\n\n # Wait for server to come up\n while True:\n try:\n res = requests.get(f\"{SERVER_URL}/version\")\n if res.ok:\n break\n except requests.exceptions.ConnectionError:\n pass\n sleep(0.1)\n\n server_db = ServerDatabase(server_db_path)\n\n yield (client_db_path, server_db)\n # Shutdown the server\n res = requests.get(f\"{SERVER_URL}/shutdown\")\n assert res.ok\n\n # Wait for the server to finish\n server_proc.wait()\n\n # Cleanup\n server_db.close()\n remove(server_db_path)\n remove(client_db_path)\n\n\ndef test_upload_contacts(test_context):\n (client_db_path, server_db) = test_context\n\n # Upload contacts from database\n client_proc = subprocess.run(\n [\n script_path(\"device/upload_contacts.py\"),\n \"-d\",\n \"-D\",\n client_db_path,\n \"-s\",\n SERVER_URL,\n \"-v\",\n (START_TIME + timedelta(minutes=2 * EPOCH_LENGTH)).isoformat(),\n (START_TIME + timedelta(minutes=8 * EPOCH_LENGTH)).isoformat(),\n ]\n )\n assert client_proc.returncode == 0\n\n # See if they arrived in the server database\n (epochs, seeds) = server_db.get_epoch_seeds_tuple()\n assert (\n epoch_from_time(START_TIME + timedelta(minutes=2 * EPOCH_LENGTH)) - 1\n not in epochs\n )\n assert epoch_from_time(START_TIME + timedelta(minutes=2 * EPOCH_LENGTH)) in epochs\n assert epoch_from_time(START_TIME + timedelta(minutes=8 * EPOCH_LENGTH)) in 
epochs\n assert (\n epoch_from_time(START_TIME + timedelta(minutes=8 * EPOCH_LENGTH)) + 1\n not in epochs\n )\n assert bytes.fromhex(\"deadbeef06\") in seeds\n","sub_path":"tests/test_upload_contacts.py","file_name":"test_upload_contacts.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"31395182","text":"from django.conf.urls import url\nfrom . import views\n\n\napp_name = 'WebMonitor'\n\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'list/$', views.list, name='list'),\n # url(r'info/$', views.info, name='info'),\n]","sub_path":"MyTest/WebMonitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"463433129","text":"import pandas as pd\nfrom kronall import *\nfrom pauli_matrix import *\nfrom mapping import *\nfrom plot import *\n\n\n# In[91]:\n\npara=pd.read_csv('he2_value.txt',sep='\\s+',header=None)\npara=np.array(para)\n\n\n# In[150]:\n\nnumber=2\neig1=[]\neig2=[]\nmatrix1=[]\nmatrix2=[]\nmatrix3=[]\nmatrix4=[]\nmatrix5=[]\nmatrix6=[]\nmatrix7=[]\nmatrix8=[]\nmatrix9=[]\nmatrix10=[]\nmatrix11=[]\nmatrix12=[]\nmatrix13=[]\nmatrix14=[]\ncount1=[]\nsign=[1]*number\nfor index1 in range(number):\n\tif index1>0:\n\t\tsign[index1-1]=-1\n\tmatrice=diag_pauli(cr_ham([i1,i,i,z,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix1.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,z,i,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix2.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,z,i,i,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix3.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,i,i,z],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix4.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,z,i,z,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix5.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,i,z,z],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix6.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,z,z,i,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix7.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,z,i,z],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix8.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,y,y,x,x],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix9.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,y,x,x,y],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix10.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,x,y,y,x],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix11.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,x,x,y,y],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix12.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,i,z,z,i],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix13.append(matrix)\n\tmatrice=diag_pauli(cr_ham([i1,z,i,i,z],number,sign))\n\tmatrix=0\n\tfor matrixa in matrice:\n\t\tmatrix=matrixa[0][0]*kronall(matrixa[1:])+matrix\n\tmatrix14.append(matrix)\n\tcount1.append(count(4,number,sign))\n\n\nfor index in np.arange(0,500,10):\n\ta1=para[index,1]\n\ta2=para[index,2]\n\ta3=para[index,3]\n\ta4=para[index,4]\n\ta5=para[index,5]\n\ta6=para[index,6]\n\ta7=para[index,7]\n\ta8=para[index,8]\n\ta9=para[index,9]\n\ta10=para[index,10]\n\ta11=para[index,11]\n\ta12=para[index,12]\n\ta13=para[index,13]\n\ta14=para[index,14]\n\n\t\n\teig3=[]\n\tfor index1 in 
range(number):\n\t\tmatrix=a1*matrix1[index1]+a2*matrix2[index1]+a3*matrix3[index1]+a4*matrix4[index1]+a5*matrix5[index1]\n\t\tmatrix=matrix+a6*matrix6[index1]+a7*matrix7[index1]+a8*matrix8[index1]+a9*matrix9[index1]+a10*matrix10[index1]\n\t\tmatrix=matrix+a11*matrix11[index1]+a12*matrix12[index1]+a13*matrix13[index1]+a14*matrix14[index1]\n\t\tcount2=count1[index1]\n\t\teig=1000\n\t\t# iterate until the eigenvalue estimate is self-consistent\n\t\twhile True:\n\t\t\ti_eig=np.argmin(matrix-eig*count2)\n\t\t\tco=count2[i_eig]\n\t\t\teig=matrix[i_eig]/co\n\t\t\tif min(matrix-eig*count2)>=-0.01:\n\t\t\t\tbreak\n\t\teig3.append(eig)\n\teig1.append(min(eig3)+para[index,0])\n\tprint(min(eig3)+para[index,0])\n\teigvalue,eigvector=np.linalg.eig(a1*kronall([i,i,z,i])+a2*kronall([i,z,i,i])+a3*kronall([z,i,i,i])+a4*kronall([i,i,i,z])+a5*kronall([z,i,z,i])+a6*kronall([i,i,z,z])+a7*kronall([z,z,i,i])+a8*kronall([i,z,i,z])-a9*kronall([y,y,x,x])-a10*kronall([y,x,x,y])-a11*kronall([x,y,y,x])-a12*kronall([x,x,y,y])+a13*kronall([i,z,z,i])+a14*kronall([z,i,i,z]))\n\teig2.append(min(eigvalue)+para[index,0])\n\nfrom matplotlib import pyplot as plt\nplt.plot(eig1,'ro')\nplt.plot(eig2)\nplt.show()\n","sub_path":"mapping_new/h2.py","file_name":"h2.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"72974793","text":"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport sys\nfrom os.path import abspath, dirname, exists, join, isfile\nimport shutil\nimport glob\nfrom io import StringIO\n\nimport six\nimport json\nimport pytest\nfrom io import StringIO\n\nfrom asv import config\nfrom asv.commands.run import Run\nfrom asv.commands.publish import Publish\nfrom asv.commands.find import Find\nfrom asv.commands.continuous import Continuous\n\n\n@pytest.fixture\ndef basic_conf(tmpdir):\n tmpdir = six.text_type(tmpdir)\n local = abspath(dirname(__file__))\n os.chdir(tmpdir)\n\n machine_file = join(tmpdir, 'asv-machine.json')\n\n shutil.copyfile(join(local, 'asv-machine.json'),\n machine_file)\n\n conf = config.Config.from_json({\n 'env_dir': join(tmpdir, 'env'),\n 'benchmark_dir': join(local, 'benchmark'),\n 'results_dir': join(tmpdir, 'results_workflow'),\n 'html_dir': join(tmpdir, 'html'),\n 'repo': 'https://github.com/spacetelescope/asv.git',\n 'dvcs': 'git',\n 'project': 'asv',\n 'matrix': {\n \"six\": [None],\n \"psutil\": [\"1.2\", \"2.1\"]\n }\n })\n\n return tmpdir, local, conf, machine_file\n\n\ndef test_run_publish(basic_conf):\n tmpdir, local, conf, machine_file = basic_conf\n\n # Tests a typical complete run/publish workflow\n Run.run(conf, range_spec=\"6b1fb9b04f..2927a27ec\", steps=2,\n _machine_file=machine_file, quick=True)\n\n assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5\n assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2\n\n Publish.run(conf)\n\n assert isfile(join(tmpdir, 'html', 'index.html'))\n assert isfile(join(tmpdir, 'html', 'index.json'))\n assert isfile(join(tmpdir, 'html', 'asv.js'))\n assert isfile(join(tmpdir, 'html', 'asv.css'))\n\n # Check parameterized test json data format\n filename = glob.glob(os.path.join(tmpdir, 'html', 'graphs', 'arch-x86_64',\n 'cpu-Blazingly fast', 'machine-orangutan', 'os-GNU',\n 'Linux', 'psutil-2.1', 'python-*', 'ram-128GB',\n 'six', 'params_examples.time_skip.json'))[0]\n with open(filename, 'r') as fp:\n data = json.load(fp)\n assert len(data) == 2\n assert isinstance(data[0][0], int) # date\n assert len(data[0][1]) == 3\n assert len(data[1][1]) == 3\n assert isinstance(data[0][1][0], float)\n assert isinstance(data[0][1][1], float)\n assert data[0][1][2] is None\n\n # Check that the skip options work\n s = StringIO()\n stdout = sys.stdout\n try:\n sys.stdout = s\n Run.run(conf, range_spec=\"6b1fb9b04f..2927a27ec\", steps=2,\n _machine_file=join(tmpdir, 'asv-machine.json'), quick=True,\n skip_successful=True, skip_failed=True)\n Run.run(conf, range_spec=\"6b1fb9b04f..2927a27ec\", steps=2,\n _machine_file=join(tmpdir, 'asv-machine.json'), quick=True,\n skip_existing_commits=True)\n finally:\n sys.stdout = stdout\n s.seek(0)\n text = s.read()\n assert 'Running benchmarks.' 
not in text\n\n # Check EXISTING works\n Run.run(conf, range_spec=\"EXISTING\",\n _machine_file=machine_file, quick=True)\n\n # Remove the benchmarks.json file to make sure publish can\n # regenerate it\n\n os.remove(join(tmpdir, \"results_workflow\", \"benchmarks.json\"))\n\n Publish.run(conf)\n\n\ndef test_continuous(basic_conf):\n tmpdir, local, conf, machine_file = basic_conf\n\n # Check that asv continuous runs\n s = StringIO()\n stdout = sys.stdout\n os.environ['_ASV_TEST_TRACK_FILE'] = join(tmpdir, 'track-file')\n try:\n sys.stdout = s\n Continuous.run(conf, _machine_file=machine_file, show_stderr=True)\n finally:\n sys.stdout = stdout\n del os.environ['_ASV_TEST_TRACK_FILE']\n\n s.seek(0)\n text = s.read()\n assert \"SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY\" in text\n assert \"params_examples.track_find_test(2) 1.0 6.0 6.00000000x\" in text\n assert \"params_examples.ClassOne\" in text\n\n\ndef test_find(basic_conf):\n tmpdir, local, conf, machine_file = basic_conf\n\n # Test find at least runs\n s = StringIO()\n stdout = sys.stdout\n os.environ['_ASV_TEST_TRACK_FILE'] = join(tmpdir, 'track-file')\n try:\n sys.stdout = s\n Find.run(conf, \"6b1fb9b04f..2927a27ec\", \"params_examples.track_find_test\",\n _machine_file=machine_file)\n finally:\n sys.stdout = stdout\n del os.environ['_ASV_TEST_TRACK_FILE']\n\n # Check it found the first commit after the initially tested one\n s.seek(0)\n output = s.read()\n assert \"Greatest regression found: 85172e6d\" in output\n\n\nif __name__ == '__main__':\n from asv import console\n console.log.enable()\n\n from asv import machine\n machine.Machine.hardcoded_machine_name = 'orangutan'\n\n test_workflow('/tmp')\n","sub_path":"test/test_workflow.py","file_name":"test_workflow.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"303374234","text":"\nfrom flask import Flask\nfrom src.restful_app import restful_api\nfrom extensions import session\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object('config')\n\n restful_api(app)\n session.init_app(app)\n return app\n\n\napp = create_app()\n","sub_path":"web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"413044649","text":"import numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import log_loss\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom mlp_sequential import MLP\n\ndef softmax(logits):\n logits -= np.expand_dims(np.max(logits, axis=1), axis=1)\n logits = np.exp(logits)\n logits /= np.expand_dims(np.sum(logits), axis=1)\n return logits\n\ndef softmax_1D(vec):\n vec = vec - np.max(vec)\n xform = np.exp(vec)\n xform /= np.sum(xform)\n return xform\n\niris = load_iris()\nX, y = iris.data, iris.target\nacc_scores = []\nmax_acc_scores = []\n\nfor scale in np.logspace(-3, 3, 10):\n print(scale)\n mlp = MLP(\n n_hidden=8, scale=scale, n_iter=100000, prior_scale=0.2,\n random_state=0, alpha=0.0)\n mlp.partial_fit(n_features=4, labels=np.unique(y))\n mlp.partial_fit(X, y)\n\n # Taking the best\n s_w = mlp.rng_.multinomial(100000, mlp.weights_)\n t = np.argmax(s_w)\n wi = mlp.samples_i_[t]\n wo = mlp.samples_o_[t]\n probs = mlp.forward(X, wi, wo)\n pred = np.argmax(probs, axis=1)\n max_acc_score = accuracy_score(pred, y)\n print(max_acc_score)\n max_acc_scores.append(max_acc_score)\n\n # Taking the average according to the weights\n probs = mlp.predict(X)\n ave_acc_score = accuracy_score(probs, y)\n print(ave_acc_score)\n acc_scores.append(ave_acc_score)\n","sub_path":"script_mlp.py","file_name":"script_mlp.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"75600442","text":"#!//usr/bin/env python\nimport boto3\nimport time\nimport sys\nfrom botocore.exceptions import ClientError\n\nclass igwops:\n\t\n\tdef __init__(self):\n\t#detach_internet_gateway\n\t\tself.ec2=boto3.client(\"ec2\")\n\t\tigw=raw_input('igwid: ' )\n\t\tvpcid=raw_input('vpcid: ' )\n\t\t\n\t\ttry:\n\n\t\t\tresponse = self.ec2.detach_internet_gateway(InternetGatewayId=igw, VpcId=vpcid)\n\t\t\tprint (\"igw deattached successfully\")\n\t\texcept ClientError as igw_error:\n\t\t\tprint (\"igw is deattached already as {}\".format(igw_error))\n\t\t\tresponse1 = self.ec2.delete_internet_gateway(InternetGatewayId=igw)\n\t\t\tprint (\"igw deleted successfully with id:{}\".format(igw))\n\ns=igwops()\n","sub_path":"delanddeattach.py","file_name":"delanddeattach.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"14804211","text":"# coding: utf-8\n# Copyright 2013 The Font Bakery Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.\n\nfrom datetime import datetime\nimport os\nfrom flask import current_app, json\nfrom ..decorators import lazy_property\nfrom ..extensions import db\n\nfrom .state import (project_state_get, project_state_save, walkWithoutGit)\n\n\nclass Project(db.Model):\n __tablename__ = 'project'\n __table_args__ = {'sqlite_autoincrement': True}\n id = db.Column(db.Integer, primary_key=True)\n login = db.Column(db.String(60), index=True)\n name = db.Column(db.String(60), index=True)\n full_name = db.Column(db.String(60))\n html_url = db.Column(db.String(60))\n data = db.Column(db.PickleType())\n clone = db.Column(db.String(400))\n is_github = db.Column(db.Boolean(), index=True)\n is_ready = db.Column(db.Boolean(), index=True, default=False)\n\n builds = db.relationship('ProjectBuild', backref='project', lazy='dynamic')\n\n def cache_update(self, data):\n self.html_url = data['html_url']\n self.name = data['name']\n self.data = data\n\n @lazy_property\n def config(self):\n # if it is not purely visible, but @lazy_property decorator cache state\n # values in runtime, when this class property acessed for the 1st time\n # it store state value. You can access it and modify, but at the end of\n # the request all modifications dies if wasn't saved\n #\n _state, _local = project_state_get(project = self)\n return {'state': _state, 'local': _local}\n\n def save_state(self):\n project_state_save(self)\n\n def setup_status(self):\n # Return project status.\n return self.config['local'].get('source', None)\n\n @property\n def title(self):\n \"\"\"\n Return project title, resolved from repo clone if no rename family name given\n \"\"\"\n # Split the git clone URL on /, take the last part, remove '.git' if it exists\n # eg \"https://github.com/davelab6/league-gothic.git\" -> league-gothic.git -> league-gothic\n title = self.clone.split('/')[-1].replace('.git', '')\n # If URL terminates with a /, title will be None, so take the 2nd to last part\n # eg \"https://github.com/davelab6/league-gothic/\" -> league-gothic\n if not title:\n title = self.clone.split('/')[-2].replace('.git', '')\n # use the family rename input if it was given\n if self.is_ready and self.config['state'].get('familyname', None):\n title = self.config['state']['familyname']\n return title\n\n def asset_by_name(self, name):\n \"\"\"\n Resolve asset id into its real path. 
For internal use.\n\n :param name: handle for file conventionally found in repositories\n\n \"\"\"\n DATA_ROOT = current_app.config.get('DATA_ROOT')\n if name == 'log':\n fn = os.path.join(DATA_ROOT, '%(login)s/%(id)s.process.log' % self)\n elif name == 'yaml':\n fn = os.path.join(DATA_ROOT, '%(login)s/%(id)s.bakery.yaml' % self)\n elif name == 'metadata':\n fn = os.path.join(DATA_ROOT, '%(login)s/%(id)s.out/' % self, 'METADATA.json')\n elif name == 'metadata_new':\n fn = os.path.join(DATA_ROOT, '%(login)s/%(id)s.out/' % self, 'METADATA.json.new')\n elif name == 'license':\n fn = os.path.join(DATA_ROOT, '%(login)s/%(id)s.in/' % self, self.config['state']['license_file'])\n elif name == 'description':\n fn = os.path.join(DATA_ROOT, '%(login)s/%(id)s.out/' % self, 'DESCRIPTION.en_us.html')\n else:\n fn = None\n return fn\n\n def read_asset(self, name = None):\n fn = self.asset_by_name(name)\n if os.path.exists(fn) and os.path.isfile(fn):\n return unicode(open(fn, 'r').read(), \"utf8\")\n else:\n return ''\n\n def treeFromFilesystem(self, folder=None):\n \"\"\"\n Read files tree in specied directory\n\n :param folder: handle for tree, either 'in' or 'out'\n\n Returns:\n folderContents: Dictionary of file and directory strings\n \"\"\"\n DATA_ROOT = current_app.config.get('DATA_ROOT')\n _in = os.path.join(DATA_ROOT, '%(login)s/%(id)s.in/' % self)\n _out = os.path.join(DATA_ROOT, '%(login)s/%(id)s.out/' % self)\n if folder == 'in' and os.path.exists(_in):\n folderContents = walkWithoutGit(_in)\n elif folder == 'out' and os.path.exists(_out):\n folderContents = walkWithoutGit(_out)\n else:\n folderContents = { 'Sorry, filesystem unavailable': '' }\n return folderContents\n\n def textFiles(self):\n \"\"\"\n Read all the text files found in the _in repo\n\n Returns:\n textFiles: Dictionary of file and directory strings\n \"\"\"\n DATA_ROOT = current_app.config.get('DATA_ROOT')\n _in = os.path.join(DATA_ROOT, '%(login)s/%(id)s.in/' % self)\n textFiles = {}\n for textFile in self.config['local']['txt_files']:\n fn = os.path.join(_in, textFile)\n if os.path.exists(fn) and os.path.isfile(fn):\n textFiles[textFile] = unicode(open(fn, 'r').read(), \"utf8\")\n return textFiles\n\n def save_asset(self, name = None, data = None, **kwarg):\n \"\"\" Save static files into out folder \"\"\"\n if name == 'description':\n f = open(self.asset_by_name(name), 'w')\n f.write(data)\n f.close()\n elif name == 'metadata':\n f = open(self.asset_by_name(name), 'w')\n json.dump(json.loads(data), f, indent=2, ensure_ascii=True) # same params as in generatemetadata.py\n f.close()\n\n if kwarg.get('del_new') and kwarg['del_new']:\n if os.path.exists(self.asset_by_name('metadata_new')):\n os.remove(self.asset_by_name('metadata_new'))\n\n @property\n def family_stat(self):\n from ..models import FontStats\n\n if self.config['state'].get('stats_family_name'):\n return FontStats.by_family(self.config['state']['stats_family_name'])\n elif self.config['state'].get('familyname'):\n return FontStats.by_family(self.config['state']['familyname'])\n else:\n return None\n\n def __getitem__(self, key):\n \"\"\" Magic method that allow to access ORM properties using\n object-dot-propertyname \"\"\"\n # make magic mapping works\n return self.__dict__.get(key)\n\nclass ProjectBuild(db.Model):\n __tablename__ = 'project_build'\n __table_args__ = {'sqlite_autoincrement': True}\n id = db.Column(db.Integer, primary_key=True)\n project_id = db.Column(db.Integer, db.ForeignKey('project.id'))\n githash = db.Column(db.String(40))\n is_success = 
db.Column(db.Boolean())\n created = db.Column(db.DateTime, default=datetime.now)\n\n @staticmethod\n def make_build(project_id):\n pass\n\n\nclass ProjectTest(db.Model):\n __tablename__ = 'project_test'\n __table_args__ = {'sqlite_autoincrement': True}\n id = db.Column(db.Integer, primary_key=True)\n build_id = db.Column(db.Integer, db.ForeignKey('project.id'))\n\n","sub_path":"bakery/project/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"510330996","text":"# Letters and Numbers: Given an array filled with letters and numbers, find the longest subarray with\n# an equal number of letters and numbers\n\n# solution O(N^2)\ndef checkequality(lis,st,en):\n equ=0\n lis=str(lis[st:en])\n for i in lis:\n if i.isalpha():\n equ += 1\n elif i.isdigit():\n equ -= 1\n return equ == 0 # return true or false over equ check to 0\n\ndef findlongest(lis):\n for i in range(len(lis),1,-1):\n for j in range(0,len(lis)-i):\n if (checkequality(lis,j,j+i-1)):\n return lis[j:j+i-1]\n return None\nlis=[1,'a',1,'a',2,3,2,2,3,'a',3,'a',3,'a','a','a',3,'a',3,3]\nprint(findlongest(lis))","sub_path":"17.hard/Letters.py","file_name":"Letters.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"523635521","text":"import pandas as pd\nimport torch\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset\n\n\n# 打印log\ndef log_string(log, string):\n log.write(string + '\\n')\n log.flush()\n print(string)\n\n\n# metric\ndef metric(pred, label):\n mask = torch.ne(label, 0)\n mask = mask.type(torch.float32)\n mask /= torch.mean(mask)\n mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)\n\n mae = torch.abs(torch.sub(pred, label)).type(torch.float32)\n rmse = mae ** 2\n mape = mae / label\n\n mae = torch.mean(mae)\n rmse = rmse * mask\n rmse = torch.sqrt(torch.mean(rmse))\n\n mape = mape * mask\n mape = torch.where(torch.isnan(mape), torch.zeros_like(mape), mape)\n mape = torch.mean(mape)\n\n return mae, rmse, mape\n\n\ndef mae_loss(pred, label):\n mask = torch.ne(label, 0)\n mask = mask.type(torch.float32)\n mask /= torch.mean(mask)\n mask = torch.where(torch.isnan(mask), torch.tensor(0.0), mask)\n\n loss = torch.abs(torch.sub(pred, label))\n loss *= mask\n loss = torch.where(torch.isnan(loss), torch.tensor(0.0), loss)\n\n loss = torch.mean(loss)\n return loss\n\n\ndef plot_train_val_loss(train_total_loss, val_total_loss, file_path):\n \"\"\"绘制损失\"\"\"\n plt.figure(figsize=(10, 5))\n plt.plot(range(1, len(train_total_loss) + 1), train_total_loss, c='b', marker='s', label='Train')\n plt.plot(range(1, len(val_total_loss) + 1), val_total_loss, c='r', marker='o', label='Validation')\n plt.legend(loc='best')\n plt.title('Train loss vs Validation loss')\n plt.savefig(file_path)\n\n\ndef save_test_result(trainPred, trainY, valPred, valY, testPred, testY):\n \"\"\"保存测试结果\"\"\"\n with open('./figure/test_results.txt', 'w+') as f:\n for l in (trainPred, trainY, valPred, valY, testPred, testY):\n f.write(list(l))\n\n\ndef count_parameters(model):\n \"\"\"统计模型参数\"\"\"\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef seq2instance(data, num_his, num_pred):\n \"\"\"生成样本\"\"\"\n num_step, dims = data.shape\n num_sample = num_step - num_his - num_pred + 1\n x = torch.zeros(num_sample, num_his, dims)\n y = torch.zeros(num_sample, num_pred, dims)\n for i in range(num_sample):\n x[i] = data[i: i + num_his]\n y[i] = data[i + num_his: i + num_his + num_pred]\n\n return x, y\n\n\n# 数据\ndef load_data(args):\n df = pd.read_hdf(args.traffic_file)\n traffic = torch.from_numpy(df.values)\n\n # train/val/test\n num_steps = df.shape[0]\n train_steps = round(args.train_ratio * num_steps)\n test_steps = round(args.test_ratio * num_steps)\n val_steps = num_steps - train_steps - test_steps\n\n train = traffic[:train_steps]\n val = traffic[train_steps: train_steps + val_steps]\n test = traffic[-test_steps:]\n\n # X, Y\n trainX, trainY = seq2instance(train, args.num_his, args.num_pred)\n valX, valY = seq2instance(val, args.num_his, args.num_pred)\n testX, testY = seq2instance(test, args.num_his, args.num_pred)\n\n # 归一化\n mean, std = torch.mean(trainX), torch.std(trainX)\n trainX = (trainX - mean) / std\n valX = (valX - mean) / std\n testX = (testX - mean) / std\n\n # 空间嵌入,node2vec\n with open(args.SE_file, mode='r') as f:\n lines = f.readlines()\n temp = lines[0].split(' ')\n num_vertex, dims = int(temp[0]), int(temp[1]) # 顶点数,维度\n SE = torch.zeros((num_vertex, dims), dtype=torch.float32)\n for line in lines[1:]:\n temp = line.split(' ')\n index = int(temp[0])\n SE[index] = torch.tensor([float(ch) for ch in temp[1:]])\n\n # 时间嵌入,加入day_of_time和day_of_week作为嵌入表示\n time = pd.DatetimeIndex(df.index) # 这个直接就获得时序戳\n dayofweek = 
torch.reshape(torch.tensor(time.weekday), (-1, 1)) # 获得每条数据的星期几数据\n\n timeofday = (time.hour*3600 + time.minute*60 + time.second) // (5 * 60) # 获得每条数据是第几个5分钟\n timeofday = torch.reshape(torch.tensor(timeofday), (-1, 1))\n\n time = torch.cat((dayofweek, timeofday), -1)\n\n train = time[:train_steps]\n val = time[train_steps:train_steps + val_steps]\n test = time[-test_steps:]\n\n trainTE = seq2instance(train, args.num_his, args.num_pred)\n # shape(num_sample, num_his or num_pred, 2)\n trainTE = torch.cat(trainTE, 1).type(torch.int32)\n # shape(num_sample, num_his + num_pred, 2)\n\n valTE = seq2instance(val, args.num_his, args.num_pred)\n valTE = torch.cat(valTE, 1).type(torch.int32)\n\n testTE = seq2instance(test, args.num_his, args.num_pred)\n testTE = torch.cat(testTE, 1).type(torch.int32)\n\n return trainX, trainTE, trainY, valX, valTE, valY, testX, testTE, testY, SE, mean, std\n\n\nclass dataset(Dataset):\n def __init__(self, data_x, data_y):\n self.data_x = data_x\n self.data_y = data_y\n self.len = data_x.shape[0]\n\n def __getitem__(self, index):\n return self.data_x[index], self.data_y[index]\n\n def __len__(self):\n return self.len\n\n\n\n\n\n\n\n\n\n","sub_path":"METR/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
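A quick shape check of the sliding-window logic, run against the seq2instance defined in utils.py above (values are arbitrary):

import torch

data = torch.arange(20.0).reshape(10, 2)  # 10 time steps, 2 sensors
x, y = seq2instance(data, num_his=3, num_pred=2)
print(x.shape, y.shape)  # torch.Size([6, 3, 2]) torch.Size([6, 2, 2])
# num_sample = 10 - 3 - 2 + 1 = 6; x[0] covers steps 0..2, y[0] covers steps 3..4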
+{"seq_id":"602552944","text":"from typing import Text, List, Tuple\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\nimport re\n\nWEBSITE = \"https://web.wpi.edu/Pubs/E-project/browse/iqp_by_author/all.html\"\nSINGLE_COLUMNS = ['Project Type', \n 'Submission date', \n 'URN', \n 'Division', \n 'Center',\n 'Sponsor',\n 'Title',\n 'Availability']\nMULTI_COLUMNS = ['Author',\n 'Advisor']\nTHREADS = 128\nSESSION = None\n\ndef extract_links(url):\n def clean_row(row):\n final = []\n\n pruned = list(row.children)[2:]\n url = list(pruned[0].children)[0].get('href')\n\n return url\n\n res = requests.get(url)\n\n if res.status_code != 200:\n raise Exception(\"Error getting web data\")\n\n soup = BeautifulSoup(res.content, 'lxml')\n entries = soup.find_all('tr')[2:]\n\n pool = ThreadPool(THREADS)\n links = pool.map(clean_row, entries)\n\n pool.close()\n pool.join()\n\n return links\n\n\ndef extract_attribute(attribute: Text, \n normalized_content: Text,\n extraction_func) -> Tuple[Text, Text]:\n block_matcher = '<{1}((?!<{1}).)*{0}(((?!<\\/{1}>).)*<\\/{1}>){{2}}' \n\n match_str = block_matcher.format(attribute, 'td')\n html = re.search(match_str, normalized_content)\n\n if not html:\n return None, None\n\n soup = BeautifulSoup(html.group(0), 'lxml')\n tds = soup.findAll('td')\n\n return extraction_func(tds)\n\n\ndef _extract_multi_attribute(tds):\n key = tds[0].text\n value = tds[1]\n \n people = []\n majors = []\n\n for person_major in value.findAll('li'):\n split = person_major.text.split(',')\n\n person = ','.join(split[:-1])\n person_majors = split[-1].strip()\n\n if person in people:\n continue\n\n people.append(person)\n\n for major in person_majors.split(' '):\n if len(major) > 1 and len(major) < 5:\n majors.append(major)\n\n return key, (people, majors)\n\n\ndef extract_project_info(page):\n print('Begin extraction for {}'.format(page))\n\n result = {}\n result['url'] = page\n\n res = SESSION.get(page)\n\n if res.status_code != 200:\n raise Exception(\"Error getting data for {}\".format(page))\n\n normalized_content = res.text.replace('\\n', '')\n\n for attribute in SINGLE_COLUMNS:\n key, value = extract_attribute(\n attribute,\n normalized_content,\n lambda l: (l[0].text, l[1].text)\n )\n\n if key:\n result[attribute] = value\n\n for attribute in MULTI_COLUMNS:\n key, (people, majors) = extract_attribute(\n attribute,\n normalized_content,\n _extract_multi_attribute\n )\n\n if key:\n result[attribute] = people\n result[attribute + '_majors'] = majors\n\n print('Finished {}'.format(page))\n return result\n\nif __name__ == '__main__':\n if 'links' not in globals():\n print('Extracting links')\n links = extract_links(WEBSITE)\n \n pool = ThreadPool(THREADS)\n with requests.Session() as SESSION:\n entries = pool.map(extract_project_info, links[:25])\n\n data = pd.DataFrame(entries)\n\n pool.close()\n pool.join()\n","sub_path":"src/data_scraper.py","file_name":"data_scraper.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"492612090","text":"\nimport os\nimport math\nimport random\nimport pygame\nfrom pygame.locals import *\n\n#sc_size = (1920,1080)\nsc_size = (0,0)\nsc_flags = pygame.FULLSCREEN\n#sc_flags = 0\nsc_bits = 32\n\ncolormap = (\n (0xAB614E,0x2F373E,0x3D5A79,0x5385BB,0xA8BD9C),\n (0x9E7152,0x64585A,0x363E41,0x7A838A,0xCED0CB),\n (0x4C97A4,0x16342C,0x148739,0x1EB735,0x9EC5BF),\n (0x212426,0x5E3936,0x997764,0xC9B3A8,0xE1D4C7),\n (0xd8add9,0x9eafa5,0x889573,0xa1754c,0x001803)\n)\n\nclass Obj():\n\n def __init__(self,loc,angle,surf):\n self.loc = loc\n self.angle = angle\n self.surf = surf\n self.scalediv = 3\n\n def draw(self,target_surf):\n (scalex,scaley)=( int(self.surf.get_width()/self.scalediv), int(self.surf.get_height()/self.scalediv) )\n rotated_surf = pygame.transform.scale(rot_center(self.surf,self.angle),(scalex,scaley))\n rotsurf_rect = rotated_surf.get_rect()\n rotsurf_rect.center = self.loc\n target_surf.blit(rotated_surf, rotsurf_rect)\n\n def set_angle(self,angle):\n self.angle = angle\n\n def get_angle(self,other):\n return(math.degrees( math.atan2( (self.loc[1] - other.loc[1]), (other.loc[0] - self.loc[0]) ) ) )\n\n def get_distance(self,other):\n difx = (self.loc[0] - other.loc[0])\n dify = (self.loc[1] - other.loc[1])\n return( math.sqrt( difx*difx + dify*dify ) )\n\n def push(self,force,angle):\n xforce = force * math.cos(angle) + force * math.sin(angle)\n yforce = force * math.sin(angle) - force * math.cos(angle)\n self.loc[0] = self.loc[0] + xforce\n self.loc[1] = self.loc[1] + yforce\n\nclass Spit():\n\n def __init__(self,loc,angle):\n angle = math.radians(angle + 45 ) + (0.5 - random.random())/2\n self.loc = list(loc)\n self.lastloc = list(loc)\n self.speed = [ 3 * (math.sin(angle) - math.cos(angle) ) , 3 * (math.cos(angle) + math.sin(angle) ) ]\n self.life = 64 + random.randint(0,32)\n self.color = (255,255,255)\n\n def move(self):\n self.lastloc[0] = self.loc[0]\n self.lastloc[1] = self.loc[1]\n self.loc[0] = self.loc[0] + self.speed[0]\n self.loc[1] = self.loc[1] + self.speed[1]\n self.speed[0] = self.speed[0] * 0.999\n self.speed[1] = self.speed[1] * 0.999\n self.color = (255,255,255,255-self.life)\n self.life = self.life - 1\n if self.life < 0:\n return(-1)\n else:\n return(0)\n \n def draw(self,screen):\n pygame.draw.line(screen,self.color,self.lastloc,self.loc,2)\n \n\ndef rot_center(image, angle):\n \"\"\"rotate an image while keeping its center and size - Needs square images\"\"\"\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image\n\ndef lerp(a,b,t):\n return ( a.r + (b.r - a.r) * t, a.g + (b.g - a.g) * t, a.b + (b.b - a.b) * t, a.a + (b.a - a.a) * t )\n\nspits = []\n\ntry:\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n\n pygame.init()\n pygame.mixer.init()\n screen=pygame.display.set_mode(sc_size,sc_flags,sc_bits)\n\n screen_rect=screen.get_rect()\n scx = int( screen.get_width() / 2 )\n scy = int( screen.get_height() / 2 )\n\n pygame.mixer.music.load(os.path.join('music','fsm-team-escp-quasarise.mp3'))\n pygame.mixer.music.play(loops=-1)\n\n ship_surf = pygame.image.load(os.path.join('graphics','ship.png')).convert_alpha()\n box_surf = pygame.image.load(os.path.join('graphics','RTS_crate.png')).convert_alpha()\n\n maptiles = pygame.image.load(os.path.join('graphics','tiles.png')).convert_alpha()\n maptiles_rect = maptiles.get_rect()\n\n ship = Obj(screen_rect.center,0,ship_surf)\n 
box = Obj([800,1000],0,box_surf)\n    \n    clock = pygame.time.Clock()\n\n    rot = 0\n\n    running = True\n    while(running):\n        for event in pygame.event.get():\n            if event.type==pygame.QUIT:\n                running=False\n        keys = pygame.key.get_pressed()\n        if keys[pygame.K_ESCAPE]:\n            running = False\n\n        (mx,my)=pygame.mouse.get_pos()\n\n        rot = rot + 1\n        if rot > 360:\n            rot = rot - 360\n\n\n        # iterate over a copy: removing items from a list while iterating it skips elements\n        for spit in spits[:]:\n            if spit.move() == -1:\n                spits.remove(spit)\n\n        spits.append(Spit(screen_rect.center,rot))\n\n        box.set_angle( ship.get_angle(box) )\n        #print(ship.get_distance(box))\n        box.push(1.3,0)\n\n        screen.fill( colormap[0][1] )\n\n        colorfrom = pygame.Color( colormap[0][2] )\n        colorto = pygame.Color( colormap[0][3] )\n        for count in range(0,512):\n            lerpcolor = lerp(colorfrom,colorto,count/512)\n            pygame.draw.line(screen,lerpcolor,(scx-256+count,0),(scx-256+count,scy*2))\n\n        maptiles_rect.center = screen_rect.center\n        screen.blit(maptiles, [0,0])\n        \n        box.draw(screen)\n        ship.draw(screen)\n\n        \n        for spit in spits:\n            spit.draw(screen)\n\n\n        pygame.draw.line(screen,colormap[0][3],(mx,my-20),(mx,my+20))\n        pygame.draw.line(screen,colormap[0][3],(mx-20,my),(mx+20,my))\n        pygame.draw.line(screen,colormap[0][3],ship.loc,box.loc,2)\n\n        pygame.display.update()\n        dt = clock.tick(60)\n\n# player.position.x += player.xSpeed * dt\n# player.position.y += player.ySpeed * dt\n        \nfinally:\n    pygame.quit()\n    \n","sub_path":"Scavenger.py","file_name":"Scavenger.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
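The commented-out player.position lines at the bottom hint at frame-rate-independent motion driven by the dt that clock.tick(60) already returns. A minimal sketch of that idea; the Player class here is hypothetical, not part of the file:

class Player(object):
    def __init__(self):
        self.x, self.y = 0.0, 0.0
        self.xSpeed, self.ySpeed = 0.05, 0.0  # pixels per millisecond

    def update(self, dt):
        # dt is the milliseconds elapsed since the last frame, so motion
        # stays constant in real time even when the frame rate fluctuates
        self.x += self.xSpeed * dt
        self.y += self.ySpeed * dt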
+{"seq_id":"376958799","text":"from functools import lru_cache\nfrom typing import List\n\n\nclass Solution:\n def maxCoins(self, nums: List[int]) -> int:\n sum_coins = 0\n for i, v in enumerate(nums):\n left, right = 1, 1\n if i > 0:\n left = nums[i - 1]\n if i + 1 < len(nums):\n right = nums[i + 1]\n gains = left * nums[i] * right\n mm = nums.copy()\n del mm[i]\n rest = self.maxCoins(mm)\n sum_coins = max(sum_coins, rest + gains)\n return sum_coins\n\n def __init__(self):\n self.nums_list = []\n self.nums_value = []\n\n def maxCoinsT2D(self, nums: List[int]) -> int:\n if nums in self.nums_value:\n index = self.nums_list.index(nums)\n return self.nums_value[index]\n else:\n sum_coins = 0\n for i, v in enumerate(nums):\n left, right = 1, 1\n if i > 0:\n left = nums[i - 1]\n if i + 1 < len(nums):\n right = nums[i + 1]\n gains = left * nums[i] * right\n mm = nums.copy()\n del mm[i]\n rest = self.maxCoinsT2D(mm)\n sum_coins = max(sum_coins, rest + gains)\n self.nums_list.append(mm)\n self.nums_list.append(sum_coins)\n return sum_coins\n\n\n def maxCoinDAC(self, nums: List[int]) -> int:\n\n nums = [1] + nums + [1]\n\n @lru_cache\n def dp(left, right):\n # maximum if we burst all nums[left]...nums[right], inclusive\n if right - left < 0:\n return 0\n result = 0\n # find the last burst one in nums[left]...nums[right]\n for i in range(left, right + 1):\n # nums[i] is the last burst one\n gain = nums[left - 1] * nums[i] * nums[right + 1]\n # nums[i] is fixed, recursively call left side and right side\n remaining = dp(left, i - 1) + dp(i + 1, right)\n # update the result\n result = max(result, remaining + gain)\n return result\n\n # we can not burst the first one and the last one\n # since they are both fake balloons added by ourselves\n return dp(1, len(nums) - 2)\n\n\ninput = [3, 1, 5, 8]\n\nsolution = Solution()\nx = solution.maxCoins(input)\nprint(x)\ny = solution.maxCoinsT2D(input)\nprint(y)\nz = solution.maxCoinDAC(input)\nprint(z)\n","sub_path":"312.py","file_name":"312.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"459142922","text":"import torch\nfrom torch.utils.data import Dataset\n\nfrom utils import get_corpus, get_type, encode_sentence, make_index_dictionary\n\n\nclass QuestionDataset(Dataset):\n def __init__(self, corpus, types, corpus_len):\n super(QuestionDataset, self).__init__()\n self.data = corpus\n self.label = types\n self.data_dim = corpus_len\n self.data_dictionary = make_index_dictionary()\n\n def __getitem__(self, item):\n x = encode_sentence(self.data[item], self.data_dictionary, self.data_dim)\n x = torch.tensor(x)\n y = self.label[item]\n y = torch.tensor(y, dtype=torch.long)\n return x, y\n\n def __len__(self):\n return len(self.data)\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"265484423","text":"import math\nimport random\nimport four_homes_and_six_entries as f\nfrom decimal import Decimal\n\nq = 1085 # 目标电通量\nq = (q - 74.216) / 0.9025 # 经验公式消除误差\nq_t = [f.xy(q / 900 / 24, 0.001)] # 计算消除误差后初始值\na = random.randint(200000, 230000) / 100000 # 拟合增量方程y=aln(x)+b\nb = random.randint(3300000, 4300000) / 100000 # 拟合增量方程y=aln(x)+b\ny = [] # 储存增量方程下某一标准增量\nfor fi in range(30, 391, 30): # 生成增量方程下某一标准增量\n temp = int(f.xy(a * math.log(fi, math.e) + b, 1))\n y.append(temp)\nincrement = [] # 储存增量\nfor fj in range(1, 13): # 提取增量\n increment.append(f.xy((y[fj] - y[fj - 1]) / 1000, 0.001))\nfor fk in range(12): # 生成数据\n q_t.append(f.xy(Decimal(q_t[fk]) + Decimal(increment[fk]), 0.001))\nprint(q_t)\nqq = 0\nfor fl in range(1, 12): # 根据数据计算电通量qq\n qq += q_t[fl]\nqq = int(f.xy(900 * (2 * qq + q_t[0] + q_t[12]), 1)) # 未校正电通量\nq_corr = int(f.xy(qq * (95 / 100) ** 2, 1)) # 校正100mm试样电通量\nprint(qq)\nprint(q_corr)","sub_path":"电通量.py","file_name":"电通量.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"590380721","text":"import os\nimport numpy as np\nimport warnings\nfrom sklearn.svm import SVC\nfrom sklearn.feature_selection import chi2\nfrom methods.moo_ensemble import MooEnsembleSVC\nfrom methods.moo_ensemble_bootstrap import MooEnsembleSVCbootstrap\nfrom methods.moo_ensemble_bootstrap_pruned import MooEnsembleSVCbootstrapPruned\nfrom methods.random_subspace_ensemble import RandomSubspaceEnsemble\nfrom methods.feature_selection_clf import FeatueSelectionClf\nfrom utils.load_dataset import find_datasets, calc_imbalance_ratio\n\n\nwarnings.filterwarnings(\"ignore\")\n\n\nbase_estimator = {'SVM': SVC(probability=True)}\n# IR is an example, not real values of datasets\nIR_class = {0: 1, 1: 1}\n\nmethods = {\n \"MooEnsembleSVC\": MooEnsembleSVC(base_classifier=base_estimator),\n \"MooEnsembleSVCbootstrap\": MooEnsembleSVCbootstrap(base_classifier=base_estimator),\n \"MooEnsembleSVCbootstrapPruned\": MooEnsembleSVCbootstrapPruned(base_classifier=base_estimator),\n \"RandomSubspace\": RandomSubspaceEnsemble(base_classifier=base_estimator),\n \"SVM\": SVC(),\n \"FS\": FeatueSelectionClf(base_estimator, chi2),\n \"FSIRSVM\": FeatueSelectionClf(SVC(kernel='linear', class_weight=IR_class), chi2)\n\n}\n\nmethods_alias = [\n \"SEMOOS\",\n \"SEMOOSb\",\n \"SEMOOSbp\",\n \"RS\",\n \"SVM\",\n \"FS\",\n \"FSIRSVM\"\n ]\n\nmetrics_alias = [\"BAC\", \"Gmean\", \"Gmean2\", \"F1score\", \"Recall\", \"Specificity\", \"Precision\"]\n\nn_splits = 2\nn_repeats = 5\nn_folds = n_splits * n_repeats\nn_methods = len(methods_alias) * len(base_estimator)\nn_metrics = len(metrics_alias)\n\ndirectories = [\"9lower\", \"9higher_part1\", \"9higher_part2\", \"9higher_part3\"]\n\nn_datasets = 0\ndatasets = []\nfor dir_id, dir in enumerate(directories):\n DATASETS_DIR = os.path.join(os.path.realpath(os.path.dirname(__file__)), '/home/joannagrzyb/dev/moo_tune_ensemble/datasets/%s/' % dir)\n n_datasets += len(list(enumerate(find_datasets(DATASETS_DIR))))\n for dataset_id, dataset in enumerate(find_datasets(DATASETS_DIR)):\n datasets.append(dataset)\n\nmean_scores = np.zeros((n_datasets, n_metrics, n_methods))\nstds = np.zeros((n_datasets, n_metrics, n_methods))\nexperiments_paths = [\"experiment4_9lower\", \"experiment1_9higher_part1\", \"experiment2_9higher_part2\", \"experiment3_9higher_part3\"]\nfor exp in experiments_paths:\n for dataset_id, dataset in enumerate(datasets):\n for clf_id, clf_name in enumerate(methods):\n for metric_id, metric in enumerate(metrics_alias):\n try:\n filename = \"results/experiment_server/%s/raw_results/%s/%s/%s.csv\" % (exp, metric, dataset, clf_name)\n if not os.path.isfile(filename):\n # print(\"File not exist - %s\" % filename)\n continue\n scores = np.genfromtxt(filename, delimiter=',', dtype=np.float32)\n mean_score = np.mean(scores)\n mean_scores[dataset_id, metric_id, clf_id] = mean_score\n std = np.std(scores)\n stds[dataset_id, metric_id, clf_id] = std\n except:\n print(\"Error loading dataset - %s!\" % dataset)\n\nIR = calc_imbalance_ratio(directories=directories)\nIR_argsorted = np.argsort(IR)\n\n# Save dataset name, mean scores and standard deviation to .tex file\nfor metric_id, metric in enumerate(metrics_alias):\n with open(\"results/tables/results_%s.tex\" % metric, \"w+\") as file:\n for id, arg in enumerate(IR_argsorted):\n id += 1\n line = \"%d\" % (id)\n line_values = []\n line_values = mean_scores[arg, metric_id, :]\n max_value = np.amax(line_values)\n for clf_id, clf_name in enumerate(methods):\n if mean_scores[arg, metric_id, clf_id] == max_value:\n line 
+= \" & \\\\textbf{%0.3f $\\\\pm$ %0.3f}\" % (mean_scores[arg, metric_id, clf_id], stds[arg, metric_id, clf_id])\n else:\n line += \" & %0.3f $\\\\pm$ %0.3f\" % (mean_scores[arg, metric_id, clf_id], stds[arg, metric_id, clf_id])\n line += \" \\\\\\\\\"\n print(line, file=file)\n if IR[arg] > 8.6 and IR[arg] < 9.0:\n print(\"\\\\hline\", file=file)\n","sub_path":"results_tables.py","file_name":"results_tables.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"299675652","text":"# Credits to: CodingBat\n# Site Link: https://codingbat.com/\n\n#############\n## PROBLEM ##\n#############\n\n\"\"\"\nGiven a string and a non-negative \nint n, we'll say that the front of \nthe string is the first 3 chars, or \nwhatever is there if the string is less \nthan length 3. Return n copies of the front;\n\nfront_times('Chocolate', 2) → 'ChoCho'\nfront_times('Chocolate', 3) → 'ChoChoCho'\nfront_times('Abc', 3) → 'AbcAbcAbc'\n\"\"\"\n\n########################\n## SOLUTION BY: KEVIN ##\n########################\n\n# First Solution\ndef front_times(str, n):\n\treturn str[:3]*n\n\n# Or\n\n# Second Solution\ndef front_times(str, n):\n\n\tfront = 3\n\n\tif front > len(str):\n\t\tfront = len(str)\n\tfront = str[:front]\n\n\tresult = \"\"\n\tfor n in range(n):\n\t\tresult += front\n\n\treturn result","sub_path":"Warmup-2/front_times.py","file_name":"front_times.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"229788719","text":"from flask import Flask, request, url_for, jsonify, json, Response\nfrom functools import wraps\n\n\"\"\"\ncurl 指令参考:\n-X 指定HTTP请求方法,例如POST,GET\n-H 指定请求头,例如Content-Type:application/json\n-d 指定请求数据\n--data-binary 指定发送的文件\n-i 显示响应头部信息\n-u 指定认证用户名与密码\n- 输出请求头部信息\n\"\"\"\n\napp = Flask(__name__)\n\n@app.route('/')\ndef api_root():\n return \"Welcome to 5002\\n\"\n\n\n@app.route('/articles')\ndef apt_articles():\n return '5002 List of ' + url_for('apt_articles') + \"\\n\"\n\n\n@app.route('/articles/')\ndef api_articleid(articleid):\n return 'You are reading ' + articleid + \"\\n\"\n\n\n@app.route('/hello')\ndef api_hello():\n if 'name' in request.args:\n return '5002 Hello ' + request.args['name']\n else:\n return \"5002 Hello cuberqiu\\n\"\n\n\n@app.route('/echo', methods=['GET', 'POST', 'PATCH', 'PUT', 'DELETE'])\ndef api_echo():\n if request.method == 'GET':\n return \"ECHO: GET\\n\"\n elif request.method == 'POST':\n return \"ECHO: POST\\n\"\n elif request.method == \"PATCH\":\n return \"ECHO: PATCH\\n\"\n elif request.method == \"PUT\":\n return \"ECHO: PUT\\n\"\n elif request.method == \"DELETE\":\n return \"ECHO: DELETE\"\n\n\n@app.route('/messages', methods=['POST'])\ndef api_message():\n if request.headers['Content-Type'] == 'text/plain':\n return \"Text Message: \"+request.data+\"\\n\"\n elif request.headers['Content-Type'] == 'application/json':\n return \"JSON Message: \"+ json.dumps(request.json)+\"\\n\"\n elif request.headers['Content-Type'] == 'application/octet-stream':\n f = open('./file/binary', 'wb')\n f.write(request.data)\n f.close()\n return \"Binary message written!\"+\"\\n\"\n\n else:\n return \"415 Unsupported Media Type !\"+'\\n'\n\n\n@app.route('/rhello', methods=['GET'])\ndef api_rhello():\n data = {\n 'hello': \"cuberqiu\",\n 'number': 5002\n }\n\n js = json.dumps(data)\n\n resp = Response(js, status=200, mimetype='application/json')\n resp.headers['Link'] = 'http://luisrei.com'\n\n return resp\n\n\n# 错误处理\n@app.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'status':404,\n 'message':\"Not Found: \"+ request.url\n }\n\n resp = jsonify(message)\n resp.status_code = 404\n\n return resp\n\n@app.route('/users/', methods=['GET'])\ndef api_users(userid):\n users = {\"1\": \"cuberqiu\", \"2\":\"andy\", \"3\":\"piper\"}\n if userid in users:\n return jsonify({userid:users[userid]})\n else:\n return not_found()\n\n\n# 认证\ndef check_auth(username, password):\n return username=='admin' and password=='secret1'\n\n\ndef authenticate():\n message = {'message': \"Authenticate.\"}\n resp = jsonify(message)\n resp.status_code = 401\n resp.headers['WWW-Authenticate'] = 'Basic realm=\"Example\"'\n return resp\n\n\ndef requeres_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if not auth:\n return authenticate()\n elif not check_auth(auth.username, auth.password):\n return authenticate()\n return f(*args, **kwargs)\n\n return decorated\n\n\n@app.route('/secrets')\n@requeres_auth\ndef api_shello():\n return \"Shhh this is top secret spy stuff!\\n\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=5002,debug=True)","sub_path":"python3/python3-cookbook/flaskDemo/flaskDemo2.py","file_name":"flaskDemo2.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"280526070","text":"\n'''\nhttps://www.hackerrank.com/challenges/angry-professor\n \nAngry Professor\n \nA Discrete Mathematics professor has a class of $N$ students. Frustrated with their lack of discipline, he decides to cancel class if fewer than $K$ students are present when class starts.\n\nGiven the arrival time of each student, determine if the class is canceled.Input FormatThe first line of input contains $T$, the number of test cases. \n\nEach test case consists of two lines. The first line has two space-separated integers, $N$ (students in the class) and $K$ (the cancelation threshold). \nThe second line contains $N$ space-separated integers ($a_1, a_2, \\ldots, a_N$) describing the arrival times for each student.\n\nNote: Non-positive arrival times ($a_i \\le 0$) indicate the student arrived early or on time; positive arrival times ($a_i \\gt 0$) indicate the student arrived $a_i$ minutes late.Output FormatFor each test case, print the word YES if the class is canceled or NO if it is not. \n\nConstraints \n\n\n$1 \\le T \\le 10$ \n$1 \\le N \\le 1000$ \n$1 \\le K \\le N$ \n$-100 \\le a_i \\le 100, where\\ i \\in [1, N]$ \n\n\nNote \nIf a student arrives exactly on time $(a_i = 0)$, the student is considered to have entered before the class started.Sample Input2\n4 3\n-1 -3 4 2\n4 2\n0 -1 2 1\nSample OutputYES\nNO\nExplanationFor the first test case, $K = 3$. The professor wants at least $3$ students in attendance, but only $2$ have arrived on time ($-3$ and $-1$). Thus, the class is canceled.\n\nFor the second test case, $K = 2$. The professor wants at least $2$ students in attendance, and there are $2$ who have arrived on time ($0$ and $-1$). Thus, the class is not canceled.\n''' and None\n#!/bin/python\n\nimport sys\n\n\nt = int(raw_input().strip())\nfor a0 in xrange(t):\n n,k = raw_input().strip().split(' ')\n n,k = [int(n),int(k)]\n a = map(int,raw_input().strip().split(' '))\n\n\n","sub_path":"algorithms/implementation/angry-professor.py","file_name":"angry-professor.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"482785661","text":"# from http://www.pygame.org/pcr/text_rect/index.php and then modified by adding render_fitted_textrect\n\nfrom font.font import Font\n\n\nclass TextRectException(BaseException):\n def __init__(self, message=None):\n self.message = message\n\n def __str__(self):\n return self.message\n\n\nsizes = (512,256,128,96,72,60,48,36,32,24,18,16)\n\n\ndef render_fitted_textrect(string, rect, text_color, background_color, justification=0):\n for size in sizes:\n font = Font.getFont(size)\n try:\n return render_textrect(string, font, rect, text_color, background_color, justification)\n except TextRectException as e:\n continue\n\n\ndef render_textrect(string, font, rect, text_color, background_color, justification=0):\n \"\"\"Returns a surface containing the passed text string, reformatted\n to fit within the given rect, word-wrapping as necessary. The text\n will be anti-aliased.\n\n Takes the following arguments:\n\n string - the text you wish to render. \\n begins a new line.\n font - a Font object\n rect - a rectstyle giving the size of the surface requested.\n text_color - a three-byte tuple of the rgb value of the\n text color. ex (0, 0, 0) = BLACK\n background_color - a three-byte tuple of the rgb value of the surface.\n justification - 0 (default) left-justified\n 1 horizontally centered\n 2 right-justified\n\n Returns the following values:\n\n Success - a surface object with the text rendered onto it.\n Failure - raises a TextRectException if the text won't fit onto the surface.\n \"\"\"\n\n import pygame\n\n final_lines = []\n\n requested_lines = string.splitlines()\n\n # Create a series of lines that will fit on the provided\n # rectangle.\n\n for requested_line in requested_lines:\n if font.size(requested_line)[0] > rect.width:\n words = requested_line.split(' ')\n # if any of our words are too long to fit, return.\n for word in words:\n if font.size(word)[0] >= rect.width:\n raise TextRectException(\"The word \" + word + \" is too long to fit in the rect passed.\")\n # Start a new line\n accumulated_line = \"\"\n for word in words:\n test_line = accumulated_line + word + \" \"\n # Build the line while the words fit.\n if font.size(test_line)[0] < rect.width:\n accumulated_line = test_line\n else:\n final_lines.append(accumulated_line)\n accumulated_line = word + \" \"\n final_lines.append(accumulated_line)\n else:\n final_lines.append(requested_line)\n\n # Let's try to write the text out on the surface.\n\n surface = pygame.Surface(rect.size)\n surface.fill(background_color)\n\n accumulated_height = 0\n for line in final_lines:\n if accumulated_height + font.size(line)[1] >= rect.height:\n raise TextRectException (\"Once word-wrapped, the text string was too tall to fit in the rect.\")\n if line != \"\":\n tempsurface = font.render(line, 1, text_color)\n if justification == 0:\n surface.blit(tempsurface, (0, accumulated_height))\n elif justification == 1:\n surface.blit(tempsurface, ((rect.width - tempsurface.get_width()) / 2, accumulated_height))\n elif justification == 2:\n surface.blit(tempsurface, (rect.width - tempsurface.get_width(), accumulated_height))\n else:\n raise TextRectException (\"Invalid justification argument: \" + str(justification))\n accumulated_height += font.size(line)[1]\n\n return surface","sub_path":"drawer/textrect.py","file_name":"textrect.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"536781098","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018-2021 CERN.\n#\n# Invenio-RDM-Records is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Search app configuration helper.\"\"\"\n\n\n# NOTE: It would be best to try to better harmonize these classes together with\n# the ones in Invenio-App-RDM used for the frontend and try to lower the number\n# of classes used for passing config around.\n\n\nclass OptionsSelector:\n \"\"\"Generic helper to select and validate facet/sort options.\"\"\"\n\n def __init__(self, available_options, selected_options):\n \"\"\"Initialize selector.\"\"\"\n # Ensure all selected options are availabe.\n for o in selected_options:\n assert o in available_options, \\\n f\"Selected option '{o}' is undefined.\"\n\n self.available_options = available_options\n self.selected_options = selected_options\n\n def __iter__(self):\n \"\"\"Iterate over options to produce RSK options.\"\"\"\n for o in self.selected_options:\n yield self.map_option(o, self.available_options[o])\n\n def map_option(self, key, option):\n \"\"\"Map an option.\"\"\"\n # This interface is used in Invenio-App-RDM.\n return (key, option)\n\n\nclass SortOptionsSelector(OptionsSelector):\n \"\"\"Sort options for the search configuration.\"\"\"\n\n def __init__(self, available_options, selected_options, default=None,\n default_no_query=None):\n \"\"\"Initialize sort options.\"\"\"\n super().__init__(available_options, selected_options)\n\n self.default = selected_options[0] if default is None else default\n self.default_no_query = selected_options[1] \\\n if default_no_query is None else default_no_query\n\n assert self.default in self.available_options, \\\n f\"Default sort with query {self.default} is undefined.\"\n assert self.default_no_query in self.available_options, \\\n f\"Default sort without query {self.default_no_query} is undefined.\"\n\n\nclass SearchConfig:\n \"\"\"Search endpoint configuration.\"\"\"\n\n def __init__(self, config, sort=None, facets=None):\n \"\"\"Initialize search config.\"\"\"\n config = config or {}\n self._sort = []\n self._facets = []\n\n if 'sort' in config:\n self._sort = SortOptionsSelector(\n sort,\n config['sort'],\n default=config.get('sort_default'),\n default_no_query=config.get('sort_default_no_query')\n )\n if 'facets' in config:\n self._facets = OptionsSelector(facets, config.get('facets'))\n\n @property\n def sort_options(self):\n \"\"\"Get sort options for search.\"\"\"\n return {k: v for (k, v) in self._sort}\n\n @property\n def sort_default(self):\n \"\"\"Get default sort method for search.\"\"\"\n return self._sort.default if self._sort else None\n\n @property\n def sort_default_no_query(self):\n \"\"\"Get default sort method without query for search.\"\"\"\n return self._sort.default_no_query if self._sort else None\n\n @property\n def facets(self):\n \"\"\"Get facets for search.\"\"\"\n return {k: v['facet'] for (k, v) in self._facets}\n","sub_path":"invenio_rdm_records/searchconfig.py","file_name":"searchconfig.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"12377714","text":"import jamspell # Корректор\r\nimport pymorphy2 # Морфемный анализ\r\n\r\nstring = 'Он плывет в воде'\r\n\r\ncorrector = jamspell.TSpellCorrector()\r\ncorrector.LoadLangModel('ru_small.bin')\r\n\r\nstring = corrector.FixFragment(string)\r\n\r\nmorph = pymorphy2.MorphAnalyzer()\r\n\r\nlist = string.split()\r\n\r\nfor word in list:\r\n\tp = morph.parse(word)[0].tag.cyr_repr#normal_form\r\n\tprint('\\n'+word+' --> '+p)","sub_path":"morh_analyze.py","file_name":"morh_analyze.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"75382261","text":"#!/usr/bin/env python\n#-*- coding:utf8 -*-\n\"\"\"\n@author: qihengshan\n@software: PyCharm\n@time: 2017/12/5 09:11\n\"\"\"\nfrom itertools import cycle\nfrom django import template\nimport datetime\nfrom myapp.include.encrypt import prpcrypt\nregister = template.Library()\n\n\n@register.filter\ndef descrypt(values):\n py = prpcrypt()\n values = py.decrypt(values)\n return values\n\n\n@register.filter\ndef s_to_d(values):\n values = int(values/3600/24)\n return str(values)+'d'\n\n@register.filter\ndef adjtime(values):\n values = values-datetime.timedelta(hours=8)\n return values\n\n\n@register.filter\ndef exact_columns(items, number_of_columns):\n \"\"\"Divides a list in an exact number of columns.\n The number of columns is guaranteed.\n\n Examples:\n\n 8x3:\n [[1, 2, 3], [4, 5, 6], [7, 8]]\n\n 2x3:\n [[1], [2], []]\n \"\"\"\n try:\n number_of_columns = int(number_of_columns)\n items = list(items)\n except (ValueError, TypeError):\n return [items]\n\n columns = [[] for x in range(number_of_columns)]\n actual_column = cycle(range(number_of_columns))\n for item in items:\n columns[actual_column.next()].append(item)\n\n return columns\n\n@register.filter\ndef split_cols(string):\n \"\"\"\n Return the string split by sep.\n Example usage: {{ value|split:\",\"}}\n \"\"\"\n #string.replace(',', ',\\n')\n a = [i.strip() for i in string.split(',') if len(i.strip()) >0]\n return ',\\n'.join(a)","sub_path":"myapp/templatetags/cus_filter.py","file_name":"cus_filter.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"352624941","text":"# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# MIT License for more details.\n\n\"\"\"Check and Define Prune Model SearchSpace.\"\"\"\nimport logging\nfrom zeus.common import ClassFactory, ClassType\nfrom zeus.model_zoo import ModelZoo\nfrom vega.core.search_space import SearchSpace\nfrom vega.core.pipeline.conf import PipeStepConfig\n\n\n@ClassFactory.register(ClassType.SEARCHSPACE)\nclass PruneSearchSpace(SearchSpace):\n \"\"\"Restrict and Terminate Base Calss.\"\"\"\n\n @classmethod\n def get_space(self, desc):\n \"\"\"Get model and input.\"\"\"\n model_desc = PipeStepConfig.model.model_desc\n model = ModelZoo().get_model(dict(type='PruneDeformation', desc=model_desc))\n search_space = model.search_space\n params = []\n for key, value in search_space.items():\n hparam_name = 'network.props.{}'.format(key)\n params.append(dict(key=hparam_name, type=\"BINARY_CODE\", range=[value]))\n params.append(dict(key='network.deformation', type=\"CATEGORY\", range=['PruneDeformation']))\n logging.info(\"Prune Search Space: {}\".format(params))\n return {\"hyperparameters\": params}\n","sub_path":"vega/algorithms/compression/prune_ea/prune_search_space.py","file_name":"prune_search_space.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"577280497","text":"# 3-armed bandit\nimport random\nimport numpy as np\n\nclass Bandit(object):\n def numLevers(self):\n return 3\n\n def pull(self, lever):\n r = random.random()\n if lever == 0:\n if r < 0.25:\n return 4.0\n else:\n return 0.0\n elif lever == 1:\n if r < 0.10:\n return 90.0\n elif r < 0.20:\n return 15.0\n else:\n return 0.0\n elif lever == 2:\n if r < 0.20:\n return 6.0\n else:\n return 0.0\n else:\n print(\"ERROR: no lever \", lever)\n\nclass Agent(object):\n def __init__(self, bandit):\n self.learningRate = 0.1\n self.exploreRate = 0.6\n self.expectedPayouts = np.zeros([bandit.numLevers()])\n self.bandit = bandit\n\n def playOneRound(self):\n r = random.random()\n if r < self.exploreRate:\n # We're going to explore, so choose\n # uniform-random from possible moves.\n k = random.randrange(self.bandit.numLevers())\n reward = self.bandit.pull(k)\n print(\"Explore \", k, reward)\n # Update estimate of expected payouts\n self.expectedPayouts[k] += self.learningRate * (reward - self.expectedPayouts[k])\n else:\n # We're going to exploit (argmax)\n k = self.expectedPayouts.argmax()\n reward = self.bandit.pull(k)\n print(\"Exploit \", k, reward)\n print(self.expectedPayouts)\n return reward\n\n def playMultipleRounds(self, n):\n rewards = 0\n for _ in range(n):\n rewards += self.playOneRound()\n print(\"Total rewards: \", rewards)\n return rewards\n\nif __name__ == \"__main__\":\n b = Bandit()\n a = Agent(b)\n a.playMultipleRounds(1000)\n","sub_path":"bandit.py","file_name":"bandit.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"47840279","text":"import os\nimport glob\n\n\ndef get_no_hidden_folder_list(wd):\n folder_list = []\n for each_folder in os.listdir(wd):\n if not each_folder.startswith('.'):\n folder_list.append(each_folder)\n return folder_list\n\n\n# get folder list\nwd = '/Users/songweizhi/Desktop/test_'\nprint(get_no_hidden_folder_list(wd))\n\n\n# get file list\nfiles = '/Users/songweizhi/Desktop/test/*.fa'\nfile_list = [os.path.basename(file_name) for file_name in glob.glob(files)]\nprint(file_list)\n\n\n","sub_path":"02_file_list_generator.py","file_name":"02_file_list_generator.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"185607339","text":"from zope.interface import implementer\nfrom pyramid import renderers\nfrom pyramid.interfaces import ITemplateRenderer\nfrom pyramid.decorator import reify\nfrom lxml import etree\nimport datetime\n\nclass XsltTemplate:\n def processAttribute(self, value):\n if isinstance(value, (int, long)):\n return str(value)\n elif isinstance(value, unicode):\n return value \n elif isinstance(value, str):\n return value \n elif isinstance(value, datetime.date):\n return value.strftime(\"%d.%m.%Y\")\n else:\n return ''\n \n def serializeArray(self, base, array):\n for item in array:\n e = etree.Element(\"item\")\n for k, v in item.items():\n e.attrib[k] = self.processAttribute(v)\n base.append(e) \n \n def serializeDict(self, root, data):\n for key, value in data.items():\n if isinstance(value, (list, tuple)):\n base = etree.Element(key)\n self.serializeArray(base, value)\n root.append(base)\n elif isinstance(value, dict):\n self.serializeDict(root, value)\n else:\n root.attrib[key] = self.processAttribute(value) \n \n def toXml(self, data):\n root = etree.Element(\"root\")\n self.serializeDict(root, data)\n return root \n \n def __init__(self, path):\n self.path = path\n \n def __call__(self, value):\n doc = self.toXml(value)\n \n request = value.get('request')\n if request is not None:\n \n debug = request.GET.has_key('debug')\n if debug:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = 'text/xml'\n return etree.tostring(doc)\n \n transform = etree.XSLT(etree.parse(self.path))\n return etree.tostring(transform(doc))\n\n@implementer(ITemplateRenderer)\nclass XsltRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n self.macro = macro\n \n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n return XsltTemplate(self.path)\n \n def implementation(self):\n return self.template\n\n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = self.template(system)\n return result\n \ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, XsltRenderer)\n","sub_path":"sso/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"593956637","text":"import random\n\nVOWELS = \"aeiouy\"\n\ndef human2bird(phrase):\n words = phrase.split()\n result = []\n for w in words:\n result.append(\"\".join(ch * 3 if ch in VOWELS else ch + random.choice(VOWELS) for ch in w))\n return \" \".join(result)\n\ndef tranlate(phrase):\n words = phrase.split()\n result = []\n for w in words:\n i = 0\n temp = \"\"\n while i < len(w):\n temp += w[i]\n if w[i] in VOWELS:\n i += 3\n else:\n i += 2\n result.append(temp)\n return \" \".join(result)\n\n# print(human2bird(\"checkio\"))\n# print(human2bird(\"checkio\"))\n# print(human2bird(\"a b c d e f\"))\n\nprint(tranlate(\"cuhueeecikuiiiooo\"))\n\nT = [\n \"lorem ipsum\",\n \"to be or not to be\",\n \"bla bla bla bla\",\n \"do you speak english\",\n \"i don not understand you\"\n\n]\n\nfor t in T:\n b = human2bird(t)\n print(\"\"\"{{\n \"input\": \"{}\",\n \"answer\": \"{}\"\n }},\"\"\".format(b, t))","sub_path":"_generator/generate-bl.py","file_name":"generate-bl.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"244442812","text":"import logging\nfrom pymongo.errors import BulkWriteError\nfrom helpers.format import stripped_or_none, boolean_on_value\nfrom schema.models import ExtractCharity, ExtractMainCharity\n\ndef create_charity(charity, main_charity):\n c = {}\n c['regulator'] = 'GB-CHC'\n c['ids'] = {\n 'charityId': 'GB-CHC-%d' % charity.regno,\n 'GB-CHC': charity.regno,\n }\n c['name'] = stripped_or_none(charity.name, 'title')\n c['isRegistered'] = charity.orgtype and charity.orgtype.strip() == 'R' \n c['governingDoc'] = stripped_or_none(charity.gd)\n c['areaOfBenefit'] = stripped_or_none(charity.aob, 'title')\n c['contact'] = {\n 'email': stripped_or_none(main_charity.email, 'lower'),\n 'person': stripped_or_none(charity.corr, 'title'),\n 'phone': stripped_or_none(charity.phone),\n 'postcode': stripped_or_none(charity.postcode),\n 'address': [stripped_or_none(getattr(charity, add), 'title') for add in ['add1', 'add2', 'add3', 'add4', 'add5'] if getattr(charity, add) != None]\n }\n c['isWelsh'] = boolean_on_value(main_charity.welsh, 'T')\n\n c['trustees'] = {\n 'incorporated': boolean_on_value(main_charity.trustees, 'T'),\n 'names': []\n }\n c['website'] = stripped_or_none(main_charity.web, 'lower')\n c['isSchool'] = boolean_on_value(main_charity.grouptype, 'SCH')\n c['income'] = {\n 'latest': {\n 'date': main_charity.incomedate,\n 'total': int(main_charity.income) if main_charity.income != None else None,\n },\n 'annual': []\n }\n c['fyend'] = stripped_or_none(main_charity.fyend)\n c['companiesHouseNumber'] = stripped_or_none(main_charity.coyno)\n\n c['areasOfOperation'] = []\n c['causes'] = []\n c['beneficiaries'] = []\n c['operations'] = []\n c['subsidiaries'] = []\n c['alternativeNames'] = []\n\n c['activities'] = None\n \n return c\n\n\n\ndef insert_charities(session, charity_collection, lower_limit, upper_limit, batch_size=1000):\n\n logging.info(\"Inserting charities\")\n while lower_limit <= upper_limit:\n \n q = session\\\n .query(ExtractCharity, ExtractMainCharity)\\\n .join(ExtractMainCharity, ExtractMainCharity.regno==ExtractCharity.regno)\\\n .filter(ExtractCharity.orgtype == 'R')\\\n .filter(ExtractCharity.subno == 0)\\\n .filter(ExtractCharity.regno >= lower_limit)\\\n .filter(ExtractCharity.regno < lower_limit + batch_size)\n\n charities = []\n for i, x in enumerate(q):\n\n c = create_charity(x.ExtractCharity, x.ExtractMainCharity)\n charities.append(c)\n \n logging.info(lower_limit)\n \n lower_limit += batch_size\n \n if len(charities) == 0:\n continue\n \n try:\n result = charity_collection.insert_many(charities)\n # print(result.inserted_ids)\n except BulkWriteError as bwe:\n logging.error(bwe.details)\n\n","sub_path":"create_mongo_db/schema_conversion/create_charity.py","file_name":"create_charity.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"632656203","text":"# Configuration file for application.\n# noinspection PyUnresolvedReferences\nc = get_config()\n\n#------------------------------------------------------------------------------\n# Application(SingletonConfigurable) configuration\n#------------------------------------------------------------------------------\n\n## This is an application.\n\n## The date format used by logging formatters for %(asctime)s\n#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'\n\n## The Logging format template\n#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'\n\n## Set the log level by value or name.\n#c.Application.log_level = 30\n\n#------------------------------------------------------------------------------\n# CommandSenderApp(Application) configuration\n#------------------------------------------------------------------------------\n\n## This is an application.\n\n## Config file directory\n#c.CommandSenderApp.config_dir = '/home/pmc/pmchome/pmc-turbo-devel/config/ground'\n\n## Load this config file\n#c.CommandSenderApp.config_file = u'default_ground_config.py'\n\n## Write template config file to this location\n#c.CommandSenderApp.write_default_config = u''\n\n#------------------------------------------------------------------------------\n# GroundConfiguration(Configurable) configuration\n#------------------------------------------------------------------------------\n\n## \n#c.GroundConfiguration.command_history_subdir = 'command_history'\n\n## \n#c.GroundConfiguration.command_index_filename = 'index.csv'\n\n## Serial device connected to GSE uplink. Empty string means don't use serial (GSE) uplink (only openport\nc.GroundConfiguration.command_port = ''\n\n## (IP,port) tuple to send OpenPort commands to\nopenport_uplink_ip = ('%d.%d.%d.%d' % (0x80, 0x3b, 0xab, 0x10))\nc.GroundConfiguration.openport_uplink_addresses = [(openport_uplink_ip, 30001),]\n\nc.GroundConfiguration.downlink_parameters = {'los': {'baudrate': 115200, 'loop_interval': 1.0, 'port': '/dev/ttyS0'},\n 'tdrss_direct': {'baudrate': 115200, 'loop_interval': 1.0, 'port': '/dev/ttyUSB2'},\n 'gse_sip': {'baudrate': 115200, 'loop_interval': 1.0, 'port': '/dev/ttyUSB0'},\n 'openport': {'baudrate': None, 'loop_interval': 1.0, 'port': 4501}}\n\nc.GSEReceiverManager.downlinks_to_use = ['openport']\n\n## \nc.GroundConfiguration.root_data_path = '/data/pmc/piggyback_gse_data'\n\n#------------------------------------------------------------------------------\n# CommandSender(GroundConfiguration) configuration\n#------------------------------------------------------------------------------\n\n## Timeout for serial command port. This sets how much time is allocated for the\n# GSE to acknowledge the command we sent.\n#c.CommandSender.command_port_response_timeout = 3.0\n","sub_path":"config/ground/simulated_piggyback_ground_config.py","file_name":"simulated_piggyback_ground_config.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"251545055","text":"import requests\nimport json\nfrom .data_management import DataManagement\nfrom .model_derivative import ModelDerivative\nfrom .utility import forge_response\n\nclass ForgeClient(object):\n _API_REALM = 'Autodesk Forge Authentication API'\n\n authentication_base_url = 'https://developer.api.autodesk.com/authentication'\n users_base_url = 'https://developer.api.autodesk.com/userprofile/v1/users'\n\n def __init__(self,\n client_id=None,\n client_secret=None,\n grant_type=None,\n scope=None,\n code=None,\n redirect_uri=None,\n access_token=None,\n refresh_token=None\n ):\n self._client_id = client_id\n self._client_secret = client_secret\n self._grant_type = grant_type\n self._scope = scope\n self._code = code\n self._redirect_uri = redirect_uri\n self._access_token = access_token\n self._refresh_token = refresh_token\n\n @property\n def model_derivative(self):\n if self.is_valid_context():\n return ModelDerivative(self, True)\n else:\n raise TypeError('Could not instantiate model_derivative: Please check that client token is valid.')\n\n @property\n def data_management(self):\n if self.is_valid_context():\n return DataManagement(self, True)\n else:\n raise TypeError('Could not instantiate data_management: Please check that client token is valid.')\n\n def set_credentials(self, client_id=None, client_secret=None, grant_type=None, scope=None, code=None, redirect_uri=None, refresh_token=None, access_token=None):\n self._client_id = client_id\n self._client_secret = client_secret\n self._grant_type = grant_type\n self._scope = scope\n self._code = code\n self._redirect_uri = redirect_uri\n self._refresh_token = refresh_token\n self._access_token = access_token\n return self\n\n def get_credentials(self):\n return {\n 'client_id': self._client_id,\n 'client_secret': self._client_secret,\n 'grant_type': self._grant_type,\n 'scope': self._scope,\n 'code': self._code,\n 'redirect_uri': self._redirect_uri,\n 'refresh_token': self._refresh_token,\n 'access_token': self._access_token\n }\n\n @forge_response\n def retrieve_token_2leg(self):\n if not all([self._client_id, self._client_secret, self._grant_type, self._scope]):\n raise Exception('Please check that credentials are set.')\n\n token_endpoint = 'v1/authenticate'\n get_token_url = \"/\".join([self.authentication_base_url, token_endpoint])\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n\n data = [\n ('client_id', self._client_id),\n ('client_secret', self._client_secret),\n ('grant_type', self._grant_type),\n ('scope', self._scope),\n ]\n\n response = requests.post(get_token_url, headers=headers, data=data)\n\n try:\n if 'access_token' in response.json():\n self._access_token = response.json()['access_token']\n return response\n except Exception as e:\n raise Exception('Please check that credentials are set: \\n\\n {}'.format(e))\n\n @forge_response\n def retrieve_token_3leg(self):\n if not all([self._client_id, self._client_secret, self._grant_type, self._code, self._redirect_uri]):\n raise Exception('Please check that credentials are set.')\n\n token_endpoint = 'v1/gettoken'\n get_token_url = \"/\".join([self.authentication_base_url, token_endpoint])\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n\n data = [\n ('client_id', self._client_id),\n ('client_secret', self._client_secret),\n ('grant_type', self._grant_type),\n ('code', self._code),\n ('redirect_uri', self._redirect_uri)\n ]\n\n response = requests.post(get_token_url, headers=headers, 
data=data)\n\n        try:\n            if 'access_token' in response.json() and 'refresh_token' in response.json():\n                self._access_token = response.json()['access_token']\n                self._refresh_token = response.json()['refresh_token']\n            return response\n        except Exception as e:\n            raise Exception('Please check that credentials are set - 3Leg authentication also requires code and redirect_uri parameters: \\n\\n {}'.format(e))\n\n    def is_valid_context(self):\n        if self._access_token:\n            return True\n        else:\n            return False\n\n    @forge_response\n    def get_refreshed_token(self):\n        if not all([self._client_id, self._client_secret, self._grant_type]):\n            raise Exception('Please check that credentials are set - 3Leg authentication also requires code and redirect_uri parameters')\n\n        token_endpoint = 'v1/refreshtoken'\n        get_token_url = \"/\".join([self.authentication_base_url, token_endpoint])\n\n        headers = {\n            'Content-Type': 'application/x-www-form-urlencoded',\n        }\n\n        data = [\n            ('client_id', self._client_id),\n            ('client_secret', self._client_secret),\n            ('grant_type', self._grant_type),\n            ('refresh_token', self._refresh_token)\n        ]\n\n        return requests.post(get_token_url, headers=headers, data=data)\n\n    @forge_response\n    def get_end_user(self):\n        if not self.is_valid_context():\n            raise Exception('Please check that credentials are set - 3Leg authentication also requires code and redirect_uri parameters')\n\n        bearer_key = ' '.join(['Bearer', self._access_token])\n        self._headers = {\n            'Authorization': bearer_key,\n            'Content-Type': 'application/json'\n        }\n\n        me_endpoint = '@me'\n        end_user_endpoint = \"/\".join([self.users_base_url, me_endpoint])\n\n        return requests.get(end_user_endpoint, headers=self._headers)\n\n    def is_access_token_valid(self):\n        if not self.is_valid_context():\n            return False\n\n        # build the auth header locally; self._headers only exists after get_end_user() has run\n        headers = {\n            'Authorization': ' '.join(['Bearer', self._access_token]),\n            'Content-Type': 'application/json'\n        }\n\n        me_endpoint = '@me'\n        end_user_endpoint = \"/\".join([self.users_base_url, me_endpoint])\n\n        response = requests.get(end_user_endpoint, headers=headers)\n        return response.status_code == 200\n","sub_path":"faith_forge_python_sdk/faith_forge_python_sdk/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"280893117","text":"from __future__ import unicode_literals\n\nimport inspect\nimport os\nimport sys\n\nimport pkg_resources\nfrom django.core.management import call_command\nfrom django.utils import six\nfrom setuptools.command.build_py import build_py\nfrom setuptools import Command\n\n\nclass BuildStaticFiles(Command):\n \"\"\"Builds static files for the extension.\n\n This will build the static media files used by the extension. JavaScript\n bundles will be minified and versioned. CSS bundles will be processed\n through lesscss (if using .less files), minified and versioned.\n\n This must be subclassed by the project offering the extension support.\n The subclass must provide the extension_entrypoint_group and\n django_settings_module parameters.\n\n extension_entrypoint_group is the group name that entry points register\n into.\n\n django_settings_module is the Python module path for the project's\n settings module, for use in the DJANGO_SETTINGS_MODULE environment\n variable.\n \"\"\"\n description = 'Build static media files'\n extension_entrypoint_group = None\n django_settings_module = None\n\n user_options = [\n (b'remove-source-files', None, 'remove source files from the package'),\n ]\n boolean_options = [b'remove-source-files']\n\n def initialize_options(self):\n self.build_lib = None\n self.remove_source_files = False\n\n def finalize_options(self):\n self.set_undefined_options('build', ('build_lib', 'build_lib'))\n\n def get_lessc_global_vars(self):\n \"\"\"Returns a dictionary of LessCSS global variables and their values.\n\n This can be implemented by subclasses to provide global variables for\n .less files for processing.\n\n By default, this defines two variables: `STATIC_ROOT` and `DEBUG`.\n\n `STATIC_ROOT` is set to an empty string. This will effectively cause\n any imports using `@{STATIC_ROOT}` to look up in the include path.\n Projects using less.js for the runtime can then define `STATIC_ROOT` to\n their standard static URL, ensuring lookups work for development and\n packaged extensions.\n\n `DEBUG` is set to false. Runtimes using less.js can set this to\n settings.DEBUG for templates. 
This can be useful for LessCSS guards.\n\n This requires LessCSS 1.5.1 or higher.\n \"\"\"\n return {\n 'DEBUG': False,\n 'STATIC_ROOT': '',\n }\n\n def get_lessc_include_path(self):\n \"\"\"Returns the include path for LessCSS imports.\n\n By default, this will include the parent directory of every path in\n STATICFILES_DIRS, plus the static directory of the extension.\n \"\"\"\n from django.conf import settings\n\n less_include = set()\n\n for staticfile_dir in settings.STATICFILES_DIRS:\n if isinstance(staticfile_dir, tuple):\n staticfile_dir = staticfile_dir[1]\n\n less_include.add(os.path.dirname(staticfile_dir))\n\n return less_include\n\n def run(self):\n from django.conf import settings\n\n # Prepare to import the project's settings file, and the extension\n # modules that are being shipped, so we can scan for the bundled\n # media.\n old_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE')\n os.environ['DJANGO_SETTINGS_MODULE'] = self.django_settings_module\n cwd = os.getcwd()\n sys.path = [\n os.path.join(cwd, package_name)\n for package_name in self.distribution.packages\n ] + sys.path\n\n # Set up the common Django settings for the builds.\n settings.STATICFILES_FINDERS = (\n 'djblets.extensions.staticfiles.PackagingFinder',\n )\n settings.STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\n settings.INSTALLED_APPS = [\n 'django.contrib.staticfiles',\n ]\n settings.CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n }\n\n # Load the entry points this package is providing, so we'll know\n # which extensions to scan.\n entrypoints = pkg_resources.EntryPoint.parse_map(\n self.distribution.entry_points,\n dist=self.distribution)\n\n extension_entrypoints = \\\n entrypoints.get(self.extension_entrypoint_group)\n assert extension_entrypoints, 'No extension entry points were defined.'\n\n # Begin building pipeline bundles for each of the bundles defined\n # in the extension.\n for ep_name, entrypoint in six.iteritems(extension_entrypoints):\n try:\n extension = entrypoint.load(require=False)\n except ImportError:\n sys.stderr.write(\n 'Error loading the extension for entry point %s\\n'\n % ep_name)\n raise\n\n self._build_static_media(extension)\n\n # Restore the environment, so we don't possibly interfere with\n # anything else.\n if old_settings_module is not None:\n os.environ['DJANGO_SETTINGS_MODULE'] = old_settings_module\n\n sys.path = sys.path[len(self.distribution.packages):]\n\n def _build_static_media(self, extension):\n from django.conf import settings\n\n pipeline_js = {}\n pipeline_css = {}\n\n self._add_bundle(pipeline_js, extension.js_bundles, 'js', '.js')\n self._add_bundle(pipeline_css, extension.css_bundles, 'css', '.css')\n\n # Get the location of the static/ directory within the module in the\n # source tree. 
We're going to use it to look up static files for\n # input, and as a relative path within the module for the output.\n module_dir = os.path.dirname(inspect.getmodule(extension).__file__)\n static_dir = os.path.join(module_dir, 'static')\n\n if not os.path.exists(static_dir):\n # This extension doesn't define any static files.\n return\n\n from djblets.extensions.staticfiles import PackagingFinder\n PackagingFinder.extension_static_dir = static_dir\n\n settings.STATICFILES_DIRS = list(settings.STATICFILES_DIRS) + [\n PackagingFinder.extension_static_dir\n ]\n\n # Register the include path and any global variables used for\n # building .less files.\n settings.PIPELINE_LESS_ARGUMENTS = ' '.join(\n [\n '--include-path=%s'\n % os.path.pathsep.join(self.get_lessc_include_path())\n ] + [\n '--global-var=\"%s=%s\"'\n % (key, self._serialize_lessc_value(value))\n for key, value in six.iteritems(self.get_lessc_global_vars())\n ]\n )\n\n settings.PIPELINE_JS = pipeline_js\n settings.PIPELINE_CSS = pipeline_css\n settings.PIPELINE_ENABLED = True\n settings.PIPELINE_STORAGE = \\\n 'djblets.extensions.staticfiles.PackagingStorage'\n settings.STATIC_ROOT = \\\n os.path.join(self.build_lib,\n os.path.relpath(os.path.join(module_dir, 'static')))\n\n # Due to how Pipeline copies and stores its settings, we actually\n # have to copy over some of these, as they'll be from the original\n # loaded settings.\n from pipeline.conf import settings as pipeline_settings\n\n for key in six.iterkeys(pipeline_settings.__dict__):\n if hasattr(settings, key):\n setattr(pipeline_settings, key, getattr(settings, key))\n\n # Collect and process all static media files.\n call_command('collectstatic', interactive=False, verbosity=2)\n\n if self.remove_source_files:\n self._remove_source_files(\n pipeline_css, os.path.join(settings.STATIC_ROOT, 'css'))\n self._remove_source_files(\n pipeline_js, os.path.join(settings.STATIC_ROOT, 'js'))\n\n def _add_bundle(self, pipeline_bundles, extension_bundles, default_dir,\n ext):\n for name, bundle in six.iteritems(extension_bundles):\n if 'output_filename' not in bundle:\n bundle['output_filename'] = \\\n '%s/%s.min%s' % (default_dir, name, ext)\n\n pipeline_bundles[name] = bundle\n\n def _remove_source_files(self, pipeline_bundles, media_build_dir):\n \"\"\"Removes all source files, leaving only built bundles.\"\"\"\n for root, dirs, files in os.walk(media_build_dir, topdown=False):\n for name in files:\n # A valid file will be represented as one of:\n #\n # (bundle_name, 'min', stamp, ext)\n # (bundle_name, 'min', ext)\n #\n # We keep both the pre-stamped and post-stamped versions so\n # that Django's CachedFilesStorage can generate and cache\n # the stamp from the contents of the non-stamped file.\n name_parts = name.split('.')\n\n if (len(name_parts) < 3 or\n name_parts[0] not in pipeline_bundles or\n name_parts[1] != 'min'):\n # This doesn't appear to be a file representing a bundle,\n # so we should get rid of it.\n os.unlink(os.path.join(root, name))\n\n for name in dirs:\n try:\n os.rmdir(os.path.join(root, name))\n except:\n # The directory is probably not empty yet.\n pass\n\n def _serialize_lessc_value(self, value):\n if isinstance(value, six.text_type):\n return '\"%s\"' % value\n elif isinstance(value, bool):\n if value:\n return 'true'\n else:\n return 'false'\n elif isinstance(value, int):\n return '%d' % value\n else:\n raise TypeError('%r is not a valid lessc global variable value'\n % value)\n\n\nclass BuildPy(build_py):\n def run(self):\n 
self.run_command('build_static_files')\n build_py.run(self)\n\n\ndef build_extension_cmdclass(build_static_files_cls):\n \"\"\"Builds a cmdclass to pass to setup.\n\n This is passed a subclass of BuildStaticFiles, and returns something\n that can be passed to setup().\n \"\"\"\n return {\n 'build_static_files': build_static_files_cls,\n 'build_py': BuildPy,\n }\n","sub_path":"djblets/extensions/packaging.py","file_name":"packaging.py","file_ext":"py","file_size_in_byte":10456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"172226025","text":"# -*- coding: utf-8 -*-\r\n\r\ntry :\r\n n1 = int(input(\"정수 입력 : \"))\r\n n2 = int(input(\"정수 입력 : \"))\r\n r = n1 / n2\r\n# 여러 타입의 예외를 처리하기 위해서는 \r\n# except 구문을 다수개 작성하여 처리할 수 있습니다.\r\nexcept ZeroDivisionError as msg :\r\n # / 0 을 하는 경우 실행되는 영역\r\n print(f\"Error MSG -> {msg}\")\r\nexcept ValueError as msg :\r\n # 잘못된 입력이 들어온 경우 실행되는 영역\r\n print(f\"Error MSG -> {msg}\")\r\nelse :\r\n print(f\"r -> {r}\")\r\nfinally :\r\n print(\"프로그램 종료\") ","sub_path":"day_05/exception_04.py","file_name":"exception_04.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"528989428","text":"\n# Cleans the raw data from JSON (raw_data.csv) and outputs final data (final_data.csv)\n\nimport numpy as np\nimport pandas as pd\nimport os\n\nif not os.path.exists('../data/final_data.csv'):\n df = pd.read_csv(\"../data/raw_data.csv\")\n reference = pd.read_csv(\"../data/Interactive Media Bias Chart - Ad Fontes Media.csv\")\n\n # Performs left join\n final = df.merge(reference, how='left', on='Url')\n\n # Drops empty body\n cleaned_final = final.drop(final[final['Body'].isna()].index, axis=0)\n\n cleaned_final.to_csv('../data/final_data.csv', index=False)\nelse:\n print('file already exists!')","sub_path":"backend/bias_rater/Archive/lib/process_raw.py","file_name":"process_raw.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"222375077","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass LinearModel:\n def __init__(self, learning_rate):\n self.learning_rate = learning_rate\n self.W1 = np.random.rand(1,1) #Weight matrix\n self.W0 = np.random.rand(1,1) #Bias\n\n def train(self, X, Y, iterations):\n samples = X.size\n count = 1\n losses = []\n epochs = []\n while(count <= iterations):\n prediction = np.dot(self.W1,X) + self.W0 # Y = W1*X + W0\n dprediction = prediction - Y #dL/dY\n square_loss = np.sum(np.square(dprediction))\n dW1 = np.dot(dprediction, X.T)/samples #dL/dW1 = dL/dY*d(X.T) \n dW0 = np.sum(dprediction, keepdims=True)/ samples #dL/dW0 = dL/dY\n self.W1 = self.W1 - self.learning_rate * dW1\n self.W0 = self.W0 - self.learning_rate * dW0\n count = count + 1\n epochs.append(count)\n losses.append(square_loss)\n plt.scatter(epochs, losses)\n plt.ylabel('Loss')\n plt.xlabel('Epochs')\n plt.title('Training')\n plt.show()\n\n def test(self, X, Y):\n prediction = np.dot(self.W1,X) + self.W0\n plt.plot(X.flatten(), Y.flatten())\n plt.plot(X.flatten(), prediction.flatten())\n plt.ylabel('Prediction')\n plt.xlabel('Input')\n plt.title('Testing')\n plt.show()\n\ndef Main():\n X = np.linspace(-np.pi,np.pi,200).reshape(-1,200)\n Y = np.sin(X) \n myModel = LinearModel(0.001)\n myModel.train(X,Y,40)\n myModel.test(X,Y)\n\nif __name__ == '__main__':\n Main()\n","sub_path":"ML_Scratch/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"557995934","text":"#!/usr/bin/env python3\n\n\"This Project is to give a command and the script should be able to create a new project in the location\"\n\nimport os\nimport logging\nimport argparse\nimport sys\nfrom github import Github\nfrom six.moves import configparser\n\n##############################CONFIGURATION FILE##################################\nconfig = configparser.ConfigParser()\nscript_location = os.path.dirname(os.path.realpath(__file__))\nconfig.read(script_location+'/gitcreate.ini')\n\npath = config.get('DEFAULT','local_path')\nusername = config.get('DEFAULT','git_username')\npassword = config.get('DEFAULT','git_password')\naccount_name = config.get('DEFAULT','git_account_name')\nlog_location = config.get('DEFAULT','logging_location')\nProjName = str(sys.argv[1])\n\n##############################lOGGING RULES#########################################\n# create an eventlogger\nlogger=logging.getLogger('PROJECTCREATE')\nlogger.setLevel(logging.DEBUG)\n#set formatter\nformatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')\n#creating a file handler\nfh = logging.FileHandler(log_location+'gitcreate.log')\nfh.setLevel(logging.DEBUG)\nfh.setFormatter(formatter)\n#Adding Handler\nlogger.addHandler(fh)\n\n##############################FUNCTIONS#########################################\n\n######################FUNCTION TO HANDLE CHANGES IN CLI#########################\ndef Cli_Git_Creator():\n ProjectPath = path + ProjName\n os.chdir(ProjectPath)\n open(\"README.md\", 'a').close()\n os.system('git init')\n link='git@github.com:'+account_name+'/'+ProjName+'.git'\n os.system('git remote add origin '+link)\n os.system('git add -A')\n os.system('git status')\n os.system('git commit -m \"commit\"')\n os.system('git push -u origin master')\n print (\"Your Initial commit is Done Successfully\")\n\n#####################FUNCTION TO CREATE GIT REPO IN GIT ACCOUNT#################\ndef Ui_Git_Creator_Api():\n g = Github(username, password)\n logging.info('Successfully loged into the Git account')\n user = g.get_user()\n New_repo = user.create_repo(ProjName)\n logging.info(\"Successfully Created a new Repo in Git Account\")\n print (New_repo)\n\n################################MAIN FUNCTION###################################\ndef create():\n try:\n if not os.path.exists(path + ProjName):\n print (path+ProjName)\n os.mkdir(path + ProjName)\n logging.info('New Project Folder {} is created '.format(ProjName))\n Ui_Git_Creator_Api()\n Cli_Git_Creator()\n except Exception as e:\n logging.error('There is an Error Occured: {}'.format(e))\n raise\n\nif __name__ == '__main__':\n create()\n","sub_path":"gitcreator.py","file_name":"gitcreator.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"368092250","text":"from __future__ import absolute_import, division, print_function\n\nfrom contextlib import contextmanager\n\noptions = {}\ndefault_values = {}\n\n\ndef get_default_val(pat):\n return default_values.get(pat)\n\n\ndef _get_option(pat, default_val=None):\n return options[pat] if pat in options else default_val\n\n\ndef _set_option(pat, val, default_val=None):\n options[pat] = val\n if default_val is not None:\n default_values[pat] = default_val\n\n\ndef _register_option(pat, val, default_val=None):\n _set_option(pat, val, default_val)\n\nget_option = _get_option\nset_option = _set_option\nregister_option = _register_option\n\n\nclass option_context(object):\n def __init__(self, *args):\n if not (len(args) % 2 == 0 and len(args) >= 2):\n raise ValueError('Need to invoke as'\n 'option_context(pat, val, [(pat, val), ...)).')\n\n self.ops = list(zip(args[::2], args[1::2]))\n self.undo = None\n\n def __enter__(self):\n undo = []\n for pat, val in self.ops:\n undo.append((pat, get_option(pat)))\n\n self.undo = undo\n\n for pat, val in self.ops:\n set_option(pat, val)\n\n def __exit__(self, *args):\n if self.undo:\n for pat, val in self.undo:\n set_option(pat, val)\n\n\n@contextmanager\ndef config_prefix(prefix):\n global get_option, set_option, register_option\n\n def wrap(func):\n def inner(key, *args, **kwds):\n pkey = '%s.%s' % (prefix, key)\n return func(pkey, *args, **kwds)\n\n return inner\n\n __get_option = get_option\n __set_option = set_option\n __register_option = register_option\n set_option = wrap(set_option)\n get_option = wrap(get_option)\n register_option = wrap(register_option)\n yield None\n set_option = __set_option\n get_option = __get_option\n register_option = __register_option\n\nif len(options) == 0:\n register_option('display.precision', 6)\n register_option('display.float_format', None)\n register_option('display.column_space', 12)\n register_option('display.max_rows', 12)\n register_option('display.max_columns', 20)\n register_option('storage.default', 'mmap')\n register_option('storage.hdf5.open', {'driver': 'core',\n 'backing_store': False})\n register_option('storage.hdf5.compression', {\"compression\": 'none'})\n register_option('storage.zarr.compression', {\"compression\": 'none'})\n","sub_path":"progressivis/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"640891603","text":"import pickle\nfrom keras.utils import np_utils\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Flatten\nimport numpy as np\n\n\ndef load_keras_model(modelname):\n newmodel = load_model(modelname)\n return newmodel\n\ndef get_weights(model):\n n_layers = len(model.layers)\n weights = []\n biases = []\n for i in range(0,n_layers):\n weights.append(model.layers[i].get_weights()[0])\n biases.append(model.layers[i].get_weights()[1])\n biases = model.layers[0].get_weights()[1]\n return weights,biases\n\ndef make_childs(w,b,loc, population):\n n_childs = population\n for i in range(0,n_childs):\n child = []\n for j in range(0,len(w)):\n # noise = np.random.normal(0,0.005,size=w[j].shape)\n noise = np.random.normal(0,0.2,size=w[j].shape)\n new_matrix = np.add(w[j],noise)\n child.append(new_matrix)\n #print(len(child))\n model = Sequential()\n model.add(Dense(100, input_dim=22,activation='sigmoid'))\n #Add hidden layer\n model.add(Dense(100, activation='sigmoid'))\n #Add output layer with 1 node to output either 0 or 1\n model.add(Dense(3,activation='tanh'))\n model.compile(loss='mean_squared_error', optimizer='adam')\n for q in range(0,len(w)):\n model.layers[q].get_weights()[0] = child[q]\n model.layers[q].get_weights()[1] = b[q]\n name = './'+loc+'/EVO'+str(i)+'.h5'\n print(name)\n model.save(name)\n\ndef make_new_parent(fitness, loc, population):\n best_10 = sorted(range(len(fitness)), key=lambda i: fitness[i])[-10:]\n for i in best_10:\n model_name = './'+loc+'/EVO'+str(i)+'.h5'\n model = load_keras_model(model_name)\n weights, bias = get_weights(model)\n for j in range(0,len(weights)):\n new_weights = np.multiply(fitness[i],weights)\n model = Sequential()\n model.add(Dense(100, input_dim=22,activation='sigmoid'))\n #Add hidden layer\n model.add(Dense(100, activation='sigmoid'))\n #Add output layer with 1 node to output either 0 or 1\n model.add(Dense(3,activation='tanh'))\n model.compile(loss='mean_squared_error', optimizer='adam')\n for q in range(0,len(weights)):\n print(q)\n model.layers[q].get_weights()[0] = new_weights[q]\n model.layers[q].get_weights()[1] = bias[q]\n model.save('Best_Model.h5')\n new_model = load_keras_model('Best_Model.h5')\n w, b = get_weights(model)\n make_childs(w,b,loc,population)\n\n\n\n#new_model = load_keras_model('MLPLALL4.h5')\n#w,b = get_weights(new_model)\n#make_childs(w,b)\n","sub_path":"Evolutionary/evolution_nn copy.py","file_name":"evolution_nn copy.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"610677436","text":"# -*-coding:utf-8-*- \n# 作者: 51666 \n# 当前系统日期时间:2019/9/19,14:03\n\"\"\"\n定义函数,在控制台中获取年龄\n如果异常或者年龄超过范围(0-150)则重复获取\n直到正确为止\n\"\"\"\ndef get_age():\n while True:\n try:\n age = int(input(\"请输入:\"))\n if 0<=age<=150:\n return age\n else:\n print(\"buzaio\")\n except ValueError:\n print(\"shuru\")\nprint(get_age())\n\n\n\n\n","sub_path":"month01/code/day015/练习/3.异常获取年龄.py","file_name":"3.异常获取年龄.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"467659319","text":"import pandas as pd\nimport hdf\n\nallgene = pd.read_csv(r'C:\\Users\\evans\\Dropbox\\Shade\\raw\\allgene.csv', index_col='Accession')\nloc = pd.read_csv(r\"C:\\Users\\evans\\Dropbox\\Shade\\raw\\Profile_all2076_B_withlocation.csv\", index_col=0)\ntest = pd.read_csv(r\"C:\\Users\\evans\\Dropbox\\Shade\\heatmap\\ja.csv\")\nloc.columns\ntest\ntemp = allgene.loc[allgene['Name2'].isin(test['Name2'].tolist())]\ntemp\nloc.loc[temp.index, 'Phospho'] + loc.loc[temp.index, 'Location']\n","sub_path":"peptide_to_gene.py","file_name":"peptide_to_gene.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"540627247","text":"#!/usr/bin/python\nimport json\nimport urllib2\n\nconfiguration = json.loads(open('../configuration.json', 'r').read())\n\n\noutput = {}\n\nfor repo in configuration[\"repositories\"]:\n response = urllib2.urlopen('https://api.github.com/repos/' + repo + '/commits')\n output[repo] = json.loads(response.read())\n \nf = open('data.json', 'r+')\nf.write(json.dumps(output))\nf.close()\n","sub_path":"production/github-stream/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"557832720","text":"import time\nimport gpiod\n\nimport stainless.rpi_modules.motor_functions.consts as consts\n\nclass Motor():\n def __init__(self, pins):\n self._pins = pins\n self._current_step = 0\n\n def turn(self, angle):\n target_angle = angle\n next_step = -1 if target_angle > 0 else 1\n self._chip = gpiod.Chip(\"gpiochip0\")\n self._lines = self._chip.get_lines(self._pins)\n self._lines.request(consumer=\"gpiochip0\",\n type=gpiod.LINE_REQ_DIR_OUT\n )\n while target_angle > consts.HALFSTEP_ANGLE or target_angle < -consts.HALFSTEP_ANGLE:\n self._lines.set_values(consts.HALFSTEP_SEQUENCE[self._current_step])\n self._current_step += next_step\n if self._current_step > 7:\n self._current_step = 0\n elif self._current_step < 0:\n self._current_step = 7\n target_angle += next_step * consts.HALFSTEP_ANGLE\n time.sleep(0.001)\n self._chip.close()\n\n\nif __name__ == \"__main__\":\n m = Motor([2,3,4,17])\n m.turn(90)\n m.turn(-90)\n","sub_path":"rpi_modules/motor_functions/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"201017426","text":"\"\"\" Implicit Alternating Least Squares \"\"\"\nimport logging\nimport time\n\nimport implicit.cuda\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nfrom .recommender_base import MatrixFactorizationBase\nfrom collections import namedtuple\n\nlog = logging.getLogger(\"implicit\")\n\nMatrixGenerator = namedtuple(\"MatrixGenerator\", [\"user_items\", \"item_users\"])\n\n\nclass PartialAlternatingLeastSquares(MatrixFactorizationBase):\n \"\"\" Alternating Least Squares\n\n A Recommendation Model based off the algorithms described in the paper 'Collaborative\n Filtering for Implicit Feedback Datasets' with performance optimizations described in\n 'Applications of the Conjugate Gradient Method for Implicit Feedback Collaborative\n Filtering.'\n\n Parameters\n ----------\n factors : int, optional\n The number of latent factors to compute\n regularization : float, optional\n The regularization factor to use\n dtype : data-type, optional\n Specifies whether to generate 64 bit or 32 bit floating point factors\n iterations : int, optional\n The number of ALS iterations to use when fitting data\n calculate_training_loss : bool, optional\n Whether to log out the training loss at each iteration\n\n Attributes\n ----------\n item_factors : ndarray\n Array of latent factors for each item in the training set\n user_factors : ndarray\n Array of latent factors for each user in the training set\n \"\"\"\n\n def __init__(self, item_users_shape, factors=128, regularization=0.01, dtype=np.float32,\n iterations=15, calculate_training_loss=False):\n super(PartialAlternatingLeastSquares, self).__init__()\n\n # currently there are some issues when training on the GPU when some of the warps\n # don't have full factors. Round up to be warp aligned.\n # TODO: figure out where the issue is (best guess is in the\n # the 'dot' function in 'implicit/cuda/utils/cuh)\n if factors % 32:\n padding = 32 - factors % 32\n log.warning(\"GPU training requires factor size to be a multiple of 32.\"\n \" Increasing factors from %i to %i.\", factors, factors + padding)\n factors += padding\n\n # parameters on how to factorize\n self.item_users_shape = item_users_shape\n self.factors = factors\n self.regularization = regularization\n\n # options on how to fit the model\n self.dtype = dtype\n self.iterations = iterations\n self.calculate_training_loss = calculate_training_loss\n self.fit_callback = None\n self.cg_steps = 3\n\n # cache for item factors squared\n self._YtY = None\n self._init_fit()\n\n def _init_fit(self):\n items, users = self.item_users_shape\n\n self.user_factors = np.random.rand(users, self.factors).astype(self.dtype) * 0.01\n self.item_factors = np.random.rand(items, self.factors).astype(self.dtype) * 0.01\n\n # 2.5 GB x 2 (3M users * ~200 features * 4 bytes * [users and items])\n self.gpu_user_factors = implicit.cuda.CuDenseMatrix(self.user_factors.astype(np.float32))\n self.gpu_item_factors = implicit.cuda.CuDenseMatrix(self.item_factors.astype(np.float32))\n\n self.solver = implicit.cuda.CuPartialLeastSquaresSolver(self.factors)\n\n def fit_generators(self, matrix_generator, show_progress=True):\n \"\"\" Factorizes the item_users matrix.\n\n After calling this method, the members 'user_factors' and 'item_factors' will be\n initialized with a latent factor model of the input data.\n\n The item_users matrix does double duty here. 
It defines which items are liked by which\n users (P_iu in the original paper), as well as how much confidence we have that the user\n liked the item (C_iu).\n\n The negative items are implicitly defined: This code assumes that non-zero items in the\n item_users matrix means that the user liked the item. The negatives are left unset in this\n sparse matrix: the library will assume that means Piu = 0 and Ciu = 1 for all these items.\n\n Parameters\n ----------\n matrix_generator: csr_matrix\n Matrix of confidences for the liked items. This matrix should be a csr_matrix where\n the rows of the matrix are the item, the columns are the users that liked that item,\n and the value is the confidence that the user liked the item.\n show_progress : bool, optional\n Whether to show a progress bar during fitting\n \"\"\"\n X = self.gpu_user_factors\n Y = self.gpu_item_factors\n\n log.debug(\"Running %i ALS iterations\", self.iterations)\n with tqdm(total=self.iterations, disable=not show_progress) as progress:\n for iteration in range(self.iterations):\n iteration_data = next(matrix_generator)\n s = time.time()\n self.solver.least_squares_init(Y)\n for start_user, size, user_items in tqdm(iteration_data.user_items):\n Cui = implicit.cuda.CuCSRMatrix(user_items)\n self.solver.least_squares(start_user, size, Cui, X, Y, self.regularization, self.cg_steps)\n del Cui\n del user_items\n progress.update(.5)\n\n self.solver.least_squares_init(X)\n for start_item, size, item_users in tqdm(iteration_data.item_users):\n Ciu = implicit.cuda.CuCSRMatrix(item_users)\n self.solver.least_squares(start_item, size, Ciu, Y, X, self.regularization, self.cg_steps)\n del Ciu\n del item_users\n progress.update(.5)\n\n if self.fit_callback:\n self.fit_callback(iteration, time.time() - s)\n\n if self.calculate_training_loss:\n loss = self.solver.calculate_loss(Cui, X, Y, self.regularization)\n progress.set_postfix({\"loss\": loss})\n\n if self.calculate_training_loss:\n log.info(\"Final training loss %.4f\", loss)\n\n X.to_host(self.user_factors)\n Y.to_host(self.item_factors)\n\n def _fit_partial_step(self, user_items, X, Y):\n s = time.time()\n log.debug(\"Computing YtY\")\n self.solver.least_squares_init(Y, self.regularization)\n log.debug(\"YtY done in %03d s\" % (time.time() - s))\n\n s = time.time()\n start_user, size, user_items = user_items\n Cui = implicit.cuda.CuCSRMatrix(user_items)\n self.solver.least_squares(start_user, size, Cui, X, Y, self.cg_steps)\n del Cui\n log.debug(\"Computed step in %03d s\" % (time.time() - s))\n\n # noinspection PyPep8Naming\n def fit_partial(self, user_items, item_users):\n X = self.gpu_user_factors\n Y = self.gpu_item_factors\n\n self._fit_partial_step(user_items, X, Y)\n self._fit_partial_step(item_users, Y, X)\n\n if self.calculate_training_loss:\n Cui = implicit.cuda.CuCSRMatrix(user_items)\n loss = self.solver.calculate_loss(Cui, X, Y, self.regularization)\n del Cui\n log.info(\"Final training loss %.4f\", loss)\n\n X.to_host(self.user_factors)\n Y.to_host(self.item_factors)\n\n def _create_progress(self, reset=True, total=None):\n if not hasattr(self, \"progress\"):\n self.progress = tqdm(leave=True)\n\n if reset:\n self.progress.reset(total=total)\n\n # noinspection PyPep8Naming\n def fit_partial_users(self, user_items_generator, total=None):\n self._create_progress(total=total)\n\n X = self.gpu_user_factors\n Y = self.gpu_item_factors\n\n for user_items in user_items_generator:\n self._fit_partial_step(user_items, X, Y)\n del user_items\n self.progress.update()\n\n 
X.to_host(self.user_factors)\n Y.to_host(self.item_factors)\n\n # noinspection PyPep8Naming\n def fit_partial_items(self, item_users_generator, total=None):\n self._create_progress(total=total)\n\n X = self.gpu_user_factors\n Y = self.gpu_item_factors\n\n for item_users in item_users_generator:\n self._fit_partial_step(item_users, Y, X)\n del item_users\n self.progress.update()\n\n X.to_host(self.user_factors)\n Y.to_host(self.item_factors)\n\n def loss(self, user_items):\n self._create_progress(reset=False)\n\n X = self.gpu_user_factors\n Y = self.gpu_item_factors\n start_user, size, user_items = user_items\n Cui = implicit.cuda.CuCSRMatrix(user_items)\n loss = self.solver.calculate_loss(start_user, size, Cui, X, Y, self.regularization)\n del Cui\n self.progress.set_postfix({\"loss\": loss})\n return loss\n #log.info(\"Final training loss %.4f\", loss)\n","sub_path":"implicit/als_partial.py","file_name":"als_partial.py","file_ext":"py","file_size_in_byte":8884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"434137994","text":"import json\nimport uuid\nimport cv2\nimport os\n\nclass ConverToTextract:\n def __init__(self, file_name_dest, image_file, lines):\n self.file_dest=file_name_dest\n self.lines = lines\n \n \n image = cv2.imread(image_file)\n size = image.shape\n w = size[1] #宽度\n h = size[0] #高度\n #print('{} size = {}'.format(image_file, size))\n \n self.width=w\n self.height=h\n\n def __write_result(self,result):\n with open(self.file_dest, 'w', encoding='utf-8') as file:\n file.write(json.dumps(result))\n \n \n def convert(self):\n width=self.width\n height=self.height\n result = {\"DocumentMetadata\": {\"Pages\": 1}, \"JobStatus\": \"SUCCEEDED\"}\n block_page = {\"BlockType\": \"PAGE\",\n \"Geometry\": {\"BoundingBox\": {\"Width\": 1.0, \"Height\": 1.0, \"Left\": 0.0, \"Top\": 0.0},\n \"Polygon\": [{\"X\": 0.0, \"Y\": 0.0}, {\"X\": 1.0, \"Y\": 0.0}, {\"X\": 1.0, \"Y\": 1.0},\n {\"X\": 0.0, \"Y\": 1.0}]}, \"Id\": str(uuid.uuid4())}\n\n lines = self.lines\n\n ids = []\n result[\"Blocks\"] = [block_page]\n for line in lines:\n line = line.replace(\"\\n\", '')\n items = line.split(',')\n \n block_word = {\"BlockType\": \"WORD\"}\n block_word[\"Confidence\"] = float(items[8])\n block_word[\"Text\"] = ','.join(items[9:])\n BoundingBox = {\"Width\": float(int(items[2]) - int(items[0])) / width, \n \"Height\": float(int(items[7]) - int(items[1])),\n \"Left\": float(items[0]) / width, \n \"Top\": float(items[1]) / height}\n \n Polygon_0 = {\"X\": float(items[0]) / width, \"Y\": float(items[1]) / height}\n Polygon_1 = {\"X\": float(items[2]) / width, \"Y\": float(items[3]) / height}\n Polygon_2 = {\"X\": float(items[4]) / width, \"Y\": float(items[5]) / height}\n Polygon_3 = {\"X\": float(items[6]) / width, \"Y\": float(items[7]) / height}\n \n Polygon = [Polygon_0, Polygon_1, Polygon_2, Polygon_3]\n block_word[\"Geometry\"] = {\"BoundingBox\": BoundingBox, \"Polygon\": Polygon}\n block_word_id = str(uuid.uuid4())\n block_word[\"Id\"] = block_word_id\n block_word[\"Page\"] = 1\n ids.append(block_word_id)\n result[\"Blocks\"].append(block_word)\n\n block_page[\"Relationships\"] = [{\"Type\": \"CHILD\", \"Ids\": ids}]\n block_page[\"Page\"] = 1\n self.__write_result(result)\n\n\nif __name__ == \"__main__\":\n #convert=OCRConver('/home/ec2-user/tfc/031_ocr/ocr-craft-cn-pytorch/temp/output/demo001.txt','temp.json', '/home/ec2-user/tfc/031_ocr/ocr-craft-cn-pytorch/temp/output/demo001.jpg' )\n #convert.convert()\n print(\"main\")","sub_path":"local/recognition/textract.py","file_name":"textract.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"278472532","text":"from django.shortcuts import render\n\nfrom qna.models import Answer\n\nfrom .models import SearchQuery\n\n# Create your views here.\ndef search_view(request):\n\tquery=request.GET.get('q', None)\n\tuser= None\n\tif request.user.is_authenticated:\n\t\tuser= request.user\n\tcontext= {\"query\": query}\n\tif query is not None:\t\n\t\tSearchQuery.objects.create(user=user, query=query)\n\t\tanswer_list= Answer.objects.search(query=query)\t\n\t\tcontext['answer_list'] = answer_list\n\treturn render(request,'searches/search.html', context)","sub_path":"searches/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"464115863","text":"from tkinter import *\nimport sqlite3\n\n\n# def buttom_click():\ndef myClick():\n playLabel = Label(root, text=\"CDN :)\")\n playLabel.grid(row=4, column=2)\n\n\n# def show():\n# \tmyLabel = Label(root, text=var.get()).pack()\n\n\nroot = Tk()\nroot.geometry(\"500x200\")\nroot.title(\"BlackJack\")\nroot.config(bg=\"sea green\")\n\nblackjack = Label(root, text=\"BLACKJACK\", font=('goudy stout', 20), bg=\"sea green\", fg=\"goldenrod3\")\nblackjack.grid(row=0, column=1)\n\nname = Label(root, text=\"Your name:\", font=('cambria', 13), bg=\"sea green\", fg=\"white\")\nname.grid(row=1, column=0)\n\npack = Label(root, text=\"Pack number:\", font=('cambria', 13), bg=\"sea green\", fg=\"white\")\npack.grid(row=2, column=0)\n\ne_name = Entry(root, borderwidth=3, width=40, justify=CENTER)\ne_name.grid(row=1, column=1, padx=20, pady=(10, 0))\n\ne_pack = Entry(root, borderwidth=3, width=40, justify=CENTER)\ne_pack.grid(row=2, column=1, padx=20, pady=(10, 0))\n\nplay = Button(root, text=\"Play\", command=myClick, borderwidth=5, bg=\"goldenrod3\", fg=\"white\")\nplay.grid(row=3, column=1, padx=20, pady=10)\nplay.config(width=10, height=1)\n\nroot.mainloop()\n","sub_path":"GUI_start_game.py","file_name":"GUI_start_game.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"363252502","text":"from .model import Model\nfrom configparser import ConfigParser\nimport os\n\ndef get_data_model():\n dir_path = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(dir_path, \"..\", \"..\", \"..\", \"configuration.ini\")\n\n parser = ConfigParser()\n parser.read(config_path)\n\n host = parser.get('elasticsearch', 'host')\n port = parser.get('elasticsearch', 'port')\n data_model = Model(host, port)\n\n return data_model","sub_path":"src/isecurity_webserver/data_model/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"434737694","text":"\"\"\"\nDescription: Replace words found in a value with other words\n\"\"\"\nfrom mdatapipe.core import PipelinePlugin\nimport re\n\n\nclass Plugin(PipelinePlugin):\n\n supported_types = [dict]\n\n def on_start(self):\n # Ccompile regex on plugin start\n self.runtime_config = {}\n for field_name, regex in self.config.items():\n self.runtime_config[field_name] = re.compile(regex)\n\n def on_input(self, item):\n for field_name, regex in self.runtime_config.items():\n item[field_name] = regex.findall(item[field_name])[0]\n self.put(item)\n","sub_path":"mdatapipe-0.1/mdatapipe/plugins/transform/field/regex_group_replace.py","file_name":"regex_group_replace.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"420923625","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport requests\nimport json\nimport re\nimport os\nimport logging\nfrom datetime import datetime\nfrom os.path import join, abspath, dirname\n\nfrom db import db, User\n\n# 打卡任务\n# 登陆, 返回cookie\n# 获取文章列表\n# 打开并完成两篇文章\n# 打卡签到, 返回结果\n\nlogging.basicConfig(level=logging.INFO,\n format='%(levelname)s %(message)s',\n filename=abspath(join(dirname(__file__), 'log.log')),\n filemode='a')\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n\n# TODO: 定时任务, 1: 零点修改全部的打卡状态 2: 执行打卡任务\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',\n 'Origin':'http://www.shanbay.com',\n 'Content-Type':'application/x-www-form-urlencoded',\n 'Connection':'keep-alive',\n 'Cache-Control':'no-cache'\n }\n\n\n# 返回身份验证的cookies\ndef login(uname, pwd):\n r = requests.get('http://www.shanbay.com/accounts/login/')\n cookie = dict(csrftoken=r.cookies['csrftoken'])\n data = {'username': uname,'password':pwd,'csrfmiddlewaretoken': r.cookies['csrftoken']}\n p = requests.post('http://www.shanbay.com/accounts/login/',\n data = data,\n cookies = cookie,\n headers = headers,\n allow_redirects=False\n )\n is_ok = True if p.status_code == 302 else False\n return is_ok, p.cookies\n\n\n# get list of news\ndef get_list():\n r = requests.get('http://www.shanbay.com/read/news/')\n pattern = re.compile('')\n return pattern.findall(r.text)\n\n\n# 阅读文章\ndef read(nid, cookie):\n url = 'http://www.shanbay.com/api/v1/read/article/%s' % nid\n r = requests.get(url, cookies=cookie, headers=headers)\n s = json.loads(r.text)\n if 0 == s['status_code']:\n url = 'http://www.shanbay.com/api/v1/read/article/user/%s/' % nid\n data = {'used_time': 300, 'operation': 'finish'}\n r = requests.put(url, cookies=cookie, data=data, headers=headers)\n s = json.loads(r.text)\n if (s['status_code'] == 0) or (s['status_code'] == 1):\n return True\n else:\n logging.warn('打开文章失败')\n logging.warn(r.text)\n return False\n else:\n logging.warn('完成阅读失败')\n logging.warn(r.text)\n return False\n\n\n# 打卡\ndef finish(cookie):\n url = 'http://www.shanbay.com/api/v1/checkin/?for_web=true'\n data = {'for_web': 'true'}\n r = requests.post(url, cookies=cookie, data=data, headers=headers)\n s = json.loads(r.text)\n if s['status_code'] == 0 or s['status_code'] == 1:\n return True\n else:\n logging.warn('打开失败')\n logging.warn(r.text)\n return False\n\n\n# 流程\ndef task(user):\n newses = get_list()\n is_login, ck = login(user['username'], user['password'])\n if not is_login:\n logging.warn('登陆出错')\n return {'msg': 'login failed', 'status': 1}\n read(newses[0], ck)\n read(newses[1], ck)\n if finish(ck):\n return {'msg': 'success', 'status': 0, 'uid': user['id']}\n else:\n return {'msg': 'failed', 'status': 1}\n\n\n# 保存到数据库\ndef persistent():\n logging.info('####################################')\n logging.info('start time: {0}'.format(datetime.now()))\n users = db.query(User).all()\n for user in users:\n u = {'id': user.id, 'username': user.username, 'password': user.password}\n r = task(u)\n\n logging.info(r)\n\n if r['status'] == 0:\n user.punch = True\n db.commit()\n logging.info('end time: {0}'.format(datetime.now()))\n logging.info('####################################\\n\\n')\n\n\nif __name__ == '__main__':\n 
persistent()\n\n\n\n\n\n\n","sub_path":"shanbay.py","file_name":"shanbay.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"592098253","text":"\nimport sys\n\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport xml.etree.cElementTree as cET\nimport msgpack\n\ndef is_int(type_):\n return type_ in ['int(11)', 'bigint(20)']\n\ndef is_float(type_):\n return type_ == 'double'\n\ndef default_value(type_):\n if is_int(type_) or is_float(type_):\n return 0\n elif type_ == 'datetime':\n return '1970-01-01 00:00:00'\n else:\n return ''\n\nif len(sys.argv) < 2:\n print('Usage: python3 ./MasterDataTool.py develop')\n sys.exit()\n\nPHASE = sys.argv[1]\n\nPATH = './' + PHASE\nFILES = [f for f in listdir(PATH) if isfile(join(PATH, f))]\n\nNS = '{urn:schemas-microsoft-com:office:spreadsheet}'\nNS_WORKSHEET = NS + 'Worksheet'\nNS_NAME = NS + 'Name'\nNS_TABLE = NS + 'Table'\nNS_ROW = NS + 'Row'\nNS_CELL = NS + 'Cell'\nNS_INDEX = NS + 'Index'\nNS_DATA = NS + 'Data'\n\nfor file_ in FILES:\n column_visibilities = []\n column_names = []\n column_types = []\n\n tree = cET.parse(join(PATH, file_))\n root = tree.getroot()\n\n for worksheet in root.iter(tag=NS_WORKSHEET):\n name = worksheet.attrib[NS_NAME]\n print(file_ + ' : ' + name)\n\n mst_dict = {}\n column_visibilities.clear()\n column_names.clear()\n column_types.clear()\n\n row_index = 0\n column_idx = 0\n column_len = 0\n\n table = worksheet.find(NS_TABLE)\n for row in table.iter(tag=NS_ROW):\n\n if row_index == 0: # colum_visibility\n for cell in row.iter(tag=NS_CELL):\n data = cell.find(NS_DATA)\n if data is not None and data.text is not None:\n if data.text == 'END_OF_COLUMNS':\n break\n column_visibilities.append(data.text)\n\n column_len = len(column_visibilities)\n\n elif row_index == 1: # colum_name\n column_idx = 0\n for cell in row.iter(tag=NS_CELL):\n if column_idx == column_len:\n break\n\n data = cell.find(NS_DATA)\n if data is not None:\n if data.text is not None:\n column_names.append(data.text)\n else:\n column_names.append(''.join([c.text for c in data.getchildren()]))\n\n column_idx += 1\n\n if len(column_visibilities) != len(column_names):\n print('error : column_visibility_count and column_name_count mismatch')\n sys.exit()\n\n elif row_index == 2: # colum_type\n column_idx = 0\n for cell in row.iter(tag=NS_CELL):\n if column_idx == column_len:\n break\n\n data = cell.find(NS_DATA)\n if data is not None and data.text is not None:\n column_types.append(data.text)\n\n column_idx += 1\n\n if len(column_visibilities) != len(column_types):\n print('error : column_visibility_count and column_type_count mismatch')\n sys.exit()\n\n elif row_index == 3: # colum_description\n row_index += 1\n continue\n\n else: # data\n column_idx = 0\n data_mid = None\n for cell in row.iter(tag=NS_CELL):\n if data_mid is not None and NS_INDEX in cell.attrib:\n cell_attrib_index = int(cell.attrib[NS_INDEX]) - 1\n while column_idx < cell_attrib_index and column_idx < column_len:\n mst_dict[data_mid][column_names[column_idx]] = \\\n default_value(column_types[column_idx])\n\n column_idx += 1\n\n if column_idx == column_len:\n break\n\n data = cell.find(NS_DATA)\n if data is not None:\n data_text = None\n\n if data.text is not None:\n data_text = data.text\n else:\n data_text = ''.join([c.text for c in data.getchildren()])\n\n if data_text == 'END_OF_DATA':\n break\n\n visible = column_visibilities[column_idx] != 'unlisted'\n\n if column_idx == 0:\n data_mid = data_text\n mst_dict[data_mid] = {}\n\n elif visible and 'list' in column_types[column_idx]:\n mst_dict[data_mid][column_names[column_idx]] = data_text.split(';')\n\n elif visible:\n if 
is_int(column_types[column_idx]):\n mst_dict[data_mid][column_names[column_idx]] = int(data_text)\n\n elif is_float(column_types[column_idx]):\n mst_dict[data_mid][column_names[column_idx]] = float(data_text)\n\n else:\n mst_dict[data_mid][column_names[column_idx]] = data_text\n\n column_idx += 1\n\n row_index += 1\n\n with open('./' + PHASE + '_msgpacked/' + name + '.msgpacked', 'wb') as mst_file:\n mst_file.write(msgpack.packb(mst_dict))\n\n","sub_path":"mst/MasterDataTool.py","file_name":"MasterDataTool.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"187237554","text":"# ex:ts=4:sw=4:sts=4:et\n# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-\nfrom __future__ import absolute_import\nimport re\nfrom svtplay_dl.service.svtplay import Svtplay\nfrom svtplay_dl.log import log\n\n\nclass OppetArkiv(Svtplay):\n supported_domains = ['oppetarkiv.se']\n\n def find_all_episodes(self, options):\n page = 1\n data = self.get_urldata()\n match = re.search(r'\"/etikett/titel/([^\"/]+)', data)\n if match is None:\n match = re.search(r'\"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)\n if match is None:\n log.error(\"Couldn't find title\")\n return\n program = match.group(1)\n episodes = []\n\n n = 0\n if self.options.all_last > 0:\n sort = \"tid_fallande\"\n else:\n sort = \"tid_stigande\"\n\n while True:\n url = \"http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true\" % (program, page, sort)\n data = self.http.request(\"get\", url)\n if data.status_code == 404:\n break\n\n data = data.text\n regex = re.compile(r'href=\"(/video/[^\"]+)\"')\n for match in regex.finditer(data):\n if n == self.options.all_last:\n break\n episodes.append(\"http://www.oppetarkiv.se%s\" % match.group(1))\n n += 1\n page += 1\n\n return episodes\n","sub_path":"lib/svtplay_dl/service/oppetarkiv.py","file_name":"oppetarkiv.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"398407014","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 9 23:16:59 2016\n\n@author: rmuehleisen\nbased on example of using lxml at\nhttp://www.blog.pythonlibrary.org/2010/11/20/python-parsing-xml-with-lxml/\n\"\"\"\n\nfrom lxml import etree \n#from StringIO import StringIO\n\n\n\n\ntree = etree.parse(\"subset.xml\") \nprint(tree.docinfo.doctype)\ncontext = etree.iterparse(\"subset.xml\")\n\nrecord_dict = {}\nrecords = []\nfor action, elem in context:\n if not elem.text:\n text = \"None\"\n elif elem.text[0] == \"\\n\":\n text=\"None\"\n \n else:\n text = elem.text\n print(elem.tag + \" => \" + text)\n record_dict[elem.tag] = text\n if elem.tag == \"record\":\n \n records.append(record_dict)\n record_dict = {}\n","sub_path":"xml parsing/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"571219697","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Lex\"\n# Date: 2017/11/23\n\nfor i in range(10):\n x=55\n\nprint(x) #55\n\n\nif True:\n x = 30\n\nprint(x) #30\n","sub_path":"Day31/名称空间补充.py","file_name":"名称空间补充.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"21045745","text":"import unittest\nfrom datetime import datetime\n\nimport mocker\nimport enum\n\nfrom balaio.models import (\n Point,\n Checkpoint,\n Status,\n Notice,\n Attempt,\n ArticlePkg,\n)\nfrom . import doubles\n\n\nclass CheckpointTests(unittest.TestCase):\n\n def test_type_must_be_known(self):\n chk_point = Checkpoint(Point.checkin)\n self.assertIsInstance(chk_point, Checkpoint)\n\n def test_unknown_type_raises_ValueError(self):\n class Color(enum.Enum):\n red = 1\n\n self.assertRaises(ValueError, lambda: Checkpoint(Color.red))\n\n def test_non_enum_type_raises_ValueError(self):\n self.assertRaises(ValueError, lambda: Checkpoint('foo'))\n\n def test_neutral_initial_state(self):\n chk_point = Checkpoint(Point.checkin)\n self.assertEqual(chk_point.started_at, None)\n self.assertEqual(chk_point.ended_at, None)\n\n def test_started_at_is_filled_on_start(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n self.assertIsInstance(chk_point.started_at, datetime)\n\n def test_start_is_idempotent(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n date1 = chk_point.started_at\n chk_point.start()\n date2 = chk_point.started_at\n\n self.assertEqual(date1, date2)\n\n def test_ended_at_is_filled_on_end(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n chk_point.end()\n\n self.assertIsInstance(chk_point.ended_at, datetime)\n\n def test_end_is_idempotent(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n chk_point.end()\n date1 = chk_point.ended_at\n chk_point.end()\n date2 = chk_point.ended_at\n\n self.assertEqual(date1, date2)\n\n def test_end_before_start_raises_RuntimeError(self):\n chk_point = Checkpoint(Point.checkin)\n self.assertRaises(RuntimeError, lambda: chk_point.end())\n\n def test_is_active_returns_False_on_initial_state(self):\n chk_point = Checkpoint(Point.checkin)\n self.assertEqual(chk_point.is_active, False)\n\n def test_is_active_returns_True_after_start(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n self.assertEqual(chk_point.is_active, True)\n\n def test_is_active_returns_False_after_end(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n chk_point.end()\n self.assertEqual(chk_point.is_active, False)\n\n def test_tell_store_messages(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n chk_point.tell('Foo', Status.ok)\n self.assertEqual(chk_point.messages[0].label, None)\n self.assertEqual(chk_point.messages[0].message, 'Foo')\n self.assertEqual(chk_point.messages[0].status, Status.ok)\n\n def test_tell_store_messages_based_on_labels(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n chk_point.tell('Foo', Status.ok, label='zip')\n self.assertEqual(chk_point.messages[0].label, 'zip')\n self.assertEqual(chk_point.messages[0].message, 'Foo')\n self.assertEqual(chk_point.messages[0].status, Status.ok)\n\n def test_tell_raises_RuntimeError_on_inactive_objects(self):\n chk_point = Checkpoint(Point.checkin)\n chk_point.start()\n chk_point.end()\n self.assertRaises(RuntimeError, lambda: chk_point.tell('Foo', Status.ok, label='zip'))\n\n\nclass NoticeTests(unittest.TestCase):\n\n def test_status_set_enum_values(self):\n ntc = Notice()\n ntc.status = Status.ok\n self.assertEqual(ntc.status, Status.ok)\n\n\nclass PointTests(unittest.TestCase):\n\n def test_required_enums(self):\n names = [pt.name for pt in Point]\n self.assertIn('checkin', names)\n self.assertIn('validation', names)\n self.assertIn('checkout', names)\n\n\nclass 
StatusTests(unittest.TestCase):\n\n def test_required_enums(self):\n names = [st.name for st in Status]\n self.assertIn('ok', names)\n self.assertIn('warning', names)\n self.assertIn('error', names)\n\n\nclass AttemptTests(mocker.MockerTestCase):\n\n def test_get_from_package(self):\n mock_session = self.mocker.mock()\n self.mocker.replay()\n pkg_analyzer = doubles.PackageAnalyzerStub()\n pkg_analyzer.is_valid_meta = lambda *args, **kwargs: True\n\n attempt = Attempt.get_from_package(pkg_analyzer)\n self.assertIsInstance(attempt, Attempt)\n\n def test_get_from_package_not_valid_for_missing_meta(self):\n mock_session = self.mocker.mock()\n self.mocker.replay()\n pkg_analyzer = doubles.PackageAnalyzerStub()\n pkg_analyzer.meta = {'journal_eissn': None, 'journal_pissn': None,\n 'article_title': None}\n pkg_analyzer.is_valid_meta = lambda *args, **kwargs: False\n\n attempt = Attempt.get_from_package(pkg_analyzer)\n self.assertFalse(attempt.is_valid)\n\n def test_get_from_package_not_valid_if_invalid(self):\n mock_session = self.mocker.mock()\n self.mocker.replay()\n pkg_analyzer = doubles.PackageAnalyzerStub()\n pkg_analyzer.meta = {'journal_eissn': '1234-1234', 'journal_pissn': '4321-1234'}\n pkg_analyzer.is_valid_meta = lambda *args, **kwargs: True\n pkg_analyzer.is_valid_package = lambda *args, **kwargs: False\n\n attempt = Attempt.get_from_package(pkg_analyzer)\n self.assertFalse(attempt.is_valid)\n\n\nclass ArticlePkgTests(mocker.MockerTestCase):\n\n def test_get_or_create_from_package(self):\n mock_session = self.mocker.mock()\n\n pkg_analyzer = doubles.PackageAnalyzerStub()\n pkg_analyzer.criteria = {'article_title': 'foo', 'journal_eissn':'1234-1234', 'journal_pissn':'1234-4321'}\n\n mock_session.query(ArticlePkg)\n self.mocker.result(mock_session)\n\n mock_session.filter_by(article_title='foo')\n self.mocker.result(mock_session)\n\n mock_session.one()\n self.mocker.result(ArticlePkg())\n\n self.mocker.replay()\n\n article_pkg = ArticlePkg.get_or_create_from_package(pkg_analyzer, mock_session)\n\n self.assertIsInstance(article_pkg, ArticlePkg)\n\n","sub_path":"balaio/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"349708285","text":"import subprocess\nfrom shutil import copyfile\nimport re\nimport sys\nimport time\nimport socket\nimport datetime\n\nneed_to_add_0 = True\n\ndef printMessage(s):\n print(\"--------------------------------------------------------\")\n print(\" \", s)\n print(\"--------------------------------------------------------\")\n\n\ndef addToTopFile(line):\n with open(\"ioTopData.txt\", \"a\") as myfile:\n myfile.write(line)\n\ndef skipLinesUntilToken(process, token):\n i = 0\n for line in process.stdout:\n s = line.decode('utf-8')\n i += 1\n if s.find(token) != -1:\n break;\n\ndef extract_line(s):\n s = re.sub('\\x1b[^m]*m', '', s)\n s = re.sub(r'\\x1b\\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?', '', s)\n sep = re.compile('[\\s]+')\n #addToTopFile(s)\n s = sep.split(s)\n del s[-1]\n return s\n\ndef processTopLine(s):\n global need_to_add_0\n line = extract_line(s)\n # print(len(line), line)\n\n # deleting empty cells in the beginning\n while line and not line[0]:\n line = line[1:]\n\n if not line:\n return True\n if line[3].strip() == \"0.00\" and line[5].strip() == \"0.00\":\n if not need_to_add_0:\n return False\n need_to_add_0 = False\n else:\n need_to_add_0 = True\n\n line.pop(8)\n line.pop(10)\n command = \" \".join(line[9:])\n line = line[:9]\n line.append(command)\n line = '|'.join(line)\n currtime = (datetime.datetime.now()).strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-3]\n line = socket.gethostname() + '|' + currtime + '|' + line\n print(line)\n #exit()\n addToTopFile(line + \"\\n\")\n return True\n\n\ndef GetIoTopStat():\n try:\n while True:\n command = ['iotop', '-b', '-n1']\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n skipLinesUntilToken(process,\"TID\")\n while True:\n s = process.stdout.readline().decode('utf-8')\n if not processTopLine(s):\n break\n time.sleep(.4)\n process.terminate()\n except KeyboardInterrupt:\n pass\n return\n\nprintMessage(\"Calling GetIoTopStat()\")\nGetIoTopStat()\n\n","sub_path":"ParseIoTop.py","file_name":"ParseIoTop.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"427789608","text":"'''\nAutor: Marcos Felipe da Silva\nDescricao: Testes na rota /vendedor_x_lentes\n'''\n\nimport unittest, json\nfrom requests import Session\n\nclass TestVendedorXLentes(unittest.TestCase):\n def __init__(self, *args, **kargs):\n super(TestVendedorXLentes, self).__init__(*args, **kargs)\n self._c = Session()\n self._host = 'http://localhost:8080/POA'\n self._url = '/vendedor_x_lentes'\n self._chave = '657qJt1qPQWGCUuTkX8DZ5zzKuG3'\n \n def setUp(self):\n self._c.get(self._host+'/validar_autenticacao/'+self._chave)\n \n def tearDown(self):\n self._c.get(self._host+'/logout')\n \n def test_a_vendedor_x_lentes_sem_cp_dados(self):\n ''' Tenta o obter os dados mas é esperado um erro '''\n dados = {'outros': json.dumps({\n 'de': '2019-11-01','ate': '2019-11-28', 'lojas': '1'\n })\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n self.assertIn('erro', dados.keys())\n def test_b_vendedor_x_lentes_cp_dados_nao_json(self):\n dados = {'dados': {\n 'de': '2019-11-01','ate': '2019-11-28', 'lojas': '1'\n }\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n print(dados)\n self.assertIn('erro', dados.keys())\n def test_c_vendedor_x_lentes_cp_dados_nao_atr_de(self):\n dados = {'dados': json.dumps({\n 'ate': '2019-11-28', 'lojas': '1'\n })\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n self.assertIn('erro', dados.keys())\n def test_d_vendedor_x_lentes_cp_dados_nao_atr_ate(self):\n dados = {'dados': json.dumps({\n 'de': '2019-11-01', 'lojas': '1'\n })\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n self.assertIn('erro', dados.keys())\n \n def test_f_vendedor_x_lentes_cp_dados_nao_atr_lojas(self):\n dados = {'dados': json.dumps({\n 'de': '2019-11-01','ate': '2019-11-28', \n })\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n self.assertIn('erro', dados.keys())\n \n def test_g_vendedor_x_lentes_cp_dados_atr_de_maior_que_ate(self):\n dados = {'dados': json.dumps({\n 'de': '2019-12-01','ate': '2019-11-28', 'lojas': '1'\n })\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n self.assertIn('erro', dados.keys())\n \n def test_h_vendedor_x_lentes_correto(self):\n dados = {'dados': json.dumps({\n 'de': '2019-11-01','ate': '2019-11-28', 'lojas': '0101'\n })\n }\n dados = self._c.post(self._host+self._url, data = dados).json()\n self.assertIn('cabe', dados.keys())\n self.assertIn('corpo', dados.keys())\n print(dados)\n\n\nif __name__ == '__main__': unittest.main()\n\n ","sub_path":"__testes__/lentes/test_vendedor_x_lentes.py","file_name":"test_vendedor_x_lentes.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"100820835","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n Send message to Slack from command line\n\"\"\"\n\nfrom slacker import Slacker\nimport argparse\nfrom rcfile import rcfile\nimport sys\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--channel\", help=\"Slack channel\")\n parser.add_argument(\"-t\", \"--token\", help=\"Slack token\")\n\n args = parser.parse_args()\n rcargs = rcfile( \"slacker\", args.__dict__ )\n\n token = args.token or rcargs['token']\n channel = args.channel or rcargs['channel']\n\n\n if not token or not channel:\n exit(1)\n\n channel = '#{}'.format(channel)\n message = sys.stdin\n\n slack = Slacker(token)\n\n slack.chat.post_message(channel, message)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"slacker_cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"328532684","text":"#!/usr/bin/env python3\n\"\"\"Script to forecast data using RNN AI using GRU feedback.\"\"\"\n\n# Standard imports\nfrom __future__ import print_function\nimport time\nimport os\nimport sys\nfrom copy import deepcopy\nfrom pprint import pprint\nimport gc\n\n# PIP3 imports.\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_absolute_error\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom hyperopt import STATUS_OK\nfrom statsmodels.tsa.stattools import adfuller\n\n# TensorFlow imports\nimport tensorflow as tf\n\n# Keras imports\nfrom keras.models import Sequential, model_from_yaml\nfrom keras.layers import Dense, GRU\nfrom keras.optimizers import RMSprop\nfrom keras.initializers import RandomUniform\nfrom keras.callbacks import (\n EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau)\nfrom keras.utils import multi_gpu_model\nfrom keras import Model\n\n\n# Merlin imports\nfrom forecast import general\nfrom forecast import memory\n\n\nclass RNNGRU(object):\n \"\"\"Process data for ingestion.\n\n Roughly based on:\n\n https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/23_Time-Series-Prediction.ipynb\n\n \"\"\"\n\n def __init__(\n self, _data, batch_size=64, epochs=20,\n sequence_length=20, warmup_steps=50, dropout=0,\n layers=1, patience=10, units=256, display=False, binary=False,\n multigpu=False):\n \"\"\"Instantiate the class.\n\n Args:\n data: Tuple of (x_data, y_data, target_names)\n batch_size: Size of batch\n sequence_length: Length of vectors for for each target\n warmup_steps:\n display: Show charts of results if True\n binary: Process data for predicting boolean up / down movement vs\n actual values if True\n Returns:\n None\n\n \"\"\"\n # Initialize key variables\n self._warmup_steps = warmup_steps\n self._binary = binary\n self._display = display\n self._data = _data\n\n # Setup memory\n gpus = memory.setup()\n\n # Setup GPUs\n if multigpu is True:\n self._gpus = gpus\n else:\n self._gpus = 1\n\n # Set key file locations\n path_prefix = '/tmp/keras-{}'.format(int(time.time()))\n self._path_checkpoint = '{}.checkpoint.h5'.format(path_prefix)\n self._path_model_weights = '{}.weights.h5'.format(path_prefix)\n self._path_model_parameters = '{}.model.yaml'.format(path_prefix)\n\n # Initialize parameters\n self.hyperparameters = {\n 'units': abs(units),\n 'dropout': abs(dropout),\n 'layers': int(abs(layers)),\n 'sequence_length': abs(sequence_length),\n 'patience': abs(patience),\n 'batch_size': int(batch_size * self._gpus),\n 'epochs': abs(epochs)\n }\n\n # Delete any stale checkpoint file\n if os.path.exists(self._path_checkpoint) is True:\n os.remove(self._path_checkpoint)\n\n # ###################################\n # # TensorFlow wizardry\n # config = tf.ConfigProto()\n #\n # # Don't pre-allocate memory; allocate as-needed\n # config.gpu_options.allow_growth = True\n #\n # # Only allow a total of half the GPU memory to be allocated\n # config.gpu_options.per_process_gpu_memory_fraction = 0.8\n #\n # # Crash with DeadlineExceeded instead of hanging forever when your\n # # queues get full/empty\n # config.operation_timeout_in_ms = 60000\n #\n # # Create a session with the above options specified.\n # backend.tensorflow_backend.set_session(tf.Session(config=config))\n # ###################################\n\n # Get data\n self._y_current = self._data.values()\n\n # Create test and training arrays for VALIDATION and EVALUATION\n (x_train,\n 
_x_test,\n self._y_train,\n self._y_test) = self._data.train_test_split()\n\n print(pd.DataFrame(x_train).isnull().any())\n print(pd.DataFrame(_x_test).isnull().any())\n sys.exit(0)\n\n (self.training_rows, self._training_vector_count) = x_train.shape\n (self.test_rows, _) = _x_test.shape\n (_, self._training_class_count) = self._y_train.shape\n\n '''\n The neural network works best on values roughly between -1 and 1, so we\n need to scale the data before it is being input to the neural network.\n We can use scikit-learn for this.\n\n We first create a scaler-object for the input-signals.\n\n Then we detect the range of values from the training-data and scale\n the training-data.\n\n From StackOverflow:\n\n To center the data (make it have zero mean and unit standard error),\n you subtract the mean and then divide the result by the standard\n deviation.\n\n x'=x−μσ\n\n You do that on the training set of data. But then you have to apply the\n same transformation to your testing set (e.g. in cross-validation), or\n to newly obtained examples before forecast. But you have to use the\n same two parameters μ and σ (values) that you used for centering the\n training set.\n\n Hence, every sklearn's transform's fit() just calculates the parameters\n (e.g. μ and σ in case of StandardScaler) and saves them as an internal\n objects state. Afterwards, you can call its transform() method to apply\n the transformation to a particular set of examples.\n\n fit_transform() joins these two steps and is used for the initial\n fitting of parameters on the training set x, but it also returns a\n transformed x'. Internally, it just calls first fit() and then\n transform() on the same data.\n '''\n\n self._x_scaler = MinMaxScaler()\n self._x_train_scaled = self._x_scaler.fit_transform(x_train)\n self._x_test_scaled = self._x_scaler.transform(_x_test)\n\n '''\n The target-data comes from the same data-set as the input-signals,\n because it is the weather-data for one of the cities that is merely\n time-shifted. 
But the target-data could be from a different source with\n different value-ranges, so we create a separate scaler-object for the\n target-data.\n '''\n\n self._y_scaler = MinMaxScaler()\n self._y_train_scaled = self._y_scaler.fit_transform(self._y_train)\n self._y_test_scaled = self._y_scaler.transform(self._y_test)\n\n # Print stuff\n print('\\n> Numpy Data Type: {}'.format(type(x_train)))\n print(\"> Numpy Data Shape: {}\".format(x_train.shape))\n print(\"> Numpy Data Row[0]: {}\".format(x_train[0]))\n print(\"> Numpy Data Row[Last]: {}\".format(x_train[-1]))\n print('> Numpy Targets Type: {}'.format(type(self._y_train)))\n print(\"> Numpy Vector Feature Type: {}\".format(type(x_train[0][0])))\n print(\"> Numpy Targets Shape: {}\".format(self._y_train.shape))\n\n print('> Number of Samples: {}'.format(self._y_current.shape[0]))\n print('> Number of Training Samples: {}'.format(x_train.shape[0]))\n print('> Number of Training Classes: {}'.format(\n self._training_class_count))\n print('> Number of Test Samples: {}'.format(self.test_rows))\n print(\"> Training Minimum Value:\", np.min(x_train))\n print(\"> Training Maximum Value:\", np.max(x_train))\n print('> Number X signals: {}'.format(self._training_vector_count))\n print('> Number Y signals: {}'.format(self._training_class_count))\n\n # Print epoch related data\n print('> Epochs:', self.hyperparameters['epochs'])\n print('> Batch Size:', self.hyperparameters['batch_size'])\n\n # Display estimated memory footprint of training data.\n print(\"> Data size: {:.2f} Bytes\".format(x_train.nbytes))\n\n print('> Scaled Training Minimum Value: {}'.format(\n np.min(self._x_train_scaled)))\n print('> Scaled Training Maximum Value: {}'.format(\n np.max(self._x_train_scaled)))\n\n '''\n The data-set has now been prepared as 2-dimensional numpy arrays. The\n training-data has almost 300k observations, consisting of 20\n input-signals and 3 output-signals.\n\n These are the array-shapes of the input and output data:\n '''\n\n print('> Scaled Training Data Shape: {}'.format(\n self._x_train_scaled.shape))\n print('> Scaled Training Targets Shape: {}'.format(\n self._y_train_scaled.shape))\n\n def model(self, params=None):\n \"\"\"Create the Recurrent Neural Network.\n\n Args:\n None\n\n Returns:\n _model: RNN model\n\n \"\"\"\n # Initialize key variables\n if params is None:\n _hyperparameters = self.hyperparameters\n else:\n _hyperparameters = params\n _hyperparameters['batch_size'] = int(\n _hyperparameters['batch_size'] * self._gpus)\n\n # Calculate the steps per epoch\n epoch_steps = int(\n self.training_rows / _hyperparameters['batch_size']) + 1\n\n '''\n Instantiate the base model (or \"template\" model).\n We recommend doing this with under a CPU device scope,\n so that the model's weights are hosted on CPU memory.\n Otherwise they may end up hosted on a GPU, which would\n complicate weight sharing.\n\n NOTE: multi_gpu_model values will be way off if you don't do this.\n '''\n with tf.device('/cpu:0'):\n serial_model = Sequential()\n\n '''\n We can now add a Gated Recurrent Unit (GRU) to the network. 
This will\n have 512 outputs for each time-step in the sequence.\n\n Note that because this is the first layer in the model, Keras needs to\n know the shape of its input, which is a batch of sequences of arbitrary\n length (indicated by None), where each observation has a number of\n input-signals (num_x_signals).\n '''\n\n serial_model.add(GRU(\n _hyperparameters['units'],\n return_sequences=True,\n recurrent_dropout=_hyperparameters['dropout'],\n input_shape=(None, self._training_vector_count)))\n\n for _ in range(1, _hyperparameters['layers']):\n serial_model.add(GRU(\n _hyperparameters['units'],\n recurrent_dropout=_hyperparameters['dropout'],\n return_sequences=True))\n\n '''\n The GRU outputs a batch from keras_contrib.layers.advanced_activations\n of sequences of 512 values. We want to predict\n 3 output-signals, so we add a fully-connected (or dense) layer which\n maps 512 values down to only 3 values.\n\n The output-signals in the data-set have been limited to be between 0\n and 1 using a scaler-object. So we also limit the output of the neural\n network using the Sigmoid activation function, which squashes the\n output to be between 0 and 1.\n '''\n\n if False:\n serial_model.add(\n Dense(self._training_class_count, activation='sigmoid'))\n\n '''\n A problem with using the Sigmoid activation function, is that we can\n now only output values in the same range as the training-data.\n\n For example, if the training-data only has values between -20 and +30,\n then the scaler-object will map -20 to 0 and +30 to 1. So if we limit\n the output of the neural network to be between 0 and 1 using the\n Sigmoid function, this can only be mapped back to values between\n -20 and +30.\n\n We can use a linear activation function on the output instead. This\n allows for the output to take on arbitrary values. It might work with\n the standard initialization for a simple network architecture, but for\n more complicated network architectures e.g. with more layers, it might\n be necessary to initialize the weights with smaller values to avoid\n NaN values during training. You may need to experiment with this to\n get it working.\n '''\n\n if True:\n # Maybe use lower init-ranges.\n init = RandomUniform(minval=-0.05, maxval=0.05)\n\n serial_model.add(Dense(\n self._training_class_count,\n activation='linear',\n kernel_initializer=init))\n\n # Apply multi-GPU logic.\n if self._gpus == 1:\n parallel_model = serial_model\n print('> Training using single GPU.')\n else:\n try:\n # Use multiple GPUs\n parallel_model = multi_gpu_model(\n serial_model,\n cpu_relocation=True,\n gpus=self._gpus)\n print('> Training using multiple GPUs.')\n except ValueError:\n parallel_model = serial_model\n print('> Single GPU detected. Training using single GPU.')\n\n # Compile Model\n\n '''\n This is the optimizer and the beginning learning-rate that we will use.\n We then compile the Keras model so it is ready for training.\n '''\n\n optimizer = RMSprop(lr=1e-3)\n if self._binary is True:\n parallel_model.compile(\n loss='binary_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n else:\n parallel_model.compile(\n loss=self._loss_mse_warmup,\n optimizer=optimizer,\n metrics=['accuracy'])\n\n '''\n This is a very small model with only two layers. The output shape of\n (None, None, 3) means that the model will output a batch with an\n arbitrary number of sequences, each of which has an arbitrary number of\n observations, and each observation has 3 signals. 
This corresponds to\n the 3 target signals we want to predict.\n '''\n print('\\n> Model Summary (Parallel):\\n')\n print(parallel_model.summary())\n print('\\n> Model Summary (Serial):\\n')\n print(serial_model.summary())\n\n # Create the batch-generator.\n generator = self._batch_generator(\n _hyperparameters['batch_size'],\n _hyperparameters['sequence_length'])\n\n # Validation Set\n\n '''\n The neural network trains quickly so we can easily run many training\n epochs. But then there is a risk of overfitting the model to the\n training-set so it does not generalize well to unseen data. We will\n therefore monitor the model's performance on the test-set after each\n epoch and only save the model's weights if the performance is improved\n on the test-set.\n\n The batch-generator randomly selects a batch of short sequences from\n the training-data and uses that during training. But for the\n validation-data we will instead run through the entire sequence from\n the test-set and measure the prediction accuracy on that entire\n sequence.\n '''\n\n validation_data = (np.expand_dims(self._x_test_scaled, axis=0),\n np.expand_dims(self._y_test_scaled, axis=0))\n\n # Callback Functions\n\n '''\n During training we want to save checkpoints and log the progress to\n TensorBoard so we create the appropriate callbacks for Keras.\n\n This is the callback for writing checkpoints during training.\n '''\n\n callback_checkpoint = ModelCheckpoint(filepath=self._path_checkpoint,\n monitor='val_loss',\n verbose=1,\n save_weights_only=True,\n save_best_only=True)\n\n '''\n This is the callback for stopping the optimization when performance\n worsens on the validation-set.\n '''\n\n callback_early_stopping = EarlyStopping(\n monitor='val_loss',\n patience=_hyperparameters['patience'],\n verbose=1)\n\n '''\n This is the callback for writing the TensorBoard log during training.\n '''\n\n callback_tensorboard = TensorBoard(log_dir='/tmp/23_logs/',\n histogram_freq=0,\n write_graph=False)\n\n '''\n This callback reduces the learning-rate for the optimizer if the\n validation-loss has not improved since the last epoch\n (as indicated by patience=0). The learning-rate will be reduced by\n multiplying it with the given factor. We set a start learning-rate of\n 1e-3 above, so multiplying it by 0.1 gives a learning-rate of 1e-4.\n We don't want the learning-rate to go any lower than this.\n '''\n\n callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',\n factor=0.1,\n min_lr=1e-4,\n patience=0,\n verbose=1)\n\n callbacks = [callback_early_stopping,\n callback_checkpoint,\n callback_tensorboard,\n callback_reduce_lr]\n\n # Train the Recurrent Neural Network\n\n '''We can now train the neural network.\n\n Note that a single \"epoch\" does not correspond to a single processing\n of the training-set, because of how the batch-generator randomly\n selects sub-sequences from the training-set. Instead we have selected\n steps_per_epoch so that one \"epoch\" is processed in a few minutes.\n\n With these settings, each \"epoch\" took about 2.5 minutes to process on\n a GTX 1070. After 14 \"epochs\" the optimization was stopped because the\n validation-loss had not decreased for 5 \"epochs\". This optimization\n took about 35 minutes to finish.\n\n Also note that the loss sometimes becomes NaN (not-a-number). This is\n often resolved by restarting and running the Notebook again. But it may\n also be caused by your neural network architecture, learning-rate,\n batch-size, sequence-length, etc. 
in which case you may have to modify\n those settings.\n '''\n\n print('\\n> Parameters for training\\n')\n pprint(_hyperparameters)\n print('\\n> Starting data training\\n')\n\n history = parallel_model.fit_generator(\n generator=generator,\n epochs=_hyperparameters['epochs'],\n steps_per_epoch=epoch_steps,\n use_multiprocessing=True,\n validation_data=validation_data,\n callbacks=callbacks)\n\n print(\"Plotting History\")\n plt.plot(history.history['loss'], label='Parallel Training Loss')\n plt.plot(history.history['val_loss'], label='Parallel Validation Loss')\n plt.legend()\n plt.show()\n\n # Return\n return parallel_model\n\n def save(self, _model):\n \"\"\"Save the Recurrent Neural Network model.\n\n Args:\n _model: RNN model to save\n\n Returns:\n None\n\n \"\"\"\n # Serialize the model architecture to YAML\n model_yaml = _model.to_yaml()\n with open(self._path_model_parameters, 'w') as yaml_file:\n yaml_file.write(model_yaml)\n\n # Serialize weights to HDF5\n _model.save_weights(self._path_model_weights)\n print('> Saved model to disk')\n\n def load_model(self):\n \"\"\"Load the Recurrent Neural Network model from disk.\n\n Args:\n None\n\n Returns:\n _model: RNN model\n\n \"\"\"\n # Load yaml and create model\n print('> Loading model from disk')\n with open(self._path_model_parameters, 'r') as yaml_file:\n loaded_model_yaml = yaml_file.read()\n _model = model_from_yaml(loaded_model_yaml)\n\n # Load weights into new model\n _model.load_weights(self._path_model_weights, by_name=True)\n print('> Finished loading model from disk')\n\n # Return\n return _model\n\n def evaluate(self, _model):\n \"\"\"Evaluate the model.\n\n Args:\n _model: Model to evaluate\n\n Returns:\n None\n\n \"\"\"\n # Load Checkpoint\n\n '''\n Because we use early-stopping when training the model, it is possible\n that the model's performance has worsened on the test-set for several\n epochs before training was stopped. 
We therefore reload the last saved\n checkpoint, which should have the best performance on the test-set.\n '''\n\n if os.path.exists(self._path_checkpoint):\n _model.load_weights(self._path_checkpoint)\n\n # _model = self.load_model()\n\n optimizer = RMSprop(lr=1e-3)\n if self._binary is True:\n _model.compile(\n loss='binary_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n else:\n _model.compile(\n loss=self._loss_mse_warmup,\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Performance on Test-Set\n\n '''\n We can now evaluate the model's performance on the validation-set.\n This function expects a batch of data, but we will just use one long\n time-series for the test-set, so we just expand the\n array-dimensionality to create a batch with that one sequence.\n '''\n\n x_scaled = self._x_test_scaled\n y_scaled = self._y_test_scaled\n\n # Evaluate the MSE accuracy\n result = _model.evaluate(\n x=np.expand_dims(x_scaled, axis=0),\n y=np.expand_dims(y_scaled, axis=0))\n\n # If you have several metrics you can use this instead.\n print('> Metrics (test-set):')\n for _value, _metric in zip(result, _model.metrics_names):\n print('\\t{}: {:.10f}'.format(_metric, _value))\n\n if self._binary is True:\n # Input-signals for the model.\n x_values = np.expand_dims(x_scaled, axis=0)\n\n # Get the predictions\n predictions_scaled = _model.predict_classes(x_values, verbose=1)\n\n # The output of the model is between 0 and 1.\n # Do an inverse map to get it back to the scale\n # of the original data-set.\n predictions = self._y_scaler.inverse_transform(\n predictions_scaled[0])\n\n # Print meaningful human accuracy values\n print(\n '> Human accuracy {:.3f} %'\n ''.format(general.binary_accuracy(\n predictions, self._y_test) * 100))\n\n def objective(self, params=None):\n \"\"\"Optimize the Recurrent Neural Network.\n\n Args:\n params: Hyperparameters to evaluate\n\n Returns:\n Dict of loss, status and hyperparameters for hyperopt\n\n \"\"\"\n # Initialize key variables\n model = deepcopy(self.model(params=params))\n scaled_vectors = self._x_test_scaled\n test_classes = self._y_test\n\n # Input-signals for the model.\n x_values = np.expand_dims(scaled_vectors, axis=0)\n\n # Get the predictions\n predictions_scaled = model.predict(x_values, verbose=1)\n\n # The output of the model is between 0 and 1.\n # Do an inverse map to get it back to the scale\n # of the original data-set.\n predictions = self._y_scaler.inverse_transform(\n predictions_scaled[0])\n\n # Get the error value\n accuracy = mean_absolute_error(test_classes, predictions)\n\n # Free object memory\n del model\n gc.collect()\n\n # Print meaningful human accuracy values\n if self._binary is True:\n # Print predictions and actuals:\n print(\n '> Human accuracy {:.5f} %'\n ''.format(general.binary_accuracy(\n predictions, test_classes) * 100))\n\n # Return\n return {\n 'loss': (accuracy * -1),\n 'status': STATUS_OK,\n 'estimated_accuracy': accuracy,\n 'hyperparameters': params}\n\n def cleanup(self):\n \"\"\"Release memory and delete checkpoint files.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Delete\n os.remove(self._path_checkpoint)\n\n def stationary(self):\n \"\"\"Evaluate whether the time series is stationary.\n\n Non-stationary time series are probably random walks and are not\n suitable for forecasting.\n\n Args:\n None\n\n Returns:\n state: True if stationary\n\n \"\"\"\n # Initialize key variables\n state = False\n values = []\n\n # statistical test\n result = adfuller(self._y_current)\n adf = result[0]\n print('> Stationarity Test:')\n print(' ADF Statistic: {:.3f}'.format(adf))\n 
print(' p-value: {:.3f}'.format(result[1]))\n print(' Critical Values:')\n for key, value in result[4].items():\n print('\\t{}: {:.3f}'.format(key, value))\n values.append(value)\n\n # Return\n if adf < min(values):\n state = True\n print(' Stationarity: {}'.format(state))\n return state\n\n def _batch_generator(self, batch_size, sequence_length):\n \"\"\"Generator function that creates random batches of training-data.\n\n Args:\n batch_size: Size of batch\n sequence_length: Length of sequence\n\n Returns:\n (x_batch, y_batch)\n\n \"\"\"\n # Infinite loop.\n while True:\n # Allocate a new array for the batch of input-signals.\n x_shape = (\n batch_size, sequence_length, self._training_vector_count)\n x_batch = np.zeros(shape=x_shape, dtype=np.float16)\n\n # Allocate a new array for the batch of output-signals.\n y_shape = (batch_size, sequence_length, self._training_class_count)\n y_batch = np.zeros(shape=y_shape, dtype=np.float16)\n\n # Fill the batch with random sequences of data.\n for i in range(batch_size):\n # Get a random start-index.\n # This points somewhere into the training-data.\n idx = np.random.randint(\n self.training_rows - sequence_length)\n\n # Copy the sequences of data starting at this index.\n x_batch[i] = self._x_train_scaled[idx:idx+sequence_length]\n y_batch[i] = self._y_train_scaled[idx:idx+sequence_length]\n\n yield (x_batch, y_batch)\n\n def _loss_mse_warmup(self, y_true, y_pred):\n \"\"\"Calculate the Mean Squared Error.\n\n Calculate the Mean Squared Error between y_true and y_pred,\n but ignore the beginning \"warmup\" part of the sequences.\n\n We will use Mean Squared Error (MSE) as the loss-function that will be\n minimized. This measures how closely the model's output matches the\n true output signals.\n\n However, at the beginning of a sequence, the model has only seen\n input-signals for a few time-steps, so its generated output may be very\n inaccurate. Using the loss-value for the early time-steps may cause the\n model to distort its later output. 
We therefore give the model a\n \"warmup-period\" of 50 time-steps where we don't use its accuracy in the\n loss-function, in the hope of improving the accuracy for later time-steps.\n\n Args:\n y_true: Desired output.\n y_pred: Model's output.\n\n Returns:\n loss_mean: Mean Squared Error\n\n \"\"\"\n warmup_steps = self._warmup_steps\n\n # The shape of both input tensors is:\n # [batch_size, sequence_length, num_y_signals].\n\n # Ignore the \"warmup\" parts of the sequences\n # by taking slices of the tensors.\n y_true_slice = y_true[:, warmup_steps:, :]\n y_pred_slice = y_pred[:, warmup_steps:, :]\n\n # These sliced tensors both have this shape:\n # [batch_size, sequence_length - warmup_steps, num_y_signals]\n\n # Calculate the MSE loss for each value in these tensors.\n # This outputs a 3-rank tensor of the same shape.\n loss = tf.losses.mean_squared_error(labels=y_true_slice,\n predictions=y_pred_slice)\n\n # Keras may reduce this across the first axis (the batch)\n # but the semantics are unclear, so to be sure we use\n # the loss across the entire tensor, we reduce it to a\n # single scalar with the mean function.\n loss_mean = tf.reduce_mean(loss)\n\n return loss_mean\n\n def plot_train(self, model, start_idx, length=100):\n \"\"\"Plot the predicted and true output-signals.\n\n Args:\n model: Training model\n start_idx: Start-index for the time-series.\n length: Sequence-length to process and plot.\n\n Returns:\n None\n\n \"\"\"\n # Plot\n self._plot_comparison(model, start_idx, length=length, train=True)\n\n def plot_test(self, model, start_idx, length=100):\n \"\"\"Plot the predicted and true output-signals.\n\n Args:\n model: Training model\n start_idx: Start-index for the time-series.\n length: Sequence-length to process and plot.\n\n Returns:\n None\n\n \"\"\"\n # Plot\n self._plot_comparison(model, start_idx, length=length, train=False)\n\n def _plot_comparison(self, model, start_idx, length=100, train=True):\n \"\"\"Plot the predicted and true output-signals.\n\n Args:\n model: Training model\n start_idx: Start-index for the time-series.\n length: Sequence-length to process and plot.\n train: Boolean whether to use training- or test-set.\n\n Returns:\n None\n\n \"\"\"\n # Initialize key variables\n num_train = self.training_rows\n\n # Don't plot if we are looking at binary classes\n if bool(self._binary) is True:\n print('> Will not plot charts for binary class values.')\n return\n\n # End-index for the sequences.\n end_idx = start_idx + length\n\n # Get the complete length of the dataset\n dataset_size = (\n self._y_train.shape[0] + self._y_test.shape[0])\n delta = len(self._y_current) - dataset_size\n\n # Variables for date formatting\n days = mdates.DayLocator() # Every day\n months = mdates.MonthLocator() # Every month\n months_format = mdates.DateFormatter('%b %Y')\n days_format = mdates.DateFormatter('%d')\n\n # Assign other variables dependent on the type of data we are plotting\n if train is True:\n # Use training-data.\n x_values = self._x_train_scaled[start_idx:end_idx]\n y_true = self._y_train[start_idx:end_idx]\n shim = 'Train'\n\n # Datetimes to use for training\n datetimes = self._data.datetime()[:num_train][start_idx:end_idx]\n\n # Only get current values that are a part of the training data\n current = self._y_current[:num_train][start_idx:end_idx]\n\n else:\n # Scale the data\n x_test_scaled = self._x_scaler.transform(\n self._data.vectors_test_all())\n\n # Use test-data.\n x_values = x_test_scaled[start_idx:end_idx]\n y_true = self._y_test[start_idx:end_idx]\n 
shim = 'Test'\n\n # Test offset\n test_offset = self.test_rows + delta\n\n # Datetimes to use for testing\n datetimes = self._data.datetime()[-test_offset:][start_idx:]\n\n # Only get current values that are a part of the test data.\n current = self._y_current[-test_offset:][start_idx:]\n\n # Input-signals for the model.\n x_values = np.expand_dims(x_values, axis=0)\n\n # Use the model to predict the output-signals.\n y_pred = model.predict(x_values)\n\n # The output of the model is between 0 and 1.\n # Do an inverse map to get it back to the scale\n # of the original data-set.\n y_pred_rescaled = self._y_scaler.inverse_transform(y_pred[0])\n\n # For each output-signal.\n for signal in range(len(self._data.labels())):\n # Create a filename\n filename = (\n '/tmp/batch_{}_epochs_{}_training_{}_{}_{}_{}.png').format(\n self.hyperparameters['batch_size'],\n self.hyperparameters['epochs'],\n num_train,\n signal,\n int(time.time()),\n shim)\n\n # Get the output-signal predicted by the model.\n signal_pred = y_pred_rescaled[:, signal]\n\n # Get the true output-signal from the data-set.\n signal_true = y_true[:, signal]\n\n # Create a new chart\n (fig, axis) = plt.subplots(figsize=(15, 5))\n\n # Plot and compare the two signals.\n axis.plot(\n datetimes[:len(signal_true)],\n signal_true,\n label='Current +{}'.format(self._data.labels()[signal]))\n axis.plot(\n datetimes[:len(signal_pred)],\n signal_pred,\n label='Prediction')\n axis.plot(datetimes, current, label='Current')\n\n # Set plot labels and titles\n axis.set_title('{1}ing Forecast ({0} Future Intervals)'.format(\n self._data.labels()[signal], shim))\n axis.set_ylabel('Values')\n axis.legend(\n bbox_to_anchor=(1.04, 0.5),\n loc='center left', borderaxespad=0)\n\n # Add gridlines and ticks\n ax = plt.gca()\n ax.grid(True)\n\n # Add major gridlines\n ax.xaxis.grid(which='major', color='black', alpha=0.2)\n ax.yaxis.grid(which='major', color='black', alpha=0.2)\n\n # Add minor ticks (They must be turned on first)\n ax.minorticks_on()\n ax.xaxis.grid(which='minor', color='black', alpha=0.1)\n ax.yaxis.grid(which='minor', color='black', alpha=0.1)\n\n # Format the tick labels\n ax.xaxis.set_major_locator(months)\n ax.xaxis.set_major_formatter(months_format)\n ax.xaxis.set_minor_locator(days)\n\n # Remove tick marks\n ax.tick_params(axis='both', which='both', length=0)\n\n # Print day numbers on xaxis for Test data only\n if train is False:\n ax.xaxis.set_minor_formatter(days_format)\n plt.setp(ax.xaxis.get_minorticklabels(), rotation=90)\n\n # Rotates and right aligns the x labels, and moves the bottom of\n # the axes up to make room for them\n fig.autofmt_xdate()\n\n # Plot grey box for warmup-period if we are working with training\n # data and the start is within the warmup-period\n if (0 < start_idx < self._warmup_steps):\n if train is True:\n # 'datetimes' is already sliced to begin at start_idx, so\n # offset the warmup boundary into the slice.\n plt.axvspan(\n datetimes[0],\n datetimes[self._warmup_steps - start_idx],\n facecolor='black', alpha=0.15)\n\n # Show and save the image\n if self._display is True:\n fig.savefig(filename, bbox_inches='tight')\n plt.show()\n else:\n fig.savefig(filename, bbox_inches='tight')\n print('> Saving file: {}'.format(filename))\n\n # Close figure\n plt.close(fig=fig)\n\n def plot_predicted_vs_actual(self, model):\n \"\"\"Plot the predicted and true output-signals.\n\n Args:\n model: Training model\n\n Returns:\n None\n\n \"\"\"\n # Initialize key variables\n num_train = self.training_rows\n shim = 
'Comparison'\n\n # Don't plot if we are looking at binary classes\n if bool(self._binary) is True:\n print('> Will not plot charts for binary class values.')\n return\n\n # Scale the data\n x_test_scaled = self._x_scaler.transform(\n self._data.vectors_test_all())\n\n # Use test-data.\n x_values = x_test_scaled[:]\n y_true = self._y_test[:]\n\n # Input-signals for the model.\n x_values = np.expand_dims(x_values, axis=0)\n\n # Use the model to predict the output-signals.\n y_pred = model.predict(x_values)\n\n # The output of the model is between 0 and 1.\n # Do an inverse map to get it back to the scale\n # of the original data-set.\n y_pred_rescaled = self._y_scaler.inverse_transform(y_pred[0])\n\n # For each output-signal.\n for signal in range(len(self._data.labels())):\n # Create a filename\n filename = (\n '/tmp/batch_{}_epochs_{}_training_{}_{}_{}_{}.png').format(\n self.hyperparameters['batch_size'],\n self.hyperparameters['epochs'],\n num_train,\n signal,\n int(time.time()),\n shim)\n\n # Get the output-signal predicted by the model.\n signal_pred = y_pred_rescaled[:, signal]\n\n # Get the true output-signal from the data-set.\n signal_true = y_true[:, signal]\n\n # Create a new chart\n (fig, axis) = plt.subplots(figsize=(15, 5))\n\n # Plot and compare the two signals.\n plt.scatter(\n signal_pred[:len(signal_true)],\n signal_true,\n alpha=0.1,\n label=(\n 'Predicted vs. Actual +{}'.format(\n self._data.labels()[signal])))\n\n # Set plot labels and titles\n axis.set_title(\n 'Predicted vs. Actual ({0} Future Intervals)'.format(\n self._data.labels()[signal]))\n axis.set_ylabel('Actual')\n axis.set_xlabel('Predicted')\n axis.legend(\n bbox_to_anchor=(1.04, 0.5),\n loc='center left', borderaxespad=0)\n\n # Add gridlines and ticks\n ax = plt.gca()\n ax.grid(True)\n\n # Add major gridlines\n ax.xaxis.grid(which='major', color='black', alpha=0.2)\n ax.yaxis.grid(which='major', color='black', alpha=0.2)\n\n # Add minor ticks (They must be turned on first)\n ax.minorticks_on()\n ax.xaxis.grid(which='minor', color='black', alpha=0.1)\n ax.yaxis.grid(which='minor', color='black', alpha=0.1)\n\n # Remove tick marks\n ax.tick_params(axis='both', which='both', length=0)\n\n # Show and save the image\n if self._display is True:\n fig.savefig(filename, bbox_inches='tight')\n plt.show()\n else:\n fig.savefig(filename, bbox_inches='tight')\n print('> Saving file: {}'.format(filename))\n\n # Close figure\n plt.close(fig=fig)\n\n\nclass ModelMGPU(Model):\n '''\n https://github.com/keras-team/keras/issues/2436#issuecomment-354882296\n '''\n def __init__(self, ser_model, **kwargs):\n pmodel = multi_gpu_model(ser_model, **kwargs)\n self.__dict__.update(pmodel.__dict__)\n self._smodel = ser_model\n\n def __getattribute__(self, attrname):\n '''Override load and save methods to be used from the serial-model. The\n serial-model holds references to the weights in the multi-gpu model.\n '''\n # return Model.__getattribute__(self, attrname)\n if 'load' in attrname or 'save' in attrname:\n return getattr(self._smodel, attrname)\n\n return super(ModelMGPU, self).__getattribute__(attrname)\n","sub_path":"timeseries/forecast/forecast/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":40310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
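The warmup-loss idea in _loss_mse_warmup above is easy to check outside Keras: slice off the first warmup_steps time-steps of both tensors along the time axis, then average the squared error over what remains. A minimal NumPy sketch of that slicing (the shapes and toy data are illustrative assumptions, not values from the module):

import numpy as np

def mse_with_warmup(y_true, y_pred, warmup_steps=50):
    # Both arrays have shape [batch_size, sequence_length, num_y_signals].
    # Drop the warmup period along the time axis before comparing.
    diff = y_true[:, warmup_steps:, :] - y_pred[:, warmup_steps:, :]
    return float(np.mean(diff ** 2))

rng = np.random.default_rng(0)
y_true = rng.normal(size=(4, 100, 3))
y_pred = y_true + rng.normal(scale=0.1, size=y_true.shape)
print(mse_with_warmup(y_true, y_pred))  # ~0.01; the first 50 steps are ignored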
+{"seq_id":"115863689","text":"import unittest\nfrom ddt import ddt, data, unpack\nfrom typing import List\nfrom merge_k_sorted_lists import Solution, ListNode\n\n@ddt\nclass Tester(unittest.TestCase):\n def setUp(self):\n self.s = Solution()\n\n @data(\n [ [[]], [] ],\n [ [[1]], [1] ],\n [ [[1],[]], [1] ],\n [ [[1,4,5],[1,3,4],[2,6]], [1,1,2,3,4,4,5,6] ],\n [ [[1,4,5],[1,3,4],[2,6],[1,2,10,11]], [1,1,1,2,2,3,4,4,5,6,10,11] ],\n )\n @unpack\n def test(self, input_lists, expected):\n list_of_listnodes = []\n for list in input_lists:\n list_of_listnodes.append(self.makeListNode(list))\n\n ret = self.s.mergeKLists(list_of_listnodes)\n if ret:\n self.assertEqual(ret.toList(), expected)\n else:\n self.assertEqual([], expected)\n\n def makeListNode(self, nums: List[int]) -> ListNode:\n if not nums:\n return None\n else:\n return ListNode(nums[0], self.makeListNode(nums[1:]))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"leetcode/23_merge_k_sorted_lists/merge_k_sorted_lists_test.py","file_name":"merge_k_sorted_lists_test.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"514006084","text":"from scripts.Locators.cartPage import CartPageLocators\nfrom scripts.Actions import Action\n\nclass CartPage(Action):\n\n def __init__(self, driver):\n self.driver = driver\n\n self.home_link_xpath = CartPageLocators.home_link_xpath\n self.qtyItemPlus_link_selector = CartPageLocators.qtyItemPlus_link_selector\n self.qtyItemMinus_link_xpath = CartPageLocators.qtyItemMinus_link_xpath\n self.removeItem_btn_selector = CartPageLocators.removeItem_btn_selector\n self.continueShop_link_textlink = CartPageLocators.continueShop_link_textlink\n self.proceedToCheckout_link_textlink = CartPageLocators.proceedToCheckout_link_textlink\n self.prodDetView_link_xpath = CartPageLocators.prodDetView_link_xpath\n\n","sub_path":"scripts/Pages/cartPage.py","file_name":"cartPage.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"88570215","text":"import requests\nimport random\nimport json\nimport os\nimport time\nimport multiprocessing\n\nimport re\nfrom lxml import etree\nfrom rk import *\nfrom pyquery import PyQuery as pq\nfrom configparser import ConfigParser\nimport datetime\nimport sched\nfrom bcolors import bcolors\n\nurl = 'https://passport.jd.com/new/login.aspx'\n\nheaders = {\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36',\n 'ContentType':\n 'text/html; charset=utf-8',\n 'Accept-Encoding':\n 'gzip, deflate, sdch',\n 'Accept-Language':\n 'zh-CN,zh;q=0.8',\n 'Connection':\n 'keep-alive',\n}\n\ns = requests.Session()\ns.headers = headers\n\ncfg = ConfigParser()\ncfg.read('couponConfig.ini')\n\n# 请求登录页面\nreq1 = s.get(url=url, headers=headers)\n\nsel = etree.HTML(req1.content)\nuuid = sel.xpath('//input[@id=\"uuid\"]/@value')[0]\n\neid = sel.xpath('//input[@id=\"eid\"]/@value')[0]\nsa_token = sel.xpath('//input[@id=\"sa_token\"]/@value')[0]\npubKey = sel.xpath('//input[@id=\"pubKey\"]/@value')[0]\nt = sel.xpath('//input[@id=\"token\"]/@value')[0]\n\nr = random.random()\nlogin_url = 'https://passport.jd.com/uc/loginService'\n\n\nclass JD(object):\n def __init__(self, username, password, rk_username=None, rk_pwd=None):\n self.username = username\n self.password = password\n rk_username = \"wallflower\"\n rk_pwd = \"mjq123456\"\n self.rkclient = RClient(rk_username, rk_pwd)\n self.trackid = ''\n self.pid = ''\n self.cookies = {}\n\n # 账号登录函数\n def login(self):\n\n params = {\n 'uuid': uuid,\n 'eid': eid,\n # 'fp': 'a2fd52211772d8fea0515bedca560b0b',\n '_t': t,\n 'loginType': 'c',\n 'loginname': self.username,\n 'nloginpwd': self.password,\n 'chkRememberMe': '',\n 'authcode': '',\n 'pubKey': pubKey,\n 'sa_token': sa_token,\n # 'seqSid': '5574250748814772000'\n }\n\n headers = {\n 'Referer':\n 'https://passport.jd.com/uc/login?ltype=logout',\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36',\n 'X-Requested-With':\n 'XMLHttpRequest'\n }\n\n # logout first\n s.get(\"https://passport.jd.com/uc/login?ltype=logout\")\n\n # 验证码图片\n imgcode = 'http:' + sel.xpath('//img[@id=\"JD_Verification1\"]/@src2')[0]\n img = requests.get(imgcode)\n # 把这个路径替换成自己电脑jd.py文件夹的路径,/Users/zhangkai/Desktop/JD\n with open('/home/damon/mydata/git/JD_Utils/authcode.jpg', 'wb') as f:\n f.write(img.content)\n im = open('authcode.jpg', 'rb').read()\n print('开始识别验证码...')\n\n # print(imgcode) # 手动验证码连接\n imgcode1 = input(\"请输入验证码:\")\n\n # 自动打码\n # imgcode1 = self.rkclient.rk_create(im, 3040)['Result']\n print(bcolors.OKGREEN + imgcode1)\n\n if imgcode != '':\n\n # params['authcode'] = input('请输入验证码:') # 手动输验证码\n\n params['authcode'] = str(imgcode1)\n req2 = s.post(login_url, data=params, headers=headers)\n\n patt = ''\n self.trackid = re.compile(patt).findall(str(s.cookies))\n\n js = json.loads(req2.text[1:-1])\n print(js)\n if js.get('success'):\n print('登录成功')\n else:\n print('登录失败')\n raise Exception(\"Login failed\")\n else:\n req2 = s.post(login_url, data=params, headers=headers)\n\n patt = ''\n self.trackid = re.compile(patt).findall(str(s.cookies))\n\n js = json.loads(req2.text[1:-1])\n\n if js.get('success'):\n print('登录成功')\n else:\n print('登录失败')\n raise Exception(\"Login failed\")\n\n def addcart(self):\n\n self.pid = input('请输入要加入购物车的商品编号:')\n pcount = input('请输入加入数量:')\n add_carturl = 'https://cart.jd.com/gate.action?pid=' + self.pid + 
'&pcount=' + pcount + '&ptype=1'\n # add_carturl = 'https://cart.jd.com/gate.action?pid=3659204&pcount=1&ptype=1'\n\n req4 = s.get(add_carturl)\n\n if re.compile('(.*?)').findall(\n req4.text)[0] == '商品已成功加入购物车':\n print('商品已成功加入购物车')\n else:\n print('添加购物车失败')\n\n def submit(self):\n # 购物车页面\n carturl = 'https://cart.jd.com'\n req5 = s.get(carturl)\n\n # 取消选择某个商品\n cancelitemurl = 'https://cart.jd.com/cancelItem.action?rd' + str(r)\n form_data = {\n 'outSkus': '',\n 'pid': self.pid, # 商品id\n 'ptype': '1',\n 'packId': '0',\n 'targetId': '0',\n 'promoID': '0',\n 'locationId': '1-2810-6501-0' # 地址代码\n }\n\n req6 = s.post(cancelitemurl, data=form_data)\n\n # 选择某个商品\n selectitemurl = 'https://cart.jd.com/selectItem.action?rd' + str(r)\n req7 = s.post(selectitemurl, data=form_data)\n\n timestamp = int(time.time() * 1000)\n # 订单结算页\n orderInfo = 'https://trade.jd.com/shopping/order/getOrderInfo.action?rid=' + str(\n timestamp)\n\n # 提交订单url\n submitOrder = 'https://trade.jd.com/shopping/order/submitOrder.action'\n\n submit_data = {\n 'overseaPurchaseCookies': '',\n 'submitOrderParam.sopNotPutInvoice': 'false',\n 'submitOrderParam.trackID': self.trackid[0],\n 'submitOrderParam.ignorePriceChange': '0',\n 'submitOrderParam.btSupport': '0',\n 'submitOrderParam.eid': eid,\n 'submitOrderParam.fp': 'b31fc738113fbc4ea5fed9fc9811acc6',\n # 'riskControl': 'D0E404CB705B9732D8D7A53159E363F2140ADCDE164C1F9CABA71F1D7552B70E5C9C6041832CEB4B',\n }\n\n ordertime = input('''请选择:\n 1.设置下单时间\n 2.选择立即下单(可用于监控库存,自动下单)\n 请输入选择(1/2):\n ''')\n\n if ordertime == '1':\n set_time = input('请按照2017-05-01 23:11:11格式输入下单时间:')\n timeArray = time.mktime(\n time.strptime(set_time, '%Y-%m-%d %H:%M:%S'))\n while True:\n if time.time() >= timeArray:\n\n print('正在提交订单...')\n req8 = s.post(submitOrder, data=submit_data)\n js1 = json.loads(req8.text)\n print(js1)\n # 判断是否下单成功\n if js1['success'] == True:\n print('下单成功!')\n else:\n print('下单失败')\n break\n else:\n # print('等待下单...')\n continue\n # 直接下单\n elif ordertime == '2':\n while True:\n area = '1_2810_6501_0' # 地址编码,这里请替换成自己地区的编码\n stockurl = 'http://c0.3.cn/stock?skuId=' + self.pid + '&cat=652,829,854&area=' + area + '&extraParam={%22originid%22:%221%22}'\n resp = s.get(stockurl)\n jsparser = json.loads(resp.text)\n # 33 有货 34 无货\n if jsparser['stock']['StockState'] == 33 and jsparser['stock']['StockStateName'] == '现货':\n print('库存状态:', jsparser['stock']['StockStateName'])\n\n req8 = s.post(submitOrder, data=submit_data)\n print('正在提交订单...')\n js1 = json.loads(req8.text)\n\n # 判断是否下单成功\n if js1['success'] == True:\n print('下单成功!')\n break\n else:\n print('下单失败')\n # 3秒后重新尝试下单,可自行修改时间间隔\n time.sleep(3)\n continue\n elif jsparser['stock']['StockState'] != 33:\n print('无货,监控中...')\n time.sleep(3) # 请酌情修改时间间隔,最少1秒\n continue\n\n def coupon_section(self, section):\n def event_func(action):\n r = s.get(couponURL)\n d = pq(r.text)\n content = d(\"div.content\").text()\n print(bcolors.OKGREEN + content)\n\n if u\"已经参加过\" in content:\n print(\"scheduler cancelled\")\n scheduler.cancel(action)\n\n def perform(couponTime, overtime, inc, t, section):\n currentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print(bcolors.HEADER + section + bcolors.OKBLUE + \" Coupon Time: \",\n t, \"Current Time:\",\n currentTime + \" \" + multiprocessing.current_process().name)\n\n timediff = time.time() - couponTime\n if timediff < (float(overtime) * 60):\n action = scheduler.enter(\n inc, 0, perform, (couponTime, overtime, inc, t, section))\n event_func(action)\n\n # 
为每一个section开启一个独立进程,运行独立的scheduler\n scheduler = sched.scheduler(time.time, time.sleep)\n\n print(section)\n incc = float(cfg.get(section, \"inc\"))\n couponURL = cfg.get(section, \"url\")\n timeStr = cfg.get(section, \"time\")\n overtime = cfg.getint(section, \"overtime\")\n leadtime = cfg.getint(section, \"leadtime\")\n timeList = timeStr.split(\",\")\n\n for t in timeList:\n l = t.split(\":\")\n hour = l[0]\n minute = l[1]\n # print(\"enterabs: \", hour, minute)\n couponTime = each_day_time(int(hour), (int(minute) - leadtime), 0)\n scheduler.enterabs(couponTime, 0, perform, (couponTime, overtime,\n incc, t, section))\n\n scheduler.run()\n\n def login_by_QR(self):\n # jd login by QR code\n try:\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print(u'{0} > 请打开京东手机客户端,准备扫码登陆:'.format(time.ctime()))\n\n urls = ('https://passport.jd.com/new/login.aspx',\n 'https://qr.m.jd.com/show', 'https://qr.m.jd.com/check',\n 'https://passport.jd.com/uc/qrCodeTicketValidation')\n\n self.headers = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',\n 'ContentType':\n 'text/html; charset=utf-8',\n 'Accept-Encoding':\n 'gzip, deflate, sdch',\n 'Accept-Language':\n 'zh-CN,zh;q=0.8',\n 'Connection':\n 'keep-alive',\n }\n # step 1: open login page\n resp = s.get(urls[0], headers=self.headers)\n if resp.status_code != requests.codes.OK:\n print(u'获取登录页失败: %u' % resp.status_code)\n return False\n\n # save cookies\n for k, v in resp.cookies.items():\n self.cookies[k] = v\n\n # step 2: get QR image\n resp = s.get(\n urls[1],\n headers=self.headers,\n cookies=self.cookies,\n params={'appid': 133,\n 'size': 147,\n 't': (time.time() * 1000)})\n if resp.status_code != requests.codes.OK:\n print(u'获取二维码失败: %u' % resp.status_code)\n return False\n\n # save cookies\n for k, v in resp.cookies.items():\n self.cookies[k] = v\n\n # save QR code\n image_file = 'qr.png'\n with open(image_file, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=1024):\n f.write(chunk)\n\n # scan QR code with phone\n os.system('start ' + image_file)\n\n # step 3: check scan result\n # mush have\n self.headers['Host'] = 'qr.m.jd.com'\n self.headers['Referer'] = 'https://passport.jd.com/new/login.aspx'\n\n # check if QR code scanned\n qr_ticket = None\n retry_times = 100\n while retry_times:\n retry_times -= 1\n resp = s.get(\n urls[2],\n headers=self.headers,\n cookies=self.cookies,\n params={\n 'callback':\n 'jQuery%u' % random.randint(100000, 999999),\n 'appid': 133,\n 'token': self.cookies['wlfstk_smdl'],\n '_': (time.time() * 1000)\n })\n\n if resp.status_code != requests.codes.OK:\n continue\n\n n1 = resp.text.find('(')\n n2 = resp.text.find(')')\n rs = json.loads(resp.text[n1 + 1:n2])\n\n if rs['code'] == 200:\n print(u'{} : {}'.format(rs['code'], rs['ticket']))\n qr_ticket = rs['ticket']\n break\n else:\n print(u'{} : {}'.format(rs['code'], rs['msg']))\n time.sleep(3)\n\n if not qr_ticket:\n print(u'二维码登陆失败')\n return False\n\n # step 4: validate scan result\n # must have\n self.headers['Host'] = 'passport.jd.com'\n self.headers[\n 'Referer'] = 'https://passport.jd.com/uc/login?ltype=logout'\n resp = s.get(\n urls[3],\n headers=self.headers,\n cookies=self.cookies,\n params={'t': qr_ticket}, )\n if resp.status_code != requests.codes.OK:\n print(u'二维码登陆校验失败: %u' % resp.status_code)\n return False\n\n # login succeed\n self.headers['P3P'] = resp.headers.get('P3P')\n for k, v in resp.cookies.items():\n self.cookies[k] = v\n\n 
print(u'登陆成功')\n return True\n\n except Exception as e:\n print('Exp:', e)\n raise\n\n return False\n\n\ndef each_day_time(hour, min, sec):\n '''返回当天指定时分秒的时间'''\n struct = time.localtime()\n if hour < struct.tm_hour or (hour == struct.tm_hour and\n min <= struct.tm_min):\n day = struct.tm_mday + 1\n else:\n day = struct.tm_mday\n return time.mktime((struct.tm_year, struct.tm_mon, day, hour, min, sec,\n struct.tm_wday, struct.tm_yday, struct.tm_isdst))\n\n\ndef coupon_process(jd):\n cfgSections = cfg.sections()\n\n ps = []\n for section in cfgSections:\n p = multiprocessing.Process(\n name=section, target=coupon, args=(jd, section))\n print('Child process %s start.' % section)\n p.start()\n ps.append(p)\n\n for p in ps:\n p.join()\n print('Child process %s end.' % p.name)\n\n\ndef coupon(jd, section):\n jd.coupon_section(section)\n\n\nif __name__ == '__main__':\n\n # jd_user = input('请输入京东账号:')\n # jd_pwd = input('请输入京东密码:')\n # rk_user = input('请输入若快账号:')\n # rk_pwd = input('请输入若快密码:')\n # jd_user = \"blurm\"\n # jd_pwd = \"Shopping4JD\"\n jd_user = \"504786475\"\n jd_pwd = \"tulipxiao@@55\"\n a = JD(jd_user, jd_pwd)\n a.login_by_QR()\n # a.login()\n coupon_process(a)\n # a.addcart()\n # a.coupon()\n # a.addcart()\n # a.submit()\n","sub_path":"jd.py","file_name":"jd.py","file_ext":"py","file_size_in_byte":16412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
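The coupon logic above is a small scheduling pattern worth isolating: each_day_time turns an HH:MM into an absolute timestamp for today (rolling to tomorrow if it has already passed), enterabs fires the first call, and perform re-enters itself every inc seconds until overtime minutes have elapsed. A stripped-down, self-contained sketch of that loop (the two-second start delay and the print body are placeholders, not the script's real polling work):

import sched
import time

scheduler = sched.scheduler(time.time, time.sleep)

def poll(start_ts, overtime_min, inc):
    print('polling at', time.strftime('%H:%M:%S'))
    # Keep re-entering until overtime_min minutes have elapsed.
    if time.time() - start_ts < overtime_min * 60:
        scheduler.enter(inc, 0, poll, (start_ts, overtime_min, inc))

start_ts = time.time() + 2   # stand-in for each_day_time(hour, minute, 0)
scheduler.enterabs(start_ts, 0, poll, (start_ts, 0.1, 1))
scheduler.run()              # blocks; polls about once per second for ~6 seconds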
+{"seq_id":"488418878","text":"import os\nimport sys\n\nfrom PyQt5.QtCore import pyqtSlot, QItemSelectionModel\nfrom PyQt5.QtGui import QStandardItem,QStandardItemModel\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QLabel, \\\n QMessageBox\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.style as mplStyle #一个模块\nfrom matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\n\n\nfrom Compute.fi_compute.ficompute_ui import Ui_ficompute\n\n\nclass Compute(QMainWindow):\n def __init__(self,parent = None):#单继承\n super().__init__(parent) #调用父类构造函数,创建窗体\n self.ui = Ui_ficompute() #创建UI对象\n self.ui.setupUi(self) #构造UI界面\n self.setCentralWidget(self.ui.splitter) # 使两个空间占满\n\n\n # 设置tableview的规格\n self.__ColCount = 6 #共设置6列数据,分别为深度,杨氏模量,泊松比,断裂韧性,水平应力差,垂向应力差\n self.itemModel = QStandardItemModel(5, self.__ColCount, self) # 创建QStandardItemModel的数据模型,并设置行数与列数\n\n self.selectionModel = QItemSelectionModel(self.itemModel) # Item选择模型,以self.itemModel作为参数,从而反映数据模型itemModel的项数据选择操作\n self.selectionModel.currentChanged.connect(self.do_curChanged) # 在选择的当前单元格发生变化时会发射此信号,从而在槽函数显示当前单元格的行列号及内容\n\n\n # logdata的模型相关设置\n self.ui.logdata.setModel(self.itemModel) # logdata的数据模型设置为之前定义过的数据模型\n\n # self.ui.logdata.setSelectionModel(self.selectionModel) # 设置选择模型\n # oneorMore = QAbstractItemView.ExtendedSelection # 选择模式\n # self.ui.logdata.setSelectionMode(oneorMore) # 可以多选单元格\n #\n # itemOrRow = QAbstractItemView.SelectItems # 项选择模式\n # self.ui.logdata.setSelectionBehavior(itemOrRow) # 单元格选择\n # self.ui.logdata.verticalHeader().setDefaultSectionSize(22) #缺省行高\n\n # 样式设置\n mplStyle.use(\"classic\") # 使用样式,必须在绘图之前调用,修改字体后才可显示汉字\n mpl.rcParams['font.sans-serif'] = ['HeiTi', 'SimHei'] # 显示汉字为 黑体, 汉字\n mpl.rcParams['font.size'] = 12\n mpl.rcParams['axes.unicode_minus'] = False # 减号unicode编码\n\n # 可压性权重参数按钮的设置\n\n\n #状态栏及工具栏等设置\n self.__buildStatusBar() # 构建状态栏\n self.curve_data = [] # 由深度和可压性指数组成的二维数组\n# ==========自定义功能函数==================\n def __buildStatusBar(self): # 构建状态栏\n self.LabCellPos = QLabel(\"当前单元格\",self)\n self.LabCellPos.setMinimumWidth(180)\n self.ui.statusbar.addWidget(self.LabCellPos)\n\n self.LabCellText = QLabel(\"单元格内容:\", self)\n self.LabCellText.setMinimumWidth(150)\n self.ui.statusbar.addWidget(self.LabCellText)\n\n self.LabCurFile = QLabel(\"当前文件\", self)\n self.ui.statusbar.addPermanentWidget(self.LabCurFile)\n\n def __iniModelFromStringList(self, allLines): # 从可压性csv文件的字符串列表构建模型\n rowCnt = len(allLines) # 文本行数,第1行为行表头\n self.itemModel.setRowCount(rowCnt - 1) # 实际数据行数\n\n headerText = allLines[0].strip() # 第1行是表头,去掉末尾的换行符“\\n”\n headerList = headerText.split(\"\\t\")\n headerList.append(\"可压性指数\")\n self.itemModel.setHorizontalHeaderLabels(headerList) # 设置表头标题\n\n for i in range(rowCnt-1): # 第一行作为表头,此后才是数据体\n lineText = allLines[i+1].strip() # 一行的数据,以\\t分割\n strList = lineText.split(\"\\t\") # 分割为字符串列表\n for j in range(self.__ColCount):\n item = QStandardItem(strList[j])\n self.itemModel.setItem(i,j,item)\n\n def __createFigure(self): # figure对象为绘图的画布对象,figurecanvas可以放在页面上\n self.__fig = mpl.figure.Figure()\n figCanvas = FigureCanvas(self.__fig) # 创建FigureCanvas对象,必须传递一个Figure对象\n self.__fig.suptitle(\"可压性曲线\")\n\n naviToolbar = NavigationToolbar(figCanvas, self) # 创建NavigationToolbar工具栏\n actList = naviToolbar.actions() # 关联的Action列表\n count = len(actList) # Action的个数\n self.addToolBar(naviToolbar)\n\n\n self.ui.layout_curve.addWidget(figCanvas)\n\n def __drawFiCurve(self):\n 
curve = self.__fig.add_subplot(1,1,1)\n x = []\n y = []\n for i in self.curve_data:\n x.append(i[0])\n y.append(i[1])\n curve.plot(y,x)\n curve.set_xlabel(\"可压性指数\")\n curve.set_ylabel(\"深度\")\n curve.set_xlim([0,100])\n curve.set_ylim([0,1000])\n curve.xaxis.tick_top()\n\n\n# ==========由connectSlotsByName() 自动连接的槽函数==================\n @pyqtSlot() ##“打开文件”\n def on_actopen_triggered(self):\n curPath = os.getcwd() # 获取当前路径\n filename, flt = QFileDialog.getOpenFileName(self, \"打开一个文件\", curPath,\n \"可压性数据文件(*.txt *.csv);;\"\n \"可压性数据文件(*.xlsx);;\"\n \"所有文件(*.*)\")\n if(filename == \"\"):\n return\n\n self.LabCurFile.setText(\"当前文件:\"+ filename)\n\n aFile = open(filename, \"r\", encoding=\"utf-8\") # 需要设置解码格式为utf-8\n allLines = aFile.readlines() # 读取所有行,list类型,每行末尾带有\\n\n aFile.close() # 使用完后需关闭这个文件对象\n\n self.__iniModelFromStringList(allLines) # 将每一行读取到logdata中\n\n @pyqtSlot()#修改权重因子时设置权重为可修改\n def on_pbmodify_pressed(self):\n self.ui.f1.setEnabled(True)\n self.ui.f2.setEnabled(True)\n self.ui.f3.setEnabled(True)\n self.ui.f4.setEnabled(True)\n self.ui.f5.setEnabled(True)\n\n @pyqtSlot()#确定权重因子时设置权重为可修改\n def on_pbconfirm_pressed(self):\n self.ui.f1.setEnabled(False)\n self.ui.f2.setEnabled(False)\n self.ui.f3.setEnabled(False)\n self.ui.f4.setEnabled(False)\n self.ui.f5.setEnabled(False)\n\n @pyqtSlot()#计算可压性并填写进最后一列\n def on_pbcompute_pressed(self):\n #添加可压性的列\n\n #取出spinbox中的权重因子\n f1 = self.ui.f1.value()\n f2 = self.ui.f2.value()\n f3 = self.ui.f2.value()\n f4 = self.ui.f2.value()\n f5 = self.ui.f2.value()\n\n # 计算可压性\n try:\n for i in range(self.itemModel.rowCount()): # 第i行的数据运算\n # 每一列是一个数据模型,需要将其转化为float\n col1 = float(self.itemModel.item(i,1).text())\n col2 = float(self.itemModel.item(i,2).text())\n col3 = float(self.itemModel.item(i,3).text())\n col4 = float(self.itemModel.item(i,4).text())\n col5 = float(self.itemModel.item(i,5).text())\n fi = col1*f1 + col2*f2 + col3*f3 + col4*f4 + col5*f5 # 计算公式,获取lineedit中的权重因子\n item_fi = QStandardItem(str(fi)) # 计算可压性后,将可压性作为最后一列的项放入\n self.itemModel.setItem(i, 6, item_fi) # 将每个深度的可压性导入到item中\n except:\n dlgTitle = \"错误提示!\"\n strInfo = \"请检查数据输入\"\n QMessageBox.warning(self, dlgTitle, strInfo)\n\n @pyqtSlot()\n def on_pbpaint_pressed(self):\n try:\n for i in range(self.itemModel.rowCount()):\n depth = float(self.itemModel.item(i,0).text())\n fi = float(self.itemModel.item(i,6).text())\n self.curve_data.append([depth,fi])\n self.__createFigure() # 将figurecanvas画布展现在右侧\n self.__drawFiCurve()\n except:\n dlgTitle = \"错误提示!\"\n strInfo = \"请检查可压性参数是否输入正确\"\n QMessageBox.warning(self, dlgTitle, strInfo)\n\n\n# ==========自定义槽函数 不需要添加@pyqtslot============\n def do_curChanged(self, current, previous):\n if(current != None): #当前模型索引有效\n text=\"当前单元格:%d行,%d列\"%(current.row(),current.column())\n self.LabCellPos.setText(text)\n item = self.itemModel.itemFromIndex(current) # 从模型索引获得Item\n self.LabCellText.setText(\"单元格内容:\" + item.text()) # 显示item的文字内容\n\n\nif __name__ == \"__main__\": # 显示GUI界面的主函数\n app = QApplication(sys.argv)\n form=Compute()\n form.show()\n sys.exit(app.exec_())\n","sub_path":"Compute/fi_compute/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
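The compute slot above boils down to a round-trip through QStandardItemModel: items store text, so each cell is read back with item(row, col).text(), converted to float, combined as a weighted sum, and written into the result column as a new QStandardItem. A self-contained sketch of just that pattern (the two value columns and fixed weights are made up for illustration; on a headless machine Qt may need QT_QPA_PLATFORM=offscreen):

import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QStandardItem, QStandardItemModel

app = QApplication(sys.argv)        # Qt objects need an application instance

model = QStandardItemModel(2, 3)    # columns: value1, value2, weighted sum
weights = (0.5, 0.5)
for r, (a, b) in enumerate([(1.0, 2.0), (3.0, 4.0)]):
    model.setItem(r, 0, QStandardItem(str(a)))
    model.setItem(r, 1, QStandardItem(str(b)))

for r in range(model.rowCount()):
    # Items hold text, so convert back to float before computing.
    a = float(model.item(r, 0).text())
    b = float(model.item(r, 1).text())
    model.setItem(r, 2, QStandardItem(str(a * weights[0] + b * weights[1])))

print([model.item(r, 2).text() for r in range(model.rowCount())])  # ['1.5', '3.5']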
+{"seq_id":"473586674","text":"from robot.api import logger\nfrom robot.api.deco import keyword\nfrom robot.libraries.BuiltIn import BuiltIn\n\n\n@keyword(\"Conectarme al sistema remoto de creacion de helados\")\ndef test_python():\n logger.console(u\"Funcion expuesta desde modulo Python\")\n return \"Test\"\n\n\ndef second_test_python():\n logger.console(u'Segunda funcion expuesta desde modulo Python')\n return \"Test\"\n\n@keyword(\"Lista productos de la web z\")\ndef abrir_navegador_en_la_url(class_name):\n selenium_lib = BuiltIn().get_library_instance(\"SeliniumLibrary\")\n browser = selenium_lib.current_browser()\n lista_productos = browser.find_element_by_class_name(class_name)\n productos = lista_productos.find_elements_by_tag_name('article')\n for producto in productos:\n logger.console(producto)\n","sub_path":"External/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"78530975","text":"import numpy as np\n\n\n#функция из варианта\ndef function_v12(x):\n return np.sin(x) + x\n\n#произведения разностей искомого х и Х с известным значением без n-го элемента\ndef omega_function_Lagrange(x, n, X):\n res = 1\n for i in range(n):\n res*= x-X[i]\n return res\n\n#произведения разностей искомого х и Х с известным значением с n-ым элементом\ndef omega_function_Newton(x, n, X):\n res = 1\n for i in range(n+1):\n res*= x-X[i]\n return res\n\n#произведения разностей текущего х и Х с известным значением\ndef omega_function_derivative(k, n, X):\n res = 1\n for i in range(n):\n if (i!=k):\n res*= X[k]-X[i]\n return res\n\n#генирируем значения Y\ndef count_y(fun, X):\n n = len(X)\n Y = [] \n for i in range(n):\n Y.append(fun(X[i]))\n print(\"Y: \", Y)\n return Y\n\n#составляем полином Лагранжа\ndef lagrange_polinom(x, X, Y):\n n = len(X)\n L = 0;\n res = \"L(x) = \"\n for i in range(n):\n tmp = Y[i]/omega_function_derivative(i, n, X);\n if(tmp>0 and i>0):\n res+=\" + \"\n res+=str(tmp)\n for j in range(n):\n if(i!=j):\n if (X[j]>0):\n res+=\"(x - {})\".format(X[j])\n else:\n res+=\"(x + {})\".format(-X[j])\n L += (omega_function_Lagrange(x, n, X) * Y[i])/((x - X[i]) * omega_function_derivative(i, n, X))\n print(res)\n return L\n\n#находим значение коэффицентов полинома Ньютона\ndef f(n, i, j, X, Y):\n if(n==0):\n return (Y[i] - Y[j]) / (X[i] - X[j])\n else:\n return (f(n-1, i, j-1, X, Y) - f(n-1, i+1, j, X, Y)) / (X[i] - X[j])\n\n#составляем полином Ньютона\ndef newton_polynom(x, X, Y):\n n = len(X)\n N = Y[0] + (x- X[0])*f(0, 1, 0, X, Y);\n res=\"\\nN(x) = {} + {}(x - {} )\".format(Y[0], f(0, 1, 0, X, Y), X[0])\n for i in range(1, n-1):\n tmp = f(i, 0, i+1, X, Y)\n res+=\" + {}\".format(tmp)\n for j in range(i+1):\n res+=\"(x - {})\".format(X[j])\n N += omega_function_Newton(x, i, X)*tmp\n print(res)\n return N\n\ndef main():\n all_X = [[0, np.pi/6, 2*np.pi/6, 3*np.pi/6],[0, np.pi/6, np.pi/4, np.pi/2]] #это нужно считать откуда-то \n for X in all_X:\n print(\"\\nX: \", X)\n Y = count_y(function_v12, X)\n L = lagrange_polinom(1, X, Y)\n print(\"L(x) = \", L)\n print(\"y(x) = \", function_v12(1))\n print(\"delta = \",abs(L - function_v12(1)))\n N = newton_polynom(1, X, Y)\n print(\"L(x) = \", N)\n print(\"y(x) = \", function_v12(1))\n print(\"delta = \",abs(N - function_v12(1)))\n \nif __name__ == \"__main__\":\n main()","sub_path":"sem6/numerical_methods/programs/python/laba3_1.py","file_name":"laba3_1.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"122557848","text":"import os\nimport yaml\nimport subprocess\nimport pytest\nimport fv3config\nimport hashlib\n\n\nTEST_DIR = os.path.dirname(os.path.realpath(__file__))\nCONFIG_DIR = os.path.join(TEST_DIR, \"config\")\nconfig_filenames = os.listdir(CONFIG_DIR)\n\n\n@pytest.fixture(params=config_filenames)\ndef config(request):\n config_filename = os.path.join(CONFIG_DIR, request.param)\n with open(config_filename, \"r\") as config_file:\n return yaml.safe_load(config_file)\n\n\ndef md5_from_dir(dir_):\n md5s = {}\n for root, dirs, files in os.walk(str(dir_)):\n for file in files:\n with open(os.path.join(root, file), \"rb\") as f:\n md5 = hashlib.md5()\n while True:\n buf = f.read(2048)\n if not buf:\n break\n md5.update(buf)\n\n relpath_to_root = os.path.relpath(root, start=dir_)\n md5s[os.path.join(relpath_to_root, file)] = md5.hexdigest()\n return md5s\n\n\ndef md5_from_dir_only_nc(dir_):\n return {\n file: hash for file, hash in md5_from_dir(dir_).items() if file.endswith(\".nc\")\n }\n\n\ndef test_md5_from_dir(tmpdir):\n tmpdir.join(\"a\").open(\"w\").write(\"hello\")\n tmpdir.join(\"b\").open(\"w\").write(\"world\")\n\n orig_md5 = md5_from_dir(tmpdir)\n assert orig_md5 == md5_from_dir(tmpdir)\n\n tmpdir.join(\"b\").open(\"w\").write(\"world updated\")\n assert orig_md5 != md5_from_dir(tmpdir)\n\n\ndef test_md5_from_dir_subdirs(tmpdir):\n tmpdir.mkdir(\"subdir\").join(\"a\").open(\"w\").write(\"hello\")\n md5s = md5_from_dir(tmpdir)\n assert \"subdir/a\" in md5s\n\n\ndef test_fv3_wrapper_regression(regtest, tmpdir, config):\n fv3_rundir = tmpdir.join(\"fv3\")\n wrapper_rundir = tmpdir.join(\"wrapper\")\n\n run_fv3(config, fv3_rundir)\n run_wrapper(config, wrapper_rundir)\n\n assert md5_from_dir_only_nc(fv3_rundir) == md5_from_dir_only_nc(wrapper_rundir)\n\n # just make sure there are some outputs\n assert len(md5_from_dir_only_nc(fv3_rundir)) > 0\n\n # regression test the wrapper checksums\n # update by running tests with 'pytest --regtest-reset'\n md5s_wrapper = md5_from_dir_only_nc(wrapper_rundir)\n for key in sorted(md5s_wrapper):\n print(key)\n print(key, md5s_wrapper[key], file=regtest)\n\n\ndef run_fv3(config, run_dir):\n fv3config.write_run_directory(config, str(run_dir))\n subprocess.check_call(\n [\"mpirun\", \"-n\", \"6\", \"fv3.exe\"], cwd=run_dir,\n )\n\n\ndef run_wrapper(config, run_dir):\n fv3config.write_run_directory(config, str(run_dir))\n subprocess.check_call(\n [\"mpirun\", \"-n\", \"6\", \"python3\", \"-m\", \"mpi4py\", \"-m\", \"fv3gfs.wrapper.run\"],\n cwd=run_dir,\n )\n","sub_path":"tests/pytest/test_regression.py","file_name":"test_regression.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"145882820","text":"from unittest import TestCase\nfrom unittest.mock import Mock, patch\nfrom EtsyShop import EtsyShop, Listing\nfrom LiwwaCodingChallenge import LiwwaCodingChallenge\n\n\nclass TestLiwwaCodingChallenge(TestCase):\n def setUp(self):\n self.lcc = LiwwaCodingChallenge()\n # this method will ping Etsy's API to get shop_id\n stubbed_return = {\n \"count\": 1,\n \"results\": [\n {\"shop_id\": 1}\n ]\n }\n\n self.patcher = patch('RequestsHelper.RequestsHelper.get_json', return_value=stubbed_return)\n self.patcher.start()\n self.addCleanup(self.patcher.stop)\n\n def test_clean_and_split_into_words(self):\n empty_string = \"\"\n ret = list(self.lcc.clean_and_split_into_words(empty_string))\n self.assertEqual(ret, [])\n\n line_breaks = \"a\\nb\\nc\"\n expected_return = line_breaks.split(\"\\n\")\n ret = list(self.lcc.clean_and_split_into_words(line_breaks))\n self.assertEqual(ret, expected_return)\n\n tabs = \"a\\tb\\tc\"\n expected_return = tabs.split(\"\\t\")\n ret = list(self.lcc.clean_and_split_into_words(tabs))\n self.assertEqual(ret, expected_return)\n\n punctuation = \"a, b, c, d, \\n e ... f. g, h, i: j \\\"k\\\" l^\"\n expected_return = list(\"abcdefghijkl\")\n ret = list(self.lcc.clean_and_split_into_words(punctuation))\n self.assertEqual(ret, expected_return)\n\n apostrophes = \"mom's father's birthday cake\"\n expected_return = [\"mom's\", \"father's\", \"birthday\", \"cake\"]\n ret = list(self.lcc.clean_and_split_into_words(apostrophes))\n self.assertEqual(ret, expected_return)\n\n caps = \"A B C D\"\n expected_return = list(\"abcd\")\n ret = list(self.lcc.clean_and_split_into_words(caps))\n self.assertEqual(ret, expected_return)\n\n\n def test_get_term_frequency_for_shop(self):\n es = EtsyShop(\"test store\")\n\n expected_return = {\n \"a\": 10,\n \"b\": 12,\n \"c\": 14,\n \"d\": 40\n }\n\n stubbed_return = [\n val for val in\n [\n Listing(key, key)\n for key, val in expected_return.items()\n for _i in range(val//2)\n ]\n ]\n\n es.get_all_shop_listings_titles_and_descriptions = Mock(return_value=stubbed_return)\n ret = self.lcc.get_term_frequency_for_shop(es)\n self.assertEqual(ret, expected_return)\n\n def test_find_top_x_terms_for_shop(self):\n stub_get_term_freq = {\n \"a\": 1,\n \"b\": 2,\n \"c\": 3,\n \"d\": 4,\n \"e\": 5,\n \"f\": 6,\n \"Mom's\": 19\n }\n sorted_by_key = sorted(stub_get_term_freq, key=stub_get_term_freq.get, reverse=True)\n\n expected_return = sorted_by_key[:5]\n self.lcc.get_term_frequency_for_shop = Mock(return_value=stub_get_term_freq)\n ret = self.lcc.find_top_x_terms_for_shop(\"test shop\", 5)\n self.assertEqual(ret, expected_return)\n\n six_values_ret = self.lcc.find_top_x_terms_for_shop(\"test shop\", 6)\n six_values_expected_return = sorted_by_key[:6]\n self.assertEqual(six_values_ret, six_values_expected_return)\n\n\n","sub_path":"tests/TestLiwwaCodingChallenge.py","file_name":"TestLiwwaCodingChallenge.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"4577400","text":"squares = []\nfor value in range(1,11):\n square = value**2\n squares.append(square)\nprint(squares)\n\nsquares = []\nfor value in range(1,11):\n squares.append(value**2)\nprint(squares)\n\n# each does the same thing, the second omits the temp variable 'square' and append each new value directly to list. list of first 10 square numbers.\n","sub_path":"section-4/list_range.py","file_name":"list_range.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"13723875","text":"import os\nimport sys\nimport rtconfig\n\nif os.getenv('RTT_ROOT'):\n RTT_ROOT = os.getenv('RTT_ROOT')\nelse:\n RTT_ROOT = os.path.normpath(os.getcwd() + '../../../fh86xx-sdk/rt-thread-v21/')\n\nif os.getenv('RTT_PLATFORM_ROOT'):\n RTT_PLATFORM_ROOT = os.getenv('RTT_PLATFORM_ROOT')\nelse:\n RTT_PLATFORM_ROOT = ''\n\nif os.getenv('SDK_ROOT'):\n SDK_ROOT = os.getenv('SDK_ROOT')\nelse:\n SDK_ROOT = os.path.normpath(os.getcwd() + '../../../fh86xx-sdk')\n\nif os.getenv('RT_CONFIG_H'):\n RT_CONFIG_H = os.getenv('RT_CONFIG_H')\nelse:\n RT_CONFIG_H = os.path.normpath(os.getcwd() + '/rtconfig.h')\n\n#new add by zhangy to make rtconfig.h could add #include...\nrtconfig.PLATFORM_DEF = [ SDK_ROOT + '/platform'] \n\nAPP_ROOT = os.path.normpath(os.getcwd())\n\nsys.path = sys.path + [os.path.join(RTT_ROOT, 'tools')]\nfrom building import *\n\nTARGET = 'rtthread.' + rtconfig.TARGET_EXT\n\ncflags = ''\nfor name in os.listdir('.'):\n if name.startswith('rtconfig_'):\n cflags += ' -include {}/{} '.format(APP_ROOT, name)\n\nrtconfig.CFLAGS += cflags\nrtconfig.AFLAGS += cflags\n\nenv = Environment(tools = ['mingw'],\n AS = rtconfig.AS, ASFLAGS = rtconfig.AFLAGS,\n CC = rtconfig.CC, CCFLAGS = rtconfig.CFLAGS,\n AR = rtconfig.AR, ARFLAGS = '-rc',\n LINK = rtconfig.LINK, LINKFLAGS = rtconfig.LFLAGS)\nenv.PrependENVPath('PATH', rtconfig.EXEC_PATH)\n\nif env['PLATFORM'] == 'win32':\n env['ASCOM'] = '$AS $ASFLAGS $CCFLAGS $_CCCOMCOM -o $TARGET $SOURCES'\n\nenv['LIBSUFFIX'] = '.a'\n# env['LIBPREFIX'] = ''\n\nExport('RTT_ROOT')\nExport('RTT_PLATFORM_ROOT')\nExport('SDK_ROOT')\nExport('APP_ROOT')\nExport('rtconfig')\nExport('RT_CONFIG_H')\n\n# prepare building environment\nobjs = PrepareBuilding(env, RTT_ROOT)\nobjs += SConscript(APP_ROOT + '/SConscript_app',variant_dir='build/app', duplicate=0)\nif RTT_PLATFORM_ROOT != '':\n rtt_platform_dir = RTT_PLATFORM_ROOT\nelse:\n rtt_platform_dir = SDK_ROOT\nobjs += SConscript(rtt_platform_dir + '/SConscript',variant_dir='build/rt-thread', duplicate=0)\n\n# libc testsuite \nobjs = objs + SConscript(RTT_ROOT + '/examples/libc/SConscript', variant_dir='build/examples/libc', duplicate=0)\n\nif GetDepend('FH_BOOT_IN_2STAGE'):\n if 'objdump' not in rtconfig.POST_ACTION:\n rtconfig.POST_ACTION += '@' + rtconfig.OBJDUMP + ' -d ' + TARGET + '> ' + rtconfig.OUTPUT_DISNAME + '\\n'\n rtconfig.POST_ACTION += '@' + RTT_ROOT + '/tools/gengz.sh ' + rtconfig.OUTPUT_NAME + '.' + rtconfig.TARGET_EXT + '\\n'\n rtconfig.POST_ACTION += '@' + RTT_ROOT + '/tools/depcheck.py ' + rtconfig.OUTPUT_DISNAME + ' ' + rtconfig.OUTPUT_MAPNAME + '\\n'\n# make a building\nDoBuilding(TARGET, objs)\n","sub_path":"uart_test/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"451293376","text":"#coding:utf-8\r\nfrom jpype import *\r\nimport os\r\n_HERE = os.path.dirname(__file__)\r\n\r\n\r\ndef parser(path,index):\r\n\r\n if not isJVMStarted():\r\n \tstartJVM('/usr/java/jdk1.6.0_45/jre/lib/i386/client/libjvm.so',\"-ea\",'-Djava.class.path=/data/www/rcs/RemoteCreditSystem/ext_class/ReadExcel.jar')\r\n TXL = JPackage('cn').JXLReadExcel \r\n jd = TXL()\r\n result = jd.readExcelToHtml(path,index,True).replace(\"\\n\", \"
\")\r\n return result\r\n\r\n\r\n","sub_path":"RemoteCreditSystem/tools/parseExcelToHtml.py","file_name":"parseExcelToHtml.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"401841818","text":"# -*- coding: utf-8 -*-\r\n\r\n# function for plotting\r\nfrom matplotlib.colors import ListedColormap\r\nimport pandas as pd, numpy as np, matplotlib.pyplot as plt\r\nimport csv\r\n\r\n\r\n#Reading the initial data\r\ndf = pd.read_csv('../1-Data/FeaturesClass.csv')\r\n\r\ny = targets = labels = df[\"Class\"].values\r\n\r\ncolumns = ['Numero de accidentes por salida',' numero de accidentes por Alcance',' Numero de accidentes por Vuelco',' Numero de accidentes por tijera',' numero de accidentes por atropello',' Numero de accidentes por nivel Amarillo',' Numero de Accidentes por Nivel Blanco',' Numero de accidentes por nivel Negro',' Numero de accidentes por nivel Rojo']\r\nclases = ['High Zone','Low Zone','Mild Zone']\r\nfeatures = df[list(columns)].values\r\n\r\n\r\n\r\nfrom sklearn.preprocessing import Imputer\r\nimp = Imputer(missing_values='NaN', strategy='mean', axis=0)\r\nX = imp.fit_transform(features)\r\n\r\nfrom sklearn import tree\r\nclf = tree.DecisionTreeClassifier(criterion=\"entropy\")\r\nclf = clf.fit(X, y)\r\n\r\n\r\n# 3. Plot the decision tree: \r\n# http://nbviewer.jupyter.org/github/kittipatkampa/python_dev/blob/master/demo_decision_tree_v1.ipynb\r\nfrom sklearn.externals.six import StringIO \r\nimport pydot \r\n\r\n# It is necessary to install GraphViz\r\n# http://www.graphviz.org/Download..php\r\n# PATH = C:\\Program Files (x86)\\Graphviz2.38\\bin\\:$PATH$\r\n\r\n\r\n# Extract the decision tree logic from the trained model\r\ndot_data = StringIO() \r\ntree.export_graphviz(clf, out_file=dot_data\r\n\t\t\t\t\t\t,feature_names=columns,\r\n\t\t\t\t\t\t class_names=clases, \r\n filled=True, rounded=True, \r\n special_characters=True)\r\n\r\n\r\n# convert the logics into graph\r\ngraph = pydot.graph_from_dot_data(dot_data.getvalue()) \r\n\r\n## This will plot decision tree in pdf file\r\ngraph.write_pdf(path=\"Tree.pdf\")","sub_path":"Activity 13. Decision trees (2)/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"464158283","text":"#pip install adafruit-circuitpython-motorkit\n#pip install pyrebase\n\nfrom adafruit_motorkit import MotorKit\nfrom gpiozero import Button\nimport time\nimport pyrebase\nimport board\nfrom collections import OrderedDict\n\nfirebaseConfig = {\n \"apiKey\": \"AIzaSyBXzcTWnqVKIty3m_5k3QCqbBPse4WmiJ8\",\n \"authDomain\": \"curtainapp-5caed.firebaseapp.com\",\n \"databaseURL\": \"https://curtainapp-5caed-default-rtdb.asia-southeast1.firebasedatabase.app\",\n \"projectId\": \"curtainapp-5caed\",\n \"storageBucket\": \"curtainapp-5caed.appspot.com\",\n \"messagingSenderId\": \"724688145111\",\n \"appId\": \"1:724688145111:web:822d9b27a75b67a7053e92\",\n \"measurementId\": \"G-0LVN54PBQ4\"}\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\ndb = firebase.database()\nkit = MotorKit(i2c=board.I2C())\nopenButton = Button(4)\ncloseButton = Button(17)\nlimitSwitchRight = Button(18)\nlimitSwitchLeft = Button(27)\nstate_ref = db.child(\"state\")\n\nwhile True:\n kit.motor1.throttle = 0\n while openButton.is_pressed or state_ref.child(\"motorState\").get().val() == OrderedDict([('motorState', 'opening')]):\n print(\"opening\")\n if not limitSwitchLeft.is_pressed or not limitSwitchRight.is_pressed:\n kit.motor1.throttle = -1.0\n\n kit.motor1.throttle = 0\n while closeButton.is_pressed or state_ref.child(\"motorState\").get().val() == OrderedDict([('motorState', 'closing')]):\n print(\"closing\")\n if not limitSwitchLeft.is_pressed or not limitSwitchRight.is_pressed:\n kit.motor1.throttle = 1.0\n\n if limitSwitchLeft.is_pressed:\n state_ref.child(\"curtainState\").update({\"curtainState\":\"closed\"})\n\n if limitSwitchRight.is_pressed:\n state_ref.child(\"curtainState\").update({\"curtainState\":\"opened\"})\n\n print(state_ref.child(\"motorState\").get().val())\n print(type(state_ref.child(\"motorState\").get().val()))\n\n time.sleep(0.1)\n","sub_path":"curtain.py","file_name":"curtain.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"51292416","text":"# Time to put in proper comments overywher\nfrom __future__ import absolute_import\n\nimport logging\n# import re\nfrom itertools import chain, imap\n\n# import requests\n# from .utilities import f2i\n# from termcolor import colored # get color logging soon\nfrom pgoapi.protos.POGOProtos.Networking.Requests_pb2 import RequestType\nfrom pgoapi.protos.POGOProtos import Inventory_pb2 as Inventory\nfrom pgoapi.exceptions import ServerSideRequestThrottlingException\n\nimport pickle\nimport random\nimport json\nimport xml.etree.ElementTree as ETXML\nfrom pogobot.location import distance_in_meters, get_increments, get_neighbors, get_route, filtered_forts, append_elevation\n# import pgoapi.protos.POGOProtos.Enums_pb2 as RpcEnum\nfrom pogobot.poke_utils import pokemon_iv_percentage, get_inventory_data, get_pokemon_num, get_incubators_stat, incubators_stat_str, \\\n get_eggs_stat, hyperbolic_value\nfrom . import CANDY_NEEDED_TO_EVOLVE, INVENTORY_DICT, POKEBALLS\nfrom time import sleep\nfrom collections import defaultdict\n\nimport sys\nimport os.path\nimport platform\n\nfrom pgoapi import PGoApi\nfrom pgoapi import utilities as util\n\nMIN_SIMILAR_POKEMON = 1 # change this to keep more doubles if you have release duplicates set to ture\n\n\nclass PoGObot:\n\n def __init__(self, config, pokemon_names, start_pos):\n\n self.api = PGoApi()\n self.log = logging.getLogger(__name__)\n self._start_pos = start_pos\n self._posf = start_pos\n self._walk_count = 1\n self.first_fort = {}\n self.config = config\n self.evolved_pokemon_ids = []\n self.released_pokemon_ids = []\n self.player_level = 1\n self.GPX_lat = []\n self.GPX_lon = []\n self._pokeball_type = 1\n self.GMAPS_KEY = config.get(\"GMAPS_API_KEY\", \"\")\n self.MIN_KEEP_IV = config.get(\"MIN_KEEP_IV\", 0)\n self.KEEP_CP_OVER = config.get(\"KEEP_CP_OVER\", 0)\n self.RELEASE_DUPLICATES = config.get(\"RELEASE_DUPLICATE\", 0)\n self.DUPLICATE_CP_FORGIVENESS = config.get(\"DUPLICATE_CP_FORGIVENESS\", 0)\n self.MAX_BALL_TYPE = config.get(\"MAX_BALL_TYPE\", 0)\n self.SLOW_BUT_STEALTH = config.get(\"SLOW_BUT_STEALTH\", 0)\n self.AUTO_HATCHING = config.get(\"AUTO_HATCHING\", False)\n self.EVOLVE_POKEMON = config.get(\"EVOLVE_POKEMON\", [])\n self._req_method_list = []\n self._heartbeat_number = 0\n self.pokemon_names = pokemon_names\n self.pokeballs = [0, 0, 0, 0] # pokeball counts. 
set to 0 to force atleast one fort check before trying to capture pokemon\n self.map_cells = dict()\n self.min_item_counts = dict(\n ((getattr(Inventory, key), value) for key, value in config.get('MIN_ITEM_COUNTS', {}).iteritems())\n )\n\n def response_parser(self, res):\n if os.path.isfile(\"accounts/%s/Inventory.json\" % self.config['username']) and 'GET_INVENTORY' in res['responses']:\n with open(\"accounts/%s/Inventory.json\" % self.config['username'], \"w\") as file_to_write:\n file_to_write.write(json.dumps(res['responses'], indent=2))\n file_to_write.close()\n with open(\"accounts/%s/Inventory.json\" % self.config['username'], \"r\") as file_to_read:\n file = file_to_read.read()\n json_file = json.loads(file)\n if 'GET_PLAYER' in res['responses']:\n if os.path.isfile(\"accounts/%s/Player.json\" % self.config['username']):\n with open(\"accounts/%s/Player.json\" % self.config['username'], \"w\") as file_to_write:\n file_to_write.write(json.dumps(res['responses'], indent=2))\n file_to_write.close()\n player_data = res['responses'].get('GET_PLAYER', {}).get('player_data', {})\n inventory_items = json_file.get('GET_INVENTORY', {}).get('inventory_delta', {}).get('inventory_items', [])\n inventory_items_dict_list = map(lambda x: x.get('inventory_item_data', {}), inventory_items)\n player_stats = filter(lambda x: 'player_stats' in x, inventory_items_dict_list)[0].get('player_stats', {})\n self.player_level = int(player_stats.get('level', 1))\n currencies = player_data.get('currencies', [])\n currency_data = \",\".join(map(lambda x: \"{0}: {1}\".format(x.get('name', 'NA'), x.get('amount', 'NA')), currencies))\n self.log.info(\"\\n\\n Username: %s, Lvl: %s, XP: %s/%s \\n Currencies: %s \\n\", player_data.get('username', 'NA'), player_stats.get('level', 'NA'), player_stats.get('experience', 'NA'), player_stats.get('next_level_xp', 'NA'), currency_data)\n if 'GET_INVENTORY' in res['responses']:\n res['responses']['lat'] = self._posf[0]\n res['responses']['lng'] = self._posf[1]\n self.log.info(\"\\n\\nList of Pokemon:\\n\" + get_inventory_data(res, self.pokemon_names) + \"\\nTotal Pokemon count: \" + str(get_pokemon_num(res)) + \"\\n\\nEgg Hatching status: \" + incubators_stat_str(res) + \"\\n\")\n self.log.info(\"Cleaning up inventory\")\n self.cleanup_inventory(res['responses']['GET_INVENTORY']['inventory_delta']['inventory_items'])\n # new inventory data has just been saved, clearing evolved pokemons list\n self.evolved_pokemon_ids = []\n if 'GET_MAP_OBJECTS' in res['responses']:\n if os.path.isfile(\"accounts/%s/Map.json\" % self.config['username']):\n with open(\"accounts/%s/Map.json\" % self.config['username'], \"w\") as file_to_write:\n file_to_write.write(json.dumps(res['responses'], indent=2))\n file_to_write.close()\n return res\n\n def heartbeat(self):\n res = self.api.get_inventory()\n sleep(random.random() + 5)\n self.log.debug('Heartbeat dictionary: \\n\\r{}'.format(json.dumps(res, indent=2)))\n self.response_parser(res=res)\n if self.AUTO_HATCHING and self._heartbeat_number % 10 == 0:\n hatching_eggs_count = self.attempt_hatch_eggs(res=res)\n if hatching_eggs_count > 0:\n self.log.info(\"Start hatching %d eggs\", hatching_eggs_count)\n self.spin_near_fort()\n self._heartbeat_number += 1\n return res\n\n def walk_to(self, loc):\n self._walk_count += 1\n steps = get_route(self._posf, loc, self.GMAPS_KEY)\n for step in steps:\n for next_point in enumerate(get_increments(self._posf, step, self.config.get(\"STEP_SIZE\", 70))):\n to_point = append_elevation(next_point[1][0], 
next_point[1][1], self.GMAPS_KEY)\n self.api.set_position(*to_point)\n # make sure we have atleast 1 ball\n if sum(self.pokeballs) > 0 and self._walk_count % 7:\n while self.catch_near_pokemon():\n if self.SLOW_BUT_STEALTH:\n sleep(1 * random.random() + 1) # If you want to make it faster, delete this line... would not recommend though\n\n return\n\n # this is in charge of spinning a pokestop\n def spin_near_fort(self):\n response = self.nearby_map_objects()\n sleep(2 * random.random() + 5)\n self.response_parser(response)\n map_cells = response.get('responses', {}).get('GET_MAP_OBJECTS', {}).get('map_cells', {})\n forts = PoGObot.from_iterable_to_chain(lambda c: c.get('forts', []), map_cells)\n # check if there are GPX data\n if len(self.GPX_lat) == len(self.GPX_lon) and len(self.GPX_lat) > 0:\n if self._walk_count < len(self.GPX_lon):\n self.set_position(self.GPX_lat[self._walk_count], self.GPX_lon[self._walk_count], 20)\n self._walk_count += 1\n available_forts = filtered_forts((self.GPX_lat[self._walk_count], self.GPX_lon[self._walk_count]), forts)\n sleep(1 * random.random() + 1)\n for fort in available_forts:\n if fort[1] < 10:\n request = self.api.create_request()\n request.fort_search(fort_id=fort['id'], fort_latitude=fort['latitude'], fort_longitude=fort['longitude'], player_latitude=self.GPX_lat[self._walk_count], player_longitude=self.GPX_lon[self._walk_count])\n res = request.call()['responses']['FORT_SEARCH']\n if 'lure_info' in fort:\n encounter_id = fort['lure_info']['encounter_id']\n fort_id = fort['lure_info']['fort_id']\n resp = self.api.disk_encounter(encounter_id=encounter_id, fort_id=fort_id, player_latitude=self.GPX_lat[self._walk_count], player_longitude=self.GPX_lon[self._walk_count]).call()['responses']['DISK_ENCOUNTER']\n if self.pokeballs[1] > 9 and self.pokeballs[2] > 4 and self.pokeballs[3] > 4:\n self.disk_encounter_pokemon(fort['lure_info'])\n else:\n self.walk_count = 0\n self.spin_near_fort\n # without GPX data bot wil go from pokestop to pokestop\n else:\n if self._start_pos and self._walk_count % self.config.get(\"RETURN_START_INTERVAL\") == 0:\n destinations = filtered_forts(self._start_pos, forts)\n else:\n destinations = filtered_forts(self._posf, forts)\n if len(destinations) > 0:\n # select a random pokestop and go there\n destination_num = random.randint(0, min(15, len(destinations) - 1))\n fort = destinations[destination_num]\n if self._walk_count == 1:\n self.first_fort = fort\n if self._start_pos and self._walk_count % self.config.get(\"RETURN_START_INTERVAL\") == 0:\n fort = self.first_fort\n self.log.info(\"Walking to fort at %s,%s\", fort['latitude'], fort['longitude'])\n self.walk_to((fort['latitude'], fort['longitude']))\n self.log.info(\"Arrived at fort at %s,%s\", fort['latitude'], fort['longitude'])\n if self.SLOW_BUT_STEALTH:\n sleep(2 * random.random() + 1)\n # when arrived, get the new position and spin the pokestop\n self._posf = self.api.get_position()\n position = self._posf\n request = self.api.create_request()\n request.fort_search(fort_id=fort['id'], fort_latitude=fort['latitude'], fort_longitude=fort['longitude'], player_latitude=position[0], player_longitude=position[1])\n res = request.call()['responses']['FORT_SEARCH']\n if 'items_awarded' in res:\n self.log.info(\"Fort spinned!\")\n # now i fully understand java's switch/case\n elif res['result'] == 3:\n self.log.info(\"Fort already spinned (cooling down)!\")\n elif res['result'] == 2:\n self.log.info(\"Fort too distant (who the fuck has coded this shitty bot?)!\")\n else:\n 
self.log.info(\"Fort not spinned succesfully!\")\n if 'lure_info' in fort:\n encounter_id = fort['lure_info']['encounter_id']\n fort_id = fort['lure_info']['fort_id']\n request_2 = self.api.create_request()\n request_2.disk_encounter(encounter_id=encounter_id, fort_id=fort_id, player_latitude=position[0], player_longitude=position[1])\n resp = request_2.call()['responses']['DISK_ENCOUNTER']\n self.log.debug('Encounter response is: %s', resp)\n if sum(self.pokeballs) > 10:\n self.disk_encounter_pokemon(fort['lure_info'])\n return True\n else:\n self.log.error(\"No fort to walk to!\")\n return False\n\n # this will catch any nearby pokemon\n def catch_near_pokemon(self):\n map_cells = self.nearby_map_objects().get('responses', {}).get('GET_MAP_OBJECTS', {}).get('map_cells', {})\n pokemons = PoGObot.from_iterable_to_chain(lambda c: c.get('catchable_pokemons', []), map_cells)\n sleep(3 * random.random() + 5)\n # cache map cells for api\n self.map_cells = map_cells\n # catch first pokemon:\n origin = (self._posf[0], self._posf[1])\n pokemon_distances = [(pokemon, distance_in_meters(origin, (pokemon['latitude'], pokemon['longitude']))) for pokemon in pokemons]\n self.log.debug(\"Nearby pokemon: : %s\", pokemon_distances)\n for pokemon_distance in pokemon_distances:\n target = pokemon_distance\n self.log.debug(\"Catching pokemon: : %s, distance: %f meters\", target[0], target[1])\n self.log.info(\"Catching Pokemon: %s\", self.pokemon_names[str(target[0]['pokemon_id'])])\n return self.encounter_pokemon(target[0])\n return False\n\n def nearby_map_objects(self):\n self._posf = self.api.get_position()\n cell_ids = util.get_cell_ids(lat=self._posf[0], long=self._posf[1], radius=500)\n timestamps = [0, ] * len(cell_ids)\n response = self.api.get_map_objects(latitude=self._posf[0], longitude=self._posf[1], since_timestamp_ms=timestamps, cell_id=cell_ids)\n self.response_parser(res=response)\n return response\n\n def attempt_catch(self, encounter_id, spawn_point_id, ball_type):\n r = self.api.catch_pokemon(\n normalized_reticle_size=random.triangular(1, 2, 1.8),\n pokeball=ball_type,\n spin_modifier=random.triangular(0, 1, 0.8),\n hit_pokemon=True,\n normalized_hit_position=1,\n encounter_id=encounter_id,\n spawn_point_id=spawn_point_id,\n )['responses']['CATCH_POKEMON']\n self.log.info(\"Throwing pokeball type: %s\", POKEBALLS[ball_type - 1]) # list the pokeball that was thrown\n if \"status\" in r:\n self.log.debug(\"Status: %d\", r['status'])\n return r\n\n def cleanup_inventory(self, inventory_items=None):\n if not inventory_items:\n inventory_items = self.api.get_inventory().call()['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']\n sleep(3 * random.random() + 5)\n all_actual_items = [xiq['inventory_item_data'][\"item\"] for xiq in inventory_items if \"item\" in xiq['inventory_item_data']]\n all_actual_item_str = \"\\n\\nList of items:\\n\\n\"\n all_actual_item_count = 0\n all_actual_items = sorted([x for x in all_actual_items if \"count\" in x], key=lambda x: x[\"item_id\"])\n for xiq in all_actual_items:\n if 1 <= xiq[\"item_id\"] <= 4: # save counts of pokeballs\n self.pokeballs[xiq[\"item_id\"]] = xiq[\"count\"]\n true_item_name = INVENTORY_DICT[xiq[\"item_id\"]]\n all_actual_item_str += \"Item_ID \" + str(xiq[\"item_id\"]) + \"\\titem count \" + str(xiq[\"count\"]) + \"\\t(\" + true_item_name + \")\\n\"\n all_actual_item_count += xiq[\"count\"]\n all_actual_item_str += \"\\nTotal item count: \" + str(all_actual_item_count) + \"\\n\"\n self.log.info(all_actual_item_str)\n\n 
caught_pokemon = defaultdict(list)\n for inventory_item in inventory_items:\n if \"pokemon_data\" in inventory_item['inventory_item_data']:\n # This code block checks to see if the inventory item is an item or pokemon\n pokemon = inventory_item['inventory_item_data']['pokemon_data']\n if 'cp' in pokemon:\n caught_pokemon[pokemon['pokemon_id']].append(pokemon)\n elif \"item\" in inventory_item['inventory_item_data']:\n item = inventory_item['inventory_item_data']['item'] # Check to see if your holding too many items and recycles them\n if item['item_id'] in self.min_item_counts and \"count\" in item and item['count'] > self.min_item_counts[item['item_id']]:\n recycle_count = item['count'] - self.min_item_counts[item['item_id']]\n self.log.info(\"Recycling {0}, item count {1}\".format(INVENTORY_DICT[item['item_id']], recycle_count))\n self.api.recycle_inventory_item(item_id=item['item_id'], count=recycle_count)\n\n for pokemons in caught_pokemon.values():\n if len(pokemons) > MIN_SIMILAR_POKEMON: # if you have more than 1 of the same amount of pokemon do this\n pokemons = sorted(pokemons, lambda x, y: cmp(x['cp'], y['cp']), reverse=True)\n for pokemon in pokemons:\n if pokemon['pokemon_id'] in self.EVOLVE_POKEMON and not pokemon['id'] in self.evolved_pokemon_ids and \"favorite\" not in pokemon:\n for inventory_item in inventory_items:\n if \"candy\" in inventory_item['inventory_item_data'] and (inventory_item['inventory_item_data']['candy']['family_id'] == pokemon['pokemon_id'] or inventory_item['inventory_item_data']['candy']['family_id'] == (pokemon['pokemon_id'] - 1)) and inventory_item['inventory_item_data']['candy'].get('candy', 0) > CANDY_NEEDED_TO_EVOLVE[pokemon['pokemon_id']] and pokemon['pokemon_id'] not in self.evolved_pokemon_ids:\n self.log.info(\"Evolving pokemon: %s\", self.pokemon_names[str(pokemon['pokemon_id'])])\n self.api.evolve_pokemon(pokemon_id=pokemon['id']) # quick press ctrl + c to stop the evolution\n self.evolved_pokemon_ids.append(pokemon['pokemon_id'])\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 28)\n excess_pokemons = defaultdict(list)\n for pokemons in caught_pokemon.values():\n pokemons = sorted(pokemons, lambda x, y: cmp(x['cp'], y['cp']), reverse=True)\n for pokemon in pokemons:\n if \"favorite\" not in pokemon and pokemon['cp'] < self.KEEP_CP_OVER and pokemon_iv_percentage(pokemon) < self.MIN_KEEP_IV and pokemon['pokemon_id'] not in self.evolved_pokemon_ids and (pokemon['pokemon_id'] + 1) not in self.evolved_pokemon_ids and not pokemon['id'] in self.evolved_pokemon_ids:\n excess_pokemons[pokemon['pokemon_id']].append(pokemon)\n for pokemons_id in excess_pokemons.keys():\n pokemons = excess_pokemons.pop(pokemons_id)\n top_CP_pokemon = caught_pokemon[pokemons_id][0]\n top_CP_pkmn_hyp = hyperbolic_value(top_CP_pokemon, self.player_level)\n if self.RELEASE_DUPLICATES:\n for pokemon in pokemons:\n pkmn_hyp = hyperbolic_value(pokemon, self.player_level)\n if pkmn_hyp > top_CP_pkmn_hyp:\n if top_CP_pokemon['cp'] * self.DUPLICATE_CP_FORGIVENESS < pokemon['cp'] and top_CP_pokemon['cp'] < self.KEEP_CP_OVER:\n self.release_pokemon(top_CP_pokemon)\n top_CP_pokemon = pokemon\n top_CP_pkmn_hyp = pkmn_hyp\n elif top_CP_pokemon['cp'] * self.DUPLICATE_CP_FORGIVENESS > pokemon['cp']:\n self.release_pokemon(pokemon)\n return\n\n def disk_encounter_pokemon(self, lureinfo):\n try:\n encounter_id = lureinfo['encounter_id']\n fort_id = lureinfo['fort_id']\n position = self._posf\n resp = self.api.disk_encounter(encounter_id=encounter_id, fort_id=fort_id, 
player_latitude=position[0], player_longitude=position[1]).call()['responses']['DISK_ENCOUNTER']\n sleep(2 * random.random() + 1)\n if resp['result'] == 1:\n capture_status = -1\n self._pokeball_type = 1\n while capture_status != 0 and capture_status != 3:\n for balls in range(len(self.pokeballs)):\n self._pokeball_type = balls\n if self.pokeballs[balls] > 0:\n catch_attempt = self.attempt_catch(encounter_id, fort_id, self._pokeball_type)\n self.pokeballs[self._pokeball_type] -= 1\n capture_status = catch_attempt['status']\n if capture_status == 1:\n self.log.debug(\"Caught Pokemon: : %s\", catch_attempt)\n self.log.info(\"Caught Pokemon: %s\", self.pokemon_names[str(resp['pokemon_data']['pokemon_id'])])\n self._pokeball_type = 1\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 2)\n else:\n sleep(2)\n return catch_attempt\n elif capture_status == 2:\n self.log.info(\"Pokemon %s is too wild\", self.pokemon_names[str(resp['pokemon_data']['pokemon_id'])])\n if self._pokeball_type < self.MAX_BALL_TYPE:\n self._pokeball_type += 1\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 5)\n elif capture_status == 3:\n self.log.debug(\"Failed Catch: : %s\", catch_attempt)\n self.log.info(\"Failed to Catch Pokemon: %s\", self.pokemon_names[str(resp['pokemon_data']['pokemon_id'])])\n self._pokeball_type = 1\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 2)\n else:\n sleep(1)\n return False\n except Exception as e:\n self.log.error(\"Error in disk encounter %s\", e)\n self._pokeball_type = 1\n return False\n\n def encounter_pokemon(self, pokemon):\n encounter_id = pokemon['encounter_id']\n spawn_point_id = pokemon['spawn_point_id']\n position = self._posf\n # contact the servers\n request = self.api.create_request()\n request.encounter(\n encounter_id=encounter_id,\n spawn_point_id=spawn_point_id,\n player_latitude=position[0],\n player_longitude=position[1]\n )\n response = request.call()\n encounter = response['responses']['ENCOUNTER']\n # this cade catches pokemon\n self.log.debug(\"Started Encounter: %s\", encounter)\n if encounter['status'] == 1:\n capture_status = -1\n self._pokeball_type = 1 # start with a pokeball\n i = 0\n while capture_status != 0 and capture_status != 3:\n i += 1 \n for balls in range(len(self.pokeballs)): # try with each ball type starting with weakest\n self._pokeball_type = balls\n if self.pokeballs[balls] > 0: # if you have less then 1 ball do not attempt to catch em all\n if i % 3 == 0:\n self.use_item_razz_berry(encounter_id, spawn_point_id)\n catch_attempt = self.attempt_catch(encounter_id, spawn_point_id, self._pokeball_type) # actual catching code\n self.pokeballs[self._pokeball_type] -= 1 # lowers the thrown ball code\n capture_status = catch_attempt['status']\n if capture_status == 1:\n self.log.debug(\"Caught Pokemon: : %s\", catch_attempt) # you did it\n self.log.info(\"Caught Pokemon: %s\", self.pokemon_names[str(pokemon['pokemon_id'])])\n self._pokeball_type = 1\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 10)\n else:\n sleep(2)\n return catch_attempt\n elif capture_status == 2:\n self.log.info(\"Pokemon %s is too wild\", self.pokemon_names[str(pokemon['pokemon_id'])])\n if self._pokeball_type < self.MAX_BALL_TYPE:\n self._pokeball_type += 1 # try with a stronger ball\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 5)\n elif capture_status == 3:\n self.log.debug(\"Failed Catch: : %s\", catch_attempt) # potential soft ban or just a run away\n self.log.info(\"Failed to Catch Pokemon: %s\", 
self.pokemon_names[str(pokemon['pokemon_id'])])\n self._pokeball_type = 1\n sleep(2 * random.random() + 2)\n if self.SLOW_BUT_STEALTH:\n sleep(3 * random.random() + 2)\n else:\n sleep(2)\n return False\n\n def use_item_razz_berry(self, encounter_id, spawn_point_id):\n request = self.api.create_request()\n request.use_item_capture(\n item_id = 701,\n encounter_id = encounter_id,\n spawn_point_id = spawn_point_id\n )\n response = request.call()\n if response['responses']['USE_ITEM_CAPTURE']['success'] == True:\n self.log.info(\"Used a Razz Berry\")\n else:\n self.log.info(\"Something went wrong when trying to use a Razz Berry\")\n sleep(2 * random.random() + 1)\n return response\n\n\n def login(self, provider, username, password, cached=False):\n\n # set player position on the earth\n self.api.set_position(*self._start_pos)\n\n # new authentication initialitation\n self.api.set_authentication(provider=provider, username=username, password=password)\n\n # provide the path for your encrypt dll\n encryption_path = self.get_encryption_lib_path()\n self.api.activate_signature(encryption_path)\n\n # try to log in like real app\n response = self.api.app_simulation_login()\n\n # update Inventory\n self.response_parser(res=response)\n\n sleep(5 * random.random() + 5)\n\n return True\n\n def get_encryption_lib_path(self):\n lib_path = \"\"\n # win32 doesn't mean necessarily 32 bits\n if sys.platform == \"win32\":\n if platform.architecture()[0] == '64bit':\n lib_path = os.path.join(os.path.dirname(__file__), \"../pgoapi/libs/libencrypt-windows-64.dll\")\n else:\n lib_path = os.path.join(os.path.dirname(__file__), \"../pgoapi/libs/libencrypt-windows-32.dll\")\n\n elif sys.platform == \"darwin\":\n lib_path = os.path.join(os.path.dirname(__file__), \"../pgoapi/libs/libencrypt-osx-64.so\")\n\n elif os.uname()[4].startswith(\"arm\") and platform.architecture()[0] == '32bit':\n lib_path = os.path.join(os.path.dirname(__file__), \"../pgoapi/libs/libencrypt-linux-arm-32.so\")\n\n elif sys.platform.startswith('linux'):\n if platform.architecture()[0] == '64bit':\n lib_path = os.path.join(os.path.dirname(__file__), \"../pgoapi/libs/libencrypt-linux-x86-64.so\")\n else:\n lib_path = os.path.join(os.path.dirname(__file__), \"../pgoapi/libs/libencrypt-linux-x86-32.so\")\n\n else:\n err = \"Unexpected/unsupported platform '{}'\".format(sys.platform)\n self.log.info(err)\n raise Exception(err)\n\n if not os.path.isfile(lib_path):\n err = \"Could not find {} encryption library {}\".format(sys.platform, lib_path)\n self.log.info(err)\n raise Exception(err)\n\n return lib_path\n\n def set_GPX(self):\n if len(self.GPX_lat) == 0 and len(self.GPX_lon) == 0:\n try:\n tree = ETXML.parse('GPX.xml')\n root = tree.getroot()\n trk = root.getiterator()\n point_number = len(trk) - 1\n self.log.info('\\n\\n' + str(point_number) + ' points found' + '\\nTrak location: ' + trk[2].text + '\\n')\n for i in range(5, point_number):\n if str(trk[i].get('lat')) != str(None):\n self.GPX_lat.append(float(trk[i].get('lat')))\n self.GPX_lon.append(float(trk[i].get('lon')))\n return True\n except:\n self.log.debug('GPX data not found or some error has occured')\n return False\n\n def attempt_hatch_eggs(self, res=None):\n if not res:\n res = self.api.get_inventory().call()\n hatching_incubator_list, empty_incubator_list = get_incubators_stat(res)\n hatching_eggs, immature_eggs = get_eggs_stat(res)\n hatching_eggs_count = 0\n for immature_egg in immature_eggs:\n egg_id = immature_egg['pokemon_data']['id']\n if len(empty_incubator_list) > 0:\n # 
Always use first incubator.\n incubator_index = 0\n incubator_id = empty_incubator_list[incubator_index]['id']\n uses_remaining = empty_incubator_list[incubator_index].get('uses_remaining', 0) - 1\n if self.hatch_egg(incubator_id, egg_id):\n hatching_eggs_count += 1\n # Update incubator manually to save api call().\n empty_incubator_list[incubator_index]['uses_remaining'] = uses_remaining\n if uses_remaining <= 0:\n del(empty_incubator_list[incubator_index])\n return hatching_eggs_count\n\n def hatch_egg(self, incubator_id, egg_id):\n # contact the servers\n request = self.api.create_request()\n request.use_item_egg_incubator(item_id=incubator_id, pokemon_id=egg_id)\n response = request.call()\n sleep(random.random() + 1)\n result = response.get('responses', {}).get('USE_ITEM_EGG_INCUBATOR', {})\n if len(result) == 0:\n return False\n if \"result\" in result:\n self.log.debug(\"Result: %d\", result['result'])\n return result.get('result', 0) == 1\n\n def release_pokemon(self, pokemon):\n atgym = 'deployed_fort_id' in pokemon\n if atgym:\n self.log.info(\"Pokemon %s CP: %s not released because at gym\", self.pokemon_names[str(pokemon['pokemon_id'])], pokemon['cp'])\n if not atgym and not pokemon['id'] in self.released_pokemon_ids:\n self.log.debug(\"Releasing pokemon: %s\", pokemon)\n self.log.info(\"Releasing pokemon: %s IV: %s CP: %s\", self.pokemon_names[str(pokemon['pokemon_id'])], pokemon_iv_percentage(pokemon), pokemon['cp'])\n self.api.release_pokemon(pokemon_id=pokemon[\"id\"])\n self.released_pokemon_ids.append(pokemon['id'])\n sleep(2 * random.random() + 5)\n return True\n\n def main_loop(self):\n self.set_GPX()\n while True:\n try:\n self.heartbeat()\n sleep(1 * random.random() + 1) # If you want to make it faster, delete this line... would not recommend though\n if sum(self.pokeballs) > 0: # if you do not have any balls skip pokemon catching\n while self.catch_near_pokemon():\n sleep(1 * random.random() + 2) # If you want to make it faster, delete this line... would not recommend though\n else:\n self.log.info(\"Less than 1 Poke Balls: Entering pokestops only\")\n self.spin_near_fort() # check local pokestop\n except ServerSideRequestThrottlingException:\n self.log.info(\"Too frequent requests, slow down man!\")\n for i in range(5, 0, -1):\n self.log.info(\"Wait %s more seconds befrore continuing\", str(i))\n sleep(1)\n self.main_loop()\n\n @staticmethod\n def from_iterable_to_chain(f, items):\n return chain.from_iterable(imap(f, items))\n","sub_path":"pogobot/pogobot.py","file_name":"pogobot.py","file_ext":"py","file_size_in_byte":32016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"527000829","text":"import turtle\r\nt = turtle.Pen()\r\n\r\ndef square(side):\r\n for i in range(0,4):\r\n t.forward(side)\r\n t.left(90)\r\n\r\nsquare(10)\r\nsquare(50)\r\nsquare(100)\r\nsquare(150)\r\nsquare(250)\r\n\r\ndef circle(radius):\r\n t.circle(radius)","sub_path":"turtle_module_functions.py","file_name":"turtle_module_functions.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"113084978","text":"import mysql.connector\nimport time\nimport datetime\nimport logging\nfrom subprocess import check_output\nfrom varianti import VariantI\nfrom chipreader import *\nfrom connect import DBConnect\n\ndef readin(chip,reader,offset=0):\n orig_file = reader.fname\n line_count = int(check_output([\"wc\", \"-l\", orig_file]).split()[0])\n variant_count = 0\n start = time.time()\n logging.info('file: %s',(orig_file))\n fvper = int(0.05 * line_count)\n logline = fvper\n curs = chip.getCursor()\n build = \"38\"\n if reader.col_GRCh37_pos:\n build = \"37\"\n new_ds = reader.datasource\n prev_id = \"first\"\n for line in reader.linebyline(): \n variant_count+=1\n if logline == variant_count:\n now = int(time.time() - start)\n logging.info(\"approximately %.2f%% parsed after %s seconds, %s variants\" % ((variant_count/line_count*100),now,variant_count-1))\n logline += fvper\n if variant_count < offset:\n continue\n try:\n dic = reader.proc_line(line)\n except IndexError:\n statement = \"IndexError when trying to process line (reader.proc_line).\\nProbably this line doesn't have all expected columns:\\n\"+ \"--\".join(line) + \"\\nskipping this line\"\n logging.warning(statement)\n continue\n except ValueError as ve:\n statement = \"ValueError thrown (reader.proc_line).\\nprobably from int conversion of ukbb style '---' reference positions. Last variation parsed was {}. Error: {}\\nskipping this variant\".format(prev_id,ve)\n logging.warning(statement)\n continue\n except:\n logging.error('uknown error at reader.proc_line(line)')\n ch.close()\n raise\n else:\n if 'uid' in dic:\n prev_id = dic['uid']\n else:\n prev_id = dic['snp_id']\n try:\n variant = VariantI(curs,dic,new_ds,build)\n variant.log_flank()\n variant.log_probe()\n variant.log_coord()\n except mysql.connector.errors.DataError as de:\n mess = \"sql DataError thrown, suspected long chromosome name ({}). 
skipping {}: {}\".format(dic['chr'],prev_id,de)\n logging.warning(mess)\n chip.rollback()\n except mysql.connector.errors.IntegrityError as ine:\n mess = \"sql IntegrityError thrown.\\ncould be from duplicate entry of primary key into flank table.\\nskipping {}:{}\".format(prev_id,ine)\n logging.warning(mess)\n chip.rollback()\n except Exception as e:\n mess = \"unknown error with {}\".format(prev_id)\n logging.error(mess)\n print(mess)\n chip.rollback()\n ch.close()\n raise\n else:\n chip.commit()\n now = int(time.time() - start)\n logging.info('Finished: %s seconds passed, %s variants' % (now,(variant_count)))\n\n#readers = [InfCorEx24v1a1('/mnt/HPC/processed/mr875/tasks/dsp367/corev1_0_rsEg.csv'),\n# InfEx24v1a2('/mnt/HPC/processed/mr875/tasks/dsp367/InfiniumExome-24v1-0_A2_Eg.csv'),\n# Dil('/mnt/HPC/processed/mr875/tasks/dsp367/DIL_annotation_Eg.csv'),\n# InfCorEx24v1_1a1('/mnt/HPC/processed/mr875/tasks/dsp367/corev1_1_rsEg.csv'),\n# AxiUKBBAffy2_1('/mnt/HPC/processed/mr875/tasks/dsp367/AxiUKBBAffy2_1_38_Eg.csv'),\n# InfImmun24v2('/mnt/HPC/processed/mr875/tasks/dsp367/infimmun_Eg.csv'),\n# AxiUKBB_WCSG('/mnt/HPC/processed/mr875/tasks/dsp367/AxiUKBB_WCSG_Eg.csv'),\n# InfImmun24v2grc38('/mnt/HPC/processed/mr875/tasks/dsp367/infimmung38_Eg.csv'), \n# InfCorEx24v1_1grc38('/mnt/HPC/processed/mr875/tasks/dsp367/infincorex38_Eg.csv'),\n# InfOmniExpr('/mnt/HPC/processed/mr875/tasks/dsp367/infomniexpr_Eg.csv'),\n# InfOmniExpr38('/mnt/HPC/processed/mr875/tasks/dsp367/infomniexpr38_Eg.csv')]\n#readers = [MSExome('/mnt/HPC/processed/mr875/tasks/dsp367/msexome_Eg.csv')]\n# debug:\n#readers = [AxiUKBBAffy2_1('/mnt/HPC/processed/mr875/tasks/dsp367/AxiUKBBAffy2_1_38_Eg.csv')]\n\n#readers = [InfCorEx24v1a1('/mnt/HPC/processed/Metadata/variant_annotation/CoreExomev1.0_annotation.csv'),\n# InfEx24v1a2('/mnt/HPC/processed/Metadata/variant_annotation_grch38/InfiniumExome-24v1-0_A2.csv'),\n# InfCorEx24v1_1a1('/mnt/HPC/processed/Metadata/variant_annotation/CoreExomev1.1_annotation.csv'),\n# AxiUKBBAffy2_1('/mnt/HPC/processed/mr875/tasks/dsp367/Axiom_UKBBv2_1.na36.r1.a1.annot.csv'),\n# AxiUKBB_WCSG('/mnt/HPC/processed/Metadata/variant_annotation/Axiom_UKB_WCSG.na35.annot-2015.csv'),\n# InfImmun24v2('/mnt/HPC/processed/Metadata/variant_annotation/InfiniumImmunoArray_annotation.csv'),\n# InfImmun24v2grc38('/mnt/HPC/processed/Metadata/variant_annotation_grch38/InfiniumImmunoArray-24v2-0_A2.csv'),\n# InfCorEx24v1_1grc38('/mnt/HPC/processed/Metadata/variant_annotation_grch38/InfiniumCoreExome-24v1-1_A2.csv'),\n# InfOmniExpr('/mnt/HPC/processed/Metadata/variant_annotation/OmniExpress_annotation.csv'),\n# InfOmniExpr38('/mnt/HPC/processed/Metadata/variant_annotation_grch38/InfiniumOmniExpress-24v1-2_A2.csv'),\n# MSExome('/mnt/HPC/processed/Metadata/variant_annotation/MSExome_annotation.csv')]\n\n#readers = [Dil('/mnt/HPC/processed/Metadata/variant_annotation/DIL_annotation.csv')]\n\nreaders = [UKBBv21_2021('ukbbv2_1_Annot_2021.csv')]\n\nch = DBConnect(\"cc3\")\nlogfile = datetime.datetime.now().strftime(\"%a_%d%b_%I%p.log\")\nlogging.basicConfig(filename=logfile, level=logging.INFO)\noffsetclass = \"\" # \"AxiUKBB_WCSG\" #pick a source class from which you don't want to parse from the beginning\noffsetvariant = 249200 # variant 1 = the first line under the header. 
so if offsetvariant = 3 then the 3rd variant will be parsed\nfor source in readers:\n if type(source).__name__ == offsetclass:\n readin(ch,source,offsetvariant)\n else:\n readin(ch,source)\nch.close()\n\n\n","sub_path":"run_connect.py","file_name":"run_connect.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"414575834","text":"import numpy as np\nimport torch.nn.functional as F\nfrom torch import nn\nimport torch\nfrom torch.autograd import Variable\n\n\nclass SGVLB(nn.Module):\n def __init__(self, net, train_size):\n super(SGVLB, self).__init__()\n self.train_size = train_size\n self.net = net\n\n def forward(self, input, target, kl_weight=1.0):\n assert not target.requires_grad\n kl = 0.0\n for module in self.net.modules():\n if hasattr(module, 'kl_reg'):\n kl = kl + module.kl_reg()\n return F.cross_entropy(input, target, reduction='elementwise_mean') * self.train_size + kl_weight * kl\n\n def get_kl(self):\n kl = 0.0\n for module in self.net.modules():\n if hasattr(module, 'kl_reg'):\n kl = kl + module.kl_reg()\n return kl\n\n\ndef lr_linear(epoch_num, decay_start, total_epochs, start_value):\n if epoch_num < decay_start:\n return start_value\n return start_value*float(total_epochs-epoch_num)/float(total_epochs-decay_start)\n\n\ndef correct(output, target, topk=(1,)):\n \"\"\"Computes the correct@k for the specified values of k\"\"\"\n maxk = max(topk)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t().type_as(target)\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0).item()\n res.append(correct_k)\n return res\n\ndef logit2acc(outputs, targets):\n corr = correct(outputs, targets)\n return corr[0] / targets.shape[0]\n\n\ndef kl_ard(log_alpha):\n return 0.5 * torch.sum(torch.log1p(torch.exp(-log_alpha)))\n\n\ndef kl_loguni(log_alpha):\n k1, k2, k3 = 0.63576, 1.8732, 1.48695\n C = -k1\n mdkl = k1 * torch.sigmoid(k2 + k3 * log_alpha) - 0.5 * torch.log1p(torch.exp(-log_alpha)) + C\n kl = -torch.sum(mdkl)\n return kl\n","sub_path":"variance-networks-pytorch/core/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"240018805","text":"# Predict 452-55: Nature & Environment Team \n# Modified from https://github.com/Ayima/sitemap-visualization-tool\n# Visualize a list of URLs by site path.\n# This script reads in the sitemap_layers.csv file created by the\n# categorize_urls.py script and builds a graph visualization using Graphviz.\n\n\nfrom __future__ import print_function\n\n\n# Set global variables\ngraph_depth = 3 # Number of layers deep to plot categorization\nlimit = 50 # Maximum number of nodes for a branch\ntitle = '' # Graph title\nstyle = 'light' # Graph style, can be \"light\" or \"dark\"\nsize = '8,5' # Size of rendered PDF graph\n\n\n# Import external library dependencies\nimport pandas as pd\nimport graphviz\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--depth', type=int, default=graph_depth,\n help='Number of layers deep to plot categorization')\nparser.add_argument('--limit', type=int, default=limit,\n help='Maximum number of nodes for a branch')\nparser.add_argument('--title', type=str, default=title,\n help='Graph title')\nparser.add_argument('--style', type=str, default=style,\n help='Graph style, can be \"light\" or \"dark\"')\nparser.add_argument('--size', type=str, default=size,\n help='Size of rendered PDF graph')\nargs = parser.parse_args()\n\n# Update variables with arguments if included\ngraph_depth = args.depth\nlimit = args.limit\ntitle = args.title\nstyle = args.style\nsize = args.size\n\n# Main script functions\ndef make_sitemap_graph(df, layers=3, limit=50, size='8,5'):\n ''' Make a sitemap graph up to a specified layer depth.\n\n sitemap_layers : DataFrame\n The dataframe created by the peel_layers function\n containing sitemap information.\n\n layers : int\n Maximum depth to plot.\n\n limit : int\n The maximum number node edge connections. Good to set this\n low for visualizing deep into site maps.\n '''\n # Check to make sure we are not trying to plot too many layers\n if layers > len(df) - 1:\n layers = len(df)-1\n print('There are only %d layers available to plot, setting layers=%d'\n % (layers, layers))\n\n # Initialize graph\n f = graphviz.Digraph('sitemap', filename='sitemap_graph_%d_layer' % layers)\n f.body.extend(['rankdir=LR', 'size=\"%s\"' % size])\n\n def add_branch(f, names, vals, limit, connect_to=''):\n ''' Adds a set of nodes and edges to nodes on the previous layer. 
'''\n\n # Get the currently existing node names\n node_names = [item.split('\"')[1] for item in f.body if 'label' in item]\n\n # Only add a new branch it it will connect to a previously created node\n if connect_to:\n if connect_to in node_names:\n for name, val in list(zip(names, vals))[:limit]:\n f.node(name='%s-%s' % (connect_to, name), label=name)\n f.edge(connect_to, '%s-%s' % (connect_to, name), label='{:,}'.format(val))\n\n\n f.attr('node', shape='rectangle') # Plot nodes as rectangles\n\n # Add the first layer of nodes\n for name, counts in df.groupby(['0'])['counts'].sum().reset_index()\\\n .sort_values(['counts'], ascending=False).values:\n f.node(name=name, label='{} ({:,})'.format(name, counts))\n\n if layers == 0:\n return f\n\n f.attr('node', shape='oval') # Plot nodes as ovals\n f.graph_attr.update()\n\n # Loop over each layer adding nodes and edges to prior nodes\n for i in range(1, layers+1):\n cols = [str(i_) for i_ in range(i)]\n nodes = df[cols].drop_duplicates().values\n for j, k in enumerate(nodes):\n\n # Compute the mask to select correct data\n mask = True\n for j_, ki in enumerate(k):\n mask &= df[str(j_)] == ki\n\n # Select the data then count branch size, sort, and truncate\n data = df[mask].groupby([str(i)])['counts'].sum()\\\n .reset_index().sort_values(['counts'], ascending=False)\n\n # Add to the graph\n add_branch(f,\n names=data[str(i)].values,\n vals=data['counts'].values,\n limit=limit,\n connect_to='-'.join(['%s']*i) % tuple(k))\n\n print(('Built graph up to node %d / %d in layer %d' % (j, len(nodes), i))\\\n .ljust(50), end='\\r')\n\n return f\n\n\ndef apply_style(f, style, title=''):\n ''' Apply the style and add a title if desired. More styling options are\n documented here: http://www.graphviz.org/doc/info/attrs.html#d:style\n\n f : graphviz.dot.Digraph\n The graph object as created by graphviz.\n\n style : str\n Available styles: 'light', 'dark'\n\n title : str\n Optional title placed at the bottom of the graph.\n '''\n\n dark_style = {\n 'graph': {\n 'label': title,\n 'bgcolor': '#3a3a3a',\n 'fontname': 'Helvetica',\n 'fontsize': '18',\n 'fontcolor': 'white',\n },\n 'nodes': {\n 'style': 'filled',\n 'color': 'white',\n 'fillcolor': 'black',\n 'fontname': 'Helvetica',\n 'fontsize': '14',\n 'fontcolor': 'white',\n },\n 'edges': {\n 'color': 'white',\n 'arrowhead': 'open',\n 'fontname': 'Helvetica',\n 'fontsize': '12',\n 'fontcolor': 'white',\n }\n }\n\n light_style = {\n 'graph': {\n 'label': title,\n 'fontname': 'Helvetica',\n 'fontsize': '18',\n 'fontcolor': 'black',\n },\n 'nodes': {\n 'style': 'filled',\n 'color': 'black',\n 'fillcolor': '#dbdddd',\n 'fontname': 'Helvetica',\n 'fontsize': '14',\n 'fontcolor': 'black',\n },\n 'edges': {\n 'color': 'black',\n 'arrowhead': 'open',\n 'fontname': 'Helvetica',\n 'fontsize': '12',\n 'fontcolor': 'black',\n }\n }\n\n if style == 'light':\n apply_style = light_style\n\n elif style == 'dark':\n apply_style = dark_style\n\n f.graph_attr = apply_style['graph']\n f.node_attr = apply_style['nodes']\n f.edge_attr = apply_style['edges']\n\n return f\n\n\ndef main():\n\n # Read in categorized data\n sitemap_layers = pd.read_csv('model3sitemap_layers.csv', dtype=str)\n # Convert numerical column to integer\n sitemap_layers.counts = sitemap_layers.counts.apply(int)\n print('Loaded {:,} rows of categorized data from sitemap_layers.csv'\\\n .format(len(sitemap_layers)))\n\n print('Building %d layer deep sitemap graph' % graph_depth)\n f = make_sitemap_graph(sitemap_layers, layers=graph_depth,\n limit=limit, 
size=size)\n f = apply_style(f, style=style, title=title)\n\n f.render(cleanup=True)\n #print('Exported graph to sitemap_graph_%d_layer.pdf' % graph_depth)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Web and Network Data Science/Final Project/visualize_urls.py","file_name":"visualize_urls.py","file_ext":"py","file_size_in_byte":7063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"398649259","text":"from __future__ import annotations\n\nimport datetime\nimport json\nimport os\nimport tempfile\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Union\n\nfrom ... import helpers\nfrom ...catalog import Catalog, Dataset\nfrom ...exception import FrictionlessException\nfrom ...package import Package\nfrom ...platform import platform\nfrom ...resource import Resource\nfrom ...system import Adapter\nfrom .control import ZenodoControl\n\nif TYPE_CHECKING:\n from pyzenodo3 import Record # type: ignore\n\n\nclass ZenodoAdapter(Adapter):\n \"\"\"Read and write data from/to Zenodo\"\"\"\n\n def __init__(self, control: ZenodoControl):\n self.control = control\n\n # Read\n\n def read_package(self) -> Package:\n client = platform.pyzenodo3.Zenodo(api_key=self.control.apikey) # type: ignore\n if not self.control.record:\n note = \"Record is required.\"\n raise FrictionlessException(note)\n assert self.control.formats\n package = Package()\n try:\n dataset = client.get_record(self.control.record)\n if dataset:\n name = self.control.name or dataset.data[\"metadata\"][\"title\"]\n package = get_package(dataset, name, self.control.formats)\n except Exception as exception:\n note = \"Zenodo API error\" + repr(exception)\n raise FrictionlessException(note)\n if isinstance(package, Package) and package.resources: # type: ignore\n return package\n note = \"Package/s not found\"\n raise FrictionlessException(note)\n\n # Write\n\n # TODO: should return path: str\n def write_package(self, package: Package) -> int:\n client = platform.pyzenodo3_upload\n\n assert self.control.base_url\n assert self.control.apikey\n client.BASE_URL = self.control.base_url # type: ignore\n metafn = self.control.metafn\n\n if not metafn:\n meta_data = generate_metadata(package)\n with tempfile.NamedTemporaryFile(\"wt\", delete=False) as file:\n json.dump(meta_data, file, indent=2)\n metafn = file.name\n\n if metafn:\n # Check if metadata is a JSON Object\n if isinstance(metafn, dict):\n meta_data = generate_metadata(metadata=metafn)\n with tempfile.NamedTemporaryFile(\"wt\", delete=False) as file:\n json.dump(meta_data, file, indent=2)\n metafn = file.name\n\n try:\n deposition_id = self.control.deposition_id\n if not deposition_id:\n # Create a deposition resource\n deposition_id = client.create( # type: ignore\n token=self.control.apikey, base_url=self.control.base_url\n )\n metafn = Path(metafn).expanduser()\n client.upload_meta( # type: ignore\n token=self.control.apikey,\n metafn=metafn,\n depid=deposition_id, # type: ignore\n )\n\n # Process resources\n resources: List[Path] = []\n for key, resource in enumerate(package.resources):\n if resource.data:\n resource_file_name = f\"{resource.name}.json\" or f\"resource{key}.json\"\n resource_path = os.path.join(\n self.control.tmp_path or \"\", resource_file_name\n )\n resource.to_json(resource_path)\n resources.append(Path(resource_path).expanduser())\n continue\n\n resource_path = resource.path or \"\"\n if resource_path.startswith((\"http://\", \"https://\")):\n continue\n\n if resource.basepath:\n resource_path = os.path.join(\n str(resource.basepath), str(resource.path)\n )\n resources.append(Path(resource_path).expanduser())\n package_path = os.path.join(self.control.tmp_path or \"\", \"datapackage.json\")\n package.to_json(package_path)\n\n # Upload package and resources\n client.upload_data( # type: ignore\n token=self.control.apikey,\n datafn=Path(package_path).expanduser(),\n depid=deposition_id, # type: 
ignore\n base_url=self.control.base_url,\n )\n for resource_path in resources:\n resource_path = Path(resource_path).expanduser()\n client.upload_data( # type: ignore\n token=self.control.apikey,\n datafn=resource_path,\n depid=deposition_id, # type: ignore\n base_url=self.control.base_url,\n )\n return deposition_id # type: ignore\n except Exception as exception:\n note = \"Zenodo API error\" + repr(exception)\n raise FrictionlessException(note)\n\n # Experimental\n\n def read_catalog(self) -> Catalog:\n packages: List[Union[Package, str]] = []\n options: Dict[str, Any] = {}\n\n # Single record\n if self.control.record:\n packages.append(self.read_package())\n return Catalog(\n datasets=[\n Dataset(name=package.name, package=package) # type: ignore\n for package in packages\n ]\n )\n\n # DOI\n assert self.control.formats\n client = platform.pyzenodo3.Zenodo(api_key=self.control.apikey) # type: ignore\n if self.control.doi:\n dataset = client.find_record_by_doi(self.control.doi)\n name = self.control.name or dataset.data[\"metadata\"][\"title\"]\n package = get_package(dataset, name, self.control.formats)\n if isinstance(package, Package) and package.resources: # type: ignore\n packages.append(package)\n return Catalog(\n datasets=[\n Dataset(name=package.name, package=package) # type: ignore\n for package in packages\n ]\n )\n\n # Search\n if self.control.search:\n search = self.control.search.replace(\n \"/\", \" \"\n ) # zenodo can't handle '/' in search query\n options[\"q\"] = search\n options[\"status\"] = self.control.status\n options[\"sort\"] = self.control.sort\n options[\"page\"] = self.control.page\n options[\"size\"] = self.control.size\n options[\"all_versions\"] = self.control.all_versions\n options[\"communities\"] = self.control.communities\n options[\"type\"] = self.control.rtype\n options[\"subtype\"] = self.control.subtype\n options[\"bounds\"] = self.control.bounds\n options[\"custom\"] = self.control.rcustom\n options = {key: value for key, value in options.items() if value}\n try:\n records = client._get_records(options)\n for dataset in records:\n name = self.control.name or dataset.data[\"metadata\"][\"title\"]\n package = get_package(dataset, name, self.control.formats)\n if isinstance(package, Package) and package.resources: # type: ignore\n packages.append(package)\n except Exception as exception:\n note = \"Zenodo API error\" + repr(exception)\n raise FrictionlessException(note)\n if packages:\n return Catalog(\n datasets=[\n Dataset(name=package.name, package=package) # type: ignore\n for package in packages\n ]\n )\n note = \"Package/s not found\"\n raise FrictionlessException(note)\n\n\ndef get_package(record: Record, title: str, formats: List[str]) -> Package: # type: ignore\n package = Package(title=title)\n package.title = title\n for file in record.data[\"files\"]: # type: ignore\n path = file[\"links\"][\"self\"] # type: ignore\n is_resource_file = any(path.endswith(ext) for ext in formats) # type: ignore\n if path.endswith((\"datapackage.json\")): # type: ignore\n return Package.from_descriptor(path, title=title) # type: ignore\n if path.endswith(\"zip\") and not is_resource_file: # type: ignore\n try:\n package = Package(path) # type: ignore\n package.title = title\n return package\n except FrictionlessException as exception:\n # Skips package descriptor not found exception\n # and continues reading files.\n if \"[Errno 2] No such file or directory\" not in str(exception):\n raise exception\n if is_resource_file:\n package.basepath = 
f'https://zenodo.org/api/files/{file[\"bucket\"]}'\n resource = Resource(path=file[\"key\"]) # type: ignore\n package.add_resource(resource)\n return package\n\n\ndef generate_metadata(\n package: Optional[Package] = None, *, metadata: Optional[Dict[str, Any]] = None\n) -> Dict[str, Any]:\n meta_data: Union[str, Dict[str, Any], None] = {\"metadata\": {}}\n if not metadata and not package:\n note = \"Zenodo API Metadata Creation error: Either metadata or package should be provided to generate metadata.\"\n raise FrictionlessException(note)\n\n if metadata:\n if (\n not metadata.get(\"title\")\n or not metadata.get(\"description\")\n or not metadata.get(\"creators\")\n ):\n note = \"Zenodo API Metadata Creation error: missing title or description or creators.\"\n raise FrictionlessException(note)\n\n meta_data[\"metadata\"] = metadata\n if \"keywords\" not in meta_data[\"metadata\"]:\n meta_data[\"metadata\"][\"keywords\"] = [\"frictionlessdata\"]\n\n return helpers.remove_non_values(meta_data)\n\n assert package\n\n if not package.title or not package.description or not package.contributors:\n note = \"Zenodo API Metadata Creation error: Unable to read title or description or contributors from package descriptor.\"\n raise FrictionlessException(note)\n\n meta_data[\"metadata\"] = {\n \"title\": package.title,\n \"description\": package.description,\n \"publication_date\": package.created or str(datetime.datetime.now()),\n \"upload_type\": \"dataset\",\n \"access_right\": \"open\",\n }\n if package.licenses:\n meta_data[\"metadata\"][\"creators\"] = package.licenses[0].get(\"name\") # type: ignore\n\n creators: List[Dict[str, Any]] = []\n for contributor in package.contributors:\n creators.append(\n {\n \"name\": contributor.get(\"title\"),\n \"affiliation\": contributor.get(\"organization\"),\n }\n )\n keywords = package.keywords or []\n if \"frictionlessdata\" not in package.keywords:\n keywords.append(\"frictionlessdata\")\n\n if creators:\n meta_data[\"metadata\"][\"creators\"] = creators # type: ignore\n meta_data[\"metadata\"][\"keywords\"] = keywords # type: ignore\n return helpers.remove_non_values(meta_data)\n","sub_path":"frictionless/portals/zenodo/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":11181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"241760251","text":"\nimport pyrebase # learn more: https://python.org/pypi/Pyrebase\n\nimport csv\nfrom datetime import datetime\nimport glob, os\nimport random\n\n\npath = 'Sources/' # +hsFormat/hsClass/\nUPLOAD_CLASS_TXT = False # Should the script upload class description texts?\n\n\n\n\nconfig = { # Fenoms Firebase\n \"apiKey\": \"AIzaSyAt0uIAVOFjB42_bkwrEIqhSWkMT_VmluI\",\n \"authDomain\": \"data-reaper.firebaseapp.com\",\n \"databaseURL\": \"https://data-reaper.firebaseio.com\",\n \"projectId\": \"data-reaper\",\n \"storageBucket\": \"data-reaper.appspot.com\",\n \"messagingSenderId\": \"1079276848174\"\n}\n\nfirebase = pyrebase.initialize_app(config)\nauth = firebase.auth()\nuser = auth.sign_in_with_email_and_password('admin01@vs.com', '2\\!vEJ:6L]mh5R[z')\nDB = firebase.database()\n\n\n\n\n\n\n\nhsClasses = ['Druid','Hunter','Mage','Paladin','Priest','Rogue','Shaman','Warlock','Warrior']\nhsFormats = ['Standard', 'Wild']\n\ndef getCSVFile(path):\n f = open(path,encoding='latin-1')\n reader=csv.reader(f)\n return list(reader) \n\nCARDS = getCSVFile(path+'cards.csv')\n\n\ndef getCardRarity(cardName):\n for c in CARDS:\n if c[0] != cardName:\n continue\n\n if len(c) >= 5:\n r = c[4]\n if r == 'Free':\n r = 'Basic'\n if r == '':\n continue\n #print(cardName,r)\n return r\n else:\n return 'Basic'\n return 'Basic'\n\ndef getCardType(cardName):\n for c in CARDS:\n if c[0] != cardName:\n continue\n\n if len(c) >= 3:\n t = c[2]\n return t\n else:\n return 'Minion'\n return 'Minion'\n\n\ndef readDeckCode(file,hsClass, hsFormat):\n\n title = ''\n deckCode = ''\n archetype = ''\n author = ''\n timestamp = ''\n gameplay = ''\n cardTypes = {'Minion': 0, 'Spell': 0, 'Weapon': 0, 'Hero': 0}\n cards = []\n readingCards = 'waiting'\n count = 0\n for row in file:\n\n if len(row) <= 3:\n if readingCards == 'reading':\n readingCards = 'finished'\n elif readingCards == 'waiting':\n readingCards = 'reading'\n continue\n \n if '###' in row:\n title = row[4:-1]\n continue\n if '#' not in row and deckCode == '':\n deckCode = row[:-1]\n continue\n if '# Format: Wild' in row and hsFormat != 'Wild':\n print('ERROR: decklist not Wild format! DeckName: '+title)\n #return 0\n\n if '# Format: Standard' in row and hsFormat != 'Standard':\n print('ERROR: decklist not Standard format! 
DeckName: '+title)\n #return 0\n \n\n # Our markers:\n if '# Archetype:' in row:\n archetype = row[13:-1]\n if '# Author:' in row:\n author = row[10:-1]\n if '# Gameplay:' in row:\n gameplay = row\n if '# Timestamp:' in row:\n timestamp = row\n\n # Cards\n if readingCards == 'reading':\n quantity = row[2]\n manaCost = row[6]\n if row[7] != ')': # check if double digit\n manaCost = row[6:8]\n name = row[10:-1]\n else:\n name = row[9:-1]\n rarity = getCardRarity(name)\n cardType = getCardType(name)\n cardTypes[cardType] += int(quantity)\n\n cards.append({'name':name,'manaCost':manaCost,'quantity':quantity, 'rarity':rarity})\n\n count += 1\n\n if archetype == '':\n archetype = 'Other '+hsClass\n\n if timestamp == '':\n dt = datetime.utcnow()\n timestamp = dt.strftime(\"%Y-%m-%d\")\n\n return {'name':title, 'cards':cards, 'deckCode': deckCode, 'gameplay': gameplay,\n 'author':author, 'timestamp':timestamp, 'cardTypes':cardTypes}, archetype\n\n\n\n\n\n\n \n\ndef upload(hsFormat):\n\n # Delete Existing Files\n for hsClass in hsClasses:\n DB.child('deckData').child(hsFormat).child(hsClass).child('archetypes').remove(user['idToken'])\n pass\n\n\n for hsClass in hsClasses:\n\n # moves dir to /Sources/hsClass/ and looks for all .txt files\n os.chdir(path+hsFormat+'/'+hsClass)\n for file in glob.glob(\"*.txt\"):\n\n # Class Description Texts should be labeled 'Druid.txt', 'Hunter.txt' etc. (first letter Capital)\n if file == hsClass+'.txt':\n if not UPLOAD_CLASS_TXT:\n continue\n f = open(file)\n txt = f.read()\n txt = txt.replace('',\"\")\n txt = txt.replace('
','
')\n txt = txt.replace('\\n','
')\n DB.child('deckData').child(hsFormat).child(hsClass).child('text').set(txt,user['idToken'])\n continue\n\n # Decklist files can be named anything other than [hsClass].txt\n # returns deckFile = {name, cards: [{cardname,manacost,quantity},...], deckCode, color} and archetype\n # returns 0 if hsFormats don't agree\n f = open(file)\n decklist, arch = readDeckCode(f.readlines(),hsClass, hsFormat)\n\n if decklist != 0:\n DB.child('deckData').child(hsFormat).child(hsClass).child('archetypes').child(arch).push(decklist,user['idToken'])\n pass\n os.chdir('..')\n os.chdir('..')\n os.chdir('..')\n\n\n# Execute main:\n\ndef main():\n for f in hsFormats:\n upload(f)\n\n\nmain()\n\n","sub_path":"uploadDeckData.py","file_name":"uploadDeckData.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"223052940","text":"import cv2\nimport numpy as np\nimport torch\nfrom torchvision.utils import make_grid\nimport os.path as osp\nimport random\nfrom collections import namedtuple\nfrom skimage import io\nfrom skimage.draw import circle, line, line_aa\nimport numpy as np\nimport cv2\n\n# works for coco at least\nVUNET_JOINT_ORDER = [\n \"rankle\",\n \"rknee\",\n \"rhip\",\n \"lhip\",\n \"lknee\",\n \"lankle\",\n \"rwrist\",\n \"relbow\",\n \"rshoulder\",\n \"lshoulder\",\n \"lelbow\",\n \"lwrist\",\n \"cnose\",\n \"leye\",\n \"reye\",\n]\n\n\nclass VUNetStickman:\n \"\"\"Stickman image generator according to https://github.com/CompVis/vunet/blob/master/batches.py\"\"\"\n\n @staticmethod\n def get_example_valid_keypoints():\n example_keypoints = np.array(\n [\n [0.48106802, 0.8998802],\n [0.44145063, 0.5546431],\n [0.5546431, 0.45276988],\n [0.65085673, 0.43579102],\n [0.5489835, 0.5999201],\n [0.6112394, 0.9111994],\n [0.3395774, 0.32825816],\n [0.44711027, 0.2886408],\n [0.5037065, 0.1924272],\n [0.6225586, 0.16412908],\n [0.5999201, 0.29430044],\n [0.4301314, 0.35089666],\n [0.51502573, 0.10187323],\n [0.526345, 0.07923473],\n [0.49238726, 0.07923473],\n ],\n )\n return example_keypoints\n\n @staticmethod\n def get_example_invalid_keypoints():\n example_keypoints = np.array(\n [\n [0.38960117, 0.9117471],\n [0.66272366, 0.6988722],\n [0.32533708, 0.5100964],\n [0.1164787, 0.4618983],\n [-1.0, -1.0],\n [-1.0, -1.0],\n [0.87961507, 0.5221459],\n [0.71895474, 0.3936177],\n [0.562311, 0.14861076],\n [0.26910597, 0.08033014],\n [-1.0, -1.0],\n [-1.0, -1.0],\n [-1.0, -1.0],\n [-1.0, -1.0],\n [-1.0, -1.0],\n ]\n )\n return example_keypoints\n\n @staticmethod\n def make_joint_img(img_shape, jo, joints):\n # three channels: left, right, center\n scale_factor = img_shape[1] / 128\n thickness = int(3 * scale_factor)\n imgs = list()\n for i in range(3):\n imgs.append(np.zeros(img_shape[:2], dtype=\"uint8\"))\n\n body = [\"lhip\", \"lshoulder\", \"rshoulder\", \"rhip\"]\n body_pts = np.array([[joints[jo.index(part), :] for part in body]])\n if np.min(body_pts) >= 0:\n body_pts = np.int_(body_pts)\n cv2.fillPoly(imgs[2], body_pts, 255)\n\n right_lines = [\n (\"rankle\", \"rknee\"),\n (\"rknee\", \"rhip\"),\n (\"rhip\", \"rshoulder\"),\n (\"rshoulder\", \"relbow\"),\n (\"relbow\", \"rwrist\"),\n ]\n for line in right_lines:\n l = [jo.index(line[0]), jo.index(line[1])]\n if np.min(joints[l]) >= 0:\n a = tuple(np.int_(joints[l[0]]))\n b = tuple(np.int_(joints[l[1]]))\n cv2.line(imgs[0], a, b, color=255, thickness=thickness)\n\n left_lines = [\n (\"lankle\", \"lknee\"),\n (\"lknee\", \"lhip\"),\n (\"lhip\", \"lshoulder\"),\n (\"lshoulder\", \"lelbow\"),\n (\"lelbow\", \"lwrist\"),\n ]\n for line in left_lines:\n l = [jo.index(line[0]), jo.index(line[1])]\n if np.min(joints[l]) >= 0:\n a = tuple(np.int_(joints[l[0]]))\n b = tuple(np.int_(joints[l[1]]))\n cv2.line(imgs[1], a, b, color=255, thickness=thickness)\n\n rs = joints[jo.index(\"rshoulder\")]\n ls = joints[jo.index(\"lshoulder\")]\n cn = joints[jo.index(\"cnose\")]\n neck = 0.5 * (rs + ls)\n a = tuple(np.int_(neck))\n b = tuple(np.int_(cn))\n if np.min(a) >= 0 and np.min(b) >= 0:\n cv2.line(imgs[0], a, b, color=127, thickness=thickness)\n cv2.line(imgs[1], a, b, color=127, thickness=thickness)\n\n cn = tuple(np.int_(cn))\n leye = tuple(np.int_(joints[jo.index(\"leye\")]))\n reye = tuple(np.int_(joints[jo.index(\"reye\")]))\n if np.min(reye) >= 0 and np.min(leye) >= 0 and np.min(cn) >= 0:\n cv2.line(imgs[0], cn, reye, color=255, 
thickness=thickness)\n cv2.line(imgs[1], cn, leye, color=255, thickness=thickness)\n\n img = np.stack(imgs, axis=-1)\n if img_shape[-1] == 1:\n img = np.mean(img, axis=-1)[:, :, None]\n return img\n\n @staticmethod\n def valid_joints(*joints):\n \"\"\" list of [N, 2] keypoints \"\"\"\n j = np.stack(joints)\n return (j >= 0).all()\n\n @staticmethod\n def normalize(imgs, coords, stickmen, jo, box_factor):\n out_imgs = list()\n out_stickmen = list()\n\n bs = len(imgs)\n for i in range(bs):\n img = imgs[i]\n joints = coords[i]\n stickman = stickmen[i]\n\n h, w = img.shape[:2]\n o_h = h\n o_w = w\n h = h // 2 ** box_factor\n w = w // 2 ** box_factor\n wh = np.array([w, h])\n wh = np.expand_dims(wh, 0)\n\n bparts = [\n [\"lshoulder\", \"lhip\", \"rhip\", \"rshoulder\"],\n [\"lshoulder\", \"rshoulder\", \"cnose\"],\n [\"lshoulder\", \"lelbow\"],\n [\"lelbow\", \"lwrist\"],\n [\"rshoulder\", \"relbow\"],\n [\"relbow\", \"rwrist\"],\n [\"lhip\", \"lknee\"],\n [\"rhip\", \"rknee\"],\n ]\n ar = 0.5\n\n part_imgs = list()\n part_stickmen = list()\n for bpart in bparts:\n part_img = np.zeros((h, w, 3))\n part_stickman = np.zeros((h, w, 3))\n M = VUNetStickman.get_crop(bpart, joints, jo, wh, o_w, o_h, ar)\n\n if M is not None:\n part_img = cv2.warpPerspective(\n img, M, (h, w), borderMode=cv2.BORDER_REPLICATE\n )\n part_stickman = cv2.warpPerspective(\n stickman, M, (h, w), borderMode=cv2.BORDER_REPLICATE\n )\n\n part_imgs.append(part_img)\n part_stickmen.append(part_stickman)\n img = np.concatenate(part_imgs, axis=2)\n stickman = np.concatenate(part_stickmen, axis=2)\n\n out_imgs.append(img)\n out_stickmen.append(stickman)\n out_imgs = np.stack(out_imgs)\n out_stickmen = np.stack(out_stickmen)\n return out_imgs, out_stickmen\n\n @staticmethod\n def get_crop(bpart, joints, jo, wh, o_w, o_h, ar=1.0):\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n\n # fall backs\n if not valid_joints(part_src):\n if bpart[0] == \"lhip\" and bpart[1] == \"lknee\":\n bpart = [\"lhip\"]\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n elif bpart[0] == \"rhip\" and bpart[1] == \"rknee\":\n bpart = [\"rhip\"]\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n elif (\n bpart[0] == \"lshoulder\"\n and bpart[1] == \"rshoulder\"\n and bpart[2] == \"cnose\"\n ):\n bpart = [\"lshoulder\", \"rshoulder\", \"rshoulder\"]\n bpart_indices = [jo.index(b) for b in bpart]\n part_src = np.float32(joints[bpart_indices])\n\n if not valid_joints(part_src):\n return None\n\n if part_src.shape[0] == 1:\n # leg fallback\n a = part_src[0]\n b = np.float32([a[0], o_h - 1])\n part_src = np.float32([a, b])\n\n if part_src.shape[0] == 4:\n pass\n elif part_src.shape[0] == 3:\n # lshoulder, rshoulder, cnose\n if bpart == [\"lshoulder\", \"rshoulder\", \"rshoulder\"]:\n segment = part_src[1] - part_src[0]\n normal = np.array([-segment[1], segment[0]])\n if normal[1] > 0.0:\n normal = -normal\n\n a = part_src[0] + normal\n b = part_src[0]\n c = part_src[1]\n d = part_src[1] + normal\n part_src = np.float32([a, b, c, d])\n else:\n assert bpart == [\"lshoulder\", \"rshoulder\", \"cnose\"]\n neck = 0.5 * (part_src[0] + part_src[1])\n neck_to_nose = part_src[2] - neck\n part_src = np.float32([neck + 2 * neck_to_nose, neck])\n\n # segment box\n segment = part_src[1] - part_src[0]\n normal = np.array([-segment[1], segment[0]])\n alpha = 1.0 / 2.0\n a = part_src[0] + alpha * normal\n b = part_src[0] - alpha * normal\n c = 
part_src[1] - alpha * normal\n d = part_src[1] + alpha * normal\n # part_src = np.float32([a,b,c,d])\n part_src = np.float32([b, c, d, a])\n else:\n assert part_src.shape[0] == 2\n\n segment = part_src[1] - part_src[0]\n normal = np.array([-segment[1], segment[0]])\n alpha = ar / 2.0\n a = part_src[0] + alpha * normal\n b = part_src[0] - alpha * normal\n c = part_src[1] - alpha * normal\n d = part_src[1] + alpha * normal\n part_src = np.float32([a, b, c, d])\n\n dst = np.float32([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])\n part_dst = np.float32(wh * dst)\n\n M = cv2.getPerspectiveTransform(part_src, part_dst)\n return M\n\n\ndef n_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef valid_joints(*joints):\n j = np.stack(joints)\n return (j >= 0).all()\n\n\ndef add_joints_to_img(img, kps, joints, color=[0, 0, 255]):\n # params\n border_safety = 25\n h, w = img.shape[0:2]\n r_1 = int(h / 250)\n\n # draw keypoints\n for kp in kps:\n x = np.min([w - border_safety, kp[0]]) # x\n y = np.min([h - border_safety, kp[1]]) # y\n rr, cc = circle(y, x, r_1)\n img[rr, cc, 0] = color[0]\n img[rr, cc, 1] = color[1]\n img[rr, cc, 2] = color[2]\n\n # draw joints\n for jo in joints:\n rr, cc, val = line_aa(\n int(kps[jo[0], 1]),\n int(kps[jo[0], 0]),\n int(kps[jo[1], 1]),\n int(kps[jo[1], 0]),\n ) # [jo_0_y, jo_0_x, jo_1_y, jo_1_x]\n\n img[rr, cc, 0] = color[0] * val\n img[rr, cc, 1] = color[1]\n img[rr, cc, 2] = color[2]\n\n return img\n\n\ndef get_bounding_boxes(kps, img_size, box_size):\n \"\"\" Return bounding box coordinates around keypoints in format XYXY.\n Simply add and subtract a fixed box_size from the keypoints.\n Keypoint format is [N, 2] and X, Y in range [0, 1].\n\n Note that bounding box coordinates are not clipped to the image size, yet\n \"\"\"\n kps *= np.array(img_size).reshape((-1, 2))\n half_width = box_size // 2\n offset = np.array([-half_width, -half_width, half_width, half_width])\n box_coordinates = np.concatenate([kps, kps], -1) + offset.reshape((-1, 4))\n box_list = np.split(box_coordinates, box_coordinates.shape[0], axis=0)\n box_list = [np.squeeze(b) for b in box_list]\n return box_list\n\n","sub_path":"supermariopy/stickman.py","file_name":"stickman.py","file_ext":"py","file_size_in_byte":11568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"83880580","text":"from __future__ import print_function\n\nimport os\nimport re\n\nimport pafy\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef extract_videos(html):\n soup = BeautifulSoup(html, 'html.parser')\n pattern = re.compile(r'/watch\\?v=')\n found = soup.find_all('a', 'yt-uix-tile-link', href=pattern)\n return [(x.text.encode('utf-8'), x.get('href')) for x in found]\n\n\ndef make_request(url, hdr):\n http_proxy = os.environ.get(\"HTTP_PROXY\")\n https_proxy = os.environ.get(\"HTTPS_PROXY\")\n ftp_proxy = os.environ.get(\"FTP_PROXY\")\n\n proxy_dict = {\n \"http\": http_proxy,\n \"https\": https_proxy,\n \"ftp\": ftp_proxy\n }\n\n req = requests.get(url, headers=hdr, proxies=proxy_dict)\n return req\n\n\ndef search_videos(query):\n response = make_request('https://www.youtube.com/results?search_query=' + query, {})\n return extract_videos(response.content)\n\n\ndef query_and_download(search):\n available = search_videos(search)\n title, video_link = available[0]\n title = title.decode('utf8')\n\n video = pafy.new('http://youtube.com/' + video_link)\n audiostreams = video.audiostreams\n formats = [(i, a.bitrate) for i, a in enumerate(audiostreams) if a.extension == 'm4a']\n index = max(formats, key=lambda item: item[1])[0]\n audiostreams[index].download()\n\n return title + '.m4a'\n\n\ndef download_song(name):\n search = name\n search = '+'.join(search.split())\n downloaded = query_and_download(search)\n return downloaded\n","sub_path":"music/static/music/py/download_music.py","file_name":"download_music.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"182953200","text":"'''\nGiven a Matrix of size M x N, you have to find out the \ntotal number of square sub-matrices that have all 1s in \nthem. \n\n1. Solved Using DP - O(MN) Time and Space\n2. DP State dp[i][j] = The number of square sub-matrices that end at dp[i][j].\n3. DP Expresion dp[i][j] = 1 + min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) if matrix[i][j] == 1\nelse 0\n4. Finally return the answer i.e, SUM of the dp matrix. \n\n'''\n\nclass Solution:\n def countSquares(self, arr: List[List[int]]) -> int:\n (m,n) = (len(arr), len(arr[0]))\n dp = [[0 for i in range(n)] for i in range(m)]\n sums = 0\n for i in range(m):\n for j in range(n):\n if arr[i][j] == 0:\n dp[i][j] = 0\n else:\n dp[i][j] = min(dp[i-1][j], dp[i-1][j-1], dp[i][j-1]) + 1\n sums += dp[i][j]\n \n \n return sums\n","sub_path":"Leetcode_30day_challenge/May_Challenge/Day21_Count_square_submatrices_of_1s.py","file_name":"Day21_Count_square_submatrices_of_1s.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"346168706","text":"import numpy\nimport theano.tensor as T\nfrom theano import function\n\n\"\"\"\ndemo for how to define function with both scalars and vectors in Theano\n\"\"\"\n\na = T.dmatrix('a')\nb = T.dmatrix('b')\nc = T.dmatrix('c')\nd = T.dmatrix('d')\n\np = T.dscalar('p')\nq = T.dscalar('q')\nr = T.dscalar('r')\ns = T.dscalar('s')\nu = T.dscalar('u')\n\ne = (((a*p)+(b-q)-(c+r))*d/s)*u\n\nf = function([a,b,c,d,p,q,r,s,u],e)\n\na_data = numpy.array([[1,1],[1,1]])\nb_data = numpy.array([[2,2],[2,2]])\nc_data = numpy.array([[5,5],[5,5]])\nd_data = numpy.array([[3,3],[3,3]])\n\nprint(\"temp data (a_data*1.0)=\", (a_data*1.0))\nprint(\"temp data (b_data-2.0)=\", (b_data-2.0))\nprint(\"temp immedia data = \", (((a_data*1.0)+(b_data-2.0)-(c_data+3.0))))\nprint(\"temp immedia data2 = \", (((a_data*1.0)+(b_data-2.0)-(c_data+3.0)))*d_data)\n\nprint(\"Expected: \", (((a_data*1.0)+(b_data-2.0)-(c_data+3.0))*d_data/4.0)*5.0)\nprint(\"Via Theano: \", f(a_data,b_data,c_data,d_data,1,2,3,4,5))\n","sub_path":"theano_demo/theano_sample3.py","file_name":"theano_sample3.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"566520931","text":"import numpy as np\nimport DanConst\nimport ModelBasa\nimport copy\n\n# базовый class GenDan - основа для генерации кода используемые в денс слоях и в сверточных\nclass GenDan(ModelBasa.ModelBasa):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.count_block = 10\n self.ds=dict()\n self.nActivation = len( DanConst.typeActivation)\n self.nDenseMin = 1\n self.nDenseMax = 200\n self.nDenseStep = 10\n self.nDropoutMax = 0.5\n self.nBlock = 12\n# self.bots=dict()\n dblock = kwargs.get('d', {}) \n\n if len(dblock)!=0:\n self.nDenseMin = dblock.get('nDenseMin', {self.nDenseMin})\n self.nDenseMax = dblock.get('nDenseMin', {self.nDenseMax})\n self.nDenseStep = dblock.get('nDenseStep', {self.nDenseStep})\n self.nDropoutMax = dblock.get('nDropoutMax', {self.nDropoutMax})\n self.nBlock = dblock.get('nBlock', {self.nBlock})\n\n# класс для генерации кода с денс слоями, по которым в дальнейшем будем строить нейронную сеть \n# и в которой будем менять коэффициенты\nclass GenDanDense(GenDan):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __form_dan(self):\n blocks=dict()\n# строка денс и строка активации\n blocks[0] = [np.random.randint(self.nDenseMin, self.nDenseMax) * self.nDenseStep, \n np.random.randint(0, self.nActivation)]\n# строка Dropout\n if np.random.randint(0, 2) == 1: # для Dropout\n blocks[1] = round(np.random.uniform(0.05, self.nDropoutMax), 2)\n# строка BatchNormalization\n if np.random.randint(0, 2) == 1: # для BatchNormalization\n blocks[2] =\"\"\n return blocks\n\n# генерируем модель random_- произвольное кол-во блоков, \n def CreateModel(self, id, random_, nblock = 0, blockrange=()): \n self.id = id # nblock max кол-во блоков\n nblock = self.count_block if nblock == 0 else nblock\n# формируем кол-во бллоков с какого по какой пример (1,10,1) с 1..10 с шагом 1 или (3,11,3)с 3 по 11 с шагом 3 \n if random_: \n n = len(blockrange)\n fun1 = lambda x: 1 if x <= 0 else x\n if n ==1: block_ = np.random.randint(1, fun1(blockrange[0])+1) \n elif n==2: block_ = np.random.randint(fun1(blockrange[0]), fun1(blockrange[1])+1) \n else: block_ = 1 \n else: \n block_ = nblock\n# генерим словарь блоков\n self.config_bot.clear()\n\n for it_block in range(block_):\n self.config_bot[it_block] = GenDanDense.__form_dan(self)\n return self.config_bot\n\n# заготовка для клонирования удачных моделей с последующим изменением\n def _ModificatAdd(self, *args):\n model_basa = args[0]\n add_layers = args[1]\n modificat = args[2]\n\n# клонирование удачных моделей с последующим изменением их коэфиициентов (мутация)\n def _ModificatMulti(self, basa, k=0.3):\n k= round(np.random.uniform(0.05, k), 3)\n xbasa = copy.deepcopy(basa)\n for key, val in basa.items():\n xbasa[key][0][0]=(val[0])[0]*(1+k*(1 if np.random.randint(0, 2)==1 else -1))\n return xbasa\n\nclass GenerationModels():\n def __init__(self, *args, **kwargs):\n self.Models = dict()\n self.nModels = args[0]\n self._genDanDense = GenDanDense(0)\n\n def CreateModels(self, random_, nblock = (1, 15, 1)):\n# формируем кол-во блоков \n self.Models.clear()\n n = len(nblock)\n fun1 = lambda x: 1 if x <= 0 else x\n _start = fun1(nblock[0] if n > 1 else 1)\n _end = fun1(nblock[1] if n >=2 else 1)+1\n _step = fun1(nblock[2] if n ==3 else 1)\n nlayers = _start\n naprav = 1\n\n# генерируем блоки по произвольно или по ленейно закону\n if random_: # произвольному закону\n for id_model in range(self.nModels+1):\n self.Models[id_model] = 
copy.deepcopy(self._genDanDense.CreateModel(id_model, \n True, _end, (_start, _end)))\n else: # генерируем по линейному закону\n for id_model in range(self.nModels+1):\n self.Models[id_model]=copy.deepcopy(self._genDanDense.CreateModel(id_model, random_, nlayers))\n nlayers += naprav*_step\n\n if nlayers > _end:\n nlayers = _end\n naprav = -1\n\n if nlayers < _start:\n nlayers = _start\n naprav = 1\n\n def __copy_basa(self, basa): # clear basa b find min max lay\n self.Models.clear()\n i=0\n ndense_min = 100\n ndense_max = 0\n# вычисляем количество блоков с лучшими результатами. Для мутации \n# к примеру получились три лучших результата с 1 блоком с 4 блоками и с 6 блоками\n# значит в следующей итерации мы должны создать сетки в которых будут от 1 блока до 6 \n# в последующей итерации кол-во блоков будет стремиться к одному значению\n# это видно в отчетах в файлах json\n for key, val in basa.items():\n n = len(val)\n ndense_min = min(n, ndense_min)\n ndense_max = max(n, ndense_max)\n self.Models[i] = copy.deepcopy(val)\n i+=1\n return ndense_min, ndense_max\n\n# создаем копию модулей\n def CreateCopyModels(self, basa, k):\n nmin, nmax = GenerationModels.__copy_basa(self, basa)\n for it in range(0, 3):\n self.Models[len(self.Models)] = self._genDanDense._ModificatMulti(self.Models[it], k)\n \n for id_model in range(len(self.Models)-1, self.nModels):\n self.Models[id_model] = copy.deepcopy(self._genDanDense.CreateModel(id_model, True, nmax, (nmin, nmax)))\n \n def Get(self):\n return self.Models","sub_path":"GenAlg13/GenDanDense.py","file_name":"GenDanDense.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"413615650","text":"# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm, TextInput\n\nfrom django_summernote.widgets import SummernoteWidget\nfrom suit.widgets import AutosizedTextarea\n\nfrom .models import Qna, Review, InstallCase, ForArtist, ForCowork\n\n\nclass QnaAdminForm(ModelForm):\n\n class Meta:\n model = Qna\n exclude = ('created_at', 'updated_at', 'user', 'confirm_ox')\n widgets = {\n 'title': TextInput(attrs={'class': 'input-xxlarge'}),\n 'url': TextInput(attrs={'class': 'input-xxlarge'}),\n 'contents': AutosizedTextarea(attrs={'rows': 3, 'class': 'input-xxlarge'}),\n 'answer': AutosizedTextarea(attrs={'rows': 3, 'class': 'input-xxlarge'}),\n }\n\n\nclass ReviewAdminForm(ModelForm):\n\n class Meta:\n model = Review\n exclude = ('created_at', 'updated_at', 'the_artwork',)\n widgets = {\n 'title': TextInput(attrs={'class': 'input-xxlarge'}),\n 'contents': SummernoteWidget()\n }\n\n\nclass InstallCaseAdminForm(ModelForm):\n\n class Meta:\n model = InstallCase\n exclude = ('created_at',)\n widgets = {\n 'install_details': SummernoteWidget()\n }\n\n\nclass ForArtistAdminForm(ModelForm):\n\n class Meta:\n model = ForArtist\n exclude = ('created_at', 'updated_at',)\n widgets = {\n 'about_artist': AutosizedTextarea(attrs={'rows': 3, 'class': 'input-xxlarge'}),\n 'about_artwork': AutosizedTextarea(attrs={'rows': 3, 'class': 'input-xxlarge'}),\n }\n\n\nclass ForCoworkAdminForm(ModelForm):\n\n class Meta:\n model = ForCowork\n exclude = ('created_at', 'updated_at', 'user',)\n widgets = {\n 'contents': AutosizedTextarea(attrs={'rows': 3, 'class': 'input-xxlarge'}),\n 'how_to': AutosizedTextarea(attrs={'rows': 3, 'class': 'input-xxlarge'}),\n }\n","sub_path":"cscenter/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"471281651","text":"\"\"\"Tests for and .\n\n\"\"\"\n__docformat__ = \"reStructuredText\"\n\nimport unittest\n\nimport ZConfig\n\nfrom ZConfig.tests.support import TestBase, CONFIG_BASE\n\n\ndef uppercase(value):\n return str(value).upper()\n\n\nclass TextBlockTestCase(TestBase):\n \"\"\"Tests of the text block support.\"\"\"\n\n def test_simple(self):\n schema = self.load_schema_text(\"\"\"\\\n \n \n \n \n \"\"\")\n conf = self.load_config_text(schema, \"\"\"\\\n \n Some long explanation goes here.\n \n \"\"\")\n self.assertEqual(conf.explanation.strip(),\n \"Some long explanation goes here.\")\n\n def test_with_datatype(self):\n schema = self.load_schema_text(\"\"\"\\\n \n \n \n \n \"\"\" % __name__)\n conf = self.load_config_text(schema, \"\"\"\\\n \n Some long explanation goes here.\n \n \"\"\")\n self.assertEqual(conf.explanation.strip(),\n \"SOME LONG EXPLANATION GOES HERE.\")\n\n def test_with_default(self):\n schema = self.load_schema_text(\"\"\"\\\n \n \n \n \n default value\n \n \n \n \"\"\")\n conf = self.load_config_text(schema, \"\")\n self.assertEqual(conf.explanation.strip(), \"default value\")\n\n def test_named(self):\n schema = self.load_schema_text(\"\"\"\\\n \n \n \n \n \"\"\")\n conf = self.load_config_text(schema, \"\"\"\\\n \n Some long explanation goes here.\n \n \"\"\")\n self.assertEqual(conf.explanation.strip(),\n \"Some long explanation goes here.\")\n\n\ndef test_suite():\n return unittest.makeSuite(TextBlockTestCase)\n","sub_path":"ZConfig/branches/fdrake-textblock/tests/test_textblock.py","file_name":"test_textblock.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"335686729","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# Q1.\tWrite a Python Program to Find the Factorial of a Number\nn = int(input(\"Enter a number: \"))\nfact = 1\nif n < 0:\n print(\"Please enter a positive integer\")\nelif n == 0:\n print(\"Factorial of 0 is 1\")\nelse:\n for i in range(1, n + 1):\n fact = fact * i\n print(\"Factorial of {0} is {1}\" .format(n , fact)) \n\n\n# In[3]:\n\n\n# Q2.\tWrite a Python Program to Display the multiplication Table\nn = int(input(\"Enter a number: \"))\nfor i in range(1,11):\n print(n * i)\n\n\n# In[4]:\n\n\n# Q3.\tWrite a Python Program to Print the Fibonacci sequence\nn = int(input(\"How many numbers? \"))\nn1, n2 = 0, 1\ncount = 0\nif n <= 0:\n print(\"Please enter a positive integer\")\nelif n == 1:\n print(\"Fibonacci sequence upto\",n,\":\")\n print(n1)\nelse:\n print(\"Fibonacci sequence:\")\n while count < n:\n print(n1)\n nth = n1 + n2\n n1 = n2\n n2 = nth\n count += 1\n\n\n# In[5]:\n\n\n# Q4.\tWrite a Python Program to Check Armstrong Number\nnum = int(input(\"Enter a number: \"))\nsum = 0\ntemp = num\nwhile temp > 0:\n digit = temp % 10\n sum += digit ** 3\n temp //= 10\nif num == sum:\n print(num,\"is an Armstrong number\")\nelse:\n print(num,\"is not an Armstrong number\")\n\n\n# In[6]:\n\n\n# Q5.\tWrite a Python Program to Find Armstrong Number in an Interval\nlower = 100\nupper = 2000\nfor num in range(lower, upper + 1):\n order = len(str(num))\n sum = 0\n temp = num\n while temp > 0:\n digit = temp % 10\n sum += digit ** order\n temp //= 10\n if num == sum:\n print(num)\n\n\n# In[7]:\n\n\n# Q6.\tWrite a Python Program to Find the Sum of Natural Numbers\nnum = int(input(\"Enter a number: \"))\nif num < 0:\n print(\"Enter a positive number\")\nelse:\n sum = 0\n while(num > 0):\n sum += num\n num -= 1\n print(\"The sum is\", sum)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Programming assignments/Programming Assignment 4.py","file_name":"Programming Assignment 4.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"279517284","text":"\"\"\"\"\"\nName: Thuan Tran\nCSS 458\nAngent-Base Modeling: Cane Toad\n\"\"\"\"\"\n\nimport CTConstant\nimport random\n\n\"\"\"\"\"\nThis is a class that is the representation of a toad\nThis class has multiple methods that allow the toad to do all sort of things\n\"\"\"\"\"\n\n\nclass Toad:\n \"\"\"\n Global variables for all toads\n \"\"\"\n # How many are surviving at the moment\n numberAlive = 0\n numberDead = 0\n numberMigrated =0\n\n # The grid all of toad, this will be composed of both Border and Desert class\n theCoordinates = None\n\n \"\"\"\"\"\n This is the constructor for the toad\n The constructor will initilize a random energy and water for the toad\n \"\"\"\"\"\n\n def __init__(self):\n\n # Every time the constructor is initialized\n Toad.numberAlive = Toad.numberAlive + 1\n # X and Y cordinatates of this toad\n self.currentX = 0\n # Start at the east Border\n self.currentY = CTConstant.SIZE - 1\n self.state = 1\n self.amtEat = 0 # Amount Eaten\n\n self.energy = CTConstant.AMT_MIN_INIT + random.uniform(0,CTConstant.INIT_RANGE)\n self.water = CTConstant.AMT_MIN_INIT + random.uniform(0,CTConstant.INIT_RANGE)\n\n self.availableFood = -1\n self.availableMoisture = -1\n self.food = -1\n self.moisture = -1\n\n \"\"\"\"\"\n This is the method that indidicate the toad may eat\n If it is hungry then it eat, else it won't\n \"\"\"\"\"\n\n def toadMayEat(self):\n if self.energy < CTConstant.WOULD_LIKE_EAT:\n self.eat()\n else:\n self.amtEat = 0\n\n \"\"\"\"\"\n This is the eat method for the toad\n The toad will get the food at the current location on the grid\n \"\"\"\"\"\n\n def eat(self):\n # Get the food at the current positon\n self.availableFood = Toad.theCoordinates[self.currentX][self.currentY].food\n self.amtEat = min(CTConstant.AMT_EAT, self.availableFood, 1 - self.energy)\n self.energy = self.energy + self.amtEat\n self.water = min(self.water + CTConstant.FRACTION_WATER * self.amtEat, 1.0)\n\n return self.amtEat\n\n \"\"\"\"\"\n This is the method that indicate ask if a toad want to drink some water\n \n \"\"\"\"\"\n\n def toadMayDrink(self):\n if self.water < CTConstant.WOULD_LIKE_DRINK:\n self.drink()\n\n \"\"\"\"\"\n This is the drink method of the toad \n The method will drink a portion of water or reset to 1 if the current water is over 1\n \"\"\"\"\"\n\n def drink(self):\n if self.water + CTConstant.AMT_DRINK <= 1.0:\n self.water = self.water + CTConstant.AMT_DRINK\n else:\n self.water = 1.0\n\n \"\"\"\"\"\n This is the method that move a toad\n The method will move a toad if it is hungry or thirsty or it might just jump around or stay where it is \n \"\"\"\"\"\n\n def toadMove(self):\n if self.water < CTConstant.WOULD_LIKE_DRINK:\n self.thirsty()\n else:\n if self.energy < CTConstant.WOULD_LIKE_EAT:\n self.lookForFood()\n else:\n # A chance to jump around ?\n if random.uniform(0, 1.0) < CTConstant.MAY_HOP:\n self.hopForFun()\n else:\n self.stayHere()\n\n \"\"\"\"\"\n This is the method that check if a toad is thirsty\n The method will check for its surrounding area and decided where to go to look for water\n \"\"\"\"\"\n def thirsty(self):\n\n if self.currentX == 0 or self.currentX == CTConstant.size -1 or self.currentY == 0 or self.currentY == CTConstant.size -1:\n return\n if Toad.theCoordinates[self.currentX][self.currentY].isAwp:\n self.stayHere()\n return\n if Toad.theCoordinates[self.currentX][self.currentY].isDesert:\n self.lookForMoisture()\n return\n if self.currentY == CTConstant.SIZE - 1 and 
Toad.theCoordinates[self.currentX][self.currentY - 1].theToad == None:\n self.moveW()\n return\n self.stayHere()\n\n \"\"\"\"\"\n This is the method that look for the moisture of of the surrounding Von-Neumann area\n The toad will jump to the one that has the largest moisture \n \"\"\"\"\"\n def lookForMoisture(self):\n currentMoisture = []\n # Get the moisture in East, South, West, North\n currentMoisture.append(Toad.theCoordinates[self.currentX][self.currentY + 1].moisture)\n\n currentMoisture.append(Toad.theCoordinates[self.currentX + 1][self.currentY].moisture)\n\n currentMoisture.append(Toad.theCoordinates[self.currentX][self.currentY - 1].moisture)\n\n currentMoisture.append(Toad.theCoordinates[self.currentX - 1][self.currentY].moisture)\n max = currentMoisture.index(max(currentMoisture))\n\n # Move the toad to the new location\n if max == 0:\n # Set the current location to be Toad Free\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentY += 1\n # Place the toad in the new location\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n if max == 1:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentX += 1\n\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n if max == 2:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentY -= 1\n\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n else:\n if max == 3:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentX -= 1\n\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n\n \"\"\"\"\"\n This is the method that update the avaialbe food and moisture at current location \n\n \"\"\"\"\"\n def here(self):\n self.availableFood = Toad.theCoordinates[self.currentX][self.currentY].food\n self.availableMoisture = Toad.theCoordinates[self.currentX][self.currentY].moisture\n\n \"\"\"\"\"\n This is the method that indicate the toad want to stay here\n The toad will also use water energy while sitting as well \n \"\"\"\"\"\n def stayHere(self):\n self.here()\n self.useWaterEnergySitting()\n\n \"\"\"\"\"\n This is the method that look for the moisture of of the surrounding Von-Neumann area\n The toad will jump to the one that has the largest moisture \n \"\"\"\"\"\n def hopHere(self):\n self.here()\n self.useWaterEnergyHopping()\n\n \"\"\"\"\"\n This method is used to move all the toad west\n \"\"\"\"\"\n def moveW(self):\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentY -= 1\n self.here()\n self.useWaterEnergyHopping()\n\n \"\"\"\"\"\n This method is used by the toad to look for nearby Food\n \"\"\"\"\"\n def lookForFood(self):\n # On a Desert\n if self.currentY == CTConstant.SIZE - 1 and Toad.theCoordinates[self.currentX][self.currentY - 1].isDesert:\n self.moveW()\n else:\n # At the start border\n if self.currentY == CTConstant.SIZE - 1:\n self.stayHere()\n else:\n self.goToFood()\n\n \"\"\"\"\"\n This method is used by the toad to go to the nearest food \n This method also work in the same way as lookForMoisture where it look for the surrounding Von-Newuman neighbors\n \"\"\"\"\"\n def goToFood(self):\n currentFood = []\n # Get the Food in East, South, West, North\n currentFood.append(Toad.theCoordinates[self.currentX][self.currentY + 1].food)\n\n 
currentFood.append(Toad.theCoordinates[self.currentX + 1][self.currentY].food)\n\n currentFood.append(Toad.theCoordinates[self.currentX][self.currentY - 1].food)\n\n currentFood.append(Toad.theCoordinates[self.currentX - 1][self.currentY].food)\n max = currentFood.index(max(currentFood))\n if max == 1:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentX += 1\n\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n if max == 2:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentY -= 1\n\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n else:\n if max == 3:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentX -= 1\n\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n else:\n if max == 0:\n Toad.theCoordinates[self.currentX][self.currentY].theToad = None\n self.currentY += 1\n Toad.theCoordinates[self.currentX][self.currentY].theToad = self\n self.here()\n self.useWaterEnergyHopping()\n\n \"\"\"\"\"\n This method is used by the toad when it does nothing and just sit around\n \"\"\"\"\"\n def useWaterEnergySitting(self):\n # Check if it is on the start border or on a desert\n if self.currentY == CTConstant.SIZE - 1:\n self.energy = self.energy - 0.5 * CTConstant.ENERGY_HOPPING\n self.water = self.water - 0.5 * CTConstant.WATER_HOPPING\n return\n if self.theCoordinates[self.currentX][self.currentY].isDesert:\n self.energy = self.energy - 0.5 * CTConstant.ENERGY_HOPPING\n self.water = self.water - 0.5 * CTConstant.WATER_HOPPING\n return\n\n self.energy = self.enery - 0.5 * CTConstant.ENERGY_HOPPING\n\n \"\"\"\"\"\n This method is used by the toad when it does nothing and just sit around\n \"\"\"\"\"\n def useWaterEnergyHopping(self):\n # Check if it on the start border or on the desert\n if self.currentY == CTConstant.SIZE - 1 or self.theCoordinates[self.currentX][self.currentY].isDesert:\n self.energy = self.energy - CTConstant.ENERGY_HOPPING\n self.water = self.water - CTConstant.WATER_HOPPING\n return\n self.energy = self.energy - CTConstant.ENERGY_HOPPING\n\n \"\"\"\"\"\n This method is used by the toad to hop around\n \"\"\"\"\"\n def hopForFun(self):\n # If the toad is on the start border and left is a desert then move west\n\n if self.currentY == CTConstant.SIZE - 1 and self.theCoordinates[self.currentX][self.currentY - 1].isDesert:\n self.moveW()\n return\n # Else just stay here\n if self.currentY == CTConstant.SIZE - 1:\n self.stayHere()\n return\n self.stayHere()\n\n \"\"\"\"\"\n This method is used to update the total number of toads that tis available at the moment\n Toad will die if the water, energy go below the threshold\n \"\"\"\"\"\n def changeCounts(self):\n if self.water < CTConstant.DESICCATE or self.energy < CTConstant.STARVE \\\n or self.currentY == 0:\n\n self.theCoordinates[self.currentX][self.currentY].theToad = None\n if self.currentY == 0:\n Toad.numberMigrated +=1\n else:\n Toad.numberDead +=1\n\n Toad.numberAlive -= 1\n\n self.state = 0\n return\n","sub_path":"CaneToad/Toad.py","file_name":"Toad.py","file_ext":"py","file_size_in_byte":11874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"310016183","text":"\"\"\"empty message\n\nRevision ID: 01577576887b\nRevises: a3517d3bcb2a\nCreate Date: 2017-08-03 10:35:43.786196\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '01577576887b'\ndown_revision = 'a3517d3bcb2a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.drop_table('qbo_blueprint_authentication_tokens')\n\n\ndef downgrade():\n raise Exception\n","sub_path":"migrations/versions/01577576887b_.py","file_name":"01577576887b_.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"293623371","text":"#!/usr/bin/env bash\nimport skfmm\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport os\n\nclass drone_path_planning_model():\n def __init__(self, n, centroid_x, centroid_y, radius):\n self.n = n\n self.word_size = 20.\n self.radius = radius\n self.centroid_x = centroid_x\n self.centroid_y = centroid_y\n self.path = []\n\n def planning(self):\n n = self.n\n word_size = self.word_size\n radius = self.radius\n center_x = self.centroid_x / word_size * n\n center_y = self.centroid_y / word_size * n\n grid_world = np.ones((n + 1, n + 1))\n mask = np.full(np.shape(grid_world), False, dtype=bool)\n for i in range(n + 1):\n for j in range(n + 1):\n if np.sqrt((i - center_x) ** 2 + (j - center_y) ** 2) <= radius / word_size * n:\n mask[i, j] = True\n grid_world_A = np.ma.MaskedArray(np.ones((n + 1, n + 1)), mask)\n grid_world_A[0, 0] = 0\n grid_world_B = np.ma.MaskedArray(np.ones((n + 1, n + 1)), mask)\n grid_world_B[n, n] = 0\n self.dist_map_A = skfmm.travel_time(grid_world_A, np.ones_like(grid_world), dx=word_size / n)\n self.shortest_distance = self.dist_map_A[n, n]\n self.dist_map_B = skfmm.travel_time(grid_world_B, np.ones_like(grid_world), dx=word_size / n)\n self.shortest_path()\n\n def shortest_path(self):\n path = []\n n = self.n\n i = 0\n j = 0\n dist_map = self.dist_map_A + self.dist_map_B\n while not (i == n and j == n):\n path.append([(i / self.n * self.word_size), (j / self.n * self.word_size)])\n dist = 400.\n next_point = [-1, -1]\n for next_i, next_j in [[i + 1, j], [i + 1, j + 1], [i, j + 1]]:\n if next_i <= n and next_j <= n and dist_map[next_i][next_j] < dist:\n dist = dist_map[next_i][next_j]\n next_point[0] = next_i\n next_point[1] = next_j\n if dist == 400.:\n print(\"Error Input\")\n break\n i = next_point[0]\n j = next_point[1]\n path.append([self.word_size, self.word_size])\n self.path = path\n\n def shortest_distance(self):\n return self.shortest_distance\n\n def visualize_path(self):\n data = np.array(self.path)\n storm_plt = plt.Circle((self.centroid_x, self.centroid_y), self.radius, color='grey')\n path_plt = plt.plot(*data.T, color='red')\n ax = plt.gca()\n ax.add_patch(storm_plt)\n ax.legend(path_plt, [\"Shortest Path\"])\n ax.text(self.centroid_x, self.centroid_y, 'Storm')\n ax.text(0, 0, 'Hospital A')\n ax.text(self.word_size, self.word_size, 'Hospital B')\n plt.show()\n\n def save_figure(self, img_path):\n data = np.array(self.path)\n storm_plt = plt.Circle((self.centroid_x, self.centroid_y), self.radius, color='grey')\n path_plt = plt.plot(*data.T, color='red')\n ax = plt.gca()\n ax.add_patch(storm_plt)\n ax.legend(path_plt, [\"Shortest Path\"])\n ax.text(self.centroid_x, self.centroid_y, 'Storm')\n ax.text(0, 0, 'Hospital A')\n ax.text(self.word_size, self.word_size, 'Hospital B')\n if os.path.isfile(img_path):\n os.remove(img_path)\n plt.savefig(img_path)\n plt.close('all')","sub_path":"path_planning_model.py","file_name":"path_planning_model.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"176891595","text":"##\n# Copyright 2015 TWO SIGMA OPEN SOURCE, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nimport os\nimport pandas as pd\nimport pytest\nimport shutil\nimport sys\nimport pandas.util.testing as pdt\n\n\noriginal_environ = dict(os.environ)\noriginal_sys_path = list(sys.path)\n\n\n@pytest.yield_fixture(scope='module', autouse=True)\ndef cleanup():\n yield\n reset_env()\n\n\ndef reset_env():\n os.environ = dict(original_environ)\n sys.path = list(original_sys_path)\n metastore_db = os.path.join(os.getcwd(), 'metastore_db')\n if os.path.isdir(metastore_db):\n shutil.rmtree(metastore_db)\n\n\n@pytest.fixture(scope='module')\ndef pyspark_types(pyspark):\n import pyspark.sql.types as pyspark_types\n return pyspark_types\n\n@pytest.fixture(scope='module')\ndef py4j(pyspark):\n import py4j\n return py4j\n\n\n@pytest.fixture(scope='module')\ndef flint(pyspark):\n import ts.flint\n return ts.flint\n\n\n@pytest.fixture(scope='module')\ndef summarizers(flint):\n from ts.flint import summarizers\n return summarizers\n\n\n@pytest.fixture(scope='module')\ndef windows(flint):\n from ts.flint import windows\n return windows\n\n\n@pytest.fixture(scope='module')\ndef clocks(flint):\n from ts.flint import clocks\n return clocks\n\n\n@pytest.fixture(scope='module')\ndef sqlContext(pyspark, sc):\n from pyspark.sql import HiveContext\n return HiveContext(sc)\n\n\n@pytest.fixture(scope='module')\ndef flintContext(pyspark, sqlContext):\n from ts.flint import FlintContext\n return FlintContext(sqlContext)\n\n\n@pytest.fixture(scope='module')\ndef tests_utils(flint):\n from . 
import utils\n return utils\n\n\ndef make_pdf(data, schema):\n d = {schema[i]:[row[i] for row in data] for i in range(len(schema))}\n return pd.DataFrame(data=d)[schema]\n\nintervals_data = [\n (1000,),\n (1100,),\n (1200,),\n (1300,)\n]\n\n\nforecast_data = [\n (1000, 7, 3.0,),\n (1000, 3, 5.0,),\n (1050, 3, -1.5,),\n (1050, 7, 2.0,),\n (1100, 3, -2.4,),\n (1100, 7, 6.4,),\n (1150, 3, 1.5,),\n (1150, 7, -7.9,),\n (1200, 3, 4.6,),\n (1200, 7, 1.4,),\n (1250, 3, -9.6,),\n (1250, 7, 6.0,)\n]\n\n\nprice_data = [\n (1000, 7, 0.5,),\n (1000, 3, 1.0,),\n (1050, 3, 1.5,),\n (1050, 7, 2.0,),\n (1100, 3, 2.5,),\n (1100, 7, 3.0,),\n (1150, 3, 3.5,),\n (1150, 7, 4.0,),\n (1200, 3, 4.5,),\n (1200, 7, 5.0,),\n (1250, 3, 5.5,),\n (1250, 7, 6.0,)\n]\n\n\nvol_data = [\n (1000, 7, 100,),\n (1000, 3, 200,),\n (1050, 3, 300,),\n (1050, 7, 400,),\n (1100, 3, 500,),\n (1100, 7, 600,),\n (1150, 3, 700,),\n (1150, 7, 800,),\n (1200, 3, 900,),\n (1200, 7, 1000,),\n (1250, 3, 1100,),\n (1250, 7, 1200,)\n]\n\n\nvol2_data = [\n (1000, 7, 100,),\n (1000, 7, 100,),\n (1000, 3, 200,),\n (1000, 3, 200,),\n (1050, 3, 300,),\n (1050, 3, 300,),\n (1050, 7, 400,),\n (1050, 7, 400,),\n (1100, 3, 500,),\n (1100, 7, 600,),\n (1100, 3, 500,),\n (1100, 7, 600,),\n (1150, 3, 700,),\n (1150, 7, 800,),\n (1150, 3, 700,),\n (1150, 7, 800,),\n (1200, 3, 900,),\n (1200, 7, 1000,),\n (1200, 3, 900,),\n (1200, 7, 1000,),\n (1250, 3, 1100,),\n (1250, 7, 1200,),\n (1250, 3, 1100,),\n (1250, 7, 1200,)\n]\n\n\nvol3_data = [\n (1000, 7, 100,),\n (1000, 7, 101,),\n (1000, 3, 200,),\n (1000, 3, 201,),\n (1050, 3, 300,),\n (1050, 3, 301,),\n (1050, 7, 400,),\n (1050, 7, 401,),\n (1100, 3, 500,),\n (1100, 7, 600,),\n (1100, 3, 501,),\n (1100, 7, 601,),\n (1150, 3, 700,),\n (1150, 7, 800,),\n (1150, 3, 701,),\n (1150, 7, 801,),\n (1200, 3, 900,),\n (1200, 7, 1000,),\n (1200, 3, 901,),\n (1200, 7, 1001,),\n (1250, 3, 1100,),\n (1250, 7, 1200,),\n (1250, 3, 1101,),\n (1250, 7, 1201,),\n]\n\n\n@pytest.fixture(scope='module')\ndef intervals(flintContext):\n return flintContext.read.pandas(make_pdf(intervals_data, ['time']))\n\n\n@pytest.fixture(scope='module')\ndef forecast(flintContext):\n return flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n\n\n@pytest.fixture(scope='module')\ndef price(flintContext):\n return flintContext.read.pandas(make_pdf(price_data, [\"time\", \"id\", \"price\"]))\n\n\n@pytest.fixture(scope='module')\ndef vol(flintContext):\n return flintContext.read.pandas(make_pdf(vol_data, [\"time\", \"id\", \"volume\"]))\n\n\n@pytest.fixture(scope='module')\ndef vol2(flintContext):\n return flintContext.read.pandas(make_pdf(vol2_data, [\"time\", \"id\", \"volume\"]))\n\n\n@pytest.fixture(scope='module')\ndef vol3(flintContext):\n return flintContext.read.pandas(make_pdf(vol3_data, [\"time\", \"id\", \"volume\"]))\n\n\n#--[ Tests ]--------------------------------------\n\ndef test_addColumnsForCycle(pyspark_types, tests_utils, price, vol3):\n expected_pdf = make_pdf([\n [1000, 7, 0.5, 1.0],\n [1000, 3, 1.0, 2.0],\n [1050, 3, 1.5, 3.0],\n [1050, 7, 2.0, 4.0],\n [1100, 3, 2.5, 5.0],\n [1100, 7, 3.0, 6.0],\n [1150, 3, 3.5, 7.0],\n [1150, 7, 4.0, 8.0],\n [1200, 3, 4.5, 9.0],\n [1200, 7, 5.0, 10.0],\n [1250, 3, 5.5, 11.0],\n [1250, 7, 6.0, 12.0],\n ], [\"time\", \"id\", \"price\", \"adjustedPrice\"])\n\n def fn(rows):\n size = len(rows)\n return {row:row.price*size for row in rows}\n\n new_pdf = price.addColumnsForCycle(\n {\"adjustedPrice\": (pyspark_types.DoubleType(), fn)}\n ).toPandas()\n 
tests_utils.assert_same(new_pdf, expected_pdf)\n\n expected_pdf = make_pdf([\n [1000, 7, 100, 301],\n [1000, 7, 101, 302],\n [1000, 3, 200, 601],\n [1000, 3, 201, 602],\n [1050, 7, 400, 1201],\n [1050, 7, 401, 1202],\n [1050, 3, 300, 901],\n [1050, 3, 301, 902],\n [1100, 7, 600, 1801],\n [1100, 7, 601, 1802],\n [1100, 3, 500, 1501],\n [1100, 3, 501, 1502],\n [1150, 7, 800, 2401],\n [1150, 7, 801, 2402],\n [1150, 3, 700, 2101],\n [1150, 3, 701, 2102],\n [1200, 7, 1000, 3001],\n [1200, 7, 1001, 3002],\n [1200, 3, 900, 2701],\n [1200, 3, 901, 2702],\n [1250, 7, 1200, 3601],\n [1250, 7, 1201, 3602],\n [1250, 3, 1100, 3301],\n [1250, 3, 1101, 3302],\n ], [\"time\", \"id\", \"volume\", \"totalVolume\"])\n\n def fn(rows):\n volsum = sum([row.volume for row in rows])\n return {row:row.volume + volsum for row in rows}\n\n new_pdf = vol3.addColumnsForCycle(\n {\"totalVolume\": (pyspark_types.LongType(), fn)},\n key=[\"id\"]\n ).toPandas()\n\n # Test API to support key as list.\n tests_utils.assert_same(\n new_pdf,\n vol3.addColumnsForCycle(\n {\"totalVolume\": (pyspark_types.LongType(), fn)},\n key=\"id\"\n ).toPandas()\n )\n\n # XXX: should just do tests_utils.assert_same(new_pdf, expected_pdf, \"with key\")\n # once https://gitlab.twosigma.com/analytics/huohua/issues/26 gets resolved.\n tests_utils.assert_same(\n new_pdf[new_pdf['id'] == 3].reset_index(drop=True),\n expected_pdf[expected_pdf['id'] == 3].reset_index(drop=True),\n \"with key 3\"\n )\n tests_utils.assert_same(\n new_pdf[new_pdf['id'] == 7].reset_index(drop=True),\n expected_pdf[expected_pdf['id'] == 7].reset_index(drop=True),\n \"with key 7\"\n )\n\ndef test_merge(pyspark_types, tests_utils, price):\n price1 = price.filter(price.time > 1100)\n price2 = price.filter(price.time <= 1100)\n merged_price = price1.merge(price2)\n tests_utils.assert_same(merged_price.toPandas(), price.toPandas())\n\ndef test_leftJoin(pyspark_types, tests_utils, price, vol):\n expected_pdf = make_pdf([\n (1000, 7, 0.5, 100,),\n (1000, 3, 1.0, 200,),\n (1050, 3, 1.5, 300,),\n (1050, 7, 2.0, 400,),\n (1100, 3, 2.5, 500,),\n (1100, 7, 3.0, 600,),\n (1150, 3, 3.5, 700,),\n (1150, 7, 4.0, 800,),\n (1200, 3, 4.5, 900,),\n (1200, 7, 5.0, 1000,),\n (1250, 3, 5.5, 1100,),\n (1250, 7, 6.0, 1200,)\n ], [\"time\", \"id\", \"price\", \"volume\"])\n\n new_pdf = price.leftJoin(vol, key=[\"id\"]).toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf)\n tests_utils.assert_same(new_pdf, price.leftJoin(vol, key=\"id\").toPandas())\n\n expected_pdf = make_pdf([\n (1000, 7, 0.5, 100),\n (1000, 3, 1.0, 200),\n (1050, 3, 1.5, None),\n (1050, 7, 2.0, None),\n (1100, 3, 2.5, 500),\n (1100, 7, 3.0, 600),\n (1150, 3, 3.5, 700),\n (1150, 7, 4.0, 800),\n (1200, 3, 4.5, 900),\n (1200, 7, 5.0, 1000),\n (1250, 3, 5.5, 1100),\n (1250, 7, 6.0, 1200),\n ], [\"time\", \"id\", \"price\", \"volume\"])\n\n new_pdf = price.leftJoin(vol.filter(vol.time != 1050), key=\"id\").toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf)\n\n\ndef test_futureLeftJoin(pyspark_types, tests_utils, price, vol):\n expected_pdf = make_pdf([\n (1000, 7, 0.5, 400, 1050),\n (1000, 3, 1.0, 300, 1050),\n (1050, 3, 1.5, 500, 1100),\n (1050, 7, 2.0, 600, 1100),\n (1100, 3, 2.5, 700, 1150),\n (1100, 7, 3.0, 800, 1150),\n (1150, 3, 3.5, 900, 1200),\n (1150, 7, 4.0, 1000, 1200),\n (1200, 3, 4.5, 1100, 1250),\n (1200, 7, 5.0, 1200, 1250),\n (1250, 3, 5.5, None, None),\n (1250, 7, 6.0, None, None),\n ], [\"time\", \"id\", \"price\", \"volume\", \"time2\"])\n\n new_pdf = price.futureLeftJoin(vol.withColumn(\"time2\", 
vol.time.cast(pyspark_types.LongType())),\n tolerance=pd.Timedelta(\"100ns\"),\n key=[\"id\"], strict_lookahead=True).toPandas()\n new_pdf1 = price.futureLeftJoin(vol.withColumn(\"time2\", vol.time.cast(pyspark_types.LongType())),\n tolerance=pd.Timedelta(\"100ns\"),\n key=\"id\", strict_lookahead=True).toPandas()\n tests_utils.assert_same(new_pdf, new_pdf1)\n tests_utils.assert_same(new_pdf, expected_pdf)\n\n\ndef test_groupByCycle(tests_utils, vol):\n expected_pdf1 = make_pdf([\n (1000, [(1000, 7, 100,), (1000, 3, 200,)]),\n (1050, [(1050, 3, 300,), (1050, 7, 400,)]),\n (1100, [(1100, 3, 500,), (1100, 7, 600,)]),\n (1150, [(1150, 3, 700,), (1150, 7, 800,)]),\n (1200, [(1200, 3, 900,), (1200, 7, 1000,)]),\n (1250, [(1250, 3, 1100,), (1250, 7, 1200,)]),\n ], [\"time\", \"rows\"])\n\n new_pdf1 = vol.groupByCycle().toPandas()\n tests_utils.assert_same(new_pdf1, expected_pdf1)\n\n\ndef test_groupByInterval(tests_utils, vol, intervals):\n id = vol.collect()\n\n expected_pdf = make_pdf([\n (1000, 7, [id[0], id[3]]),\n (1000, 3, [id[1], id[2]]),\n (1100, 7, [id[5], id[7]]),\n (1100, 3, [id[4], id[6]]),\n (1200, 7, [id[9], id[11]]),\n (1200, 3, [id[8], id[10]]),\n ], [\"time\", \"id\", \"rows\"])\n\n new_pdf = vol.groupByInterval(intervals, key=[\"id\"]).toPandas()\n new_pdf1 = vol.groupByInterval(intervals, key=\"id\").toPandas()\n tests_utils.assert_same(new_pdf, new_pdf1)\n\n # XXX: should just do tests_utils.assert_same(new_pdf, expected_pdf)\n # once https://gitlab.twosigma.com/analytics/huohua/issues/26 gets resolved.\n tests_utils.assert_same(\n new_pdf[new_pdf['id'] == 3].reset_index(drop=True),\n expected_pdf[expected_pdf['id'] == 3].reset_index(drop=True),\n )\n tests_utils.assert_same(\n new_pdf[new_pdf['id'] == 7].reset_index(drop=True),\n expected_pdf[expected_pdf['id'] == 7].reset_index(drop=True),\n )\n\n\ndef test_summarizeCycles(summarizers, tests_utils, vol, vol2):\n expected_pdf1 = make_pdf([\n (1000, 300.0,),\n (1050, 700.0,),\n (1100, 1100.0,),\n (1150, 1500.0,),\n (1200, 1900.0,),\n (1250, 2300.0,),\n ], [\"time\", \"volume_sum\"])\n new_pdf1 = vol.summarizeCycles(summarizers.sum(\"volume\")).toPandas()\n tests_utils.assert_same(new_pdf1, expected_pdf1)\n\n expected_pdf2 = make_pdf([\n (1000, 7, 200.0),\n (1000, 3, 400.0),\n (1050, 3, 600.0),\n (1050, 7, 800.0),\n (1100, 3, 1000.0),\n (1100, 7, 1200.0),\n (1150, 3, 1400.0),\n (1150, 7, 1600.0),\n (1200, 3, 1800.0),\n (1200, 7, 2000.0),\n (1250, 3, 2200.0),\n (1250, 7, 2400.0),\n ], [\"time\", \"id\", \"volume_sum\"])\n new_pdf2 = vol2.summarizeCycles(summarizers.sum(\"volume\"), key=\"id\").toPandas()\n tests_utils.assert_same(new_pdf2, expected_pdf2)\n\n\ndef test_summarizeIntervals(flintContext, tests_utils, summarizers, vol):\n clock = flintContext.read.pandas(make_pdf([\n (1000,),\n (1100,),\n (1200,),\n (1300,),\n ], [\"time\"]))\n\n new_pdf1 = vol.summarizeIntervals(clock, summarizers.sum(\"volume\")).toPandas()\n expected_pdf1 = make_pdf([\n (1000, 1000.0),\n (1100, 2600.0),\n (1200, 4200.0),\n ], [\"time\", \"volume_sum\"])\n tests_utils.assert_same(new_pdf1, expected_pdf1)\n\n new_pdf2 = vol.summarizeIntervals(clock, summarizers.sum(\"volume\"), key=\"id\").toPandas()\n expected_pdf2 = make_pdf([\n (1000, 7, 500.0),\n (1000, 3, 500.0),\n (1100, 3, 1200.0),\n (1100, 7, 1400.0),\n (1200, 3, 2000.0),\n (1200, 7, 2200.0),\n ], [\"time\", \"id\", \"volume_sum\"])\n\n tests_utils.assert_same(new_pdf2, expected_pdf2)\n\n\ndef test_summarizeWindows(flintContext, tests_utils, windows, summarizers, vol):\n new_pdf1 = 
vol.summarizeWindows(windows.past_absolute_time('99ns'), summarizers.sum(\"volume\")).toPandas()\n    expected_pdf1 = make_pdf([\n        (1000, 7, 100, 300.0),\n        (1000, 3, 200, 300.0),\n        (1050, 3, 300, 1000.0),\n        (1050, 7, 400, 1000.0),\n        (1100, 3, 500, 1800.0),\n        (1100, 7, 600, 1800.0),\n        (1150, 3, 700, 2600.0),\n        (1150, 7, 800, 2600.0),\n        (1200, 3, 900, 3400.0),\n        (1200, 7, 1000, 3400.0),\n        (1250, 3, 1100, 4200.0),\n        (1250, 7, 1200, 4200.0),\n    ], [\"time\", \"id\", \"volume\", \"volume_sum\"])\n    tests_utils.assert_same(new_pdf1, expected_pdf1)\n\n    new_pdf2 = (vol.summarizeWindows(windows.past_absolute_time('99ns'),\n                                     summarizers.sum(\"volume\"),\n                                     key=\"id\").toPandas())\n    expected_pdf2 = make_pdf([\n        (1000, 7, 100, 100.0),\n        (1000, 3, 200, 200.0),\n        (1050, 3, 300, 500.0),\n        (1050, 7, 400, 500.0),\n        (1100, 3, 500, 800.0),\n        (1100, 7, 600, 1000.0),\n        (1150, 3, 700, 1200.0),\n        (1150, 7, 800, 1400.0),\n        (1200, 3, 900, 1600.0),\n        (1200, 7, 1000, 1800.0),\n        (1250, 3, 1100, 2000.0),\n        (1250, 7, 1200, 2200.0),\n    ], [\"time\", \"id\", \"volume\", \"volume_sum\"])\n    tests_utils.assert_same(new_pdf2, expected_pdf2)\n\n    interval_with_id = flintContext.read.pandas(make_pdf([\n        (1000, 3),\n        (1000, 7),\n        (1050, 3),\n        (1050, 7),\n        (1100, 3),\n        (1150, 3),\n        (1150, 7),\n        (1200, 3),\n        (1200, 7),\n        (1250, 7),\n    ], [\"time\", \"id\"]))\n\n    new_pdf3 = (interval_with_id.summarizeWindows(windows.past_absolute_time('99ns'),\n                                                  summarizers.sum(\"volume\"),\n                                                  key=\"id\",\n                                                  other=vol).toPandas())\n    expected_pdf3 = make_pdf([\n        (1000, 3, 200.0),\n        (1000, 7, 100.0),\n        (1050, 3, 500.0),\n        (1050, 7, 500.0),\n        (1100, 3, 800.0),\n        (1150, 3, 1200.0),\n        (1150, 7, 1400.0),\n        (1200, 3, 1600.0),\n        (1200, 7, 1800.0),\n        (1250, 7, 2200.0),\n    ], [\"time\", \"id\", \"volume_sum\"])\n    tests_utils.assert_same(new_pdf3, expected_pdf3)\n\n\ndef test_summary_sum(summarizers, tests_utils, vol):\n    expected_pdf = make_pdf([\n        (0, 7800.0,)\n    ], [\"time\", \"volume_sum\"])\n\n    new_pdf = vol.summarize(summarizers.sum(\"volume\")).toPandas()\n    tests_utils.assert_same(new_pdf, expected_pdf)\n\n    expected_pdf = make_pdf([\n        (0, 7, 4100.0,),\n        (0, 3, 3700.0,),\n    ], [\"time\", \"id\", \"volume_sum\"])\n\n    new_pdf = vol.summarize(summarizers.sum(\"volume\"), key=[\"id\"]).toPandas()\n    new_pdf1 = vol.summarize(summarizers.sum(\"volume\"), key=\"id\").toPandas()\n    tests_utils.assert_same(new_pdf, new_pdf1)\n\n    # XXX: should just do tests_utils.assert_same(new_pdf, expected_pdf, \"by id\")\n    # once https://gitlab.twosigma.com/analytics/huohua/issues/26 gets resolved.\n    tests_utils.assert_same(\n        new_pdf[new_pdf['id'] == 3].reset_index(drop=True),\n        expected_pdf[expected_pdf['id'] == 3].reset_index(drop=True),\n        \"by id 3\"\n    )\n    tests_utils.assert_same(\n        new_pdf[new_pdf['id'] == 7].reset_index(drop=True),\n        expected_pdf[expected_pdf['id'] == 7].reset_index(drop=True),\n        \"by id 7\"\n    )\n\n\ndef test_summary_zscore(summarizers, tests_utils, price):\n    expected_pdf = make_pdf([\n        (0, 1.5254255396193801,)\n    ], [\"time\", \"price_zScore\"])\n\n    new_pdf = price.summarize(summarizers.zscore(\"price\", in_sample=True)).toPandas()\n    tests_utils.assert_same(new_pdf, expected_pdf, \"in-sample\")\n\n    expected_pdf = make_pdf([\n        (0, 1.8090680674665818,)\n    ], [\"time\", \"price_zScore\"])\n\n    new_pdf = price.summarize(summarizers.zscore(\"price\", in_sample=False)).toPandas()\n    tests_utils.assert_same(new_pdf, expected_pdf, \"out-of-sample\")\n\n\ndef test_summary_nth_moment(summarizers, tests_utils, price):\n    moments = [price.summarize(summarizers.nth_moment(\"price\", 
i), key=\"id\").collect() for i in range(5)]\n for m in moments:\n m.sort(key=lambda r: r['id'])\n moments = [[r[\"price_{}thMoment\".format(i)] for r in moments[i]] for i in range(len(moments))]\n\n tests_utils.assert_same(moments[0][0], 1.0, \"moment 0: 0\")\n tests_utils.assert_same(moments[0][1], 1.0, \"moment 0: 1\")\n\n tests_utils.assert_same(moments[1][0], 3.0833333333333335, \"moment 1: 1\")\n tests_utils.assert_same(moments[1][1], 3.416666666666667, \"moment 1: 0\")\n\n tests_utils.assert_same(moments[2][0], 12.041666666666668, \"moment 2: 1\")\n tests_utils.assert_same(moments[2][1], 15.041666666666666, \"moment 2: 0\")\n\n tests_utils.assert_same(moments[3][0], 53.39583333333333, \"moment 3: 1\")\n tests_utils.assert_same(moments[3][1], 73.35416666666667, \"moment 3: 0\")\n\n tests_utils.assert_same(moments[4][0], 253.38541666666669, \"moment 4: 1\")\n tests_utils.assert_same(moments[4][1], 379.0104166666667, \"moment 4: 0\")\n\n\ndef test_summary_nth_central_moment(summarizers, tests_utils, price):\n moments = [price.summarize(summarizers.nth_central_moment(\"price\", i), key=\"id\").collect() for i in range(1,5)]\n for m in moments:\n m.sort(key=lambda r: r['id'])\n moments = [[r[\"price_{}thCentralMoment\".format(i+1)] for r in moments[i]] for i in range(len(moments))]\n\n tests_utils.assert_same(moments[0][0], 0.0, \"moment 1: 0\")\n tests_utils.assert_same(moments[0][1], 0.0, \"moment 1: 1\")\n\n tests_utils.assert_same(moments[1][0], 2.534722222222222, \"moment 2: 1\")\n tests_utils.assert_same(moments[1][1], 3.3680555555555554, \"moment 2: 0\")\n\n tests_utils.assert_same(moments[2][0], 0.6365740740740735, \"moment 3: 1\")\n tests_utils.assert_same(moments[2][1], -1.0532407407407405, \"moment 3: 0\")\n\n tests_utils.assert_same(moments[3][0], 10.567563657407407, \"moment 4: 1\")\n tests_utils.assert_same(moments[3][1], 21.227285879629633, \"moment 4: 0\")\n\n\ndef test_summary_correlation(pyspark, summarizers, tests_utils, price, forecast):\n joined = price.leftJoin(forecast, key=\"id\")\n joined = (joined\n .withColumn(\"price2\", joined.price)\n .withColumn(\"price3\", -joined.price)\n .withColumn(\"price4\", 2 * joined.price)\n .withColumn(\"price5\", pyspark.sql.functions.lit(0)))\n\n def price_correlation(column):\n corr = joined.summarize(summarizers.correlation(\"price\", column), key=[\"id\"])\n tests_utils.assert_same(\n corr.toPandas(),\n joined.summarize(summarizers.correlation([\"price\"], [column]), key=\"id\").toPandas()\n )\n tests_utils.assert_same(\n corr.toPandas(),\n joined.summarize(summarizers.correlation([\"price\", column]), key=\"id\").toPandas()\n )\n return corr.collect()\n\n results = [price_correlation(\"price{}\".format(i)) for i in range(2,6)]\n for r in results:\n r.sort(key=lambda r: r['id'])\n results.append(price_correlation(\"forecast\"))\n\n tests_utils.assert_same(results[0][0][\"price_price2_correlation\"], 1.0, \"price2: 1\")\n tests_utils.assert_same(results[0][1][\"price_price2_correlation\"], 1.0, \"price2: 0\")\n\n tests_utils.assert_same(results[1][0][\"price_price3_correlation\"], -1.0, \"price3: 1\")\n tests_utils.assert_same(results[1][1][\"price_price3_correlation\"], -1.0, \"price3: 0\")\n\n tests_utils.assert_same(results[2][0][\"price_price4_correlation\"], 1.0, \"price4: 1\")\n tests_utils.assert_same(results[2][1][\"price_price4_correlation\"], 1.0, \"price4: 0\")\n\n tests_utils.assert_true(np.isnan(results[3][0][\"price_price5_correlation\"]), \"price5: 1\")\n 
tests_utils.assert_true(np.isnan(results[3][1][\"price_price5_correlation\"]), \"price5: 0\")\n\n tests_utils.assert_same(results[4][0][\"price_forecast_correlation\"], -0.47908485866330514, \"forecast: 1\")\n tests_utils.assert_same(results[4][0][\"price_forecast_correlationTStat\"], -1.0915971793294055, \"forecastTStat: 1\")\n tests_utils.assert_same(results[4][1][\"price_forecast_correlation\"], -0.021896121374023046, \"forecast: 0\")\n tests_utils.assert_same(results[4][1][\"price_forecast_correlationTStat\"], -0.04380274440368827, \"forecastTStat: 0\")\n\n\ndef test_summary_linearRegression(pyspark, summarizers, tests_utils, price, forecast):\n \"\"\"\n Test the python binding for linearRegression. This does NOT test the correctness of the regression.\n \"\"\"\n joined = price.leftJoin(forecast, key=\"id\")\n result = joined.summarize(summarizers.linear_regression(\"price\", [\"forecast\"])).collect()\n\ndef test_summary_max(pyspark, summarizers, tests_utils, forecast):\n expected_pdf = make_pdf([\n (0, 6.4,)\n ], [\"time\", \"forecast_max\"])\n result = forecast.summarize(summarizers.max(\"forecast\")).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_summary_mean(pyspark, summarizers, tests_utils, price, forecast):\n expected_pdf = make_pdf([\n (0, 3.25,)\n ], [\"time\", \"price_mean\"])\n joined = price.leftJoin(forecast, key=\"id\")\n result = joined.summarize(summarizers.mean(\"price\")).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_summary_weighted_mean(pyspark, summarizers, tests_utils, price, vol):\n expected_pdf = make_pdf([\n (0, 4.166667, 1.547494, 8.237545, 12,)\n ], [\"time\", \"price_volume_weightedMean\", \"price_volume_weightedStandardDeviation\", \"price_volume_weightedTStat\", \"price_volume_observationCount\"])\n joined = price.leftJoin(vol, key=\"id\")\n result = joined.summarize(summarizers.weighted_mean(\"price\", \"volume\")).toPandas()\n\n pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_summary_min(pyspark, summarizers, tests_utils, forecast):\n expected_pdf = make_pdf([\n (0, -9.6,)\n ], [\"time\", \"forecast_min\"])\n result = forecast.summarize(summarizers.min(\"forecast\")).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_summary_quantile(sc, summarizers, forecast):\n expected_pdf = make_pdf([\n (0, -2.22, 1.75)\n ], [\"time\", \"forecast_0.2quantile\", \"forecast_0.5quantile\"])\n result = forecast.summarize(summarizers.quantile(sc, \"forecast\", (0.2, 0.5))).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_summary_stddev(pyspark, summarizers, tests_utils, price, forecast):\n expected_pdf = make_pdf([\n (0, 1.802775638,)\n ], [\"time\", \"price_stddev\"])\n joined = price.leftJoin(forecast, key=\"id\")\n result = joined.summarize(summarizers.stddev(\"price\")).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\n\ndef test_summary_variance(pyspark, summarizers, tests_utils, price, forecast):\n expected_pdf = make_pdf([\n (0, 3.25,)\n ], [\"time\", \"price_variance\"])\n joined = price.leftJoin(forecast, key=\"id\")\n result = joined.summarize(summarizers.variance(\"price\")).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\n\ndef test_summary_covariance(pyspark, summarizers, tests_utils, price, forecast):\n expected_pdf = make_pdf([\n (0, -1.802083333,)\n ], [\"time\", \"price_forecast_covariance\"])\n joined = price.leftJoin(forecast, key=\"id\")\n result = joined.summarize(summarizers.covariance(\"price\", \"forecast\")).toPandas()\n 
pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_summary_compose(pyspark, summarizers, tests_utils, price):\n expected_pdf = make_pdf([\n (0, 6.0, 0.5, 3.25, 1.802775638,)\n ], [\"time\", \"price_max\", \"price_min\", \"price_mean\", \"price_stddev\"])\n\n result = price.summarize([summarizers.max(\"price\"),\n summarizers.min(\"price\"),\n summarizers.mean(\"price\"),\n summarizers.stddev(\"price\")]).toPandas()\n pdt.assert_frame_equal(result, expected_pdf)\n\ndef test_addSummaryColumns(summarizers, tests_utils, vol):\n expected_pdf = make_pdf([\n (1000, 7, 100, 100.0),\n (1000, 3, 200, 300.0),\n (1050, 3, 300, 600.0),\n (1050, 7, 400, 1000.0),\n (1100, 3, 500, 1500.0),\n (1100, 7, 600, 2100.0),\n (1150, 3, 700, 2800.0),\n (1150, 7, 800, 3600.0),\n (1200, 3, 900, 4500.0),\n (1200, 7, 1000, 5500.0),\n (1250, 3, 1100, 6600.0),\n (1250, 7, 1200, 7800.0),\n ], [\"time\", \"id\", \"volume\", \"volume_sum\"])\n\n new_pdf = vol.addSummaryColumns(summarizers.sum(\"volume\")).toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf)\n\n expected_pdf = make_pdf([\n (1000, 7, 100, 100.0),\n (1000, 3, 200, 200.0),\n (1050, 3, 300, 500.0),\n (1050, 7, 400, 500.0),\n (1100, 3, 500, 1000.0),\n (1100, 7, 600, 1100.0),\n (1150, 3, 700, 1700.0),\n (1150, 7, 800, 1900.0),\n (1200, 3, 900, 2600.0),\n (1200, 7, 1000, 2900.0),\n (1250, 3, 1100, 3700.0),\n (1250, 7, 1200, 4100.0),\n ], [\"time\", \"id\", \"volume\", \"volume_sum\"])\n\n new_pdf = vol.addSummaryColumns(summarizers.sum(\"volume\"), \"id\").toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf, \"with key\")\n\n\ndef test_addWindows(tests_utils, windows, vol):\n id = vol.collect()\n\n expected_pdf = make_pdf([\n (1000, 7, 100, [id[0], id[1]]),\n (1000, 3, 200, [id[0], id[1]]),\n (1050, 3, 300, [id[0], id[1], id[2], id[3]]),\n (1050, 7, 400, [id[0], id[1], id[2], id[3]]),\n (1100, 3, 500, [id[2], id[3], id[4], id[5]]),\n (1100, 7, 600, [id[2], id[3], id[4], id[5]]),\n (1150, 3, 700, [id[4], id[5], id[6], id[7]]),\n (1150, 7, 800, [id[4], id[5], id[6], id[7]]),\n (1200, 3, 900, [id[6], id[7], id[8], id[9]]),\n (1200, 7, 1000, [id[6], id[7], id[8], id[9]]),\n (1250, 3, 1100, [id[8], id[9], id[10], id[11]]),\n (1250, 7, 1200, [id[8], id[9], id[10], id[11]]),\n ], [\"time\", \"id\", \"volume\", \"window_past_50ns\"])\n\n new_pdf = vol.addWindows(windows.past_absolute_time(\"50ns\")).toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf)\n\n\ndef test_shiftTime(tests_utils, price):\n expected_pdf = price.toPandas()\n expected_pdf.time += 1000\n new_pdf = price.shiftTime(pd.Timedelta(\"1000ns\")).toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf, \"forwards\")\n\n expected_pdf = price.toPandas()\n expected_pdf.time -= 1000\n new_pdf = price.shiftTime(pd.Timedelta(\"1000ns\"), backwards=True).toPandas()\n tests_utils.assert_same(new_pdf, expected_pdf, \"backwards\")\n\n\n@pytest.mark.net\ndef test_read_dataframe_begin_end(sqlContext, flintContext, tests_utils):\n test_url=\"hdfs:///user/tsram/spark_example_data/tsdata/price/ts/datasys/6558\"\n df = sqlContext.read.parquet(test_url)\n\n begin = \"20100101\"\n end = \"20110101\"\n\n begin_nanos = tests_utils.to_nanos(begin)\n end_nanos = tests_utils.to_nanos(end)\n\n df = flintContext.read.dataframe(df, begin, end)\n df2 = df.filter(df.time >= begin_nanos).filter(df.time < end_nanos)\n\n assert(df.count() == df2.count())\n\n\ndef test_uniform_clocks(sqlContext, clocks):\n df = clocks.uniform(sqlContext, '1d', '0s', '2016-11-07', '2016-11-17')\n assert(df.count() == 11)\n # the 
last timestamp should be 17 Nov 2016 00:00:00 GMT\n    assert(df.collect()[-1]['time'] == 1479340800000000000)\n\n\ndef test_from_tsrdd(sqlContext, flintContext, flint):\n    df = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    tsrdd = df.timeSeriesRDD\n    df2 = flint.TimeSeriesDataFrame._from_tsrdd(tsrdd, sqlContext)\n    tsrdd2 = df2.timeSeriesRDD\n\n    assert(tsrdd.count() == tsrdd2.count())\n    assert(tsrdd.orderedRdd().getNumPartitions() == tsrdd2.orderedRdd().getNumPartitions())\n\n\ndef test_with_column_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.withColumn(\"neg_forecast\", -df.forecast), True)\n\n\ndef test_drop_column_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.drop(\"forecast\"), True)\n\n\ndef test_filter_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.filter(df.id == 3), True)\n\n\ndef test_select_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.select(\"time\", \"id\"), True)\n\n\ndef test_with_column_renamed_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.withColumnRenamed(\"forecast\", \"signal\"), True)\n\n\ndef test_replace_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.replace([3, 7], [4, 8], 'id'), True)\n\n\ndef test_na_preserve_order(sqlContext, flintContext):\n    from pyspark.sql.functions import lit\n    from pyspark.sql.types import StringType\n\n    def create_dataframe():\n        return (flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n                .withColumn(\"null_column\", lit(None).cast(StringType())))\n\n    shared_test_partition_preserving(flintContext, lambda df: df.fillna(\"v1\"), True, create_dataframe)\n    shared_test_partition_preserving(flintContext, lambda df: df.dropna(), True, create_dataframe)\n    shared_test_partition_preserving(flintContext, lambda df: df.fillna(\"v1\").replace(\"v1\", \"v2\", 'null_column'), True, create_dataframe)\n\n\ndef test_with_column_udf_preserve_order(sqlContext, flintContext):\n    def with_udf_column(df):\n        from pyspark.sql.types import DoubleType\n        from pyspark.sql.functions import udf\n        times_two = udf(lambda x: x * 2, DoubleType())\n        return df.withColumn(\"forecast2\", times_two(df.forecast))\n    shared_test_partition_preserving(flintContext, with_udf_column, True)\n\n\ndef test_sort_dont_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.orderBy(\"id\"), False)\n\n\ndef test_repartition_dont_preserve_order(sqlContext, flintContext):\n    shared_test_partition_preserving(flintContext, lambda df: df.repartition(df.rdd.getNumPartitions() * 2), False)\n\n\ndef test_select_aggregate_dont_preserve_order(sqlContext, flintContext):\n    from pyspark.sql.functions import sum\n    shared_test_partition_preserving(flintContext, lambda df: df.select(sum('forecast')), False)\n\n\ndef test_with_window_column_dont_preserve_order(sqlContext, flintContext):\n    def with_window_column(df):\n        from pyspark.sql.window import Window\n        from pyspark.sql.functions import percent_rank\n        windowSpec = Window.partitionBy(df['id']).orderBy(df['forecast'])\n        return df.withColumn(\"r\", percent_rank().over(windowSpec))\n    shared_test_partition_preserving(flintContext, with_window_column, False)\n\n\ndef test_df_lazy(flintContext):\n    df_lazy 
= flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    assert(df_lazy._is_sorted is True)\n    assert(df_lazy._tsrdd_part_info is None)\n\n\ndef test_df_eager(flintContext):\n    df_eager = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df_eager.timeSeriesRDD\n    assert(df_eager._is_sorted)\n    assert(df_eager._lazy_tsrdd is not None)\n    assert(df_eager._tsrdd_part_info is None)\n\n\ndef test_df_joined(flintContext):\n    df = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df_joined = df.leftJoin(df, right_alias=\"right\")\n    assert(df_joined._is_sorted)\n    assert(df_joined._tsrdd_part_info is not None)\n    assert(df_joined._jpkg.PartitionPreservingOperation.isPartitionPreservingDataFrame(df_joined._jdf))\n\n\ndef test_df_cached(flintContext):\n    df_cached = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df_cached.cache()\n    df_cached.count()\n    assert(df_cached._is_sorted)\n    assert(df_cached._tsrdd_part_info is None)\n    assert(df_cached._jpkg.PartitionPreservingOperation.isPartitionPreservingDataFrame(df_cached._jdf))\n\n\ndef test_df_cached_joined(flintContext):\n    df_cached = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df_cached.cache()\n    df_cached.count()\n    df_cached_joined = df_cached.leftJoin(df_cached, right_alias=\"right\")\n    assert(df_cached_joined._is_sorted)\n    assert(df_cached_joined._tsrdd_part_info is not None)\n    assert(df_cached_joined._jpkg.PartitionPreservingOperation.isPartitionPreservingDataFrame(df_cached_joined._jdf))\n\n\ndef test_df_orderBy(flintContext):\n    df = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df = df.orderBy(\"time\")\n    assert(not df._is_sorted)\n    assert(df._tsrdd_part_info is None)\n\n\ndef test_withColumn_time(flintContext):\n    from ts.flint import TimeSeriesDataFrame\n    from pyspark.sql import DataFrame\n\n    df = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df = df.withColumn(\"time\", df.time * 2)\n    assert(not isinstance(df, TimeSeriesDataFrame))\n    assert(isinstance(df, DataFrame))\n\n\ndef test_describe(flintContext):\n    from ts.flint import TimeSeriesDataFrame\n    from pyspark.sql import DataFrame\n\n    df = flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n    df.describe()\n\n\ndef shared_test_partition_preserving(flintContext, func, preserve, create = None):\n    def create_dataframe():\n        return flintContext.read.pandas(make_pdf(forecast_data, [\"time\", \"id\", \"forecast\"]))\n\n    if create is None:\n        create = create_dataframe\n\n    df_lazy = create()\n\n    df_eager = create()\n    df_eager.timeSeriesRDD\n\n    df = create()\n    df_joined = df.leftJoin(df, right_alias=\"right\")\n\n    df = create()\n    df_cached = df.cache()\n    df_cached.count()\n\n    df_cached_joined = df_cached.leftJoin(df_cached, right_alias=\"right\")\n\n    partition_preserving_input_transforms = [\n        lambda df: df,\n        lambda df: df.withColumn(\"f2\", df.forecast * 2),\n        lambda df: df.select(\"time\", \"id\", \"forecast\"),\n        lambda df: df.filter(df.time % 1000 == 0)\n    ]\n\n    order_preserving_input_transforms = [\n        lambda df: df.orderBy(\"time\")\n    ]\n\n    input_dfs = [df_lazy, df_eager, df_joined, df_cached, df_cached_joined]\n\n    for transform in partition_preserving_input_transforms:\n        for input_df in input_dfs:\n            assert_partition_preserving(transform(input_df), func, preserve)\n\n    for transform in order_preserving_input_transforms:\n        
for input_df in input_dfs:\n            assert_order_preserving(transform(input_df), func, preserve)\n\n    df_cached.unpersist()\n\n\ndef assert_sorted(df):\n    pdf = df.toPandas()\n    pdt.assert_frame_equal(pdf, pdf.sort_values('time'))\n\n\ndef assert_partition_preserving(input_df, func, preserve):\n    output_df = func(input_df)\n\n    if preserve:\n        assert(input_df.rdd.getNumPartitions() == output_df.rdd.getNumPartitions())\n        assert(input_df._is_sorted == output_df._is_sorted)\n        assert(input_df._tsrdd_part_info == output_df._tsrdd_part_info)\n        if output_df._is_sorted:\n            assert_sorted(output_df)\n        if output_df._tsrdd_part_info:\n            output_df.timeSeriesRDD.validate()\n\n    else:\n        assert(output_df._tsrdd_part_info is None)\n\ndef assert_order_preserving(input_df, func, preserve):\n    output_df = func(input_df)\n\n    if preserve:\n        assert(input_df._is_sorted == output_df._is_sorted)\n        if output_df._is_sorted:\n            assert_sorted(output_df)\n\n    else:\n        assert(not output_df._is_sorted)\n        assert(output_df._tsrdd_part_info is None)\n","sub_path":"python/tests/ts/flint/test_dataframe.py","file_name":"test_dataframe.py","file_ext":"py","file_size_in_byte":37213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
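The `fn` passed to `addColumnsForCycle` in the tests above receives every row of one cycle (all rows sharing a timestamp, per key when a key is given) and must return a dict mapping each input row to the new column's value for that row. A minimal pure-Python sketch of that contract, with a namedtuple standing in for a Spark row (the names `Row` and `cycle` and the data are illustrative, not part of ts.flint):

```python
from collections import namedtuple

# Stand-in for a Spark row; ts.flint hands fn real Row objects.
Row = namedtuple("Row", ["time", "id", "volume"])

# One "cycle": all rows sharing time=1000 for key id=7.
cycle = [Row(1000, 7, 100), Row(1000, 7, 101)]

def fn(rows):
    # fn sees the whole cycle at once and returns {row: new_column_value}.
    volsum = sum(row.volume for row in rows)
    return {row: row.volume + volsum for row in rows}

print(fn(cycle))  # volumes 100 and 101 both gain volsum=201 -> 301 and 302
```

Those are exactly the `totalVolume` values (301, 302) that the keyed `expected_pdf` above lists for the `(1000, 7)` cycle.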
+{"seq_id":"541728255","text":"class MACA:\n \"\"\"\n This module reads downscaled climate scenarios from the Northwest Knowledge\n Network at the University of Idaho (http://maca.northwestknowledge.net).\n \n It retrieves data for a given year and location.\n \n Common usage:\n import maca\n maca2006 = maca.MACA(2006,46.7,-117.2)\n data = maca2006.retrieveData()\n plot(data['time'],data['tasmax']) # plot max temperature for 2006.\n \n Created on Thu Jan 14 14:23:17 2016\n \n @author: Von P. Walden\n Washington State University\n \"\"\"\n \n def __init__(self, year, lat, lon):\n \"\"\"Initializes an instance of MACA for a particular year, latitude and longitude.\n \n !!! Currently only works for a single model (BNU-ESM) and scenario (RCP 8.5). !!!\"\"\"\n def getMACApixel(maca,lat,lon):\n \n dlat = float(maca.geospatial_lat_resolution)/2.\n dlon = float(maca.geospatial_lon_resolution)/2.\n ilat = np.where((maca.variables['lat'][:] > lat-dlat) & (maca.variables['lat'][:] <= lat+dlat))[0][0]\n ilon = np.where((maca.variables['lon'][:] > lon-dlon) & (maca.variables['lon'][:] <= lon+dlon))[0][0]\n return ilat, ilon\n \n from netCDF4 import Dataset\n from datetime import datetime, timedelta\n import numpy as np\n \n self.year = int(year)\n \n # Define variables and path to MACA file.\n self.directory = 'http://thredds.northwestknowledge.net:8080/thredds/dodsC/agg_macav2metdata_'\n self.scenario = '_BNU-ESM_r1i1p1_rcp85_2006_2099_CONUS_daily.nc'\n self.files = ('huss', \n 'pr', \n 'rhsmax', \n 'rhsmin', \n 'rsds', \n 'tasmax', \n 'tasmin', \n 'uas', \n 'vas')\n self.variables = ('specific_humidity',\n 'precipitation',\n 'relative_humidity',\n 'relative_humidity',\n 'surface_downwelling_shortwave_flux_in_air',\n 'air_temperature',\n 'air_temperature',\n 'eastward_wind',\n 'northward_wind') \n # Get index into lat/lon grid for desired location.\n filename = self.directory + self.files[0] + self.scenario\n data = Dataset(filename)\n if lon<0:\n lon = 360. + lon\n self.ilat, self.ilon = getMACApixel(data,lat,lon)\n \n # Checks that the desired year is within the temporal limits of the data.\n if ( (year>=2006) & (year<=2099)):\n self.date = np.array([]) \n for d in data.variables['time'][:]:\n self.date = np.append(self.date, datetime(1900,1,1)+timedelta(days=int(d)))\n self.iday = np.where( (self.date>=datetime(year,1,1)) & (self.date> 0\"\n self.bttn[\"command\"] = self.update_count\n self.bttn.grid()\n\ndef update_count(self):\n \"\"\" Increase click count and display new total. \"\"\"\n self.bttn_clicks += 1\n self.bttn[\"text\"] = \"Credits >> \" + str(self.bttn_clicks)\n\n#Main\nroot = Tk()\nroot.title(\"PyTrading\")\nroot.geometry(\"200x50\")\n\napp = Application(root)\n\nroot.mainloop()\n\n#File and Close Buttons\ndef close():\n exit()\n\nmenubar = Menu(root)\nfilemenu = Menu(menubar, tearoff=0)\nroot.config(menu=menubar)\nfilemenu.add_command(label=\"Close\", command=close)\n\nmenubar.add_cascade(label=\"File\", menu=filemenu)","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"182708288","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------\n# Copyright (c) 2009 Jendrik Seipp\n#\n# RedNotebook is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# RedNotebook is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with RedNotebook; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n# -----------------------------------------------------------------------\n\nfrom __future__ import division\n\n\nclass Statistics(object):\n def __init__(self, journal):\n self.journal = journal\n\n def get_number_of_words(self):\n number_of_words = 0\n for day in self.days:\n number_of_words += day.get_number_of_words()\n return number_of_words\n\n def get_number_of_distinct_words(self):\n word_count_dict = self.journal.get_word_count_dict('word')\n number_of_distinct_words = len(word_count_dict)\n return number_of_distinct_words\n\n def get_number_of_chars(self):\n number_of_chars = 0\n for day in self.days:\n number_of_chars += len(day.text)\n return number_of_chars\n\n def get_number_of_usage_days(self):\n '''Returns the timespan between the first and last entry'''\n sorted_days = self.days\n if len(sorted_days) <= 1:\n return len(sorted_days)\n first_day = sorted_days[0]\n last_day = sorted_days[-1]\n timespan = last_day.date - first_day.date\n return abs(timespan.days) + 1\n\n def get_number_of_entries(self):\n return len(self.days)\n\n def get_edit_percentage(self):\n total = self.get_number_of_usage_days()\n edited = self.get_number_of_entries()\n if total == 0:\n return 0\n percent = round(100 * edited / total, 2)\n return '%s%%' % percent\n\n def get_average_number_of_words(self):\n if self.get_number_of_entries() == 0:\n return 0\n return round(self.get_number_of_words() / self.get_number_of_entries(), 2)\n\n def _get_html_row(self, key, value):\n return (''\n '| ' + key + ' | '\n ' ' + str(value) + ' | '\n '
')\n\n @property\n def overall_pairs(self):\n return [\n [_('Words'), self.get_number_of_words()],\n [_('Distinct Words'), self.get_number_of_distinct_words()],\n [_('Edited Days'), self.get_number_of_entries()],\n [_('Letters'), self.get_number_of_chars()],\n [_('Days between first and last Entry'), self.get_number_of_usage_days()],\n [_('Average number of Words'), self.get_average_number_of_words()],\n [_('Percentage of edited Days'), self.get_edit_percentage()],\n ]\n\n @property\n def day_pairs(self):\n day = self.journal.day\n return [\n [_('Words'), day.get_number_of_words()],\n [_('Lines'), len(day.text.splitlines())],\n [_('Letters'), len(day.text)],\n ]\n\n def get_stats_html(self):\n self.journal.save_old_day()\n page = '\\n'\n stats = self.pairs\n for key, value in stats:\n page += self._get_html_row(key, value)\n\n page += '
'\n return page\n\n def show_dialog(self, dialog):\n self.journal.save_old_day()\n self.days = self.journal.days\n\n day_store = dialog.day_list.get_model()\n day_store.clear()\n for pair in self.day_pairs:\n day_store.append(pair)\n\n overall_store = dialog.overall_list.get_model()\n overall_store.clear()\n for pair in self.overall_pairs:\n overall_store.append(pair)\n\n dialog.show_all()\n dialog.run()\n dialog.hide()\n","sub_path":"util/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
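The date arithmetic in `get_number_of_usage_days` and `get_edit_percentage` above counts both endpoints of the journal's lifetime and rounds the edited share to two decimals. A quick standalone check of that arithmetic with plain `datetime.date` values (the dates and counts are made up):

```python
from datetime import date

first_day, last_day = date(2009, 1, 1), date(2009, 1, 10)
usage_days = abs((last_day - first_day).days) + 1    # inclusive span: 10 days
edited = 4                                           # days with an entry
print(usage_days)                                    # 10
print('%s%%' % round(100 * edited / usage_days, 2))  # 40.0%
```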
+{"seq_id":"208334575","text":"from django.shortcuts import render,HttpResponse, HttpResponseRedirect, redirect\n\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserForm,CreatePostForm\nfrom django.contrib.auth.models import User\nfrom .models import Profile,Post\nfrom django.core.files.storage import FileSystemStorage\n\n\n\ndef index(request):\n\treturn render(request,'photoapp/home.html')\n\n\ndef signup(request):\n\tif request.method=='POST':\n\t\tform=UserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/')\n\telse:\n\t\tform=UserForm()\n\targs={'form': form}\n\treturn render(request,'photoapp/signup.html',args)\n\n\ndef login_view(request):\n\tmessage='Log In'\n\tif request.method=='POST':\n\t\t_username=request.POST['username']\n\t\t_password=request.POST['password']\n\t\tuser=authenticate(username=_username,password=_password)\n\t\tif user is not None:\n\t\t\tif user.is_active:\n\t\t\t\tlogin(request,user)\n\t\t\t\treturn redirect('/')\n\t\t\telse:\n\t\t\t\tmessage='Not Activated'\n\t\telse:\n\t\t\tmessage='Invalid Login'\n\tcontext={'message':message}\n\treturn render(request,'photoapp/login.html',context)\n\n@login_required\ndef logout_view(request):\n\tlogout(request)\n\treturn HttpResponseRedirect('/')\n\n@login_required\ndef create_post(request):\n\tif request.method=='POST':\n\t\tform=CreatePostForm(request.POST,request.FILES)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\t\n\t\t\treturn redirect('/')\n\telse:\n\t\tform=CreatePostForm()\n\t\n \n\treturn render(request,'photoapp/postenter.html',{'form':form})\n\n\n","sub_path":"photoapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"231177397","text":"@mod.route('/csv-download/')\ndef csv_download(key):\n \"\"\" download file_name.csv\n :param key: unique to find file\n \"\"\"\n result = img_utils.get_picdb_value(key)\n item = CsvFileInfo.get_by_s3_key(key)\n\n if not item:\n return 'Not Found', 404\n # 修改http返回的http头部内容,页面中直接下载csv\n resp = make_response(result)\n resp.mimetype = 'application/octet-stream'\n resp.headers['Content-Disposition'] = \\\n 'attachment; filename=\"{0}\"'.format(item.file_name.encode('utf-8'))\n\n return resp\n","sub_path":"download_csv.py","file_name":"download_csv.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"274443846","text":"from flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = 'thisissecret'\nimport random\n\n\n@app.route('/')\ndef start():\n if 'totalgold' not in session:\n session['totalgold'] = 0\n if 'result' not in session:\n session['result'] = ' '\n return render_template(\"main.html\")\n\n@app.route('/process_money', methods=['POST'])\ndef display():\n if request.form['building'] == 'farm':\n num = random.randint(10,20)\n session['totalgold'] += num\n session['result'] += \"Earned \" + str(num) + \" gold from the farm! \"\n elif request.form['building'] == 'cave':\n num = random.randint(5,10)\n session['totalgold'] += num\n session['result'] += \"Earned \" + str(num) + \" gold from the cave! \"\n elif request.form['building'] == 'house':\n num = random.randint(2,5)\n session['totalgold'] += num\n session['result'] += \"Earned \" + str(num) + \" gold from the house! \"\n elif request.form['building'] == 'casino':\n chance = random.randint(1,3)\n if chance == 1:\n num = random.randint(0,51)\n session['totalgold'] += num\n session['result'] += \"Entered a casino and won \" + str(num) + \" gold, yay! \"\n elif chance == 2:\n num = random.randint(0,51)\n session['totalgold'] -= num\n session['result'] += \"Entered a casino and lost \" + str(num) + \" gold...Ouch... \"\n return redirect('/')\n\n@app.route('/reset')\ndef reset():\n session.clear()\n return redirect('/')\n\napp.run(debug=True)\n","sub_path":"john_lee/Flask Fundamentals/5-Ninja Gold/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"331916828","text":"from django.shortcuts import render_to_response\nfrom thxyew_note.models import Note, NoteForm\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\n\ndef index(request):\n all_notes = Note.objects.all().order_by('-pub_date')\n return render_to_response('index.html', {'all_notes': all_notes}, context_instance=RequestContext(request))\n \ndef write_note(request):\n form = NoteForm()\n\n variables = RequestContext(request, {\n 'form': form\n })\n\n return render_to_response('write-note.html', variables, context_instance=RequestContext(request))\n\ndef submit_note(request):\n if request.method == 'POST':\n form = NoteForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/')\n else:\n form = NoteForm() \n\n variables = RequestContext(request, {\n 'form': form\n })\n \n return render_to_response('write-note.html', variables, context_instance=RequestContext(request))\n\ndef single_note(request, pk):\n note = Note.objects.get(pk=pk)\n return render_to_response('note.html', {'single_note': note}, context_instance=RequestContext(request))\n ","sub_path":"thxyew/thxyew_note/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"565469697","text":"# algo pour calculer racine carrée par dichotomie\n# ~ complexité logarithmique\n\n\ndef racine(precision):\n \"\"\"\n Fonction qui permet de calculer la racine carrée de nb avec la\n précision donnée en paramètre\n Calcul par dichotomie\n \"\"\"\n nb = 2\n inf = 1\n sup = nb\n while sup - inf > precision:\n moyenne = (inf + sup) / 2\n if moyenne**2 <= nb:\n inf = moyenne\n else:\n sup = moyenne\n return sup\n\ndef heron(nb, approx, nb_iterations):\n racine = approx\n for _ in range(nb_iterations):\n racine = (racine + nb / racine) / 2\n return racine\n\ndef heron_recursif(nb, approx, nb_iterations):\n if nb_iterations == 0:\n return approx\n else:\n return heron(nb, (approx + nb/approx) / 2, nb_iterations - 1)\n\n","sub_path":"divers/heron.py","file_name":"heron.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"415160065","text":"\"\"\"\nPOST objects to test specific CTS workflows.\n\"\"\"\n\nchemspec_post = {\n\t\"chem_struct\":\"aspirin\",\n\t\"get_pka\":\"on\",\n\t\"pKa_decimals\":\"2\",\n\t\"pKa_pH_lower\":\"0\",\n\t\"pKa_pH_upper\":\"14\",\n\t\"pKa_pH_increment\":\"0.2\",\n\t\"pH_microspecies\":\"7.0\",\n\t\"isoelectricPoint_pH_increment\":\"0.5\",\n\t\"get_taut\":\"on\",\n\t\"tautomer_maxNoOfStructures\":\"100\",\n\t\"tautomer_pH\":\"7.0\",\n\t\"get_stereo\":\"on\",\n\t\"stereoisomers_maxNoOfStructures\":\"100\",\n\t\"chemical\":\"aspirin\",\n\t\"orig_smiles\":\"CC(=O)OC1=C(C=CC=C1)C(O)=O\",\n\t\"smiles\":\"CC(=O)OC1=C(C=CC=C1)C(O)=O\",\n\t\"preferredName\":\"Aspirin\",\n\t\"iupac\":\"2-(acetyloxy)benzoic acid\",\n\t\"formula\":\"C9H8O4\",\n\t\"casrn\":\"50-78-2\",\n\t\"cas\":\"50-78-2,11126-35-5,11126-37-7,2349-94-2,26914-13-6,98201-60-6\",\n\t\"dtxsid\":\"DTXSID5020108\",\n\t\"mass\":\"180.159\",\n\t\"exactmass\":\"180.042258738\"\n}\n\nmetabolizer_post = {\n \"structure\": \"CCCC\",\n \"generationLimit\": 1,\n \"transformationLibraries\": [\n\t\"hydrolysis\",\n\t\"abiotic_reduction\"\n ]\n}\n\nenvipath_post = {\n \"chemical\": \"CCCC\",\n \"gen_limit\": 1\n}\n\npchem_post = {\n \"chemical\": \"CCC\",\n \"calc\": \"chemaxon\",\n \"prop\": \"water_sol\"\n}\n\ndef get_post_object(workflow):\n\t\"\"\"\n\tReturns example POST object for a given workflow.\n\t\"\"\"\n\tif workflow == \"chemspec\":\n\t\treturn chemspec_post\n\n\telif workflow == \"metabolizer\":\n\t\treturn metabolizer_post\n\n\telif workflow == \"envipath\":\n\t\treturn envipath_post\n\n\telse:\n\t\tpchem_post[\"calc\"] = workflow \n\t\treturn pchem_post\n","sub_path":"tests/test_objects.py","file_name":"test_objects.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"206260899","text":"import pygame\nimport pygame.font\n\nclass ScoreBoard():\n \"\"\"Класс для вывода игровой информации.\"\"\"\n def __init__(self, ai_setting, screen_game, stats):\n self.ai_setting = ai_setting\n self.screen_rect = screen_game.get_rect()\n self.screen_game = screen_game\n self.stats = stats\n # Настройка шрифта для вывода на экран.\n self.text_color = (30, 30, 30)\n self.font = pygame.font.SysFont(None, 48)\n self.background_text = (0, 100, 100, 255)\n #self.background_text.set_colorkey(ai_setting.white)\n # Подготовка исходного изображения.\n self.prep_score()\n self.prep_numbers()\n \n \n def prep_score(self):\n # Преобразование текста в изображение.\n self.text_str_1 = str(self.stats.score)\n self.score_image = self.font.render(self.text_str_1, True, \n self.text_color, self.background_text)\n # Преобразование текста в прямоугольник.\n self.score_rect = self.score_image.get_rect()\n # Вывод счета в правой вехней части экрана.\n self.score_rect.right = self.screen_rect.right - 20\n self.score_rect.top = 20\n \n def prep_numbers(self):\n # Преобразование текста в изображение.\n self.text_str_2 = str(self.stats.numbers)\n self.numbers_image = self.font.render(self.text_str_2, True, \n self.text_color, self.background_text)\n # Преобразование текста в прямоугольник.\n self.numbers_rect = self.numbers_image.get_rect()\n # Вывод счета в левой вехней части экрана.\n self.numbers_rect.left = self.screen_rect.left + 20\n self.numbers_rect.top = 20\n \n def show_numbers(self):\n \"\"\"Выводит количество пиццы на экран.\"\"\"\n self.screen_game.blit(self.numbers_image, self.numbers_rect)\n \n def show_score(self):\n \"\"\"Выводит счет на экран.\"\"\"\n self.screen_game.blit(self.score_image, self.score_rect)\n\nclass ConfirmExit():\n def __init__(self, ai_setting, screen_game):\n self.ai_setting = ai_setting\n self.screen_game = screen_game\n # Параметры текста для вопроса подтверждения выхода.\n text = \"Вы хотите выйти из игры?\"\n self.help_text = pygame.font.SysFont('Serif', 35)\n self.text_screen = self.help_text.render(text, 1, (255, 255, 255))\n self.text_rect = self.text_screen.get_rect()\n self.text_rect.x = 140\n self.text_rect.y = 200\n \n def blitme_confirm(self):\n self.screen_game.blit(self.text_screen, self.text_rect)\n\nclass Help():\n def __init__(self, ai_setting, screen_game):\n self.ai_setting = ai_setting\n self.screen_game = screen_game\n # Параметры текста справки.\n self.help_text = pygame.font.SysFont('Serif', 25)\n self.text_screen = self.help_text.render(\"Справка по игре\", 1, (0, 0, 0))\n self.text_rect = self.text_screen.get_rect()\n self.text_rect.x = 220\n self.text_rect.y = 10\n \n def blitme_help(self):\n self.screen_game.blit(self.text_screen, self.text_rect)\n\nclass GameStats():\n def __init__(self, ai_setting):\n self.ai_setting = ai_setting\n self.reset_stats()\n self.pizzas_numbers()\n \n # Игра запускается в неактивном состоянии.\n self.game_active = False\n # Флаг перехода в раздел помощь.\n self.help_active = False\n # Флаг подтверждения выхода из игры.\n self.game_exit = False\n # Флаг ускорения пиццы.\n self.moving_speed = False\n # Флаги перемещения сковороды.\n self.moving_right = False\n self.moving_left = False\n \n def reset_stats(self):\n self.score = 0\n \n def pizzas_numbers(self):\n self.numbers = 5\n\n","sub_path":"Panic in the pizzeria/game_text.py","file_name":"game_text.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"355524868","text":"from django.contrib.auth import authenticate, login, logout, update_session_auth_hash\nfrom django.shortcuts import render, redirect\nfrom nickyc975.tools import md5\nfrom django.http import HttpResponse\nfrom blog.models import *\nfrom .models import *\n\n\n# Create your views here.\n\n# Global dictionaries\nblog_dict = {\"tech\": TechEssay, \"life\": LifeEssay, \"photo\": Photo}\nfile_dict = {\"tech\": TechEssays, \"life\": LifeEssays, \"photo\": Photos}\n\n\n# Login admin\ndef admin_login(request, check):\n if request.user.is_authenticated:\n return redirect(\"/myadmin/\")\n\n try:\n admin_check = request.session[\"admin_check\"]\n if admin_check != \"checked\":\n return render(request, \"blog/404.html\")\n except KeyError:\n if md5(check) != \"810e8045fd865643905db84436bad531\":\n return render(request, \"blog/404.html\")\n else:\n request.session[\"admin_check\"] = \"checked\"\n return redirect(\"/myadmin/login/done\")\n\n try:\n admin_name = request.POST[\"name\"]\n admin_passwd = request.POST[\"password\"]\n except KeyError:\n return render(request, \"myadmin/login.html\")\n\n admin_user = authenticate(request, username=admin_name, password=admin_passwd)\n if admin_user is not None:\n login(request, admin_user)\n return redirect(\"/myadmin/\")\n else:\n return render(request, \"myadmin/login.html\")\n\n\n# Index/Home page\ndef index(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n tech_essays = TechEssay.objects.all()\n life_essays = LifeEssay.objects.all()\n photos = Photo.objects.all()\n content = {\n \"tech_essays\": tech_essays,\n \"life_essays\": life_essays,\n \"photos\": photos\n }\n return render(request, \"myadmin/index.html\", content)\n\n\n# Logout admin\ndef admin_logout(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n del request.session[\"admin_check\"]\n except:\n pass\n logout(request)\n return redirect(\"/\")\n\n\n# Manage file page\ndef manage_files(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n tech_files = TechEssays.objects.all()\n life_files = LifeEssays.objects.all()\n photo_files = Photos.objects.all()\n content = {\n \"tech_files\": tech_files,\n \"life_files\": life_files,\n \"photo_files\": photo_files\n }\n return render(request, \"myadmin/managefiles.html\", content)\n\n\n# Upload a file\ndef upload_file(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n blog_type = request.POST[\"blog_type\"]\n blog_file = request.FILES[\"blog_file\"]\n except:\n return render(request, \"blog/404.html\")\n\n try:\n file_model = file_dict[blog_type](blog_file=blog_file)\n except KeyError:\n return HttpResponse(\"failed\")\n file_model.save()\n return HttpResponse(\"done\")\n\n\n# Delete file, this won't delete the file on the disk\ndef delete_file(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n file_id = request.POST[\"file_id\"]\n file_type = request.POST[\"file_type\"]\n except KeyError:\n return render(request, \"blog/404.html\")\n\n try:\n blog_file = file_dict[file_type].objects.get(id=file_id)\n except:\n return HttpResponse(\"no_file_err\")\n\n blog_file.delete()\n return HttpResponse(\"done\")\n\n\n# Change admin password\ndef change_passwd(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n old_passwd = 
request.POST[\"old_passwd\"]\n new_passwd = request.POST[\"new_passwd\"]\n except KeyError:\n return render(request, \"blog/404.html\")\n\n admin_user = authenticate(request, username=request.user.username, password=old_passwd)\n\n if admin_user is not None:\n admin_user.set_password(new_passwd)\n admin_user.save()\n update_session_auth_hash(request, admin_user)\n return HttpResponse(\"done\")\n else:\n return HttpResponse(\"pwd_err\")\n\n\n# Add blog\ndef add_blog(request, blog_type):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n blog_title = request.POST[\"blog_title\"]\n blog_author = request.POST[\"blog_author\"]\n blog_intro = request.POST[\"blog_intro\"]\n blog_content_url = request.POST[\"blog_content_url\"]\n blog_tags = request.POST[\"blog_tags\"].split(\";\")\n except:\n tags = Tag.objects.all()\n content = {\"tags\": tags, \"blog_type\": blog_type, \"title\": u\"添加博客\"}\n return render(request, \"myadmin/blogdetail.html\", content)\n\n try:\n blog_dict[blog_type].objects.get(title=blog_title)\n return HttpResponse(\"title_exist_err\")\n except KeyError:\n return HttpResponse(\"failed\")\n except:\n blog = blog_dict[blog_type]()\n blog.save()\n\n for blog_tag in blog_tags:\n try:\n tag = Tag.objects.get(name=blog_tag)\n except:\n return HttpResponse(blog_tag+\" not_exist\")\n blog.tags.add(tag)\n\n blog.title = blog_title\n blog.author = blog_author\n blog.intro = blog_intro\n blog.content_url = blog_content_url\n blog.save()\n return HttpResponse(\"done\")\n\n\n# Delete blog\ndef delete_blog(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n blog_type = request.POST[\"blog_type\"]\n blog_id = request.POST[\"blog_id\"]\n except KeyError:\n return render(request, \"blog/404.html\")\n\n try:\n blog = blog_dict[blog_type].objects.get(id=blog_id)\n except:\n return HttpResponse(\"no_blog_err\")\n\n blog.delete()\n return HttpResponse(\"done\")\n\n\n# Edit blog\ndef edit_blog(request, blog_type, blog_id):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n blog_title = request.POST[\"blog_title\"]\n blog_author = request.POST[\"blog_author\"]\n blog_intro = request.POST[\"blog_intro\"]\n blog_content_url = request.POST[\"blog_content_url\"]\n blog_tags = request.POST[\"blog_tags\"].split(\";\")\n except KeyError:\n try:\n tags = Tag.objects.all()\n blog = blog_dict[blog_type].objects.get(id=blog_id)\n content = {\"tags\": tags, \"blog_type\": blog_type, \"blog\": blog, \"title\": u\"编辑博客\"}\n return render(request, \"myadmin/blogdetail.html\", content)\n except:\n return HttpResponse(\"404\")\n\n try:\n blog = blog_dict[blog_type].objects.get(id=blog_id)\n except:\n return HttpResponse(\"failed\")\n\n if blog_title != blog.title:\n try:\n blog_dict[blog_type].objects.get(title=blog_title)\n return HttpResponse(\"title_exist_err\")\n except:\n blog.title = blog_title\n\n for blog_tag in blog_tags:\n try:\n tag = Tag.objects.get(name=blog_tag)\n except:\n return HttpResponse(blog_tag + \" not_exist\")\n\n if tag not in blog.tags.all():\n blog.tags.add(tag)\n\n for tag in blog.tags.all():\n if str(tag.name) not in blog_tags:\n blog.tags.remove(tag)\n\n blog.author = blog_author\n blog.intro = blog_intro\n blog.content_url = blog_content_url\n\n blog.save()\n return HttpResponse(\"done\")\n\n\ndef show_msgs(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n unread = 
Message.objects.filter(have_read=False)\n all_msg = Message.objects.all()\n unread_count = unread.count()\n\n return render(request, \"myadmin/messages.html\", {\"unread\": unread, \"all_msg\": all_msg, \"unread_count\": unread_count})\n\n\ndef delete_msg(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n msg_id = request.POST[\"msg_id\"]\n except:\n return HttpResponse(\"404\")\n\n try:\n msg = Message.objects.get(id=msg_id)\n except:\n return HttpResponse(\"id_err\")\n\n msg.delete()\n return HttpResponse(\"done\")\n\n\ndef edit_msg(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n msg_id = request.POST[\"msg_id\"]\n option = request.POST[\"option\"]\n except:\n return HttpResponse(\"404\")\n\n try:\n msg = Message.objects.get(id=msg_id)\n except:\n return HttpResponse(\"id_err\")\n\n if option == \"set_read\":\n msg.have_read = not msg.have_read\n elif option == \"set_public\":\n msg.public = not msg.public\n else:\n return HttpResponse(\"option_err\")\n\n msg.save()\n return HttpResponse(\"done\")\n\n\ndef show_tags(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n tags = Tag.objects.all()\n return render(request, \"myadmin/tags.html\", {\"tags\": tags})\n\n\ndef add_tag(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n tag_name = request.POST[\"tag_name\"]\n except KeyError:\n return render(request, \"blog/404.html\")\n\n try:\n Tag.objects.get(name=tag_name)\n return HttpResponse(\"tag_exist\")\n except:\n tag = Tag(name=tag_name)\n tag.save()\n return HttpResponse(\"done\")\n\n\ndef delete_tag(request):\n if not request.user.is_authenticated:\n return render(request, \"blog/404.html\")\n\n try:\n tag_id = request.POST[\"tag_id\"]\n except:\n return HttpResponse(\"404\")\n\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n return HttpResponse(\"no_tag_err\")\n\n tag.delete()\n return HttpResponse(\"done\")\n","sub_path":"myadmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"171394316","text":"from fraction import Fraction\nimport time,sys\ndef continued_frac(seq):\n cur_frac = Fraction(1,0)\n for i in reversed(seq):\n cur_frac = cur_frac.inverse()+i\n\n return cur_frac\n\ndef main():\n e_seq = [2]\n for i in range(1,eval(sys.argv[1])+1):\n e_seq+=[1,2*i,1]\n\n e = continued_frac(e_seq)\n print (sum(map(int,list(str(int(e.num))))))\n\n\nif __name__ == '__main__':\n start = time.time()\n main()\n print (\"Total time taken:\",time.time() - start,\"seconds\")\n","sub_path":"p65_e_continued.py","file_name":"p65_e_continued.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"550010902","text":"import re\nimport logging\n\n\nfrom model.dao.coach_dao import CoachDAO\n\nfrom exceptions import Error, InvalidData\n\n\nclass CoachController:\n\t\n\tdef __init__(self, database_engine):\n\t\tself._database_engine = database_engine\n\n\tdef create_coach(self, data):\n\t\ttry:\n\t\t\twith self._database_engine.new_session() as session:\n\t\t\t\tdao = CoachDAO(session)\n\t\t\t\tcoach = dao.create(data)\n\t\t\t\tcoach_data = coach.to_dict()\n\t\t\t\treturn coach_data\n\t\texcept Error as error:\n\t\t\traise error\n\n\tdef list_coaches(self, person_type=None):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tcoaches = CoachDAO(session).get_all()\n\t\t\tprint(coaches)\n\t\treturn coaches\n\n\tdef gather_informations(self, person_type = None) :\n\t\twith self._database_engine.new_session() as session :\n\t\t\tcoaches = CoachDAO(session).gather_all_informations()\n\t\treturn coaches\n\t\n\tdef get_coach(self, coach_id):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tcoach = CoachDAO(session).get(coach_id)\n\t\t\tcoach_data = coach.to_dict()\n\t\treturn coach_data\n\n\tdef update_coach(self, coach_id, coach_data):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tdao = CoachDAO(session)\n\t\t\tcoach = dao.get(coach_id)\n\t\t\tcoach = dao.update(coach, coach_data)\n\t\t\treturn coach.to_dict()\n\n\tdef delete_coach(self, coach_id):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tdao = CoachDAO(session)\n\t\t\tcoach = dao.get(coach_id)\n\t\t\tdao.delete(coach)\n\n\tdef search_coach(self, firstname, lastname):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tdao = coachDao(session)\n\t\t\tcoach = dao.get_by_name(firstname, lastname)\n\t\t\treturn coach.to_dict()","sub_path":"GLPOO_Project/controller/coach_controller.py","file_name":"coach_controller.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"3594234","text":"load(\"//aspects:utils/java_utils.bzl\", \"get_java_provider\")\nload(\"//aspects:utils/utils.bzl\", \"create_struct\", \"file_location\", \"is_external\", \"map\", \"to_file_location\", \"update_sync_output_groups\")\n\ndef map_with_resolve_files(f, xs):\n results = []\n resolve_files = []\n\n for x in xs:\n if x != None:\n res = f(x)\n if res != None:\n a, b = res\n if a != None:\n results.append(a)\n if b != None:\n resolve_files += b\n\n return results, resolve_files\n\ndef get_interface_jars(output):\n if hasattr(output, \"compile_jar\") and output.compile_jar:\n return [output.compile_jar]\n elif hasattr(output, \"ijar\") and output.ijar:\n return [output.ijar]\n else:\n return []\n\ndef get_source_jars(output):\n if hasattr(output, \"source_jars\"):\n return output.source_jars\n if hasattr(output, \"source_jar\"):\n return [output.source_jar]\n return []\n\ndef get_generated_jars(provider):\n if hasattr(provider, \"java_outputs\"):\n return map_with_resolve_files(to_generated_jvm_outputs, provider.java_outputs)\n\n if hasattr(provider, \"annotation_processing\") and provider.annotation_processing and provider.annotation_processing.enabled:\n class_jar = provider.annotation_processing.class_jar\n source_jar = provider.annotation_processing.source_jar\n output = struct(\n binary_jars = [file_location(class_jar)],\n source_jars = [file_location(source_jar)],\n )\n resolve_files = [class_jar, source_jar]\n return [output], resolve_files\n\n return [], []\n\ndef to_generated_jvm_outputs(output):\n if output == None or output.generated_class_jar == None:\n return None\n\n class_jar = output.generated_class_jar\n source_jar = output.generated_source_jar\n\n output = struct(\n binary_jars = [file_location(class_jar)],\n source_jars = [file_location(source_jar)],\n )\n resolve_files = [class_jar, source_jar]\n return output, resolve_files\n\ndef to_jvm_outputs(output):\n if output == None or output.class_jar == None:\n return None\n\n binary_jars = [output.class_jar]\n interface_jars = get_interface_jars(output)\n source_jars = get_source_jars(output)\n output = struct(\n binary_jars = map(file_location, binary_jars),\n interface_jars = map(file_location, interface_jars),\n source_jars = map(file_location, source_jars),\n )\n resolve_files = binary_jars + interface_jars + source_jars\n return output, resolve_files\n\ndef extract_runtime_jars(target, provider):\n compilation_info = getattr(provider, \"compilation_info\", None)\n\n if compilation_info:\n return compilation_info.runtime_classpath\n\n return getattr(provider, \"transitive_runtime_jars\", target[JavaInfo].transitive_runtime_jars)\n\ndef extract_compile_jars(provider):\n compilation_info = getattr(provider, \"compilation_info\", None)\n transitive_compile_time_jars = getattr(provider, \"transitive_compile_time_jars\", depset())\n\n return compilation_info.compilation_classpath if compilation_info else transitive_compile_time_jars\n\ndef extract_java_info(target, ctx, output_groups):\n provider = get_java_provider(target)\n if not provider:\n return None\n\n if hasattr(provider, \"java_outputs\") and provider.java_outputs:\n java_outputs = provider.java_outputs\n elif hasattr(provider, \"outputs\") and provider.outputs:\n java_outputs = provider.outputs.jars\n else:\n return None\n\n resolve_files = []\n\n jars, resolve_files_jars = map_with_resolve_files(to_jvm_outputs, java_outputs)\n resolve_files += resolve_files_jars\n\n generated_jars, resolve_files_generated_jars = get_generated_jars(provider)\n 
resolve_files += resolve_files_generated_jars\n\n runtime_jars = extract_runtime_jars(target, provider).to_list()\n compile_jars = extract_compile_jars(provider).to_list()\n source_jars = getattr(provider, \"transitive_source_jars\", depset()).to_list()\n resolve_files += runtime_jars\n resolve_files += compile_jars\n resolve_files += source_jars\n\n runtime_classpath = map(file_location, runtime_jars)\n compile_classpath = map(file_location, compile_jars)\n source_classpath = map(file_location, source_jars)\n\n javac_opts = getattr(ctx.rule.attr, \"javacopts\", [])\n jvm_flags = getattr(ctx.rule.attr, \"jvm_flags\", [])\n args = getattr(ctx.rule.attr, \"args\", [])\n main_class = getattr(ctx.rule.attr, \"main_class\", None)\n\n if (is_external(target)):\n update_sync_output_groups(output_groups, \"external-deps-resolve\", depset(resolve_files))\n\n return create_struct(\n jars = jars,\n generated_jars = generated_jars,\n runtime_classpath = runtime_classpath,\n compile_classpath = compile_classpath,\n source_classpath = source_classpath,\n javac_opts = javac_opts,\n jvm_flags = jvm_flags,\n main_class = main_class,\n args = args,\n )\n\ndef extract_java_toolchain(target, ctx, dep_targets):\n toolchain = None\n\n if hasattr(target, \"java_toolchain\"):\n toolchain = target.java_toolchain\n elif java_common.JavaToolchainInfo != platform_common.ToolchainInfo and \\\n java_common.JavaToolchainInfo in target:\n toolchain = target[java_common.JavaToolchainInfo]\n\n toolchain_info = None\n if toolchain != None:\n java_home = to_file_location(toolchain.java_runtime.java_home, \"\", False, True) if hasattr(toolchain, \"java_runtime\") else None\n toolchain_info = create_struct(\n source_version = toolchain.source_version,\n target_version = toolchain.target_version,\n java_home = java_home,\n )\n else:\n for dep in dep_targets:\n if hasattr(dep.bsp_info, \"java_toolchain_info\"):\n toolchain_info = dep.bsp_info.java_toolchain_info\n break\n\n if toolchain_info != None:\n return toolchain_info, dict(java_toolchain_info = toolchain_info)\n else:\n return None, dict()\n\nJAVA_RUNTIME_TOOLCHAIN_TYPE = \"@bazel_tools//tools/jdk:runtime_toolchain_type\"\n\ndef extract_java_runtime(target, ctx, dep_targets):\n runtime = None\n\n if java_common.JavaRuntimeInfo in target: # Bazel 5.4.0 way\n runtime = target[java_common.JavaRuntimeInfo]\n elif JAVA_RUNTIME_TOOLCHAIN_TYPE in ctx.toolchains: # Bazel 6.0.0 way\n runtime = ctx.toolchains[JAVA_RUNTIME_TOOLCHAIN_TYPE].java_runtime\n else:\n runtime_jdk = getattr(ctx.rule.attr, \"runtime_jdk\", None)\n if runtime_jdk and java_common.JavaRuntimeInfo in runtime_jdk:\n runtime = runtime_jdk[java_common.JavaRuntimeInfo]\n\n runtime_info = None\n if runtime != None:\n java_home = to_file_location(runtime.java_home, \"\", False, True) if hasattr(runtime, \"java_home\") else None\n runtime_info = create_struct(java_home = java_home)\n else:\n for dep in dep_targets:\n if hasattr(dep.bsp_info, \"java_runtime_info\"):\n runtime_info = dep.bsp_info.java_runtime_info\n break\n\n if runtime_info != None:\n return runtime_info, dict(java_runtime_info = runtime_info)\n else:\n return None, dict()\n","sub_path":"aspects/rules/java/java_info.bzl","file_name":"java_info.bzl","file_ext":"bzl","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
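The `map_with_resolve_files` helper in the java_info.bzl record above is the central aggregation pattern of this aspect: a mapper returns either None or a (value, files) pair, and the two halves are collected into separate lists. A minimal plain-Python sketch of the same pattern; the toy mapper and inputs are purely illustrative, not part of the record:

def map_with_resolve_files(f, xs):
    # f(x) returns (value, files) or None; collect the two halves separately
    results, resolve_files = [], []
    for x in xs:
        if x is None:
            continue
        res = f(x)
        if res is None:
            continue
        value, files = res
        if value is not None:
            results.append(value)
        if files:
            resolve_files.extend(files)
    return results, resolve_files

# toy mapper: keep even numbers and record a fake "resolve file" for each
values, files = map_with_resolve_files(
    lambda n: (n, ['jar-%d' % n]) if n % 2 == 0 else None, [1, 2, 3, 4])
assert values == [2, 4] and files == ['jar-2', 'jar-4']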
+{"seq_id":"437457032","text":"# coding: utf-8\r\n\r\nimport sys\r\nimport time\r\nimport requests\r\nimport socket\r\nimport threading\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef is_logined(session):\r\n url = 'http://www.nicovideo.jp/'\r\n res = session.get(url)\r\n\r\n return res.headers['x-niconico-authflag'] == '1'\r\n\r\ndef fetch_live_info(session, live_id):\r\n url = 'http://live.nicovideo.jp/api/getplayerstatus/{}'.format(live_id)\r\n\r\n res = session.get(url)\r\n soup = BeautifulSoup(res.text, 'lxml')\r\n\r\n try:\r\n addr = soup.addr.string\r\n port = int(soup.port.string)\r\n thread = int(soup.thread.string)\r\n\r\n return addr, port, thread\r\n\r\n except:\r\n print('failed to fetch live info')\r\n sys.exit()\r\n\r\ndef make_socket(addr, port, thread):\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((addr, port))\r\n\r\n xml = '\\0'.format(thread)\r\n s.sendall(xml.encode('UTF-8'))\r\n\r\n return s\r\n\r\ndef is_exists_room(addr, thread, port):\r\n s = make_socket(addr, port, thread)\r\n\r\n try:\r\n BeautifulSoup(s.recv(1024), 'lxml').chat.string\r\n return True\r\n except:\r\n return False\r\n\r\n# for debug\r\ndef search_thread_port(addr, base_thread):\r\n thread_range = range(base_thread - 2, base_thread + 5)\r\n port_range = range(2805, 2883)\r\n\r\n for thread in thread_range:\r\n for port in port_range:\r\n if is_exists_room(addr, thread, port):\r\n print(thread, port)\r\n\r\ndef seek_adjacent_room(addr, thread, port, cursor, is_community=False):\r\n ########## port memo ##########\r\n # community 2805 - 2854 step 10\r\n # channel 2805 - 2882 step 13\r\n # official 2805 - 2882 step 13\r\n ###############################\r\n\r\n port_min = 2805\r\n port_max = 2854 if is_community else 2882\r\n step = 10 if is_community else 13\r\n\r\n rooms = []\r\n\r\n while is_exists_room(addr, thread, port):\r\n rooms.append({'addr': addr, 'thread': thread, 'port': port})\r\n\r\n thread += cursor\r\n port = port_min + (((port % port_min) + (step * cursor)) % (port_max - port_min))\r\n\r\n # 先頭は引数で受け取った部屋なので消去\r\n if rooms:\r\n del rooms[0]\r\n\r\n if cursor == -1:\r\n rooms.reverse()\r\n\r\n return rooms\r\n\r\ndef fetch_rooms(session, live_id):\r\n print('fetching live info...')\r\n addr, port, thread = fetch_live_info(session, live_id)\r\n\r\n print('seeking rooms...')\r\n rooms = []\r\n rooms.extend(seek_adjacent_room(addr, thread, port, -1))\r\n rooms.extend(seek_adjacent_room(addr, thread, port, -1, is_community=True))\r\n rooms.append({'addr': addr, 'thread': thread, 'port': port})\r\n rooms.extend(seek_adjacent_room(addr, thread, port, 1))\r\n rooms.extend(seek_adjacent_room(addr, thread, port, 1, is_community=True))\r\n\r\n # print(rooms)\r\n\r\n return rooms\r\n\r\ndef connect(index, room, listener):\r\n s = make_socket(room['addr'], room['port'], room['thread'])\r\n\r\n handle_thread = threading.Thread(\r\n target=receiver,\r\n args=(index, s, listener),\r\n daemon=True\r\n )\r\n\r\n handle_thread.start()\r\n\r\nmemo = set()\r\n\r\ndef receiver(index, s, listener):\r\n while True:\r\n try:\r\n data = s.recv(1024)\r\n\r\n soup = BeautifulSoup(data, 'lxml', from_encoding='UTF-8')\r\n comment = soup.chat.string\r\n\r\n key = soup.chat.get('user_id') + comment\r\n if key in memo:\r\n continue\r\n memo.add(key)\r\n\r\n # /disconnect\r\n\r\n listener(index, comment)\r\n except:\r\n pass\r\n\r\ndef on_comment(room_id, comment):\r\n print('ROOM[{:0>2}] {}'.format(room_id, comment))\r\n\r\nclass NicoLive:\r\n def __init__(self, mail, password):\r\n 
self.session = requests.session()\r\n\r\n self.mail = mail\r\n self.password = password\r\n self.live_id = None\r\n\r\n self.login()\r\n\r\n def login(self):\r\n url = 'https://account.nicovideo.jp/api/v1/login'\r\n\r\n params = {\r\n 'mail_tel': self.mail,\r\n 'password': self.password,\r\n }\r\n\r\n self.session.post(url, params=params)\r\n\r\n if not is_logined(self.session):\r\n print('failed to login.')\r\n sys.exit()\r\n\r\n def execute(self, live_id, listener=on_comment):\r\n rooms = fetch_rooms(self.session, live_id)\r\n\r\n for index, room in enumerate(rooms):\r\n connect(index, room, listener)\r\n\r\n while True:\r\n # 10秒間隔でmemoをリセットし続ける\r\n time.sleep(10)\r\n memo.clear()","sub_path":"nicolive.py","file_name":"nicolive.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
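A hedged usage sketch for the NicoLive class above; the mail address, password, and live ID are placeholders, and execute() blocks forever by design (it keeps clearing the dedup memo while the receiver threads print comments):

if __name__ == '__main__':
    client = NicoLive('user@example.com', 'secret')     # hypothetical credentials
    client.execute('lv123456789', listener=on_comment)  # hypothetical live ID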
+{"seq_id":"653259107","text":"import MySQLdb\nimport json\nimport urllib\nimport csv\n\nimport pymysql\n#pymysql.install_as_MySQLdb()\n\n# Create a Cursor object to execute queries.\n\n#get settings\ndef read_params(fn): \n d ={} \n try:\n with open(fn, 'r',encoding=\"utf-8\") as file: \n d = json.load(file) \n except FileNotFoundError:\n print (\"Error. Can't find file \" + fn)\n d = {}\n return d \n\n\n\ndef ifNoneNull(s):\n if s is None:\n return 'NULL'\n return str(s)\n\nclass database:\n def __init__(self, host_, user_, passwd_, db_):\n self.host = host_\n self.user = user_\n self.passwd = passwd_\n self.db_ = db_\n self.db = MySQLdb.connect(host=host_, # your host \n user=user_, # username\n passwd=passwd_, # password\n db=db_\n \n\n ) # name of the database\n self.db.set_character_set('utf8mb4')\n \n def __repr__(self):\n return \"DB(host='%s', name='%s')\" % (self.host, self.name)\n \n def __str__(self):\n return \"<%s dv named %s>\" % (self.host, self.name)\n \n def get_dbs_info(self):\n cur = self.db.cursor()\n query = 'SHOW DATABASES;'\n cur.execute(query) \n\n def create_table(self, dic, table_name):\n cur = self.db.cursor()\n query = 'CREATE TABLE IF NOT EXISTS ' + table_name + \" (\"\n length = len(dic)\n i = 0\n for value in dic:\n query += value + \" \" + dic[value] \n if i != length - 1:\n query += \", \"\n else:\n query += \") \"\n i += 1\n print(query)\n cur.execute(query)\n self.db.commit() \n \n\n def insertDataIntoTableFromCSV(self, name, table_name, key_field = None):\n #csvfile = requests.get(name).csv\n csvfile = open(name)\n reader = csv.DictReader(csvfile)\n for row in reader: \n print(row) \n if key_field != None:\n if self.check_if_exists(table_name, key_field, row[key_field]) == -1:\n self.insert_values(table_name, row.keys(), row.values())\n else:\n return False \n else:\n self.insert_values(table_name, row.keys(), row.values()) \n return True\n\n def getDictFromQueryRes(self, table_name, condition = None, result_fields = None):\n #csvfile = requests.get(name).csv\n cur = self.db.cursor(MySQLdb.cursors.DictCursor)\n\n query = \"SELECT \"\n if result_fields == None:\n query += \"*\"\n else:\n for r in result_fields:\n query += r + \",\"\n query += \"+\"\n query = query.strip().replace(\",+\", \"\")\n query += \" FROM \" + table_name\n if condition != None:\n query += \" WHERE \"\n for q in condition:\n query += \" \" + str(q) + \" = \" + str(condition[q]) + \" AND\"\n query += \"=\"\n query = query.strip().replace(\"AND=\", \"\")\n print(query)\n cur.execute(query)\n return list (cur.fetchall())\n \n def getDictFromQueryText(self, query = None, condition = None):\n #csvfile = requests.get(name).csv\n cur = self.db.cursor(MySQLdb.cursors.DictCursor)\n if query == None:\n query = \"select distinct cast(replace(replace(g.minute, 'PEN', ''), 'OG', '') as signed) as minute, g.minute as goal_time, p.name as player_name, f.datetime, f.stadium, f.city, f.group_name, f.teamHome, f.shortHomeTeam, f.shortAwayTeam, f.teamAway, p.number, p.image, p.position, p.birthdate from goals g join games_fifa_all f on g.matchId = f.match_fifa_id join players p on p.playerId = g.playerId \"\n \n if condition != None:\n query += \" WHERE \"\n for q in condition:\n query += \" \" + str(q) + \" = \" + str(condition[q]) + \" AND\"\n query += \"=\"\n query = query.strip().replace(\"AND=\", \"\")\n query += \" order by datetime, minute \"\n print(query)\n cur.execute(query)\n return list (cur.fetchall())\n \n \n \n def updateTableFromConditions(self, table_name, condition = None, 
update_fields = None):\n #csvfile = requests.get(name).csv\n cur = self.db.cursor(MySQLdb.cursors.DictCursor)\n \n query = \"UPDATE \" + table_name\n if update_fields == None:\n print(\"No update fields\")\n return\n else:\n query += \" SET \"\n for r in update_fields:\n s = str(r) + \" = '\" + str(ifNoneNull(update_fields[r])) + \"',\"\n if r not in [\"date\", \"status\"]:\n s = s.strip().replace(\"'\", \"\")\n query += s \n query += \"+\"\n query = query.strip().replace(\",+\", \"\")\n if condition != None:\n query += \" WHERE \"\n for q in condition:\n qr = \" \" + str(q) + \" = '\" + str(condition[q]) + \"' AND \"\n if q not in [\"date\", \"status\"]:\n qr = qr.replace(\"'\", \"\")\n query += qr \n query += \"=\"\n query = query.strip().replace(\"AND =\", \"\")\n print(query)\n cur.execute(query)\n self.db.commit() \n \n def insert_values (self, table_name, headers, values):\n cur = self.db.cursor()\n size = len(headers)\n if size != len(values):\n return -1\n query = 'INSERT INTO ' + table_name + \" (\"\n last = size\n i = 0\n for header in headers:\n i += 1\n if i != last:\n query += header + \",\"\n else:\n query = query + header + \")\"\n query += \" VALUES (\"\n i = 0\n for value in values:\n i += 1\n if value == None or value == '':\n value = 'NULL' \n if i != last: \n if value != 'NULL': \n query = query + \"'\" + str(value).replace(\"'\", \"\") + \"',\"\n else:\n query = query + str(value).replace(\"'\", \"\") + \",\"\n else:\n if value != 'NULL': \n query = query + \"'\" + str(value).replace(\"'\", \"\") + \"')\"\n else:\n query = query + str(value).replace(\"'\", \"\") + \")\"\n print(query)\n cur.execute(query)\n self.db.commit() \n\n def show_top(self, table_name, count):\n cur = self.db.cursor()\n query = 'SELECT * FROM ' + table_name + ' limit ' + str(count) \n cur.execute(query)\n for row in cur.fetchall() :\n for field in row:\n print (field, end= \" | \")\n print(\"\\n\") \n \n def check_if_exists(self, table_name, column, value):\n res = -1\n cur = self.db.cursor()\n query = 'SELECT id FROM ' + table_name + ' WHERE '+ column + \" = '\" + str(value).replace(\"'\", \"\") + \"'\"\n print(query)\n cur.execute(query)\n for row in cur.fetchall() :\n for field in row:\n return 1\n return res\n \n def delete_elem(self, table_name, column, value):\n res = -1\n cur = self.db.cursor()\n query = 'DELETE FROM ' + table_name + ' WHERE '+ column + \" = '\" + str(value).replace(\"'\", \"\") + \"'\"\n print(query)\n cur.execute(query)\n self.db.commit() \n\n \n def search_by_values(self, table_name, column, value, precise = False):\n res = -1\n cur = self.db.cursor()\n cur.execute(query)\n query = 'SELECT * FROM ' + table_name + ' WHERE '+ column + \" = '\" + str(value).replace(\"'\", \"\") + \"'\"\n for row in cur.fetchall() :\n for field in row:\n return 1\n print() \n return res\n self.db.commit() \n \n \n def get_count(self, table_name):\n cur = self.db.cursor()\n query = 'SELECT COUNT(*) FROM ' + table_name \n cur.execute(query)\n for row in cur.fetchall() :\n for field in row:\n print (field, end = \" \")\n print() \n self.db.commit()\n \n def drop_table(self, table_name):\n cur = self.db.cursor()\n query = 'DROP TABLE ' + table_name \n cur.execute(query)\n self.db.commit() \n\n\n#settings = read_params(\"settings2.json\")\n#print(settings['sql_host'])\n#url = 'mysql://b1df3776b2b56c:8b4b450a@us-cdbr-iron-east-04.cleardb.net/heroku_0c1d0ea4e380413?reconnect=true'\n#result= urllib.urlparse(url)\n\n#db = database(settings['sql_host'], settings['sql_user'], settings['sql_passwd'], 
settings['sql_db']) \n\n\n#dict_groups = {\"stadium\":\"text\",\"city\":\"text\",\"homeTeam\":\"text\",\"awayTeam\":\"text\",\"shortHomeTeam\":\"text\",\"shortAwayTeam\":\"text\"} \n#db.create_table(dict_groups, \"games_fifa\")\n#db.insertDataIntoTableFromCSV(\"games_wc.csv\", \"games_fifa\")\n\n\n#print(db.getDictFromQueryRes(\"teams_wc\"))\n#dict_teams = {\"id\":\"int\", \"name\":\"text\",\"shortName\":\"text\", \"crestUrl\":\"text\", \"squadMarketValue\":\"text\"}\n#dict_teams = #{\"goalsAgainst\":\"int\",\"points\":\"int\",\"goals\":\"int\",\"teamId\":\"int\",\"crestURI\":\"text\",\"rank\":\"int\",\"team\":\"text\",\"playedGames\":\"int\",\"group\":\"text\",\"goalDifference\":\"int\"}\n#dict_games = {\"competitionId\":\"int\", \"date\":\"text\" ,\"status\":\"text\", \"homeTeamId\":\"text\", \"awayTeamId\":\"text\", \"goalsHomeTeam\":\"text\", \"goalsAwayTeam\":\"text\", \"CityId\":\"int\"}\n#db.create_table(dict_games, \"games\")\n#db.insertDataIntoTableFromCSV(\"matches2.csv\", \"games\")\n#dict_tournaments = {\"lastUpdated\":\"text\",\"numberOfTeams\":\"int\",\"league\":\"text\",\"caption\":\"text\",\"id\":\"int\",\"year\":\"int\",\"numberOfGames\":\"int\",\"numberOfMatchdays\":\"int\",\"currentMatchday\":\"int\"}\n#db.create_table(dict_tournaments, \"tournaments\")\n#db.insertDataIntoTableFromCSV(\"tournaments.csv\", \"tournaments\")\n#dict_places = {\"id\":\"int\",\"short_name\":\"text\",\"stadium\":\"text\",\"capacity\":\"int\",\"city\":\"text\"} \n#db.create_table(dict_places, \"places\")\n#db.insertDataIntoTableFromCSV(\"places.csv\", \"places\")\n\n\n#dict_rounds = {\"id\":\"int\", \"competitionId\":\"int\", \"title\":\"text\", \"start_at\":\"text\", \"end_at\":\"text\"} \n#db.create_table(dict_rounds, \"rounds\")\n#db.insertDataIntoTableFromCSV(\"rounds.csv\", \"rounds\")\n\n#dict_groups = {\"id\":\"int\", \"title\":\"text\", \"competitionId\":\"text\"} \n#db.create_table(dict_groups, \"groups\")\n#db.insertDataIntoTableFromCSV(\"groups.csv\", \"groups\")\n\n#dict_stand = #{\"goalsAgainst\":\"int\",\"points\":\"int\",\"goals\":\"int\",\"teamId\":\"int\",\"crestURI\":\"text\",\"rank\":\"int\",\"team\":\"text\",\"playedGames\":\"int\",\"group_\":\"text\",\"goalDifference\":\"int\", \"CompetitionId\":\"int\"}\n#db.create_table(dict_stand, \"standings\")\n#db.insertDataIntoTableFromCSV(\"standings.csv\", \"standings\")\n\n\n#competitionId,date,status,homeTeamId,awayTeamId,goalsHomeTeam,goalsAwayTeam\n#378,1930-07-12T23:00:00Z,FINISHED,771,805,3,0\n\n\n#dict_stages = {\"id\":\"int\",\"competitionId\":\"int\",\"title\":\"text\"}\n#db.create_table(dict_stages, \"stages\")\n#db.insertDataIntoTableFromCSV(\"stages.csv\", \"stages\", \"id\")\n\n\n\n\n#db.check_if_exists(\"PROFILES\", \"id\", \"6adbb44d-be67-4e59-b8db-84982829a370\")\n#db.drop_table(\"PROFILES\")\n#db.insert_values(\"PROFILES\", (\"id\", \"name\", \"bio\"), (\"3\", \"al\", \"5j5j5k\"))\n#db.insert_values(\"PROFILES\", (\"id\", \"name\", \"bio\"), (\"2\", \"al2\", \"rj4j\"))\n#db.show_top(\"PROFILES\", 100)\n#db.get_count(\"PROFILES\")\n \n\n\n\n\n\n\n\n\n\n","sub_path":"euro-stat/FlaskApp/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":11360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
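The database helper above assembles SQL by string concatenation and strips quotes out of values, which both corrupts data containing quotes and invites SQL injection. A minimal sketch of the same insert using driver-side parameter binding instead (MySQLdb and PyMySQL both use the %s paramstyle); the table and column names are illustrative, and since identifiers cannot be bound as parameters they must still come from trusted code:

def insert_values_safe(conn, table_name, headers, values):
    # identifiers are formatted in; values are bound and escaped by the driver
    placeholders = ", ".join(["%s"] * len(values))
    query = "INSERT INTO %s (%s) VALUES (%s)" % (table_name, ", ".join(headers), placeholders)
    cur = conn.cursor()
    cur.execute(query, tuple(values))
    conn.commit()

# e.g. insert_values_safe(db.db, "PROFILES", ("id", "name", "bio"), ("3", "al", "it's fine"))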
+{"seq_id":"273638573","text":"# Groundwater Modeling Coding Assignment #2\n# Jim Finnegan\n# 1D Transport Equation\n# comparison of methods\n\nfrom methods import finite_difference, finite_element, analytical\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# compare results for the following D-values\nD = [0.1, 1]\nfor i in D:\n # calculate results\n c_fd = finite_difference(i, 1)\n c_fe = finite_element(i, 1)\n c_an = analytical(i)\n c_an = np.array(c_an, dtype=np.float)\n\n # compare results\n fe_diff = np.subtract(c_fe[8, :], c_an[8, :])\n fd_diff = np.subtract(c_fd[8, :], c_an[8, :])\n\n x = np.linspace(0, 200, num=101)\n plt.plot(x, fe_diff, fd_diff)\n title_string = 'Solution comparison - difference from analytical\\n' \\\n + 'D = ' + str(i) + ', R = 1' + ', t = 400 days'\n plt.title(title_string)\n plt.xlabel('distance (m)')\n plt.ylabel('difference in C/C0')\n plt.legend(['FE', 'FD'])\n plt.show()","sub_path":"comparison200d.py","file_name":"comparison200d.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"217125611","text":"from __future__ import division\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport re\nfrom legacy.flexibility_model.mpc_district import planning_and_operation_preprocess_network\n\n__author__ = \"Sebastian Troitzsch\"\n__copyright__ = \"Copyright 2019, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Sebastian Troitzsch\", \"Sreepathi Bhargava Krishna\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"thomas@arch.ethz.ch\"\n__status__ = \"Production\"\n\n# Plotting settings\nplot_all_lines_on_streets = 0 # Plots all possible lines, even if not utilised\nplot_colors = [\n '#ffc130',\n '#ebbb53',\n '#d7b56d',\n '#beb086',\n '#a3aa9c',\n '#82a5b2',\n '#519fc7',\n '#5c92c7',\n '#6984c5',\n '#7276c3',\n '#7969c1',\n '#7f58be',\n '#8447bc']*100\n\nfont = {\n 'family': 'Arial',\n 'weight': 'regular',\n 'size': 13\n}\nmatplotlib.rc('font', **font)\nmarker_size = 6\n\n\ndef initial_network(locator):\n planning_and_operation_preprocess_network.calc_substation_location(locator)\n df_nodes, tranches = planning_and_operation_preprocess_network.connect_building_to_grid(locator)\n df_nodes_processed = planning_and_operation_preprocess_network.process_network(locator, df_nodes)\n (\n dict_length,\n dict_path\n ) = planning_and_operation_preprocess_network.create_length_dict(\n df_nodes_processed,\n tranches\n )\n\n return (\n df_nodes_processed,\n tranches,\n dict_length,\n dict_path\n )\n\n\ndef plot_nodes(\n df_nodes,\n ax\n):\n # Plot Nodes\n for idx, point in df_nodes.iterrows():\n name = str(point['Name'][4::])\n\n if point['Type'] == 'PLANT':\n ax.plot(point.geometry.xy[0], point.geometry.xy[1], marker='s', color='red', markersize=marker_size)\n # ax.text(point.geometry.xy[0][0], point.geometry.xy[1][0], name, fontsize=8)\n elif point['Type'] == 'CONSUMER':\n ax.plot(point.geometry.xy[0], point.geometry.xy[1], marker='o', color='green', markersize=marker_size)\n # ax.text(point.geometry.xy[0][0], point.geometry.xy[1][0], name, fontsize=8)\n # else:\n # ax.plot(point.geometry.xy[0], point.geometry.xy[1], marker='o', color='blue', markersize=marker_size)\n # # ax.text(point.geometry.xy[0][0], point.geometry.xy[1][0], name, fontsize=8)\n\n return ax\n\n\ndef plot_lines_on_street(\n var_x,\n dict_path,\n df_nodes,\n ax,\n):\n for x in var_x:\n if x.value > 0.5 or plot_all_lines_on_streets:\n node_int = re.findall(r'\\d+', x.local_name)\n\n start_node = int(node_int[0])\n end_node = int(node_int[1])\n\n list_path = dict_path[start_node][end_node]\n\n for idx_path, path in enumerate(list_path[:-1]):\n int_node1 = list_path[idx_path]\n int_node2 = list_path[idx_path + 1]\n\n geo_node1 = df_nodes.loc[int_node1].geometry.xy\n geo_node2 = df_nodes.loc[int_node2].geometry.xy\n\n if plot_all_lines_on_streets:\n edge_color = 'black'\n else:\n if int(node_int[2]) < len(plot_colors):\n edge_color = plot_colors[int(node_int[2])]\n else:\n edge_color = 'black'\n\n ax.plot(\n (geo_node1[0][0], geo_node2[0][0]),\n (geo_node1[1][0], geo_node2[1][0]),\n color=edge_color\n )\n\n return ax\n\n\ndef plot_lines(\n var_x,\n df_nodes,\n ax\n):\n for x in var_x:\n if x.value > 0.5:\n node_int = re.findall(r'\\d+', x.local_name)\n\n int_node1 = int(node_int[0])\n int_node2 = int(node_int[1])\n\n geo_node1 = df_nodes.loc[int_node1].geometry.xy\n geo_node2 = df_nodes.loc[int_node2].geometry.xy\n\n if int(node_int[2]) < len(plot_colors):\n edge_color = plot_colors[int(node_int[2])]\n else:\n edge_color = 'black'\n\n 
ax.plot(\n (geo_node1[0][0], geo_node2[0][0]),\n (geo_node1[1][0], geo_node2[1][0]),\n color=edge_color\n )\n\n return ax\n\n\ndef plot_network_on_street(locator, m):\n (\n df_nodes,\n tranches,\n dict_length,\n dict_path\n ) = initial_network(locator)\n\n var_x = m.var_x.values()\n\n # Plotting Graph\n (fig, ax) = plt.subplots(1, 1)\n\n ax.axis('auto')\n ax.set_aspect('equal')\n ax.set_axis_off()\n\n ax = plot_lines_on_street(\n var_x,\n dict_path,\n df_nodes,\n ax\n )\n\n # Plotting Buildings\n (\n building_points,\n building_poly\n ) = planning_and_operation_preprocess_network.calc_substation_location(locator)\n building_poly.plot(ax=ax, color='white', edgecolor='grey')\n for x, y, name in zip(building_points.geometry.x, building_points.geometry.y, building_points['Name']):\n ax.text(x, y, name, fontsize=8, horizontalalignment='center')\n\n # Plotting Nodes\n ax = plot_nodes(df_nodes, ax)\n plt.tight_layout()\n\n # Get legend entries\n legend_items = [\n matplotlib.lines.Line2D(\n [], [], linestyle='', marker='s', color='red', markersize=marker_size, label='Substation\\nconnection'\n ),\n matplotlib.lines.Line2D(\n [], [], linestyle='', marker='o', color='green', markersize=marker_size, label='Building\\nconnection'\n ),\n # matplotlib.lines.Line2D(\n # [], [], linestyle='', marker='o', color='blue', markersize=marker_size, label='Street\\nintersection'\n # )\n ]\n for line_type in m.set_linetypes:\n legend_items.append(\n matplotlib.lines.Line2D(\n [], [], color=plot_colors[int(line_type)], label='Linetype {}'.format(line_type)\n )\n )\n\n # Add legend\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n plt.legend(handles=legend_items, loc='center', bbox_to_anchor=(1.1, 0.45))\n\n return fig\n\n\ndef plot_network(locator, m):\n df_nodes, tranches, dict_length, dict_path = initial_network(locator)\n\n var_x = m.var_x.values()\n\n # Plotting Graph\n (fig, ax) = plt.subplots(1, 1)\n\n ax.axis('auto')\n ax.set_aspect('equal')\n ax.set_axis_off()\n\n ax = plot_lines(\n var_x,\n df_nodes,\n ax\n )\n\n # Plotting Nodes\n ax = plot_nodes(\n df_nodes,\n ax\n )\n plt.tight_layout()\n\n # Get legend entries\n legend_items = [\n matplotlib.lines.Line2D(\n [], [], linestyle='', marker='s', color='red', markersize=marker_size, label='Substation\\nconnection'\n ),\n matplotlib.lines.Line2D(\n [], [], linestyle='', marker='o', color='green', markersize=marker_size, label='Building\\nconnection'\n ),\n # matplotlib.lines.Line2D(\n # [], [], linestyle='', marker='o', color='blue', markersize=marker_size, label='Street\\nintersection'\n # )\n ]\n for line_type in m.set_linetypes:\n legend_items.append(\n matplotlib.lines.Line2D(\n [], [], color=plot_colors[int(line_type)], label='Linetype {}'.format(line_type)\n )\n )\n\n # Add legend\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n plt.legend(handles=legend_items, loc='center', bbox_to_anchor=(1.1, 0.45))\n\n return fig\n\n\ndef save_plots(locator, m):\n plot_network_on_street(locator, m)\n plt.savefig(locator.get_mpc_results_district_plot_streets())\n plot_network(locator, m)\n plt.savefig(locator.get_mpc_results_district_plot_grid())\n","sub_path":"legacy/flexibility_model/mpc_district/planning_and_operation_plots.py","file_name":"planning_and_operation_plots.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"134604026","text":"import pandas as pd\nimport numpy as np\n\nfrom hinpy.classes.hin_class import *\n\nfrom hinpy.rs.pure_popularity import *\nfrom hinpy.rs.content_based import *\nfrom hinpy.rs.surprise_based import *\nfrom hinpy.rs.random_rs import *\n\ndef ImplicitUtilityMetrics(hin,relation_name,parameters,verbose=False):\n\t\"\"\"\n\tCompute precision, recall, an F1 for implicit RS (IPP,CB,random).\n\n\t\"\"\"\n\n\t# Retrieve the table of relation_name and separate into test and train parts\n\tif 'implicit_metrics_fraction' in parameters:\n\t\tfraction = parameters['implicit_metrics_fraction']\n\telse:\n\t\tfraction = 0.25\n\n\tcompare_cols = ['start_object','start_group','end_object','end_group']\n\n\tsubtable = hin.table[hin.table.relation==relation_name]\n\tgrouped = subtable.groupby('start_object')\n\n\ttest_subtable = grouped.apply(lambda x: x.sample(frac=fraction)).reset_index(drop=True)\n\ttrain_subtable = subtable[~subtable[compare_cols].apply(tuple,1).isin(test_subtable[compare_cols].apply(tuple,1))].reset_index(drop=True)\n\n\ttrain_subtable.loc[:,'relation'] = 'train_like_group'\n\n\t# Create train Link Group from table\n\thin.CreateLinkGroupFromTable(train_subtable,'train_like_group')\n\n\t# Create new 'seen' table subtracting elements from test\n\ttrain_seen_table = hin.table[hin.table.relation==parameters['seen_relation']].copy(deep=True)\n\ttrain_seen_table.loc[:,'relation'] = train_subtable.relation.iloc[0]\n\ttrain_seen_table = train_seen_table[~train_seen_table[compare_cols].apply(tuple,1).isin(test_subtable[compare_cols].apply(tuple,1))].reset_index(drop=True)\n\t# train_seen_table['value'] = np.nan\n\thin.CreateLinkGroupFromTable(train_seen_table,'train_seen_group')\n\n\t# For n in 'implicit_metrics_N':[1,5,10], compute recos\n\t# and compute recall and precision per start object\n\treport_dic = {'topK':parameters['implicit_metrics_N']}\n\tprecision = np.zeros(len(parameters['implicit_metrics_N']))\n\trecall = np.zeros(len(parameters['implicit_metrics_N']))\n\tf1 = np.zeros(len(parameters['implicit_metrics_N']))\n\treco_train_params = {'method':parameters['method'], 'topK_predictions': 4, 'seen_relation':'train_seen_group',\n 'paths':parameters['paths'],\n 'paths_weights':parameters['paths_weights'],\n 'implicit_metrics':False}\n\tfor i,k in enumerate(parameters['implicit_metrics_N']):\n\t\treco_train_params['topK_predictions']=k\n\t\thin.CreateLinkGroupFromRS(relation_name='train_like_group',\n\t\t\t\t\t\t\t\t new_relation_name='implicit_metrics_recs_%d'%k,\n\t\t\t\t\t\t\t\t parameters=reco_train_params)\n\t\t# Here, compute precision, recall and f1\n\t\ttrain_reco_subtable = hin.table[hin.table.relation=='implicit_metrics_recs_%d'%k].copy(deep=True)\n\t\tq = pd.DataFrame(columns=['object','precision','recall','f1'])\n\t\tq['object'] = test_subtable.start_object.unique()\n\t\tfor idx,row in q.iterrows():\n\t\t\tTu = set(test_subtable[test_subtable.start_object==row.object].end_object)\n\t\t\tLu = set(train_reco_subtable[train_reco_subtable.start_object==row.object].end_object)\n\t\t\tif len(Lu)>0:\n\t\t\t\tq.loc[idx,'precision'] = len(Tu&Lu)/len(Lu)\n\t\t\telse:\n\t\t\t\tq.loc[idx,'precision'] = 0\n\t\t\tif len(Tu)>0:\n\t\t\t\tq.loc[idx,'recall'] = len(Tu&Lu)/len(Tu)\n\t\t\telse:\n\t\t\t\tq.loc[idx,'recall'] = 0\n\t\tprecision[i] = q['precision'].mean()\n\t\trecall[i] = q['recall'].mean()\n\t\tf1[i] = 2*precision[i]*recall[i]/(precision[i]+recall[i])\n\t\t# Delete the recommendation\n\t\thin.DeleteLinkGroup('implicit_metrics_recs_%d'%k)\n\t# Delete 
train Link Group\n\thin.DeleteLinkGroup('train_like_group')\n\thin.DeleteLinkGroup('train_seen_group')\n\n\treport_dic['precision'] = precision\n\treport_dic['recall'] = recall\n\treport_dic['f1'] = f1\n\n\treturn report_dic;\n","sub_path":"hinpy/rs/implicit_utility.py","file_name":"implicit_utility.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
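A worked example of the per-user metrics computed in the loop above, where Tu is the held-out test set and Lu the top-K recommendation list:

Tu = {'item_a', 'item_b', 'item_c'}   # relevant items from the test split
Lu = {'item_a', 'item_d'}             # top-K recommendations, K = 2
precision = len(Tu & Lu) / len(Lu)    # 1/2 = 0.5
recall = len(Tu & Lu) / len(Tu)       # 1/3 = 0.333...
f1 = 2 * precision * recall / (precision + recall)  # = 0.4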
+{"seq_id":"374108385","text":"import numpy as np \r\nimport matplotlib.pyplot as plt\r\nimport datetime\r\nimport json\r\nimport pickle\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\n\r\ndef cross_validation_visualization(lambds, metric_te, ax):\r\n \"\"\"visualization the curves of mse_tr and mse_te.\"\"\"\r\n #ax.semilogx(lambds, metric_tr, marker=\".\", color='b', label='train accuracy')\r\n ax.semilogx(lambds, metric_te, label='test accuracy')\r\n\r\n\r\ndef visualize_boxplot_cross_validation2(k_data, pos, ax):\r\n ax.grid(True, axis='x', which='major', linestyle='--')\r\n ax.boxplot(k_data,positions = [np.exp(i+1) for i in range(6)], sym='+')\r\n\r\n\r\ndef test_alphas_meth(meth,alphas,X,y,k = 4):\r\n \"\"\"\r\n Test the different values of alpha for a given method and return the best alpha according to the mse criterion.\r\n \"\"\"\r\n res_mse = []\r\n for alpha in alphas: \r\n rid = meth(alpha = alpha)\r\n res_mse.append(-cross_val_score(rid,X,y,cv = k,scoring='neg_mean_squared_error'))\r\n fig,ax = plt.subplots(1)\r\n ax.set_xscale('log')\r\n ax.semilogx(alphas,res_mse)\r\n ax.set_ylabel('Mse')\r\n ax.set_xlabel('alphas')\r\n ax.grid(True)\r\n rse_mse_means = np.mean(res_mse,axis = 1)\r\n #cross_validation_visualization(alphas,rse_mse_means,ax = ax)\r\n #visualize_boxplot_cross_validation2(res_mse,alphas,ax)\r\n return alphas[np.argmin(rse_mse_means)]\r\n\r\n\r\ndef log_model(results,pipeline,param_grid,datapath):\r\n \"\"\"\r\n Write a log file of the model in order to keep trace of it\r\n \"\"\"\r\n \r\n date = (datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\r\n mse = -results['test_neg_mean_squared_error'].mean()\r\n mae = -results['test_neg_mean_absolute_error'].mean()\r\n r2 = results['test_r2'].mean()\r\n file_name = 'mae=%.2f_mse=%.2f_R2%.2f='%(mae,mse,r2) + date \r\n file_path = './log'\r\n \r\n res = {'mae':mae,'mse':mse,'r2':r2}\r\n \r\n pipeline = [str(i) for i in list(pipeline)]\r\n\r\n def defo(obj):\r\n if isinstance(obj, np.ndarray):\r\n return obj.tolist()\r\n raise TypeError('Not serializable')\r\n with open('../log/' + datapath + '/' + file_name + '.txt', 'w',encoding=\"utf-8\",newline='\\r\\n') as file:\r\n json.dump(res,file,indent=4,ensure_ascii=False)\r\n json.dump(pipeline,file,default=defo)\r\n with open('../log/' + datapath + '/' + file_name + '.pickle','wb') as file:\r\n pickle.dump([pipeline,param_grid],file)\r\n print('Log saved')\r\n\r\ndef display_score(cv_results):\r\n \"\"\"\r\n cv_results:dictionarry having\r\n test_neg_mean_squared_error,test_neg_mean_absolute_error and test_r2 as its key\r\n \"\"\"\r\n K = len(cv_results['test_neg_mean_squared_error'])\r\n mse = -cv_results['test_neg_mean_squared_error'].mean()\r\n mae = -cv_results['test_neg_mean_absolute_error'].mean()\r\n r2 = cv_results['test_r2'].mean()\r\n print(\"On %i folds\" % K)\r\n print(\"Obtained MSE on test set %2.2f \" % mse)\r\n print(\"Obtained MAE on test set %2.2f \" % mae)\r\n print(\"Obtained r2 on test set %2.2f \" % r2)\r\n\r\n \r\ndef bias_variance_visualization(scoring_train, scoring_test, mean_scoring_train, mean_scoring_test, data_range, axis, scoring):\r\n \"\"\"\r\n Visulize the bias-variance decomposition on 3 subplots, one for each scoring.\r\n \"\"\" \r\n axis.plot(\r\n data_range,\r\n scoring_train,\r\n 'b',\r\n linestyle=\"-\",\r\n color=([0.7, 0.7, 1]),\r\n label='train',\r\n linewidth=0.3)\r\n axis.plot(\r\n data_range,\r\n scoring_test,\r\n 'r',\r\n linestyle=\"-\",\r\n color=[1, 0.7, 0.7],\r\n label='test',\r\n linewidth=0.3)\r\n 
axis.plot(\r\n data_range,\r\n mean_scoring_train,\r\n 'b',\r\n linestyle=\"-\",\r\n label='train',\r\n linewidth=3)\r\n axis.plot(\r\n data_range,\r\n mean_scoring_test,\r\n 'r',\r\n linestyle=\"-\",\r\n label='test',\r\n linewidth=3)\r\n axis.set_xlabel(\"data size\")\r\n axis.set_ylabel(\"error\")\r\n axis.set_title(scoring)\r\n #axis.legend(loc='best')\r\n\r\ndef bias_variance_decomposition(data_range, results, seeds):\r\n \"\"\"\r\n Decompose the results and triggers their visualization.\r\n \"\"\"\r\n \r\n print(\"Start printing... \\n\")\r\n \r\n k = len(results[0][0]['fit_time'])\r\n mse_tr, mae_tr, r2_tr, mse_te, mae_te, r2_te = np.zeros((6, len(data_range), len(seeds), k))\r\n \r\n # Splitting the results into the different scorings and training and testing errors.\r\n for i, size in enumerate(data_range):\r\n for index, seed in enumerate(seeds):\r\n \r\n mse_tr[i][index] = -results[i][index]['train_neg_mean_squared_error']\r\n mse_te[i][index] = -results[i][index]['test_neg_mean_squared_error']\r\n \r\n mae_tr[i][index] = -results[i][index]['train_neg_mean_absolute_error']\r\n mae_te[i][index] = -results[i][index]['test_neg_mean_absolute_error']\r\n \r\n r2_tr[i][index] = results[i][index]['train_r2']\r\n r2_te[i][index] = results[i][index]['test_r2']\r\n \r\n # averaging the results over k-fold and then random seeds\r\n mse_tr_mean_kfold = mse_tr.mean(axis=2)\r\n mse_tr_mean_seeds = mse_tr_mean_kfold.mean(axis=1)\r\n mse_te_mean_kfold = mse_te.mean(axis=2)\r\n mse_te_mean_seeds = mse_te_mean_kfold.mean(axis=1)\r\n \r\n mae_tr_mean_kfold = mae_tr.mean(axis=2)\r\n mae_tr_mean_seeds = mae_tr_mean_kfold.mean(axis=1)\r\n mae_te_mean_kfold = mae_te.mean(axis=2)\r\n mae_te_mean_seeds = mae_te_mean_kfold.mean(axis=1)\r\n \r\n r2_tr_mean_kfold = r2_tr.mean(axis=2)\r\n r2_tr_mean_seeds = r2_tr_mean_kfold.mean(axis=1)\r\n r2_te_mean_kfold = r2_te.mean(axis=2)\r\n r2_te_mean_seeds = r2_te_mean_kfold.mean(axis=1)\r\n \r\n # Visualization\r\n fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True)\r\n fig.set_size_inches(14,9)\r\n \r\n bias_variance_visualization(mse_tr_mean_kfold,mse_te_mean_kfold, mse_tr_mean_seeds, mse_te_mean_seeds, data_range, axes[0], 'mse')\r\n bias_variance_visualization(mae_tr_mean_kfold, mae_te_mean_kfold, mae_tr_mean_seeds, mae_te_mean_seeds,data_range, axes[1], 'mae')\r\n bias_variance_visualization(r2_tr_mean_kfold, r2_te_mean_kfold, r2_tr_mean_seeds, r2_te_mean_seeds, data_range, axes[2], 'r2')\r\n \r\n plt.savefig(\"bias-variance\")\r\n \r\n return mse_tr_mean_seeds, mae_tr_mean_seeds, r2_tr_mean_seeds, mse_te_mean_seeds, mae_te_mean_seeds, r2_te_mean_seeds\r\n\r\ndef bias_variance(pipeline, start, stop, number, seed_number):\r\n \"\"\"\r\n Bias-variance decomposition to test the predictive power of a pipeline with subsets of different sizes.\r\n \"\"\"\r\n data_range = np.logspace(np.log10(start),np.log10(stop),number, dtype=int)\r\n results = []\r\n \r\n seeds = range(seed_number)\r\n \r\n for iter_, size in enumerate(data_range):\r\n print('Data size of iteration {i}: {s} \\n'.format(i=iter_, s=size))\r\n results.append(pipeline(size, seeds))\r\n \r\n print('Finished cross-validation...\\n')\r\n \r\n return bias_variance_decomposition(data_range, results, seeds)","sub_path":"src/lin_mods.py","file_name":"lin_mods.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
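A hedged sketch of producing the cv_results dictionary that display_score() and log_model() above expect, using scikit-learn's cross_validate; the Ridge model and synthetic data are placeholders:

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_validate

X, y = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
cv_results = cross_validate(
    Ridge(alpha=1.0), X, y, cv=4,
    scoring=['neg_mean_squared_error', 'neg_mean_absolute_error', 'r2'],
    return_train_score=True,  # bias_variance_decomposition also reads the train_* keys
)
display_score(cv_results)  # prints MSE, MAE and r2 averaged over the 4 folds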
+{"seq_id":"115931477","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015 eNovance SAS \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom hardware import generate\nfrom hardware import state\n\nimport netaddr\nimport os\nimport sys\n\nimport argparse\nimport yaml\n\n_VERSION = \"0.0.1\"\n\n\ndef _get_yaml_content(path):\n try:\n with open(path, \"r\") as f:\n return yaml.load(f)\n except (OSError, IOError) as e:\n print(\"Error: cannot open or read file '%s': %s\" % (path, e))\n sys.exit(1)\n\n\ndef collect(config_path):\n # check config directory path\n if not os.path.exists(config_path):\n print(\"Error: --config-dir='%s' does not exist.\" % config_path)\n sys.exit(1)\n\n # get state object\n state_obj = state.State()\n state_obj.load(os.path.join(config_path, 'edeploy') + '/')\n\n # get global conf\n global_conf = _get_yaml_content(\"%s/config-tools/global.yml\" % config_path)\n # expand keys prefixed by \"=\"\n global_conf[\"hosts\"] = generate.generate_dict(global_conf[\"hosts\"], \"=\")\n\n # the virtual configuration of each host\n virt_platform = {\"hosts\": {}}\n\n for hostname in global_conf[\"hosts\"]:\n # construct the host virtual configuration\n virt_platform[\"hosts\"][hostname] = state_obj.hardware_info(hostname)\n\n # add the profile\n virt_platform[\"hosts\"][hostname][\"profile\"] = \\\n global_conf[\"hosts\"][hostname][\"profile\"]\n\n # release the lock obtained during the load call\n state_obj.unlock()\n\n # so far, the nodes are described excepted the install-server\n # the code below adds the install-server from the global conf.\n for hostname in global_conf[\"hosts\"]:\n if global_conf[\"hosts\"][hostname][\"profile\"] == \"install-server\":\n # add the admin_network config\n admin_network = global_conf[\"config\"][\"admin_network\"]\n admin_network = netaddr.IPNetwork(admin_network)\n nics = [{\"name\": \"eth0\",\n \"ip\": global_conf[\"hosts\"][hostname][\"ip\"],\n \"network\": str(admin_network.network),\n \"netmask\": str(admin_network.netmask)}]\n virt_platform[\"hosts\"][hostname][\"nics\"] = nics\n break\n\n return virt_platform\n\n\ndef save_virt_platform(virt_platform, output_path):\n output_file_path = os.path.normpath(\"%s/virt_platform.yml\" % output_path)\n\n try:\n with open(output_file_path, 'w') as outfile:\n outfile.write(yaml.dump(virt_platform, default_flow_style=False))\n print(\"Virtual platform generated successfully at '%s' !\" %\n output_file_path)\n except (OSError, IOError) as e:\n print(\"Error: cannot write file '%s': %s\" % (output_file_path, e))\n sys.exit(1)\n\n\ndef main():\n cli_parser = argparse.ArgumentParser(\n description='Collect architecture information from the edeploy '\n 'directory as generated by config-tools/download.sh.')\n cli_parser.add_argument('--config-dir',\n default=\"./top/etc\",\n help='The config directory absolute path.')\n cli_parser.add_argument('--output-dir',\n default=\"./\",\n help='The output directory of the virtual'\n ' configuration.')\n 
cli_parser.add_argument('--sps-version',\n required=True,\n help='The SpinalStack version.')\n cli_arguments = cli_parser.parse_args()\n\n virt_platform = collect(cli_arguments.config_dir)\n virt_platform[\"version\"] = cli_arguments.sps_version\n save_virt_platform(virt_platform,\n cli_arguments.output_dir)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"551030047","text":"# Albert Vu 35695286 and Michelle Wong 14502669. ICS 31 Lab sec 12. Lab asst 5.\n\n__author__ = 'dgk'\n\n# RESTAURANT COLLECTION PROGRAM\n# ICS 31, UCI, David G. Kay, Fall 2012\n\n# Implement Restaurant as a namedtuple, collection as a list\n\n##### MAIN PROGRAM (CONTROLLER)\n\ndef restaurants(): # nothing -> interaction\n \"\"\" Main program\n \"\"\"\n print(\"Welcome to the restaurants program!\")\n our_rests = Collection_new()\n our_rests = handle_commands(our_rests)\n print(\"\\nThank you. Good-bye.\")\n\nMENU = \"\"\"\nRestaurant Collection Program --- Choose one\n n: Add a new restaurant to the collection\n r: Remove a restaurant from the collection\n c: Change prices for the dishes served\n s: Search the collection for selected restaurants\n sr: Search the collection for restaurants with prices at or below a specified value\n p: Print all the restaurants\n e: Remove (erase) all the restaurants from the collection\n q: Quit\n\"\"\"\n\ndef handle_commands(C: list) -> list:\n \"\"\" Display menu, accept and process commands.\n \"\"\"\n while True:\n response = input(MENU)\n if response==\"q\":\n return C\n elif response=='n':\n r = Restaurant_get_info()\n C = Collection_add(C, r)\n elif response=='r':\n n = input(\"Please enter the name of the restaurant to remove: \")\n C = Collection_remove_by_name(C, n)\n elif response=='c':\n n = float(input(\"Please enter the percentage change in price: \"))\n C = Collection_change_prices(C, n)\n elif response=='p':\n print(Collection_str(C))\n elif response=='s':\n n = input(\"Please enter the name of the restaurant to search for: \")\n for r in Collection_search_by_name(C, n):\n print(Restaurant_str(r))\n elif response=='sr':\n n = float(input(\"Please enter the price: \"))\n print('\\nList of Restaurants with prices below or at ${:2.2f}'.format(n))\n for r in Collection_search_price(C, n):\n print('\\n' + Restaurant_str(r))\n elif response=='e':\n C = Collection_new()\n else:\n invalid_command(response)\n\ndef invalid_command(response): # string -> interaction\n \"\"\" Print message for invalid menu command.\n \"\"\"\n print(\"Sorry; '\" + response + \"' isn't a valid command. 
Please try again.\")\n\n\n\n\n##### Restaurant\nfrom collections import namedtuple\nRestaurant = namedtuple('Restaurant', 'name cuisine phone dish price menu')\n# Constructor: r1 = Restaurant('Taillevent', 'French', '01-11-22-33-44', 'Escargots', 23.50)\n\ndef Restaurant_str(self: Restaurant) -> str:\n \"\"\" Return a string representing the Restaurant\n \"\"\"\n if self.menu == []:\n return (\n \"Name: \" + self.name + \"\\n\" +\n \"Cuisine: \" + self.cuisine + \"\\n\" +\n \"Phone: \" + self.phone + \"\\n\" +\n \"Dish: \" + self.dish + \"\\n\" +\n \"Price: ${:2.2f}\".format(self.price) + \"\\n\\n\")\n return (\n \"Name: \" + self.name + \"\\n\" +\n \"Cuisine: \" + self.cuisine + \"\\n\" +\n \"Phone: \" + self.phone + \"\\n\" +\n \"Dish: \" + self.dish + \"\\n\" +\n \"Price: ${:2.2f}\".format(self.price) + \"\\n\"\n \"Menu: \" + Menu_str(self.menu) + \"\\n\\n\")\n\ndef Restaurant_get_info() -> Restaurant:\n \"\"\" Prompt user for fields of Restaurant; create and return.\n \"\"\"\n return Restaurant(\n input(\"Please enter the restaurant's name: \"),\n input(\"Please enter the kind of food served: \"),\n input(\"Please enter the phone number: \"),\n input(\"Please enter the name of the best dish: \"),\n float(input(\"Please enter the price of that dish: \")),\n Menu_enter())\n\ndef Restaurant_change_price(self: Restaurant, change: float) -> Restaurant:\n \"\"\" Takes a Restaurant and a number. Changes price of the food by a percentage corresponding to that number\n \"\"\"\n self = self._replace(price = self.price * (1 + change/100))\n self = self._replace(menu = Menu_change_price(self.menu, change))\n return self\n\ndef Restaurant_search_price(self: Restaurant, price: float) -> bool:\n \"\"\" Given a Restaurant, Checks if the Restaurant prices are below or at price and returns bool\n \"\"\"\n if self.price <= price and Menu_search_price(self.menu, price):\n return True\n return False\n\n#### DISHES\nDish = namedtuple('Dish', 'name price calories')\n\ndef Dish_str(food: Dish) -> str:\n 'takes a Dish object and returns string in form of name (price): calories'\n return food.name + ' (${:2.2f}'.format(food.price) + '): ' + str(food.calories) + ' cal\\n'\n\ndef Dish_get_info() -> Dish:\n \"\"\" Takes input for Dish\n \"\"\"\n return Dish(\n input(\"Please enter the dish's name: \"),\n float(input(\"Please enter the dish's price: \")),\n input(\"Please enter the dish's calories: \"))\n\ndef Dish_change_price(food: Dish, change_in_price: float) -> Dish:\n '''takes a Dish and a number and returns a Dish that has its price changed\n by change_in_price%'''\n food = food._replace(price = food.price*(1.00 + change_in_price/100))\n return food\n\n#### Menu\n\ndef Menu_enter() -> list:\n \"\"\" Takes user input to create a dishlist\n \"\"\"\n dishlist = []\n while True:\n response = input(\"Do you want to add further dishes? 
(yes/no): \")\n if response == 'yes':\n dishlist.append(Dish_get_info())\n elif response == 'no':\n return dishlist\n else:\n invalid_command(response)\n\ndef Menu_str(dishlist: list) -> str:\n \"\"\" Takes in a list of Dishes and displays the Dishes in format\n \"\"\"\n s = \"\"\n for d in range(0,len(dishlist)):\n if d == 0:\n s += Dish_str(dishlist[d])\n else:\n s += \" \" + Dish_str(dishlist[d]) \n return s\n\ndef Menu_change_price(dishlist: list, num: float) -> list:\n \"\"\" Takes in a list of dishes and a number, changes the prices of all the dishes by percentage of number\n \"\"\"\n result = []\n for d in dishlist:\n result.append(Dish_change_price(d, num))\n return result\n\ndef Menu_search_price(dishlist: list, price: float) -> bool:\n \"\"\" Takes in a list of dishes and returns True if all dishes are below or at price\n \"\"\"\n result = []\n for d in dishlist:\n if d.price <= price:\n result.append(d)\n if len(result) == len(dishlist):\n return True\n return False\n\n#### COLLECTION\n# A collection is a list of restaurants\n\ndef Collection_new() -> list:\n ''' Return a new, empty collection\n '''\n return [ ]\n\ndef Collection_str(C: list) -> str:\n ''' Return a string representing the collection\n '''\n s = \"\"\n for r in C:\n s = s + Restaurant_str(r)\n return s\n\ndef Collection_search_by_name(C: list, name: str) -> list:\n \"\"\" Return list of Restaurants in input list whose name matches input string.\n \"\"\"\n result = [ ]\n for r in C:\n if r.name == name:\n result.append(r)\n return result\n # alternative (using a list comprehension):\n # return [r for r in C if r.name == name]\n\ndef Collection_add(C: list, R: Restaurant) -> list:\n \"\"\" Return list of Restaurants with input Restaurant added at end.\n \"\"\"\n C.append(R)\n return C\n\ndef Collection_remove_by_name(C: list, name: str) -> list:\n \"\"\" Given name, remove all Restaurants with that name from collection.\n \"\"\"\n result = [ ]\n for r in C:\n if r.name != name:\n result.append(r)\n return result\n # Alternative:\n # return [r for r in self.rests if r.name != name]\n\ndef Collection_change_prices(C: list, change: float) -> list:\n \"\"\" Given a number, change the prices of all the restaurants by a percentage corresponding to that number\n \"\"\"\n result = []\n for r in C:\n result.append(Restaurant_change_price(r, change))\n return result\n\ndef Collection_search_price(C: list, price: float) -> list:\n \"\"\" Given a number, search for restaurants with prices at or below the specified value\n \"\"\"\n result = []\n for r in C:\n if Restaurant_search_price(r, price):\n result.append(r)\n return result\n\nrestaurants()\n\n","sub_path":"ICS 31/Lab 5/restaurantsf.py","file_name":"restaurantsf.py","file_ext":"py","file_size_in_byte":8220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"527411951","text":"# -*- coding: utf-8 -*-\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nheaders = requests.utils.default_headers()\r\nheaders.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/69.0'})\r\n\r\nurl = \"https://mtuci.ru/time-table/\"\r\nreq = requests.get(url, headers)\r\nsoup = BeautifulSoup(req.content, 'html.parser')\r\nall_links = soup.find_all('a')\r\nexcel_links = []\r\nfor link in all_links:\r\n href = link.get('href')\r\n # print(href[-1:-5:-1])\r\n if href[-4:-1] == 'xls':\r\n excel_links.append(href)\r\n with open(r'' + href, \"wb\") as f: # открываем файл для записи, в режиме wb\r\n print(href)\r\n ufr = requests.get(url+href) # делаем запрос\r\n f.write(ufr.content) # записываем содержимое в файл; как видите - content запроса\r\n f.close()\r\nprint(excel_links)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"32733184","text":"##\n#\n# nwpsTrkngCG0GUM - Joe Maloney 2016-07-08\n#\n# Smart Init for nwpsTrkngCG0GUM model.\n#\n##\n\nfrom Init import *\nfrom nwpsTrkngCG0 import *\n\n##--------------------------------------------------------------------------\n## Module that calculates surface weather elements from nwpsTrkngCG0 model\n## output.\n##--------------------------------------------------------------------------\nclass nwpsTrkngCG0GUMForecaster(nwpsTrkngCG0Forecaster):\n def __init__(self):\n nwpsTrkngCG0Forecaster.__init__(self, \"nwpsTrkngCG0GUM\", \"nwpsTrkngCG0GUM\")\n\ndef main():\n forecaster = nwpsTrkngCG0GUMForecaster()\n forecaster.run()\n forecaster.notifyGFE('GUM')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"edexOsgi/com.raytheon.edex.plugin.gfe/utility/edex_static/base/smartinit/nwpsTrkngCG0GUM.py","file_name":"nwpsTrkngCG0GUM.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"87597461","text":"from numpy import linspace, zeros\nfrom pyplot import plot, xlabel, ylabel, grid, show, figure\n\ndef _transform_number_to_func(func_or_number):\n \"\"\"\n Undersøk om en variabel er et tall eller en funksjon\n Hvis variabelen er et tall a, returneres en konstant funksjon f(t) = 0\n Hvis variableen er en funksjon, returneres funksjonen tilbake\n Hvis variabelen er noe annet, gis det en feilmelding\n \"\"\"\n\n if type(func_or_number) == float or type(func_or_number) == int:\n u = float(func_or_number)\n def func(*args):\n return func_or_number\n return func\n elif type(func_or_number) == type(_transform_number_to_func):\n return func_or_number\n else:\n raise ValueError('parameter v to function modell_strekning must be a number or a function')\n\n\n\ndef solve(T, RHS, dt=0.1, *args):\n \"\"\"\n Solves the system of differential equations y'(t) = f(t, y),\n where RHS = f(t, y)\n using the forward Euler scheme.\n \"\"\"\n\n # make sure RHS is a function\n RHS = _transform_number_to_func(RHS)\n\n U = args # tuple of initial conditions\n t = [0]\n u = [U]\n k = 0\n\n while t[k] < T:\n u_next = u[k] + dt*RHS(t[k], u[k])\n t_next = t[k] + dt\n\n t.append(t_next)\n u.append(u_next)\n k += 1\n return t, u","sub_path":"programmer/eksempler_vei_fart_tid/ikke_lineare_problemer/abstract_flexible_solve_system_of_differential_equations.py","file_name":"abstract_flexible_solve_system_of_differential_equations.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"281802980","text":"import pytest\n\nfrom . import MockGPSStream\nfrom radicl.gps import USBGPS\nfrom unittest.mock import patch\nfrom types import SimpleNamespace\n\n\n@pytest.fixture(scope='function')\ndef mock_gps_port(payload):\n yield MockGPSStream(payload)\n\n\n@pytest.mark.parametrize('payload, expected', [\n # Lat long found with a retry\n ([b'$GPTXT,01,01,02,u-blox ag - www.u-blox.com*50\\r\\n',\n b'$GPRMC,201209.00,A,4400.0000,N,11600.00000,W,0.065,,230223,,,D*6B\\r\\n'], [44.0, -116.0]),\n # No lat long found\n ([b'$GPTXT,01,01,02,u-blox ag - www.u-blox.com*50'], None),\n # Lat long message found but not interpretable\n ([b'$GPRMC,201209.00,A,,N,,W,0.065,,230223,,,D*6B\\r\\n'], None)\n])\ndef test_get_gps_fix(mock_gps_port, payload, expected):\n \"\"\"\n Mock out the connection and gps. Ensure the managing function handles\n three scenarios.\n Args:\n payload: List of nmea strings to read\n expected: expected lat long outcome\n \"\"\"\n with patch('radicl.gps.get_serial_cnx', return_value=SimpleNamespace(port='dev_fake', description='GPS/GNSS Receiver')):\n with patch('radicl.com.serial.Serial.open', return_value=None):\n with patch('radicl.gps.NMEAReader', return_value=mock_gps_port):\n gps_dev = USBGPS()\n loc = gps_dev.get_fix(max_attempts=2)\n assert loc == expected\n","sub_path":"tests/test_gps.py","file_name":"test_gps.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"498718039","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base DataLoader \n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport six\nfrom io import open\nfrom collections import namedtuple\nimport numpy as np\nimport tqdm\nimport paddle\nfrom pgl.utils import mp_reader\nimport collections\nimport time\nfrom pgl.utils.logger import log\nimport traceback\n\n\nif six.PY3:\n import io\n sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')\n\ndef scan_batch_iter(data, batch_size, fid, num_workers):\n \"\"\"node_batch_iter\n \"\"\"\n batch = []\n cc = 0\n for line_example in data.scan(): \n cc += 1\n if cc % num_workers != fid:\n continue\n batch.append(line_example)\n if len(batch) == batch_size:\n yield batch \n batch = []\n if len(batch) > 0:\n yield batch \n\ndef scan_batch_iter_shuffle(data, batch_size, fid, num_workers, shuffle_buffer=100000):\n \"\"\"node_batch_iter\n \"\"\"\n batch = []\n buffer = []\n cc = 0\n for line_example in data.scan(): \n cc += 1\n if cc % num_workers != fid:\n continue\n if len(buffer) < shuffle_buffer:\n buffer.append(line_example)\n else:\n index = np.random.randint(0, len(buffer))\n batch.append(buffer[index])\n buffer[index] = line_example\n if len(batch) == batch_size:\n yield batch \n batch = []\n \n for line_example in buffer:\n batch.append(line_example)\n if len(batch) == batch_size:\n yield batch \n batch = [] \n \n if len(batch) > 0:\n yield batch \n\nclass BaseDataGenerator(object):\n \"\"\"Base Data Geneartor\"\"\"\n\n def __init__(self, buf_size, batch_size, num_workers, shuffle=True):\n self.num_workers = num_workers\n self.batch_size = batch_size\n self.line_examples = []\n self.buf_size = buf_size\n self.shuffle = shuffle\n\n def batch_fn(self, batch_examples):\n \"\"\" batch_fn batch producer\"\"\"\n raise NotImplementedError(\"No defined Batch Fn\")\n\n def batch_iter(self, fid, perm):\n \"\"\" batch iterator\"\"\"\n if self.shuffle:\n for batch in scan_batch_iter_shuffle(self, self.batch_size, fid, self.num_workers):\n yield batch\n else:\n for batch in scan_batch_iter(self, self.batch_size, fid, self.num_workers):\n yield batch\n \n def __len__(self):\n return len(self.line_examples)\n\n def __getitem__(self, idx):\n if isinstance(idx, collections.Iterable):\n return [self[bidx] for bidx in idx]\n else:\n return self.line_examples[idx]\n\n def generator(self):\n \"\"\"batch dict generator\"\"\"\n\n def worker(filter_id, perm):\n \"\"\" multiprocess worker\"\"\"\n\n def func_run():\n \"\"\" func_run \"\"\"\n pid = os.getpid()\n for batch_examples in self.batch_iter(filter_id, perm):\n try:\n batch_dict = self.batch_fn(batch_examples)\n except Exception as e:\n traceback.print_exc()\n 
log.info(traceback.format_exc())\n log.info(str(e))\n continue\n\n if batch_dict is None:\n continue\n yield batch_dict\n\n return func_run\n\n perm = None\n\n if self.num_workers == 1:\n \n def post_fn():\n for batch in worker(0, perm)():\n yield self.post_fn(batch)\n r = paddle.reader.buffered(post_fn, self.buf_size)\n else:\n worker_pool = [worker(wid, perm) for wid in range(self.num_workers)]\n worker = mp_reader.multiprocess_reader(\n worker_pool, use_pipe=True, queue_size=10)\n \n def post_fn():\n for batch in worker():\n yield self.post_fn(batch)\n r = paddle.reader.buffered(post_fn, self.buf_size)\n\n for batch in r():\n yield batch\n\n def scan(self): \n for line_example in self.line_examples:\n yield line_example\n","sub_path":"examples/kddcup2021/MAG240M/r_unimp/dataset/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
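`scan_batch_iter_shuffle` above trades memory for approximate shuffling: it fills a bounded buffer, then emits a random buffered element for each new example. A standalone sketch of the same buffered-shuffle idea, stripped of the Paddle/PGL batching plumbing (names are illustrative):

```python
import random

def buffered_shuffle(iterable, buffer_size=8):
    # Fill a bounded buffer, then swap a random element out for each
    # incoming item; approximates a full shuffle in O(buffer_size) memory.
    buffer = []
    for item in iterable:
        if len(buffer) < buffer_size:
            buffer.append(item)
        else:
            index = random.randrange(len(buffer))
            yield buffer[index]
            buffer[index] = item
    random.shuffle(buffer)  # drain what is left
    yield from buffer

print(list(buffered_shuffle(range(20), buffer_size=4)))
```

The larger the buffer, the closer the output order gets to a uniform shuffle; the original defaults to 100000 for that reason.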
+{"seq_id":"401196331","text":"\"\"\"The Base module of the `cpp_linter` package. This holds the objects shared by\nmultiple modules.\"\"\"\nimport io\nimport os\nimport logging\n\nFOUND_RICH_LIB = False\ntry:\n from rich.logging import RichHandler\n\n FOUND_RICH_LIB = True\n\n logging.basicConfig(\n format=\"%(name)s: %(message)s\",\n handlers=[RichHandler(show_time=False)],\n )\n\nexcept ImportError:\n logging.basicConfig()\n\n#: The logging.Logger object used for outputing data.\nlogger = logging.getLogger(\"CPP Linter\")\nif not FOUND_RICH_LIB:\n logger.debug(\"rich module not found\")\n\n# global constant variables\nGITHUB_SHA = os.getenv(\"GITHUB_SHA\", \"\")\nGITHUB_TOKEN = os.getenv(\"GITHUB_TOKEN\", os.getenv(\"GIT_REST_API\", \"\"))\nAPI_HEADERS = {\n \"Authorization\": f\"token {GITHUB_TOKEN}\",\n \"Accept\": \"application/vnd.github.v3.text+json\",\n}\n\n\nclass Globals:\n \"\"\"Global variables for re-use (non-constant).\"\"\"\n\n PAYLOAD_TIDY = \"\"\n \"\"\"The accumulated output of clang-tidy (gets appended to OUTPUT)\"\"\"\n OUTPUT = \"\"\n \"\"\"The accumulated body of the resulting comment that gets posted.\"\"\"\n FILES = []\n \"\"\"The reponding payload containing info about changed files.\"\"\"\n EVENT_PAYLOAD = {}\n \"\"\"The parsed JSON of the event payload.\"\"\"\n response_buffer = None\n \"\"\"A shared response object for `requests` module.\"\"\"\n\n\nclass GlobalParser:\n \"\"\"Global variables specific to output parsers. Each element in each of the\n following attributes represents a clang-tool's output for 1 source file.\n \"\"\"\n\n tidy_notes = []\n \"\"\"This can only be a `list` of type\n [`TidyNotification`][cpp_linter.clang_tidy.TidyNotification]\"\"\"\n tidy_advice = []\n \"\"\"This can only be a `list` of type\n [`YMLFixit`][cpp_linter.clang_tidy_yml.YMLFixit]\"\"\"\n format_advice = []\n \"\"\"This can only be a `list` of type\n [`XMLFixit`][cpp_linter.clang_format_xml.XMLFixit]\"\"\"\n\n\ndef get_line_cnt_from_cols(file_path: str, offset: int) -> tuple:\n \"\"\"Gets a line count and columns offset from a file's absolute offset.\n\n Args:\n file_path: Path to file.\n offset: The byte offset to translate\n\n Returns:\n A `tuple` of 2 `int` numbers:\n\n - Index 0 is the line number for the given offset.\n - Index 1 is the column number for the given offset on the line.\n \"\"\"\n line_cnt = 1\n last_lf_pos = 0\n cols = 1\n file_path = file_path.replace(\"/\", os.sep)\n # logger.debug(\"Getting line count from %s at offset %d\", file_path, offset)\n with io.open(file_path, \"rb\") as src_file:\n max_len = src_file.seek(0, io.SEEK_END)\n src_file.seek(0, io.SEEK_SET)\n while src_file.tell() != offset and src_file.tell() < max_len:\n char = src_file.read(1)\n if char == b\"\\n\":\n line_cnt += 1\n last_lf_pos = src_file.tell() - 1 # -1 because LF is part of offset\n cols = src_file.tell() - last_lf_pos\n return (line_cnt, cols)\n\n\ndef log_response_msg():\n \"\"\"Output the response buffer's message on a failed request.\"\"\"\n if Globals.response_buffer.status_code >= 400:\n logger.error(\"response returned message: %s\", Globals.response_buffer.text)\n","sub_path":"cpp_linter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"106913576","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author: Li jinjing \ndef ladderLength(beginWord, endWord, wordList):\n from collections import defaultdict\n from collections import deque\n\n if endWord not in wordList or not endWord or not beginWord or not wordList:return 0\n\n all_combo_dict = defaultdict(list)\n L = len(beginWord)\n for word in wordList:\n for i in range(L):\n all_combo_dict[word[:i] + \"*\" + word[i+1:]].append(word)\n\n visited = set()\n queue = deque([(beginWord, 1)])\n\n while queue:\n #import ipdb;ipdb.set_trace()\n current, level = queue.popleft()\n if current not in visited:\n for i in range(L):\n inter = current[:i] + \"*\" + current[i+1:]\n for w in all_combo_dict[inter]:\n if w == endWord:\n return level +1\n else:\n visited.add(current)\n queue.append((w, level+1))\n\n return 0\n\nprint(ladderLength(\"hit\", \"cog\", [\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]))\n","sub_path":"Week_07/127_ladderLength.py","file_name":"127_ladderLength.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"9331694","text":"def read_input(path):\n with open(path, 'r') as f:\n output = tuple(f.read().splitlines())\n return output\n\n\nif __name__ == '__main__':\n eta, bus_ids = read_input('input.txt')\n bus_ids = bus_ids.split(',')\n bus_ids = [int(bus_id) for bus_id in list(filter(lambda s: s != 'x', bus_ids))]\n wait_times = [bus_id - (int(eta) % bus_id) for bus_id in bus_ids]\n print(min(wait_times) * bus_ids[wait_times.index(min(wait_times))])\n\n","sub_path":"task_13/task_13_1.py","file_name":"task_13_1.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"269574638","text":"# %load q02_data_splitter/build.py\n# Default Imports\nfrom greyatomlib.linear_regression.q01_load_data.build import load_data\nimport pandas as pd\ndf = load_data('data/house_prices_multivariate.csv')\n\n\n# Your Code Here\ndef data_splitter(df):\n #feature variable having all values except SalePrice\n \n #Target variable only sale Price\n y = df.iloc[:,-1]\n X = df.loc[:,df.columns != 'SalePrice']\n return X,y\ndata_splitter(df)\n\n\n","sub_path":"q02_data_splitter/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"474885007","text":"import pygame\nimport Content\nimport Game\nimport math\n\nclass PlaceMinesNotification:\n def __init__(self):\n self.max_h = 100 * Game.width / Game.original_width\n self.dist = 0\n self.t = \"\"\n\n def update(self):\n self.dist += 15 * Game.width / Game.original_width * (1 if Game.screenmanager.getScreenNumber() == Game.screenmanager.GAME_MINES else -1)\n self.max_h = 100 * Game.width / Game.original_width\n if self.dist < 0:\n self.dist = 0\n if self.dist > self.max_h:\n self.dist = self.max_h\n\n if self.dist == 0:\n Game.notificationmanager.removeNotification(self)\n\n if Content.GameAsset.getCurrentPlayer() != None:\n amount = 4 - len(Content.GameAsset.getCurrentPlayer().getMines())\n self.t = \"Place \" + str(amount) + \" mine\" + (\"s\" if amount != 1 else \"\")\n if amount == 0:\n self.t = \"Press 'End turn'\"\n\n def draw(self, screen):\n xs = Game.width / 2\n\n grid_size = Content.GameAsset.grid_size\n od = Content.GameAsset.d\n ow = int((Game.original_height / 1.25 / grid_size) * Game.height / Game.original_height)\n ol = ow * grid_size + (grid_size - 1) * od\n\n d = 5 * Game.height / Game.original_height\n w = 195 * Game.width / Game.original_width\n x = xs - ol / 2 - 2 * (w + d) + (w * 2 + d) / 2\n\n color = (55, 55, 55)\n w = (w + d) * 2\n h = 75 * Game.width / Game.original_width\n d = 20 * Game.height / Game.original_height\n loc = (x - w / 2, -self.max_h + self.dist + d, w, h)\n pygame.draw.rect(screen, color, loc, 0)\n pygame.draw.rect(screen, color, loc, int(5 * Game.width / Game.original_width))\n\n font = pygame.font.Font(None, int(50 * Game.width / Game.original_width))\n text = font.render(self.t, 1, Content.color_default)\n screen.blit(text, (x - font.size(self.t)[0] / 2, -self.max_h + self.dist + d + h / 2 - font.size(self.t)[1] / 2))\n","sub_path":"Notification/PlaceMinesNotification.py","file_name":"PlaceMinesNotification.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"195016934","text":"import config\n\nfrom common.globals import *\n\nfrom common.complex import *\n\nfrom common.runner import *\n\nimport random\n\nclass EventManager(object):\n\n\n def __init__(self):\n Globals().load(self)\n\n\n def handle_event(f):\n def inner(self, element, multiplier=0):\n old_time = self.app.state_intrp_time\n time = 60.0 / self.state.bpm * (2 ** multiplier)\n self.cmdcenter.cmd(\"app.state_intrp_time = %f\" % time)\n #self.app.state_intrp_time = 2.0#time\n\n f(self, element, time)\n\n self.cmdcenter.cmd(\"app.state_intrp_time = %f\" % old_time)\n # self.app.state_intrp_time = old_time\n\n return inner\n\n def setT(self, val):\n self.cmdcenter.cmd(\"switch_component('T', '%s')\" % val)\n\n# @handle_event\n# def switch_component(self, component, multiplier=0):\n# ''' Switches a component '''\n\n# self.cmdcenter.cmd(\"inc_data('%s', 1)\" % component)\n\n\n @handle_event\n def rotate360(self, component, time=1):\n ''' Rotates a zn by 360 deg '''\n\n z0 = r_to_p(self.state.zn[component])\n z1 = [z0[0], z0[1]]\n z1[1] += 2.0 * pi\n self.cmdcenter.cmd('radial_2d(\"zn\", %d, %f, %s, %s)' % (component, time, str(z0), str(z1)))\n\n\n @handle_event\n def rotate180(self, component, time=1):\n ''' Rotates a zn by 180 deg '''\n\n z0 = r_to_p(self.state.zn[component])\n z1 = [z0[0], z0[1]]\n z1[1] += 2.0 * pi / 2\n self.cmdcenter.cmd('radial_2d(\"zn\", %d, %f, %s, %s)' % (component, time, str(z0), str(z1)))\n\n\n @handle_event\n def rotate90(self, component, time=1):\n ''' Rotates a zn by 90 deg '''\n\n z0 = r_to_p(self.state.zn[component])\n z1 = [z0[0], z0[1]]\n z1[1] += 2.0 * pi / 4\n self.cmdcenter.cmd('radial_2d(zn, %d, %f, %s, %s)' % (component, time, str(z0), str(z1)))\n\n\n @handle_event\n def rotate45(self, component, time=1):\n ''' Rotates a zn by 45 deg '''\n\n z0 = r_to_p(self.state.zn[component])\n z1 = [z0[0], z0[1]]\n z1[1] += 2.0 * pi / 8\n self.cmdcenter.cmd('radial_2d(zn, %d, %f, %s, %s)' % (component, time, str(z0), str(z1)))\n\n\n @handle_event\n def rotateLoop(self, component, time=1):\n ''' Rotates a zn continuously '''\n\n z0 = r_to_p(self.state.zn[component])\n z1 = [z0[0], z0[1]]\n z1[1] += 2.0 * pi\n self.cmdcenter.cmd(\"Radial2D('zn', %d, %s, {'s' : %s, 'e' : %s, 'loop' : True})\" % (component, 32.0 * time, str(z0), str(z1)))\n\n\n @handle_event\n def transLoop(self, component, time=1):\n ''' Translates a zn continuously '''\n\n z0 = r_to_p(self.state.zn[component])\n z1 = [z0[0], z0[1]]\n z1[0] += 2.0\n self.cmdcenter.cmd(\"Radial2D('zn', %d, %s, {'s' : %s, 'e' : %s, 'loop' : True})\" % (component, 8.0 * time, str(z0), str(z1)))\n\n","sub_path":"cmd/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"21173667","text":"import coreapi\nimport coreschema\nfrom rest_framework.schemas import AutoSchema, ManualSchema\n\n\nclass UserSchema(ManualSchema):\n def get_encoding(self, path, method):\n return \"application/json\"\n\n def __init__(self):\n manual_fields = [\n coreapi.Field(\n name=\"email\",\n required=True,\n location=\"body\",\n schema=coreschema.String(\n title=\"Email\", description=\"Valid email for authentication\"\n ),\n ),\n coreapi.Field(\n name=\"firstName\",\n required=True,\n location=\"body\",\n schema=coreschema.String(title=\"First Name\", description=\"First name\"),\n ),\n coreapi.Field(\n name=\"lastName\",\n required=True,\n location=\"body\",\n schema=coreschema.String(title=\"Last Name\", description=\"Last name\"),\n ),\n ]\n super().__init__(fields=manual_fields, encoding=\"application/json\")\n\n\nclass PositionSchema(ManualSchema):\n def __init__(self):\n fields = [\n coreapi.Field(\n \"longitude\",\n required=True,\n location=\"body\",\n description=\"Longitude string\",\n schema=coreschema.String(),\n example=\"2.7105760574340807\",\n ),\n coreapi.Field(\n \"latitude\",\n required=True,\n location=\"body\",\n description=\"Latitude string\",\n schema=coreschema.String(),\n example=\"15.7105760574340807\",\n ),\n ]\n\n\nclass ParticipantCreateSchema(ManualSchema):\n def __init__(self):\n manual_fields = [\n coreapi.Field(\n \"user\",\n required=True,\n location=\"body\",\n description=\"Participant information\",\n schema=coreschema.Ref(UserSchema()),\n ),\n coreapi.Field(\n \"position\",\n required=True,\n location=\"body\",\n description=\"Geo Position information\",\n schema=coreschema.Ref(PositionSchema()),\n example=\"{'longitude'': '2.7105760574340807',\" \"'latitude': '123.3' }\",\n ),\n coreapi.Field(\n \"type\",\n required=True,\n location=\"formData\",\n description=\"Type of this participant. 
AF - Affected, \" \"HL - Helper\",\n schema=coreschema.Enum(enum=[\"HL\", \"AF\", \"AU\", \"TP\"], default=\"AF\"),\n ),\n coreapi.Field(\n \"firstLineOfAddress\",\n required=True,\n location=\"formData\",\n description=\"First line of address\",\n schema=coreschema.String(default=\"Goerzalle 135\"),\n ),\n coreapi.Field(\n \"secondLineOfAddress\",\n required=True,\n location=\"formData\",\n description=\"Second line of address\",\n schema=coreschema.String(default=\"\"),\n ),\n coreapi.Field(\n \"postCode\",\n required=True,\n location=\"formData\",\n description=\"Postcode of the location\",\n schema=coreschema.String(default=\"12207\"),\n ),\n coreapi.Field(\n \"city\",\n required=True,\n location=\"formData\",\n description=\"City Name\",\n schema=coreschema.String(default=\"Berlin\"),\n ),\n coreapi.Field(\n \"country\",\n required=True,\n location=\"formData\",\n description=\"Country Code\",\n schema=coreschema.String(default=\"DE\"),\n ),\n coreapi.Field(\n \"placeId\",\n required=True,\n location=\"formData\",\n description=\"Place Id from Maps App\",\n schema=coreschema.String(default=\"ChIJwyyKo7J3X0YRZ5XOMcLx3xo\"),\n ),\n coreapi.Field(\n \"crisis\",\n required=True,\n location=\"formData\",\n description=\"Crisis ID we are dealing with\",\n schema=coreschema.Number(default=1),\n ),\n coreapi.Field(\n \"phone\",\n required=False,\n location=\"formData\",\n description=\"Phone number of the participant\",\n schema=coreschema.String(default=\"+4677777777\"),\n ),\n ]\n super().__init__(fields=manual_fields, encoding=\"application/json\")\n","sub_path":"authentication/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"398787563","text":"#\n# Tarek Sanger\n# 101059686\n#\n#\n#\n\n# Equivalents to square metres\noneGuntha = 101.17\noneBoard = 0.007742\n\n# Guntha - Board convertion\ngunthaInput = float(input('Enter a measurement of Guntha to be converted to its equivalent in Board: ' ))\ngunthaInSquareMetre = oneGuntha * gunthaInput\nnumberOfBoard = gunthaInSquareMetre / oneBoard\nprint(gunthaInput, 'Guntha is equivalent to ' ,numberOfBoard, 'Board')\n\n# Board - Guntha convertion \nboardInput = float(input('Enter a measuremnt of Board to be converted to its equicalent in Guntha: '))\nboardInSquareMetre = oneBoard * boardInput\nnumberOfGuntha = boardInSquareMetre / oneGuntha\nprint(boardInput, 'Board is equivalent to ' ,numberOfGuntha, 'Guntha')\n","sub_path":"A1/101059686-a1q2.py","file_name":"101059686-a1q2.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"212281946","text":"\"\"\"Defines actions that may be associated with flows packets.\"\"\"\n# System imports\nfrom enum import Enum\n\n# Local source tree imports\nfrom pyof.foundation.base import GenericStruct\nfrom pyof.foundation.basic_types import (FixedTypeList, Pad, UBInt8, UBInt16,\n UBInt32)\n\n# Third-party imports\n\n__all__ = ('ActionExperimenterHeader', 'ActionGroup', 'ActionHeader',\n 'ActionMPLSTTL', 'ActionNWTTL', 'ActionOutput', 'ActionPopMPLS',\n 'ActionPush', 'ActionSetField', 'ActionSetQueue', 'ActionType',\n 'ControllerMaxLen')\n\n# Enums\n\n\nclass ActionType(Enum):\n \"\"\"Actions associated with flows and packets.\"\"\"\n\n #: Output to switch port.\n OFPAT_OUTPUT = 0\n #: Set the 802.1q VLAN id.\n OFPAT_SET_VLAN_VID = 1\n #: Set the 802.1q priority.\n OFPAT_SET_VLAN_PCP = 2\n #: Strip the 802.1q header.\n OFPAT_STRIP_VLAN = 3\n #: Ethernet source address.\n OFPAT_SET_DL_SRC = 4\n #: Ethernet destination address.\n OFPAT_SET_DL_DST = 5\n #: IP source address.\n OFPAT_SET_NW_SRC = 6\n #: IP destination address.\n OFPAT_SET_NW_DST = 7\n #: TCP/UDP source port.\n OFPAT_SET_TP_SRC = 8\n #: TCP/UDP destination port.\n OFPAT_SET_TP_DST = 9\n #: Copy TTL \"outwards\" -- from next-to-outermost to outermost\n OFPAT_COPY_TTL_OUT = 11\n #: Copy TTL \"inwards\" -- from outermost to next-to-outermost\n OFPAT_COPY_TTL_IN = 12\n #: MPLS TTL\n OFPAT_SET_MPLS_TTL = 15\n #: Decrement MPLS TTL\n OFPAT_DEC_MPLS_TTL = 16\n #: Push a new VLAN tag\n OFPAT_PUSH_VLAN = 17\n #: Pop the outer VLAN tag\n OFPAT_POP_VLAN = 18\n #: Push a new MPLS tag\n OFPAT_PUSH_MPLS = 19\n #: Pop the outer MPLS tag\n OFPAT_POP_MPLS = 20\n #: Set queue id when outputting to a port\n OFPAT_SET_QUEUE = 21\n #: Apply group.\n OFPAT_GROUP = 22\n #: IP TTL.\n OFPAT_SET_NW_TTL = 23\n #: Decrement IP TTL.\n OFPAT_DEC_NW_TTL = 24\n #: Set a header field using OXM TLV format.\n OFPAT_SET_FIELD = 25\n #: Push a new PBB service tag (I-TAG)\n OFPAT_PUSH_PBB = 26\n #: Pop the outer PBB service tag (I-TAG)\n OFPAT_POP_PBB = 27\n #: Experimenter type\n OFPAT_EXPERIMENTER = 0xffff\n OFPAT_VENDOR = 0xffff\n\n\nclass ControllerMaxLen(Enum):\n \"\"\"A max_len of OFPCML_NO_BUFFER means not to buffer.\n\n The packet should be sent.\n \"\"\"\n\n #: maximum max_len value which can be used to request a specific byte\n #: length.\n OFPCML_MAX = 0xffe5\n #: indicates that no buffering should be applied and the whole packet is to\n #: be sent to the controller.\n OFPCML_NO_BUFFER = 0xffff\n\n\n# Classes\n\n\nclass ActionExperimenterHeader(GenericStruct):\n \"\"\"Action structure for OFPAT_EXPERIMENTER.\"\"\"\n\n #: OFPAT_EXPERIMENTER.\n action_type = UBInt16(ActionType.OFPAT_EXPERIMENTER, enum_ref=ActionType)\n #: Length is multiple of 8.\n length = UBInt16()\n #: Experimenter ID which takes the same form as in struct\n #: ofp_experimenter_header\n experimenter = UBInt32()\n\n def __init__(self, length=None, experimenter=None):\n \"\"\"Action structure for OFPAT_EXPERIMENTER.\n\n Args:\n experimenter (int): The experimenter field is the Experimenter ID,\n which takes the same form as in struct ofp_experimenter.\n \"\"\"\n super().__init__()\n self.length = length\n self.experimenter = experimenter\n\n\nclass ActionGroup(GenericStruct):\n \"\"\"Action structure for OFPAT_GROUP.\"\"\"\n\n #: OFPAT_GROUP.\n action_type = UBInt16(ActionType.OFPAT_GROUP, enum_ref=ActionType)\n #: Length is 8.\n length = UBInt16(8)\n #: Group identifier.\n group_id = UBInt32()\n\n def __init__(self, group_id=None):\n \"\"\"Action structure for 
OFPAT_GROUP.\n\n Args:\n group_id (int): The group_id indicates the group used to process\n this packet. The set of buckets to apply depends on the group\n type.\n \"\"\"\n super().__init__()\n self.group_id = group_id\n\n\nclass ActionHeader(GenericStruct):\n \"\"\"Action header that is common to all actions.\n\n The length includes the header and any padding used to make the action\n 64-bit aligned.\n NB: The length of an action *must* always be a multiple of eight.\n \"\"\"\n\n #: One of OFPAT_*.\n action_type = UBInt16(enum_ref=ActionType)\n #: Length of action, including this header. This is the length of actions,\n #: including any padding to make it 64-bit aligned.\n length = UBInt16()\n #: Pad for 64-bit alignment.\n pad = Pad(4)\n\n def __init__(self, action_type=None, length=None):\n \"\"\"The following constructor parameters are optional.\n\n Args:\n action_type (ActionType): The type of the action.\n length (int): Length of action, including this header.\n \"\"\"\n super().__init__()\n self.action_type = action_type\n self.length = length\n\n\nclass ActionMPLSTTL(GenericStruct):\n \"\"\"Action structure for OFPAT_SET_MPLS_TTL.\"\"\"\n\n #: OFPAT_SET_MPLS_TTL.\n action_type = UBInt16(ActionType.OFPAT_SET_MPLS_TTL, enum_ref=ActionType)\n #: Length is 8.\n length = UBInt16(8)\n #: MPLS TTL\n mpls_ttl = UBInt8()\n #: Padding\n pad = Pad(3)\n\n def __init__(self, mpls_ttl=None):\n \"\"\"Action structure for OFPAT_SET_MPLS_TTL.\n\n Args:\n mpls_ttl (int): The mpls_ttl field is the MPLS TTL to set.\n \"\"\"\n super().__init__()\n self.mpls_ttl = mpls_ttl\n\n\nclass ActionNWTTL(GenericStruct):\n \"\"\"Action structure for OFPAT_SET_NW_TTL.\"\"\"\n\n #: OFPAT_SET_NW_TTL.\n action_type = UBInt16(ActionType.OFPAT_SET_NW_TTL, enum_ref=ActionType)\n #: Length is 8.\n length = UBInt16(8)\n #: IP TTL\n nw_ttl = UBInt8()\n #: Padding\n pad = Pad(3)\n\n def __init__(self, nw_ttl=None):\n \"\"\"Action structure for OFPAT_SET_NW_TTL.\n\n Args:\n nw_ttl (int): the TTL address to set in the IP header.\n \"\"\"\n super().__init__()\n self.nw_ttl = nw_ttl\n\n\nclass ActionOutput(GenericStruct):\n \"\"\"Defines the actions output.\n\n Action structure for :attr:`ActionType.OFPAT_OUTPUT`, which sends packets\n out :attr:`port`. When the :attr:`port` is the\n :attr:`.Port.OFPP_CONTROLLER`, :attr:`max_length` indicates the max number\n of bytes to send. 
A :attr:`max_length` of zero means no bytes of the packet\n should be sent.\n \"\"\"\n\n #: OFPAT_OUTPUT.\n action_type = UBInt16(ActionType.OFPAT_OUTPUT, enum_ref=ActionType)\n #: Length is 16.\n length = UBInt16(16)\n #: Output port.\n port = UBInt16()\n #: Max length to send to controller.\n max_length = UBInt16()\n #: Pad to 64 bits.\n pad = Pad(6)\n\n def __init__(self, action_type=None, length=None, port=None,\n max_length=None):\n \"\"\"The following constructor parameters are optional.\n\n Args:\n port (:class:`Port` or :class:`int`): Output port.\n max_length (int): Max length to send to controller.\n \"\"\"\n super().__init__()\n self.action_type = action_type\n self.length = length\n self.port = port\n self.max_length = max_length\n\n\nclass ActionPopMPLS(GenericStruct):\n \"\"\"Action structure for OFPAT_POP_MPLS.\"\"\"\n\n #: OFPAT_POP_MPLS.\n action_type = UBInt16(ActionType.OFPAT_POP_MPLS, enum_ref=ActionType)\n #: Length is 8.\n length = UBInt16(8)\n #: Ethertype\n ethertype = UBInt16()\n\n def __init__(self, ethertype=None):\n \"\"\"Action structure for OFPAT_POP_MPLS.\n\n Args:\n ethertype (int): indicates the Ethertype of the payload.\n \"\"\"\n super().__init__()\n self.ethertype = ethertype\n\n\nclass ActionPush(GenericStruct):\n \"\"\"Action structure for OFPAT_PUSH_VLAN/MPLS/PBB.\"\"\"\n\n #: OFPAT_PUSH_VLAN/MPLS/PBB.\n action_type = UBInt16(enum_ref=ActionType)\n #: Length is 8.\n length = UBInt16(8)\n #: Ethertype\n ethertype = UBInt16()\n #: Padding\n pad = Pad(2)\n\n def __init__(self, ethertype=None):\n \"\"\"Action structure for OFPAT_PUSH_VLAN/MPLS/PBB.\n\n Args:\n ethertype (int): indicates the Ethertype of the new tag.\n \"\"\"\n super().__init__()\n self.ethertype = ethertype\n\n\nclass ActionSetField(GenericStruct):\n \"\"\"Action structure for OFPAT_SET_FIELD.\"\"\"\n\n #: OFPAT_SET_FIELD.\n action_type = UBInt16(ActionType.OFPAT_SET_FIELD, enum_ref=ActionType)\n #: Length is padded to 64 bits.\n length = UBInt16()\n #: Followed by:\n #: - Exactly oxm_len bytes containing a single OXM TLV, then\n #: - Exactly ((oxm_len + 4) + 7)/8*8 - (oxm_len + 4) (between 0 and 7)\n #: bytes of all-zero bytes\n\n #: OXM TLV - Make compiler happy\n field1 = UBInt8()\n field2 = UBInt8()\n field3 = UBInt8()\n field4 = UBInt8()\n\n def __init__(self, length=None, field1=None, field2=None, field3=None,\n field4=None):\n \"\"\"Action structure for OFPAT_SET_FIELD.\n\n Args:\n length (int): length padded to 64 bits.\n field1 (int): OXM field.\n field2 (int): OXM field.\n field3 (int): OXM field.\n field4 (int): OXM field.\n \"\"\"\n super().__init__()\n self.length = length\n self.field1 = field1\n self.field2 = field2\n self.field3 = field3\n self.field4 = field4\n\n\nclass ActionSetQueue(GenericStruct):\n \"\"\"Action structure for OFPAT_SET_QUEUE.\"\"\"\n\n #: OFPAT_SET_QUEUE.\n action_type = UBInt16(ActionType.OFPAT_SET_QUEUE, enum_ref=ActionType)\n #: Length is 8.\n length = UBInt16(8)\n #: Queue id for the packets.\n queue_id = UBInt32()\n\n def __init__(self, queue_id=None):\n \"\"\"Action structure for OFPAT_SET_QUEUE.\n\n Args:\n queue_id (int): The queue_id send packets to given queue on port.\n \"\"\"\n super().__init__()\n self.queue_id = queue_id\n\n\nclass ListOfActions(FixedTypeList):\n \"\"\"List of actions.\n\n Represented by instances of ActionHeader and used on ActionHeader objects.\n \"\"\"\n\n def __init__(self, items=None):\n \"\"\"The constructor just assings parameters to object attributes.\n\n Args:\n items (ActionHeader): Instance or a list of 
instances.\n \"\"\"\n super().__init__(pyof_class=ActionHeader, items=items)\n","sub_path":"build/lib/pyof/v0x04/common/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":10342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
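Since these structs all inherit `GenericStruct`, they serialize through the `pack()`/`unpack()` API of the python-openflow foundation classes. The sketch below is a hedged usage example, assuming the build shown in `sub_path` is on `sys.path`; exact byte output depends on the library version installed:

```python
from pyof.v0x04.common.action import ActionOutput, ListOfActions

# Output action towards port 1, asking the switch to send at most
# 0xffe5 (OFPCML_MAX) bytes of the packet to the controller.
action = ActionOutput(port=1, max_length=0xffe5)
wire = action.pack()          # binary wire format for the message body
print(len(wire), wire.hex())  # expect 16 bytes per the declared length

# Actions travel inside instructions as a FixedTypeList:
actions = ListOfActions([action])
print(len(actions.pack()))
```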
+{"seq_id":"620090738","text":"import numpy as np\nfrom data_utils import load_csv, convert_code_to_idx, get_text_sequence, get_code_length\nfrom utils import load_word_embedding, load_code_embeddings, create_code_vocab\nfrom keras.utils import to_categorical\n\n\nclass PatentLandscaping:\n def __init__(self, args):\n self.args = args\n self.train = []\n self.val = []\n self.test = []\n \n def load_data(self):\n train_data, val_data, test_data = load_csv(self.args.data_path, self.args.data)\n self.train_y = to_categorical(train_data.valid)\n self.val_y = to_categorical(val_data.valid)\n self.test_y = to_categorical(test_data.valid)\n \n print('Load word embedding')\n self.word2idx, idx2word, self.word_vectors = load_word_embedding(\n self.args.embedding_path,\n self.args.word_embedding,\n self.args.data)\n \n print('Load graph embedding')\n cpc_embed_dict, ipc_embed_dict, uspc_embed_dict = load_code_embeddings(self.args.embedding_path,\n self.args.code_embedding, self.args.data)\n \n self.cpc2idx, idx2cpc, self.cpc_vectors = create_code_vocab(cpc_embed_dict)\n self.ipc2idx, idx2ipc, self.ipc_vectors = create_code_vocab(ipc_embed_dict)\n self.uspc2idx, idx2uspc, self.uspc_vectors = create_code_vocab(uspc_embed_dict)\n\n self.max_cpc_len, self.max_ipc_len, self.max_uspc_len = get_code_length(train_data)\n \n print('Preparing train data')\n train_cpcs, train_ipcs, train_uspcs = convert_code_to_idx(train_data, self.max_cpc_len, self.max_ipc_len, self.max_uspc_len,\n self.cpc2idx, self.ipc2idx, self.uspc2idx)\n train_abs_sequence = get_text_sequence(train_data.abstract_text, self.word2idx, self.args.max_length)\n\n self.train.append(train_cpcs)\n self.train.append(train_ipcs)\n self.train.append(train_uspcs)\n self.train.append(train_abs_sequence)\n \n print('Preparing validation data')\n val_cpcs, val_ipcs, val_uspcs = convert_code_to_idx(val_data, self.max_cpc_len, self.max_ipc_len, self.max_uspc_len,\n self.cpc2idx, self.ipc2idx, self.uspc2idx)\n val_abs_sequence = get_text_sequence(val_data.abstract_text, self.word2idx, self.args.max_length)\n \n self.val.append(val_cpcs)\n self.val.append(val_ipcs)\n self.val.append(val_uspcs)\n self.val.append(val_abs_sequence)\n\n print('preparing test data')\n test_cpcs, test_ipcs, test_uspcs = convert_code_to_idx(test_data, self.max_cpc_len, self.max_ipc_len, self.max_uspc_len,\n self.cpc2idx, self.ipc2idx, self.uspc2idx)\n test_abs_sequence = get_text_sequence(test_data.abstract_text, self.word2idx, self.args.max_length)\n \n self.test.append(test_cpcs)\n self.test.append(test_ipcs)\n self.test.append(test_uspcs)\n self.test.append(test_abs_sequence)\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"607695359","text":"Menschen = {\"Turing\":True,\r\n \"Hoare\":True,\r\n \"Dijkstra\":True,\r\n \"Knuth\":True,\r\n \"Codd\":True}\r\n\r\nFollowing = [[\"Turing\", \"Hoare\"],[\"Turing\", \"Dijkstra\"],[\"Hoare\", \"Turing\"],[\"Hoare\", \"Dijkstra\"],[\"Hoare\", \"Codd\"],[\"Knuth\", \"Turing\"],[\"Knuth\", \"Dijkstra\"],[\"Codd\", \"Turing\"],[\"Codd\", \"Dijkstra\"],[\"Codd\", \"Knuth\"]]\r\nAbfragen = []\r\nsuperstar = False\r\n\r\ndef follow(nameA,nameB):\r\n for i in Following:\r\n if i[0] == nameA and i[1] == nameB:\r\n return True\r\n return False\r\n\r\nfor i in Menschen:\r\n for ii in Menschen:\r\n if Menschen[i] and Menschen[ii] and i != ii:\r\n if follow(i,ii):\r\n Menschen[i] = False\r\n else:\r\n Menschen[ii] = False\r\n Abfragen += [[i,ii]]\r\n print(i,ii)\r\n\r\nfor i in Menschen:\r\n if Menschen[i]:\r\n possiblestar = i\r\n superstar = True\r\n\r\nif superstar:\r\n for i in Menschen:\r\n if [possiblestar, i] not in Abfragen:\r\n if follow(possiblestar, i) and i != possiblestar:\r\n superstar = False\r\n break\r\n print(possiblestar, i)\r\n if [i,possiblestar] not in Abfragen and i != possiblestar:\r\n if not follow(i, possiblestar):\r\n superstar = False\r\n break\r\n print(i, possiblestar)\r\n\r\nif superstar:\r\n print(\"der superstar ist \"+ possiblestar)\r\nelse:\r\n print(\"es gibt keinen superstar\")\r\n\r\n \r\n","sub_path":"Bundeswettbewerb Aufgaben/bwinf 37 Runde 1 Superstar.py","file_name":"bwinf 37 Runde 1 Superstar.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"304382662","text":"import urllib\nfrom django.utils.encoding import smart_unicode\nfrom django.core.urlresolvers import reverse\n\nclass Term(object):\n\n def __init__(self, property, value, is_facet = True):\n self.property = property\n self._value = smart_unicode(value).encode('utf-8')\n self.is_facet = is_facet\n\n @property\n def value(self):\n return self._value\n\n @property\n def term_page_url(self):\n if hasattr(self, '_term_page_url'):\n return self._term_page_url\n if not self.is_facet:\n self._term_page_url = None\n return self._term_page_url\n \n colid = self.property.resource.collection.localid\n property = self.property.name\n value = self.value\n url = reverse('term-page', args = (colid, property, value))\n self._term_page_url = url\n return self._term_page_url\n\n\n @property\n def qs_value(self):\n if self.is_facet:\n val = urllib.quote(self._value)\n return val\n else:\n return None\n \n @property\n def query_string(self):\n if self.is_facet:\n val = smart_unicode(self._value).encode('utf-8')\n val = urllib.quote(val)\n qs = 'facet=%s=%s' % (self.property.name, val)\n return qs\n else:\n return None\n\nclass PropertyBase(object):\n\n def __init__(self, resource, name, values = [], is_facet = True):\n self.resource = resource\n self.name = name\n self._terms = [Term(self, v, is_facet) for v in values]\n \n @property\n def label(self):\n return self.name.replace('_', ' ')\n \n @property\n def terms(self):\n return self._terms\n\n","sub_path":"items/properties/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"313000765","text":"import socket\n\n# Inisiasi objek socket IPv4/UDP\nudp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# Bind/ikat server di alamat IP dan port tertentu\nudp_sock.bind( (\"0.0.0.0\", 6666) )\nwhile True :\n # Terima data dari client\n data, client_addr = udp_sock.recvfrom(1000)\n # Konversi dari bytes jadi string\n data = data.decode('ascii')\n print(data)\n # Olah string\n data = \"OK \"+data\n # Kirim balik ke client\n udp_sock.sendto( data.encode('ascii'), client_addr)\n\n","sub_path":"kelase/03-udp/simple_udp_server.py","file_name":"simple_udp_server.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"7498903","text":"# 2015-06-19 Runtime: 68 ms\nclass Solution:\n # @param {integer[]} nums\n # @return {integer[][]}\n def subsets(self, nums):\n if not nums: return []\n res = []\n nums.sort()\n # use binary, for [1,2,3], '010' means [2], '101' means [1,3]\n for i in xrange(2 ** len(nums)):\n # for binary, don't forget to add leading zeros to make sure it has len(nums) digits\n binary, subset = bin(i)[2:].zfill(len(nums)), []\n for j in xrange(len(nums)):\n if binary[j] == '1': subset.append(nums[j])\n res.append(subset)\n return res","sub_path":"78_Subsets.py","file_name":"78_Subsets.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"420977828","text":"#package for audio\nimport pyaudio\nimport aubio\nimport numpy as np\nimport random\n\n#package for game\nimport cocos\nfrom cocos.director import director\nfrom cocos.actions import *\n\nfrom cocos.layer import ScrollingManager, ScrollableLayer, ColorLayer\nfrom cocos.tiles import load\nfrom cocos.tiles import MapLayer\n\nfrom pyglet.app import exit\nfrom cocos.menu import *\nfrom cocos.scene import *\nfrom cocos.scenes import FadeTransition\nfrom cocos.layer import *\n## cocos audio\nfrom cocos.audio import pygame\nimport time\n\n#package for plotting\nimport matplotlib.pyplot as plt\n\nimport sys\n\nglobal WIDTH, HEIGHT, num_pitches, x_coors, num_bloomed, num_flowers, flower_under_mouse, num_flowers_list, audiomixer, clicksound, bgmplayed, volumes, pitches, time_data, volume_avg_list, pitch_avg_list, volume_talk_avg, pitch_talk_avg, volume_sing_avg, pitch_sing_avg, volume_std\n\nWIDTH=960\nHEIGHT=568\nnum_pitches=[0]*7\nx_coors=list(range(0,285,15))\nnum_bloomed=0\nnum_flowers=19\nflower_under_mouse=None\n\n## global list for testing volume and pitch\nvolume_avg_list = []\npitch_avg_list = []\nvolume_talk_avg = 0.0003\npitch_talk_avg = 0\nvolume_sing_avg = 0.0003\npitch_sing_avg = 0\nvolume_std=0.0002\n\n## global variable for audio\naudiomixer=pygame.mixer\naudiomixer.init()\nclicksound=audiomixer.Sound('sounds/click.ogg')\nbgmplayed=False\n\n## global list for plotting\nnum_flowers_list=[0]*7\nvolumes=[]\npitches=[]\ntime_data=[]\n\n\ndirector.init(width=WIDTH, height=HEIGHT, autoscale=False, resizable=False)\n\n\n#scroller for testing background\nscroller_test = ScrollingManager()\nmapLayer_test = load(\"assets/map/wood_blocker.tmx\")[\"TileLayer1\"]\nscroller_test.add(mapLayer_test)\n\n#scroller for game background\nscroller = ScrollingManager()\nmapLayer = load(\"assets/map/map_garden_back_01.tmx\")[\"TileLayer1\"]\nscroller.add(mapLayer)\n\n#scroller_menu for menu background\nscroller_menu=ScrollingManager()\nmapLayer_menu = load(\"assets/map/map_menu.tmx\")[\"TileLayer1\"]\nscroller_menu.add(mapLayer_menu)\n\n\n# class for testing voice\nclass Testing(cocos.layer.Layer):\n is_event_handler=True\n\n def __init__(self):\n super(Testing,self).__init__()\n # init voice\n self.CHUNK=1024\n self.RATE=44100\n\n #init voice input\n p=pyaudio.PyAudio()\n\n #Open stream\n self.stream = p.open(format=pyaudio.paFloat32, channels=1, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)\n # Aubio's pitch detection\n self.pDetection = aubio.pitch(\"default\", self.CHUNK*2, self.CHUNK, self.RATE)\n\n #Set unit.\n self.pDetection.set_unit(\"Hz\")\n self.pDetection.set_silence(-40)\n\n self.endmenuLayer=None\n self.time_update=0\n\n self.schedule(self.update)\n\n #Draw Next Button\n self.button=cocos.sprite.Sprite('assets/img/nextbutton.png')\n self.button.position=(850,280)\n self.add(self.button)\n\n #Draw talk test instruction label\n self.talk_1=cocos.text.Label(text='Please talk for a few seconds using your normal',\n position=(80, 300),\n font_name = 'Comic Sans MS',\n color = (57, 34, 3, 255),\n font_size = 23)\n self.talk_2=cocos.text.Label(text='volume and pitch, then click the next button.',\n position=(80, 270),\n font_name = 'Comic Sans MS',\n color = (57, 34, 3, 255),\n font_size = 23)\n self.add(self.talk_1)\n self.add(self.talk_2)\n\n #Initialize test instruction label\n self.sing_1=cocos.text.Label(text='Please sing for a few seconds using your normal',\n position=(80, 300),\n font_name = 'Comic Sans MS',\n color = (57, 34, 3, 255),\n font_size = 
23)\n self.sing_2=cocos.text.Label(text='volume and pitch, then click the next button',\n position=(80, 270),\n font_name = 'Comic Sans MS',\n color = (57, 34, 3, 255),\n font_size = 23)\n self.sing_3=cocos.text.Label(text='to enter the game.',\n position=(80, 240),\n font_name = 'Comic Sans MS',\n color = (57, 34, 3, 255),\n font_size = 23)\n\n def update(self,dt):\n global num_pitches, x_coors, num_bloomed, num_flowers, audiomixer, volumes, pitches, time_data\n if (num_bloomed < num_flowers):\n data = self.stream.read(self.CHUNK,exception_on_overflow = False)\n sample = np.fromstring(data, dtype=aubio.float_type)\n pitch=self.pDetection(sample)[0]\n self.time_update+=dt\n volume=np.sum(sample**2)/len(sample)\n if (pitch>0):\n pitch_avg_list.append(pitch)\n time_data.append(self.time_update)\n if (volume>0):\n volume_avg_list.append(volume)\n # print(\"Volume: \")\n # print(volume)\n # print(\"Pitch: \")\n # print(pitch)\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n global volume_avg_list, pitch_avg_list, volume_talk_avg, pitch_talk_avg, volume_sing_avg, pitch_sing_avg, volume_std\n self.position_x, self.position_y = director.get_virtual_coordinates(x, y)\n print(self.position_x)\n print(self.position_y)\n if ((840 < self.position_x <860 ) and (270 < self.position_y < 290)):\n self.remove(self.talk_1)\n self.remove(self.talk_2)\n self.remove(self.button)\n self.button.position=(850,90)\n self.add(self.button)\n self.add(self.sing_1)\n self.add(self.sing_2)\n self.add(self.sing_3)\n volume_talk_avg=np.array(volume_avg_list).mean()\n print(\"Average talk volume is %s\" % (volume_talk_avg))\n pitch_talk_avg=np.array(pitch_avg_list).mean()\n print(\"Average talk pitch is %s\" % (pitch_talk_avg))\n volume_avg_list = []\n pitch_avg_list = []\n\n if ((840 < self.position_x <860 ) and (70 < self.position_y < 90)):\n clicksound.play()\n main_scene = cocos.scene.Scene()\n main_scene.add(scroller)\n main_scene.add(InputVoice())\n volume_sing_avg=np.array(volume_avg_list).mean()\n volume_std=np.array(volume_avg_list).std()\n print(\"Average volume is %s\" % (volume_sing_avg))\n pitch_sing_avg=np.array(pitch_avg_list).mean()\n print(\"Average pitch is %s\" % (pitch_sing_avg))\n # menuLayer_back = MultiplexLayer(MainMenus())\n # main_menu_scene = cocos.scene.Scene(scroller_menu,menuLayer_back)\n director.replace(FadeTransition(main_scene, duration=1))\n\n#class for flower\nclass Flower(cocos.layer.Layer):\n\n def __init__(self, idnum, color):\n global flowers, x_coors\n super(Flower,self).__init__()\n\n #flower property\n self.id=idnum\n self.color=color\n self.water=0\n self.nutrition=0\n self.points=0\n self.stage=0\n x=random.choice(x_coors)\n x_coors.remove(x)\n self.position=x,75\n\n #Draw seed\n self.seed=cocos.sprite.Sprite('ui/seed.png')\n self.seed.scale_y=0.04\n self.seed.scale_x=0.04\n self.seed.position=self.position\n self.seed.image_anchor=0,0\n self.stage2=True\n self.stage3=False\n self.stage4=False\n self.stage5=False\n self.stage6=False\n self.stage7=False\n self.add(self.seed)\n self.schedule(self.update)\n\n def update(self, dt):\n if((self.stage2) and (self.points > 10)):\n print('stage2')\n self.remove(self.seed)\n self.seedling=cocos.sprite.Sprite('ui/Seedling.png')\n self.seedling.scale_y=0.02\n self.seedling.scale_x=0.02\n self.seedling.position=self.position\n self.seedling.image_anchor=0,0\n self.add(self.seedling)\n self.stage2=False\n self.stage3=True\n self.stage=1\n if((self.stage3) and (self.points > 20)):\n print('stage3')\n self.remove(self.seedling)\n 
self.seedling2=cocos.sprite.Sprite('ui/Seedling2.png')\n self.seedling2.scale_y=0.04\n self.seedling2.scale_x=0.04\n x, y=self.position\n self.seedling2.position=x,y+5\n self.seedling2.image_anchor=0,0\n self.add(self.seedling2)\n self.stage3=False\n self.stage4=True\n self.stage=2\n if((self.stage4) and (self.points > 30)):\n print('stage4')\n self.remove(self.seedling2)\n self.flowerbud=cocos.sprite.Sprite('ui/Flowerbud.png')\n self.flowerbud.scale_y=0.04\n self.flowerbud.scale_x=0.04\n x, y=self.position\n self.flowerbud.position=x,y+5\n self.flowerbud.image_anchor=0,0\n self.add(self.flowerbud)\n self.stage4=False\n self.stage5=True\n self.stage=3\n if((self.stage5) and (self.points > 40)):\n print('stage5')\n self.remove(self.flowerbud)\n self.flowerbud2=cocos.sprite.Sprite('ui/Flowerbud2.png')\n self.flowerbud2.scale_y=0.04\n self.flowerbud2.scale_x=0.04\n x, y=self.position\n self.flowerbud2.position=x,y+5\n self.flowerbud2.image_anchor=0,0\n self.add(self.flowerbud2)\n self.stage5=False\n self.stage6=True\n self.stage=4\n if((self.stage6) and (self.points >= 50)):\n print('stage6')\n self.remove(self.flowerbud2)\n self.flowerstem=cocos.sprite.Sprite('ui/Withoutflower.png')\n self.flowerstem.scale_y=0.04\n self.flowerstem.scale_x=0.04\n x, y=self.position\n self.flowerstem.position=x,y+5\n self.flowerstem.image_anchor=0,0\n self.add(self.flowerstem)\n self.flower=cocos.sprite.Sprite(self.color)\n self.flower.scale_y=0.07\n self.flower.scale_x=0.07\n self.flower.image_anchor=0,0\n self.flower.position=x+15,y+80\n self.add(self.flower)\n self.stage6=False\n self.stage7=True\n self.stage=5\n\n #reset flower\n def reset(self):\n self.water=0\n self.nutrition=0\n self.points=0\n self.stage2=True\n if (self.stage7):\n self.remove(self.flowerstem)\n self.remove(self.flower)\n self.stage7=False\n self.add(self.seed)\n\n#class for nutrition\nclass NutritionBar(cocos.layer.Layer):\n\n def __init__(self):\n super(NutritionBar,self).__init__()\n\n #Draw nutritionbar\n self.nutritionbar=cocos.sprite.Sprite('ui/NutritionBar.png')\n self.nutritionbar.scale_y=0.2\n self.nutritionbar.scale_x=0.2\n self.nutritionbar.position=790-self.nutritionbar.width/2,260\n self.nutritionbar.image_anchor=0,0\n self.add(self.nutritionbar)\n\n #Draw nutritionicon\n self.nutritionicon=cocos.sprite.Sprite('ui/NutritionIcon.png')\n self.nutritionicon.scale_y=0.0625\n self.nutritionicon.scale_x=0.0625\n self.nutritionicon_initial=770-self.nutritionbar.width/2\n self.nutritionicon.position=self.nutritionicon_initial,275\n self.nutritionicon.image_anchor=0,0\n self.add(self.nutritionicon)\n\n # get value of nutritionicon\n def get_value(self):\n position=self.nutritionicon.x-self.nutritionicon_initial\n return(position)\n\n def set_value(self,speed):\n #move=MoveBy((,0))\n #self.watericon.do(move)\n self.nutritionicon.x=self.nutritionicon_initial+min((1+speed)*self.nutritionbar.width/2,self.nutritionbar.width)\n # else:\n # self.reset()\n\n def reset(self):\n self.speed=0\n self.nutritionicon.position=self.nutritionicon_initial,275\n\n#class for flowerbar\nclass FlowerBar(cocos.layer.Layer):\n def __init__(self,flower):\n super(FlowerBar,self).__init__()\n\n self.flower=flower\n #Draw flowerbar\n self.flowerbar=cocos.sprite.Sprite('ui/FlowerBar.png')\n self.flowerbar.scale_y=0.2\n self.flowerbar.scale_x=0.2\n self.flowerbar.image_anchor=0,0\n self.flowerbar.position=790-self.flowerbar.width/2,200\n self.add(self.flowerbar)\n\n self.flowericon=cocos.sprite.Sprite(self.flower.color)\n self.flowericon.scale_y=0.08\n 
self.flowericon.scale_x=0.08\n self.flowericon_initial=770-self.flowerbar.width/2\n self.flowericon.position=self.flowericon_initial+min(self.flowerbar.width,flower.points/50*self.flowerbar.width),215\n self.flowericon.image_anchor=0,0\n self.add(self.flowericon)\n\n # def set_value(self):\n # self.flowericon.x=self.flowericon_initial+self.flower.stage*(self.flowerbar.width/5)\n #\n # def reset(self):\n # self.flowericon.position=self.flowericon_initial,215\n\n # get value of watericon\n def get_value(self):\n position=self.flowericon.x-self.flowericon_initial\n return(position)\n\n def set_value(self,speed):\n #move=MoveBy((,0))\n #self.watericon.do(move)\n if(speed<=50):\n self.flowericon.x=self.flowericon_initial+speed/50*self.flowerbar.width\n # else:\n # self.reset()\n\n def reset(self):\n self.speed=0\n self.flowericon.position=self.flowericon_initial,215\n\n#class for water\nclass WaterBar(cocos.layer.Layer):\n\n def __init__(self):\n super(WaterBar,self).__init__()\n\n #Draw waterbar\n self.waterbar=cocos.sprite.Sprite('ui/WaterBar.png')\n self.waterbar.scale_y=0.2\n self.waterbar.scale_x=0.2\n self.waterbar.image_anchor=0,0\n self.waterbar.position=790-self.waterbar.width/2,300\n self.add(self.waterbar)\n\n #Draw watericon\n self.watericon=cocos.sprite.Sprite('ui/WaterIcon.png')\n self.watericon.scale_y=0.02\n self.watericon.scale_x=0.02\n self.watericon_initial=770-self.waterbar.width/2\n self.watericon.position=self.watericon_initial,315\n self.watericon.image_anchor=0,0\n self.add(self.watericon)\n\n # get value of watericon\n def get_value(self):\n position=self.watericon.x-self.watericon_initial\n return(position)\n\n def set_value(self,speed):\n #move=MoveBy((,0))\n #self.watericon.do(move)\n self.watericon.x=self.watericon_initial+min((1+speed)*self.waterbar.width/2,self.waterbar.width)\n # else:\n # self.reset()\n\n def reset(self):\n self.speed=0\n self.watericon.position=self.watericon_initial,315\n\n#input voice class\nclass InputVoice(cocos.layer.Layer):\n is_event_handler=True\n\n def __init__(self):\n super(InputVoice,self).__init__()\n # init voice\n self.CHUNK=1024\n self.RATE=44100\n\n self.pitchLabel=cocos.text.Label('Pitch: ',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n\n self.volumeLabel=cocos.text.Label('Volume: ',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n\n self.plantLabel=cocos.text.Label('Number of flowers planted: ',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n\n self.bloomLabel=cocos.text.Label('Number of flowers bloomed: ',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n\n self.colorLabel=cocos.text.Label('Color of the newest flower: ',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n '''\n self.colorLabel2=cocos.text.Label('',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n\n self.stageLabel=cocos.text.Label('',\n font_name='Times New Roman',\n font_size=16,\n anchor_x='center', anchor_y='center')\n\n '''\n self.pitchLabel.position=780,100\n self.volumeLabel.position=780,140\n self.plantLabel.position=780,480\n self.bloomLabel.position=780,440\n self.colorLabel.position=780,400\n #self.colorLabel2.position=780,220\n #self.stageLabel.position=780,180\n\n self.add(self.pitchLabel)\n self.add(self.volumeLabel)\n self.add(self.plantLabel)\n self.add(self.bloomLabel)\n self.add(self.colorLabel)\n 
#self.add(self.colorLabel2)\n #self.add(self.stageLabel)\n\n #init voice input\n p=pyaudio.PyAudio()\n\n #Open stream\n self.stream = p.open(format=pyaudio.paFloat32, channels=1, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)\n # Aubio's pitch detection\n self.pDetection = aubio.pitch(\"default\", self.CHUNK*2, self.CHUNK, self.RATE)\n\n #Set unit.\n self.pDetection.set_unit(\"Hz\")\n self.pDetection.set_silence(-40)\n\n #add flower\n self.flowers = cocos.cocosnode.CocosNode()\n self.flowerid=1\n self.flower=Flower(self.flowerid,'ui/white.png')\n self.colorLabel.element.text='Color of the newest flower: white'\n self.flowers.add(self.flower)\n num_flowers_list[6]+=1\n self.add(self.flowers)\n\n self.water=WaterBar()\n self.nutrition=NutritionBar()\n self.add(self.water)\n self.add(self.nutrition)\n\n self.flowerbar=FlowerBar(self.flower)\n\n self.endmenuLayer=None\n self.time_update=0\n\n self.schedule(self.update)\n\n # check is mouse is hovering over a flower\n def is_inside(self, flower, x, y):\n position_x,position_y=flower.position\n if (flower.stage4):\n dx1=2\n dx2=18\n dy1=1\n dy2=20\n elif (flower.stage5):\n dx1=2\n dx2=20\n dy1=1\n dy2=30\n elif (flower.stage6):\n dx1=8\n dx2=27\n dy1=0\n dy2=58\n elif (flower.stage7):\n dx1=8\n dx2=27\n dy1=0\n dy2=59\n else:\n dx1=5\n dx2=15\n dy1=0\n dy2=15\n return ((position_x+dx1 < x/2 < position_x+dx2) and (position_y+dy1 < y/2 < position_y+dy2))\n\n # mouse motion handler\n def on_mouse_motion(self, x, y, dx, dy):\n global flower_under_mouse\n x,y=director.get_virtual_coordinates(x,y)\n if (flower_under_mouse != None):\n if (not self.is_inside(flower_under_mouse,x,y)):\n flower_under_mouse=None\n self.remove(self.flowerbar)\n #self.colorLabel2.element.text=''\n #self.stageLabel.element.text=''\n else:\n for flower in self.flowers.get_children():\n if (self.is_inside(flower,x,y)):\n flower_under_mouse=flower\n break\n if (flower_under_mouse != None):\n self.flowerbar=FlowerBar(flower_under_mouse)\n #self.colorLabel2.element.text='Color: '+flower_under_mouse.color[3:-4]\n #self.stageLabel.element.text='Stage: '+flower_under_mouse.stage\n self.add(self.flowerbar)\n\n def add_flower(self, i, color):\n global num_pitches, x_coors, flowers, num_flowers_list\n num_pitches[i]+=1\n if ((num_pitches[i]%50 == 0) and (len(x_coors) > 0)):\n print(self.flowerid)\n self.flowerid+=1\n new_flower=Flower(self.flowerid,'ui/'+color+'.png')\n self.flowers.add(new_flower)\n num_flowers_list[i]+=1\n self.colorLabel.element.text='Color of the newest flower: '+color\n\n def update(self,dt):\n global num_pitches, x_coors, num_bloomed, num_flowers, audiomixer, volumes, pitches, time_data, volume_sing_avg, volume_std\n if (num_bloomed < num_flowers):\n data = self.stream.read(self.CHUNK,exception_on_overflow = False)\n sample = np.fromstring(data, dtype=aubio.float_type)\n pitch=self.pDetection(sample)[0]\n self.time_update+=dt\n volume=np.sum(sample**2)/len(sample)\n pitches.append(pitch)\n time_data.append(self.time_update)\n volumes.append(volume)\n # print(volume,volume_sing_avg,volume_std)\n\n if (0 < pitch < 200):\n self.add_flower(0, 'purple')\n elif (200 <= pitch < 250):\n self.add_flower(1, 'blue')\n elif (250 <= pitch < 300):\n self.add_flower(2, 'cyan')\n elif (300 <= pitch < 400):\n self.add_flower(3, 'orange')\n elif (400 <= pitch < 500):\n self.add_flower(4, 'pink')\n elif (500 <= pitch < 600):\n self.add_flower(5, 'yellow')\n elif (600 <= pitch < 1100):\n self.add_flower(6, 'white')\n\n if(volume > 0.0001):\n 
n=len(self.flowers.get_children())\n num_bloomed=0\n for flower in self.flowers.get_children():\n flower.points+=max((1-abs(volume-volume_sing_avg)/volume_std)/n,0)\n if (flower.stage7):\n num_bloomed+=1\n\n if (volume > volume_sing_avg):\n self.water.set_value((volume-volume_sing_avg)/volume_std)\n self.nutrition.set_value((volume-volume_sing_avg)/volume_std)\n else:\n self.water.set_value((volume-volume_sing_avg)/volume_sing_avg)\n self.nutrition.set_value((volume-volume_sing_avg)/volume_sing_avg)\n\n volume=\"{:.6f}\".format(volume)\n #print(dt)\n self.pitchLabel.element.text='Pitch: '+pitch.astype('str')\n self.volumeLabel.element.text='Volume: '+volume\n self.plantLabel.element.text='Number of flowers planted: '+str(len(self.flowers.get_children()))\n self.bloomLabel.element.text='Number of flowers bloomed: '+str(num_bloomed)\n if (flower_under_mouse != None):\n self.flowerbar.set_value(flower_under_mouse.points)\n #self.stageLabel.element.text='Stage: '+flower_under_mouse.stage\n if (num_bloomed == num_flowers):\n self.pitchLabel.element.text=''\n self.volumeLabel.element.text=''\n self.colorLabel.element.text=''\n self.congratsLabel=cocos.text.Label('Congratulations!',\n font_name='Times New Roman',\n font_size=36,\n anchor_x='center', anchor_y='center')\n self.congratsLabel.position=780,120\n self.add(self.congratsLabel)\n\n audiomixer.unpause()\n #add end menu\n self.endmenuLayer=MultiplexLayer(GameEnd(self))\n endscene=cocos.scene.Scene(scroller_menu,self.endmenuLayer)\n director.replace(FadeTransition(endscene, duration=2))\n\n def reset(self):\n #remove all flowers\n global num_pitches, num_bloomed, num_flowers, x_coors, pitches, volumes\n for f in self.flowers.get_children():\n if(f.id!=1):\n self.flowers.remove(f)\n\n #reset all children\n self.nutrition.reset()\n self.water.reset()\n self.flower.reset()\n self.flowerbar.reset()\n\n #clear all lists\n num_pitches=[0]*7\n num_flowers_list=[0]*7\n pitches=[]\n volumes=[]\n num_bloomed=0\n x_coors=list(range(0,285,15))\n\n self.flowerid=1\n self.time_update=0\n self.congratsLabel.element.text=''\n\n#class for instruction\nclass Instruction(cocos.layer.Layer):\n is_event_handler = True\n\n def __init__(self):\n super(Instruction,self).__init__()\n\n #Draw Instruction board\n self.test=cocos.sprite.Sprite('assets/img/test.png')\n self.test.position=(480,300)\n self.in1=audiomixer.Sound('sounds/1.ogg')\n self.in1_notplayed=True\n self.in2=audiomixer.Sound('sounds/2.ogg')\n self.in2_notplayed=True\n self.in3=audiomixer.Sound('sounds/3.ogg')\n self.in3_notplayed=True\n self.in4=audiomixer.Sound('sounds/4.ogg')\n self.in4_notplayed=True\n self.in5=audiomixer.Sound('sounds/5.ogg')\n self.in5_notplayed=True\n self.in6=audiomixer.Sound('sounds/6.ogg')\n self.in6_notplayed=True\n\n # self.test.image_anchor=0,0\n self.add(self.test)\n\n #Draw Go Back Button\n self.button=cocos.sprite.Sprite('assets/img/gobackbutton.png')\n self.button.position=(850,80)\n self.add(self.button)\n\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n # This next line seems a bit odd, and that's because it is!\n self.position_x, self.position_y = director.get_virtual_coordinates(x, y)\n print(self.position_x)\n print(self.position_y)\n if ((840 < self.position_x <860 ) and (70 < self.position_y < 90) ):\n clicksound.play()\n menuLayer_back = MultiplexLayer(MainMenus())\n main_menu_scene = cocos.scene.Scene(scroller_menu,menuLayer_back)\n director.replace(FadeTransition(main_menu_scene, duration=1))\n\n def on_mouse_motion(self, x, y, dx, dy):\n x, 
y=director.get_virtual_coordinates(x,y)\n if ((138 < x < 167 ) and (460 < y < 500) ):\n if(self.in1_notplayed):\n self.in1_notplayed=False\n self.in1.play()\n else:\n self.in1_notplayed=True\n if ((438 < x < 477 ) and (386 < y < 425) ):\n if(self.in2_notplayed):\n self.in2_notplayed=False\n self.in2.play()\n else:\n self.in2_notplayed=True\n if ((680 < x < 705 ) and (285 < y < 335) ):\n if(self.in3_notplayed):\n self.in3_notplayed=False\n self.in3.play()\n else:\n self.in3_notplayed=True\n if ((732 < x < 772 ) and (230 < y < 270) ):\n if(self.in4_notplayed):\n self.in4_notplayed=False\n self.in4.play()\n else:\n self.in4_notplayed=True\n if ((555 < x < 595 ) and (195 < y < 235) ):\n if(self.in5_notplayed):\n self.in5_notplayed=False\n self.in5.play()\n else:\n self.in5_notplayed=True\n if ((552 < x < 595 ) and (120 < y < 165) ):\n if(self.in6_notplayed):\n self.in6_notplayed=False\n self.in6.play()\n else:\n self.in6_notplayed=True\n\n\n\n#class for credits\nclass Credits(cocos.layer.Layer):\n is_event_handler = True\n\n def __init__(self):\n super(Credits,self).__init__()\n\n #Draw Instruction board\n self.test=cocos.sprite.Sprite('assets/img/credits.png')\n self.test.position=(480,300)\n self.add(self.test)\n\n #Draw Go Back Button\n self.button=cocos.sprite.Sprite('assets/img/gobackbutton.png')\n self.button.position=(850,80)\n self.add(self.button)\n\n #Draw Chris\n self.chris = cocos.sprite.Sprite('assets/img/chris.png')\n self.chris.position=(690,400)\n self.chris.scale=0.6\n self.chris.do(Repeat(RotateBy(10,0.2) + RotateBy(-10,0.2)))\n self.add(self.chris)\n self.chris_name = cocos.sprite.Sprite('assets/img/chris_name.png')\n self.chris_name.position=(690,340)\n # self.chris_name.do(Repeat(RotateBy(-10,0.2) + RotateBy(10,0.2)))\n self.add(self.chris_name)\n self.chris_sound=audiomixer.Sound('sounds/chris.ogg')\n self.chris_sound_notplayed=True\n\n #Draw Sherry\n self.sherry = cocos.sprite.Sprite('assets/img/sherry.png')\n self.sherry.position=(238,400)\n self.sherry.scale=0.6\n self.sherry.do(Repeat(RotateBy(10,0.2) + RotateBy(-10,0.2)))\n self.add(self.sherry)\n self.sherry_name = cocos.sprite.Sprite('assets/img/sherry_name.png')\n self.sherry_name.position=(238,340)\n # self.sherry_name.do(Repeat(RotateBy(-10,0.2) + RotateBy(10,0.2)))\n self.add(self.sherry_name)\n self.sherry_sound=audiomixer.Sound('sounds/sherry.ogg')\n self.sherry_sound_notplayed=True\n\n #Draw Hening\n self.hening = cocos.sprite.Sprite('assets/img/hening.png')\n self.hening.position=(467,400)\n self.hening.scale=0.6\n self.hening.do(Repeat(RotateBy(10,0.2) + RotateBy(-10,0.2)))\n self.add(self.hening)\n self.hening_name = cocos.sprite.Sprite('assets/img/hening_name.png')\n self.hening_name.position=(467,340)\n # self.hening_name.do(Repeat(RotateBy(-10,0.2) + RotateBy(10,0.2)))\n self.add(self.hening_name)\n self.hening_sound=audiomixer.Sound('sounds/hening.ogg')\n self.hening_sound_notplayed=True\n\n #Draw Jordan\n self.jordan = cocos.sprite.Sprite('assets/img/jordan.png')\n self.jordan.position=(398,273)\n self.jordan.scale=0.57\n self.jordan.do(Repeat(RotateBy(10,0.2) + RotateBy(-10,0.2)))\n self.add(self.jordan)\n self.jordan_name = cocos.sprite.Sprite('assets/img/jordan_name.png')\n self.jordan_name.position=(540,262)\n # self.jordan_name.do(Repeat(RotateBy(-10,0.2) + RotateBy(10,0.2)))\n self.add(self.jordan_name)\n\n #Draw logo\n self.logo = cocos.sprite.Sprite('assets/img/logo.png')\n self.logo.position=(400,145)\n self.logo.scale=0.55\n self.logo.do(Repeat(RotateBy(10,0.2) + RotateBy(-10,0.2)))\n 
self.add(self.logo)\n self.logo_name = cocos.sprite.Sprite('assets/img/logo_name.png')\n self.logo_name.position=(540,145)\n # self.logo_name.do(Repeat(RotateBy(-10,0.2) + RotateBy(10,0.2)))\n self.add(self.logo_name)\n self.hcv_sound=audiomixer.Sound('sounds/hcv lab.ogg')\n self.hcv_sound_notplayed=True\n\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n # This next line seems a bit odd, and that's because it is!\n self.position_x, self.position_y = director.get_virtual_coordinates(x, y)\n print(self.position_x)\n print(self.position_y)\n if ((840 < self.position_x <860 ) and (70 < self.position_y < 90) ):\n clicksound.play()\n menuLayer_back = MultiplexLayer(MainMenus())\n main_menu_scene = cocos.scene.Scene(scroller_menu,menuLayer_back)\n director.replace(FadeTransition(main_menu_scene, duration=1))\n\n def on_mouse_motion(self, x, y, dx, dy):\n x, y=director.get_virtual_coordinates(x,y)\n if ((655 < x < 715 ) and (360 < y < 440) ):\n if(self.chris_sound_notplayed):\n self.chris_sound_notplayed=False\n self.chris_sound.play()\n else:\n self.chris_sound_notplayed=True\n if ((207 < x < 268 ) and (365 < y < 440) ):\n if(self.sherry_sound_notplayed):\n self.sherry_sound_notplayed=False\n self.sherry_sound.play()\n else:\n self.sherry_sound_notplayed=True\n if ((440 < x < 495 ) and (355 < y < 440) ):\n if(self.hening_sound_notplayed):\n self.hening_sound_notplayed=False\n self.hening_sound.play()\n else:\n self.hening_sound_notplayed=True\n if ((345 < x < 450 ) and (95 < y < 185) ):\n if(self.hcv_sound_notplayed):\n self.hcv_sound_notplayed=False\n self.hcv_sound.play()\n else:\n self.hcv_sound_notplayed=True\n print(x,y)\n\n\ntest_scene = cocos.scene.Scene()\ntest_scene.add(scroller_test)\ntest_scene.add(Testing())\n\nmain_scene = cocos.scene.Scene()\nmain_scene.add(scroller)\nmain_scene.add(InputVoice())\nscroller_instruction = ScrollingManager()\nscroller_instruction.add(mapLayer_menu)\ninstruction_scene = cocos.scene.Scene()\ninstruction_scene.add(scroller_instruction)\ninstruction_scene.add(Instruction())\n\ncredits_scene = cocos.scene.Scene()\ncredits_scene.add(scroller_instruction)\ncredits_scene.add(Credits())\n\n#class for the main menu\nclass MainMenus(Menu):\n def __init__(self):\n global audiomixer, clicksound, bgmplayed\n super(MainMenus, self).__init__(\" \")\n if (bgmplayed == False):\n self.bgm=audiomixer.Sound('sounds/game music.ogg')\n self.bgm.play(-1)\n bgmplayed=True\n\n pyglet.font.add_directory('.')\n self.font_title['font_size'] = 50\n # self.menu_valign = CENTER+100\n# self.menu_halign = CENTER+300\n\n## self.background=cocos.sprite.Sprite('assets/img/map_garden_back.png')\n## self.background.position = (10,10)\n## self.add(self.background.position,z=0)\n # self.menu_hmargin = 200\n # self.menu_vmargin = 10\n self.font_title = {\n 'text': 'title',\n 'font_name': 'Arial',\n 'font_size': 56,\n 'color': (192, 192, 192, 255),\n 'bold': False,\n 'italic': False,\n 'anchor_y': 'center',\n 'anchor_x': 'center',\n 'dpi': 96,\n 'x': 100, 'y': 200,\n }\n self.font_item = {\n 'font_name': 'Comic Sans MS',\n 'font_size': 28,\n 'bold': True,\n 'italic': False,\n 'anchor_y': \"center\",\n 'anchor_x': \"center\",\n 'color': (57, 34, 3, 255),\n 'dpi': 96,\n }\n self.font_item_selected = {\n 'font_name': 'Comic Sans MS',\n 'font_size': 35,\n 'bold': True,\n 'italic': False,\n 'anchor_y': \"center\",\n 'anchor_x': \"center\",\n 'color': (57, 34, 3, 255),\n 'dpi': 96,\n }\n\n items = []\n items.append(MenuItem(\" Start Game\", self.on_new_game))\n items.append(MenuItem(\" 
Instruction\", self.on_instruction))\n        items.append(MenuItem(\" Credits\", self.on_credits))\n        items.append(MenuItem(\" Exit\", self.on_exit))\n        items.append(MenuItem(\" \", self.on_ignore))\n        items.append(MenuItem(\" \", self.on_ignore))\n\n        self.create_menu(items, shake(), shake_back())\n\n    def on_new_game(self):\n        # director.set_scene(main_scene)\n        clicksound.play()\n        time.sleep(0.5)\n        audiomixer.pause()\n        director.replace(FadeTransition(test_scene, duration=2))\n\n    def on_instruction(self):\n        print(\"To instruction\")\n        clicksound.play()\n        director.replace(FadeTransition(instruction_scene, duration=1))\n        # self.parent.switch_to(2)\n\n    def on_credits(self):\n        clicksound.play()\n        print(\"To credits\")\n        director.replace(FadeTransition(credits_scene, duration=1))\n        # self.parent.switch_to(1)\n\n    def on_ignore(self):\n        pass\n\n    def on_quit(self):\n        clicksound.play()\n        director.pop()\n\n#class for the gameend menu\nclass GameEnd(Menu):\n    def __init__(self, game):\n        global audiomixer, clicksound\n        # call superclass with the title\n        super(GameEnd, self).__init__(\" \")\n        self.game=game\n        pyglet.font.add_directory('.')\n        self.font_title['font_size'] = 50\n\n        self.font_item = {\n            'font_name': 'Comic Sans MS',\n            'font_size': 28,\n            'bold': True,\n            'italic': False,\n            'anchor_y': \"center\",\n            'anchor_x': \"center\",\n            'color': (57, 34, 3, 255),\n            'dpi': 96,\n        }\n        self.font_item_selected = {\n            'font_name': 'Comic Sans MS',\n            'font_size': 35,\n            'bold': True,\n            'italic': False,\n            'anchor_y': \"center\",\n            'anchor_x': \"center\",\n            'color': (57, 34, 3, 255),\n            'dpi': 96,\n        }\n\n        items = []\n        items.append(MenuItem(' Save Data ', self.on_save_data))\n        items.append(MenuItem(' Replay ', self.on_restart))\n        items.append(MenuItem(' Quit ', self.on_quit_game))\n\n        self.create_menu(items, shake(), shake_back())\n\n    def on_save_data(self):\n        global pitches, volumes, time_data\n        clicksound.play()\n        #use bar plot to plot flower color\n        plt.figure()\n        num_list=num_flowers_list\n        flower_list=['purple','blue','cyan','orange','pink','yellow','white']\n        barlist=plt.bar(range(len(num_list)),num_list,tick_label=flower_list)\n        barlist[0].set_color('#efd2f8')\n        barlist[1].set_color('#cdf6ff')\n        barlist[2].set_color('#cbf3d4')\n        barlist[3].set_color('#ffb788')\n        barlist[4].set_color('#ffbcbc')\n        barlist[5].set_color('#ffecbe')\n        barlist[6].set_color('#fffbfb')\n        plt.savefig('output data/flower.png')\n\n        #plot pitch\n        plt.figure()\n        plt.plot(time_data,pitches)\n        plt.ylabel('pitches')\n        plt.xlabel('time')\n        plt.savefig('output data/pitch.png')\n\n        #plot volume\n        plt.figure()\n        plt.plot(time_data,volumes)\n        plt.ylabel('volumes')\n        plt.xlabel('time')\n        plt.savefig('output data/volume.png')\n\n\n    def on_restart(self):\n        clicksound.play()\n        #go back to main scene\n        time.sleep(0.5)\n        director.replace(FadeTransition(main_scene, duration=1))\n        audiomixer.pause()\n        self.game.reset()\n\n    def on_quit_game(self):\n        clicksound.play()\n        time.sleep(0.5)\n        director.pop()\n\ndef main():\n    #scene for the main game\n    menuLayer = MultiplexLayer(MainMenus())\n    scene = cocos.scene.Scene(scroller_menu,menuLayer)\n    director.run(scene)\n\nif __name__==\"__main__\":\n    main()\n","sub_path":"garden/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":39064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"446036097","text":" \nimport boto3\nimport configparser\nfrom lib.iam_role import delete_iam_role\nfrom lib.redshift_cluster import delete_redshift_cluster, wait_for_cluster_deletion\nfrom lib.vpc_security_group import delete_security_group\n\n## Reading Configuration Params\nconfig_path = 'func.cfg'\nconfig = configparser.ConfigParser()\nconfig.read_file(open(config_path))\n\nKEY = config.get('AWS','KEY')\nSECRET = config.get('AWS','SECRET')\n\n## Creating Redshift, S3 and IAM, EC2 clients\n\niam_client = boto3.client(\"iam\",\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\nredshift_client = boto3.client(\"redshift\",\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\ns3 = boto3.resource(\"s3\",\n region_name=\"us-west-2\",\n aws_access_key_id=config.get('AWS','KEY'),\n aws_secret_access_key=config.get('AWS','SECRET')\n )\n\nec2 = boto3.client(\"ec2\",\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n\n\ndef delete_aws_resources():\n \"\"\"Deletes all AWS Resources\n \"\"\"\n print(\"-\" * 15, \"Deleting AWS resources\")\n delete_redshift_cluster(config_path,redshift_client)\n wait_for_cluster_deletion(config_path, redshift_client)\n delete_iam_role(config_path, iam_client)\n delete_security_group(config_path, ec2)\n print(\"-\" * 15, \"All AWS resources have been deleted\")\n print(\"\")\nif __name__ == \"__main__\":\n delete_aws_resources()\n","sub_path":"lib/delete_aws_resources.py","file_name":"delete_aws_resources.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"375204132","text":"from models import HttpRequest, HttpResponse\nfrom wsgiref.simple_server import WSGIRequestHandler, WSGIServer, make_server\n\n\nclass Route:\n\n def __init__(self, path, endpoint, *args, **kwargs):\n self.path = path\n self.endpoint = endpoint\n\n def execute(self, req):\n if callable(self.endpoint):\n return self.endpoint(req)\n\n\nclass API:\n\n def __init__(self, name, version, *args, **kwargs):\n self.name = name\n self.version = version\n self.routes = {}\n self.static_route = \"/static\"\n self.media_route = \"/media\"\n self.template_route = \"/templates\"\n self.apps = {}\n\n def add_route(self, path, endpoint):\n \"\"\"Adds new routes\"\"\"\n self.routes[path] = Route(path, endpoint)\n\n def get_static_url(self, asset):\n return \"%s/%s\" % (self.static_route, str(asset))\n\n def mount_wsgi_app(self, path, app):\n \"\"\"Add new app to route\"\"\"\n self.apps[path] = app\n\n def redirect(self, req, path):\n response = HttpResponse()\n response.status = \"301 MOVED_PERMANENTLY\"\n response.update_headers(\"Location\", path)\n return response\n\n def validate_route_and_call_endpoint(self, path):\n \"\"\"Validates the routes and executes them\"\"\"\n if path in self.routes:\n route = self.routes[path]\n if callable(route.endpoint):\n return route.execute(self.request)\n else:\n raise ValueError('A callable was expected')\n elif path in self.apps:\n app = self.apps[path]\n app()\n else:\n res_404 = HttpResponse(\"404 Not Found\")\n res_404.status = \"404 NOT_FOUND\"\n return res_404\n\n def shift_path(self, level, path):\n \"\"\"Shifts the path by level amount deeper\"\"\"\n path_list = path.split(\"/\")\n path_list = path_list[level+1:]\n temp_list = []\n for p in path_list:\n if p != \" \":\n temp_list.append(p)\n\n new_path = \"/\"\n for p in temp_list:\n new_path = new_path + (\"%s/\" % p)\n return new_path[1:]\n\n def get_base_mount_path(self, path):\n \"\"\"Returns the base mount path of the app\"\"\"\n path_list = path.split(\"/\")\n path_list = path_list[1:2]\n temp_list = []\n for p in path_list:\n if p != \" \":\n temp_list.append(p)\n\n new_path = \"/\"\n for p in temp_list:\n new_path = new_path + (\"%s/\" % p)\n return new_path[1:]\n\n def wsgi(self, environ, start_reponse):\n \"\"\"The main WSGI application callable\"\"\"\n response = self.validate_route_and_call_endpoint(self.request.path)\n headers = []\n for k, v in response.headers.items():\n headers.append((k, v))\n start_reponse(response.status, headers)\n return iter([response.content])\n\n def __call__(self, environ, start_response):\n \"\"\"For running the server by calling the application callable\"\"\"\n self.request = HttpRequest(environ)\n # print(\"__call__ was called for %s\" % self.request.path)\n if self.get_base_mount_path(self.request.path) in self.routes:\n return self.wsgi(environ, start_response)\n elif self.get_base_mount_path(self.request.path) in self.apps:\n app = self.apps[self.get_base_mount_path(self.request.path)]\n environ_cp = environ\n environ_cp['PATH_INFO'] = self.shift_path(1, self.request.path)\n return app(environ_cp, start_response)\n\n def wsgi_app_caller(self, environ, start_response):\n self.request = HttpRequest(environ)\n return self.wsgi(environ, start_response)\n\n def run(self, host, port):\n \"\"\"Run the app on a local development server\"\"\"\n server = make_server(host, port, self.wsgi_app_caller,\n WSGIServer, WSGIRequestHandler)\n try:\n print(\"Listening on port %s\" % port)\n server.serve_forever()\n except KeyboardInterrupt:\n 
server.server_close()\n\n\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"110072975","text":"from copy import deepcopy\n\n\n# Check whether the game is over: equal row, column, or diagonal; return 1 if A wins, 2 if B wins, 0 if not finished\ndef end(now):\n    # Equal row\n    for i in range(3):\n        if now[i][0] == now[i][1] == now[i][2] == 1:\n            return 1\n        elif now[i][0] == now[i][1] == now[i][2] == 2:\n            return 2\n    # Equal column\n    for j in range(3):\n        if now[0][j] == now[1][j] == now[2][j] == 1:\n            return 1\n        elif now[0][j] == now[1][j] == now[2][j] == 2:\n            return 2\n    # Equal diagonal\n    if now[0][0] == now[1][1] == now[2][2] == 1 or now[0][2] == now[1][1] == now[2][0] == 1:\n        return 1\n    if now[0][2] == now[1][1] == now[2][0] == 2 or now[0][0] == now[1][1] == now[2][2] == 2:\n        return 2\n    # No winner yet, return 0\n    return 0\n\n\ndef get_next(cur_node, player, point):\n    i = int(point / 3)\n    j = point % 3\n    node = deepcopy(cur_node)\n    node[i][j] = player\n    return node\n\n\ndef res(end, node):\n    if end == 1: # win\n        res = 1\n        for i in range(3):\n            for j in range(3):\n                if not node[i][j]:\n                    res += 1\n    elif end == 2: # lose\n        res = -1\n        for i in range(3):\n            for j in range(3):\n                if not node[i][j]:\n                    res -= 1\n    return res\n\n\ndef maxmin(player, cur_node, alph, bet):\n    # Store the result of the game-over check in the variable en\n    en = end(cur_node)\n    # If the game is over, return the score (positive or negative)\n    if en:\n        return res(en, cur_node)\n\n    # Store the positions of board cells that are 0\n    node_list = [] # 0-8\n    # Traverse the current board\n    for i in range(3):\n        for j in range(3):\n            # If the cell is 0\n            if not cur_node[i][j]:\n                # Append the position to node_list; position = row * 3 + column\n                node_list.append(i * 3 + j)\n    # Number of cells that are 0\n    blank = len(node_list)\n\n    # If no cell is 0, return 0\n    if not blank: # draw\n        return 0\n    print(cur_node[0],'\\n',cur_node[1],'\\n',cur_node[2])\n    print()\n\n    # Variables alph, bet\n    alpha = alph\n    beta = bet\n    if player == 1:\n        # best = 0-blank\n        alpha = -10\n        for i in node_list:\n            new_node = get_next(cur_node, player, i)\n\n            val = maxmin(2, new_node, alpha, beta)\n            if val >= beta:\n                return val\n            if val >= alpha:\n                alpha = val\n        print('alpha',alpha)\n        return alpha\n    else: # 2\n        # best = blank\n        beta = 10\n        for i in node_list:\n            new_node = get_next(cur_node, player, i)\n            val = maxmin(1, new_node, alpha, beta) # 1\n            if val <= alpha:\n                return val\n            if val <= beta:\n                beta = val\n        print('beta',beta)\n        return beta\n\n\nn = int(input())\ninp = [[0, 0, 0],\n       [0, 0, 0],\n       [0, 0, 0]]\nfor i in range(n):\n    for j in range(3):\n        a = input().split()\n        inp[j][0] = int(a[0])\n        inp[j][1] = int(a[1])\n        inp[j][2] = int(a[2])\n\n    result = maxmin(1, inp, -10, 10)\n    print(result)\n\n","sub_path":"201803/04_OXchess2.py","file_name":"04_OXchess2.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"402507427","text":"from __future__ import print_function\nimport argparse\nimport os\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optimizer\nfrom torch.autograd import Variable\n\nimport data\nimport models\nimport utils\nimport test\nimport time\nimport loss\n\nimport matplotlib as mlp\nmlp.use('Agg')\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef adjust_learning_rate(optimizer, epoch, step_in_epoch, total_steps_in_epoch):\n    lr = args.lr\n    epoch = epoch + step_in_epoch / total_steps_in_epoch\n\n    lr *= utils.cosine_rampdown(epoch, args.lr_rampdown_epochs)\n\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n\n# =============================== #\n# ============ START ============ #\n# =============================== #\n\ndevice = torch.device(\"cuda:0\")\n#torch.backends.cudnn.benchmark = True\n\n\nDROPOUT_RATE = 0.05\nNUM_EPOCH = 200\nBATCH_PRINT = 300\nBATCH_SIZE = 64\nTHRESHOLD = 0.4 # for mnist, usps\n#THRESHOLD = 0.5\nPATH = './figs/'+time.strftime(\"%Y%m%d-%H%M%S\")+'/'\n\n\nparser = argparse.ArgumentParser(description='svhn2mnist or usps2mnist')\nparser.add_argument('--task',\n                    choices=['svhn2mnist', 'usps2mnist', 'mnist2usps',\n                             'amazon2dslr', 'dslr2amazon', 'webcam2dslr', 'dslr2webcam',\n                             'amazon2webcam', 'webcam2amazon'],\n                    default='svhn2mnist',\n                    help='type of task')\nargs = parser.parse_args()\n\nmodel = models.CNN_Model(DROPOUT_RATE).to(device)\n\nif (args.task == 'svhn2mnist'):\n    SOURCE_DATASET = data.training_dataset_SVHN(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_MNIST(BATCH_SIZE)\n    NUM_CLASSES = 6\n\nelif (args.task == 'usps2mnist'):\n    SOURCE_DATASET = data.training_dataset_USPS(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_MNIST(BATCH_SIZE)\n    NUM_CLASSES = 6\n\nelif (args.task == 'mnist2usps'):\n    SOURCE_DATASET = data.training_dataset_MNIST(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_USPS(BATCH_SIZE)\n    NUM_CLASSES = 6\n\nelif (args.task == 'amazon2dslr'):\n    SOURCE_DATASET = data.training_dataset_AMAZON(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_DSLR(BATCH_SIZE)\n    NUM_CLASSES = 16\n\nelif (args.task == 'dslr2amazon'):\n    SOURCE_DATASET = data.training_dataset_DSLR(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_AMAZON(BATCH_SIZE)\n    NUM_CLASSES = 16\n\nelif (args.task == 'webcam2dslr'):\n    SOURCE_DATASET = data.training_dataset_WEBCAM(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_DSLR(BATCH_SIZE)\n    NUM_CLASSES = 16\n\nelif (args.task == 'dslr2webcam'):\n    SOURCE_DATASET = data.training_dataset_DSLR(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_WEBCAM(BATCH_SIZE)\n    NUM_CLASSES = 16\n\nelif (args.task == 'amazon2webcam'):\n    SOURCE_DATASET = data.training_dataset_AMAZON(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_WEBCAM(BATCH_SIZE)\n    NUM_CLASSES = 16\n\nelif (args.task == 'webcam2amazon'):\n    SOURCE_DATASET = data.training_dataset_WEBCAM(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_AMAZON(BATCH_SIZE)\n    NUM_CLASSES = 16\n\n\nelse:\n    SOURCE_DATASET = data.training_dataset_SVHN(BATCH_SIZE)\n    TARGET_DATASET = data.test_dataset_CIFAR(BATCH_SIZE)\n\nCIFAR_DATASET = data.test_dataset_CIFAR(BATCH_SIZE)\n\n\ncriterion_bce = nn.BCELoss()\ncriterion_cel = nn.CrossEntropyLoss()\ncriterion_epi = loss.UncertaintyLoss(T_samples=10).cuda()\n#optimizer = torch.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)\noptimizer = torch.optim.SGD(model.parameters(), lr=0.001, 
momentum=0.9) #MNIST USPS\n\n\n\nbest_prec1 = 0\nbest_pred_y = []\nbest_gt_y = []\nglobal_step = 0\ntotal_steps = 150 * len(SOURCE_DATASET)\n\nos.makedirs(os.path.join(PATH))\n\n'''\ncheckpoint = torch.load('./model_best.pth.tar')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nloss = checkpoint['loss']\n'''\n\n#try:\nfor epoch in range(1, NUM_EPOCH + 1):\n model.train()\n for batch_idx, (batch_s, batch_t) in enumerate(zip(SOURCE_DATASET, TARGET_DATASET)):\n #adjust_learning_rate(optimizer, epoch, batch_idx, len(SOURCE_DATASET))\n p = global_step / total_steps\n constant = 2. / (1. + np.exp(-10 * p)) - 1\n\n data_s, target_s = batch_s\n data_t, target_t = batch_t\n\n data_s, target_s = data_s.cuda(), target_s.cuda(non_blocking=True)\n data_t, target_t = data_t.cuda(), target_t.cuda(non_blocking=True)\n\n batch_size_s = len(target_s)\n batch_size_t = len(target_t)\n\n optimizer.zero_grad()\n predict_s = model(data_s)\n predict_t = model(data_t, constant = constant, adaption = True)\n\n #print(\"data\", target_s)\n #print(\"predict\", predict_s)\n loss_cel = criterion_cel(predict_s, target_s)\n loss_epi = criterion_epi(data_s, data_t, model)\n\n loss_c = loss_cel + 0.05 * loss_epi\n #loss_c = 4 * loss_cel + 0.01 * loss_epi\n #loss_c = loss_cel\n\n output_t_prob_unk = F.softmax(predict_t, dim=1)[:,-1] \n loss_adv = criterion_bce(output_t_prob_unk, torch.tensor([THRESHOLD]*batch_size_t).cuda())\n loss = loss_c + loss_adv\n\n loss.backward()\n optimizer.step()\n\n global_step += 1\n\n if (batch_idx % BATCH_PRINT == 0):\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss_c: {:.6f}\\tLoss_adv: {:.6f}\\tConstant: {:.4f}'.format(epoch, batch_idx * BATCH_SIZE, len(SOURCE_DATASET.dataset),\n 100. * batch_idx / len(SOURCE_DATASET), loss_c.item(), loss_adv.item(), constant))\n\n if epoch == 1 or epoch % 20 == 0:\n predict, pred_y, true_y = test.test_all(model, SOURCE_DATASET, TARGET_DATASET, BATCH_SIZE, PATH, epoch)\n\n is_best = predict > best_prec1\n best_prec1 = max(predict, best_prec1)\n utils.save_checkpoint({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best)\n if is_best:\n best_gt_y = true_y\n best_pred_y = pred_y\n\n\npredict, pred_y, true_y = test.test_all(model, SOURCE_DATASET, CIFAR_DATASET, BATCH_SIZE, PATH, epoch)\n\n\n\n\nprint (\"------Best Result-------\")\nutils.cal_acc(best_gt_y, best_pred_y, NUM_CLASSES)\ntemperature_scaled = utils.ModelWithTemperature(model)\ntemperature_scaled.set_temperature(TARGET_DATASET)\n#except KeyboardInterrupt:\n# print (\"------Best Result-------\")\n# utils.cal_acc(best_gt_y, best_pred_y, NUM_CLASSES)\n","sub_path":"domain_adaptation/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"433241207","text":"def isMonotonic(array):\n isNonDecreasing = True\n isNonIncreasing = True\n for i in range(1, len(array)):\n if array[i] < array[i-1]:\n isNonDecreasing = False\n if array[i] > array[i-1]:\n isNonIncreasing = False\n return isNonIncreasing or isNonDecreasing\n\n\n#Driver Code:\narray = [-1, -5, -10, -1100, -1100, -1101, -1102, -9001]\nprint(isMonotonic(array))","sub_path":"AEQuestions/0020 - Monotonic_array/iterative.py","file_name":"iterative.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"551889473","text":"from flask import request, jsonify\nfrom flask.views import MethodView\nfrom ..database import session\nfrom ..models import User\nfrom ..authenticate import create_token, get_facebook_userinfo\n\n\nclass FacebookLoginView(MethodView):\n def post(self):\n facebook_userinfo = get_facebook_userinfo(request.json['accessToken'])\n facebook_user_id = facebook_userinfo['id']\n user_name = facebook_userinfo['name']\n\n user = session.query(User).filter_by(facebook_user_id=facebook_user_id).one_or_none()\n if user is None:\n user = User(facebook_user_id)\n session.add(user)\n session.commit()\n\n token = create_token(user.user_id)\n response = jsonify({'token': token, 'username': user_name})\n response.status_code = 201\n return response\n","sub_path":"Code/accountbook/apis/facebook_login_view.py","file_name":"facebook_login_view.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"132335881","text":"# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\n\r\n\r\ndef print_hi(name):\r\n    # Use a breakpoint in the code line below to debug your script.\r\n    print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\r\n\r\n\r\n# Python program to print the maximum number\r\n# from the set of digits of a given number\r\n\r\n\r\n# Function to print maximum number\r\n\r\ndef printMaximum(inum):\r\n    lst_num = [int(x) for x in str(inum)]\r\n    lst_int_sorted = sorted(lst_num,reverse=True)\r\n    lst_str_sorted = map(str,lst_int_sorted)\r\n    str_num = [str(i) for i in lst_str_sorted]\r\n    res = \"\".join(str_num)\r\n    final_number = int(res)\r\n    return final_number\r\n\r\n\r\nif __name__ == '__main__':\r\n    print_hi('PyCharm')\r\n    num = 38293367\r\n\r\n    print(printMaximum(num))\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n","sub_path":"pythonProject/HRank/self_create_max_number.py","file_name":"self_create_max_number.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"182570706","text":"import os\nimport csv\nimport json\norigin = 'c:\\\\Users\\\\Allen\\\\Documents\\\\11th-12th_Grade'\npath = os.path.join(origin, 'Proteomes')\nbacteriadict = {}\nf = open(os.path.join(path, 'bacteria.csv'))\nreader = csv.reader(f)\nfor row in reader:\n if row:\n bacteriadict[row[0]] = row[2]\nf.close()\nwith open(os.path.join(path, 'bacteria.json'), 'w') as f_new:\n json.dump(bacteriadict, f_new)\n","sub_path":"bactdictmaker.py","file_name":"bactdictmaker.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"247811180","text":"\"\"\"\n=============================\nPlotting the Optic Radiations\n=============================\n\npyAFQ is designed to be customizable. This example shows how\nyou can customize it to define a new bundle based\non both waypoint ROIs of your design, as well as endpoint\nROIs of your design.\n\nIn these example, we run pyAFQ with both the custom ROIs and\nthe default waypoint ROIs.\n\"\"\"\n\nimport os.path as op\nfrom AFQ.api.group import GroupAFQ\nimport AFQ.api.bundle_dict as abd\nimport AFQ.data.fetch as afd\nfrom AFQ.definitions.image import LabelledImageFile, RoiImage\nimport AFQ.utils.streamlines as aus\n\nafd.organize_stanford_data(clear_previous_afq=True)\n\nor_rois = afd.read_or_templates()\n\nbundles = abd.BundleDict({\n \"L_OR\": {\n \"include\": [\n or_rois[\"left_OR_1\"],\n or_rois[\"left_OR_2\"]],\n \"exclude\": [\n or_rois[\"left_OP_MNI\"],\n or_rois[\"left_TP_MNI\"],\n or_rois[\"left_pos_thal_MNI\"]],\n \"start\": or_rois['left_thal_MNI'],\n \"end\": or_rois['left_V1_MNI'],\n \"cross_midline\": False,\n },\n \"R_OR\": {\n \"include\": [\n or_rois[\"right_OR_1\"],\n or_rois[\"right_OR_2\"]],\n \"exclude\": [\n or_rois[\"right_OP_MNI\"],\n or_rois[\"right_TP_MNI\"],\n or_rois[\"right_pos_thal_MNI\"]],\n \"start\": or_rois['right_thal_MNI'],\n \"end\": or_rois['right_V1_MNI'],\n \"cross_midline\": False\n }\n})\n\n# combine custom ROIs with default BundleDict ROIs\nbundles = bundles + abd.BundleDict()\n\nbrain_mask_definition = LabelledImageFile(\n suffix=\"seg\",\n filters={\"scope\": \"freesurfer\"},\n exclusive_labels=[0])\n\nmy_afq = GroupAFQ(\n bids_path=op.join(\n afd.afq_home,\n 'stanford_hardi'),\n brain_mask_definition=brain_mask_definition,\n tracking_params={\"n_seeds\": 3,\n \"directions\": \"prob\",\n \"odf_model\": \"CSD\",\n \"seed_mask\": RoiImage()},\n bundle_info=bundles)\n\nmy_afq.export_all()\n\nif len(aus.SegmentedSFT.fromfile(my_afq.export(\"clean_bundles\")[\n \"01\"]).get_bundle(\"L_OR\").streamlines) > 1:\n # create bundle montage and bundle combination\n # across subject/session in MNI\n my_afq.montage(\"L_OR\", (1, 1), \"Axial\")\n my_afq.combine_bundle(\"L_OR\")\nelse:\n raise ValueError(\"No L_OR found\")\n","sub_path":"examples/plot_optic_radiations.py","file_name":"plot_optic_radiations.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"76704433","text":"class Dr:\n def __init__(self,name,xl,fl,jcgj,fyl):\n self.name=name\n self.xl=xl\n self.fl=fl\n self.jcgj=jcgj\n self.fyl=fyl\n def print_dr(self):\n print(self.name,self.xl,self.fl,self)\n\n\nlist01=[\nDr('孙悟空',0,8,6,7),\nDr('鲁班七号',36,2,4,3),\nDr('后羿',45,7,3,4),\nDr('灭霸',29,4,8,6)\n]\ndef find():\n for i in list01:\n if i.name=='灭霸':\n print(i.name)\nfind()\n\ndef died():\n for i in list01:\n if i.xl==0:\n print(i.name)\ndied()\ndef pj():\n sum=0\n for i in list01:\n sum+=i.jcgj\n return sum/len(list01)\n\nprint(pj())\n# def min_10():\n# for i in list01[::-1]:\n# if i.fyl<10:\n# list01.remove(i)\ndef min_10():\n for item in range(len(list01)-1,-1,-1):\n if list01[item].fyl<10:\n # list01.remove(list01[item])\n del list01[item]\ndef add():\n for i in list01:\n i.jcgj+=50\n","sub_path":"mounth001/day10/homework01.py","file_name":"homework01.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"344887163","text":"#!/usr/bin/python\n# -*- coding: utf-8\n\nimport codecs\nimport os\nfrom UnicodeConfigParser import UnicodeConfigParser\n\n\nclass Config():\n\n    def load_config(self, file=None, default=\"\"):\n        # Create an instance of the config-handling class\n        config = UnicodeConfigParser()\n        # Normalize the path to the config file\n        file = os.path.abspath(os.path.normpath(file))\n\n        # If it is not a file or it does not exist\n        if not os.path.exists(file) or not os.path.isfile(file):\n            # Create a new file with the default settings\n            with(codecs.open(file, 'w', 'utf-8')) as f:\n                f.write(default)\n                f.close()\n\n        # Read the file\n        config.readfp(codecs.open(file, 'r', 'utf-8'))\n\n        # Process the file contents\n        for section in config.sections():\n            for key, val in config.items(section):\n                config.set(section, key, val.encode('utf-8'))\n\n        return config\n\n    def save_config(self, cnf=None, file=None):\n        # Normalize the path to the config file\n        file = os.path.abspath(os.path.normpath(file))\n        confFile = codecs.open(file, 'w', 'utf-8')\n        cnf.write(confFile)\n        confFile.close()\n","sub_path":"facedetect/utils/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"197714984","text":"import pandas as pd\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ncan = pd.read_excel('data/canada.xlsx', skiprows=20, sheet_name='Canada by Citizenship')\r\ncan.head(5)\r\n\r\ncan = can.drop(columns=['AREA', 'REG', 'DEV', 'Type', 'Coverage', 'DevName']).\\\r\n    rename(columns={'OdName': 'Pays', 'AreaName': 'Continent', 'RegName': 'Region'})\r\n\r\ncan = can.set_index('Pays')\r\ncan.columns = can.columns.astype(str)\r\n\r\ncan['Total'] = can.iloc[:, 2:].T.sum().T\r\n\r\nhaiti = can.iloc[:, 2:-1].loc['Haiti']\r\n\r\nfig, ax = plt.subplots()\r\nax.set_xlabel('Années')\r\nax.set_ylabel('Nombre de migrants')\r\nplt.title('Immigration depuis Haiti')\r\nplt.text(x=2010, y=6000, va='top', ha='right', s='2010\\nTremblement\\nde terre')\r\nhaiti.plot.line(ax=ax)\r\nplt.show()\r\nplt.close()\r\n\r\nind_chi = can.iloc[:, 2:-1].loc[['India', 'China']]\r\n\r\nfig, ax = plt.subplots()\r\nax.set_xlabel('Années')\r\nax.set_ylabel('Nombre de migrants')\r\nplt.title('Immigrants de Chine et d\\'Inde')\r\nind_chi.T.plot.line(ax=ax)\r\nplt.show()\r\nplt.close()\r\n# Conclusion: India and China both show (logically) growing immigration until 2005,\r\n# the date of the Sino-Indian agreement, which allowed greater flexibility of movement between the two\r\n# territories, and therefore a drop in the 'brain drain' in favour of increased collaboration\r\n\r\ntop5 = can.iloc[:, 5:][:-2].sort_values(by='Total', ascending=False)[:5].iloc[:, :-1]\r\n\r\nfig, ax = plt.subplots(figsize=(15,10))\r\nax.set_xlabel('Années')\r\nax.set_ylabel('Nombre de migrants')\r\nplt.title('Top 5 des pays immigrants au Canada')\r\ntop5.T.plot.line(ax=ax)\r\nplt.show()\r\nplt.close()\r\n\r\nfig, ax = plt.subplots(figsize=(15,10))\r\nax.set_xlabel('Années')\r\nax.set_ylabel('Nombre de migrants')\r\nplt.title('Top 5 des pays immigrants au Canada')\r\ntop5.T.plot.area(ax=ax)\r\nplt.show()\r\nplt.close()\r\n\r\n\r\nbottom5 = can.iloc[:, 2:][:-2].sort_values(by='Total')[:5].iloc[:, :-1]\r\n\r\nfig, ax = plt.subplots(figsize=(13,9))\r\nax.set_xlabel('Années')\r\nax.set_ylabel('Nombre de migrants')\r\nplt.title('Top 5 des pays ayant le moins d\\'immigrants au Canada')\r\nbottom5.T.plot.area(ax=ax, alpha=0.45)\r\nplt.show()\r\nplt.close()\r\n\r\n\r\nfig, ax = plt.subplots(figsize=(13,9))\r\nax.set_xlabel('Nombre de migrants')\r\nax.set_ylabel('Nombre de pays')\r\nplt.title('Immigration des 185 pays vers le Canada en 2013')\r\ncan.iloc[:,-2:-1][:-2].plot.hist(ax=ax, legend=None)\r\nplt.show()\r\nplt.close()\r\n\r\n\r\ndns = can.iloc[:, 2:-1].loc[['Denmark', 'Norway', 'Sweden']]\r\n\r\nfig, ax = plt.subplots(figsize=(13,9))\r\nax.set_xlabel('Nombre de migrants')\r\nax.set_ylabel('Nombre de pays')\r\nplt.title('Immigration des 185 pays vers le Canada en 2013')\r\ns, e = dns.T.plot.hist(ax=ax, bins=15, alpha=0.5, stacked=True).get_xlim()\r\nax.set_xticks(np.arange(s, e, s), minor=False)\r\nplt.show()\r\nplt.close()\r\n\r\nfig, ax = plt.subplots(figsize=(13,9))\r\nax.set_xlabel('Années')\r\nax.set_ylabel('Nombre de migrants')\r\nplt.title('Immigration des 185 pays vers le Canada en 2013')\r\ncan.loc['Iceland'][2:-1].T.plot(ax=ax, alpha=0.5, kind='bar')\r\nplt.annotate(s='', xy=(33,70), xytext=(28,18),\\\r\n             arrowprops=dict(arrowstyle='->', connectionstyle='arc3', lw=1))\r\nplt.text(s='Crise financière de 2008 - 2011', rotation=73, x=31, y=55, ha='right', 
va='top')\r\nplt.show()\r\nplt.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''___'''\r\n","sub_path":"Python - TD/Pandi/canada/can.py","file_name":"can.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"461326662","text":"\n\n#class header\nclass _YANG():\n\tdef __init__(self,): \n\t\tself.name = \"YANG\"\n\t\tself.definitions = [u'in Chinese philosophy, the male principle of the universe, represented as light and positive']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_yang.py","file_name":"_yang.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"642253686","text":"\"\"\"empty message\n\nRevision ID: 2a97fdc521d5\nRevises: eb5bd2cc82cb\nCreate Date: 2020-03-15 23:11:14.390103\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2a97fdc521d5'\ndown_revision = 'eb5bd2cc82cb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('post', 'dislike_count',\n existing_type=sa.INTEGER(),\n nullable=False)\n op.alter_column('post', 'like_count',\n existing_type=sa.INTEGER(),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('post', 'like_count',\n existing_type=sa.INTEGER(),\n nullable=True)\n op.alter_column('post', 'dislike_count',\n existing_type=sa.INTEGER(),\n nullable=True)\n # ### end Alembic commands ###\n","sub_path":"Serhii_Hidenko/l_16_web_api/hw/migrations/versions/2a97fdc521d5_.py","file_name":"2a97fdc521d5_.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"212759179","text":"\"\"\"\nWrite a program that reads a list, determines how many duplicates there are in the given\nlist, and prints the elements that occur three or more times.\n\n\n\"\"\"\n\ndef Duplicati (lista):\n    diz = {}\n    for i in range (len (lista)):\n        chiave = lista [i]\n        valore = diz.get (chiave, 0)\n        valore = valore + 1\n        diz [chiave] = valore\n    return diz\n\nn = int (input ( \"Quanti elementi ha la lista \"))\nl = []\ni = 0\nwhile i < n:\n    x = input (\"Inserisci una lettera \")\n    l = l + [x[0]]\n    i = i + 1\nprint (l)\nprint (Duplicati (l))\n","sub_path":"EserciziAggiuntivi/03b.py","file_name":"03b.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"629793388","text":"#!/usr/bin/env python\n\nimport sqlite3\n\nwith sqlite3.connect(\"../DATA/PRESIDENTS\") as s3conn:\n\n s3cursor = s3conn.cursor()\n\n party_query = '''\n select lname, fname\n from presidents\n where party = ?\n '''\n\n for party in 'Federalist', 'Whig':\n print(party)\n s3cursor.execute(party_query, (party,))\n print(s3cursor.fetchall())\n print()\n\n","sub_path":"Classes/py3interm/EXAMPLES/db_sqlite_parameterized.py","file_name":"db_sqlite_parameterized.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"218686903","text":"import requests\nfrom bs4 import BeautifulSoup # plug-in library\nimport csv\n\n# set the site category that will be parsed\nmain_url = 'https://www.olx.ua/elektronika/kompyutery-i-komplektuyuschie/komplektuyuschie-i-aksesuary/'\n\n\ndef write_csv(result): # writes the scraped items to a CSV file\n    with open('file.csv', 'w') as f:\n        writer = csv.writer(f)\n        writer.writerow(['server=,'])\n        for item in result:\n            writer.writerow((item['name'],\n                             item['price'],\n                             item['address'],\n                             item['url']\n                             ))\n\n\n# helper that strips tabs and newlines from scraped text\ndef clean(text):\n    return text.replace('\\t', '').replace('\\n', '').strip()\n\n\ndef get_page_data(page_url):\n    r = requests.get(page_url)\n    soup = BeautifulSoup(r.content)\n    table = soup.find('table', {'id': 'offers_table'})\n    rows = table.find_all('tr', {'class': 'wrap'})\n    result = []\n    for row in rows:\n        name = clean(row.find('h3').text)\n        url = row.find('h3').find('a').get('href')\n        price = clean(row.find('p', {'class': \"price\"}).text)\n        bottom = row.find('td', {'valign': 'bottom'})\n        address = clean(bottom.find('small', {'class': 'breadcrumb x-normal'}).text)\n        item = {'name': name, 'price': price, 'address': address, 'url': url, }\n        result.append(item)\n    return result\n\ndef main(main_url):\n    r = requests.get(main_url)\n    BeautifulSoup(r.content)\n    result = []\n    for i in range(1,5+1):\n        print('Parsing page # ' + str(i) + ' of ' + str(5))\n        page_url = main_url + '?page=' + str(i)\n        result += get_page_data(page_url)\n    write_csv(result)\n\nif __name__ == '__main__':\n    main(main_url)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"632473969","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport socket\nfrom contextlib import closing\n\n\ndef main():\n    local_address = '192.168.1.5' # IP address of the receiving PC\n    multicast_group = '239.255.42.99' # multicast address\n    port = 1511\n    bufsize = 4096\n\n    with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        sock.bind(('', port))\n        sock.setsockopt(socket.IPPROTO_IP,\n                        socket.IP_ADD_MEMBERSHIP,\n                        socket.inet_aton(multicast_group) +\n                        socket.inet_aton(local_address))\n        # while True:\n        for i in range(1):\n            data = sock.recv(bufsize)\n            print(len(data))\n            print (data)\n    return\n\nif __name__ == '__main__':\n    main()\n","sub_path":"UDP/recv_udp_multicast.py","file_name":"recv_udp_multicast.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"330442391","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api\n\n\nclass MusicSuggestion(models.TransientModel):\n _name = \"oomusic.suggestion\"\n _description = \"Music Suggestion page\"\n\n name_tracks = fields.Char(\"Name Tracks\", default=\"Tracks\")\n name_albums = fields.Char(\"Name Albums\", default=\"Albums\")\n\n track_last_played = fields.Many2many(\n \"oomusic.track\", string=\"Last Played\", compute=\"_compute_track_last_played\"\n )\n track_recently_added = fields.Many2many(\n \"oomusic.track\", string=\"Recently Added Tracks\", compute=\"_compute_track_recently_added\"\n )\n track_random = fields.Many2many(\n \"oomusic.track\", string=\"Random Tracks\", compute=\"_compute_track_random\"\n )\n\n album_recently_added = fields.Many2many(\n \"oomusic.album\", string=\"Recently Added Albums\", compute=\"_compute_album_recently_added\"\n )\n album_random = fields.Many2many(\n \"oomusic.album\", string=\"Random Albums\", compute=\"_compute_album_random\"\n )\n\n @api.depends(\"name_tracks\")\n def _compute_track_last_played(self):\n self.track_last_played = [\n p[\"res_id\"]\n for p in self.env[\"oomusic.preference\"]\n .search(\n [(\"play_count\", \">\", 0), (\"res_model\", \"=\", \"oomusic.track\")],\n order=\"last_play desc\",\n limit=10,\n )\n .read([\"res_id\"])\n ]\n\n @api.depends(\"name_tracks\")\n def _compute_track_recently_added(self):\n self.track_recently_added = self.env[\"oomusic.track\"].search([], order=\"id desc\", limit=10)\n\n @api.depends(\"name_tracks\")\n def _compute_track_random(self):\n folder_sharing = (\n \"inactive\" if self.env.ref(\"oomusic.oomusic_track\").sudo().perm_read else \"active\"\n )\n query = \"SELECT id FROM oomusic_track \"\n if folder_sharing == \"inactive\":\n query += \"WHERE user_id = {} \".format(self.env.uid)\n query += \"ORDER BY RANDOM() \"\n query += \"LIMIT 10\"\n self.env.cr.execute(query)\n res = self.env.cr.fetchall()\n if not res:\n return\n\n self.track_random = [r[0] for r in res]\n\n @api.depends(\"name_albums\")\n def _compute_album_recently_added(self):\n self.album_recently_added = self.env[\"oomusic.album\"].search([], order=\"id desc\", limit=10)\n\n @api.depends(\"name_albums\")\n def _compute_album_random(self):\n folder_sharing = (\n \"inactive\" if self.env.ref(\"oomusic.oomusic_track\").sudo().perm_read else \"active\"\n )\n query = \"SELECT id FROM oomusic_album \"\n if folder_sharing == \"inactive\":\n query += \"WHERE user_id = {} \".format(self.env.uid)\n query += \"ORDER BY RANDOM() \"\n query += \"LIMIT 15\"\n self.env.cr.execute(query)\n res = self.env.cr.fetchall()\n if not res:\n return\n\n self.album_random = [r[0] for r in res]\n","sub_path":"models/oomusic_suggestion.py","file_name":"oomusic_suggestion.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"251208655","text":"import random\r\n\r\n\r\nclass wizard:\r\n    def __init__(self, name, level):\r\n        self.name = name\r\n        self.level = level\r\n        self.life = 3\r\n\r\n    def __repr__(self):\r\n        return \"this is a {} with level {}\".format(self.name, self.level)\r\n\r\n    @classmethod\r\n    def attack(cls, gandalf, active_creature):\r\n        my_roll = random.randint(1, 12)*gandalf.level\r\n        creature_roll = random.randint(1, 12) * active_creature.level\r\n        if my_roll >= creature_roll:\r\n            return True\r\n        elif gandalf.life > 1:\r\n            gandalf.life -= 1\r\n            return False\r\n        else:\r\n            gandalf.life = 0\r\n            return False\r\n\r\n\r\nclass creature:\r\n    def __init__(self, name, level):\r\n        self.name = name\r\n        self.level = level\r\n\r\n    def __repr__(self):\r\n        return \"this is a {} with level {}\".format(self.name, self.level)\r\n","sub_path":"PycharmProjects/pyStart/wizardGame_logic.py","file_name":"wizardGame_logic.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"254689038","text":"#dictionary with the value of each Morse code\ncod = {'=.===' : 'a' , '===.=.=.=' : 'b' , '===.=.===.=' : 'c' , '===.=.=' : 'd' , '=' : 'e' , '=.=.===.=' : 'f' , '===.===.=': 'g' , '=.=.=.=':'h' , '=.=' : 'i' , '=.===.===.===' : 'j' , '===.=.===': 'k' , '=.===.=.=' : 'l' , '===.===' : 'm', '===.=': 'n' , '===.===.===' : 'o' , '=.===.===.=' : 'p' , '===.===.=.===' : 'q' , '=.===.=' : 'r' , '=.=.=' : 's' , '===' : 't' , '=.=.===' : 'u' , '=.=.=.===' : 'v' , '=.===.===' : 'w' , '===.=.=.===' : 'x', '===.=.===.===' : 'y' , '===.===.=.=' : 'z' }\n\n#variable that stores the number of test cases\nj = int(input())\n\n#loop that runs once per test case\nfor k in range(j):\n\t#variable that receives the code\n\tx = input()\n\t\n\t#split the code into an array using '.......' as the separator\n\t#this gives an array of 'Morse words'\n\tx = x.split('.......')\n\n\t#iterate over each word of the array\n\tfor palavra in range(len(x)):\n\t\t#split the word into 'characters'\n\t\tb = x[palavra]\n\t\tb = b.split('...')\n\n\t\t#convert each character of the word into a letter of the alphabet\n\t\tfor i in range(len(b)):\n\t\t\t#look up in the dictionary the character whose key equals the current code of array X\n\t\t\tprint(cod[b[i]] , end='')\n\n\t\t#print a space if this is not the last word\n\t\tif palavra != len(x) - 1:\n\t\t\tprint(end=' ')\n\n\t# just a newline\n\tprint()","sub_path":"python/2338.py","file_name":"2338.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"299502903","text":"import difflib\n\ndef normal_leven(str1, str2): # implemented with a one-dimensional array\n    len_str1 = len(str1) + 1\n    len_str2 = len(str2) + 1\n    # create matrix\n    matrix = [0 for n in range(len_str1 * len_str2)]\n    # init x axis\n    for i in range(len_str1):\n        matrix[i] = i\n    # init y axis\n    for j in range(0, len(matrix), len_str1): # range can step over values, e.g. range(0,42,6) gives [0,6,12,18,24,30,36]\n        if j % len_str1 == 0:\n            matrix[j] = j // len_str1 # floor division, take the quotient\n\n    for i in range(1, len_str1):\n        for j in range(1, len_str2):\n            if str1[i - 1] == str2[j - 1]:\n                cost = 0\n            else:\n                cost = 1\n            matrix[j * len_str1 + i] = min(matrix[(j - 1) * len_str1 + i] + 1,\n                                           matrix[j * len_str1 + (i - 1)] + 1,\n                                           matrix[(j - 1) * len_str1 + (i - 1)] + cost)\n\n    return matrix[-1]\n\n\ndef edit(str1, str2): # same idea as the algorithm above, implemented with a two-dimensional array; more intuitive and concise\n    matrix = [[i + j for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]\n\n    for i in range(1, len(str1) + 1):\n        for j in range(1, len(str2) + 1):\n            if str1[i - 1] == str2[j - 1]:\n                d = 0\n            else:\n                d = 1\n            matrix[i][j] = min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + d)\n\n    return matrix[len(str1)][len(str2)]\n\n\ndef difflib_leven(str1, str2):\n    leven_cost = 0\n    s = difflib.SequenceMatcher(None, str1, str2)\n    for tag, i1, i2, j1, j2 in s.get_opcodes():\n        # print('{:7} a[{}: {}] --> b[{}: {}] {} --> {}'.format(tag, i1, i2, j1, j2, str1[i1: i2], str2[j1: j2]))\n\n        if tag == 'replace':\n            leven_cost += max(i2 - i1, j2 - j1)\n        elif tag == 'insert':\n            leven_cost += (j2 - j1)\n        elif tag == 'delete':\n            leven_cost += (i2 - i1)\n    return leven_cost\n\n\nif __name__ == '__main__':\n    print(normal_leven('a','cba'))\n    print(normal_leven('ab','cba'))\n    print(normal_leven('11','cba'))\n    print(normal_leven('1','cba'))\n    print(normal_leven('batyu','beauty'))\n\n    print(\"~~~~~~~~~~~~~~~~~\")\n    print(difflib_leven('a','cba'))\n    print(difflib_leven('ab', 'cba'))\n    print(difflib_leven('11', 'cba'))\n    print(difflib_leven('1', 'cba'))\n","sub_path":"suanfa/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"391518225","text":"# Plotter for truth LR information\n# !!! - Fraction of tracks with wrong LR guess !!!\n# - Fraction of tracks with p-values below 5%\n# - Reduced chi^2\n\n\n#\n# ONLY USE THIS PLOTTER FOR TRUTH STUFF! \n#\n\n\nfrom ROOT import TFile, TCanvas, TH1F, TH1D, TLegend, TAxis, TAttMarker, TGraph, TGraphErrors\nfrom ROOT import gROOT\nfrom array import array\n\ndef Ratio(wrongHist, allHist, rebin):\n\n\t# Clone to be safe\n\tallHist.Rebin(rebin)\n\twrongHist.Rebin(rebin)\n\n\tratio = wrongHist.Clone(\"ratio\")\n\n\tratio.Divide(allHist)\n\n\treturn ratio\n\n\ndef Mean(all_):\n\n\tmeans_ = []\n\n\tfor ihist in range(len(all_)):\n\n\t\tmeans_.append(all_[ihist].GetMean())\n\n\treturn means_\n\n# Return the number of tracks with a p-value less than 5%\n\ndef pValFrac(all_):\n\n \t# Loop over the bins\n \t# Get bin x-values\n \t# if x-value is < 5%, sum the bin contents\n \t# Divide by total entries\n \ttracksLessThan_ = []\n\n \tfor ihist in range(0,len(all_)):\n\n \t\ttracks = 0\n\n \t\tfor bin in range(0, all_[ihist].GetNbinsX()):\n\n \t\t\tpValue = all_[ihist].GetBinCenter(bin+1)\n \t\t\t# print(\"p-value\", pValue)\n \t\t\t# binVal = hist.GetBinContent(bin+1) + binVal\n \t\t\tif(pValue < 0.05):\n\n \t\t\t\ttracks = all_[ihist].GetBinContent(bin+1) + tracks\n \t\t\t\t# fraction = binVal / hist.GetEntries()\n\n \t\t# print(\"tracks \",tracks)\n\n \t\ttracksLessThan_.append(tracks / all_[ihist].GetEntries())\n\n \treturn tracksLessThan_\n\ndef TotalTracks(all_):\n\ttotalTracks_ = []\n\tfor i in range(len(all_)):\n\t\ttotalTracks_.append(all_[i].GetEntries())\n\treturn totalTracks_\n\ndef Frac(rightOrWrong_, all_):\n\tfrac_ = []\n\tfor i in range(0,len(all_)):\n\t\t# print(\"rightOrWrong_[i].GetEntries() \",rightOrWrong_[i].GetEntries())\n\t\t# print(\"all_[i].GetEntries() \",all_[i].GetEntries())\n\t\tfrac_.append(rightOrWrong_[i].GetEntries() / all_[i].GetEntries() )\n\treturn frac_\n\ndef DrawHist1D(hist,title,fname,i): \n\n\tc1 = TCanvas(\"c1\",\"\",800,600)\n\n\t# hist.Rebin(4)\n\thist.SetStats(0)\n\n\thist.SetTitle(title)\n\t\t\t\n\thist.GetXaxis().SetTitleSize(.04)\n\thist.GetYaxis().SetTitleSize(.04)\n\thist.GetXaxis().SetTitleOffset(1.1)\n\thist.GetYaxis().SetTitleOffset(1.25)\n\thist.GetXaxis().CenterTitle(1)\n\thist.GetYaxis().CenterTitle(1)\n\t# hist.GetYaxis().SetRangeUser(.5,.7)\n\tif(i==0): hist.GetXaxis().SetRangeUser(-5,505)\n\tif(i==1): hist.GetXaxis().SetRangeUser(-5,2505)\n\n\t# hist.GetYaxis().SetRangeUser(0.50,0.70)\n\thist.GetYaxis().SetMaxDigits(4)\n\n\thist.SetLineWidth(3)\n\thist.SetLineColor(1)\n\n\thist.Draw()\n\tc1.SaveAs(fname)\n\ndef DefineScat(y_, x_):\n\n\tx, y, ex, ey = array('d'), array('d'), array('d'), array('d')\n\n\tn = len(y_)\n\n\tfor i in range(0,n):\n\n#\t\tfrac = wrong_[i].GetEntries() / all_[i].GetEntries()\n\t\tx.append(x_[i])\n\t\tex.append(0)\n\t\ty.append(y_[i])\n\t\tey.append(0)\n\n\t\t# print(str(DCAs_[i])+\" * \"+str(y_)+\" * \"+str(wrong_[i].GetEntries())+\" * \"+str(all_[i].GetEntries()))\n\n\treturn TGraphErrors(n,x,y,ex,ey)\n\n\ndef DrawScat(plot, title, fname):\n\n\tc2 = TCanvas(\"c2\",\"\",800,600)\n\n\tplot.SetTitle(title)\t\t\t\n\tplot.GetXaxis().SetTitleSize(.04)\n\tplot.GetYaxis().SetTitleSize(.04)\n\tplot.GetXaxis().SetTitleOffset(1.1)\n\tplot.GetYaxis().SetTitleOffset(1.25)\n\tplot.GetXaxis().CenterTitle(1)\n\tplot.GetYaxis().CenterTitle(1)\n\t# plot.GetYaxis().SetRangeUser(0.086,0.106)\\\n\t# plot.GetYaxis().SetRangeUser(0.5,0.7)\n\t# 
plot.GetXaxis().SetRangeUser(-5,2500)\n\tplot.GetYaxis().SetMaxDigits(4)\n\t#plot.SetMarkerSize(3)\n\t#plot.SetLineWidth(3)\n\tplot.SetMarkerStyle(20) # Full circle\n\t#plot.SetMarkerColor(4)\n\t#plot.SetLineColor(4)\n\tplot.Draw(\"AP\")\n\tc2.SaveAs(fname)\n\n\treturn\n\ndef DrawScatOverlay(plot1, plot2, title, fname):\n\n\tc2 = TCanvas(\"c2\",\"\",800,600)\n\n\tleg = TLegend(0.11,0.69,0.69,0.89)\n\tleg.SetBorderSize(0)\n\n\tplot1.SetTitle(title)\t\t\t\n\tplot1.GetXaxis().SetTitleSize(.04)\n\tplot1.GetYaxis().SetTitleSize(.04)\n\tplot1.GetXaxis().SetTitleOffset(1.1)\n\tplot1.GetYaxis().SetTitleOffset(1.25)\n\tplot1.GetXaxis().CenterTitle(1)\n\tplot1.GetYaxis().CenterTitle(1)\n\t# plot.GetYaxis().SetRangeUser(0.086,0.106)\n\t# plot1.GetXaxis().SetRangeUser(-5,505)\n\tplot1.GetYaxis().SetMaxDigits(4)\n\tplot1.GetYaxis().SetRangeUser(0,1)\n\t#plot.SetMarkerSize(3)\n\t#plot.SetLineWidth(3)\n\tplot1.SetMarkerStyle(20) # Full circle\n\tplot2.SetMarkerStyle(24) # Open circle\n\t#plot.SetMarkerColor(4)\n\t#plot.SetLineColor(4)\n\n\tleg.AddEntry(plot1, \"Fraction of tracks with a wrong LR choice\")\n\tleg.AddEntry(plot2, \"Fraction of tracks with an ambiguous LR choice\")\n\t\n\tplot1.Draw(\"AP\")\n\tplot2.Draw(\"P same\")\n\n\tleg.Draw(\"same\")\n\n\tc2.SaveAs(fname)\n\n\treturn\n\n# Wrap main in a function\ndef main():\n\n\tshortFile = TFile.Open(\"~/Documents/gm2/LowDCAs/ROOT/LowDCAsPlots500_main.root\")\n\t#shortFile = TFile.Open(\"~/Documents/gm2/LowDCAs/ROOT/LowDCAs_SimScanPlotsFull_Ambiguous.root\")\n\tlongFile = TFile.Open(\"~/Documents/gm2/LowDCAs/ROOT/LowDCAsPlotsExtreme-25-11-19.root\")\n\t\n\t# DCA threshold arrays, short and long\n\tDCAs_ = [list(range(0,525,25)), list(range(0,2600,100))]\n\t# \n\t# histType = [\"Run\",\"pValues\"]\n\t# nameType = [\"Fraction of wrong tracks\",\"Fraction of tracks with p-value < 5%\"]\n\t\n\t# Loop over histogram types\n\n\tfiles_ = [shortFile, longFile]\n\n\t#fileType = \".png\"\n\tfileType = \".pdf\"\n\n\t#\n\t# Take ratio of all and wrong DCAs\n\t# \n\n\n\t# ratio_ = []\n\n\t# for i_ratio in range(0,2):\n\n\t# \tratio = Ratio(files_[i_ratio].Get(\"plots0/WrongHits/DCA\"), files_[i_ratio].Get(\"plots0/AllHits/DCA\"), 1)\n\t# \t#ratio = Ratio(files_[0].Get(\"plots0/WrongHits/DCA\"), files_[0].Get(\"plots0/AllHits/DCA\"), 1)\n\tDrawHist1D(files_[0].Get(\"plots0/DCA\"), \";Measured DCA [#mum];Hits\", \"../TestPlots/DCAsRecoShort\"+str(0)+fileType, 0)\n\tDrawHist1D(files_[1].Get(\"plots0/DCA\"), \";Measured DCA [#mum];Hits\", \"../TestPlots/DCAsRecoLong\"+str(0)+fileType, 1)\n\t# \tDrawHist1D(ratio, \";Measured DCA [#mum];Fraction of hits with a wrong LR choice\", \"../HitLevelPlots/DCARatio\"+str(i_ratio)+fileType, i_ratio)\n\t# DrawHist1D(ratio, \";Measured DCA [#mum];Fraction of tracks with a wrong LR choice\", \"../TrackLevelPlots/DCARatio\"+fileType)\n\n\t# fracHits = []\n\t\n\t# Loop over DCA scan\n\tfor ifile in range(0,2):\n\t\n\t\tprint(\"ifile\",ifile)\n\t\n\t\tallHitsTracks_ = []\n\t\tallHitsPValues_ = []\n\t\tallHitsChiSqrDof_ = []\n\n\t\t# wrongHitsTracks_ = []\n\t\t# wrongHitsPValues_ = []\n\t\t# wrongHitsChiSqrDof_ = []\n\n\t\t# rightHitsTracks_ = []\n\t\t# rightHitsPValues_ = []\n\t\t# rightHitsChiSqrDof_ = []\n\n\t\t# ambiguousHitsTracks_ = []\n\t\t# ambiguousHitsPValues_ = []\n\t\t# ambiguousHitsChiSqrDof_ = []\n\t\t\n\t\tfor ihist in range(0,len(DCAs_[ifile])):\n\t\n\t\t\t# print(files_[ifile])\n\t\t\t# 
print(DCAs_[ifile])\n\t\n\t\t\tallHitsTracks_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/Run\"))\n\t\t\tallHitsPValues_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/pValues\"))\n\t\t\tallHitsChiSqrDof_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/ChiSqrDof\"))\n\n\t\t\t# wrongHitsTracks_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/WrongHits/Run\"))\n\t\t\t# wrongHitsPValues_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/WrongHits/pValues\"))\n\t\t\t# wrongHitsPValues_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/WrongHits/ChiSqrDof\"))\n\n\t\t\t# rightHitsTracks_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/RightHits/Run\"))\t\n\t\t\t# rightHitsPValues_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/RightHits/pValues\"))\n\t\t\t# rightHitsChiSqrDof_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/RightHits/ChiSqrDof\"))\n\n\t\t\t# ambiguousHitsTracks_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/AmbiguousHits/Run\"))\t\n\t\t\t# ambiguousHitsPValues_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/AmbiguousHits/pValues\"))\n\t\t\t# ambiguousHitsChiSqrDof_.append(files_[ifile].Get(\"plots\"+str(ihist)+\"/AmbiguousHits/ChiSqrDof\"))\n\n\t\t# print(DCAsArray[ihist],pValFrac(allHits[ihist]))\n\t\n\t\t# print(\"len(allHits) \"+str(len(allHits)))\n\t\t# print(\"allHits[0].GetMean() \"+str(allHits[0].GetMean()))\n\t\n\t\n\t\t#typeFlag = 0\n\t\t#if (itype > 1): typeFlag = 1\n\t\n\t\t#mean = \"Mean \"\n\t\t#if (typeFlag == 1): mean = \"\" \n\n\t\t\n\t\t#print(\"Threshold [um] * \"+histType[itype])\n\t\t# DrawScat(histsArrayData, DCAsArray, typeFlag, \";Low DCA threshold [#mum];\"+mean+nameType[itype],\"../Plots-25-11-19/\"+histType[itype]+\"Scat500_DATA.pdf\")\n\t\tif(ifile == 0):\n\n\t\t\t# All tracks\n\t\t\tDrawScat(DefineScat(TotalTracks(allHitsTracks_), DCAs_[ifile]), \";Low DCA threshold [#mum];Total number of tracks\", \"../TestPlots/TotalTrackRecoShort\"+fileType)\n\t\t\tDrawScat(DefineScat(pValFrac(allHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with p-value < 5%\", \"../TestPlots/pValueRecoFracShort\"+fileType)\n\t\t\t# DrawScat(DefineScat(pValFrac(wrongHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of wrong LR tracks with p-value < 5%\", \"../TestPlots/pValueWrongFracShort\"+fileType)\n\t\t\t# DrawScat(DefineScat(pValFrac(ambiguousHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of ambiguous LR tracks with p-value < 5%\", \"../TestPlots/pValueAmbiguousFracShort\"+fileType)\n\t\t\tDrawScat(DefineScat(Mean(allHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Mean p-value\", \"../TestPlots/pValueMeansRecoShort\"+fileType)\n\t\t\t# Fractions of each type of track\n\t\t\t# DrawScat(DefineScat(Frac(wrongHitsTracks_, allHitsTracks_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with a wrong LR choice\", \"../TestPlots/FracWrongTracksShort\"+fileType)\n\t\t\t# DrawScat(DefineScat(Frac(rightHitsTracks_, allHitsTracks_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with only correct LR choices\", \"../TestPlots/FracRightTracksShort\"+fileType)\n\t\t\t# DrawScat(DefineScat(Frac(ambiguousHitsTracks_, allHitsTracks_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with an ambiguous LR choice\", \"../TestPlots/FracAmbiguousTracksShort\"+fileType)\n\n\t\t\t# DrawScatOverlay(DefineScat(Frac(wrongHitsTracks_, allHitsTracks_), DCAs_[ifile]),DefineScat(Frac(ambiguousHitsTracks_, allHitsTracks_), DCAs_[ifile]),\";Low DCA threshold [#mum];Fraction 
of tracks\",\"../TestPlots/FracWrongAmbTracksShort\"+fileType)\n\t\t\n\t\telse:\n\n\t\t\t# print(\"Not using long scan, sorry too complicated\")\n\t\t\t# All tracks\n\t\t\tDrawScat(DefineScat(TotalTracks(allHitsTracks_), DCAs_[ifile]), \";Low DCA threshold [#mum];Total number of tracks\", \"../TestPlots/TotalTracksRecoLong\"+fileType)\n\t\t\tDrawScat(DefineScat(Mean(allHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Mean p-value\", \"../TestPlots/pValueMeansRecoLong\"+fileType)\n\t\t\t# DrawScat(DefineScat(Mean(allHitsChiSqrDof_), DCAs_[ifile]), \";Low DCA threshold [#mum];Mean #chi^{2}/ndf\", \"../TestPlots/chiSqrDofLong\"+fileType)\n\t\t\tDrawScat(DefineScat(pValFrac(allHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with p-value < 5%\", \"../TestPlots/pValueFracRecoLong\"+fileType)\n\t\t\t# # DrawScat(DefineScat(pValFrac(wrongHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of wrong LR tracks with p-value < 5%\", \"../TestPlots/pValueWrongFracLong\"+fileType)\n\t\t\t# # DrawScat(DefineScat(pValFrac(ambiguousHitsPValues_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of ambiguous LR tracks with p-value < 5%\", \"../TestPlots/pValueAmbiguousFracLong\"+fileType)\n\t\t\t# \n\t\t\t# # Fractions of each type of track\n\t\t\t# DrawScat(DefineScat(Frac(wrongHitsTracks_, allHitsTracks_), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with a wrong LR choice\", \"../TestPlots/FracWrongTracksLong\"+fileType)\n\t\t\t# DrawScat(DefineScat(Frac(rightHitsTracks_, allHitsTracks_, ), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with only right LR choices\", \"../TestPlots/FracRightTracksLong\"+fileType)\n\t\t\t# DrawScat(DefineScat(Frac(ambiguousHitsTracks_, allHitsTracks_, ), DCAs_[ifile]), \";Low DCA threshold [#mum];Fraction of tracks with an ambiguous LR choice\", \"../TestPlots/FracAmbiguousTracksLong\"+fileType)\n\t\n\t\n\t\n\t# draw1D(histsArrayCut, DCAsArray, \";p-value;Tracks / 0.005\",\"../Plots/pValues1DExtreme.pdf\")\n\t# drawScat(histsArrayCut, DCAsArray, \";Low DCA Threshold [#mum];Mean p-value\",\"../Plots/pValuesScatExtreme.pdf\")\n\n# Execute main\nif __name__ == \"__main__\":\n\tmain()\n\n\n\n\n\n\n\n\n","sub_path":"plotters/attic/MainRecoPlotter.py","file_name":"MainRecoPlotter.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"397691495","text":"from feature_extraction.FactTree import LogicParser, WordStack\nfrom feature_extraction.Models import clause, predicate, compliment\nfrom feature_extraction.Models import proposition\n\n\nclass Proposition():\n #####################################\n # CONSTRUCTOR\n def __init__(self):\n self.__proposition_lst = []\n self.__stack = WordStack.Stack()\n self.__predicates = LogicParser.Tree()\n\n #####################################\n # RESET\n def __reset(self):\n self.__proposition_lst = []\n self.__stack.clear()\n\n #############################################\n # BUILD\n # -------------------------------------------\n # Parses sentence into logical objects\n # clause / predicates\n # From the list of logical objects, make\n # correlation between them\n #\n # sentence: string\n # draw: boolean\n def build(self, sentence, draw=False):\n self.__reset()\n self.__predicates.build(sentence, draw)\n predicate_lst = self.__predicates.get_logic_model()\n self.__create_logic(predicate_lst)\n\n #############################################\n # CREATE LOGIC\n # -------------------------------------------\n # Will perform correct operation depending on\n # logical object type\n # clause, predicate, or compliment\n #\n # logic_lst: list\n def __create_logic(self, logic_lst):\n for i in range(len(logic_lst)):\n if isinstance(logic_lst[i], clause.Clause):\n self.__clause_operation(logic_lst[i], logic_lst, i)\n elif isinstance(logic_lst[i], predicate.Predicate):\n self.__predicate_operation(logic_lst[i], logic_lst, i)\n elif isinstance(logic_lst[i], compliment.Compliment):\n self.__compliment_operation(logic_lst[i], logic_lst, i)\n\n #############################################\n # CLAUSE OPERATION\n # -------------------------------------------\n # 1- If no predicate then append to clause list\n # 2- If predicate before and after the word then\n # create relationship and append word to clause stack\n # 3- else append to compliment stack\n # 4- if last word in the list then extract features\n #\n # logic: Model.AbstractModel\n # logic_lst: list[Model.AbstractMode]\n # index: integer\n def __clause_operation(self, logic, logic_lst, index):\n if self.__stack.peek_predicate() is None:\n self.__stack.clause_stack.append(logic)\n\n elif isinstance(self.__stack.next(logic_lst, index), predicate.Predicate):\n if isinstance(self.__stack.previous(logic_lst, index), predicate.Predicate):\n self.__stack.compliment_stack.append(logic)\n self.__extract_relations()\n self.__stack.clause_stack.append(logic)\n return\n\n else:\n self.__stack.compliment_stack.append(logic)\n\n if self.__stack.next(logic_lst, index) is None:\n self.__extract_relations()\n\n #############################################\n # PREDICATE OPERATION\n # -------------------------------------------\n # 1- if no predicate in stack then append predicate\n # 2- else pop predicate and merge them into 1 phrase\n # append new predicate\n # 3- if last word in list then extract features\n #\n # logic: Model.AbstractModel\n # logic_lst: list[Model.AbstractMode]\n # index: integer\n def __predicate_operation(self, logic_model, logic_lst, index):\n if self.__stack.peek_predicate() is None:\n self.__stack.predicate_stack.append(logic_model)\n\n else:\n model = self.__stack.predicate_stack.pop()\n model.merge(logic_model)\n self.__stack.predicate_stack.append(model)\n\n if self.__stack.next(logic_lst, index) is None:\n self.__extract_relations()\n\n #############################################\n # COMPLIMENT OPERATION\n # 
-------------------------------------------\n    # 1- if last word in list then extract features\n    # 2- if word in between 2 predicates then extract\n    #    features and append word to clause\n    # 3- else append to compliment stack\n    #\n    # logic: Model.AbstractModel\n    # logic_lst: list[Model.AbstractMode]\n    # index: integer\n    def __compliment_operation(self, logic, logic_lst, index):\n        if self.__stack.next(logic_lst, index) is None:\n            self.__stack.compliment_stack.append(logic)\n            self.__extract_relations()\n\n        elif isinstance(self.__stack.next(logic_lst, index), predicate.Predicate):\n            if isinstance(self.__stack.previous(logic_lst, index), predicate.Predicate):\n                self.__stack.compliment_stack.append(logic)\n                self.__extract_relations()\n                self.__stack.clause_stack.append(logic)\n\n        else:\n            self.__stack.compliment_stack.append(logic)\n\n    #############################################\n    # EXTRACT RELATIONS\n    # -------------------------------------------\n    # 1- Pop predicate\n    # 2- For every clause map it to its compliments\n    # 3- clear stack\n    def __extract_relations(self):\n        try:\n            p = self.__stack.predicate_stack.pop()\n        except IndexError:\n            return\n        if len(self.__stack.compliment_stack) == 0:\n            # pass the popped predicate model, not the imported 'predicate' module\n            self.__extract_double(p)\n        else:\n            self.__extract_triplet(p)\n\n    #############################################\n    # EXTRACT DOUBLE\n    # -------------------------------------------\n    # If a clause has a predicate without a\n    # compliment\n    #\n    # predicate: tuple(word, tag)\n    def __extract_double(self, p):\n        for c in self.__stack.clause_stack:\n            model = proposition.PropositionModel()\n            model.clause = c\n            model.predicate = p\n            self.__proposition_lst.append(model)\n        self.__stack.clear()\n\n    #############################################\n    # EXTRACT TRIPLET\n    # -------------------------------------------\n    # If a clause has a predicate with a compliment\n    #\n    # predicate: tuple(word, tag)\n    def __extract_triplet(self, p):\n        for c in self.__stack.clause_stack:\n            for cmp in self.__stack.compliment_stack:\n                model = proposition.PropositionModel()\n                model.clause = c\n                model.predicate = p\n                model.compliment = cmp\n                self.__proposition_lst.append(model)\n        self.__stack.clear()\n\n    #############################################\n    # GET PROPOSITION LIST\n    def get_proposition_lst(self):\n        return self.__proposition_lst.copy()\n\n\nif __name__ == \"__main__\":\n    p = Proposition()\n    p.build(\"the months remaining on my lease is 4\", False)\n    lst = p.get_proposition_lst()\n    for e in lst:\n        print(e)\n        print()\n","sub_path":"src/nlp_service/feature_extraction/FactTree/PropositionLogic.py","file_name":"PropositionLogic.py","file_ext":"py","file_size_in_byte":6829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"519484031","text":"class gun:\n def __init__(self, name, bullet):\n self.name = name\n self.bullet = bullet\n\n def shoot(self):\n if bullet >= 1:\n print('枪%s发射出了一发子弹!'%self.name)\n else:\n print('枪%s里没有子弹,需要装弹!'%self.name)\n\nclass soldier:\n def __init__(self, name):\n self.name = name\n self.gun_list = []\n\n def get_gun(self, gun):\n print('士兵%s获得了一把%s!'%(self.name, gun.name))\n self.gun_list.append(gun.name)\n\n def fire(self, gun):\n if gun.name in self.gun_list:\n if gun.bullet >= 1:\n print('%s使用%s发射了一发子弹!'%(self.name, gun.name))\n gun.bullet -= 1\n else:\n print('%s中没有子弹,%s无法发射子弹!'%(gun.name, self.name))\n else:\n print('士兵%s暂未获得该枪'%self.name)\n\n def reload(self, gun):\n if gun.name in self.gun_list:\n gun.bullet = 10\n else:\n print('士兵%s暂未获得该枪'%self.name)\n\ndef main():\n AK47 = gun('AK47', 0)\n Rien = soldier('Rien')\n Rien.fire(AK47)\n Rien.get_gun(AK47)\n Rien.fire(AK47)\n Rien.reload(AK47)\n Rien.fire(AK47)\n\nif __name__ == '__main__':\n main()","sub_path":"homework/soldier.py","file_name":"soldier.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"186092719","text":"import pytest\nimport ulmo\nimport test_util\nimport filecmp\n\ndef test_get_attributes():\n with test_util.mocked_urls('usgs/eros/attributes.json'):\n attrs = ulmo.usgs.eros.get_attribute_list()\n assert len(attrs) == 38\n\n\ndef test_get_themes():\n with test_util.mocked_urls('usgs/eros/themes.json'):\n themes = ulmo.usgs.eros.get_themes()\n assert len(themes) == 22\n\n\ndef test_get_available_datasets():\n with test_util.mocked_urls('usgs/eros/datasets.json'):\n bbox = (-78, 32, -76, 36)\n datasets = ulmo.usgs.eros.get_available_datasets(bbox, attrs='AREA_NAME')\n assert len(datasets) >= 30\n\n\ndef test_get_available_format():\n with test_util.mocked_urls('usgs/eros/formats_l1l.json'):\n formats = ulmo.usgs.eros.get_available_formats('L1L')\n assert len(formats) == 1\n\n\ntest_sets = [\n #{'product_key': 'LC6',\n # 'bbox': (-78, 32, -76, 36),\n # 'number_of_tiles': 5,\n #'fmt_file': 'usgs/eros/formats_l1l.json',\n #'file': 'usgs/eros/availability_bbox_test_set_1.json',\n #},\n]\n\n\ndef test_get_raster_availability():\n for dataset in test_sets:\n #file_urls = {\n # 'http://nimbus.cr.usgs.gov/index_service/Index_Service_JSON2.asmx/return_Download_Options': dataset['fmt_file'],\n # 'http://extract.cr.usgs.gov/requestValidationServiceClient/sampleRequestValidationServiceProxy/getTiledDataDirectURLs2.jsp?TOP=36.0&BOTTOM=32.0&LEFT=-78.0&RIGHT=-76.0&LAYER_IDS=L1L02&JSON=true': dataset['file'],\n #}\n #with test_util.mocked_urls(file_urls):\n locs = ulmo.usgs.eros.get_raster_availability(dataset['product_key'], dataset['bbox'])\n assert len(locs['features'])==dataset['number_of_tiles']\n\n\ndef test_get_raster():\n product_key = 'NCP'\n bbox = (-97.992, 31.991, -97.991, 31.992)\n #availability_url = 'http://extract.cr.usgs.gov/requestValidationServiceClient/sampleRequestValidationServiceProxy/getTiledDataDirectURLs2.jsp?TOP=31.992&BOTTOM=31.991&LEFT=-97.992&RIGHT=-97.991&LAYER_IDS=NCP&JSON=true'\n #jp2_url = 'http://tdds2.cr.usgs.gov/lta5/ortho/naip/compressed/TX/2012/201204_texas_naip_1x0000m_cnir/31097/m_3109701_nw_14_1_20120725_20121015.jp2'\n format_url = 'http://nimbus.cr.usgs.gov/index_service/Index_Service_JSON2.asmx'\n availability_url = 'http://extract.cr.usgs.gov/requestValidationServiceClient/sampleRequestValidationServiceProxy/getTiledDataDirectURLs2.jsp'\n jp2_url = 'http://tdds2.cr.usgs.gov/lta5/ortho/naip/compressed/TX/2012/201204_texas_naip_1x0000m_cnir/31097/m_3109701_nw_14_1_20120725_20121015.jp2'\n url_files = {\n format_url: 'usgs/eros/formats_ncp.json',\n availability_url: 'usgs/eros/get_raster_test_availability.json',\n jp2_url: 'usgs/eros/m_3109701_nw_14_1_20120725_20121015.jp2',\n }\n\n test_file = test_util.get_test_file_path('usgs/eros/m_3109701_nw_14_1_20120725_20121015.jp2')\n with test_util.temp_dir() as data_dir:\n with test_util.mocked_urls(url_files):\n locs = ulmo.usgs.eros.get_raster(product_key, bbox, path=data_dir)\n raster_tile = locs['features'][0]['properties']['file']\n assert filecmp.cmp(raster_tile, test_file)\n","sub_path":"test/usgs_eros_test.py","file_name":"usgs_eros_test.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"173465808","text":"import turtle\n\nturtle.speed('fast')\ncolors = ['red', 'green', 'blue', 'purple']\nlength = int(input())\nangle = int(input())\n\n\nfor a in range(4):\n turtle.color(colors[a])\n x = 1\n for b in range(1, length):\n turtle.forward(10)\n turtle.right(angle - x)\n x += 1\n","sub_path":"SoftUni-Level4/6.PythonBasics/Homeworks/1.IntroToPython/5.TurtleMovementLimitedIterations.py","file_name":"5.TurtleMovementLimitedIterations.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"270694071","text":"from mysql.connector import MySQLConnection, Error\nfrom dbconfig import read_config\n\ndef query_with_fetchone(text = \"SELECT * FROM cdr OREDER BY uniqueid DESC LIMIT 10\"):\n fetcher = []\n\n try:\n dbconfig = read_config()\n conn = MySQLConnection(**dbconfig)\n cursor = conn.cursor()\n cursor.execute(text)\n\n row = cursor.fetchone()\n\n while row is not None:\n fetcher.append(row)\n row = cursor.fetchone()\n\n except Error as err:\n print(err)\n\n finally:\n cursor.close()\n conn.close()\n\n return fetcher\n\ndef query_with_fetchall(text = \"SELECT * FROM cdr OREDER BY uniqueid DESC LIMIT 10\"):\n\n try:\n dbconfig = read_config()\n conn = MySQLConnection(**dbconfig)\n cursor = conn.cursor()\n cursor.execute(text)\n\n rows = cursor.fetchall()\n\n print('Total Row(s):', cursor.rowcount)\n\n except Error as err:\n print(err)\n\n finally:\n cursor.close()\n conn.close()\n\n return rows\n\nif __name__ == '__main__':\n print(query_with_fetchall(\n text='SELECT MAX(calldate) as vremya, src, dst, greatest(cast(src AS SIGNED), cast(dst AS SIGNED)) as caller, calldate, disposition FROM cdr WHERE calldate>\"2016-07-06 18:00:00\" AND calldate<\"2016-07-07\" AND dst!=\"909\" AND src!=\"\" GROUP BY caller ORDER BY vremya;'))","sub_path":"mysql_query.py","file_name":"mysql_query.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"372097653","text":"\"\"\"\n PandaFunctions.py\n ~~~~~~~~~~~~~~~~~\n Created: Aug 19, 2015\n \t By: Yifan Dai\n\n Modified: Mar 23, 2016\n \t By: Leo Kim\n Reason: Upgrade to Python 3.4\n \t\t PEP008 styling\n \t\t Switching cURL to Request and ES libraries\n \t\t Restructuring project\n\"\"\"\n\nimport requests\nfrom CONSTANTS import *\nfrom elasticsearch import Elasticsearch\n# from elasticsearch.exceptions import NotFoundError, TransportError\n\ndef get_index(es):\n\tprint('Retrieving index ...')\n\ttry:\n\t\tres = es.get(index=INDEX_NAME, doc_type='rt_id', id=1)\n\t\tif res['found'] == True:\n\t\t\treturn True\n\texcept: \n\t\tprint('Not found index you are looking for ...')\n\t\treturn False\t\n\ndef delete_index(es):\n\t# try:\n\t# \tres = es.get(index=INDEX_NAME, doc_type='rt_id', id=1)\n\t# \tif res['found'] == True:\n\t# \t\tes.delete(index=INDEX_NAME, doc_type='rt_id', id=1)\n\t# except:\n\t# \tprint('Not found')\n\tprint('Deleting index ...')\n\tes.delete(index=INDEX_NAME, doc_type='rt_id', id=1)\n\t\ndef create_index(es):\n\tprint('Creating index ...')\n\tes.index(index=INDEX_NAME, doc_type='rt_id', id=1, body=PAYLOAD)","sub_path":"es_kibana/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"456818120","text":"# =============================================================================\n# Authors: PAR Government\n# Organization: DARPA\n#\n# Copyright (c) 2016 PAR Government\n# All rights reserved.\n# ==============================================================================\n\nfrom skimage.restoration import wiener\nfrom scipy.signal import convolve2d\nfrom skimage import color, data, restoration\nimport cv2\nfrom PIL import Image\nimport numpy\nfrom maskgen import tool_set\n\n\n\ndef transform(img,source,target,**kwargs):\n kernelSize = int(kwargs['kernelSize']) if 'kernelSize' in kwargs else 25\n rgb = img.convert('RGB')\n cv_image = numpy.array(rgb)\n if 'inputmaskname' in kwargs:\n mask = numpy.asarray(tool_set.openImageFile(kwargs['inputmaskname']).to_mask())\n mask[mask>0] == 1\n else:\n mask = numpy.ones((cv_image.shape[0],cv_image.shape[1])).astype('uint8')\n inverted_mask = numpy.ones((cv_image.shape[0], cv_image.shape[1])).astype('uint8')\n inverted_mask[mask==1] = 0\n side = int(kernelSize**(1/2.0))\n psf = numpy.ones((side, side)) / kernelSize\n img = color.rgb2grey(cv_image)\n deconvolved_img = restoration.wiener(img, psf, 1)[0]\n for c in range(3):\n cv_image[:,:,c] =deconvolved_img* cv_image[:,:,c] * mask + cv_image[:,:,c] * inverted_mask\n Image.fromarray(cv_image,'RGB').save(target)\n return {'Blur Type':'Wiener'}, None\n\ndef operation():\n return {\n 'category': 'Filter',\n 'name': 'Blur',\n 'description':'Wiener Filter',\n 'software':'OpenCV',\n 'version':cv2.__version__,\n 'arguments':{\n 'kernelSize': {\n 'type': 'int[1:100]',\n 'defaultValue': 25,\n 'description': 'kernel size'\n },\n 'inputmaskname':{\n 'type':'imagefile',\n 'defaultvalue':None,\n 'description':'Mask image where black pixels identify region to blur'\n },\n 'Blur Type': {\n 'type': 'text',\n 'defaultvalue':'Wiener',\n 'description': ''\n }},\n 'transitions': [\n 'image.image'\n ]\n }\n\ndef suffix():\n return None","sub_path":"plugins/WienerFilter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"273144741","text":"#! /usr/bin/env python\n# test_serial_otherclient.py, part of pyusbiss\n# Copyright (c) 2016, 2018 Andrew Tolmie \n# Created by Geert de Haan\n# Licensed under the MIT License. See LICENSE file in the project root for full license information.\n\n\"\"\"\nGeert de Haan / 19-4-2019\nTesting the serial module with client serial program.\n\nHardware : Connect the Rx (pin 2) and Tx (pin 3) with an external FTDI device \n(usbiss.Rx --> FTDI.Tx, usbiss.Tx --> FTDI.Rx) and start the \ntests\\SerialClient.py program in a separate DosBox (python SerialClient.py)\n\n\"\"\"\n\nimport sys\nimport time\nimport unittest\nfrom usbiss import usbiss\nfrom usbiss import serial\n\nPort = 'COM3'\nBaudrate = 9600\n\nclass I2ctestCase(unittest.TestCase):\n\n\n def setUp(self):\n self._usbissdev = usbiss.USBISS(Port)\n self.serport = serial.SERIAL(self._usbissdev, Baudrate)\n\n def tearDown(self):\n self._usbissdev.close()\n\n def test1_loopback_readline(self):\n #testing the\n send = 'Test1 - Loopbacktest max 60 chars, the USBISS inputbuffer\\n'\n self.serport.serial_write(send)\n time.sleep(.5) # give USBISS time to send the string\n receive = self.serport.readline()\n self.assertEqual(receive +'\\n', send)\n\n def test2_loopback_read_serial(self):\n send = 'Test2 - Loopbacktest with a longer string then 30 chars\\n'\n self.serport.serial_write(send)\n time.sleep(.5)\n #waiting for the data to come back. USBISS\n n = 0\n waiting = 0\n while(n<5 and waiting == 0):\n waiting = self.serport.in_waiting\n n+=1\n time.sleep(.5)\n if waiting==0:\n self.assertEqual('Error' , 'Nothing to receive')\n else:\n time.sleep(.1)\n receive = self.serport.serial_read(waiting)\n self.assertEqual(receive , send)\n\n\nif __name__ == '__main__':\n sys.stdout.write(__doc__)\n unittest.main()","sub_path":"tests/test_serial_otherclient.py","file_name":"test_serial_otherclient.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"180297493","text":"import sqlite3\n\nimport pygame\nfrom Game import Keyborder\nfrom keyboard import read_event\n\ndatabase = r\"..\\db\\mistrz_klawiatury.db\"\ncx = sqlite3.connect(database, check_same_thread=False)\ncu = cx.cursor()\n\n\ndef choose_player(screen=None, player_nick=None): # TODO naprawić wyświetlanie i pobieranie tekstu\n \"\"\"\n #Adrian\n #czyści okno i rysuje swoje\n używa funkcji download_users do pobrania graczy\n rysuje w pygame ekran wyboru z prostokątem ktory podswietla aktualnego gracza\n wybranie gracza nastepuje poprzez enter\n możliwośc wycofania\n możliwość wyboru ADD PLAYER - uruchamia funkcję add_player\n otwiera okienko w tkinter do wpisania hasła /\n pobiera hasło za pomocą pygame (Ignacy zrobi funkcje do pobierania znaków z pygame)\n wyswietla info o niepoprawnym haśle / przechodzi dalej\n mozna z niej zamknąć grę\n :return nazwa gracza (string): name - nazwa gracza\n \"\"\"\n\n # Zmienne pomocnicze\n gracze = download_users()\n font = pygame.font.Font('freesansbold.ttf', 50)\n zaznaczenie = 0\n len_gracze = len(gracze)\n\n # Ustawienia Okna\n screen.fill((255, 255, 255))\n\n # Pętla programu\n # print('login start')\n start = True\n while True:\n if start is False:\n eve = str(read_event())\n else:\n eve = ''\n start = False\n if 'down)' in eve:\n if 'enter' in eve:\n if zaznaczenie != len_gracze:\n key_ind = 0\n for gracz in gracze.keys():\n if key_ind == zaznaczenie:\n is_confirmed = check_pass(gracz, gracze[gracz], screen)\n if is_confirmed:\n return gracz\n else:\n screen.fill((255, 255, 255))\n break\n else:\n key_ind += 1\n else:\n n_gracz = sign_up(screen)\n if n_gracz != '':\n return n_gracz\n else:\n screen.fill((255, 255, 255))\n elif 'up down' in eve:\n if zaznaczenie == 0:\n zaznaczenie = len_gracze\n else:\n zaznaczenie -= 1\n elif 'down down' in eve:\n if zaznaczenie == len_gracze:\n zaznaczenie = 0\n else:\n zaznaczenie += 1\n\n # wypisanie nazw\n n = 0\n instr = font.render(\"Wybór gracza:\", True, (0, 0, 0), (255, 255, 255))\n screen.blit(instr, (100, 100))\n for nazwa_ in gracze.keys():\n if zaznaczenie == n:\n wypis = font.render(nazwa_, True, (0, 0, 0), (175, 255, 100))\n else:\n wypis = font.render(nazwa_, True, (0, 0, 0), (255, 255, 255))\n screen.blit(wypis, (115, (n * 60 + 160)))\n n += 1\n dodaj = font.render(\"+ Nowy gracz\", True, (0, 0, 0),\n ((175, 255, 100) if zaznaczenie == n else (255, 255, 255)))\n screen.blit(dodaj, (115, (n * 60 + 160)))\n pygame.display.flip()\n\n\ndef check_pass(nazwa, haslo, screen=None):\n \"\"\"\n #Adrian\n #czyści okno i rysuje swoje\n otwiera okienko w pygame do wpisania hasła /\n pobiera hasło przy pomocy klasy Keyborder\n wyswietla info o niepoprawnym haśle / zwraca nazwę gracza\n mozna z niej zamknąć grę\n :return nazwa gracza (string): name - nazwa gracza\n \"\"\"\n\n # dane gracza\n nazwa_ = nazwa\n haslo_ = haslo\n\n # Zmienne pomocnicze\n pob_str = Keyborder()\n tekst = \"Wpisz hasło: ( \" + nazwa_ + \" )\"\n check = False\n\n # Tworzenie okna\n screen.fill((255, 255, 255))\n font = pygame.font.Font('freesansbold.ttf', 50)\n fix = False\n # Pętla programu\n start = True\n pob_str.pg_str_input()\n while True:\n if start is False:\n eve = str(read_event())\n else:\n eve = ''\n start = False\n wpis = pob_str.current_input\n dl_wpis = len(wpis)\n if 'down)' in eve:\n if 'esc' in eve:\n return False\n elif 'enter' in eve:\n if wpis == haslo_:\n return True\n else:\n check = True\n fix = False\n elif fix is not True and pob_str.finish is True:\n pob_str.pg_str_input()\n fix = 
True\n\n # Rysowanie okna\n instr = font.render(tekst, True, (0, 0, 0), (255, 255, 255))\n screen.blit(instr, (100, 100))\n sym = font.render((dl_wpis * \"*\") + (15 - dl_wpis) * \" \", True, (0, 0, 0), (220, 220, 220))\n screen.blit(sym, (100, 160))\n if check:\n error = font.render(\"Błędne hasło!\", True, (200, 0, 0), (255, 255, 255))\n screen.blit(error, (100, 220))\n back = font.render(\"Powrót (ESC)\", True, (0, 0, 0), (255, 255, 255))\n screen.blit(back, (80, 280))\n pygame.display.flip()\n\n\ndef sign_up(screen=None):\n \"\"\"\n #Adrian\n #czyści okno i rysuje swoje\n otwiera okienko w pygame do wpisania hasła i nazwy\n pobiera hasło i nazwę przy pomocy klasy Keyborder\n po zatwierdzeniu wywołuje add_player\n mozna z niej zamknąć grę\n :return nazwa gracza (string): name - nazwa gracza\n \"\"\"\n # Zmienne pomocnicze\n pob_naz = Keyborder()\n pob_has = Keyborder()\n is_name_saved = False\n same = False\n\n # Tworzenie okna\n screen.fill((255, 255, 255))\n font = pygame.font.Font('freesansbold.ttf', 50)\n # Pętla programu\n pob_naz.pg_str_input()\n while True:\n nazwa = pob_naz.current_input\n haslo = pob_has.current_input\n eve = str(read_event())\n if 'down)' in eve:\n if 'esc' in eve:\n if is_name_saved:\n is_name_saved = False\n else:\n return ''\n elif 'enter' in eve:\n if is_name_saved:\n same = add_player(nazwa, haslo)\n if same:\n return nazwa\n else:\n sign_up(screen)\n else:\n is_name_saved = True\n pob_has.pg_str_input()\n\n # Rysowanie okna\n instr1 = font.render(\"Wpisz nazwę użytkownika:\", True, (0, 0, 0), (255, 255, 255))\n screen.blit(instr1, (100, 100))\n ramka_n = font.render(nazwa + (15 - len(nazwa)) * \" \", True, (0, 0, 0), (220, 220, 220))\n screen.blit(ramka_n, (100, 160))\n instr2 = font.render(\"Wpisz hasło:\", True, (0, 0, 0), (255, 255, 255))\n screen.blit(instr2, (100, 220))\n ramka_n = font.render(len(haslo) * \"*\" + (15 - len(haslo)) * \" \",\n True, (0, 0, 0), (220, 220, 220))\n screen.blit(ramka_n, (100, 280))\n if same:\n error = font.render(\"Istnieje użytkownik o takiej nazwie!\", True, (200, 0, 0), (255, 255, 255))\n screen.blit(error, (100, 340))\n back = font.render(\"Powrót (ESC)\", True, (0, 0, 0), (255, 255, 255))\n screen.blit(back, (80, 400))\n pygame.display.flip()\n\n\ndef download_users():\n \"\"\"\n # Gustaw\n pobiera niki użytkowników z hasłami\n :return gracze: lista ze słownikami {'nazwa gracza': 'hasło'}\n \"\"\"\n cu.execute(\"SELECT nick, password FROM players\")\n cx.commit()\n gracze = cu.fetchall() # wynik jest w formie słownika, klucz to nick, wartość to hasło\n nicknames = []\n passwords = []\n\n for el in gracze:\n nicknames.append(el[0])\n passwords.append(el[1])\n\n gracze = dict(zip(nicknames, passwords)) # list to dict conversion\n return gracze\n\n\ndef add_player(nick, password):\n \"\"\"\n #Gustaw\n pozwala dodać gracza do bazy\n jest wywoływana z funkcji chose_player\n sprawdza czy już istanieje gracz o tej nazwi\n :return:\n \"\"\"\n cu.execute(\"SELECT COUNT(*) FROM players WHERE nick==?\", (nick,))\n cx.commit()\n result = cu.fetchone()\n if result == (0,):\n cu.execute(\"INSERT INTO players (nick, password) VALUES (?,?)\", (nick, password))\n cx.commit()\n cu.execute(\n \"CREATE TABLE \" + nick + \"_stat_today (id integer primary key, score int, date text)\")\n cu.execute(\n \"CREATE TABLE \" + nick + \"_stat_week (id integer primary key, score int, date text)\")\n cu.execute(\n \"CREATE TABLE \" + nick + \"_stat_month (id integer primary key, score int, date text)\")\n cu.execute(\n \"CREATE TABLE \" + nick + 
\"_stat_ever (id integer primary key, score int, date text)\")\n return True # jeśli true, to dodano nowego gracza\n else:\n return False # dodanie nie powiodlo sie, nick juz wystepuje w bazie\n","sub_path":"PLAN_MISZCZ/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"214169751","text":"# SPDX-License-Identifier: MIT\n# Copyright (c) 2018-2022 Amano Team\n\nfrom typing import Optional, Tuple\n\nfrom pyrogram import Client, filters\nfrom pyrogram.errors import BadRequest\nfrom pyrogram.types import InlineKeyboardMarkup, Message\n\nfrom eduu.config import prefix\nfrom eduu.database import db, dbc\nfrom eduu.utils import button_parser, commands, get_format_keys, require_admin\nfrom eduu.utils.localization import use_chat_lang\n\n\ndef get_welcome(chat_id: int) -> Tuple[Optional[str], bool]:\n dbc.execute(\n \"SELECT welcome, welcome_enabled FROM groups WHERE chat_id = (?)\", (chat_id,)\n )\n return dbc.fetchone()\n\n\ndef set_welcome(chat_id: int, welcome: Optional[str]):\n dbc.execute(\"UPDATE groups SET welcome = ? WHERE chat_id = ?\", (welcome, chat_id))\n db.commit()\n\n\ndef toggle_welcome(chat_id: int, mode: bool):\n dbc.execute(\n \"UPDATE groups SET welcome_enabled = ? WHERE chat_id = ?\", (mode, chat_id)\n )\n db.commit()\n\n\n@Client.on_message(\n filters.command([\"welcomeformat\", \"start welcome_format_help\"], prefix)\n)\n@use_chat_lang()\nasync def welcome_format_message_help(c: Client, m: Message, strings):\n await m.reply_text(strings(\"welcome_format_help_msg\"))\n\n await m.stop_propagation()\n\n\n@Client.on_message(filters.command(\"setwelcome\", prefix) & filters.group)\n@require_admin(permissions=[\"can_change_info\"])\n@use_chat_lang()\nasync def set_welcome_message(c: Client, m: Message, strings):\n if len(m.text.split()) > 1:\n message = m.text.html.split(None, 1)[1]\n try:\n # Try to send message with default parameters\n sent = await m.reply_text(\n message.format(\n id=m.from_user.id,\n username=m.from_user.username,\n mention=m.from_user.mention,\n first_name=m.from_user.first_name,\n # full_name and name are the same\n full_name=m.from_user.first_name,\n name=m.from_user.first_name,\n # title and chat_title are the same\n title=m.chat.title,\n chat_title=m.chat.title,\n count=(await c.get_chat_members_count(m.chat.id)),\n )\n )\n except (KeyError, BadRequest) as e:\n await m.reply_text(\n strings(\"welcome_set_error\").format(\n error=e.__class__.__name__ + \": \" + str(e)\n )\n )\n else:\n set_welcome(m.chat.id, message)\n await sent.edit_text(\n strings(\"welcome_set_success\").format(chat_title=m.chat.title)\n )\n else:\n await m.reply_text(\n strings(\"welcome_set_empty\").format(bot_username=c.me.username),\n disable_web_page_preview=True,\n )\n\n\n@Client.on_message(\n (filters.command(\"welcome\") & ~filters.command([\"welcome on\", \"welcome off\"]))\n & filters.group\n)\n@require_admin(permissions=[\"can_change_info\"])\n@use_chat_lang()\nasync def invlaid_welcome_status_arg(c: Client, m: Message, strings):\n await m.reply_text(strings(\"welcome_mode_invalid\"))\n\n\n@Client.on_message(filters.command(\"getwelcome\", prefix) & filters.group)\n@require_admin(permissions=[\"can_change_info\"])\n@use_chat_lang()\nasync def getwelcomemsg(c: Client, m: Message, strings):\n welcome, welcome_enabled = get_welcome(m.chat.id)\n if welcome_enabled:\n await m.reply_text(\n strings(\"welcome_default\") if welcome is None else welcome, parse_mode=None\n )\n else:\n await m.reply_text(\"None\")\n\n\n@Client.on_message(filters.command(\"welcome on\", prefix) & filters.group)\n@require_admin(permissions=[\"can_change_info\"])\n@use_chat_lang()\nasync def enable_welcome_message(c: Client, m: Message, strings):\n toggle_welcome(m.chat.id, True)\n await 
m.reply_text(strings(\"welcome_mode_enable\").format(chat_title=m.chat.title))\n\n\n@Client.on_message(filters.command(\"welcome off\", prefix) & filters.group)\n@require_admin(permissions=[\"can_change_info\"])\n@use_chat_lang()\nasync def disable_welcome_message(c: Client, m: Message, strings):\n toggle_welcome(m.chat.id, False)\n await m.reply_text(strings(\"welcome_mode_disable\").format(chat_title=m.chat.title))\n\n\n@Client.on_message(\n filters.command([\"resetwelcome\", \"clearwelcome\"], prefix) & filters.group\n)\n@require_admin(permissions=[\"can_change_info\"])\n@use_chat_lang()\nasync def reset_welcome_message(c: Client, m: Message, strings):\n set_welcome(m.chat.id, None)\n await m.reply_text(strings(\"welcome_reset\").format(chat_title=m.chat.title))\n\n\n@Client.on_message(filters.new_chat_members & filters.group)\n@use_chat_lang()\nasync def greet_new_members(c: Client, m: Message, strings):\n members = m.new_chat_members\n chat_title = m.chat.title\n first_name = \", \".join(map(lambda a: a.first_name, members))\n full_name = \", \".join(\n map(lambda a: a.first_name + \" \" + (a.last_name or \"\"), members)\n )\n user_id = \", \".join(map(lambda a: str(a.id), members))\n username = \", \".join(\n map(lambda a: \"@\" + a.username if a.username else a.mention, members)\n )\n mention = \", \".join(map(lambda a: a.mention, members))\n if not m.from_user.is_bot:\n welcome, welcome_enabled = get_welcome(m.chat.id)\n if welcome_enabled:\n if welcome is None:\n welcome = strings(\"welcome_default\")\n\n if \"count\" in get_format_keys(welcome):\n count = await c.get_chat_members_count(m.chat.id)\n else:\n count = 0\n\n welcome = welcome.format(\n id=user_id,\n username=username,\n mention=mention,\n first_name=first_name,\n # full_name and name are the same\n full_name=full_name,\n name=full_name,\n # title and chat_title are the same\n title=chat_title,\n chat_title=chat_title,\n count=count,\n )\n welcome, welcome_buttons = button_parser(welcome)\n await m.reply_text(\n welcome,\n disable_web_page_preview=True,\n reply_markup=(\n InlineKeyboardMarkup(welcome_buttons)\n if len(welcome_buttons) != 0\n else None\n ),\n )\n\n\ncommands.add_command(\"resetwelcome\", \"admin\")\ncommands.add_command(\"setwelcome\", \"admin\")\ncommands.add_command(\"welcome\", \"admin\")\ncommands.add_command(\"welcomeformat\", \"admin\")\n","sub_path":"eduu/plugins/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"352428493","text":"from ...compat.iterable import list\nfrom .base import DelegatorBase\n\nclass MapDelegator(DelegatorBase):\n def __dir__(self):\n return [\n key for key in dir(self.proxy._ptype)\n if not key.startswith('_')\n and callable(getattr(self.proxy._ptype, key))]\n def __call__(self, attrs, key):\n inst = self.proxy._inst\n cls = self.proxy._cls\n pfuncs = [\n getattr(prop, key)\n for prop in attrs.values()]\n return lambda *a, **kw: list([\n pfunc(inst, cls, *a, **kw) for pfunc in pfuncs\n ])\n","sub_path":"pacu/pacu/util/src/descriptor/delegator/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"242628766","text":"__docformat__ = \"restructuredtext en\"\n\nfrom brian.units import mole, mmole, umole, nmole, volt, mvolt, coulomb, joule, kelvin, second,meter, siemens, msiemens\nfrom brian.stdunits import ms,cm,um,uF,pF,uA,pA,nS, uS\n\n# initial values as starting point\nini = {}\n\"\"\" initial values of the system \"\"\"\nini['C'] = 0.073 * umole/((0.1*meter)**3)\nini['N'] = 15000. * umole/((0.1*meter)**3)\nini['V'] = -80.94*mvolt\n\nini['K'] = 100000. * umole/((0.1*meter)**3)\nini['C_ER'] = 99.85 * umole/((0.1*meter)**3)\nini['h'] = 0.78902\nini['IP3'] = 0.1569 * umole/((0.1*meter)**3)\n\nini['N_o'] = 145000.0* umole/((0.1*meter)**3)\nini['C_o'] = 1355.2* umole/((0.1*meter)**3)\nini['K_o'] = 3000. * umole/((0.1*meter)**3)\n\nini['K_Astro2'] = 100000. * umole/((0.1*meter)**3)\nini['V_Astro2'] = -80.94*mvolt\nini['K_o_Astro2'] = 3000. * umole/((0.1*meter)**3)\n\n\npar = {}\n# astrocytic morphology\npar['V_cyto'] = 8.0 * (10*um)**3\npar['V_cyto_ECS'] = par['V_cyto']*0.5\npar['V_er'] = 0.8 * (10*um)**3\npar['A_cap'] = 1.6 * 10**(-2) * cm**2\npar['C_m'] = 1.0 * uF/(cm*cm)\n\n# natural constants\npar['F'] = 96500.0 * coulomb / mole\npar['R'] = 8.314 * joule / (mole*kelvin)\n\n# environment\npar['T'] = 311.0 * kelvin\n\n# simulation parameter\npar['time_step'] = 1.0 * ms\npar['gl'] = 0.0 * umole/((0.1*meter)**3)\npar['g'] = par['gl']\n\n# i_incx\npar['k_NCX'] = 1. * uA/uF\npar['K_mN'] = 87500.0 * umole/((0.1*meter)**3)\npar['K_mC'] = 1380.0 * umole/((0.1*meter)**3)\npar['eta'] = 0.35\npar['k_sat'] = 0.1\n\n# i_nka\npar['K_mN_NKA'] = 10000. * umole/((0.1*meter)**3)\npar['K_mK'] = 1500. * umole/((0.1*meter)**3)\npar['INKA_max'] = 0.045 * (uA/uF)\n\n# inleak\npar['gN'] = 0.00020727 * msiemens/uF\n\n# ikleak\npar['gK'] = 0.001504809 * msiemens/uF\n\n# ikir\npar['gKir'] = 0.001 * msiemens/uF\n\n#iglu\npar['I_max'] = 250 * (umole/(second*(0.1*meter)**3))\npar['K_mN_glu'] = (15000. * (umole/((0.1*meter)**3)))\npar['K_mg'] = (34. 
* (umole/((0.1*meter)**3)))\n\n# i_serca\npar['Ker'] = 0.1 * umole/((0.1*meter)**3) # FM mode\npar['ver'] = 20.0 * umole * second ** (-1)/((0.1*meter)**3) # Maximal rate of SERCA uptake\n\n# i_ip3 - CICR through I_IP3\npar['d1'] = 0.13 * umole/((0.1*meter)**3) # IP3 dissociation constant\npar['d5'] = 0.08234 * umole/((0.1*meter)**3) # Ca2+ activation dissociation constant\npar['rc'] = 6.0 * second ** (-1) # Maximal CICR rate\n\n#iCerleak\npar['g_Caleak'] = 2.0 * pA/(umole/((0.1*meter)**3))\n\n# IP3 variable\n# IP3 production - PLC_beat\npar['vb'] = 0.05 * umole/ (((0.1*meter)**3)*second)\npar['Kr'] = 1.3 * umole/((0.1*meter)**3)\npar['Kp'] = 10.0 * umole/((0.1*meter)**3)\npar['Kpi'] = 0.6 * umole/((0.1*meter)**3)\n# IP3 production - PLC_delta\npar['vd'] = 0.02 * umole/ (((0.1*meter)**3)*second)\npar['kd'] = 1.5 * umole/((0.1*meter)**3) # Inhibition constant of PLCdelta activity\npar['Kplcd'] = 0.1 * umole/((0.1*meter)**3) # Ca2+ affinity of PLCdetla\n# IP3 degradation - IP3-3K\npar['v3k'] = 2.0 * umole/ (((0.1*meter)**3)*second) # Maximal rate of degradation by IP3-3K\npar['K3'] = 1.0 * umole/((0.1*meter)**3) # IP3 affinity of IP3-3K\npar['KD'] = 0.7 * umole/((0.1*meter)**3) # Ca2+ affinity of IP3-3K\n# IP3 degradation- IP-5P\npar['r5p'] = 0.04 * second ** (-1)\n\n# h variable\npar['d2'] = 1.049 * umole/((0.1*meter)**3) # Ca2+ inactivation dissociation constant\npar['d3'] = 0.9434 * umole/((0.1*meter)**3) # IP3 dissociation constant\npar['a2'] = 0.2 * ((0.1*meter)**3)/(umole*second) # IP3R binding rate for Ca2+ inhibition\n\n# i_ca_er-leak\npar['rl'] = 0.019651089219916 * second ** (-1) # Maximal rate of Ca2+ leak from the ER\n\n# I_K_Astro2\npar['DK'] = 2.7 * 10**(-5) * (1./(ms))\n\n\n\n\n","sub_path":"myparameters.py","file_name":"myparameters.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"136479914","text":"\"\"\"\nTest for REST API for bills\n\"\"\"\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom rest_framework import status\n\nfrom apps.bills.api import IMAGE_ALREADY_UPLOADED_ERROR\nfrom apps.bills.models import Bill\nfrom .check_image import CHECK_IMAGE as DEFAULT_CHECK_IMAGE\nfrom .helpers import BillTestCase\n\n\nclass UploadBillRestAPITest(BillTestCase):\n \"\"\"\n Test rest api endpoints for uploading and managing bills\n \"\"\"\n def setUp(self):\n self.user = self.get_or_create_user()\n\n def upload_bill(\n self, auth_needed=True):\n \"\"\"\n Helper to upload bill via api\n \"\"\"\n if auth_needed:\n self.client.force_login(self.user)\n return self.client.post(\n reverse('bill'),\n {\n 'image': SimpleUploadedFile(\n name='test_check.jpg',\n content=DEFAULT_CHECK_IMAGE,\n content_type='image/jpeg')\n },\n format='multipart')\n\n def test_upload_bill__successfull_response(self):\n \"\"\"\n We return 201 created when bill is successfully uploaded\n \"\"\"\n response = self.upload_bill()\n self.assertEqual(\n response.status_code, status.HTTP_201_CREATED)\n\n def test_upload_bill__bill_created(self):\n \"\"\"\n We create a bill instanse after successful upload\n \"\"\"\n self.upload_bill()\n self.assertTrue(\n Bill.objects.filter(\n sha256_hash_hex=self.calculate_expected_hash()).exists())\n\n def test_upload_bill__creator_saved(self):\n \"\"\"\n We save bill creator\n \"\"\"\n self.upload_bill()\n bill = Bill.objects.get(\n sha256_hash_hex=self.calculate_expected_hash())\n self.assertTrue(\n bill.user, self.user)\n\n def test_upload_bill__parsed_bill_returned(self):\n \"\"\"\n We return parsed bill info after successful upload\n \"\"\"\n response = self.upload_bill()\n bill = Bill.objects.latest('create_time')\n self.assertEqual(\n response.data,\n {\n 'bill': bill.id,\n })\n\n def test_upload_bill_already_exists__existing_bill_returned(self):\n \"\"\"\n We return existing bill if bill was already uploaded\n \"\"\"\n bill = self.create_bill()\n response = self.upload_bill() \n self.assertDictEqual(\n response.data,\n {\n 'bill': bill.id\n })\n\n def test_upload_bill_already_exists__ok_response_returned(self):\n \"\"\"\n We return 200 OK if bill was already uploaded\n \"\"\"\n self.create_bill()\n response = self.upload_bill() \n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK)\n\n def test_not_authenticated_tries_load_bill__error_returned(self):\n \"\"\"\n We return 403 forbidden if user is not logged in\n while he is trying to create a bill\n \"\"\" \n response = self.upload_bill(\n auth_needed=False)\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN)\n\n\nclass RetrieveBillRestAPITest(BillTestCase):\n \"\"\"\n Test rest api endpoint for retrieving bills\n \"\"\"\n def setUp(self):\n self.user = self.get_or_create_user()\n self.bill = self.create_bill()\n\n def retrieve_bill(\n self, \n bill_id=None, \n auth_needed=True):\n \"\"\"\n Helper to retrieve bill via api\n \"\"\"\n bill_id = bill_id or self.bill.id\n if auth_needed:\n self.client.force_login(self.user)\n return self.client.get(\n reverse(\n 'retrieve-update-bill', \n kwargs={\n 'bill_id': bill_id\n }))\n\n def test_retrieve_bill__successfull_response(self):\n \"\"\"\n We return 200 OK when bill is successfully retrieved\n \"\"\"\n response = self.retrieve_bill()\n self.assertEqual(\n response.status_code, status.HTTP_200_OK)\n\n def 
test_retrieve_bill__bill_and_categories_returned(self):\n \"\"\"\n We return basic bill info and all linked categories\n \"\"\"\n self.create_categories_for_bill(self.bill)\n response = self.retrieve_bill()\n self.assertDictEqual(\n response.data,\n {\n 'image': self.bill.image.url,\n 'date': self.bill.date,\n 'categories': [\n {\n 'category': {\n 'name': 'a-test',\n 'id': 1,\n },\n 'id': 1,\n 'amount': 10\n },\n {\n 'category': {\n 'name': 'b-test',\n 'id': 2\n },\n 'id': 2,\n 'amount': 20\n },\n ]\n })\n\n def test_retrieve_bill_without_auth__error_returned(self):\n \"\"\"\n We return 403 forbidden if user is not logged in\n \"\"\" \n response = self.retrieve_bill(\n auth_needed=False)\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN)\n\n def test_retrieve_bill_for_another_user__error_returned(self):\n \"\"\"\n We return 403 forbidden if user tries to retrieve bill\n created by another user\n \"\"\"\n new_user = self.get_or_create_user(\n email='new-test-1@test.com')\n self.bill.user = new_user\n self.bill.save(update_fields=['user', ])\n response = self.retrieve_bill()\n self.assertEqual(\n response.status_code, \n status.HTTP_403_FORBIDDEN)\n\n def test_retrieve_non_existing_bill___error_returned(self):\n \"\"\"\n We return 404 not found if user tries to retrieve bill\n with invalid id\n \"\"\" \n response = self.retrieve_bill(bill_id=self.bill.id + 1)\n self.assertEqual(\n response.status_code, \n status.HTTP_404_NOT_FOUND)\n\n\nclass UpdateBillRestAPITest(BillTestCase):\n \"\"\"\n Test rest api endpoint for updating bill categories\n \"\"\"\n def setUp(self):\n self.user = self.get_or_create_user()\n self.bill = self.create_bill()\n\n def update_bill(\n self,\n data=None,\n bill_id=None, \n auth_needed=True):\n \"\"\"\n Helper to retrieve bill via api\n \"\"\"\n import json\n data = data or {\n 'categories': []\n }\n bill_id = bill_id or self.bill.id\n if auth_needed:\n self.client.force_login(self.user)\n return self.client.patch(\n reverse(\n 'retrieve-update-bill',\n kwargs={\n 'bill_id': bill_id\n }),\n json.dumps(data),\n content_type='application/json')\n\n def test_update_bill__successfull_response(self):\n \"\"\"\n We return 200 OK when bill is successfully updated\n \"\"\"\n response = self.update_bill()\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK)\n\n def test_add_new_category_to_bill__categories_updated(self):\n \"\"\"\n We successfully add new category to bill\n \"\"\"\n from apps.budgets.models import Category\n self.create_categories_for_bill(self.bill)\n category = Category.objects.create(name='d-test')\n response = self.update_bill(\n data={\n 'categories': [\n {\n 'category': {\n 'id': 1,\n 'name': 'a-test'\n },\n 'id': 1,\n 'amount': 10\n },\n {\n 'category': {\n 'id': 2,\n 'name': 'b-test'\n },\n 'id': 2,\n 'amount': 20\n },\n {\n 'category': {\n 'id': category.id,\n 'name': 'd-test'\n },\n 'amount': 30\n },\n ]})\n self.assertTrue(\n self.bill.categories.\\\n filter(id=category.id).exists())\n\n def test_delete_category_to_bill__categories_updated(self):\n \"\"\"\n We successfully remove category from bill\n \"\"\"\n self.create_categories_for_bill(self.bill)\n response = self.update_bill(\n data={\n 'categories': [\n {\n 'category': {\n 'id': 1,\n 'name': 'a-test'\n },\n 'id': 1,\n 'amount': 10\n }\n ]})\n self.assertFalse(\n self.bill.categories.\\\n filter(name='b-test').exists())\n\n def test_edit_category_amount__categories_updated(self):\n \"\"\"\n We successfully edit category amount\n \"\"\"\n 
self.create_categories_for_bill(self.bill)\n response = self.update_bill(\n data={\n 'categories': [\n {\n 'category': {\n 'id': 1,\n 'name': 'a-test'\n },\n 'id': 1,\n 'amount': 10\n },\n {\n 'category': {\n 'id': 2,\n 'name': 'b-test'\n },\n 'id': 2,\n 'amount': 30\n },\n ]})\n self.assertTrue(\n self.bill.categories.\\\n filter(\n name='b-test',\n category_to_bill__amount=30.0).exists())\n\n def test_add_non_existing_category__400_returned(self):\n \"\"\"\n We return 400 bad request if category doesn't exist\n \"\"\"\n self.create_categories_for_bill(self.bill)\n response = self.update_bill(\n data={\n 'categories': [\n {\n 'category': {\n 'id': 1,\n 'name': 'a-test'\n },\n 'id': 1,\n 'amount': 10\n },\n {\n 'category': {\n 'id': 2,\n 'name': 'b-test'\n },\n 'id': 2,\n 'amount': 20\n },\n {\n 'category': {\n 'id': 3,\n 'name': 'd-test'\n },\n 'amount': 30\n },\n ]})\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST)\n\n def test_add_non_existing_category__new_category_wasnt_created(self):\n \"\"\"\n We do not create new category if category doesn't exist\n \"\"\"\n from apps.budgets.models import Category\n self.create_categories_for_bill(self.bill)\n response = self.update_bill(\n data={\n 'categories': [\n {\n 'category': {\n 'id': 1,\n 'name': 'a-test'\n },\n 'id': 1,\n 'amount': 10\n },\n {\n 'category': {\n 'id': 2,\n 'name': 'b-test'\n },\n 'id': 2,\n 'amount': 20\n },\n {\n 'category': {\n 'id': 3,\n 'name': 'd-test'\n },\n 'amount': 30\n },\n ]})\n self.assertFalse(\n Category.objects.filter(name='d-test').exists())\n\n def test_update_bill_without_auth__error_returned(self):\n \"\"\"\n We return 403 forbidden if user is not logged in\n \"\"\"\n response = self.update_bill(\n auth_needed=False)\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN)\n\n def test_update_bill_for_another_user__error_returned(self):\n \"\"\"\n We return 403 forbidden if user tries to update bill\n created by another user\n \"\"\"\n new_user = self.get_or_create_user(\n email='new-test-1@test.com')\n self.bill.user = new_user\n self.bill.save(update_fields=['user', ])\n response = self.update_bill()\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN)\n\n def test_update_non_existing_bill___error_returned(self):\n \"\"\"\n We return 404 not found if user tries to update bill\n with invalid id\n \"\"\"\n response = self.update_bill(bill_id=self.bill.id + 1)\n self.assertEqual(\n response.status_code,\n status.HTTP_404_NOT_FOUND)\n\n\nclass ListBillRestAPITest(BillTestCase):\n \"\"\"\n Test rest api endpoints for listing bills\n \"\"\"\n def setUp(self):\n self.user = self.get_or_create_user()\n self.bill = self.create_bill()\n\n def list_bills(\n self, auth_needed=True,\n only_uncategorised=False):\n \"\"\"\n Helper to upload bill via api\n \"\"\"\n if auth_needed:\n self.client.force_login(self.user)\n client_args = []\n if only_uncategorised:\n client_args = [\n {\n 'uncategorised': True\n }\n ]\n return self.client.get(\n reverse('bill'),\n *client_args)\n\n def test_list_bill__successfull_response(self):\n \"\"\"\n We return 200 when bills are successfully listed\n \"\"\"\n response = self.list_bills()\n self.assertEqual(\n response.status_code, \n status.HTTP_200_OK)\n\n def test_list_bill__valid_information_returned(self):\n \"\"\"\n We return valid information about bill\n \"\"\"\n response = self.list_bills()\n self.assertListEqual(\n response.data,\n [\n {\n 'id': self.bill.id,\n 'image': self.bill.image.url,\n 'has_categories': 
False\n                }\n            ]\n        )\n\n    def test_list_bill_with_categories(self):\n        \"\"\"\n        We return a flag that shows if bill has categories\n        \"\"\"\n        self.create_categories_for_bill(self.bill)\n        response = self.list_bills()\n        self.assertTrue(\n            response.data[0]['has_categories'])\n\n\n    def test_list_uncategorised_bill(self):\n        \"\"\"\n        We filter out bills with categories\n        if flag passed\n        \"\"\"\n        self.create_categories_for_bill(self.bill)\n        response = self.list_bills(\n            only_uncategorised=True)\n        self.assertListEqual(\n            response.data, [])\n\n    def test_list_bill__filter_out_bills_for_different_user(self):\n        \"\"\"\n        We show only bills for current user\n        \"\"\"\n        new_user = self.get_or_create_user(\n            email='new-test-1@test.com')\n        self.bill.user = new_user\n        self.bill.save(update_fields=['user', ])\n        response = self.list_bills()\n        self.assertListEqual(\n            response.data, [])\n\n    def test_not_authenticated_tries_to_list_bills__error_returned(self):\n        \"\"\"\n        We return 403 forbidden if user is not logged in\n        \"\"\"\n        response = self.list_bills(\n            auth_needed=False)\n        self.assertEqual(\n            response.status_code,\n            status.HTTP_403_FORBIDDEN)\n","sub_path":"monthly_expenses/apps/bills/tests/test_bill_rest_api.py","file_name":"test_bill_rest_api.py","file_ext":"py","file_size_in_byte":16246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"482864641","text":"import pytz\nimport logging\nfrom itertools import groupby\nfrom pytz import common_timezones\nfrom datetime import datetime, timedelta\nfrom taggit.models import Tag, TaggedItem\nfrom django.utils.timezone import localtime\nfrom memento.timegate import TimeGateView, MementoDetailView\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom archive.models import Update, Site, Screenshot, Champion\nfrom django.views.generic import (\n TemplateView,\n ListView,\n DetailView\n)\nlogger = logging.getLogger(__name__)\n\n\nclass ScreenshotTimeGate(TimeGateView):\n \"\"\"\n A Memento TimeGate that parses a request from the headers\n and redirects to the corresponding screenshot detail page.\n \"\"\"\n queryset = Screenshot.objects.all().only(\"id\")\n url_field = 'site__url'\n datetime_field = 'timestamp'\n timemap_pattern_name = \"timemap-url-link-feed\"\n\n\nclass Fail(TemplateView):\n template_name = 'fail.html'\n\n\nclass Status(TemplateView):\n \"\"\"\n A private page for reviewing the screenshotting success rate, etc.\n \"\"\"\n template_name = 'status.html'\n\n def get_context_data(self, **kwargs):\n site_list = Site.objects.stats()\n update_list = Update.objects.stats(limit=101)\n update_list.reverse()\n context = {\n 'site_list': site_list,\n 'min_date': min([d['first_screenshot'] for d in site_list]),\n 'screenshot_total': sum([d['total_images'] for d in site_list]),\n 'update_list': update_list,\n }\n return context\n\n\nclass AboutDetail(TemplateView):\n \"\"\"\n Some background on this site.\n \"\"\"\n template_name = 'about.html'\n\n\nclass CryForHelp(TemplateView):\n \"\"\"\n A cry for help.\n \"\"\"\n template_name = 'cry_for_help.html'\n\n def get_context_data(self, **kwargs):\n context = super(CryForHelp, self).get_context_data(**kwargs)\n context['champion_list'] = Champion.objects.all()\n return context\n\n\nclass ChampionsList(ListView):\n \"\"\"\n A list of the people who have given money to support the site.\n \"\"\"\n queryset = Champion.objects.all()\n template_name = 'champion_list.html'\n\n\nclass Index(TemplateView):\n \"\"\"\n The homepage.\n \"\"\"\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n update = Update.objects.live()\n if not update:\n raise Http404\n object_list = update.screenshot_set.exclude(\n internetarchive_id=''\n ).filter(\n site__on_the_homepage=True\n )\n object_list = object_list.select_related(\"site\")\n object_list = group_objects_by_number(object_list, 4)\n return {\n 'update': update,\n 'object_list': object_list,\n }\n\n\nclass ScreenshotDetail(MementoDetailView):\n \"\"\"\n All about a particular screenshot. 
See the whole thing full size.\n    \"\"\"\n    template_name = 'screenshot_detail.html'\n    queryset = Screenshot.objects.filter(\n        site__status='active'\n    ).select_related(\"update\")\n    datetime_field = 'timestamp'\n    timemap_pattern_name = \"timemap-url-link-feed\"\n    timegate_pattern_name = \"timegate-screenshot\"\n\n    def get_original_url(self, obj):\n        return obj.site.url\n\n    def get_context_data(self, **kwargs):\n        context = super(ScreenshotDetail, self).get_context_data(**kwargs)\n        try:\n            next = Screenshot.objects.filter(\n                site=context['object'].site,\n                has_image=True,\n                id__gt=context['object'].id\n            ).order_by(\"id\").only(\"id\")[0]\n        except IndexError:\n            next = None\n        try:\n            prev = Screenshot.objects.filter(\n                site=context['object'].site,\n                has_image=True,\n                id__lt=context['object'].id\n            ).order_by(\"-id\").only(\"id\")[0]\n        except IndexError:\n            prev = None\n        context.update({\n            'next': next,\n            'prev': prev,\n        })\n        return context\n\n\nclass ScreenshotDetailHyperlinksCSV(DetailView):\n    queryset = Screenshot.objects.filter(\n        site__status='active'\n    ).select_related(\"update\")\n\n    def get_context_data(self, **kwargs):\n        if not self.object.has_html:\n            raise Http404\n        return {\n            'object': self.object,\n            'archive_obj': self.object.html.archive_obj,\n        }\n\n    def render_to_response(self, context, **kwargs):\n        r = HttpResponse(content_type='text/csv')\n        # the closing quote was missing from the filename parameter\n        r['Content-Disposition'] = 'attachment; filename=\"hyperlinks.csv\"'\n        r = context['archive_obj'].write_hyperlinks_csv_to_file(r)\n        return r\n\n\nclass SiteDetail(DetailView):\n    \"\"\"\n    All about a particular site.\n    \"\"\"\n    template_name = 'site_detail.html'\n    queryset = Site.objects.active()\n\n    def convert_timezone(self, dt, tz):\n        if not tz:\n            return localtime(dt)\n        else:\n            return tz.normalize(dt.astimezone(tz))\n\n    def get_context_data(self, **kwargs):\n        # Pull all the live screenshots for this site\n        qs = Screenshot.objects.filter(\n            site=self.object,\n        ).defer(\n            \"html\",\n            \"has_html\",\n            \"has_crop\",\n            \"has_image\"\n        ).order_by(\"-id\")\n        # Slice off the latest fifty for display\n        screenshot_list = list(qs[:50])\n        try:\n            # Get the latest screenshot\n            latest_screenshot = screenshot_list[0]\n            screenshot_groups = []\n            # Check if this site has a timezone we need to adjust for\n            if self.object.timezone:\n                tz = pytz.timezone(self.object.timezone)\n            else:\n                tz = None\n            # Group screenshots from recent days, adjusting for the timezone\n            # if necessary\n            for key, group in groupby(\n                screenshot_list[1:],\n                lambda x: self.convert_timezone(x.timestamp, tz).date()\n            ):\n                screenshot_groups.append(\n                    (key, group_objects_by_number(list(group), 5))\n                )\n            # Find the min and max dates where this site appears\n            #min_timestamp = qs.aggregate(Min(\"timestamp\"))['timestamp__min']\n            min_timestamp = min([\n                o.timestamp for o in screenshot_list if o.timestamp\n            ])\n            max_timestamp = max([\n                o.timestamp for o in screenshot_list if o.timestamp\n            ])\n            # ... 
and convert them to their timezone\n min_date = self.convert_timezone(min_timestamp, tz).date()\n max_date = self.convert_timezone(max_timestamp, tz).date()\n except IndexError:\n latest_screenshot = None\n screenshot_groups = []\n min_date, max_date = None, None\n return {\n 'object': self.object,\n 'latest_screenshot': latest_screenshot,\n 'screenshot_list': screenshot_groups,\n 'min_date': min_date,\n 'max_date': max_date,\n }\n\n\nclass UpdateDetail(DetailView):\n \"\"\"\n All about a particular update.\n \"\"\"\n template_name = \"update_detail.html\"\n queryset = Update.objects.all()\n\n def get_context_data(self, **kwargs):\n screenshot_list = Screenshot.objects.filter(update=self.object)\n screenshot_groups = group_objects_by_number(\n screenshot_list,\n number_in_each_group=4\n )\n return {\n 'object': self.object,\n 'screenshot_groups': screenshot_groups,\n }\n\n\nclass TagDetail(DetailView):\n \"\"\"\n All about a particular update.\n \"\"\"\n template_name = \"tag_detail.html\"\n queryset = Tag.objects.all()\n\n def get_context_data(self, **kwargs):\n object_list = [\n i.content_object for i in\n TaggedItem.objects.filter(tag=self.object)\n ]\n update = Update.objects.live()\n screenshot_list = Screenshot.objects.filter(\n update=update,\n site__in=object_list\n )\n screenshot_groups = group_objects_by_number(\n screenshot_list,\n number_in_each_group=4\n )\n return {\n 'object': self.object,\n 'update': update,\n 'screenshot_groups': screenshot_groups,\n }\n\n\nclass FeedList(TemplateView):\n \"\"\"\n A list of all our RSS feeds in one easy place.\n \"\"\"\n template_name = 'feed_list.html'\n\n def get_context_data(self, **kwargs):\n return {\n 'site_list': Site.objects.active(),\n 'tag_list': Tag.objects.all().order_by(\"name\")\n }\n\n\nclass AdvancedSearch(TemplateView):\n \"\"\"\n An opportunity for users to craft more complex searches of the database.\n \"\"\"\n template_name = 'advanced_search.html'\n\n def convert_timezone(self, dt, tz):\n return tz.normalize(dt.astimezone(tz))\n\n def get_context_data(self, **kwargs):\n context = super(AdvancedSearch, self).get_context_data(**kwargs)\n\n # Pull the data for the form fields\n site_list = Site.objects.active()\n context['site_list'] = site_list\n tag_list = Tag.objects.all().order_by(\"name\")\n context['tag_list'] = tag_list\n context['timezone_list'] = common_timezones\n context['timezone'] = 'UTC'\n\n # Check if any qs variables have been provided\n is_search = len(self.request.GET.keys()) > 0\n context['is_search'] = is_search\n\n # If not just drop out now\n if not is_search:\n return context\n\n# # Check if this page has already been cached\n# ckey = 'advsearch:%s' % (\n# urllib.urlencode(dict(self.request.GET))\n# )\n# ckey = sha1(ckey).hexdigest()\n# cdata = cache.get(ckey)\n# if cdata:\n# return cdata\n\n # Examine the valid keys and see what's been submitted\n site = self.request.GET.get('site', None)\n tag = self.request.GET.get('tag', None)\n user_timezone = self.request.GET.get('timezone', None)\n start_date = self.request.GET.get('start_date', None)\n end_date = self.request.GET.get('end_date', None)\n if start_date == 'YYYY/MM/DD':\n start_date = None\n if end_date == 'YYYY/MM/DD':\n end_date = None\n\n # Since you can't search both site and tag, if we have both\n # we should throw an error\n if site and tag:\n context['has_error'] = True\n context['error_message'] = 'Sorry. 
You cannot filter by both \\\nsite and tag at the same time.'\n return context\n\n # Validate the timezone\n if not user_timezone:\n user_timezone = 'UTC'\n if user_timezone not in common_timezones:\n context['has_error'] = True\n context['error_message'] = 'Sorry. The timezone you submitted \\\nis not supported.'\n return context\n context['timezone'] = user_timezone\n user_timezone = pytz.timezone(user_timezone)\n\n # A dict to store filters depending on what has been submitted\n filters = {}\n\n # First the site or tag\n if site:\n try:\n site = Site.objects.get(slug=site)\n except Site.DoesNotExist:\n context['has_error'] = True\n context['error_message'] = 'Sorry. The site you submitted \\\ndoes not exist.'\n return context\n filters['site'] = site\n # Gotta give it a longer name so it isn't overridden by site\n # context processor\n context['searched_site'] = site\n elif tag:\n try:\n tag = Tag.objects.get(slug=tag)\n except Tag.DoesNotExist:\n context['has_error'] = True\n context['error_message'] = 'Sorry. The tag you submitted \\\ndoes not exist.'\n return context\n tagged_list = [i.content_object for i in\n TaggedItem.objects.filter(tag=tag)\n ]\n filters['site__in'] = tagged_list\n context['tag'] = tag\n\n # Then the date range\n if not start_date and not end_date:\n context['has_error'] = True\n context['error_message'] = 'Sorry. You must submit both a \\\nstart and end date.'\n #cache.set(ckey, context)\n return context\n elif start_date and not end_date:\n context['has_error'] = True\n context['error_message'] = 'Sorry. You must submit both a \\\nstart and end date.'\n #cache.set(ckey, context)\n return context\n elif end_date and not start_date:\n context['has_error'] = True\n context['error_message'] = 'Sorry. You must submit both a \\\nstart and end date.'\n #cache.set(ckey, context)\n return context\n elif start_date and end_date:\n # Validate the start date\n try:\n start_date = datetime.strptime(start_date, \"%Y/%m/%d\")\n start_date = start_date.replace(tzinfo=user_timezone)\n except ValueError:\n context['has_error'] = True\n context['error_message'] = 'Sorry. Your start date was \\\nnot properly formatted.'\n #cache.set(ckey, context)\n return context\n # Validate the end date\n try:\n end_date = datetime.strptime(end_date, \"%Y/%m/%d\")\n end_date = end_date.replace(tzinfo=user_timezone)\n except ValueError:\n context['has_error'] = True\n context['error_message'] = 'Sorry. Your end date was \\\nnot properly formatted.'\n #cache.set(ckey, context)\n return context\n # Add dates to the context\n context.update({\n 'start_date': start_date.strftime(\"%Y/%m/%d\"),\n 'end_date': end_date.strftime(\"%Y/%m/%d\"),\n })\n # Make sure dates are in the right order\n if end_date < start_date:\n context['has_error'] = True\n context['error_message'] = 'Sorry. Your end date comes \\\nbefore your start date.'\n #cache.set(ckey, context)\n return context\n # Limit date range to seven days\n if (end_date-start_date).days > 7:\n context['has_error'] = True\n context['error_message'] = 'Sorry. The maximum date range \\\nallowed is seven days. You requested %s.' 
% ((end_date-start_date).days)\n #cache.set(ckey, context)\n return context\n # Add a day so the search is \"greedy\" and includes screenshots\n # that happened on the end_date\n filters.update({\n 'timestamp__gte': start_date,\n 'timestamp__lt': end_date + timedelta(days=1),\n })\n\n # Execute the filters and pass out the result\n context['object_list'] = Screenshot.objects.filter(\n **filters\n ).order_by(\"timestamp\")[:500]\n context['object_count'] = context['object_list'].count()\n screenshot_groups = []\n for key, group in groupby(\n context['object_list'],\n lambda x: self.convert_timezone(\n x.update.start,\n user_timezone\n ).date()\n ):\n screenshot_groups.append(\n (key, group_objects_by_number(list(group), 6))\n )\n context['object_groups'] = screenshot_groups\n #cache.set(ckey, context)\n return context\n\n\ndef group_objects_by_number(object_list, number_in_each_group=3):\n \"\"\"\n Accepts an object list and groups it into sets.\n\n Intended for displaying the data in a three-column grid.\n \"\"\"\n new_list = []\n i = 0\n while i < len(object_list):\n new_list.append([x for x in object_list[i:i+number_in_each_group]])\n i += number_in_each_group\n return new_list\n","sub_path":"archive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
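The `group_objects_by_number` helper in the record above chunks a flat list into fixed-size rows for the template grid; an equivalent, more compact standalone sketch of the same idea, using made-up data:

```python
def group_objects_by_number(object_list, number_in_each_group=3):
    # Step through the list in fixed-size strides, one sublist per grid row.
    return [
        object_list[i:i + number_in_each_group]
        for i in range(0, len(object_list), number_in_each_group)
    ]

# Nine items become three rows of three for a three-column grid.
assert group_objects_by_number(list(range(9))) == [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
```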
+{"seq_id":"278647429","text":"from wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n#https://github.com/amueller/word_cloud\n\npositive = open('neutral_bridge.txt', \"r\").read()\n\n\n# Generating word cloud\nwc = WordCloud(background_color=\"black\", max_font_size=50, min_font_size=10, max_words=10000).generate(positive)\n\n# Display the generated image\nplt.figure(figsize=[10,10])\nplt.imshow(wc, interpolation=\"bilinear\")\nplt.axis(\"off\")\nplt.show()\n\n# Save the image in the img folder\nwc.to_file(\"/Users/ickyv/PycharmProjects/QM/twitter/wordcloud/London bridge attack/neutral_wordcloud.png\")\n\n\n","sub_path":"wordcloud1.py","file_name":"wordcloud1.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"426481168","text":"# -*- coding: utf-8 -*-\n#**第 0007 题:**有个目录,里面是你自己写过的程序,统计一下你写过多少行代码。包括空行和注释,但是要分别列出来。\n\nimport os, re\n\ndef findCodeLine(url):\n reg = re.compile(r\"^#\")\n code_line = 0\n exp_line = 0\n space_line = 0\n with open(url, 'r', encoding='utf-8') as f:\n for line in f:\n if line.strip() == '':\n space_line += 1\n elif reg.findall(line.strip()):\n exp_line += 1\n else:\n code_line +=1\n fileInfo = {'code_line': code_line, 'exp_line': exp_line, 'space_line': space_line}\n return fileInfo\n\ndef findPY(url):\n for py in os.listdir(url):\n #如果是以py为后缀的文件\n if len(py.split('.')) == 2 and py.split('.')[1] == 'py':\n fileInfo = findCodeLine(url + py)\n print('file %s infomation: %s' %(url + py, fileInfo))\n\nfindPY('0007/')","sub_path":"0007.py","file_name":"0007.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"445304096","text":"vi=int(input())\nv1=2**vi\nlist1=[]\nfor i in range(0,v1):\n l=bin(i)[2:].zfill(vi)\n if(len(l) 0:\n taxes = re.split(\"#\", listTaxName)\n for i in range(0, len(taxes)):\n if taxes[i] != \"\":\n self.showtax.select_tax_multiple_by_index(i, taxes[i])\n else:\n self.showtax.unselect_checkbox(\"detail_show_taxes_multiple_checkbox\")\n else:\n self.logger.info(\"Show taxes checkbox is not visible\")\n return self\n \n def input_expense_multiple_value(self, catparent=None, childcat=None, report=None, client=None, attendee=None, note =None, department=None, clazz=None, location=None,\n project=None, listTag=None, reimbursable=None, billable=None, showTaxesFlag=None, listTaxName=None):\n if catparent is not None:\n self.logger.info(\">>> Select category expense <<<\")\n self.select_category_dropdown(catparent, childcat)\n if report is not None:\n self.logger.info(\">>> Select report expense <<<\")\n self.select_report_dropdown(report)\n if project is not None:\n self.logger.info(\">>> Input Project <<<\")\n self.input_project(project)\n if client is not None:\n self.logger.info(\">>> Input Client <<<\")\n self.input_client(client)\n if attendee is not None:\n self.logger.info(\">>> Input Attendees <<<\")\n self.input_attendees(attendee)\n if note is not None:\n self.logger.info(\">>> Input Attendee <<<\")\n self.input_note(note)\n if department is not None:\n self.logger.info(\">>> Input Department <<<\")\n self.input_deparment(department)\n if clazz is not None:\n self.logger.info(\">>> Input Class <<<\")\n self.input_class(clazz)\n if location is not None:\n self.logger.info(\">>> Input Location <<<\")\n self.input_location(location)\n if listTag is not None:\n self.logger.info(\">>> Input tax <<<\")\n self.add_new_tag(listTag)\n if reimbursable is not None:\n self.logger.info(\">>> Select Reimbursable <<<\")\n self.select_reimbursable(reimbursable)\n if billable is not None:\n self.logger.info(\">>> Select Billable <<<\")\n self.select_billable(billable)\n if showTaxesFlag is not None:\n self.show_taxes(showTaxesFlag, listTaxName)\n self._wait_to_load()\n return self","sub_path":"expense-ui-robot-tests/PythonExpenseAutomationTest/python/component/dialog/EditMultipleExpenseDialogComponent.py","file_name":"EditMultipleExpenseDialogComponent.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"284707578","text":"from flask import (\n Blueprint,\n request,\n g,\n)\n\nfrom models.comment import Comment\n\nfrom api.auth import (\n login_required,\n same_user_required,\n)\n\nfrom api.helper import (\n json_response,\n comments_dict,\n comment_dict,\n)\n\n\ndef all(weibo_id):\n comments = Comment.all(weibo_id=weibo_id)\n comments = comments_dict(comments)\n return json_response(comments)\n\n\ndef add():\n form = request.get_json()\n options = {\n 'content': form['content'],\n 'weibo_id': form['weibo_id'],\n 'user_id': g.user.id,\n }\n comment = Comment.new(**options)\n comment = comment_dict(comment)\n return json_response(comment)\n\n\ndef delete(id):\n comment = Comment.delete(id)\n comment = comment_dict(comment)\n return json_response(comment)\n\n\ndef update(id):\n form = request.get_json()\n options = {\n 'content': form['content'],\n }\n comment = Comment.update(id, **options)\n comment = comment_dict(comment)\n return json_response(comment)\n\n\ndef init_routes():\n main = Blueprint('api_comment', __name__)\n main.route('//all')(login_required(all))\n main.route('/add', methods=['POST'])(login_required(add))\n main.route('/', methods=['DELETE'])(login_required(same_user_required(delete, Comment)))\n main.route('/', methods=['POST'])(login_required(same_user_required(update, Comment)))\n return main\n","sub_path":"api/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"528022035","text":"import cv2 as cv\r\nfrom skimage.color import rgb2gray\r\nimport numpy as np\r\nfrom skimage import filters\r\nfrom scipy import signal\r\nfrom skimage.segmentation import watershed\r\nfrom matplotlib import pyplot\r\n\r\n\r\ndata = cv.VideoCapture('ball2.mp4')\r\n\r\n\r\nkernel = np.ones((5,5), np.uint8)\r\nret, frame1 = data.read()\r\nwhile data.isOpened():\r\n try:\r\n ret, frame2 = data.read()\r\n gray1 = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)\r\n gray2 = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)\r\n except cv.error as e:\r\n break\r\n gray1 = cv.blur(gray1, (7, 7))\r\n gray2 = cv.blur(gray2, (7, 7))\r\n\r\n edge = cv.Laplacian(gray2, -1)\r\n # gray = np.uint8(gray)\r\n diff1 = cv.absdiff(gray2, gray1)\r\n ret, bin1 = cv.threshold(diff1, 0, 255, cv.THRESH_OTSU)\r\n ret, edge_bin = cv.threshold(edge, 0, 1, cv.THRESH_BINARY_INV+cv.THRESH_OTSU)\r\n erosion1 = cv.dilate(bin1, kernel, iterations=2)\r\n erosion1 = cv.erode(erosion1, kernel, iterations=7)\r\n morpho = cv.erode(edge_bin, kernel, iterations=25)\r\n dummy = morpho.copy()\r\n # erosion1 = cv.dilate(erosion1, kernel, iterations=3)\r\n ret, markers = cv.connectedComponents(erosion1)\r\n markers1 = morpho + markers\r\n markers1 = np.int32(markers1)\r\n dummy1 = markers1.copy()\r\n ganesh = cv.watershed(frame2, markers1)\r\n ganesh = np.uint8(ganesh)\r\n ganesh1 = frame2.copy()\r\n ganesh1[:,:,1] = cv.add(ganesh1[:,:,1] ,ganesh)\r\n # pyplot.imshow(ganesh)\r\n # pyplot.show()\r\n # cv.imshow('ganesh',frame2)\r\n cv.imshow('ganesh2',ganesh1)\r\n\r\n cv.waitKey(40)\r\n frame1 = frame2.copy()\r\n\r\n\r\n\r\n # for cnt in contours:\r\n # rect = cv.minAreaRect(cnt)\r\n # box = cv.boxPoints(rect)\r\n # box = np.int0(box)\r\n # ganesh1 = cv.drawContours(ganesh, [box], 0, (0, 255, 0), 20)\r\n # print(cnt)\r\n # pyplot.subplot(221)\r\n # pyplot.imshow(markers)\r\n # pyplot.subplot(222)\r\n # pyplot.imshow(morpho)\r\n # pyplot.subplot(223)\r\n # pyplot.imshow(dummy1)\r\n # pyplot.subplot(224)\r\n # pyplot.imshow(ganesh)\r\n # pyplot.show()\r\ncv.destroyAllWindows()\r\ndata.release()","sub_path":"Ball2/trial1.0.4.5(final).py","file_name":"trial1.0.4.5(final).py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"359517026","text":"from sys import argv\nimport random\nscript = argv\nmax_q = int(input(\"max_q available-->\"))\nfile=open(\"quess.txt\", 'a')\n\ndef entry():\n global ques_no\n print(\"\"\"Enter the option of operation which you want to do\n 1.face questions 2.assign answers\"\"\")\n a = int(input(\"...\"))\n if a == 1:\n ques_no = int(input(\"Enter the no.of questions you want to face-->\"))\n return file_write()\n elif a==2:\n assign_ans()\n\n else:\n exit(0)\n\ndef file_write():\n global numbers\n global ques_no\n i = 1\n numbers = []\n file.truncate(0)\n\n while i < max_q+1:\n numbers.append(i)\n i = i + 1\n \n print(\"DONE!!!\")\n gettin_q()\n\ndef gettin_q():\n global numbers\n global used\n\n used = []\n while len(used) <= ques_no-1:\n b = random.randint(0,5)\n a = numbers[b]\n used.append(a)\n used = [x for n, x in enumerate(used) if x not in used[:n]]\n \n used.sort()\n print(used)\n gettin_a()\n\ndef gettin_a():\n global used\n ans = open('ans.txt').read().splitlines()\n ans_dis = []\n n = 0\n\n while len(ans_dis) != len(used):\n input(\"To get the ans press enter:--->\")\n m = (used[n])-1\n print(ans[m])\n ans_dis.append(ans[m])\n n+=1\n\ndef assign_ans():\n ans = 1\n file = open('ans.txt','w')\n file.truncate(0)\n while ans != max_q+1:\n print(f\"Enter the ans for {ans} ans\")\n answ = input(\">\")\n file.write(answ)\n file.write('\\n')\n ans+=1\n\nentry()\n","sub_path":"result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"341088217","text":"#!/usr/bin/python3\n#\n# Grabs local versions of the known dotfiles.\n# \nimport os\nimport shutil\nimport sys\n\nprint(\"Saving settings into repo.\")\n\nhome_dir = os.path.expanduser('~')\nsave_dir = 'profile_customization'\nsaved_dotfiles = os.listdir(save_dir)\n\nfor filename in saved_dotfiles:\n src = os.path.join(home_dir, filename)\n dst = os.path.join(save_dir, filename)\n\n if not os.path.exists(src):\n print(f'No local version of {filename} exists')\n continue\n\n if not os.path.isfile(src):\n print('Unable to copy {src}')\n continue\n\n sys.stdout.write(f'Copying {filename} ... ')\n written_file = shutil.copy(src, dst)\n\n if not os.path.exists(written_file):\n print('failed')\n else:\n print('done')\n\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"189297927","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\n\nclass Folios(models.Model):\n _name = 'folio.datos'\n\n prefijo = fields.Char(\n string=\"Prefijo\",\n size=20,\n required=True,\n )\n\n secuencial = fields.Integer(\n string=\"Número actual\",\n default=0,\n )\n\n _sql_constraints = [\n ('prefijo_unique',\n 'UNIQUE(prefijo)',\n (\"El prefijo ya existe, asegurese de que este sea único \\\n o eliminar el anterior para reiniciar la cuenta\")),\n ]\n\n @api.model\n def get_folio(self, usoprefijo):\n # Buscar si existe el folio en la tabla de folios\n datos_folio = self.search([('prefijo', '=', usoprefijo)])\n\n # Verificar ya esta registrado el folio\n if datos_folio:\n numeralfinal = datos_folio.secuencial\n numeral_consecutivo = numeralfinal + 1\n datos_folio.write({\n 'secuencial': numeral_consecutivo,\n })\n longfolio = 1000 + numeral_consecutivo\n strfolio = str(longfolio)\n folio_final = strfolio[1:4]\n return folio_final\n else:\n numerostr = \"001\"\n numero = 1\n self.create({\n 'prefijo': usoprefijo,\n 'secuencial': numero,\n })\n return numerostr\n","sub_path":"folios/models/folios.py","file_name":"folios.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"433414142","text":"''' In determinate occasioni ci capita di dover scrivere i numeri in lettere, \nad esempio quando dobbiamo compilare un assegno. \nPuo' capitare che alcuni numeri facciano sorgere in noi qualche dubbio.\n\nLe perplessita' nascono soprattutto nella scrittura dei numeri composti con 1 e 8. \nTutti i numeri come venti, trenta, quaranta, cinquanta, ecc... elidono la vocale \nfinale (la \"i\" per 20, la \"a\" per tutti gli altri) fondendola con la vocale iniziale \ndel numero successivo; scriveremo quindi ventuno, ventotto, trentotto, \ncinquantuno ecc...\n\nIl numero cento, nella formazione dei numeri composti con uno e otto, non si comporta \ncosi'; il numero \"cento\" e tutte le centinaia (duecento, trecento, ecc...), \ninfatti, non elidono la vocale finale. Dunque non scriveremo centuno, trecentotto ma centouno, \ntrecentootto, ecc...\n\nI numeri composti dalle centinaia e dalla decina \"ottanta\" invece tornano ad elidere \nla vocale finale; scriveremo quindi centottanta, duecentottanta, ecc..., \nnon centoottanta, duecentoottanta, ...\n\nIl numero \"mille\" non elide in nessun numero composto la vocale finale; scriveremo \nquindi milleuno, milleotto, milleottanta, ecc...\n\nAltri esempi sono elencati nel file grade02.txt\n\n\nScrivere una funzione conv(n) che prende in input un intero n, con 020 and n<=100:\n return venti_cento(n,stringa,dict1)\n if n>100 and n<=999:\n return cento_novecentonovantanove(n,stringa,dict1) \n if n>=1000 and n<=9999:\n return mille_novemila(n,stringa,dict1)\n if n>=10000 and n<=99999:\n return diecimila_novantanovemila(n,stringa,dict1)\n if n>=100000 and n<999999999:\n return centomila_inp(n,stringa,dict1) \n \n'''questa funzione scrive i numeri da 0 a 20''' \ndef zero_venti(n,stringa,dict1):\n stringa+=dict1[n]\n return stringa \n\n'''questa funzione scrive i numeri da venti a cento'''\ndef venti_cento(n,stringa,dict1):\n num=str(n) #numero trasformato in stringa\n n1= n-int(num[-1]) \n n2= int(num[-1])\n primoElemento= dict1[n1]\n if n2==0:\n stringa+=primoElemento\n return stringa\n elif dict1[n2]=='uno' or dict1[n2]=='otto':\n primoElemento=primoElemento[:-1]\n stringa+= primoElemento+dict1[n2]\n return stringa\n else:\n stringa+= dict1[n1]+dict1[n2]\n return stringa \n \ndef cento_novecentonovantanove(n,stringa,dict1): \n dict2={1:'cento',2:'duecento',3:'trecento',4:'quattrocento',5:'cinquecento',6:'seicento',7:'settecento',8:'ottocento',9:'novecento'}\n dict3={1:'dieci',2:'venti',3:'trenta',4:'quaranta',5:'cinquanta',6:'sessanta', 7:'settanta',8:'ottanta', 9:'novanta'}\n num=str(n)\n primaCifra=int(num[0])\n secondaCifra=int(num[1])\n terzaCifra=int(num[2])\n stringa+=dict2[primaCifra]\n \n if n==118:\n stringa='centodiciotto'\n return stringa\n if secondaCifra==0:\n if terzaCifra==8:\n stringa+='otto'\n return stringa\n else:\n stringa+=dict1[terzaCifra]\n return stringa\n elif secondaCifra==1 and terzaCifra!=0:\n num2=str(n)\n secondaCifra2=int(num2[1:])\n stringa+=dict1[secondaCifra2]\n return stringa\n elif secondaCifra==8:\n if terzaCifra==0:\n stringa+='ttanta' #cambiata per il test\n return stringa\n else:\n #caso 981--ecc\n cifra3=dict1[terzaCifra]\n if cifra3[0]=='u' or cifra3[0]=='o':\n stringa+='ttant'+dict1[terzaCifra] #cambiata per il test\n return stringa\n else:\n stringa+='ttanta'+dict1[terzaCifra] #cambiata per il test\n return stringa\n \n else:\n stringa+=dict3[secondaCifra]\n if terzaCifra!=0:\n if terzaCifra==8:\n stringa=stringa[:-1]\n stringa+='otto'\n return stringa\n else:\n 
stringa+=dict1[terzaCifra]\n return stringa\n else:\n return stringa\ndef mille_novemila(n,stringa,dict1):\n dict4={1:'mille',2:'duemila',3:'tremila',4:'quattromila',5:'cinquemila',6:'seimila',7:'settemila',8:'ottomila',9:'novemila'}\n num2=str(n)\n for i in range(1000,9999,1000):\n numeroStr=str(i)\n if numeroStr[0]==num2[0]:\n stringa=(dict4[int(numeroStr[0])])\n break\n if int(num2[1:])==000:\n return stringa\n if int(num2[1])!=0:\n stringaSum=''\n numSecondaCifra=int(num2[1:])\n stringaSum=cento_novecentonovantanove(numSecondaCifra,stringaSum,dict1)\n stringa+=stringaSum\n return stringa\n elif int(num2[1])==0:\n stringaSum=''\n numSecondaCifra=int(num2[2:])\n terzaCifra=int(num2[2])\n if terzaCifra==0:\n quartaCifra=int(num2[3])\n stringaSum=zero_venti(quartaCifra,stringaSum,dict1)\n stringa+=stringaSum\n return stringa\n else: \n stringaSum=venti_cento(numSecondaCifra,stringaSum,dict1)\n stringa+=stringaSum\n return stringa\ndef diecimila_novantanovemila(n,stringa,dict1):\n num3=str(n)\n dict5={1:'cento',2:'duecento',3:'trecento',4:'quattrocento',5:'cinquecento',6:'seicento',7:'settecento',8:'ottocento',9:'novecento'}\n dict6={2:'dodici',3:'tredici',4:'quattordici',5:'quindici',6:'sedici',7:'diciassette',8:'diciotto',9:'diciannove'}\n for i in range(1000,100000,1000):\n numeroStr=str(i)\n if numeroStr[0]==num3[0] and num3[1:]=='0000':\n # handles numbers like 10000\n numeroStr=int(num3[0])\n stringa+=dict5[numeroStr]+'mila'\n return stringa\n if numeroStr[0]==num3[0] and num3[1]!='0' and num3[2:]=='000':\n # handles numbers like 17000\n if num3[1]=='1':\n stringa+='undicimila'\n if num3[2]!='0':\n # fix the 11100 error\n stringa+=dict5[int(num3[2])]\n if num3[3]!='0' and int(num3[3:])<20:\n stringa+=zero_venti(int(num3[3:]),stringa,dict1)\n return stringa\n elif num3[3]!='0' and int(num3[3:])>=20:\n stringa+=venti_cento(int(num3[3:]),stringa,dict1)\n return stringa\n else:\n return stringa\n \n else:\n stringa+=dict6[int(num3[1])]+'mila'\n return stringa\n elif numeroStr[0]==num3[0] and num3[1]!='0' and num3[2]=='0':\n # handles numbers like 12040\n if int(num3[3:])<20:\n stringa+=dict6[int(num3[1])]+'mila'+zero_venti(int(num3[3:]),stringa,dict1)\n return stringa\n elif int(num3[3:])>20:\n stringa+=dict6[int(num3[1])]+'mila'+venti_cento(int(num3[3:]),stringa,dict1)\n return stringa\n if numeroStr[0]==num3[0] and num3[1]!='0' and num3[2]!='0' and num3[3:]=='00':\n # handles numbers like 17200\n stringa+=dict6[int(num3[1])]+'mila'+dict5[int(num3[2])]\n return stringa\n elif numeroStr[0]==num3[0] and num3[1]=='0' and num3[2]!='0':\n # handles numbers like 10200\n stringa+=dict5[int(num3[0])]+'mila'+dict5[int(num3[2])]\n return stringa\n if numeroStr[0]==num3[0] and num3[1]!='0' and num3[2]!='0' and num3[3]!='0' and num3[4]=='0':\n # handles numbers like 17210\n if int(num3[3:])<20:\n stringa+=dict6[int(num3[1])]+'mila'+dict5[2]+zero_venti(int(num3[3:]),stringa,dict1)\n return stringa\n else:\n stringa+=dict6[int(num3[1])]+'mila'+dict5[2]+venti_cento(int(num3[3:]),stringa,dict1)\n return stringa\n if numeroStr[0]==num3[0] and num3[1]!='0' and num3[2]!='0' and num3[3]!='0' and num3[4]!='0':\n # handles numbers like 14597\n if int(num3[3:])<20:\n stringa+=dict6[int(num3[1])]+'mila'+dict5[2]+zero_venti(int(num3[3:]),stringa,dict1)\n return stringa\n elif int(num3[3:])>20:\n stringa+=dict6[int(num3[1])]+'mila'+dict5[2]+venti_cento(int(num3[3:]),stringa,dict1)\n return stringa\n if numeroStr[0]==num3[0] and num3[3:]!='00':\n 
# handles numbers like 10020\n if int(num3[3:])<20:\n stringa+=dict5[int(num3[0])]+'mila'+zero_venti(int(num3[3:]),stringa,dict1)\n return stringa\n elif int(num3[3:])>=20:\n stringa+=dict5[int(num3[0])]+'mila'+venti_cento(int(num3[3:]),stringa,dict1)\n return stringa\n \ndef centomila_inp(n,stringa,dict1):\n num4=str(n)\n numparte1=int(num4[:3])\n stringapp1=cento_novecentonovantanove(numparte1,stringa,dict1)+'milioni'\n numparte2=int(num4[3:])\n if numparte2<=9999:\n stringapp2=mille_novemila(numparte2,stringa,dict1)\n stringa+=stringapp1+stringapp2\n return stringa\n elif numparte2>=10000:\n stringapp2=diecimila_novantanovemila(numparte2,stringa,dict1)\n stringa+=stringapp1+stringapp2\n return stringa\n \n \n \n \n \n ","sub_path":"students/1815606/homework01/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":11531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
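The elision rule the docstring above describes for tens words (drop the final vowel before "uno" and "otto", which start with a vowel) fits in a few lines; a minimal sketch, independent of the homework code:

```python
def attach_unit(tens_word, unit_word):
    # Tens words ("venti", "trenta", ...) elide their final vowel when the
    # unit word starts with one: "venti" + "uno" -> "ventuno".
    # Note: hundreds ("cento", ...) do NOT follow this rule.
    if unit_word and unit_word[0] in "aeiou":
        return tens_word[:-1] + unit_word
    return tens_word + unit_word

assert attach_unit("venti", "uno") == "ventuno"
assert attach_unit("trenta", "otto") == "trentotto"
assert attach_unit("venti", "due") == "ventidue"
```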
+{"seq_id":"390159602","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python\n\nimport json, urllib,urllib2, os\n\ndef getURL(parsed):\n q = parsed['~query'] if '~query' in parsed else \"\";\n q = q.lower().strip();\n url = \"http://cocoapods.wantedly.com/?o=popularity&q=\"+format(urllib.quote(q))\n # print url;\n return url;\n\ndef getContent(url):\n response = urllib2.urlopen(url).read()\n response=response.replace('href=\"/','href=\"http://cocoapods.wantedly.com/')\n return response\n\ndef results(parsed, original_query):\n html = \"\"\n try:\n url = getURL(parsed)\n html = getContent(url)\n except:\n error_file = open('error.html')\n html = error_file.read().decode('utf-8')\n error_file.close()\n # print html\n\n return {\n \"title\": \"CocoaPods\",\n \"html\": html,\n }\n\n# results({\"~query\":\"20140822\"},{});","sub_path":"CocoaPods.bundle/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"641324365","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ns = list(input())\n\nfor i in range(len(s)):\n if not (s[i] == s[-(i+1)] or s[i] == '*' or s[-(i+1)] == '*'):\n print('NO')\n break\nelse:\n print('YES')\n\n \n","sub_path":"Atcoder/ARC035/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"594853678","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom matplotlib import rc\nrc(\"font\", family=\"serif\", size=12)\nrc(\"text\", usetex=True)\nrc(\"text.latex\",\n preamble=r\"\\input{{{0}}}\"\n .format(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"vars\")))\n\nimport daft\n\npgm = daft.PGM([3, 2.5], origin=[-1, 1])\n\npgm.add_plate(daft.Plate([-0.5, 1.4, 2.0, 1.1], label=r\"$n=1,\\cdots,N$\",\n position=\"bottom right\"))\n\npgm.add_node(daft.Node(\"hyperhyper\", r\"\\hyperhyper\", -0.5, 3.0, fixed=True))\npgm.add_node(daft.Node(\"hyper\", r\"$\\hyper$\", 0.5, 3.0))\npgm.add_node(daft.Node(\"local\", r\"$\\local_n$\", 0, 2))\npgm.add_node(daft.Node(\"data\", r\"$\\data_n$\", 1, 2, observed=True))\n\npgm.add_edge(\"hyperhyper\", \"hyper\")\npgm.add_edge(\"hyper\", \"local\")\npgm.add_edge(\"hyper\", \"data\")\npgm.add_edge(\"local\", \"data\")\n\npgm.render()\npgm.figure.savefig(\"abstract.png\", dpi=150)\npgm.figure.savefig(\"abstract.pdf\")\n","sub_path":"document/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"377352114","text":"from app import db\n\nproject_tree = db.Table(\n 'project_tree',\n db.Column('parent_id', db.Integer, db.ForeignKey('project.id')),\n db.Column('children_id', db.Integer, db.ForeignKey('project.id'))\n)\n\nclass Project(db.Model):\n __tablename__ = 'project'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), index=True, unique=True)\n description = db.Column(db.String)\n start_time = db.Column(db.DateTime)\n stop_time = db.Column(db.DateTime)\n parents = db.relationship(\n 'Project',\n secondary=project_tree,\n primaryjoin=(\"Project.id==project_tree.c.children_id\"),\n secondaryjoin=(\"Project.id==project_tree.c.parent_id\"),\n backref=db.backref('children', lazy='dynamic'),\n lazy='dynamic'\n )\n\n def __repr__(self):\n return ''.format(self.name)\n\n def __init__(self, name, description):\n self.name = name\n self.description = description\n\n def serialize(self):\n return {\n \"name\": self.name,\n \"description\": self.description,\n \"start_time\": self.start_time.timestamp() if self.start_time else None,\n \"stop_time\": self.stop_time.timestamp() if self.stop_time else None,\n \"id\": self.id\n }\n","sub_path":"backend/app/models/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"532492021","text":"from tkinter import *\nfrom tkinter import ttk\n\n\nclass bSetPlayers(ttk.Frame):\n \"\"\"Binokel Players Frame\"\"\"\n\n def __init__(self, master=None, **kw):\n self._playerlist = kw.pop('playerlist', None)\n if not self._playerlist:\n print('NO PLAYERS SET')\n\n ttk.Frame.__init__(self, master, **kw)\n\n self._master = master\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n self.rowconfigure(1, weight=0)\n\n self._text = self._addText()\n self._addOKCancel()\n for player in self._playerlist:\n self._text.insert(END, player + '\\n')\n\n def _addText(self):\n _text = Text(self, width=15, height=20, font=\"Helvetica 16\")\n _text.grid(row=0, column=0, sticky=(W, N, E, S))\n return _text\n\n def _addOKCancel(self):\n _s = ttk.Style()\n _s.configure('players.TButton', font=('Helvetica', 16))\n\n _okc_frame = ttk.Frame(self)\n _okc_frame.grid(row=1, column=0, sticky=(N, E, S))\n\n _c_btn = ttk.Button(\n _okc_frame, text='←',\n command=self._quit,\n style='players.TButton')\n _c_btn.grid(row=0, column=0, sticky=E, padx=5)\n _ok_btn = ttk.Button(\n _okc_frame, text='OK',\n command=self._save,\n style='players.TButton')\n _ok_btn.grid(row=0, column=1, sticky=E, padx=5)\n\n def _save(self):\n self._playerlist.clear()\n self._playerlist += ([s.strip()\n for s in self._text.get(\"1.0\", END).split('\\n')\n if s])\n self._quit()\n\n def _quit(self):\n self._master.grab_release()\n self._master.destroy()\n\n\ndef test():\n _root = Tk()\n _root.columnconfigure(0, weight=1)\n _root.rowconfigure(0, weight=1)\n _pl = bSetPlayers(_root, players=['a', 'b', 'c'])\n _pl.grid(row=0, column=0, sticky=(W, N, E, S))\n\n _root.mainloop()\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"archive/binokel/bSetPlayers.py","file_name":"bSetPlayers.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"509180150","text":"from collections import deque\n\nn = int(input())\nfirstQueue = deque(list(map(int, input().split())))\nsecondQueue = deque(list(map(int, input().split())))\n\n# print(first, second)\n\ndef firstBeatsSecond(first, second, n):\n if first == 0 and second == n-1:\n return True\n if second == 0 and first == n-1:\n return False\n if first > second:\n return True\n return False\n\ndef solve(firstQueue, secondQueue, n):\n for i in range(1, 2*(10**5)+1):\n first = firstQueue.popleft()\n second = secondQueue.popleft()\n if firstBeatsSecond(first, second, n):\n firstQueue.append(first)\n firstQueue.append(second)\n else:\n secondQueue.append(first)\n secondQueue.append(second)\n if len(firstQueue) == 0:\n print(\"second\", i)\n return\n if len(secondQueue) == 0:\n print(\"first\", i)\n return\n\n print(\"draw\")\n\n\n\n\nsolve(firstQueue, secondQueue, n)","sub_path":"codeforces/contests/timur/stack/E_drunk.py","file_name":"E_drunk.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"318449612","text":"#might contain imports which are not used\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom rest_framework import generics\nfrom serializers import MozioProviderSerializer, PolygonSerializer\nfrom models import MozioProvider, Polygon\nfrom django.http import JsonResponse\nimport json\nfrom django.core import serializers\nfrom django.forms.models import model_to_dict\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom django.contrib.gis.geos import Point\nfrom django.db.models import Count\n\n\nclass JSONResponse(HttpResponse):\n\t\"\"\"\n\tAn HttpResponse that renders its content into JSON.\n\t\"\"\"\n\tdef __init__(self, data, **kwargs):\n\t\tcontent = JSONRenderer().render(data)\n\t\tkwargs['content_type'] = 'application/json'\n\t\tsuper(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef index(request):\n return MozioProvider.objects.all()\n\n\n@csrf_exempt\ndef providers_list(request):\n\t\"\"\"\n\tList all providers, or create a new provider.\n\t\"\"\"\n\tif request.method == 'GET':\n\t snippets = MozioProvider.objects.all()\n\t serializer = MozioProviderSerializer(snippets, many=True)\n\t return JSONResponse(serializer.data)\n\n\telif request.method == 'POST':\n\t data = JSONParser().parse(request)\n\t serializer = MozioProviderSerializer(data=data)\n\t if serializer.is_valid():\n\t serializer.save()\n\t return JSONResponse(serializer.data, status=201)\n\t return JSONResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef provider_detail(request, pk):\n\t\"\"\"\n\tRetrieve, update or delete a provider.\n\t\"\"\"\n\ttry:\n\t snippet = MozioProvider.objects.get(pk=pk)\n\texcept MozioProvider.DoesNotExist:\n\t return HttpResponse(status=404)\n\n\tif request.method == 'GET':\n\t serializer = MozioProviderSerializer(snippet)\n\t return JSONResponse(serializer.data)\n\n\telif request.method == 'PUT':\n\t data = JSONParser().parse(request)\n\t serializer = MozioProviderSerializer(snippet, data=data)\n\t if serializer.is_valid():\n\t serializer.save()\n\t return JSONResponse(serializer.data)\n\t return JSONResponse(serializer.errors, status=400)\n\n\telif request.method == 'DELETE':\n\t snippet.delete()\n\t return HttpResponse(status=204)\n\n\n@csrf_exempt\ndef polygon_list(request):\n\t\"\"\"\n\tList all polygons, or create a polygon.\n\t\"\"\"\n\tif request.method == 'GET':\n\t snippets = Polygon.objects.all()\n\t serializer = PolygonSerializer(snippets, many=True)\n\t return JSONResponse(serializer.data)\n\n\telif request.method == 'POST':\n\t data = JSONParser().parse(request)\n\t serializer = PolygonSerializer(data=data)\n\t if serializer.is_valid():\n\t serializer.save()\n\t return JSONResponse(serializer.data, status=201)\n\t return JSONResponse(serializer.errors, status=400)\n\n@csrf_exempt\ndef polygon_detail(request, pk):\n\t\"\"\"\n\tRetrieve, update or delete a polygon.\n\t\"\"\"\n\ttry:\n\t snippet = Polygon.objects.get(pk=pk)\n\texcept Polygon.DoesNotExist:\n\t return HttpResponse(status=404)\n\n\tif request.method == 'GET':\n\t serializer = PolygonSerializer(snippet)\n\t return JSONResponse(serializer.data)\n\n\telif request.method == 'PUT':\n\t data = JSONParser().parse(request)\n\t serializer = PolygonSerializer(snippet, data=data)\n\t if serializer.is_valid():\n\t serializer.save()\n\t return JSONResponse(serializer.data)\n\t return JSONResponse(serializer.errors, status=400)\n\n\telif 
request.method == 'DELETE':\n\t snippet.delete()\n\t return HttpResponse(status=204)\n\n\n\ndef check_point_in_polygon(request):\n\t\"\"\"\n\t\tReads GET request params 'lat' and 'lng' from the URL and makes a point using the params\n\t\tReturns a querySet containing multiple Polygon objects, such that the point lies inside\n\t\tthe geopolygon\n\t\"\"\"\n\tif request.method == 'GET':\n\t latitude = float(request.GET.get('lat', ''))\n\t longitude = float(request.GET.get('lng', ''))\n\t point = Point(longitude, latitude) # GEOS points take (x, y) = (lng, lat)\n\t snippets = Polygon.objects.filter(geopolygon__bbcontains=point)\n\t serializer = PolygonSerializer(snippets, many=True)\n\t return JSONResponse(serializer.data)\n\n\n\"\"\"\nFollowing classes provide a nice UI to handle GET/POST/PUT/DELETE requests on the models from the browser\n\"\"\"\nclass MozioList(generics.ListCreateAPIView):\n\tqueryset = MozioProvider.objects.all()\n\tserializer_class = MozioProviderSerializer\n\nclass MozioDetail(generics.RetrieveUpdateDestroyAPIView):\n\tqueryset = MozioProvider.objects.all()\n\tserializer_class = MozioProviderSerializer\n\nclass MozioPolygonList(generics.ListCreateAPIView):\n\tqueryset = Polygon.objects.all()\n\tserializer_class = PolygonSerializer\n\nclass MozioPolygonDetail(generics.RetrieveUpdateDestroyAPIView):\n\tqueryset = Polygon.objects.all()\n\tserializer_class = PolygonSerializer\n","sub_path":"mozio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"520492576","text":"from subprocess import check_output\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n# Any results you write to the current directory are saved as output.\n\nimport sys, os, re, csv, codecs, numpy as np, pandas as pd\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation\nfrom keras.layers import Bidirectional, GlobalMaxPool1D\nfrom keras.models import Model\nfrom keras import initializers, regularizers, constraints, optimizers, layers\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, accuracy_score, auc, roc_curve, precision_recall_fscore_support\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras import backend as K\nimport tensorflow as tf\n\n\n#set parameters\nembed_size = 50 # how big is each word vector\nmax_features = 25000 # how many unique words to use (i.e num rows in embedding vector)\ntrain_review_path = \"/home/yankun/spamGAN/dividedData/labeled50/train_review.txt\"\ntrain_label_path = \"/home/yankun/spamGAN/dividedData/labeled50/train_label.txt\"\nval_review_path = \"/home/yankun/spamGAN/dividedData/labeled50/val_review.txt\"\nval_label_path = \"/home/yankun/spamGAN/dividedData/labeled50/val_label.txt\"\ntest_review_path = \"/home/yankun/spamGAN/dividedData/test_review.txt\"\ntest_label_path = \"/home/yankun/spamGAN/dividedData/test_label.txt\"\nembedding_file_path = \"/home/yankun/rnn/embedding/glove.6B.50d.txt\"\nresult_path = \"/home/yankun/rnn/result/result.csv\"\n\nfor i in range(8):\n def load_dataset():\n train_review, train_label, val_review, val_label, test_review, test_label= [],[],[],[],[],[]\n with open(train_review_path, 'r') as f1, open(train_label_path, 'r') as f2:\n txts = f1.readlines()\n labs = f2.readlines()\n train_review = [txt for txt in txts]\n train_label = [int(lab) for lab in labs]\n with open(val_review_path, 'r') as f1 ,open(val_label_path, 'r') as f2:\n txts = f1.readlines()\n labs = f2.readlines()\n val_review = [txt for txt in txts]\n val_label = [int(lab) for lab in labs]\n with open(test_review_path, 'r') as f1 ,open(test_label_path, 'r') as f2:\n txts = f1.readlines()\n labs = f2.readlines()\n test_review = [txt for txt in txts]\n test_label = [int(lab) for lab in labs]\n return train_review, train_label, val_review, val_label, test_review, test_label\n\n train_review, train_label, val_review, val_label, test_review, test_label = load_dataset()\n arr=[1 if i == 0 else 0 for i in train_label]\n y_train = np.stack((train_label, arr), axis=1)\n\n arr2 = [1 if i == 0 else 0 for i in test_label]\n y_test = np.stack((test_label, arr2), axis=1)\n\n arr3 = [1 if i == 0 else 0 for i in val_label]\n y_val = np.stack((val_label, arr3), axis=1)\n\n\n #get the max length of sentences\n sentences = train_review\n tokenized_sentences = [word_tokenize(sentence) for sentence in sentences]\n maxlength = max(len(sentences) for sentences in tokenized_sentences)\n\n tokenizer = Tokenizer(num_words=max_features)\n tokenizer.fit_on_texts(list(train_review))\n list_tokenized_train = tokenizer.texts_to_sequences(train_review)\n list_tokenized_val = tokenizer.texts_to_sequences(val_review)\n list_tokenized_test = tokenizer.texts_to_sequences(test_review)\n X_t = pad_sequences(list_tokenized_train, maxlen=maxlength)\n X_te = pad_sequences(list_tokenized_test, maxlen=maxlength)\n X_val = pad_sequences(list_tokenized_val, 
maxlen=maxlength)\n\n val = (np.array(X_val), y_val)\n\n def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\n embeddings_index = dict(get_coefs(*o.strip().split()) for o in open(embedding_file_path, 'r'))\n\n all_embs = np.stack(embeddings_index.values())\n emb_mean,emb_std = all_embs.mean(), all_embs.std()\n\n #assign embedding vector to each word\n word_index = tokenizer.word_index\n nb_words = min(max_features, len(word_index))\n embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))\n print(nb_words, len(word_index), embedding_matrix.shape)\n for word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix[i-1] = embedding_vector\n\n max_features = min(max_features, len(word_index))\n checkpoint = ModelCheckpoint('/home/yankun/rnn/output/{acc:.4f}.hdf5', monitor='acc', verbose=1, save_best_only=True, mode='auto')\n inp = Input(shape=(maxlength,))\n x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp)\n x = Bidirectional(LSTM(100, return_sequences=True, dropout=0.25, recurrent_dropout=0.1))(x)\n x = GlobalMaxPool1D()(x)\n x = Dense(100, activation=\"relu\")(x)\n x = Dropout(0.25)(x)\n x = Dense(2, activation=\"sigmoid\")(x)\n model = Model(inputs=inp, outputs=x)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(X_t, y_train, batch_size=64, epochs=7, callbacks=[checkpoint,EarlyStopping(monitor='acc', patience=3,restore_best_weights=True)],validation_split=0.1, validation_data=val)\n\n #best = save_best_model(10, \"output\", 5, \".hdf5\")\n\n y_pred = model.predict([X_te], batch_size=32, verbose=1)\n\n\n #print(y_pred.shape)\n\n y_pred_keras = y_pred[:,0]\n #print(y_pred_keras)\n # print(y_test)\n # print(y_test[:,0])\n fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test[:,0], y_pred_keras, pos_label=0)\n\n class_label = [ 1 if y[0]>=y[1] else 0 for y in y_pred]\n tn, fp, fn, tp = confusion_matrix(y_test[:,0], class_label, labels=[0,1]).ravel()\n print(confusion_matrix(y_test[:,0], class_label, labels=[0,1]))\n print(auc(fpr_keras,tpr_keras))\n precision, recall, f_score, support = precision_recall_fscore_support(y_test[:,0],class_label)\n print(precision, recall, f_score)\n #print(y_pred, y_pred.argmax(axis=1))\n print(accuracy_score(y_test[:,0],class_label))\n\n\n thisdict =\t{\n \"tn\": tn,\n \"tp\":tp,\n \"fp\":fp,\n \"fn\":fn,\n \"auc\": auc(fpr_keras,tpr_keras),\n \"precision +ve\": precision[0],\n \"recall +ve\": recall[0],\n \"fscore +ve\": f_score[0],\n \"accuracy\": accuracy_score(y_test[:,0],class_label),\n \"precision -ve\": precision[1],\n \"recall -ve\": recall[1],\n \"fscore -ve\": f_score[1],\n }\n\n import csv\n file_exists = os.path.isfile(result_path)\n f = open(result_path,'a')\n w = csv.DictWriter(f, thisdict.keys())\n if not file_exists:\n print(\"writing header\")\n w.writeheader()\n w.writerow(thisdict)\n f.close()\n","sub_path":"other_methods/rcnn.py","file_name":"rcnn.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"101651622","text":"from fabric.api import *\nimport os\nimport time\n\n# submodules\nimport tyr\nimport maintenance\nimport openssh\nimport iscsi\n\nenv.use_ssh_config = True\nenv.no_agent = False\n#env.ssh_config_path = os.path.expanduser( '~/.fabric/ssh-config' )\n\n@task\ndef sync( mode = 'manual', type = 'tunnel' ):\n\tdirectories = { '~/Documents': ( '/srv/data/home/martin/Documents', 'documents' )\n#\t , '~/Music' : ( '/srv/data/home/Music', 'music' )\n\t , '~/IBR': ( '/srv/data/home/martin/IBR', 'ibr' )\n\t , '~/Photos' : ( '/srv/data/home/martin/Photos', 'photos' )\n\t , '~/Projects': ( '/srv/data/home/martin/Projects', 'projects' )\n\t , '~/RAW' : ( '/srv/data/home/martin/RAW', 'raw' )\n\t , '~/Videos' : ( '/srv/data/home/martin/Videos', 'videos' )\n\t }\n\ttunnelUser = 'martin'\n\ttunnelProvider = 'mittwinter.no-ip.org'\n\ttunnelLocalPort = 2200\n\tserver = '10.0.1.10'\n\tserverPort = 22\n\tserverUser = 'martin'\n\tfor syncDir, ( source, profile ) in directories.iteritems():\n\t\tsyncDir = os.path.expanduser( syncDir )\n\t\tif os.path.exists( syncDir ):\n\t\t\tif type == 'tunnel':\n\t\t\t\tlocal( 'ssh -F %s -fL %d:%s:%d %s@%s sleep 2' % ( os.path.expanduser( env.ssh_config_path ), tunnelLocalPort, server, serverPort, tunnelUser, tunnelProvider ) )\n\t\t\twith settings( warn_only = True ):\n\t\t\t\tlocal( 'unison %s -log=false -times -sshargs \"-F %s -p %d\" %s %s ssh://%s@%s/%s' % ( '-auto' if mode == 'auto' else '', os.path.expanduser( env.ssh_config_path ), tunnelLocalPort if type == 'tunnel' else serverPort, profile, syncDir, serverUser, 'localhost' if type == 'tunnel' else server, source ) )\n\t\t\t\ttime.sleep( 2 )\n\n@task\ndef vm():\n\tvirtualBoxModules = [ 'vboxdrv'\n\t , 'vboxpci'\n\t , 'vboxnetflt'\n\t , 'vboxnetadp'\n\t ]\n\tfor m in virtualBoxModules:\n\t\tlocal( 'sudo modprobe %s' % ( m ) )\n\tlocal('VirtualBox')\n\tfor m in reversed( virtualBoxModules ):\n\t\tlocal( 'sudo rmmod %s' % ( m ) )\n\n","sub_path":"fabric/.fabric/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"3640330","text":"#######################################\n### BEST Library build Purification ###\n#######################################\n\n## Description of procedure ##\n#\n#\n# Things do before procedure\n#\n#\t1. Ensure samples are room temperature and place libraries in magnetic module\n# \t2. Ensure SPRI beads are room temperature\n# 3. Make freshly made 80% Ethanol for purification\n# 4. Distribute:\n# SPRI beads to Column 1,\n# Ethanol to Column 2 and 3\n# Elution Buffer to Column 12\n#\n# Procedure\n#\n#\t\tPurification\n# \t1.\tDistribute 1.5x beads to library and mixes\n#\t2.\tRemoves supernatant and adds ethanol for washing, washing will be processed twice\n# 3. Beads will air dry for 4 minutes and 35µl elution buffer will be added\n#\t4.\tElutes will incubate for 15 minutes at room temperature and be eluted to a new plate in slot 1\n#\n#\tGood Luck!\n#\n######## IMPORT LIBRARIES ########\nfrom opentrons import protocol_api\n\n#### METADATA ####\n\nmetadata = {\n 'protocolName': 'BEST_Purification',\n 'author': 'Jacob Agerbo Rasmussen ',\n 'apiLevel': '2.2',\n 'description': 'Purification procedure of Automated single tube library preparation after Carøe et al. 2017',\n }\n\ndef run(protocol):\n #### LABWARE SETUP ####\n mag_deck = protocol.load_module('magdeck', 10)\n\n\n trough = protocol.load_labware('usascientific_12_reservoir_22ml', 7) # to add proper model of labware from https://labware.opentrons.com/\n trash_box = protocol.load_labware('agilent_1_reservoir_290ml', 8) # to add proper model of labware\n mag_plate = mag_deck.load_labware('biorad_96_wellplate_200ul_pcr')\n elution_plate = protocol.load_labware('biorad_96_wellplate_200ul_pcr', 2)\n\n tipracks_200_1 = protocol.load_labware('opentrons_96_filtertiprack_200ul', 4)\n tipracks_200_2 = protocol.load_labware('opentrons_96_filtertiprack_200ul', 1)\n tipracks_200_3 = protocol.load_labware('opentrons_96_filtertiprack_200ul', 5)\n tipracks_200_4 = protocol.load_labware('opentrons_96_filtertiprack_200ul', 6)\n\n #### PIPETTE SETUP ####\n m300 = protocol.load_instrument('p300_multi_gen2', mount='left', tip_racks=(tipracks_200_1, tipracks_200_2, tipracks_200_3, tipracks_200_4))\n # From the v1 the mount is right, but all the other protocols switch from right in v1 to left in v2. Why? 
Also here?\n\n ## Purification reagents SETUP\n SPRI_beads = trough['A1']\n EtOH1 = trough['A2']\n EtOH2 = trough['A3']\n Elution_buffer = trough['A12']\n\n Liquid_trash = trash_box['A1']\n\n ## Sample Setup\n sample_number = 96\n col_num = sample_number // 8 + (1 if sample_number % 8 > 0 else 0)\n samples = [col for col in mag_plate.columns()[:col_num]]\n\n #### VOLUME SETUP\n\n sample_vol = 50\n bead_vol = 1.5*sample_vol\n EtOH_vol = 120\n EtOH_vol2 = 120\n Elution_vol = 35\n wash_mix = 90\n #### PROTOCOL ####\n ### Beads addition\n mag_deck.disengage()\n\n list_of_cols = ['A1','A2','A3','A4','A5','A6','A7','A8','A9','A10','A11','A12']\n\n for i in list_of_cols:\n #### Transfer beads to mag_plate\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.pick_up_tip(tipracks_200_1[i]) # Slow down head speed 0.5X for bead handling\n m300.move_to(SPRI_beads.top(-30))\n m300.mix(3, bead_vol, SPRI_beads.bottom(2))\n m300.flow_rate.aspirate = 50\n m300.flow_rate.dispense = 50\n m300.aspirate(bead_vol, SPRI_beads.bottom(2))\n m300.move_to(mag_plate[i].bottom(1))\n m300.dispense(bead_vol, mag_plate[i].bottom(4))\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.mix(5, bead_vol, mag_plate[i].bottom(4))\n protocol.delay(seconds=5)\n # m300.flow_rate.aspirate = 100\n # m300.flow_rate.dispense = 100\n m300.move_to(mag_plate[i].top(-4))\n m300.blow_out(mag_plate[i].top(-4))\n # max_speed_per_axis\n # robot.head_speed\n m300.return_tip()\n\n\n protocol.comment(\"Incubating the beads and PCR products at room temperature \\\n for 5 minutes. Protocol will resume automatically.\")\n\n protocol.delay(minutes=5)\n mag_deck.engage()\n protocol.delay(minutes=2)\n\n for i in list_of_cols:\n ### Remove supernatant, by re-using tiprack 1\n ### remove supernatant from mag_plate\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.pick_up_tip(tipracks_200_1[i]) # Slow down head speed 0.5X for bead handling\n m300.aspirate(180, mag_plate[i].bottom(1))\n m300.dispense(180, trash_box['A1'].top(-5))\n protocol.delay(seconds=5)\n m300.flow_rate.aspirate = 130\n m300.flow_rate.dispense = 130\n m300.blow_out(trash_box['A1'].top(-5))\n m300.air_gap(height = 3)\n m300.return_tip()\n\n\n for i in list_of_cols:\n ### Wash 1 with Ethanol, using tiprack 2\n ### Transfer Wash 1 to mag_plate\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.pick_up_tip(tipracks_200_2[i]) # Slow down head speed 0.5X for bead handling\n m300.move_to(EtOH1.top(-16))\n m300.aspirate(EtOH_vol, EtOH1.bottom(2))\n m300.dispense(EtOH_vol, mag_plate[i].top(-4))\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.mix(5, wash_mix, mag_plate[i].bottom(5))\n protocol.delay(seconds=5)\n m300.flow_rate.aspirate = 130\n m300.flow_rate.dispense = 130\n #m300.move_to(mag_plate[i].top(-4))\n m300.blow_out(mag_plate[i].top(-4))\n m300.air_gap(height = 3)\n # m300.touch_tip()\n m300.return_tip()\n\n mag_deck.engage(height=16) # or mag_mod ?\n protocol.delay(minutes=2)\n\n for i in list_of_cols:\n ### Remove supernatant, by re-using tiprack 2\n ### remove supernatant from mag_plate\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.pick_up_tip(tipracks_200_2[i]) # Slow down head speed 0.5X for bead handling\n m300.aspirate(EtOH_vol, mag_plate[i].bottom(1))\n m300.dispense(EtOH_vol, trash_box['A1'].top(-5))\n protocol.delay(seconds=5)\n m300.flow_rate.aspirate = 130\n m300.flow_rate.dispense = 130\n m300.blow_out(trash_box['A1'].top(-5))\n 
m300.air_gap(height = 3)\n m300.drop_tip()\n\n\n for i in list_of_cols:\n ### Wash 2 with Ethanol, using tiprack 3\n ### Transfer Wash 2 to mag_plate\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.pick_up_tip(tipracks_200_3[i]) # Slow down head speed 0.5X for bead handling\n m300.move_to(EtOH2.top(-16))\n m300.aspirate(EtOH_vol2, EtOH2.bottom(2))\n m300.dispense(EtOH_vol2, mag_plate[i].top(-4))\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.mix(5, wash_mix, mag_plate[i].bottom(5))\n protocol.delay(seconds=5)\n m300.flow_rate.aspirate = 130\n m300.flow_rate.dispense = 130\n # m300.move_to(mag_plate[i].top(-4))\n m300.blow_out(mag_plate[i].top(-4))\n m300.air_gap(height = 3)\n # m300.touch_tip()\n m300.return_tip()\n\n\n mag_deck.engage(height=16)\n protocol.delay(minutes=2)\n\n for i in list_of_cols:\n ### Remove supernatant, by re-using tiprack 3\n ### remove supernatant from mag_plate\n m300.flow_rate.aspirate = 100\n m300.flow_rate.dispense = 100\n m300.pick_up_tip(tipracks_200_3[i]) # Slow down head speed 0.5X for bead handling\n m300.aspirate(EtOH_vol2, mag_plate[i].bottom(1))\n m300.dispense(EtOH_vol2, trash_box['A1'].top(-5))\n protocol.delay(seconds=5)\n m300.flow_rate.aspirate = 130\n m300.flow_rate.dispense = 130\n m300.blow_out(trash_box['A1'].top(-5))\n m300.air_gap(height = 3)\n m300.return_tip()\n\n\n # Dry beads before elution\n protocol.delay(minutes=4)\n\n for i in list_of_cols:\n ### Transfer Elution Buffer to mag_plate\n m300.flow_rate.aspirate = 40\n m300.flow_rate.dispense = 40\n m300.pick_up_tip(tipracks_200_4[i]) # Slow down head speed 0.5X for bead handling\n m300.move_to(Elution_buffer.top(-16))\n m300.aspirate(Elution_vol, Elution_buffer.bottom(2))\n m300.dispense(Elution_vol, mag_plate[i].top(-4))\n m300.flow_rate.aspirate = 50\n m300.flow_rate.dispense = 50\n m300.mix(3, Elution_vol, mag_plate[i].bottom(5))\n protocol.delay(seconds=5)\n # m300.move_to(mag_plate[i].top(-10))\n m300.blow_out(mag_plate[i].top(-10))\n m300.return_tip()\n\n ## Incubate elutes for 15 minutes at room temperature\n protocol.pause(\"Please, incubate samples for 10 min at 37ºC and press resume after it\")\n\n for i in list_of_cols:\n ## Transfer elutes to new plates.\n ## Transfer Elution buffer to elution_plate\n m300.flow_rate.aspirate = 50\n m300.flow_rate.dispense = 50\n m300.pick_up_tip(tipracks_200_4[i])\n m300.aspirate(Elution_vol, mag_plate[i].bottom(1))\n m300.dispense(Elution_vol, elution_plate[i].bottom(2))\n protocol.delay(seconds=5)\n m300.flow_rate.aspirate = 130\n m300.flow_rate.dispense = 130\n m300.move_to(elution_plate[i].top(-10))\n m300.blow_out()\n m300.return_tip()\n\n mag_deck.disengage()\n\n protocol.pause(\"Yay! \\ Purification has finished \\ Please store purified libraries as -20°C \\ Press resume when finished.\")\n","sub_path":"Library_Build/BEST/BEST_purification_14102020.py","file_name":"BEST_purification_14102020.py","file_ext":"py","file_size_in_byte":9584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"518398012","text":"import sys,math\nimport numpy as np\n\ndef sigmoid(x): return 1.0/(1.0+ np.exp(-x) )\n\ndef dsigmoid(x): return sigmoid(x)*(1.0 - sigmoid(x))\n\ndef forward(expected,inputs,ihw,how,i):\n hidden = []\n output = 0\n input = inputs[i]\n newputs =[0,0]\n for j in range(2): newputs[j] = sigmoid(input[0]*ihw[0][j] + input[1]*ihw[1][j] + input[2]*ihw[2][j])\n for j in range(len(how[0])):output = sigmoid(newputs[0]*how[0][j] + newputs[1]*how[1][j])\n return (newputs,output)\n\ndef backpropogation(expected,how,hidden_outputs,outputs,input):\n hidden_outputs = np.array(hidden_outputs)\n outputs = np.array(outputs)\n deltaj1 = []\n deltaj2 = []\n deltak = (outputs-expected[input])*outputs*(1-outputs)\n deltaj1.append(deltak*hidden_outputs[0]*(1-hidden_outputs[0])*how[0][0])\n deltaj2.append(deltak*hidden_outputs[1]*(1-hidden_outputs[1])*how[1][0])\n return [deltak,deltaj1,deltaj2]\n\ndef network():\n\n learningrate = 10.0\n num_input = 3 #number of input nodes +1\n num_hidden = 2 #number of hidden nodes\n num_output = 1 #number of output nodes\n inputs = np.array([[0,0,1],[1,0,1],[0,1,1],[1,1,1]])\n expected = np.array([0,1,1,0])\n ihw = 10*np.random.randn(num_input, num_hidden)\n how = 10*np.random.randn(num_hidden, num_output)\n error=1\n while error>.005:\n output= []\n for p in range(len(expected)):\n hiddenactive, outputs = forward(expected,inputs,ihw,how,p)\n output.append(outputs)\n deltas = backpropogation(expected,how,hiddenactive,outputs,p)\n\n for i in range(ihw.shape[0]):\n for j in range(ihw.shape[1]):\n derivative = -1*learningrate*deltas[j+1][0]*inputs[p,i]\n ihw[i,j]+=derivative\n\n for i in range(how.shape[0]):\n for j in range(how.shape[1]):\n derivative = -1*learningrate*deltas[0]*hiddenactive[i]\n how[i,j]+=derivative\n output = np.array(output)\n error = np.sum((output-expected)**2)\n print(error)\n print(ihw,how)\n print(output)\n print(\"End Error: \"+str(error))\n\nnetwork()\n","sub_path":"Documents/TJ 2015-2016/AI/Neuralnetwork.py","file_name":"Neuralnetwork.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"89223686","text":"import sys\ninput_path,output_path=sys.argv[1:]\n\nwith open(input_path,\"r\") as input_file:\n with open(output_path,\"w\") as output_file:\n for line in input_file:\n if line.startswith(\"#\"):\n continue\n name=line.split()[0]\n evalue=float(line.split()[7])\n if evalue <= 1e-5:\n output_file.write(name+\"\\n\")","sub_path":"Read_tab.py","file_name":"Read_tab.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"502839788","text":"from CRABClient.UserUtilities import config, getUsernameFromSiteDB\nconfig = config()\n\nconfig.General.requestName = 'Samples2018_BJpsiX_MuMu'\nconfig.General.workArea = 'crab_privateMC_miniAOD'\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = False\n\nconfig.JobType.pluginName = 'Analysis'\n#config.JobType.disableAutomaticOutputCollection = True\nconfig.JobType.maxMemoryMB = 2500\nconfig.JobType.psetName='miniAOD_crab_cfg.py'\n\nconfig.JobType.numCores=1\n\n#config.JobType.disableAutomaticOutputCollection = True \n#config.JobType.outputFiles = ['fileAOD.root','miniAOD.root']\n\nconfig.Data.inputDataset=''\nconfig.Data.inputDBS='phys03'\n\n#config.Data.splitting = 'Automatic'\nconfig.Data.splitting = 'FileBased'\nconfig.Data.unitsPerJob = 30 #5\n#config.Data.unitsPerJob = 1000 #500\n#config.Data.totalUnits = 800000 #200000\nconfig.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())\nconfig.Data.publication = True\n#config.Data.outputPrimaryDataset = 'BcJpsiMuNu_MiniAOD'\nconfig.Data.outputDatasetTag ='Fall18_10_2_9-MINIAODSIM'\nconfig.Data.lumiMask =\"\"\nconfig.Data.ignoreLocality = True\nconfig.Site.whitelist=['T2_US_Nebraska', 'T2_FR_IPHC', 'T2_DE_RWTH','T2_CH_CSCS', 'T2_CH_CERN','T2_FR_GRIF_LLR']\n\nconfig.Site.storageSite = 'T2_CH_CSCS'\n","sub_path":"2018/crab_miniAOD_prod.py","file_name":"crab_miniAOD_prod.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"411676234","text":"\nimport sys\nimport absRel3\n \nimport os\nimport ast\nimport sys\nimport color\ndef read(path):\n color.blue(\"Reading \"+path)\n content = \"\"\n with open(path, 'r', encoding='utf-8') as content_file:\n content = content_file.read()\n \n # print(content)\n return content \n\ndef readToObj(filename):\n data = None\n with open(filename) as fp:\n data = [ast.literal_eval(line) for line in fp if line.strip()]\n result = data[0] if len(data) else None\n return result\n\ndef write(path, content):\n content = str(content)\n print(\"Writing file \"+path+'---------------------------------------------------+++++++++++++++++++++++++++++++++++++++++')\n text_file = open(path, \"w\", encoding='utf-8')\n \n text_file.write(content)\n text_file.close()\n \n print(content)\n\ndef writeEvenIfNoDir(path, content):\n directory = absRel3.folder (path)\n if not os.path.exists(directory):\n os.makedirs(directory) \n write (path, content) \n\n\n","sub_path":"util/filer2.py","file_name":"filer2.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"577242804","text":"import collections\nimport functools\nimport pprint\nimport re\n\npp = pprint.PrettyPrinter(indent=4)\n\nfile = 'a.txt'\ndata = [_.strip() for _ in open(file).readlines()]\n\n\ndef score(s):\n level = 0\n total = 0\n for c in s:\n if c == '{':\n level += 1\n if c == '}':\n total += level\n level -= 1\n return total\n\n\nnew_data = []\nfor line in data:\n token = line\n # Cancel out according to !\n token = re.sub(r\"(!.)\", '', token)\n token = re.sub(r\"(<.*?>)\", '', token)\n token = re.sub(r\"(,,+)\", '', token)\n # token = re.sub(r\"(,)\", '', token)\n new_data.append(score(token))\n\ndata = new_data\n\npp.pprint(data)\n","sub_path":"09/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"347331164","text":"def is_palindrome(num_input):\n if len(num_input) == 1:\n isPalindrome = True\n return isPalindrome\n else:\n isPalindrome = True\n length_half = len(num_input) // 2\n\n # comparing 'k'th letter starting from the front and\n # 'k'th letter starting from the back\n k = 0\n while k < length_half:\n if num_input[k] != num_input[-k-1]:\n isPalindrome = False\n break\n k += 1\n return isPalindrome\n\nans = 0\nfor n in range(1, 1000000):\n num_dec = str(n)\n num_bin = bin(n)[2:]\n if is_palindrome(num_dec) and is_palindrome(num_bin):\n ans += n\nprint('answer:', ans)\n\n\n","sub_path":"Project Euler/Problem 36.py","file_name":"Problem 36.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"569085798","text":"import smbus2\r\nimport time\r\n\r\n# This is the address we setup in the Arduino Program\r\n#This address is determined by which pins the I2C Connection is\r\n#plugged into on the Raspberry PI\r\naddress = None\r\n\r\n#list of Strings used to denote request types\r\n#could use enum in the future, but this seems to work ok\r\n#add other types to the list, index reperesents the \r\n#I2C code\r\nrequestType = [\"HALL_COUNT\"]\r\n\r\n\r\ndef writeRequest(request):\r\n #this is another way to write to bus using smbus2\r\n with smbus2.SMBusWrapper(1) as bus:\r\n # Write a byte to address, offset 0\r\n #not sure what offset means, but will look into it\r\n bus.write_byte_data(address, 0, request)\r\n\r\ndef readNumber():\r\n #this is another way to read from I2C using smbus2\r\n with smbus2.SMBusWrapper(1) as bus:\r\n #reads a byte from given address, offset 0\r\n #not sure what offset means, but will look into it\r\n number = bus.read_byte_data(address, 0)\r\n return int(number)\r\n\r\nwhile True:\r\n \r\n #This is just for testing purposes if we want to test with a second arduino\r\n if (input(\"Enter 1 for address (0x04) and 2 for address (0x05): \") == \"1\"):\r\n address = 0x04\r\n else:\r\n address = 0x05\r\n\r\n #get request Type from console input\r\n request = int(input(\"Select 0 for hall count \"))\r\n \r\n #sends a request to the RPI\r\n writeRequest(request)\r\n print( \"RPI: Hi Arduino, I sent you a \" + requestType[request] + \" request\")\r\n\r\n #reads data sent from Arduino (arduino will reset the Hall Count after a request)\r\n number = readNumber()\r\n print (\"Data recieved from adrduino \"+ str(number))","sub_path":"Hall_Sensor_Counter_PI.py","file_name":"Hall_Sensor_Counter_PI.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"378507179","text":"from ...core.pipeline import Pipeline\nfrom ...core.stream.generator_stream import GeneratorSlidingWindowStream\nfrom ...core.accelerometer import generator\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nimport sys\nimport pytest\n\n\ndef _pipeline_test_processor(chunk_list, **kwargs):\n import pandas as pd\n result = {'NAME': [],\n 'START_TIME': [], 'STOP_TIME': []}\n for data, st, et, prev_st, prev_et, name in chunk_list:\n result['NAME'].append(name)\n result['START_TIME'].append(data.iloc[0, 0])\n result['STOP_TIME'].append(data.iloc[-1, 0])\n result = pd.DataFrame.from_dict(result)\n return result\n\n\n@pytest.mark.skipif(sys.platform == 'linux', reason=\"does not run on linux\")\ndef test_Pipeline():\n # test on a single stream\n stream_config = {\n \"generator\": generator.normal_dist,\n 'kwargs': {\n \"grange\": 8,\n \"buffer_size\": 100,\n \"sleep_interval\": 0,\n \"sigma\": 1,\n \"sr\": 80\n }\n }\n\n window_size = 12.8\n start_time = datetime.now()\n stream = GeneratorSlidingWindowStream(\n stream_config, window_size=window_size, name='stream-1')\n\n pipeline = Pipeline(max_processes=2, scheduler='threads',\n name='single-stream-pipeline')\n pipeline.add_stream(stream)\n pipeline.set_processor(_pipeline_test_processor)\n pipeline.start(start_time=start_time, process_start_time=start_time)\n\n results = []\n for result, st, et, prev_st, prev_et, name in pipeline.get_iterator():\n result['WINDOW_ST'] = st\n result['WINDOW_ET'] = et\n result['PREV_WINDOW_ST'] = prev_st\n result['PREV_WINDOW_ET'] = prev_et\n result['STREAM_NAME'] = name\n results.append(result)\n if len(results) == 5:\n break\n pipeline.stop()\n results = pd.concat(results, axis=0, sort=False)\n durations = (results['WINDOW_ET'] -\n results['WINDOW_ST']) / pd.Timedelta(1, unit='S')\n np.testing.assert_array_almost_equal(durations, window_size, decimal=1)\n\n # test on three streams, using the same pipeline but change the parameter of the first stream a little bit\n stream2_config = {\n \"generator\": generator.normal_dist,\n 'kwargs': {\n \"grange\": 4,\n \"buffer_size\": 400,\n \"sleep_interval\": 1,\n \"sigma\": 2,\n \"sr\": 50\n }\n }\n\n start_time = datetime.now()\n stream2 = GeneratorSlidingWindowStream(\n stream2_config, window_size=window_size, name='stream-2')\n pipeline.add_stream(stream2)\n pipeline.start(start_time=start_time, process_start_time=start_time)\n\n results = []\n for result, st, et, prev_st, prev_et, name in pipeline.get_iterator():\n result['WINDOW_ST'] = st\n result['WINDOW_ET'] = et\n result['PREV_WINDOW_ST'] = prev_st\n result['PREV_WINDOW_ET'] = prev_et\n result['STREAM_NAME'] = name\n np.testing.assert_array_equal(\n result['START_TIME'].values, result['START_TIME'].values[0])\n results.append(result)\n if len(results) == 5:\n break\n pipeline.stop()\n results = pd.concat(results, axis=0, sort=False)\n durations = (results['WINDOW_ET'] -\n results['WINDOW_ST']) / pd.Timedelta(1, unit='S')\n np.testing.assert_array_almost_equal(durations, window_size, decimal=1)\n","sub_path":"arus/core/tests/_test_pipeline.py","file_name":"_test_pipeline.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"190146874","text":"from ApiADA.loggers import logging\nfrom core.exceptions.customexceptions import ApiException\nimport traceback\nimport json\nimport datetime\nfrom json_tricks import dumps\nfrom core.vodafone.smart import smart_query\n\nlog = logging.getLogger(__name__)\n\nclass AnalysisProcessInstanceComb:\n \n \n # inputs:\n # contractObjid: Objid de contract\n # searchDate: Fecha a partir de la cual buscar\n # output:\n # return una cadena con las validaciones localizadas\n #\n def getValidations(contractObjid, searchDate):\n log.info('Start: getValidations')\n strSearchDate=searchDate.strftime('%Y-%m-%d %H:%M:%S')\n query_get_logs=\"\"\"select *\n from \n (\n select detalle, entrada \n from sa.nbspm_processinstancescomb\n where contract = %s\n and entrada >= TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS')\n order by entrada desc\n )\n where rownum=1\"\"\" %(contractObjid, strSearchDate)\n\n query_get_logs_output=smart_query.my_custom_sql('smart_gg', query_get_logs)\n if (len(query_get_logs_output)>0):\n log.info('End: getValidations')\n return query_get_logs_output[0]\n else:\n log.info('End: getValidations')\n return None","sub_path":"core/beans/analysis_processinstancecomb/analysis_processinstancecomb.py","file_name":"analysis_processinstancecomb.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"330915091","text":"# test plot\nimport numpy as np\nimport mlpy\nimport matplotlib.pyplot as plt\nfrom matplotlib.mlab import PCA\n#from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.font_manager import FontProperties\n\niris = np.loadtxt('/home/ruige/KNN/trainIris.txt',delimiter=',')\nx,y = iris[:,:4], iris[:,4].astype(np.int)\n#print x,y\npca = PCA(x)\nx1 = []\ny1 = []\nfor item in pca.Y:\n\tx1.append(item[0])\n\ty1.append(item[1])\nfig1 = plt.figure()\npltData = [x1,y1]\nplt.scatter(pltData[0],pltData[1],c=y)\nplt.show()\n#pca.learn(x)\n#print pca\n#fig1 = plt.figure(1)\n#plt.scatter(pca,c=y)\n#plt.show()\n","sub_path":"Python/PythonC++Scala/dj1/knn/KNN/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"414964479","text":"import os\nimport json\nimport re\nimport argparse\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom html.parser import HTMLParser\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom scipy.sparse import csr_matrix, hstack\nfrom sklearn.linear_model import Ridge\n\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs = True\n self.fed = []\n\n def handle_data(self, d):\n self.fed.append(d)\n\n def get_data(self):\n return ''.join(self.fed)\n\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n\ndef read_json_line(line=None):\n result = None\n try:\n result = json.loads(line)\n except Exception as e:\n # Find the offending character index:\n idx_to_replace = int(str(e).split(' ')[-1].replace(')', ''))\n # Remove the offending character:\n new_line = list(line)\n new_line[idx_to_replace] = ' '\n new_line = ''.join(new_line)\n return read_json_line(line=new_line)\n return result\n\n\ndef prepare_features(path_to_data, is_train, authors_dict, authors_scaler, title_vectorizer, content_vectorizer,\n reading_time_scaler):\n prefix = 'train' if is_train else 'test'\n\n features = ['content', 'published', 'title', 'author', 'meta_tags']\n features_lists = [[] for x in features]\n with open(os.path.join(path_to_data, '{0}.json'.format(prefix)), encoding='utf-8') as json_input_file:\n for line in tqdm(json_input_file):\n json_data = read_json_line(line)\n for i in range(0, len(features)):\n feature = features[i]\n data = json_data[feature]\n\n if feature == 'published':\n data = datetime.strptime(data['$date'], '%Y-%m-%dT%H:%M:%S.%fZ')\n elif feature == 'author':\n data = data['url'].split('@')[1].upper()\n if data not in authors_dict and is_train:\n authors_dict[data] = len(authors_dict) + 1\n data = authors_dict[data] if data in authors_dict else 0\n elif feature == 'meta_tags':\n data = np.log1p(float(re.findall('(\\\\d+) min read', data['twitter:data1'])[0]))\n else:\n data = data.replace('\\n', ' ').replace('\\r', ' ')\n data = strip_tags(data)\n\n features_lists[i].append(data)\n\n authors_ids = np.reshape(features_lists[features.index('author')], newshape=(-1, 1))\n if is_train:\n authors_feature = authors_scaler.fit_transform(authors_ids)\n else:\n authors_feature = authors_scaler.transform(authors_ids)\n\n publish_dates = features_lists[features.index('published')]\n publish_dates_df = pd.DataFrame(publish_dates, index=np.arange(1, len(publish_dates) + 1), columns=['published'])\n\n titles = features_lists[features.index('title')]\n if is_train:\n titles_feature = title_vectorizer.fit_transform(titles)\n else:\n titles_feature = title_vectorizer.transform(titles)\n\n contents = features_lists[features.index('content')]\n if is_train:\n contents_feature = content_vectorizer.fit_transform(contents)\n else:\n contents_feature = content_vectorizer.transform(contents)\n\n reading_times = np.reshape(features_lists[features.index('meta_tags')], newshape=(-1, 1))\n if is_train:\n reading_times_feature = reading_time_scaler.fit_transform(reading_times)\n else:\n reading_times_feature = reading_time_scaler.transform(reading_times)\n\n return authors_feature, publish_dates_df, titles_feature, contents_feature, reading_times_feature\n\n\ndef prepare_publish_date_features(df):\n df['year'] = df['published'].apply(lambda x: x.year)\n df['month'] = 
df['published'].apply(lambda x: x.month)\n df['day'] = df['published'].apply(lambda x: x.day)\n df['dow'] = df['published'].apply(lambda x: x.weekday())\n df['tod'] = df['published'].apply(lambda x: x.hour)\n\n df['is_year_other'] = df['year'].apply(lambda x: 1 if x < 2012 else 0)\n df['is_year_2012'] = df['year'].apply(lambda x: 1 if x == 2012 else 0)\n df['is_year_2013'] = df['year'].apply(lambda x: 1 if x == 2013 else 0)\n df['is_year_2014'] = df['year'].apply(lambda x: 1 if x == 2014 else 0)\n df['is_year_2015'] = df['year'].apply(lambda x: 1 if x == 2015 else 0)\n df['is_year_2017'] = df['year'].apply(lambda x: 1 if x == 2017 else 0)\n\n df['is_weekend'] = df['dow'].apply(lambda x: 1 if x in (5, 6) else 0)\n\n df['is_night'] = df['tod'].apply(lambda x: 1 if 23 <= x <= 24 or 0 <= x < 8 else 0)\n df['is_morning'] = df['tod'].apply(lambda x: 1 if 8 <= x < 11 else 0)\n df['is_day'] = df['tod'].apply(lambda x: 1 if 11 <= x < 19 else 0)\n\n time_bool_features = df[\n ['is_year_other', 'is_year_2012', 'is_year_2013', 'is_year_2014', 'is_year_2015', 'is_year_2017', 'is_weekend',\n 'is_night', 'is_morning', 'is_day']]\n time_categorical_features = OneHotEncoder(n_values=[12, 24]).fit_transform(df[['month', 'tod']])\n return hstack([time_bool_features, time_categorical_features]).tocsr()\n\n\ndef write_submission_file(prediction, path_to_data, filename):\n path_to_sample = os.path.join(path_to_data, 'sample_submission.csv')\n submission = pd.read_csv(path_to_sample, index_col='id')\n\n submission['log_recommends'] = prediction\n submission.to_csv(os.path.join(path_to_data, filename))\n\n\ndef main():\n parser = argparse.ArgumentParser(add_help=True, description='Compute solution for Medium competition')\n parser.add_argument('--path_to_data', default=os.getcwd(), help='Path to files with data')\n args = parser.parse_args()\n\n path_to_data = args.path_to_data\n\n authors_dict = {}\n authors_scaler = OneHotEncoder(handle_unknown='ignore')\n title_vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=100000, sublinear_tf=True)\n content_vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=100000, sublinear_tf=True)\n reading_time_scaler = StandardScaler()\n\n (train_authors, train_publish, train_titles, train_contents, train_reading_times) = prepare_features(path_to_data, True, authors_dict, authors_scaler, title_vectorizer, content_vectorizer, reading_time_scaler)\n (test_authors, test_publish, test_titles, test_contents, test_reading_times) = prepare_features(path_to_data, False, authors_dict, authors_scaler, title_vectorizer, content_vectorizer, reading_time_scaler)\n train_target = pd.read_csv(os.path.join(path_to_data, 'train_log1p_recommends.csv'), index_col='id')\n\n y_train = train_target['log_recommends'].values\n x_train_sparse = csr_matrix(hstack(\n [train_contents, train_titles, train_authors, prepare_publish_date_features(train_publish),\n train_reading_times]))\n x_test_sparse = csr_matrix(hstack(\n [test_contents, test_titles, test_authors, prepare_publish_date_features(test_publish), test_reading_times]))\n\n ridge = Ridge(alpha=1.2, random_state=17)\n ridge.fit(x_train_sparse, y_train)\n\n ridge_test_pred = ridge.predict(x_test_sparse)\n avg = np.mean(ridge_test_pred)\n ridge_test_pred = list(map(lambda x: x - avg + 4.33328, ridge_test_pred))\n\n write_submission_file(ridge_test_pred, path_to_data, 'assignment6_medium_submission.csv')\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"competitions/medium.py","file_name":"medium.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"564422397","text":"#!/usr/bin/env python\n\n# Working:\n#!/mnt/common/rh6/ngsdev/python-2.7/bin/python\n\n'''\nPubSub.py\n Module for interacting with Pioneer PubSub servers\n'''\n\nimport os, sys, re\nfrom optparse import OptionParser\n\nfrom datetime import datetime\nfrom time import time\nimport pytz\nimport base64\n\nimport pdb\nimport logging\nimport json\n\nimport BioneerSOAClient\n\nlogging.basicConfig(format = '%(asctime)s %(lineno)6d %(levelname)-8s %(message)s',\n datefmt = '%m-%d %H:%M')\npubsub_log = logging.getLogger('PubSub')\npubsub_log.setLevel(logging.INFO)\n\npubsub_servers = { \n 'dev': 'http://tcs.phidev.com/tcs/pubsub',\n 'qa': 'http://tcs.phiqa.com/tcs/pubsub',\n 'tr': 'http://tcs.phitr.com/tcs/pubsub',\n 'prod': 'http://tcs.phibred.com/tcs/pubsub'\n}\n\nclass PubSub:\n def __init__(self, base_url, env='qa'):\n self.base_url = base_url\n self.test_url = \"http://lxbtdv005.phibred.com:8009\"\n\n self.client = BioneerSOAClient.Request(env=env)\n\n def topics(self):\n \"\"\"Return a list of topics on the server\"\"\"\n r = self.client\n\n response = r.get( self.base_url + '/api/topics')\n pubsub_log.debug(\"HEADERS:\\n\" + json.dumps(r.headers, indent=4))\n pubsub_log.debug(\"Got response %s [%s]\" % ( response.status_code, response.text ))\n return json.loads(response.text)\n\n def topic(self, topic):\n \"\"\"Return a list of topics on the server\"\"\"\n r = self.client\n\n response = r.get( self.base_url + '/api/topics/' + topic)\n pubsub_log.debug(\"HEADERS:\\n\" + json.dumps(r.headers, indent=4))\n pubsub_log.debug(\"Got response %s [%s]\" % ( response.status_code, response.text ))\n return json.loads(response.text)\n\n def subscriptions(self, topic=None):\n r = self.client\n\n response = r.get( self.base_url + '/api/subscriptions/')\n pubsub_log.debug(\"HEADERS:\\n\" + json.dumps(r.headers, indent=4))\n pubsub_log.debug(\"Got response %s [%s]\" % ( response.status_code, response.text ))\n subs=json.loads(response.text)\n if topic:\n return filter(lambda x: topic.lower() in x['Topic'].lower(), subs)\n return subs\n\n def subscribe(self, topic=\"simplex_image\", callback_url=\"http://titanrest_test.phibred.com:8182/test_callback\"):\n r = self.client\n\n s = {\n 'Add': True,\n 'RestCallbackUri': callback_url,\n 'Topic': topic\n }\n\n response = r.post( self.base_url + '/api/subscriptions', json=s, headers={'Content-type':'application/json'} )\n pubsub_log.debug(\"HEADERS:\\n\" + json.dumps(r.headers, indent=4))\n pubsub_log.debug(\"DATA:\\n\" + json.dumps(s, indent=4))\n pubsub_log.debug(\"Got response %s [%s]\" % ( response.status_code, response.text ))\n return json.loads(response.text)\n\n def unsubscribe(self, topic=\"simplex_image\", callback_url=\"http://titanrest_test.phibred.com:8182/test_callback\"):\n r = self.client\n\n s = {\n 'Add': True,\n 'RestCallbackUri': callback_url,\n 'Topic': topic\n }\n\n response = r.post( self.base_url + '/api/admin/subscriptions', json=s, headers={'Content-type':'application/json'} )\n pubsub_log.debug(\"HEADERS:\\n\" + json.dumps(r.headers, indent=4))\n pubsub_log.debug(\"DATA:\\n\" + json.dumps(s, indent=4))\n pubsub_log.debug(\"Got response %s [%s]\" % ( response.status_code, response.text ))\n return json.loads(response.text)\n\n def publish(self, topic=\"simplex_image\", keys={'image_id': 1, 'status': 'testing'}):\n r = self.client\n\n p = {\n 'MessageTimestamp': datetime.now(pytz.utc).strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'Topic': topic\n }\n\n params = []\n for key in keys:\n params.append({ 'Key': key, 
'Value': keys[key] });\n\n p['Parameters'] = params\n\n data=json.dumps(p)\n\n response = r.post( self.base_url + '/api/publications', json=p, headers={'Content-type':'application/json'} )\n pubsub_log.debug(\"HEADERS:\\n\" + json.dumps(r.headers, indent=4))\n pubsub_log.debug(\"DATA:\\n\" + data)\n pubsub_log.debug(\"Got response %s [%s]\" % ( response.status_code, response.text ))\n if response.text: return json.loads(response.text)\n return response.status_code == 200\n \n\ndef parse_args():\n usage = 'usage: %prog [options] action args\\n \\\nScript to publish/subscribe to messages using Pioneer PubSub\\n \\\nactions:\\n \\\ntopics: \\tlist all topics\\n \\\ntopic : \\tdescribe topic\\n \\\nsubscriptions [topic_name]: \\tlist all subscriptions, or subscriptions to given topic\\n \\\nsubscribe :\\tsubscribe to the given topic with the given callback url\\n \\\npublish : \\tpublish message to topic. message=json key/value pairs\\n\\\n'\n\n parser = OptionParser(usage=usage)\n parser.add_option('-e','--env',dest='env',default='qa',\n help='option to set the target environment (default=qa): ' + ','.join(pubsub_servers.keys()))\n\n parser.add_option('-q','--quiet',dest='quiet',default=False,action='store_true')\n parser.add_option('-v','--verbose',dest='verbose',default=False,action='store_true')\n\n options, args = parser.parse_args()\n\n if not args: \n parser.print_help()\n sys.exit(0)\n\n return options, args\n \ndef main():\n\n options, args = parse_args()\n\n if options.quiet: \n pubsub_log.setLevel(logging.WARN)\n sys.tracebacklimit=0\n\n if options.verbose: pubsub_log.setLevel(logging.DEBUG)\n\n if options.env:\n try:\n server = pubsub_servers[options.env]\n except KeyError:\n raise ValueError('--env (%s) not valid, should be (%s)' % ( options.env, ','.join(pubsub_servers.keys()) ) )\n else:\n raise ValueError('PubSub Environment or server must be set')\n\n# pubsub_log.debug(\"Using server %s: %s\" % (options.env, server))\n\n client = PubSub(server, options.env)\n pubsub_log.debug(\"Got PubSub client for %s\" % client.base_url )\n\n action = args.pop(0)\n pubsub_log.debug(\"Action: %s\" % action)\n pubsub_log.debug(\"Args: %s\" % json.dumps(args))\n\n result = getattr(client, action)(*args)\n sys.stdout.write(action + \":\\n\" + json.dumps(result, indent=4) + \"\\n\")\n\nif __name__ == '__main__':\n main()\n\n \n","sub_path":"perl/scripts/PubSub.py","file_name":"PubSub.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"30768392","text":"\"\"\"The test the Honeywell thermostat module.\"\"\"\nimport unittest\nfrom unittest import mock\n\nimport pytest\nimport requests.exceptions\nimport somecomfort\nimport voluptuous as vol\n\nfrom homeassistant.components.climate.const import (\n ATTR_FAN_MODE,\n ATTR_FAN_MODES,\n ATTR_HVAC_MODES,\n)\nimport homeassistant.components.honeywell.climate as honeywell\nfrom homeassistant.const import (\n CONF_PASSWORD,\n CONF_USERNAME,\n TEMP_CELSIUS,\n TEMP_FAHRENHEIT,\n)\n\npytestmark = pytest.mark.skip(\"Need to be fixed!\")\n\n\nclass TestHoneywell(unittest.TestCase):\n \"\"\"A test class for Honeywell themostats.\"\"\"\n\n @mock.patch(\"somecomfort.SomeComfort\")\n @mock.patch(\"homeassistant.components.honeywell.climate.HoneywellUSThermostat\")\n def test_setup_us(self, mock_ht, mock_sc):\n \"\"\"Test for the US setup.\"\"\"\n config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"us\",\n }\n bad_pass_config = {CONF_USERNAME: \"user\", honeywell.CONF_REGION: \"us\"}\n bad_region_config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"un\",\n }\n\n with pytest.raises(vol.Invalid):\n honeywell.PLATFORM_SCHEMA(None)\n\n with pytest.raises(vol.Invalid):\n honeywell.PLATFORM_SCHEMA({})\n\n with pytest.raises(vol.Invalid):\n honeywell.PLATFORM_SCHEMA(bad_pass_config)\n\n with pytest.raises(vol.Invalid):\n honeywell.PLATFORM_SCHEMA(bad_region_config)\n\n hass = mock.MagicMock()\n add_entities = mock.MagicMock()\n\n locations = [mock.MagicMock(), mock.MagicMock()]\n devices_1 = [mock.MagicMock()]\n devices_2 = [mock.MagicMock(), mock.MagicMock]\n mock_sc.return_value.locations_by_id.values.return_value = locations\n locations[0].devices_by_id.values.return_value = devices_1\n locations[1].devices_by_id.values.return_value = devices_2\n\n result = honeywell.setup_platform(hass, config, add_entities)\n assert result\n assert mock_sc.call_count == 1\n assert mock_sc.call_args == mock.call(\"user\", \"pass\")\n mock_ht.assert_has_calls(\n [\n mock.call(mock_sc.return_value, devices_1[0], 18, 28, \"user\", \"pass\"),\n mock.call(mock_sc.return_value, devices_2[0], 18, 28, \"user\", \"pass\"),\n mock.call(mock_sc.return_value, devices_2[1], 18, 28, \"user\", \"pass\"),\n ]\n )\n\n @mock.patch(\"somecomfort.SomeComfort\")\n def test_setup_us_failures(self, mock_sc):\n \"\"\"Test the US setup.\"\"\"\n hass = mock.MagicMock()\n add_entities = mock.MagicMock()\n config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"us\",\n }\n\n mock_sc.side_effect = somecomfort.AuthError\n result = honeywell.setup_platform(hass, config, add_entities)\n assert not result\n assert not add_entities.called\n\n mock_sc.side_effect = somecomfort.SomeComfortError\n result = honeywell.setup_platform(hass, config, add_entities)\n assert not result\n assert not add_entities.called\n\n @mock.patch(\"somecomfort.SomeComfort\")\n @mock.patch(\"homeassistant.components.honeywell.climate.HoneywellUSThermostat\")\n def _test_us_filtered_devices(self, mock_ht, mock_sc, loc=None, dev=None):\n \"\"\"Test for US filtered thermostats.\"\"\"\n config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"us\",\n \"location\": loc,\n \"thermostat\": dev,\n }\n locations = {\n 1: mock.MagicMock(\n locationid=mock.sentinel.loc1,\n devices_by_id={\n 11: mock.MagicMock(deviceid=mock.sentinel.loc1dev1),\n 12: mock.MagicMock(deviceid=mock.sentinel.loc1dev2),\n },\n ),\n 2: 
mock.MagicMock(\n locationid=mock.sentinel.loc2,\n devices_by_id={21: mock.MagicMock(deviceid=mock.sentinel.loc2dev1)},\n ),\n 3: mock.MagicMock(\n locationid=mock.sentinel.loc3,\n devices_by_id={31: mock.MagicMock(deviceid=mock.sentinel.loc3dev1)},\n ),\n }\n mock_sc.return_value = mock.MagicMock(locations_by_id=locations)\n hass = mock.MagicMock()\n add_entities = mock.MagicMock()\n assert honeywell.setup_platform(hass, config, add_entities) is True\n\n return mock_ht.call_args_list, mock_sc\n\n def test_us_filtered_thermostat_1(self):\n \"\"\"Test for US filtered thermostats.\"\"\"\n result, client = self._test_us_filtered_devices(dev=mock.sentinel.loc1dev1)\n devices = [x[0][1].deviceid for x in result]\n assert [mock.sentinel.loc1dev1] == devices\n\n def test_us_filtered_thermostat_2(self):\n \"\"\"Test for US filtered location.\"\"\"\n result, client = self._test_us_filtered_devices(dev=mock.sentinel.loc2dev1)\n devices = [x[0][1].deviceid for x in result]\n assert [mock.sentinel.loc2dev1] == devices\n\n def test_us_filtered_location_1(self):\n \"\"\"Test for US filtered locations.\"\"\"\n result, client = self._test_us_filtered_devices(loc=mock.sentinel.loc1)\n devices = [x[0][1].deviceid for x in result]\n assert [mock.sentinel.loc1dev1, mock.sentinel.loc1dev2] == devices\n\n def test_us_filtered_location_2(self):\n \"\"\"Test for US filtered locations.\"\"\"\n result, client = self._test_us_filtered_devices(loc=mock.sentinel.loc2)\n devices = [x[0][1].deviceid for x in result]\n assert [mock.sentinel.loc2dev1] == devices\n\n @mock.patch(\"evohomeclient.EvohomeClient\")\n @mock.patch(\"homeassistant.components.honeywell.climate.HoneywellUSThermostat\")\n def test_eu_setup_full_config(self, mock_round, mock_evo):\n \"\"\"Test the EU setup with complete configuration.\"\"\"\n config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"eu\",\n }\n mock_evo.return_value.temperatures.return_value = [{\"id\": \"foo\"}, {\"id\": \"bar\"}]\n hass = mock.MagicMock()\n add_entities = mock.MagicMock()\n assert honeywell.setup_platform(hass, config, add_entities)\n assert mock_evo.call_count == 1\n assert mock_evo.call_args == mock.call(\"user\", \"pass\")\n assert mock_evo.return_value.temperatures.call_count == 1\n assert mock_evo.return_value.temperatures.call_args == mock.call(\n force_refresh=True\n )\n mock_round.assert_has_calls(\n [\n mock.call(mock_evo.return_value, \"foo\", True, 20.0),\n mock.call(mock_evo.return_value, \"bar\", False, 20.0),\n ]\n )\n assert add_entities.call_count == 2\n\n @mock.patch(\"evohomeclient.EvohomeClient\")\n @mock.patch(\"homeassistant.components.honeywell.climate.HoneywellUSThermostat\")\n def test_eu_setup_partial_config(self, mock_round, mock_evo):\n \"\"\"Test the EU setup with partial configuration.\"\"\"\n config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"eu\",\n }\n\n mock_evo.return_value.temperatures.return_value = [{\"id\": \"foo\"}, {\"id\": \"bar\"}]\n\n hass = mock.MagicMock()\n add_entities = mock.MagicMock()\n assert honeywell.setup_platform(hass, config, add_entities)\n mock_round.assert_has_calls(\n [\n mock.call(mock_evo.return_value, \"foo\", True, 16),\n mock.call(mock_evo.return_value, \"bar\", False, 16),\n ]\n )\n\n @mock.patch(\"evohomeclient.EvohomeClient\")\n @mock.patch(\"homeassistant.components.honeywell.climate.HoneywellUSThermostat\")\n def test_eu_setup_bad_temp(self, mock_round, mock_evo):\n \"\"\"Test the EU setup with invalid temperature.\"\"\"\n 
config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"eu\",\n }\n\n with pytest.raises(vol.Invalid):\n honeywell.PLATFORM_SCHEMA(config)\n\n @mock.patch(\"evohomeclient.EvohomeClient\")\n @mock.patch(\"homeassistant.components.honeywell.climate.HoneywellUSThermostat\")\n def test_eu_setup_error(self, mock_round, mock_evo):\n \"\"\"Test the EU setup with errors.\"\"\"\n config = {\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n honeywell.CONF_REGION: \"eu\",\n }\n mock_evo.return_value.temperatures.side_effect = (\n requests.exceptions.RequestException\n )\n add_entities = mock.MagicMock()\n hass = mock.MagicMock()\n assert not honeywell.setup_platform(hass, config, add_entities)\n\n\nclass TestHoneywellRound(unittest.TestCase):\n \"\"\"A test class for Honeywell Round thermostats.\"\"\"\n\n def setup_method(self, method):\n \"\"\"Test the setup method.\"\"\"\n\n def fake_temperatures(force_refresh=None):\n \"\"\"Create fake temperatures.\"\"\"\n temps = [\n {\n \"id\": \"1\",\n \"temp\": 20,\n \"setpoint\": 21,\n \"thermostat\": \"main\",\n \"name\": \"House\",\n },\n {\n \"id\": \"2\",\n \"temp\": 21,\n \"setpoint\": 22,\n \"thermostat\": \"DOMESTIC_HOT_WATER\",\n },\n ]\n return temps\n\n self.device = mock.MagicMock()\n self.device.temperatures.side_effect = fake_temperatures\n self.round1 = honeywell.RoundThermostat(self.device, \"1\", True, 16)\n self.round1.update()\n self.round2 = honeywell.RoundThermostat(self.device, \"2\", False, 17)\n self.round2.update()\n\n def test_attributes(self):\n \"\"\"Test the attributes.\"\"\"\n assert self.round1.name == \"House\"\n assert self.round1.temperature_unit == TEMP_CELSIUS\n assert self.round1.current_temperature == 20\n assert self.round1.target_temperature == 21\n assert not self.round1.is_away_mode_on\n\n assert self.round2.name == \"Hot Water\"\n assert self.round2.temperature_unit == TEMP_CELSIUS\n assert self.round2.current_temperature == 21\n assert self.round2.target_temperature is None\n assert not self.round2.is_away_mode_on\n\n def test_away_mode(self):\n \"\"\"Test setting the away mode.\"\"\"\n assert not self.round1.is_away_mode_on\n self.round1.turn_away_mode_on()\n assert self.round1.is_away_mode_on\n assert self.device.set_temperature.call_count == 1\n assert self.device.set_temperature.call_args == mock.call(\"House\", 16)\n\n self.device.set_temperature.reset_mock()\n self.round1.turn_away_mode_off()\n assert not self.round1.is_away_mode_on\n assert self.device.cancel_temp_override.call_count == 1\n assert self.device.cancel_temp_override.call_args == mock.call(\"House\")\n\n def test_set_temperature(self):\n \"\"\"Test setting the temperature.\"\"\"\n self.round1.set_temperature(temperature=25)\n assert self.device.set_temperature.call_count == 1\n assert self.device.set_temperature.call_args == mock.call(\"House\", 25)\n\n def test_set_hvac_mode(self) -> None:\n \"\"\"Test setting the system operation.\"\"\"\n self.round1.set_hvac_mode(\"cool\")\n assert self.round1.current_operation == \"cool\"\n assert self.device.system_mode == \"cool\"\n\n self.round1.set_hvac_mode(\"heat\")\n assert self.round1.current_operation == \"heat\"\n assert self.device.system_mode == \"heat\"\n\n\nclass TestHoneywellUS(unittest.TestCase):\n \"\"\"A test class for Honeywell US thermostats.\"\"\"\n\n def setup_method(self, method):\n \"\"\"Test the setup method.\"\"\"\n self.client = mock.MagicMock()\n self.device = mock.MagicMock()\n self.cool_away_temp = 18\n self.heat_away_temp = 28\n 
self.honeywell = honeywell.HoneywellUSThermostat(\n self.client,\n self.device,\n self.cool_away_temp,\n self.heat_away_temp,\n \"user\",\n \"password\",\n )\n\n self.device.fan_running = True\n self.device.name = \"test\"\n self.device.temperature_unit = \"F\"\n self.device.current_temperature = 72\n self.device.setpoint_cool = 78\n self.device.setpoint_heat = 65\n self.device.system_mode = \"heat\"\n self.device.fan_mode = \"auto\"\n\n def test_properties(self):\n \"\"\"Test the properties.\"\"\"\n assert self.honeywell.is_fan_on\n assert self.honeywell.name == \"test\"\n assert self.honeywell.current_temperature == 72\n\n def test_unit_of_measurement(self):\n \"\"\"Test the unit of measurement.\"\"\"\n assert self.honeywell.temperature_unit == TEMP_FAHRENHEIT\n self.device.temperature_unit = \"C\"\n assert self.honeywell.temperature_unit == TEMP_CELSIUS\n\n def test_target_temp(self):\n \"\"\"Test the target temperature.\"\"\"\n assert self.honeywell.target_temperature == 65\n self.device.system_mode = \"cool\"\n assert self.honeywell.target_temperature == 78\n\n def test_set_temp(self):\n \"\"\"Test setting the temperature.\"\"\"\n self.honeywell.set_temperature(temperature=70)\n assert self.device.setpoint_heat == 70\n assert self.honeywell.target_temperature == 70\n\n self.device.system_mode = \"cool\"\n assert self.honeywell.target_temperature == 78\n self.honeywell.set_temperature(temperature=74)\n assert self.device.setpoint_cool == 74\n assert self.honeywell.target_temperature == 74\n\n def test_set_hvac_mode(self) -> None:\n \"\"\"Test setting the operation mode.\"\"\"\n self.honeywell.set_hvac_mode(\"cool\")\n assert self.device.system_mode == \"cool\"\n\n self.honeywell.set_hvac_mode(\"heat\")\n assert self.device.system_mode == \"heat\"\n\n def test_set_temp_fail(self):\n \"\"\"Test if setting the temperature fails.\"\"\"\n self.device.setpoint_heat = mock.MagicMock(\n side_effect=somecomfort.SomeComfortError\n )\n self.honeywell.set_temperature(temperature=123)\n\n def test_attributes(self):\n \"\"\"Test the attributes.\"\"\"\n expected = {\n honeywell.ATTR_FAN: \"running\",\n ATTR_FAN_MODE: \"auto\",\n ATTR_FAN_MODES: somecomfort.FAN_MODES,\n ATTR_HVAC_MODES: somecomfort.SYSTEM_MODES,\n }\n assert expected == self.honeywell.extra_state_attributes\n expected[\"fan\"] = \"idle\"\n self.device.fan_running = False\n assert self.honeywell.extra_state_attributes == expected\n\n def test_with_no_fan(self):\n \"\"\"Test if there is no fan.\"\"\"\n self.device.fan_running = False\n self.device.fan_mode = None\n expected = {\n honeywell.ATTR_FAN: \"idle\",\n ATTR_FAN_MODE: None,\n ATTR_FAN_MODES: somecomfort.FAN_MODES,\n ATTR_HVAC_MODES: somecomfort.SYSTEM_MODES,\n }\n assert self.honeywell.extra_state_attributes == expected\n\n def test_heat_away_mode(self):\n \"\"\"Test setting the heat away mode.\"\"\"\n self.honeywell.set_hvac_mode(\"heat\")\n assert not self.honeywell.is_away_mode_on\n self.honeywell.turn_away_mode_on()\n assert self.honeywell.is_away_mode_on\n assert self.device.setpoint_heat == self.heat_away_temp\n assert self.device.hold_heat is True\n\n self.honeywell.turn_away_mode_off()\n assert not self.honeywell.is_away_mode_on\n assert self.device.hold_heat is False\n\n @mock.patch(\"somecomfort.SomeComfort\")\n def test_retry(self, test_somecomfort):\n \"\"\"Test retry connection.\"\"\"\n old_device = self.honeywell._device\n self.honeywell._retry()\n assert self.honeywell._device == 
old_device\n","sub_path":"tests/components/honeywell/test_climate.py","file_name":"test_climate.py","file_ext":"py","file_size_in_byte":16071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"187674755","text":"from PIL import Image\nimport numpy as np\nimport os, sys\n\ndistricts = ['Bangalore', 'Chennai', 'Delhi', 'Gurgaon', 'Hyderabad', 'Kolkata', 'Mumbai']\nmain_input_folder = 'CBU_CNBU_Changing_Maps'\n\ndestination_directory = main_input_folder+'/Colored_CBU-CNBU_Changing_Maps'\n\nfor district in districts:\n input_file_path = main_input_folder+'/'+district+'_CBU_CNBU_Changing.png'\n image_1d = np.array( Image.open(input_file_path) )\n\n image_3d = np.zeros( [image_1d.shape[0],image_1d.shape[1],3], dtype=np.uint8 )\n \n for i in range(image_1d.shape[0]):\n for j in range(image_1d.shape[1]):\n if image_1d[i][j] == 195:\n image_3d[i][j] = [255,0,0]\n else:\n if image_1d[i][j] == 0: #background\n image_3d[i][j] = [0,0,0]\n elif image_1d[i][j] == 65: #BU\n image_3d[i][j] = [160,160,160]\n elif image_1d[i][j] == 130: #NBU\n image_3d[i][j] = [0,255,0] \n\n image_3d = Image.fromarray(image_3d)\n \n os.makedirs( destination_directory, exist_ok = True )\n image_3d.save(destination_directory+'/'+district+'_Colored_CBU_CNBU_Changing.png')\n\nprint(\"Your CBU/CNBU/Changing maps are successfully color-coded!! Background (black), CBU (gray), CNBU (green), and Changing (red)\\n\")\n","sub_path":"AUTHOR_VERSION_Difference_Based_Change_Detection/Create_Colored_Change_Maps.py","file_name":"Create_Colored_Change_Maps.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"631576196","text":"class Solution(object):\n def jump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n curMost = 0\n rightMost = 0\n count = 0\n for i, n in enumerate(nums):\n if i > curMost:\n curMost = rightMost\n count += 1\n if curMost >= len(nums): break\n rightMost = max(rightMost, i + n)\n return count","sub_path":"Python/045JumpGameii/Oct2116.py","file_name":"Oct2116.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"364734718","text":"#!/bin/python\nfrom string import Template\nimport os\n\ndefinedErrors = [\n'SessionExpiresError',\n'SessionNotFoundError',\n'SessionInvalidError',\n'UserAlreadyExistError',\n'TooWeakPasswordError',\n'InvalidMailError',\n'UserNotFoundError',\n'IncorrectPasswordError',\n'AuthenticationFailedError',\n'CommandNotFoundError',\n'IllegalArgumentError',\n'AlreadyLoginError',\n'ServerSystemError'\n]\n\ncodeHeader='''// Data structure used in comstock server, client\n// INFO: This file is generated by error-gen.py\npackage model\n'''\nsnippet = '''type $error struct {\n msg string\n}\n\nfunc (e *$error) Error() string{\n return e.msg\n}\n\nfunc (e *$error) SetError(msg string) *$error{\n e.msg = msg\n return e\n}\n'''\n\nFILENAME=\"error-gen.go\"\ndef generate() :\n fo = open(FILENAME, \"w\")\n\n fo.write(codeHeader)\n s = Template(snippet)\n for item in definedErrors:\n fo.write(s.substitute(error=item))\n \n fo.close()\n\nif __name__ == '__main__':\n generate();\n os.system(\"gofmt \" + FILENAME + \" > \" + \"model/error.go\")\n os.system(\"rm \" + FILENAME)\n","sub_path":"model/error-gen.py","file_name":"error-gen.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"156984417","text":"# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Unit test for bisect_group module.\"\"\"\n\nimport json\nimport unittest\n\nimport webapp2\nimport webtest\n\nfrom dashboard import bisect_group\nfrom dashboard import testing_common\nfrom dashboard import utils\nfrom dashboard.models import anomaly\nfrom dashboard.models import graph_data\n\n\nclass BisectGroupTest(testing_common.TestCase):\n\n def setUp(self):\n super(BisectGroupTest, self).setUp()\n app = webapp2.WSGIApplication(\n [('/bisect_group', bisect_group.BisectGroupHandler)])\n self.testapp = webtest.TestApp(app)\n\n def _AddSampleData(self):\n \"\"\"Adds sample data and returns a list of sample Anomaly keys.\"\"\"\n testing_common.AddDataToMockDataStore(\n ['master'], ['linux-release', 'android-motoe'],\n {\n 'page_cycler.moz': {'cold_times': {'page_load_time': {}}},\n 'cc_perftests': {'foo': {'bar': {}}},\n })\n return [\n # 0: 200% regression in page_cycler.moz on linux, 201:300\n self._AddAnomaly(\n 'master/linux-release/page_cycler.moz/cold_times/page_load_time',\n start_revision=100201, end_revision=100300,\n median_before_anomaly=50, median_after_anomaly=150,\n bug_id=1234, is_improvement=False),\n # 1: 100% regression in page_cycler.moz on android, 221:320\n self._AddAnomaly(\n 'master/android-motoe/page_cycler.moz/cold_times/page_load_time',\n start_revision=100221, end_revision=100320,\n median_before_anomaly=50, median_after_anomaly=100,\n bug_id=1234, is_improvement=False),\n # 2: 50% regression in cc_perftests on linux, 181:280\n self._AddAnomaly(\n 'master/linux-release/cc_perftests/foo/bar',\n start_revision=100181, end_revision=100280,\n median_before_anomaly=50, median_after_anomaly=75,\n bug_id=2000, is_improvement=False),\n ]\n\n def _AddAnomaly(self, test_path, **properties):\n \"\"\"Adds an Anomaly and returns its key.\"\"\"\n test_key = utils.TestKey(test_path)\n return anomaly.Anomaly(test=test_key, **properties).put()\n\n def testGet_WithBugID(self):\n \"\"\"Tests a request for parameters of a group of alerts specified by key.\"\"\"\n self._AddSampleData()\n response = self.testapp.get('/bisect_group?bug_id=1234')\n # Linux is used since the regression is larger on Linux.\n expected_parameters = {\n 'bisect_bot': 'linux_perf_bisect',\n 'command': ('tools/perf/run_benchmark -v '\n '--browser=release '\n 'page_cycler.moz'),\n 'metric': 'cold_times/page_load_time',\n 'good_revision': 100220,\n 'bad_revision': 100300,\n }\n self.assertEqual(expected_parameters, json.loads(response.body))\n\n def testGet_WithKeys(self):\n \"\"\"Tests a request for parameters of a group of alerts specified by key.\"\"\"\n keys = self._AddSampleData()\n response = self.testapp.get('/bisect_group?keys=%s,%s' %\n (keys[1].urlsafe(), keys[2].urlsafe()))\n # Android is used, since the regression was larger on Android.\n expected_parameters = {\n 'bisect_bot': 'android_motoe_perf_bisect',\n 'command': ('tools/perf/run_benchmark -v '\n '--browser=android-chrome-shell '\n 'page_cycler.moz'),\n 'metric': 'cold_times/page_load_time',\n 'good_revision': 100220,\n 'bad_revision': 100280,\n }\n self.assertEqual(expected_parameters, json.loads(response.body))\n\n def testGet_NoParamsGivesError_ReturnsError(self):\n \"\"\"Tests a bare request to /bisect_group with no parameters.\"\"\"\n # Not giving required parameters is considered \"invalid input\", status 400.\n 
self.testapp.get('/bisect_group', status=400)\n\n def testChooseRevisionRange_InvalidRevisionNum_UsesCommitHashes(self):\n \"\"\"Tests that git hashes are gotten if revision is timestamp.\"\"\"\n testing_common.AddDataToMockDataStore(\n ['MyMaster'], ['win'], {'sunspider': {'Total': {}}})\n test_key = utils.TestKey('MyMaster/win/sunspider/Total')\n parent_key = utils.GetTestContainerKey(test_key)\n r1 = '074b44b4f25dfd3e37651ed91d56674f3a740f24'\n r2 = 'ce6fede39f55d8328f7eadaa5bd931552d5b6c07'\n graph_data.Row(parent=parent_key, id=1000000100, r_chromium=r1,\n value=1).put()\n graph_data.Row(parent=parent_key, id=1000000200, r_chromium=r2,\n value=1).put()\n anomaly_entity = anomaly.Anomaly(\n test=test_key, start_revision=1000000101, end_revision=1000000200,\n median_before_anomaly=50, median_after_anomaly=150,\n bug_id=1234, is_improvement=False)\n\n good, bad = bisect_group.ChooseRevisionRange([anomaly_entity])\n self.assertEqual(r1, good)\n self.assertEqual(r2, bad)\n\n def testChooseRevisionRange_InvalidRevisionNumNoHash_ReturnsNone(self):\n \"\"\"Tests the handling of revision ranges with invalid revisions.\"\"\"\n testing_common.AddDataToMockDataStore(\n ['MyMaster'], ['win'], {'sunspider': {'Total': {}}})\n test_key = utils.TestKey('MyMaster/win/sunspider/Total')\n anomaly_entity = anomaly.Anomaly(\n test=test_key, start_revision=1000010000, end_revision=1000020000,\n median_before_anomaly=50, median_after_anomaly=150,\n bug_id=1234, is_improvement=False)\n\n # The revisions don't appear to be SVN revisions and there are no\n # corresponding points in the datastore, so None is returned.\n good, bad = bisect_group.ChooseRevisionRange([anomaly_entity])\n self.assertIsNone(good)\n self.assertIsNone(bad)\n\n def testGet_NonOverlappingRevision_NoConfigReturned(self):\n \"\"\"Tests the getting of a config with some invalid inputs.\"\"\"\n keys = self._AddSampleData()\n other_key = self._AddAnomaly(\n 'master/linux-release/cc_perftests/foo/bar',\n start_revision=200, end_revision=300,\n median_before_anomaly=50, median_after_anomaly=150,\n bug_id=1234, is_improvement=False)\n\n response = self.testapp.get('/bisect_group?keys=%s,%s' %\n (keys[1].urlsafe(), other_key.urlsafe()))\n # The revision range was non-overlapping, so no config is returned.\n expected_parameters = {}\n self.assertEqual(expected_parameters, json.loads(response.body))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"dashboard/dashboard/bisect_group_test.py","file_name":"bisect_group_test.py","file_ext":"py","file_size_in_byte":6448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"390282400","text":"\n\n\nimport json\nimport ntptime\nimport os\nimport utime\nimport machine\nfrom umqtt.simple import MQTTClient\nfrom code_download import *\nimport _thread\n\n#Variaveis\nval_t = 0\nval_u = 0\n#Verifica se existe o arquivo de versionamento\ntry:\n f = open('src/.version')\n f.close()\n verifica = 1\nexcept:\n verifica = 0\n \ndef sub_cb(topic, msg): \n arquivo = open('comando.txt', 'w')\n arquivo.write(str(msg))\n arquivo.close()\n\n#Configurar o Broker MQTT\nmq = MQTTClient(\"AtualizacaoOTA\",\"mqtt.dioty.co\",1883,\"lucas.penning@sou.ucpel.edu.br\",\"ae8d0e1c\")\nmq.set_callback(sub_cb) \n\n\n#Led AMARELO(ATUALIZACOES)\npin17 = machine.Pin(17, machine.Pin.OUT)\n#Led AMARELO(STATUS DA ATUALIZACAO)\npin18 = machine.Pin(18, machine.Pin.OUT)\npin18.value(1)\n\nif(verifica == 1):\n f = open('src' + '/' + '.version')\n version = f.read()\n print(version)\n f.close()\n \ndef ver_atualizacao():\n #Virificando atualizacoes\n if(verifica == 1):\n if(version != version_v):\n os.chdir('/')\n print(\"Novas atualizacoes\")\n horario_embarcado = rtc.datetime()\n ano_b = horario_embarcado[0]\n mes_b = horario_embarcado[1]\n dia_b = horario_embarcado[2]\n hrs_b = horario_embarcado[4]\n min_b = horario_embarcado[5]\n seg_b = horario_embarcado[6]\n arquivo_h = open(\"historico.txt\", \"a\")\n arquivo_h.write(\"Nova Atualiza鑾借尗o: Vers鑼玱 \"+str(version_v)+\" Data: \"+str(dia_b)+\"/\"+str(mes_b)+\"/\"+str(ano_b)+\" - \"+str(hrs_b)+\":\"+str(min_b)+\":\"+str(seg_b)+\"\\n\")\n arquivo_h.close()\n #Conectando ao Broker MQTT e publicando \n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"Sistema reiniciado, NOVO atualizacao, versao \"+str(version_v))\n utime.sleep(1)\n #Rotina de piscagem do LED AMARELO\n pin17.value(1)\n utime.sleep_ms(1000)\n pin17.value(0)\n utime.sleep_ms(1000)\n pin17.value(1)\n utime.sleep_ms(1000)\n pin17.value(0)\n elif(version == version_v):\n print(\"Sem novas atualizacoes\")\n #Conectando ao Broker MQTT e publicando\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"Sistema reiniciado, SEM atualizar, versao \"+str(version))\n utime.sleep(1)\n #Rotina de piscagem do LED AMARELO\n pin17.value(1)\n utime.sleep_ms(1000)\n pin17.value(0) \n \n#Baixando e Atualizando os codigos\ndef atualiza_ota():\n try:\n c = Code_download()\n c.download_update()\n pin18.value(0)\n _thread.start_new_thread(ver_atualizacao, ()) \n except Exception as e:\n print('ERRO: ' +str(e)) \n finally:\n os.chdir('/') #: usado para voltar para o diretorio / apos a execucao do codigo da pasta\n \natualiza_ota() \n \nif(verifica == 1):\n g = open('src' + '/' + '.version')\n version_v = g.read()\n print(version_v)\n g.close()\n \n\ndef escutando(): \n while True: \n gc.collect()\n gc.mem_alloc()\n utime.sleep(2)\n arquivo = open('comando.txt', 'r')\n linha = arquivo.readline()\n comprimento = len(linha)\n palavra = linha[2:comprimento-1]\n print(\"Comando:\" + palavra)\n mq.subscribe(topic=\"/lucas.penning@sou.ucpel.edu.br/\")\n if(palavra != ''):\n arquivo = open('comando.txt', 'w')\n arquivo.write('')\n arquivo.close()\n if(palavra == 'version'):\n #Conectando ao Broker MQTT e publicando \n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(version_v))\n utime.sleep(1)\n if(palavra == 'reiniciar'):\n arquivo = open('comando.txt', 'w')\n arquivo.write('')\n arquivo.close()\n machine.reset()\n utime.sleep(1)\n if(palavra == 'horario'):\n rtc = machine.RTC()\n horario_embarcado = rtc.datetime()\n ano = horario_embarcado[0]\n mes = 
horario_embarcado[1]\n dia = horario_embarcado[2]\n hrs = horario_embarcado[4]\n min = horario_embarcado[5]\n seg = horario_embarcado[6]\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(dia)+\"/\"+str(mes)+\"/\"+str(ano)+\" - \"+str(hrs)+\":\"+str(min)+\":\"+str(seg))\n utime.sleep(1)\n if(palavra == 'uptime'):\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(dia_a)+\"/\"+str(mes_a)+\"/\"+str(ano_a)+\" - \"+str(hrs_a)+\":\"+str(min_a)+\":\"+str(seg_a))\n utime.sleep(1)\n if(palavra == 'atualizar'):\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"Software atualizado\")\n utime.sleep(1)\n atualiza_ota()\n if(palavra == 'sensores'):\n file = open('conf.json', 'r') #: le os dados do json\n file_json = ujson.loads(file.read())\n sensores = file_json['sensores']['tipo']\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(sensores))\n utime.sleep(1)\n if(palavra == 'temperatura'):\n val_t = 0\n content_variable = open('sensor_temperatura.txt ', \"r\")\n file_lines = content_variable.readlines()\n content_variable.close()\n last_line = file_lines[len(file_lines)-1]\n val_t = last_line[6:10]\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(val_t))\n utime.sleep(1)\n if(palavra == 'umidade'):\n val_u = 0\n content_variable = open('sensor_umidade.txt ', \"r\")\n file_lines = content_variable.readlines()\n content_variable.close()\n last_line = file_lines[len(file_lines)-1]\n val_u = last_line[6:10]\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(val_u))\n utime.sleep(1)\n if(palavra == 'logt'): \n arquivo = open('sensor_temperatura.txt', 'r')\n unica_string = arquivo.read()\n arquivo.close()\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(unica_string))\n utime.sleep(1)\n if(palavra == 'logu'): \n arquivo = open('sensor_umidade.txt', 'r')\n unica_string = arquivo.read()\n arquivo.close()\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(unica_string))\n utime.sleep(1)\n if(palavra == 'hist'): \n arquivo = open('historico.txt', 'r')\n unica_string = arquivo.read()\n arquivo.close()\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"\"+str(unica_string))\n utime.sleep(1)\n if(palavra == 'parametro'): \n arquivo = open('tempo.txt', 'r')\n unica_string = arquivo.read()\n arquivo.close()\n if(int(unica_string) > 60):\n r = 10\n else:\n r = int(unica_string) + 10\n arquivo_h = open(\"tempo.txt\", \"w\")\n arquivo_h.write(str(r))\n arquivo_h.close()\n mq.connect()\n mq.publish(b\"/lucas.penning@sou.ucpel.edu.br/\",b\"Tempo de leitura:\"+str(r))\n utime.sleep(1)\n \n#Definindo Hor璋﹔io via NTP\ndef uptime():\n ntptime.settime()\n rtc = machine.RTC()\n utc_shift = -3\n tm = utime.localtime(utime.mktime(utime.localtime()) + utc_shift*3600)\n tm = tm[0:3] + (0,) + tm[3:6] + (0,)\n rtc.datetime(tm)\n \n \ndef rotina_verificacao():\n while True:\n rtc = machine.RTC()\n horario_embarcado = rtc.datetime()\n hrs = horario_embarcado[4]\n min = horario_embarcado[5]\n utime.sleep(30)\n if(hrs == 15 and min == 39):\n machine.reset()\n \ndef rotina_leitura():\n while True:\n rtc = machine.RTC()\n horario_embarcado = rtc.datetime()\n hrs = horario_embarcado[4]\n if hrs == 23:\n hrs = 0\n else:\n hrs += 1\n \n with open(\"sensor_umidade.txt\", \"r\") as f:\n texto=f.readlines()\n with open(\"sensor_umidade.txt\", \"w\") as f:\n for i in texto:\n if \"id:\"+str(hrs) not in i:\n f.write(i)\n \n with 
open(\"sensor_temperatura.txt\", \"r\") as j:\n texto_a=j.readlines()\n with open(\"sensor_temperatura.txt\", \"w\") as j:\n for i_a in texto_a:\n if \"id:\"+str(hrs) not in i_a:\n j.write(i_a)\n utime.sleep(1200)\n\n#THREAD's\n_thread.start_new_thread(uptime, ())\n_thread.start_new_thread(rotina_leitura, ())\nutime.sleep(1)\n_thread.start_new_thread(rotina_verificacao, ())\n\nutime.sleep(1)\n_thread.start_new_thread(escutando, ())\nutime.sleep(1)\n\n#Ajustando hora\nuptime()\nrtc = machine.RTC()\nhorario_embarcado = rtc.datetime()\nano_a = horario_embarcado[0]\nmes_a = horario_embarcado[1]\ndia_a = horario_embarcado[2]\nhrs_a = horario_embarcado[4]\nmin_a = horario_embarcado[5]\nseg_a = horario_embarcado[6]\n\nprint(\"\\n\\nAgora vai comecar o codigo que foi baixado\\n\")\nexec(open('src/exemplo.py').read()) #: como ja esta no diretorio que foi baixado os codigos executa o exemplo.py\n\n\n\n","sub_path":"boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"263947559","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nimport pickle\n\n\nsince = time.time()\n\n\ndata_dir = '../data/'\nsave_dir = '../saves/'\n\n# load_name = 'train_set.csv'\nload_name = 'test_set.csv'\nload_name = load_name[:-4]\n# print(load_name)\ndt = pickle.load(open(save_dir+load_name+'_dict.save', \"rb\"))\ndf = pd.read_csv(save_dir+load_name+\".csv\",\n dtype=dt)\ndel dt\n\n\ndef insert_this(on):\n global df\n on = on[:-4]\n df1 = pd.read_csv('../saves/feature/'+on+'.csv')\n df1.drop('id', axis=1, inplace=True)\n on = on[-10:]\n # print(on)\n df1.rename(columns={'target': on}, inplace=True)\n # print(df1.head(10))\n df = df.join(df1)\n del df1\n\n\ninsert_this('[0.67982]_0.6788_Light_gbdt_1512750240.csv')\ninsert_this('[0.62259]_0.6246_Light_gbdt_1512859793.csv')\n\n# print(df.head(10))\nprint()\nprint('>'*20)\nprint('>'*20)\nprint('dtypes of df:')\n\nprint(df.dtypes)\nprint('number of rows:', len(df))\nprint('number of columns:', len(df.columns))\n# print('<'*20)\n\n\n# for on in df.columns:\n# print()\n# print('inspecting:', on)\n# # print('>'*20)\n# print('any null:', df[on].isnull().values.any())\n# print('null number:', df[on].isnull().values.sum())\n# print()\n# print(on, 'dtype:', df[on].dtypes)\n# print('describing', on, ':')\n# print(df[on].describe())\n# print('<'*20)\n# l = df[on]\n# s = set(l)\n# print('list len:', len(l))\n# print('set len:', len(s))\n# print()\nprint('<'*20)\nprint('<'*20)\nprint('<'*20)\n\n\n\nprint()\ntime_elapsed = time.time() - since\nprint('[timer]: complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n\n\n","sub_path":"kaggle_song_git/code_box/drill_train_and_compare_V1006/checker_V1001.py","file_name":"checker_V1001.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"375423366","text":"from datetime import datetime\r\nimport time\r\nimport os\r\ndef func():\r\n os.system(r\"python start.py >> C:\\Users\\Administrator\\Desktop\\python_demo\\message.log\")\r\nHour = 14\r\nwhile True:\r\n timel = datetime.now().strftime(\"%H\")\r\n if Hour == int(timel):\r\n func()\r\n Hour +=2\r\n if Hour > 24:\r\n Hour = Hour-24\r\n print(Hour, \"--\")\r\n time.sleep(5)\r\n\r\n\r\n","sub_path":"contabtime.py","file_name":"contabtime.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"148679928","text":"#!/usr/bin/env python\n\n# author : Patrice Camousseigt et Margaux Debure\n# maladie.py : genere une maladie en fonction des symptomes\n\n## import\n\nimport xml.etree.ElementTree\nfrom calcul import *\nfrom symptome import *\n\n\nmaladies = []\n\nSYMPTOME_INCONNU = \"Symptôme inconnu\"\n\n## methode\n\ndef charger_maladies(fichier):\n\ttree = xml.etree.ElementTree.parse(fichier)\n\troot = tree.getroot()\n\ti = 0\n\tfor maladie in root:\n\t\tlst_int = [int(x) for x in root[i][2].text.split(\",\")]\n\t\tmaladies.append(Maladie(root[i][0].text, root[i][1].text, lst_int))\n\t\ti+=1\n\ndef recherche_valeur(cle):\n\treturn SYMPTOMES.get(cle, SYMPTOME_INCONNU)\n\n\ndef recherche_valeur_liste(liste):\n\tl = []\n\tfor mot in liste :\n\t\tsymptome = recherche_valeur(mot)\n\t\tif(symptome != SYMPTOME_INCONNU) :\n\t\t\tl.append(symptome)\n\treturn l\n\ndef nombre_symptomes_communs(maladie, liste):\n\tnombre_symptome_commun = 0\n\tfor id_symptome in liste :\n\t\tif id_symptome in maladie.get_liste_id_symptomes() :\n\t\t\tnombre_symptome_commun += 1\n\treturn nombre_symptome_commun\n\ndef liste_symptomes_communs(maladie, liste):\n\tl = []\n\tfor id_symptome in liste :\n\t\tif id_symptome in maladie.get_liste_id_symptomes() :\n\t\t\tl.append(maladie.get_nom_symptome(id_symptome))\n\treturn l\n\ndef trouver_maladie(liste_symptomes_id):\n\tliste_maladies = []\n\n\tfor maladie in maladies :\n\t\tliste_symptome_commun = liste_symptomes_communs(maladie, liste_symptomes_id)\n\t\tproba_maladie = probabilite_maladie(len(liste_symptome_commun), len(liste_symptomes_id))\n\t\tliste_maladies.append([maladie, liste_symptome_commun, proba_maladie])\n\n\tliste_maladies.sort(key=lambda tup: tup[2], reverse=True) # trie par le troisième argument de la liste (proba)\n\n\treturn liste_maladies\n\n\ndef obtenir_liste_symptomes():\n\tl = []\n\tfor sympt in SYMPTOMES:\n\t\tl.append(sympt)\n\treturn l\n\n\n## classe\n\nclass Maladie:\n\n\tdef __init__(self, nom_maladie, gravite_maladie, liste_symptomes):\n\t\tself.nom = nom_maladie\n\t\tself.gravite = gravite_maladie\n\t\tself.symptomes = liste_symptomes\n\n\tdef get_nom_maladie(self):\n\t\treturn self.nom\n\n\tdef get_gravite_maladie(self):\n\t\treturn self.gravite\n\n\tdef get_liste_id_symptomes(self):\n\t\treturn self.symptomes\n\n\tdef get_nom_symptome(self, ident):\n\t\tfor cle,valeur in SYMPTOMES.items():\n\t\t\tif(valeur == ident):\n\t\t\t\treturn cle\n\t\treturn \"Erreur\";\n\n\tdef get_liste_noms_symptomes(self):\n\t\tl = [];\n\t\tfor i in self.symptomes:\n\t\t\tl.append(self.get_nom_symptome(i))\n\t\treturn l\n\n\n\n","sub_path":"src/maladie.py","file_name":"maladie.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"272325810","text":"import re\nimport os\nimport matplotlib.pyplot as plt\n\nfrom wordcloud import WordCloud\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.corpus import stopwords as sw\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.mixture import GaussianMixture as GMM\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import Normalizer\n\n\nclass LemmaTokenizer(object):\n\n def __init__(self):\n\n self.lemmatizer = WordNetLemmatizer()\n\n def __call__(self, documents):\n\n # Lemmas collection\n lemmas = []\n mails = []\n\n # Extract raw tokens\n raw_tokens = re.split(r'(?!\\@|\\.)\\W+|\\n+|\\r+', documents.lower())\n # Iterate over raw tokens\n for raw_token in raw_tokens:\n\n tokens = []\n\n # Preserve emails\n if '@' in raw_token:\n mails += raw_token.split('@')\n # Tokenize the rest\n else:\n tokens = word_tokenize(raw_token)\n\n if len(tokens) > 0:\n for token in tokens:\n if len(token) > 2 and '-' not in token:\n lemmas.append(self.lemmatizer.lemmatize(token))\n\n lemmas = mails + lemmas\n\n # Get extracted lemmas\n return lemmas\n\n\nif __name__ == '__main__':\n\n # News collection\n news = {}\n # Store filenames\n filenames = sorted(map(lambda x: int(x), os.listdir('T-newsgroups')))\n\n # Collect all news\n for filename in filenames:\n with open(f'T-newsgroups/{filename}', 'r') as file:\n news[filename] = file.read()\n\n # Stopwords collector\n stopwords = []\n\n # Collect all stopwords\n with open(f'src/stopwords.txt', 'r') as file:\n for stopword in file:\n stopwords.append(stopword.strip())\n\n # Tokenizer\n lemmaTokenizer = LemmaTokenizer()\n\n # Initialize vectorizer\n vectorizer = TfidfVectorizer(tokenizer=lemmaTokenizer,\n stop_words=(sw.words('english') + stopwords),\n max_df=.28,\n min_df=3,\n ngram_range=(1, 1))\n\n # Vectorize the text\n tdidf_X = vectorizer.fit_transform(list(news.values()))\n\n # Scaling data\n normalizer = Normalizer()\n norm_tdidf_X = normalizer.fit_transform(tdidf_X)\n\n svd = TruncatedSVD(n_components=5)\n X_red = svd.fit_transform(norm_tdidf_X)\n\n # Clustering with kmeans\n kmeans = GMM(n_components=4, max_iter=200, n_init=3)\n cluster_labels = kmeans.fit_predict(X_red)\n\n # Label collection\n labels = dict(zip(range(4), [[] for i in range(4)]))\n # Collect label for each document\n for index, label in enumerate(cluster_labels):\n labels[label].append(index)\n\n # Get all textes\n textes = list(news.values())\n plot_titles = ['Sport', 'Space', 'Medicine', 'Firearms']\n\n # Iterate over clusters to draw wordcloud\n for index, cluster in enumerate(labels.values()):\n\n # Words collector\n text = ''\n # Collect all words to extract word cloud for each cluster\n for document in cluster:\n text += textes[document].lower()\n\n # Generate and plot a word cloud image\n wordcloud = WordCloud(stopwords=(sw.words('english') + stopwords), background_color=\"white\").generate(text)\n\n # Display generated word cloud\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.title(plot_titles[index])\n plt.axis(\"off\")\n plt.show()\n","sub_path":"L05/scripts/E01-02.py","file_name":"E01-02.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"237739474","text":"from skorch.callbacks import EpochTimer, BatchScoring, PrintLog\nfrom skorch.utils import train_loss_score, valid_loss_score, noop\nfrom skorch.classifier import NeuralNet\nfrom skorch.classifier import NeuralNetClassifier\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\nimport numpy as np\n\n\nclass EEGClassifier(NeuralNetClassifier):\n \"\"\"Classifier that does not assume softmax activation.\n Calls loss function directly without applying log or anything.\n \"\"\"\n\n # pylint: disable=arguments-differ\n def get_loss(self, y_pred, y_true, *args, **kwargs):\n \"\"\"Return the loss for this batch by calling NeuralNet get_loss.\n Parameters\n ----------\n y_pred : torch tensor\n Predicted target values\n y_true : torch tensor\n True target values.\n X : input data, compatible with skorch.dataset.Dataset\n By default, you should be able to pass:\n * numpy arrays\n * torch tensors\n * pandas DataFrame or Series\n * scipy sparse CSR matrices\n * a dictionary of the former three\n * a list/tuple of the former three\n * a Dataset\n If this doesn't work with your data, you have to pass a\n ``Dataset`` that can deal with the data.\n training : bool (default=False)\n Whether train mode should be used or not.\n\n \"\"\"\n return NeuralNet.get_loss(self, y_pred, y_true, *args, **kwargs)\n\n def get_iterator(self, dataset, training=False, drop_index=True):\n iterator = super().get_iterator(dataset, training=training)\n if drop_index:\n return ThrowAwayIndexLoader(self, iterator)\n else:\n return iterator\n\n\n def on_batch_end(self, net, X, y, training=False, **kwargs):\n # If training is false, assume that our loader has indices for this\n # batch\n if not training:\n cbs = self._default_callbacks + self.callbacks\n epoch_cbs = []\n for name, cb in cbs:\n if (cb.__class__.__name__ == 'CroppedTrialEpochScoring') and (\n hasattr(cb, 'supercrop_inds_')) and (cb.on_train == False):\n epoch_cbs.append(cb)\n # for trialwise decoding stuffs it might also be we don't have\n # cropped loader, so no indices there\n if len(epoch_cbs) > 0:\n assert hasattr(self, '_last_supercrop_inds')\n for cb in epoch_cbs:\n cb.supercrop_inds_.append(self._last_supercrop_inds)\n del self._last_supercrop_inds\n\n\n def predict_with_supercrop_inds_and_ys(self, dataset):\n preds = []\n i_supercrop_in_trials = []\n i_supercrop_stops = []\n supercrop_ys = []\n for X, y, i in self.get_iterator(dataset, drop_index=False):\n i_supercrop_in_trials.append(i[0].cpu().numpy())\n i_supercrop_stops.append(i[2].cpu().numpy())\n preds.append(self.predict_proba(X))\n supercrop_ys.append(y.cpu().numpy())\n preds = np.concatenate(preds)\n i_supercrop_in_trials = np.concatenate(i_supercrop_in_trials)\n i_supercrop_stops = np.concatenate(i_supercrop_stops)\n supercrop_ys = np.concatenate(supercrop_ys)\n return dict(\n preds=preds, i_supercrop_in_trials=i_supercrop_in_trials,\n i_supercrop_stops=i_supercrop_stops, supercrop_ys=supercrop_ys)\n\n\n # Removes default EpochScoring callback computing 'accuracy' to work properly\n # with cropped decoding.\n @property\n def _default_callbacks(self):\n return [\n (\"epoch_timer\", EpochTimer()),\n (\n \"train_loss\",\n BatchScoring(\n train_loss_score,\n name=\"train_loss\",\n on_train=True,\n target_extractor=noop,\n ),\n ),\n (\n \"valid_loss\",\n BatchScoring(\n valid_loss_score, name=\"valid_loss\", target_extractor=noop,\n ),\n ),\n (\"print_log\", PrintLog()),\n ]\n\n\nclass ThrowAwayIndexLoader(object):\n def __init__(self, net, loader):\n self.net = 
net\n self.loader = loader\n self.last_i = None\n\n def __iter__(self, ):\n normal_iter = self.loader.__iter__()\n for batch in normal_iter:\n if len(batch) == 3:\n x,y,i = batch\n # Store for scoring callbacks\n self.net._last_supercrop_inds = i\n else:\n x,y = batch\n\n # TODO: should be on dataset side\n if hasattr(x, 'type'):\n x = x.type(torch.float32)\n y = y.type(torch.int64)\n yield x,y\n","sub_path":"braindecode/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"266757598","text":"#! /usr/bin/env python3\n\nfrom typing import Tuple, Dict\nimport re\nfrom dataclasses import dataclass\nfrom functools import partial\n\nimport rospy\nimport rosnode\n\nfrom task_allocator_msgs.msg import Confirmation\nfrom bidder_msgs.msg import Bid\n\n# Node name\nNODE_NAME = 'task_allocator'\n\n# Topics and services\nBID_TOPIC = 'bid'\nCONFIRMATION_TOPIC = 'confirmation'\n\n\n@dataclass\nclass StoredBid:\n bid: Bid\n num_bids: int\n active: bool = True\n\n\nclass TaskAllocator:\n \"\"\"\n Class responsible for assigning tasks to the best bid (lowest cost).\n Tasks are identified by their coordinates\n\n Tasks are either confirmed once all bids have been heard or after a timeout runs out.\n \"\"\"\n bid_store: Dict[Tuple[float, float, int], StoredBid] = dict()\n\n def __init__(self, no_of_robots=2, timeout_sec=3, refresh_robot_number_interval=30):\n self.num_robots = no_of_robots\n self.timeout = timeout_sec\n self.confirm_pub = None\n self.bid_sub = None\n\n self.__init_subscribers()\n self.__init_publishers()\n\n rospy.loginfo(f\"[{NODE_NAME}] node is ready - \"\n f\"\\n\\tlistening for new bids on '{self.bid_sub.resolved_name}\"\n f\"\\n\\tpublishing task allocations to '{self.confirm_pub.resolved_name}'\")\n\n rospy.Timer(rospy.Duration(refresh_robot_number_interval),\n self.update_no_robots)\n rospy.spin()\n\n def __init_publishers(self):\n self.confirm_pub = rospy.Publisher(\n CONFIRMATION_TOPIC, Confirmation, queue_size=10)\n\n def __init_subscribers(self):\n self.bid_sub = rospy.Subscriber(BID_TOPIC, Bid, self.bid_cb)\n\n def update_no_robots(self, _) -> None:\n reg = re.compile('/[^/]+/bidder')\n self.num_robots = len(\n {name for name in rosnode.get_node_names() if reg.match(name)})\n rospy.loginfo(\n f\"[{NODE_NAME}] updated number of robots to: {self.num_robots} (just a refresh)\")\n\n def __store_bid(self, coordinate: Tuple[float, float, int], bid: Bid) -> None:\n \"\"\"\n Stores a bid: either create a new entry, add the bid if its cost is better or simply increment the\n seen counter.\n\n :param coordinate: identifying coordinate of bid\n :param bid: the bid\n \"\"\"\n\n # new bid - put in store and start timer\n if coordinate not in self.bid_store:\n stored_bid = StoredBid(\n bid=bid,\n num_bids=1,\n )\n self.bid_store[coordinate] = stored_bid\n rospy.Timer(rospy.Duration(self.timeout), partial(\n self.confirm, coordinate), oneshot=True)\n # task is already timed out -> ignore bid\n elif not self.bid_store[coordinate].active:\n return\n # bid is better than previous bids\n elif self.bid_store[coordinate].bid.cost > bid.cost:\n self.bid_store[coordinate].bid = bid\n self.bid_store[coordinate].num_bids += 1\n # bid is worse than previous -> just increment counter\n else:\n self.bid_store[coordinate].num_bids += 1\n\n def bid_cb(self, bid: Bid) -> None:\n \"\"\"\n Callback for ROS subscription. 
Store bid and send out confirmation if all robots proposed for that task\n :param bid: the bid\n \"\"\"\n\n coordinates: Tuple[float, float, int] = (\n bid.task.x, bid.task.y, bid.task.header.stamp.secs)\n self.__store_bid(coordinates, bid)\n if self.bid_store[coordinates].num_bids >= self.num_robots:\n self.confirm(coordinates)\n\n def confirm(self, coordinates: Tuple[float, float, int], _=None) -> None:\n \"\"\"\n Sends out confirmation with current best bid and closes the Task afterwards\n :param coordinates: identifier (coordinates) of the task that should be confirmed\n :param _: unused param that is only passed when this is called using rospy.Timer\n \"\"\"\n\n if self.bid_store[coordinates].active:\n rospy.loginfo(\n f\"[{NODE_NAME}] confirming task at {coordinates} for robot \"\n f\"'{self.bid_store[coordinates].bid.robot_id}'\")\n self.confirm_pub.publish(Confirmation(Bid=self.bid_store[coordinates].bid,\n robot_id=self.bid_store[coordinates].bid.robot_id))\n\n self.bid_store[coordinates].active = False\n\n\nif __name__ == '__main__':\n rospy.init_node(NODE_NAME, anonymous=True)\n _no_of_robots_ = rospy.get_param('~no_of_robots', 2)\n _timeout_sec_ = rospy.get_param('~timeout_sec', 5)\n _refresh_robot_number_interval_ = rospy.get_param(\n '~refresh_robot_number_interval', 30)\n\n TaskAllocator(_no_of_robots_, _timeout_sec_,\n _refresh_robot_number_interval_)\n","sub_path":"ros/src/task_allocator/scripts/TaskAllocator.py","file_name":"TaskAllocator.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"223301209","text":"from django.views.generic.edit import CreateView, UpdateView, DeleteView\n#from django.views.generic import TemplateView\nfrom django.urls import reverse_lazy, reverse\nfrom django.shortcuts import render\nfrom .models import Product, Customer, Supplier, SalesOrder,\\\n OrderStatus,PurchaseOrderPayment,SaledOrderPayment\nfrom .forms import PurchaseOrderForm,PurchaseOrderProductForm,PurchaseOrderPaymentForm,\\\n SalesOrderForm,SalesOrderProductForm,SaledOrderPaymentForm\n\n\"\"\" \n#######################################################\n#*****************Product view************************# \n#######################################################\n\"\"\"\n\ndef ProductList(request):\n product = Product.objects.all()\n context = {'product':product}\n return render(request,'dokan/index.html',context)\n\n\n\ndef ProductDetail(request, pk):\n product_detail = Product.objects.get(pk = pk)\n context = {'product_detail': product_detail}\n return render(request, 'dokan/product_detail.html', context)\n\n\n\nclass ProductCreate(CreateView):\n model = Product\n fields = '__all__'\n template_name = 'dokan/product_form.html'\n\n\nclass ProductUpdate(UpdateView):\n model = Product\n fields = '__all__'\n template_name = 'dokan/product_update.html'\n\n\nclass ProductDelete(DeleteView):\n model = Product\n template_name = 'dokan/product_delete.html'\n #success_url = reverse_lazy(Product.name)\n\n def get_success_url(self):\n return reverse('dokan:productlist')\n\n\"\"\"\n#############################################################\n#*********************Customer view*************************#\n#############################################################\n\"\"\"\n\ndef CustomerList(request):\n customer = Customer.objects.all()\n context = {'customer':customer}\n return render(request,'dokan/customer.html',context)\n\n\ndef CustomerDetail(request, pk):\n Customer_detail = Customer.objects.get(pk = pk)\n context = {'customer_detail': Customer_detail}\n return render(request, 'dokan/customer_detail.html', context)\n\n\nclass CustomerCreate(CreateView):\n model = Customer\n fields = '__all__'\n template_name = 'dokan/customer_form.html'\n\nclass CustomerUpdate(UpdateView):\n model = Customer\n fields = '__all__'\n template_name = 'dokan/customer_update.html'\n\nclass CustomerDelete(DeleteView):\n model = Customer\n template_name = 'dokan/customer_delete.html'\n #success_url = reverse_lazy(Product.name)\n\n def get_success_url(self):\n return reverse('dokan:customerlist')\n\n\n\"\"\"\n##############################################################\n#**************************Supplier view*********************#\n##############################################################\n\"\"\"\n\ndef SupplierList(request):\n supplier = Supplier.objects.all()\n context = {'supplier':supplier}\n return render(request,'dokan/supplier.html',context)\n\n\ndef SupplierDetail(request, pk):\n Supplier_detail = Supplier.objects.get(pk = pk)\n context = {'Supplier_detail': Supplier_detail}\n return render(request, 'dokan/supplier_detail.html', context)\n\n\nclass SupplierCreate(CreateView):\n model = Supplier\n fields = '__all__'\n template_name = 'dokan/supplier_form.html'\n\nclass SupplierUpdate(UpdateView):\n model = Supplier\n fields = '__all__'\n template_name = 'dokan/supplier_update.html'\n\nclass SupplierDelete(DeleteView):\n model = Supplier\n template_name = 'dokan/supplier_delete.html'\n #success_url = reverse_lazy(Product.name)\n\n def get_success_url(self):\n return 
reverse('dokan:Supplierlist')\n\n\ndef purchase_order(request):\n    if request.method == 'POST':\n        form = PurchaseOrderForm(request.POST)\n        if form.is_valid():\n            form.save()\n    form = PurchaseOrderForm()\n    context = {'form': form}\n    return render(request, 'purchase_order.html', context)\n\n\ndef purchase_order_product(request):\n    if request.method == 'POST':\n        form = PurchaseOrderProductForm(request.POST)\n        if form.is_valid():\n            form.save()\n    form = PurchaseOrderProductForm()\n    context = {'form': form}\n    return render(request, 'purchase_order_product.html', context)\n\ndef purchase_order_payment(request):\n    if request.method == 'POST':\n        form = PurchaseOrderPaymentForm(request.POST)\n        if form.is_valid():\n            form.save()\n    form = PurchaseOrderPaymentForm()\n    context = {'form': form}\n    return render(request, 'purchase_order_payment.html', context)\n\n\ndef purchase_due(request):\n    # filter() returns an empty queryset rather than raising, so check for emptiness explicitly\n    dues = PurchaseOrderPayment.objects.filter(PurchaseOrderPaymentdue__gt=0)\n    if dues.exists():\n        context = {'dues': dues}\n    else:\n        context = {'dues_massage': \"No dues to show\"}  # 'dues_massage' is the context key the template reads\n    return render(request, 'purchase_due.html', context)\n\n\ndef sales_order(request):\n    if request.method == 'POST':\n        form = SalesOrderForm(request.POST)\n        if form.is_valid():\n            form.save()\n    form = SalesOrderForm()\n    context = {'form': form}\n    return render(request, 'sales_order.html', context)\n\n\ndef sales_order_product(request):\n    if request.method == 'POST':\n        form = SalesOrderProductForm(request.POST)\n        if form.is_valid():\n            form.save()\n    form = SalesOrderProductForm()\n    context = {'form': form}\n    return render(request, 'sales_order_product.html', context)\n\ndef sales_order_payment(request):\n    if request.method == 'POST':\n        form = SaledOrderPaymentForm(request.POST)\n        if form.is_valid():\n            form.save()\n    form = SaledOrderPaymentForm()\n    context = {'form': form}\n    return render(request, 'sales_order_payment.html', context)\n\ndef sales_due(request):\n    due = SaledOrderPayment.objects.filter(SaledOrderPaymentdue__gt=0)\n    if due.exists():\n        context = {'dues': due}\n    else:\n        context = {'dues_massage': \"No dues to show\"}\n    return render(request, 'sales_due.html', context)\n","sub_path":"dokan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"482119709","text":"\"\"\" Node is defined as\nclass node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\"\"\"\n\ndef checkBST(root):\n visited = {}\n return explore(root, visited)[2]\n\n\n\ndef explore(node, visited):\n\n if (node != None):\n\n if node.data in visited:\n return 0, 0, False\n else:\n visited[node.data] = True\n\n l_min, l_max, l_status = explore(node.left, visited)\n r_min, r_max, r_status = explore(node.right, visited)\n\n cur_min = node.data\n cur_max = node.data\n\n if l_status == False or r_status == False:\n return 0, 0, False\n\n\n if l_min == None: l_min = cur_min\n if r_min == None: r_min = cur_min\n\n if l_max == None: l_max = cur_max\n if r_max == None: r_max = cur_max\n\n\n if max(l_min, l_max) > node.data:\n return 0, 0, False\n\n if min(r_min, r_max) < node.data:\n return 0, 0, False\n\n\n return min(l_min, l_max, r_min, r_max, cur_min), max(l_min, l_max, r_min, r_max, cur_max), True\n\n else:\n return None, None, True\n","sub_path":"iprep/trees/is_bintree.py","file_name":"is_bintree.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"645674644","text":"from Utils.ViewPluginBase import ViewPluginBase\nfrom Utils.ServiceLocator import ServiceLocator, ServiceNames\nimport Utils.UserEvents\nimport Utils.ViewPointer\nimport pygame\n\nclass CheatKeyPlugin(ViewPluginBase):\n \"\"\"Plugin to hook keystrokes only for development use.\"\"\"\n def __init__(self):\n return super().__init__()\n\n def initializePlugin(self, parentView):\n super().initializePlugin(parentView)\n self.registerEventHandler()\n\n def handleEvents(self, events):\n for event in events:\n if event.type == pygame.KEYDOWN:\n self.handleKeyboardEvent(event)\n\n def handleKeyboardEvent(self, event):\n if event.key == pygame.K_t:\n # On 't' change to Training Level.\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='Training')\n pygame.event.post(changeviewEvent)\n elif event.key == pygame.K_F1:\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='Level1')\n pygame.event.post(changeviewEvent)\n elif event.key == pygame.K_F2:\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='Level2')\n pygame.event.post(changeviewEvent)\n elif event.key == pygame.K_F3:\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='Level3')\n pygame.event.post(changeviewEvent)\n elif event.key == pygame.K_F4:\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='Demo')\n pygame.event.post(changeviewEvent)\n elif event.key == pygame.K_F5:\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='DemoStart')\n pygame.event.post(changeviewEvent)\n elif event.key == pygame.K_F6:\n changeviewEvent = pygame.event.Event(Utils.UserEvents.EVENT_CHANGEVIEW, ViewName='DemoFinal')\n pygame.event.post(changeviewEvent)\n\n\n\n","sub_path":"SimpleGame/SimpleGame/Src/Plugins/CheatKeyPlugin.py","file_name":"CheatKeyPlugin.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"342493061","text":"from api.game.card_set import card_set\nfrom random import randint\n\n\nclass WrongCell(Exception):\n pass\n\n\nclass CardLogic:\n COMPARE_QUEUE = (\n ('top', 'bot'),\n ('bot', 'top'),\n ('left', 'right'),\n ('right', 'left')\n )\n\n CARD_SIDE_QUEUE = (\n 'top',\n 'bot',\n 'left',\n 'right'\n )\n\n\nclass Card(CardLogic):\n def __init__(self, top=0, bot=0, left=0, right=0, player=0):\n self.top = top\n self.bot = bot\n self.left = left\n self.right = right\n self.player = player\n self.score = 0\n\n @property\n def json_repr(self):\n return {\n \"top\": self.top,\n \"bot\": self.bot,\n \"left\": self.left,\n \"right\": self.right,\n \"player\": self.player\n }\n\n def __add__(self, other):\n return tuple(\n map(\n lambda x: getattr(self, x[0]) + getattr(other, x[1]),\n self.COMPARE_QUEUE\n )\n ) if getattr(other, 'player', None) \\\n else (-100, -200, -300, -400)\n\n def __sub__(self, other):\n return tuple(\n map(\n lambda x: getattr(self, x[0]) - getattr(other, x[1]),\n self.COMPARE_QUEUE\n )\n ) if getattr(other, 'player', None)\\\n else (-100, -200, -300, -400)\n\n def __repr__(self):\n return f'{self.top}-{self.bot}-' \\\n f'{self.left}-{self.right}-__{self.player}__'\n\n\nclass Table:\n def __init__(self, side, plus=True, same=True, base_attack=True):\n self.side = side\n self.table = [[Card() for __ in range(side)] for _ in range(side)]\n self.mods = {\n 'plus': plus,\n 'same': same,\n 'base_attack': base_attack\n }\n self.mods_queue = ('plus', 'same', 'base_attack')\n\n @property\n def json_repr(self):\n table_repr = []\n for row in self.table:\n for card in row:\n table_repr.append(card.json_repr)\n return table_repr\n\n def print_table(self):\n print(' ', end='')\n for i in range(self.side):\n print(f' {i} ', end='')\n print()\n for i in range(self.side):\n print(f'{i} {self.table[i]}')\n\n def place_card(self, i, j, card):\n if self.table[i][j].player:\n raise WrongCell\n\n self.table[i][j] = card\n return self.compare_cards(i, j, card)\n\n def compare_cards(self, i, j, card):\n for mod in self.mods_queue:\n if self.mods[mod]:\n score = getattr(self, f'_{mod}')(i, j, card)\n if score:\n return score\n return 0\n\n def _same(self, i, j, card):\n score = 0\n _effected_card_list = list(\n filter(\n lambda x: x,\n (self.get_card(coord)\n if not (card - self.get_card(coord))[index] else None\n for index, coord in enumerate(self.neighbours[i, j]))\n )\n )\n\n if len(_effected_card_list) > 1:\n for attaked_card in _effected_card_list:\n if attaked_card.player != card.player:\n attaked_card.player = card.player\n score += 1\n return score\n\n def _plus(self, i, j, card):\n score = 0\n _coord_list = self.neighbours[i, j]\n _sum_list = [\n (card + self.get_card(coord))[index]\n for index, coord in enumerate(_coord_list)\n ]\n _coord_dict = {}\n\n for index, _sum in enumerate(_sum_list):\n attaked_card = self.get_card(_coord_list[index])\n\n if attaked_card:\n _coord_dict.setdefault(_sum, []).append(attaked_card)\n\n for key in _coord_dict.keys():\n if len(_coord_dict[key]) > 1:\n for attaked_card in _coord_dict[key]:\n if attaked_card.player != card.player:\n attaked_card.player = card.player\n score += 1\n return score\n\n def _base_attack(self, i, j, card):\n score = 0\n _effected_card_list = list(\n filter(\n lambda x: x,\n (self.get_card(coord)\n if (card - self.get_card(coord))[index] > 0 else None\n for index, coord in enumerate(self.neighbours[i, j]))\n )\n )\n\n for attaked_card in _effected_card_list:\n if attaked_card.player != card.player:\n 
attaked_card.player = card.player\n score += 1\n return score\n\n @property\n def neighbours(self):\n _neighbours = []\n for i in range(self.side):\n for j in range(self.side):\n _neighbours.append(\n [\n (i, j),\n (i - 1, j) if i - 1 >= 0 else None,\n (i + 1, j) if i + 1 < self.side else None,\n (i, j - 1) if j - 1 >= 0 else None,\n (i, j + 1) if j + 1 < self.side else None\n ]\n )\n\n return {x[0]: x[1:] for x in _neighbours}\n\n def get_card(self, *args):\n if args[0]:\n return self.table[args[0][0]][args[0][1]]\n\n\nclass Player:\n def __init__(self, number):\n self.number = number\n self.hand = [\n Card(*card_set['level_8'][randint(0, 3)], player=number),\n Card(*card_set['level_7'][randint(0, 3)], player=number),\n Card(*card_set['level_5'][randint(0, 3)], player=number),\n Card(*card_set['level_3'][randint(0, 3)], player=number),\n Card(*card_set['level_2'][randint(0, 3)], player=number),\n ]\n self.score = 5\n\n @property\n def json_repr(self):\n return [card.json_repr for card in self.hand]\n\n\nclass Game:\n def __init__(self, n=3, **kwargs):\n mods = kwargs\n self.table = Table(n, **mods)\n self.player_1 = Player(1)\n self.player_2 = Player(2)\n self.turn = 1\n\n def res(self):\n print(f'{self.player_1.number} '\n f'card_set{self.player_1.hand} '\n )\n print('#' * 20)\n self.table.print_table()\n print('#' * 20)\n\n print(f'{self.player_2.number} '\n f'card_set{self.player_2.hand} '\n )\n print(f'score_1: {self.player_1.score} '\n f'score_2: {self.player_2.score}'\n )\n\n @property\n def json_repr(self):\n return {\n \"table\": self.table.json_repr,\n \"player_1_hand\": self.player_1.json_repr,\n \"player_2_hand\": self.player_2.json_repr,\n \"turn\": self.turn,\n \"player_1_score\": self.player_1.score,\n \"player_2_score\": self.player_2.score,\n }\n\n def next_turn(self, card_index, i, j, get_player=0):\n if not get_player:\n player_number = 1 if self.turn % 2 else 2\n else:\n player_number = get_player\n\n card = getattr(self,\n f'player_{player_number}'\n ).hand.pop(card_index)\n _score = self.table.place_card(i, j, card)\n getattr(\n self,\n f'player_{1 if self.turn % 2 else 2}'\n ).score += _score\n getattr(\n self,\n f'player_{2 if self.turn % 2 else 1}'\n ).score -= _score\n self.turn += 1\n\n\ndef input_card():\n card_index = int(input('card_index = '))\n i = int(input('row = '))\n j = int(input('column = '))\n return card_index, i, j\n\n\ndef main():\n g = Game()\n print('Start Game')\n for _ in range(9):\n g.res()\n card_index, i, j = input_card()\n g.next_turn(card_index, i, j)\n g.res()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"api/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"464993699","text":"import nengo\nfrom nengo.utils.matplotlib import rasterplot\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom nengo_learn_assoc_mem.learning_rules.rec_bcm import RecBCM\n\n\ndef rec_legend(root: str, li):\n return [f\"{root}$\\\\rightarrow${ll}\" for ll in li]\n\n\ndef enable_func(t):\n if t > 1.:\n return 1.\n else:\n return 0.\n\n\ndef stim_func(t):\n if (t % 1) > 0.5:\n return 0.3\n else:\n return -0.3\n\n\ndt = 0.001\nseed = 0\nsim_len = 10\nlr = 5e-6\npre_nrn = 4\n\nens_params = dict(encoders=[[1], [1], [-1], [-1]], intercepts=[-0.5, -0.1, -0.1, -0.5], max_rates=[250, 300, 300, 250])\n\nrec_inhib = (-1*np.ones(pre_nrn) + np.eye(pre_nrn)) / 1e4\n\nwith nengo.Network() as model:\n stim = nengo.Node(stim_func)\n enabled = nengo.Node(enable_func)\n\n ens = nengo.Ensemble(pre_nrn, 1, **ens_params)\n\n rec_bcm = RecBCM(pre_nrn, rec_inhib, learning_rate=5e-7, threshold=120, max_inhib=-0.02)\n\n nengo.Connection(stim, ens, synapse=None)\n\n nengo.Connection(ens.neurons, rec_bcm.in_neurons, synapse=0.01)\n nengo.Connection(ens.neurons, rec_bcm.out_neurons, synapse=0.01)\n nengo.Connection(enabled, rec_bcm.enable, synapse=None)\n nengo.Connection(rec_bcm.output, ens.neurons, synapse=None)\n\n p_in = nengo.Probe(stim)\n p_spikes = nengo.Probe(ens.neurons)\n p_out = nengo.Probe(ens, synapse=0.01)\n\n\nwith nengo.Simulator(model) as sim:\n sim.run(sim_len)\n\n\nw_hist = np.array(rec_bcm.weight_history)\nw_hist_trange = np.concatenate(([0], sim.trange(dt=0.1),))\n\nplt.figure(figsize=(12, 8))\nplt.subplot(3, 1, 1)\nplt.plot(sim.trange(), sim.data[p_out], label=\"Post\")\nplt.ylabel(\"Decoded value\")\nplt.ylim(-1.6, 1.6)\nplt.legend(loc=\"lower left\")\n\nplt.subplot(3, 1, 2)\nplt.plot(w_hist_trange, w_hist[:, 0], alpha=0.6)\nplt.ylabel(\"Connection weights\\nfrom first neuron\")\nplt.legend(rec_legend(\"A\", (\"A\", \"B\", \"C\", \"D\")))\n\nplt.subplot(3, 1, 3)\nplt.plot(w_hist_trange, w_hist[:, -1], alpha=0.6)\nplt.ylabel(\"Connection weight\\nfrom last neuron\")\nplt.legend(rec_legend(\"D\", (\"A\", \"B\", \"C\", \"D\")))\n\nplt.show()\n\nplt.figure(figsize=(12, 8))\nwin = int(2e3)\n\nax = plt.subplot(2, 1, 1)\nrasterplot(sim.trange()[:win], sim.data[p_spikes][:win], ax)\nax.set_ylabel('Neuron')\nax.set_yticklabels((\"A\", \"B\", \"C\", \"D\"))\nax.set_xlabel('Time (s)')\nax.set_title('Before learning')\n\nax = plt.subplot(2, 1, 2)\nrasterplot(sim.trange()[-win:], sim.data[p_spikes][-win:], ax)\nax.set_ylabel('Neuron')\nax.set_yticklabels((\"A\", \"B\", \"C\", \"D\"))\nax.set_xlabel('Time (s)')\nax.set_title('After learning')\n\nplt.tight_layout()\nplt.show()\n","sub_path":"nengo_learn_assoc_mem/learning_rules/tests/test_rec_bcm.py","file_name":"test_rec_bcm.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"244249277","text":"import numpy as np\n\n\t# États de la population (0: en santé, 1: malade, 2: mort)\nHEALTHY, SICK, DEAD = 0, 1, 2\n\n\t# Initialisations des constantes\npopulation = 5000\nmaxIterations = 5000\nsurvivalTime = 20\n\n\t# Fonction simulateur\ndef outbreak(gridSize, snapshot, seed = False, animate = False):\n\tif (seed): np.random.seed(seed)\n\n\t\t# Valider {snapshot}\n\tif (snapshot < 0): snapshot = 0\n\tif (snapshot > maxIterations): snapshot = maxIterations - 1\n\tsnapshots = None\n\n\t\t# Initialisation des tableaux des états de la population\n\talivePopulation, sickPopulation = [np.empty(maxIterations, dtype = \"int\") for i in (0, 1)]\n\n\t\t# Tableaux d'animations à retourner\n\tif (animate):\n\t\txArray, yArray, statusArray = [np.empty([population, maxIterations], dtype = \"int\") for i in (0, 1, 2)]\n\n\t\t# Sélection aléatoire des positions initiales et du patient zéro\n\tx, y = [np.random.randint(1, gridSize + 1, population) for i in (0, 1)]\n\tpatientZero = np.random.randint(0, population)\n\n\tstatus = np.full(population, HEALTHY, dtype = \"int\")\t\t\t\t# Statut de la population\n\tremainingLife = np.full(population, survivalTime, dtype = \"int\")\t# Temps de survie restant une fois infecté\n\n\tstatus[patientZero] = SICK\t# Infection du patient zéro\n\tsickCounter = 1\t\t\t\t# Nombre total de personnes malades\n\titeration = 0\t\t\t\t# Compteur d'itération\n\n\t\t# Boucle d'itération temporelle\n\twhile ((sickCounter > 0) and (iteration < maxIterations)):\n\t\t\t# Indices des personnes en vie\n\t\talive = (status != DEAD).nonzero()[0]\n\n\t\t\t# Choix de direction et de pas pour les personnes en vie\n\t\tstep = 2*np.round(np.random.random(alive.size)) - 1\n\t\tdirection = np.round(np.random.random(alive.size))\n\t\thorizontal, vertical = [np.where(direction == i, step, 0) for i in (0, 1)]\n\n\t\t\t# Déplacement des personnes en vie\n\t\tx[alive] = np.clip(x[alive] + horizontal, 1, gridSize)\n\t\ty[alive] = np.clip(y[alive] + vertical, 1, gridSize)\n\n\t\t\t# Boucle sur les personnes malades\n\t\tfor i in ((status == SICK).nonzero()[0]):\n\t\t\t\t# Indices des personnes saines ayant un contact avec des personnes malades\n\t\t\tspread = ((x == x[i]) & (y == y[i]) & (status == HEALTHY)).nonzero()[0]\n\t\t\t\t# Contagion de la maladie\n\t\t\tif (spread.size > 0):\n\t\t\t\tstatus[spread] = SICK\n\t\t\t\tsickCounter += spread.size\n\n\t\t\t\t# Réduction de la vie restante, décès si 0\n\t\t\tremainingLife[i] -= 1\n\t\t\tif (remainingLife[i] == 0):\n\t\t\t\tstatus[i] = DEAD\n\t\t\t\tsickCounter -= 1\n\n\t\t\t# Assignation itérative des tableaux temporels de l'état de la population\n\t\talivePopulation[iteration] = ((status != DEAD).nonzero()[0]).size\n\t\tsickPopulation[iteration] = sickCounter\n\n\t\t\t# Sauvegarde de la {snapshot}-ième itération de l'évolution de la pandémie\n\t\tif (iteration == snapshot):\n\t\t\tsnapshots = (np.copy(x), np.copy(y), np.copy(status), snapshot)\n\n\t\t\t# Assignation itérative des tableaux temporels de la position et statut de la population\n\t\tif (animate):\n\t\t\txArray[:, iteration], yArray[:, iteration] = x, y\n\t\t\tstatusArray[:, iteration] = status\n\n\t\titeration += 1\n\n\t\t# Sauvegarde de la dernière itération de l'évolution de la pandémie s'il n'y a pas eu assez d'itérations\n\tif (snapshots == None):\n\t\tsnapshots = (x, y, status, iteration)\n\n\tif (animate):\n\t\tpositions = (xArray[:, :iteration], yArray[:, :iteration], statusArray[:, :iteration])\n\telse:\n\t\tpositions = (x, y, status)\n\n\treturn 
positions, snapshots, alivePopulation[:iteration], sickPopulation[:iteration], iteration\n\nif __name__ == '__main__':\n\timport matplotlib.animation as mplAnim\n\timport matplotlib.pyplot as plt\n\tfrom matplotlib.gridspec import GridSpec\n\tfrom multiprocessing import Pool\n\tfrom scipy.interpolate import interp1d\n\n\tdef parallelSimulator(gridSize, snapshot, threads, simulations, seed, animate = False):\n\t\t\t# Initialisations des listes à retourner\n\t\tpositionsArray, snapshotsArray, alivesArray, sicksArray, iterationsArray = [[[] for j in range(0, simulations)] for i in range(0, 5)]\n\n\t\t\t# Simuler {threads} pandémie en parallèles\n\t\twith Pool(processes = threads) as pool:\n\t\t\tprocess = [pool.apply_async(outbreak, (gridSize, snapshot, seed*(i + 1), animate,)) for i in range(0, simulations)]\n\t\t\tfor i in range(0, simulations):\n\t\t\t\tpositionsArray[i], snapshotsArray[i], alivesArray[i], sicksArray[i], iterationsArray[i] = process[i].get()\n\n\t\treturn positionsArray, snapshotsArray, alivesArray, sicksArray, iterationsArray\n\n\t\t# Nombre de processus maximales à générer. Ajustez selon votre configuration\n\t\t# 8 processus et 12 simulations requiert ~250MB de RAM sans animation, ~3GB avec animation et sans sauvegarde. Attention!\n\tthreads = 8\n\tsimulations = 12\n\n\t\t# Animer, et sauvegarder, la simulation?\n\tanimate = False\n\tif (animate):\n\t\tsaveAnimation = False\t# Prend pas mal de temps\n\t\tif (saveAnimation):\n\t\t\tffmpegPath = \"D:\\\\Tools\\\\VideoCoders\\\\ffmpeg\\\\ffmpeg.exe\"\n\n\t\t# Seed pour {np.random.seed()}. {False} pour désactiver\n\tseed = False\n\t\"\"\"\n\t\tChaque calcul est effectué avec un seed différents, mais non-aléatoire\n\t\t\tEx.: calcul1(seed*(1)) -> np.random.seed(seed*(1))\n\t\t\t\t calcul2(seed*(2)) -> np.random.seed(seed*(2))\n\t\t\t\t et ainsi de suite\n\n\t\tSeed {41856086}, testé avec 8 threads et 12 simulations, valeurs prises après la L190\n\t\t\tDéviation standard du nombre de morts par simulation : ±714\n\t\t\tDéviation standard de la durée de l'épidémie par simulation : ±372\n\n\t\t\tmostIterations\t\t\t-> 2, indice des tableaux où l'itération est maximale\n\t\t\titerationsArray[2]\t\t-> 1465 itérations\n\t\t\talivesArray[2].min()\t-> 2833 vivants\n\t\t\tsicksArray[2].min()\t\t-> 0 malades\n\t\t\tdeathTolls[2]\t\t\t-> 2167 morts\n\t\"\"\"\n\n\t\t# Afficher la {snapshot}-ième itération de l'évolution de la pandémie\n\t\t# Affiche la dernière itération si {snapshot >= itération}\n\tsnapshot = 600\n\n\t\t# Initialisation des variables Q1\n\tgridSize = 128\n\n\t\t# Simulation des {simulations} épidémie Q1\n\tpositionsArray, snapshotsArray, alivesArray, sicksArray, iterationsArray =\\\n\t\tparallelSimulator(gridSize, snapshot, threads, simulations, seed, animate)\n\n\t\t# Calcul des variations Q1\n\tdeathTolls = [(population - alivesArray[i][-1]) for i in range(0, simulations)]\n\tdeathTollsDeviation = np.std(deathTolls, ddof = 1)\n\toutbreakDeviation = np.std(iterationsArray, ddof = 1)\n\n\t\t# Résultat Q1\n\tprint(f\"Déviation standard du nombre de morts par simulation : ±{deathTollsDeviation:.0f}\")\n\tprint(f\"Déviation standard de la durée de l'épidémie par simulation : ±{outbreakDeviation:.0f}\")\n\n\t\t# Initialisation des variables Q2\n\trho = np.arange(0.15, 0.55, 0.05)\n\tgridSizes = (np.sqrt(population/rho)).astype(dtype = \"int\")\n\n\t\t# Calculs des taux de mortalité et de la durée des épidémies, en plus de leurs moyennes et déviations standards respectives\n\tdeathRatesMean, deathRatesDeviation = 
[np.empty(gridSizes.size) for i in (0, 1)]\t\t\t\t# Q2.2\n\toutbreakDurationsMean, outbreakDurationsDeviation = [np.empty(gridSizes.size) for i in (0, 1)]\t# Q2.3\n\tdeathRates, outbreakDurations = [np.empty([gridSizes.size, simulations]) for i in (0, 1)]\t\t# Q2.4\n\tfor i, gSize in enumerate(gridSizes):\n\t\talivesTemp, _, outbreakDurations[i, :] = parallelSimulator(gSize, snapshot, threads, simulations, seed)[2:]\n\n\t\t\t# Q2.2\n\t\tdeathRates[i, :] = [(population - alivesTemp[j][-1])/population for j in range(0, simulations)]\n\t\tdeathRatesMean[i] = np.mean(deathRates[i, :])\n\t\tdeathRatesDeviation[i] = np.std(deathRates[i, :], ddof = 1)\n\n\t\t\t# Q2.3\n\t\toutbreakDurationsMean[i] = np.mean(outbreakDurations[i, :])\n\t\toutbreakDurationsDeviation[i] = np.std(outbreakDurations[i, :], ddof = 1)\n\n\t\t# Section des graphiques\n\tmostIterations = np.argmax(iterationsArray)\n\tx, y, status = positionsArray[mostIterations]\n\tsnapX, snapY, snapStatus, snapshot = snapshotsArray[mostIterations]\n\talivePopulation, sickPopulation = alivesArray[mostIterations], sicksArray[mostIterations]\n\titerations = iterationsArray[mostIterations]\n\n\t\t# Interpolations pour une courbe des moyennes\n\tinterpolatedRho = np.arange(rho.min(), rho.max(), 0.001)\n\tinterpolation1 = interp1d(rho, deathRatesMean, fill_value = \"extrapolate\", kind = \"cubic\")\n\tinterpolation2 = interp1d(rho, outbreakDurationsMean, fill_value = \"extrapolate\", kind = \"cubic\")\n\tinterpolatedDRM = interpolation1(interpolatedRho)\n\tinterpolatedODM = interpolation2(interpolatedRho)\n\n\t\t# Début et fin de l'animation\n\tbeginIteration, endIteration = 0, iterations\n\tif (animate):\n\t\tif (beginIteration < 0): beginIteration = 0\n\t\tif (beginIteration > iterations): beginIteration = iterations - 1\n\t\tif (endIteration < beginIteration): endIteration = beginIteration\n\t\tif (endIteration > iterations): endIteration = iterations\n\n\t\t# Couleurs des nuages de points selon les indices\n\tdeadColor, healthyColor, sickColor = \"dimgrey\", \"royalblue\", \"darkred\"\n\t\t# Couleurs des graphiques\n\taliveColor, sickColor = \"tab:blue\", \"tab:red\"\n\tdeathRateColor, outbreakDurationColor = \"darkorange\", \"darkblue\"\n\t\t# Couleur des barres d'erreurs\n\tdeathRateEColor, outbreakDurationEColor = \"orange\", \"royalblue\"\n\n\t\t# Paramètres des figures\n\tfigureSize = (12, 8)\n\t\t# Noms des fichiers à sauvegarder\n\tfilenames = [\"tauxVivantsMorts_1.1.png\",\n\t\t\t\t \"instantanePandemie_1.2.png\",\n\t\t\t\t \"tMdE-densite_2.2-2.3.png\",\n\t\t\t\t \"tM-dE_2.4.png\"]\n\t\t# Titres des graphiques\n\ttitles = [f\"Évolution d'une pandémie sur {iterations} itérations\",\n\t\t\t f\"Population de {population} personnes après {snapshot} itérations\",\n\t\t\t \"Taux de mortalités et durées des épidémies moyenne selon la densité de population\",\n\t\t\t \"Taux de mortalité selon la durée des épidémies\"]\n\tif (animate):\n\t\tfilenames.append(\"pandemie_1.3.mkv\")\n\t\ttitles.append(f\"Évolution de la population de {population} personnes,\\n\"\n\t\t\t\t\t f\"entre les itérations {beginIteration} et {endIteration}\")\n\n\t\t# Création des figure et des axes\n\tfigures = [plt.figure(figsize = figureSize) for filename in filenames]\n\tgrids = [GridSpec(1, 1, figure) for figure in figures]\n\taxes = []\n\t\"\"\"\n\t\tfigures[0].grids[0].axes[0] -> Double graphique 1: Nombre de personnes encore en vie selon les itérations\n\t\tfigures[0].grids[0].axes[1] -> Double graphique 1: Nombre de personnes malades selon les 
itérations\n\t\tfigures[1].grids[1].axes[2] -> Population de {population} personnes après {snapshot} itérations\n\n\t\tfigures[2].grids[2].axes[3] -> Double graphique 2: Taux moyen de mortalité selon la densité de population\n\t\tfigures[2].grids[2].axes[4] -> Double graphique 2: Durée moyenne des épidémies selon la densité de population\n\t\tfigures[4].grids[4].axes[5] -> Durée moyenne des épidémies selon la densité de population\n\n\t\tSi {animate}\n\t\t\tfigures[-1].grids[-1].axes[-1]\t-> Animation de l'évolution d'une pandémie\n\t\t\tfigures[-1].grids[-1].scat[0:3]\t-> Nuage de points à animer\n\t\"\"\"\n\n\t\t# Indices des positions, pour leurs couleurs respective dans le nuage de points\n\tdeadIndices, healthyIndices, sickIndices = (status == DEAD), (status == HEALTHY), (status == SICK)\n\t\t# Indices des positions de la {snapshot}-ième (ou dernière) itération, pour leurs couleurs respective dans le nuage de points\n\tsnapDeadIndices, snapHealthyIndices, snapSickIndices = (snapStatus == DEAD), (snapStatus == HEALTHY), (snapStatus == SICK)\n\n\t\t# Double graphique de la population en vie et malades par itération Q1\n\ttIterations = np.arange(0, iterations)\n\taxes.append(figures[0].add_subplot(grids[0][0, 0]))\n\taxes.append(axes[0].twinx())\n\taxes[0].plot(tIterations, sickPopulation, color = sickColor)\n\taxes[1].plot(tIterations, alivePopulation, color = aliveColor)\n\t\t# Paramètres des graphiques Q1\n\taxes[0].set(xlabel = \"Itération\", xlim = (0, iterations + 1), ylim = 0)\n\taxes[0].set_title(titles[0])\n\taxes[0].set_ylabel(\"Malade\", color = sickColor)\n\taxes[0].tick_params(axis = \"y\", labelcolor = sickColor)\n\taxes[1].set_ylabel(\"En vie\", color = aliveColor)\n\taxes[1].tick_params(axis = \"y\", labelcolor = aliveColor)\n\n\t\t# Graphique de la {snapshot}-ième (ou dernière) itération de l'évolution d'une pandémie Q1\n\taxes.append(figures[1].add_subplot(grids[1][0, 0]))\n\taxes[2].scatter(snapX[snapHealthyIndices], snapY[snapHealthyIndices], c = healthyColor)\n\taxes[2].scatter(snapX[snapSickIndices], snapY[snapSickIndices], c = sickColor)\n\taxes[2].scatter(snapX[snapDeadIndices], snapY[snapDeadIndices], c = deadColor)\n\t\t# Paramètres des graphiques Q1\n\taxes[2].set(xlabel = \"X\", ylabel = \"Y\", xlim = (-1, gridSize + 2), ylim = (-1, gridSize + 2))\n\taxes[2].set_title(titles[1])\n\n\t\t# Double graphique des taux moyen de mortalité Q2.2 & Q2.3\n\taxes.append(figures[2].add_subplot(grids[2][0, 0]))\n\taxes.append(axes[3].twinx())\n\taxes[3].plot(interpolatedRho, interpolatedDRM, color = deathRateColor, linestyle = \"-\")\n\taxes[4].plot(interpolatedRho, interpolatedODM, color = outbreakDurationColor, linestyle = \"-\")\n\taxes[3].errorbar(rho, deathRatesMean, yerr = deathRatesDeviation, linestyle = \"\", marker = \"o\",\n\t\t\t\t\t color = deathRateColor, ecolor = deathRateEColor, uplims = True, lolims = True)\n\taxes[4].errorbar(rho, outbreakDurationsMean, yerr = outbreakDurationsDeviation, linestyle = \"\", marker = \"o\",\n\t\t\t\t\t color = outbreakDurationColor, ecolor = outbreakDurationEColor, uplims = True, lolims = True)\n\t\t# Paramètres des graphiques Q2.2 & Q2.3\n\taxes[3].set(xlabel = r\"Densité de population $\\rho$\")\n\taxes[3].set_title(titles[2])\n\taxes[3].set_ylabel(r\"Taux moyen de mortalité $\\bar \\mu$\", color = deathRateColor)\n\taxes[3].tick_params(axis = \"y\", labelcolor = deathRateColor)\n\taxes[4].set_ylabel(r\"Durée moyenne des épidémies $\\bar T$\", color = outbreakDurationColor)\n\taxes[4].tick_params(axis = \"y\", labelcolor = 
outbreakDurationColor)\n\n\t\t# Graphique des taux moyen de mortalité Q2.4\n\taxes.append(figures[3].add_subplot(grids[3][0, 0]))\n\tfor i in range(0, simulations):\n\t\tcolor = next(axes[5]._get_lines.prop_cycler)[\"color\"]\n\t\taxes[5].plot(outbreakDurations[:, i], deathRates[:, i], color = color, linestyle = \"\", marker = \"o\", label = f\"Simulation {i}\")\n\t\t# Paramètres des graphiques Q2.4\n\taxes[5].set(xlabel = \"Durée des épidémies T\", ylabel = r\"Taux de mortalité $\\mu$\")\n\taxes[5].legend(loc = \"upper right\")\n\taxes[5].set_title(titles[3])\n\n\tif (animate):\n\t\t\t# Grilles des nuages de points à animer\n\t\tscat = []\n\t\t\t# Initialisation du graphique animé\n\t\taxes.append(figures[-1].add_subplot(grids[-1][0, 0]))\n\t\tscat.append(axes[-1].scatter(x[healthyIndices[:, 0], 0], y[healthyIndices[:, 0], 0], c = healthyColor))\n\t\tscat.append(axes[-1].scatter(x[sickIndices[:, 0], 0], y[sickIndices[:, 0], 0], c = sickColor))\n\t\tscat.append(axes[-1].scatter(x[deadIndices[:, 0], 0], y[deadIndices[:, 0], 0], c = deadColor))\n\t\tanimatedTitle = axes[-1].text(0.5, 0.95, f\" Itération {0} \", bbox = {\"edgecolor\":\"grey\", \"facecolor\":\"w\", \"alpha\":0.95},\n\t\t\t\t\t\t\t transform = axes[-1].transAxes, ha = \"center\")\n\n\t\taxes[-1].set(xlabel = \"X\", ylabel = \"Y\", xlim = (-1, gridSize + 2), ylim = (-1, gridSize + 2))\n\t\taxes[-1].set_title(titles[-1])\n\n\t\t\t# Fonction animateur\n\t\tdef iterativeAnimator(i):\n\t\t\tscat[0].set_offsets(np.array([x[healthyIndices[:, i], i], y[healthyIndices[:, i], i]]).T)\n\t\t\tscat[1].set_offsets(np.array([x[sickIndices[:, i], i], y[sickIndices[:, i], i]]).T)\n\t\t\tscat[2].set_offsets(np.array([x[deadIndices[:, i], i], y[deadIndices[:, i], i]]).T)\n\n\t\t\tanimatedTitle.set_text(f\" Itération {i} \")\n\n\t\t\treturn scat[0], scat[1], scat[2], animatedTitle\n\n\t\t\t# Animation de la pandémie\n\t\tanim = mplAnim.FuncAnimation(figures[-1], iterativeAnimator, frames = range(beginIteration, endIteration), interval = 100, repeat = False, blit = True)\n\n\t\t\t# Sauvegarde des figures, et de l'animation si demandé\n\t\t[figures[i].savefig(filename) for i, filename in enumerate(filenames[:-1])]\n\t\tif (saveAnimation):\n\t\t\tplt.rcParams[\"animation.ffmpeg_path\"] = ffmpegPath\n\t\t\tffmpegWriter = mplAnim.FFMpegWriter(fps = 10, codec = \"libx264\")\n\t\t\tanim.save(filename = filenames[-1], writer = ffmpegWriter)\n\telse:\n\t\t\t# Sauvegarde des figures\n\t\t[figures[i].savefig(filename) for i, filename in enumerate(filenames)]\n\n\tplt.show()\n","sub_path":"source/tp/10/epidemic.py","file_name":"epidemic.py","file_ext":"py","file_size_in_byte":15795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"82757479","text":"from math import *\nimport numpy as np\nfrom matrix import matrix\nfrom parametricObject import parametricObject\n\nclass parametricCylinder(parametricObject):\n\n def __init__(self,\n T=matrix(np.identity(4)),\n height=1.0,\n radius=1.0,\n color=(0,0,0),\n reflectance=(0.0,0.0,0.0),\n uRange=(0.0,0.0),\n vRange=(0.0,0.0),\n uvDelta=(0.0,0.0)):\n super().__init__(T,color,reflectance,uRange,vRange,uvDelta)\n \n self.__height = height\n self.__radius = radius\n\n def getPoint(self,u,v):\n __P = matrix(np.array([\n [self.__radius*cos(v)],\n [self.__radius*sin(v)],\n [self.__height*u],\n [1]\n ]))\n return __P\n \n def getRadius(self):\n return self.__radius\n \n def getHeight(self):\n return self.__height","sub_path":"parametricCylinder.py","file_name":"parametricCylinder.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"163464278","text":"#!/usr/bin/python3\n\nimport time\nimport sys\n\nclass Meter:\n def __init__(self, max_val, min_val):\n self.max_value = max_val\n self.min_value = min_val\n self.cur_value = self.min_value\n \n def increase_val(self, step):\n if (self.cur_value + step) > self.max_value:\n self.cur_value = self.max_value\n else:\n self.cur_value += step\n \n def decrease_val(self, step):\n if (self.cur_value - step) < self.min_value:\n self.cur_value = self.min_value\n else:\n self.cur_value -= step\n \n\nclass Speedometer(Meter):\n def __init__(self, max_val, min_val):\n Meter.__init__(self, max_val, min_val)\n self.accel_step = 3\n self.decel_step = 1\n self.brake_step = 8\n \n def accel(self):\n self.increase_val(self.accel_step)\n \n def decel(self):\n self.decrease_val(self.decel_step)\n \n def brake(self):\n self.decrease_val(self.brake_step)\n\ndef Trace(message, value, max_value):\n digits = len(str(max_value -1))\n delete = \"\" *(digits+len(message))\n print(\"{0}{3}{1:{2}}\".format(delete, value, digits, message), end=\"\")\n sys.stdout.flush()\n \n\nspeedometer = Speedometer(240, 0)\n\nmessage = \"\"\nwhile speedometer.cur_value < 240:\n speedometer.accel()\n Trace(message, speedometer.cur_value, speedometer.max_value)\n time.sleep(0.1)\n\nwhile speedometer.cur_value > 220:\n speedometer.decel()\n Trace(message, speedometer.cur_value, speedometer.max_value)\n time.sleep(0.1)\n\nwhile speedometer.cur_value > 0:\n speedometer.brake()\n Trace(message, speedometer.cur_value, speedometer.max_value)\n time.sleep(0.1)\n\nprint(\"\\n\")\n","sub_path":"compteur.py","file_name":"compteur.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"482815538","text":"#! /usr/bin/python2.7\n# -*- coding: UTF-8 -*-\n\"\"\"\n__author__ = 'Marc Oggier, aka MegaVolts'\n\nscript running to find analyse ice core data in function of the number of freezing and thawing degree day from breakup\n\"\"\"\nimport os\nimport datetime as dt\nimport warnings\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n\n\n\n\n\n\n# load personal modules\nimport MBS as mbs\nimport seaice as si\nimport icecoredata as icd\nimport icdtools as icdt\nimport csv\n\n# =====================================================================================================================#\n# USER VARIABLE INPUT\n# =====================================================================================================================#\nmyhost = os.uname()[1]\nif myhost == 'islay':\n\tdata_path = '/home/megavolts/UAF/data/'\nelif myhost == 'arran':\n\tdata_path = '/mnt/SeaIceData/data/'\nelse:\n\twarnings.warn('Machine not define, default data path used : \"/mnt/SeaIceData/data/\"')\n\tdata_path = '/mnt/SeaIceData/data/'\n\nics_file = data_path + 'ice_cores/SVL/data-SVL.txt' # ice core file list\n#mbs_dir = data_path + 'mass_balance_site/' # mbs data directory\n#weather_input = data_path + 'model/2014-0724Svalbard/JINforcing.txt' # weahter data file\nweather_input = '/mnt/SeaIceData/data/weather/SVL-NY_ALESUND-1998.txt'\n\nshort_core=['BRW_CS-20000510','BRW_CS-20000301A', 'BRW_CS-20000301B', 'BRW_CS-20000607', 'BRW_CS-20000609',\n 'BRW_CS-20000610', 'BRW_CS-20000611','BRW_CS-20000612', 'BRW_CS-20000614', 'BRW_CS-20000615',\n 'BRW_CS-20000619', 'BRW_CS-20000621', 'BRW_CS-20010607', 'BRW_CS-20010604', 'BRW_CS-20010614',\n 'BRW_CS-20080606D', 'BRW_CS-20090325B', 'BRW_CS-20080606A', 'BRW_CS-20080606B', 'BRW_CS-20080606C',\n 'BRW_CS-20080606D', 'BRW_CS-20100611C', 'BRW_CS-20100611D', 'BRW_CS-20100611E', 'BRW_CS-20100611F'] # 'BRW_CS-20000623'\n\n# variable\nsection_thickness = 0.05 # cm\ntime_interval_growth = 14 # days\ntime_interval_melt = 2 # days\ngrowth_start = 245 # Sept 1 is set arbitrary to the new winter day\n\nHdawn = 1\nHdusk = 24\nTfreeze = -1.8\n\n#\nlstat = 'y'\nlcore = 'y'\n\nfig_export='y'\nfig_display='n'\n\n\n# freez-up day\n# TODO: add freezup day to MBS.py\nfreezup_day = 47\n# OUTPUT\n# figure directory\nif Tfreeze == 0:\n\tfig_dir = \"/home/megavolts/Desktop/SVL/NA\"+str('%.0f' % Hdawn)+'_'+str('%.0f' % Hdusk)\nelif Tfreeze == -1.8:\n\tfig_dir = \"/home/megavolts/Desktop/SVL/NA\"+str('%.0f' % Hdawn)+'_'+str('%.0f' % Hdusk)\nelse:\n\twarnings.warn('Output directory not defined. 
Set output directory to $USER/output')\n\tfig_dir = os.path.expanduser(\"~\")\n\n# toggle flag\nlplot = 'y'\nlcomment = 'n'\n\n\n# =====================================================================================================================#\n# VARIABLE INITIALISATION\n# do not modify below this line\n# =====================================================================================================================#\ncol_Tair = 13 - 1\n\n# check input/output variable\nif not os.path.isdir(fig_dir): # create output directory if not present\n\tos.makedirs(fig_dir)\n\nplt.close() # close all plot\nflag_plot_no = 0 # set first plot in fig 0\n\n# import ice core data\nics_data = icd.importIceCoreFile(ics_file, section_thickness, commentflag='n')\ncore_list = sorted(ics_data.keys())\n\n# import weather data\n#fopen = open(weather_input, 'rU')\n#data = np.genfromtxt(weather_input)\nfopen = open(weather_input, 'rU')\nnheader=44\nsource = csv.reader(fopen, )\ndata = []\n# skip header\nfor iiHeader in range(0, nheader):\n\tnext(source)\nrownum = 0\nfor row in source:\n\td = []\n\tdata.append([])\n\tfor ii in row:\n\t\td.append(ii.split('\\t'))\n\td=[item for sublist in d for item in sublist]\n\tdata[rownum].append(int((dt.datetime(int(d[1]), int(d[2]), int(d[3]), int(d[4]), 0)+dt.timedelta(0,3600)).strftime(\"%Y\")))\n\tdata[rownum].append(int((dt.datetime(int(d[1]), int(d[2]), int(d[3]), int(d[4]), 0)+dt.timedelta(0,3600)).strftime(\"%m\")))\n\tdata[rownum].append(int((dt.datetime(int(d[1]), int(d[2]), int(d[3]), int(d[4]), 0)+dt.timedelta(0,3600)).strftime(\"%d\")))\n\tdata[rownum].append(int((dt.datetime(int(d[1]), int(d[2]), int(d[3]), int(d[4]), 0)+dt.timedelta(0,3600)).strftime(\"%H\")))\n\tdata[rownum].append(-9999) # cloud\n\tdata[rownum].append(-9999) # shortwave radiation (W/m2)\n\tif float(d[5])==-9999:\n\t\td[5] = np.nan\n\tdata[rownum].append(float(d[5])+273) # Tair (K)\n\t# TODO : wind speed (mph) in d[8], wind direction in d[7] (cardinal)\n\tdata[rownum].append(-9999) # uwind (m/s)\n\tdata[rownum].append(-9999) # vwind (m/s)\n\t# TODO : relative humidity % in d[4]\n\tdata[rownum].append(-9999) # specific humidity (kg/kg)\n\tdata[rownum].append(-9999) # precipitation (kg/m2/s)\n\trownum += 1\ndata=np.array(data)\n\n# # define period\n# TODO:Marc: add FDD calculation in MBS.py\n# make a list of the ice core sampling days\nics_days = []\nics_dict = {}\nics_years = []\n\nfor ii in range(0, len(core_list)):\n\tcore_data = ics_data[core_list[ii]]\n\tics_days.append(core_data.core.date)\n\tics_years.append(core_data.core.date.year)\n\tics_dict[str(core_data.core.name)] = core_data.core.date\n\nics_days = icdt.unique(ics_days)\nics_years = icdt.unique(ics_years)\n\n\n# import data from mbs\nmbs_data = {}\nfor ii in range(2006, ics_years[-1] + 1):\n\tmbs_name = 'BRW' + str(ii)[2:] + '_MBS.txt'\n\tif 2012 < ii:\n\t\tmbs_name = mbs_name[:-4] + '-all.csv'\n\tmbs_path = mbs_dir + mbs_name\n\tmbs_data[ii] = np.array(mbs.read_MBS_data(mbs_path))\n\n# calculate FDD for every year\nFDD = {}\nl_FDD = 0\nTDD = {}\nfor iiYear in range(ics_years[0], ics_years[-1] + 1):\n\t# TODO:lookup table until day of minimal thickness\n\t# calculate the FDD of each day from the freeze-up day to September 1\n\tday = dt.datetime(iiYear, 1, 1) + dt.timedelta(growth_start)\n\tfreezup_day = dt.datetime(iiYear, 1, 1) + dt.timedelta(freezup_doy - 1) # rebuild the freeze-up date from the integer day of year on every iteration\n\tif l_FDD == 0:\n\t\tFDD, TDD = mbs.FDD(data, 7, freezup_day, day, Tfreeze=Tfreeze, Hdawn=Hdawn, Hdusk=Hdusk)\n\t\tl_FDD = 
1\n\telse:\n\t\ttemp_FDD, temp_TDD = mbs.FDD(data, 7, freezup_day, day, Tfreeze=Tfreeze, Hdawn=Hdawn, Hdusk=Hdusk)\n\t\tFDD.update(temp_FDD)\n\t\tTDD.update(temp_TDD)\nprint('FDD and TDD done')\n\n# compute FDD table for all the ice core date\nics_FDD = {}\nics_TDD = {}\nfor ii in ics_days:\n\tif growth_start < int(ii.strftime(\"%j\")) < 367:\n\t\tyear = ii.year + 1\n\telse:\n\t\tyear = ii.year\n\tif not TDD[ii] < 0:\n\t\tics_FDD[ii] = FDD[ii]\n\telse:\n\t\tics_TDD[ii] = TDD[ii]\n\nics_FDD_unique = sorted(icdt.unique(ics_FDD.values()))\nics_TDD_unique = sorted(icdt.unique(ics_TDD.values()))\n\nics_doy = {}\nfor day in ics_days:\n\tics_doy[day] = int(day.strftime(\"%j\"))\n\nlegend_label = {}\ncolor_years = {}\nfor ii in ics_years:\n\tlegend_label[ii] = (str(ii))\n\tcolor_years[ii] = cm.jet((float(ii) - ics_years[0]) / len(ics_years))\n\n# create period\nperiod_growth = np.array(\n\t[0, 10])\n#period_melt = np.array([-45, -47.5, -50])\n#period_melt = np.array([-5,-10,-20,-40])\nperiod_melt = np.array([0,-10,-20,-30])\n\nn_period = len(period_growth) - 1 + len(period_melt) - 1\nperiod = {}\nii_FDD = 0\nii_TDD = 0\nlegend_period = []\nplot_name = []\nfor ii in range(0, len(period_growth) - 1):\n\tif ii == 0:\n\t\tlegend_period = [str(period_growth[ii]) + ' to ' + str(period_growth[ii + 1]) + ' [FDD]']\n\t\tplot_name = ['FDD-' + str('%04.0f' % period_growth[ii]) + '_' + str('%04.0f' % period_growth[ii + 1])]\n\t\tperiod[ii_FDD] = ii_FDD\n\t\tii_FDD += 1\n\telse:\n\t\tlegend_period.append(str(period_growth[ii]) + ' to ' + str(period_growth[ii + 1]) + ' [FDD]')\n\t\tplot_name.append('FDD-' + str('%04.0f' % period_growth[ii]) + '_' + str('%04.0f' % period_growth[ii + 1]))\n\t\tperiod[ii] = ii_FDD\n\t\tii_FDD += 1\nfor ii in range(len(period_growth) - 1, n_period):\n\tlegend_period.append(str(period_melt[ii - len(period_growth) + 1]) + ' to ' + str(\n\t\tperiod_melt[ii - len(period_growth) + 2]) + ' [TDD]')\n\tplot_name.append('TDD_' + str('%04.0f' % period_melt[ii - len(period_growth) + 1]) + '-' + str(\n\t\t'%04.0f' % period_melt[ii - len(period_growth) + 2]))\n\tperiod[ii] = ii_TDD + ii_FDD\n\tii_TDD += 1\n\niiCore = 0\nNSTcore = np.zeros([n_period, 3])\n\nSprofile = icdt.nan_mat([n_period, 1, 1])\nTprofile = icdt.nan_mat([n_period, 1, 1])\nVbprofile = icdt.nan_mat([n_period, 1, 1])\nSigprofile = icdt.nan_mat([n_period, 1, 1])\nkprofile = icdt.nan_mat([n_period, 1, 1])\nHiprofile = icdt.nan_mat([n_period, 1, 1])\n\nSlegend = [[] for x in range(n_period)]\nTlegend = [[] for x in range(n_period)]\nVblegend = [[] for x in range(n_period)]\nSiglegend = [[] for x in range(n_period)]\nklegend = [[] for x in range(n_period)]\nHilegend = [[] for x in range(n_period)]\n\nhIce = icdt.nan_mat([n_period, 1, 1])\nTmbs_day = []\nlSflag = np.zeros([n_period, 1])\n\nprint('initialisation complete')\n\nwhile iiCore < len(core_list):\n\tcore_data = ics_data[core_list[iiCore]] # read 1st ice core of a core set\n\tcore_day = core_data.core.date\n\tcore_name = core_data.core.name\n\n\tif core_day in ics_FDD:\n\t\tl_FDD = 1\n\t\tindex_period = np.where(period_growth <= ics_FDD[core_day])[0][-1]\n\telse:\n\t\tl_FDD = 0\n\t\tindex_period = len(period_growth) - 2 + np.where(period_melt <= ics_TDD[core_day])[0][0]\n\n\t# import salinity profile\n\tSdata = core_data.S\n\tHidata = np.array([core_data.core.length])\n\tTCore = core_data.T\n\ttry:\n\t\tlen(Sdata)\n\texcept TypeError:\n\t\tt = 1\n\t# print('%s no salinity data, skip to next core' % core_data.core.name)\n\telse:\n\t\t# look for temperature for all core of the 
set\n\t\tnCoreSet = core_data.core.number\n\t\tiiCoreSet = 0\n\n\t\tTtemp = icdt.nan_mat([nCoreSet, 1])\n\t\tTCore_flag = Ttemp\n\t\tlTcore = 0\n\t\tlTobs = 0\n\t\t# print('%s : %.0f / %.0f - index %.0f' % (core_data.core.name, iiCoreSet + 1, nCoreSet, index_period))\n\t\twhile iiCoreSet < nCoreSet:\n\t\t\tif iiCoreSet == 0:\n\t\t\t\tcorenames = core_data.core.corenames.split(\",\")\n\t\t\tif corenames[iiCoreSet] in core_list:\n\t\t\t\tcore_data = ics_data[corenames[iiCoreSet]]\n\t\t\t\tTdata = core_data.T\n\t\t\t\ttry:\n\t\t\t\t\tlen(Tdata)\n\t\t\t\texcept TypeError:\n\t\t\t\t\tt = 1\n\t\t\t\t#\t\t\t\t\tprint('%s : no temperature' % core_data.core.name)\n\t\t\t\telse:\n\t\t\t\t\tif not all(Tdata < 0.2):\n\t\t\t\t\t\t#\t\t\t\t\t\tprint('%s : some temperature above freezing point' % core_data.core.name)\n\t\t\t\t\t\tTdata = np.nan\n\t\t\t\t\telse:\n\t\t\t\t\t\tdeltaT = len(Tdata) - Ttemp.shape[1]\n\t\t\t\t\t\tif deltaT >= 0:\n\t\t\t\t\t\t\ttemp = np.empty((Ttemp.shape[0], deltaT))\n\t\t\t\t\t\t\ttemp[:] = np.nan\n\t\t\t\t\t\t\tTtemp = np.concatenate((Ttemp, temp), 1)\n\t\t\t\t\t\t\ttemp = Tdata\n\t\t\t\t\t\telif deltaT < 0:\n\t\t\t\t\t\t\ttemp = np.empty((-deltaT))\n\t\t\t\t\t\t\ttemp[:] = np.nan\n\t\t\t\t\t\t\ttemp = np.concatenate((Tdata, temp))\n\t\t\t\t\t\tTtemp[lTcore][:] = temp\n\t\t\t\t\tlTobs += 1\n\t\t\t\tlTcore += 1\n\t\t\tiiCoreSet += 1\n\t\tTavg = np.nanmean(Ttemp, axis=0)\n\n\t\tif not np.isnan(Tavg).all():\n\t\t\tVbdata = si.brine_volumefraction(Tavg, Sdata)\n\t\t\tSigdata = si.seaice_electricconductivity(Tavg, Sdata)\n\t\t\tkdata = si.seaice_permeability(Tavg, Sdata)\n\t\t\tVblegend[index_period].append(core_name)\n\t\t\tSiglegend[index_period].append(core_name)\n\t\t\tSlegend[index_period].append(core_name)\n\t\t\tklegend[index_period].append(core_name)\n\t\t\tHilegend[index_period].append(core_name)\n\n\t\t\tTlegend[index_period].append(core_name + '(set)')\n\t\t\tTdata = Tavg\n\n\t\t\t# # Output all S, T, Vb data in one table [period][cores][dataProfile]\n\t\t\tSprofile = icdt.merge_mat(Sdata, Sprofile, index_period, 1, int(lSflag[index_period]))\n\t\t\tTprofile = icdt.merge_mat(Tdata, Tprofile, index_period, 1, int(lSflag[index_period]))\n\t\t\tVbprofile = icdt.merge_mat(Vbdata, Vbprofile, index_period, 1, int(lSflag[index_period]))\n\t\t\tSigprofile = icdt.merge_mat(Sigdata, Sigprofile, index_period, 1, int(lSflag[index_period]))\n\t\t\tkprofile = icdt.merge_mat(kdata, kprofile, index_period, 1, int(lSflag[index_period]))\n\t\t\tHiprofile = icdt.merge_mat(Hidata, Hiprofile, index_period, 1, int(lSflag[index_period]))\n\t\t\tprint(core_name, lSflag[index_period], Sdata)\n\t\t\tlSflag[index_period] += 1\n\tiiCore += 1\nprint('processing done')\n\n#---------------------------------------------------------------------------------------------------------------------#\nSstat = icdt.make_stat(Sprofile)\nTstat = icdt.make_stat(Tprofile)\nVbstat = icdt.make_stat(Vbprofile)\nSigstat = icdt.make_stat(Sigprofile)\nkstat = icdt.make_stat(kprofile)\nhistat = icdt.make_stat(Hiprofile)\n\n#---------------------------------------------------------------------------------------------------------------------#\n\nlegend_label = {}\ncolor_years = {}\nfor ii in ics_years:\n\tlegend_label[ii] = 'year ' +str(ii)\n\tcolor_years[ii] = cm.brg((float(ii) - ics_years[0]) / len(ics_years))\n\n## TDD/FDD versus DoY\nif 1 == 1:\n\tXF = {}\n\tXT = {}\n\tYF = {}\n\tYT = {}\n\n\tfor day in ics_doy.keys():\n\t\tif day in ics_FDD:\n\t\t\tif not ics_FDD[day] == 0:\n\t\t\t\tif not 
XF.has_key(day.year):\n\t\t\t\t\tif day.month>9:\n\t\t\t\t\t\tXF[day.year] = [ics_doy[day]-365]\n\t\t\t\t\telse:\n\t\t\t\t\t\tXF[day.year] = [ics_doy[day]]\n\t\t\t\t\tYF[day.year] = [ics_FDD[day]]\n\t\t\t\telse:\n\t\t\t\t\tif day.month>9:\n\t\t\t\t\t\ttemp = XF[day.year]\n\t\t\t\t\t\ttemp.append(ics_doy[day]-365)\n\t\t\t\t\t\tXF[day.year] = temp\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp = XF[day.year]\n\t\t\t\t\t\ttemp.append(ics_doy[day])\n\t\t\t\t\t\tXF[day.year] = temp\n\t\t\t\t\ttemp = YF[day.year]\n\t\t\t\t\ttemp.append(ics_FDD[day])\n\t\t\t\t\tYF[day.year] = temp\n\t\telse:\n\t\t\tif not XT.has_key(day.year):\n\t\t\t\tXT[day.year] = [ics_doy[day]]\n\t\t\t\tYT[day.year] = [ics_TDD[day]]\n\t\t\telse:\n\t\t\t\ttemp = XT[day.year]\n\t\t\t\ttemp.append(ics_doy[day])\n\t\t\t\tXT[day.year] = temp\n\t\t\t\ttemp = YT[day.year]\n\t\t\t\ttemp.append(ics_TDD[day])\n\t\t\t\tYT[day.year] = temp\n\n\tflag_plot_no += 1\n\tplt.figure(flag_plot_no, figsize=(10, 5), dpi=300)\n\tax1 = plt.subplot(1, 2, 1)\n\tfor ii in ics_years:\n\t\tif XF.has_key(ii):\n\t\t\tplt.scatter(XF[ii], YF[ii], color=color_years[ii], label=legend_label[ii])\n\tplt.ylim([0, 5000])\n\tplt.xlabel('Day of Year')\n\tplt.ylabel('Freezing Degree Days [K day]')\n\tbox = ax1.get_position()\n\tax1.set_position([box.x0*0.95, box.y0 + box.height * 0.15, box.width, box.height * 0.85])\n\tax1.legend(loc='upper center', bbox_to_anchor=(0.1, -0.05, 2, -0.15), ncol=8, borderaxespad=0., frameon=0,\n\t prop={'size': 9})\n\tax2 = plt.subplot(1, 2, 2)\n\tfor ii in ics_years:\n\t\tif XT.has_key(ii):\n\t\t\tplt.scatter(XT[ii], YT[ii], color=color_years[ii], label=legend_label[ii])\n\tplt.ylim([-100, 0])\n\tplt.xlabel('Day of Year')\n\tplt.ylabel('Thawing Degree Days [K day]')\n\tbox = ax2.get_position()\n\tax2.set_position([box.x0*1.05, box.y0 + box.height * 0.15, box.width, box.height * 0.85])\n\tif fig_export == 'y':\n\t\tfigname = 'DD_doys.png'\n\t\tfigpath = fig_dir + '/' + figname\n\t\tplt.savefig(figpath)\n\tif fig_display == 'y':\n\t\tplt.show()\n\n## FIGURE\n# Ice Core Profile\nl_profile = 'y'\nif l_profile =='y':\n\tplt.close()\n\tfor iiPeriod in range(1, n_period):\n\t\tflag_plot_no += 1\n\t\tplt.figure(flag_plot_no, figsize=(18, 8), dpi=300)\n\t\tSCore_flag = 0\n\t\tTCore_flag = 0\n\t\tVbCore_flag = 0\n\t\tiiYear = []\n\t\tp = []\n\t\tfor iiCore in range(0, lSflag[iiPeriod]):\n\t\t\tif iiCore == 0:\n\t\t\t\taxS = plt.subplot(1, 5, 1)\n\t\t\t\tplt.suptitle(legend_period[iiPeriod])\n\t\t\t\tplt.gca().invert_yaxis()\n\t\t\tyS = np.arange(section_thickness / 2, section_thickness * Sprofile.shape[2], section_thickness)\n\t\t\tyear=ics_dict[str(Slegend[iiPeriod][iiCore])].year\n\t\t\ttemp, = plt.plot(Sprofile[iiPeriod][iiCore][:], yS, label=Slegend[iiPeriod][iiCore], color=color_years[year]) #[SCore_flag])\n\t\t\tplt.xlabel('Salinity [PSU]')\n\t\t\tplt.ylabel('Depth [m]')\n\t\t\tplt.ylim(1.7, 0)\n\t\t\tif iiPeriod < len(period_growth):\n\t\t\t\tplt.xlim(0, 16)\n\t\t\telse:\n\t\t\t\tplt.xlim(0, 8)\n\t\t\tif iiCore == 0: # SCore_flag == 0:\n\t\t\t\tbox = axS.get_position()\n\t\t\taxS.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\n\t\tfor iiCore in range(0, lSflag[iiPeriod]):\n\t\t\tif iiCore == 0: # TCore_flag == 0:\n\t\t\t\taxT = plt.subplot(1, 5, 2)\n\t\t\t\tplt.gca().invert_yaxis()\n\t\t\tyear=ics_dict[str(Slegend[iiPeriod][iiCore])].year\n\t\t\tyT = np.arange(section_thickness / 2, section_thickness * Tprofile.shape[2], section_thickness)\n\t\t\ttemp, = plt.plot(Tprofile[iiPeriod][iiCore][:], yT, 
label=Tlegend[iiPeriod][iiCore], color=color_years[year])\n\t\t\tif year not in iiYear:\n\t\t\t\tiiYear.append(year)\n\t\t\t\tp.append(temp)\n\t\t\tplt.xlabel('Temperature [$^\\circ$C]')\n\t\t\tplt.ylim(1.7, 0)\n\t\t\tif Sstat.shape[2]>=iiDepth:\n\t\t\t\tfS.write('\\t'+str('%.01f' % Sstat[iiPeriod, 0, iiDepth])+ '\\t' + str('%.01f' % Sstat[iiPeriod, 1, iiDepth]) + '\\t' + str('%.0f' % Sstat[iiPeriod, 4, iiDepth]))\n\t\t\tif Tstat.shape[2]>iiDepth:\n\t\t\t\tfT.write('\\t'+str('%.01f' % Tstat[iiPeriod, 0, iiDepth])+ '\\t' + str('%.01f' % Tstat[iiPeriod, 1, iiDepth]) + '\\t' + str('%.0f' % Tstat[iiPeriod, 4, iiDepth]))\n\tfS.close()\n\tfT.close()","sub_path":"JINinput-SVL-V2.py","file_name":"JINinput-SVL-V2.py","file_ext":"py","file_size_in_byte":27968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"209428460","text":"voters = int(input(\"Enter number of voters:\"))\n\nvotes=[]\n\nfor i in range(voters):\n name = input(\"Enter candidate name:\")\n votes.append(name)\n\ncand_dict={}\n\nfor i in votes:\n cand_dict[i] = cand_dict.get(i,0)+1\n\nsorted_dict =sorted(cand_dict.items(), key= lambda x:x[0], reverse=True)\n\n\nwin_cand = list(sorted(sorted_dict, key = lambda x:x[1]))\n\nprint(\"\\nCandidate with highest votes is:\", win_cand[-1][0])\n","sub_path":"day4/program5.py","file_name":"program5.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"625906257","text":"\"\"\"\r\nThis script combines the output of the pandemic model with the average QALY losses due to long-COVID and COVID-19 deaths\r\nVisualizations are saved to results/preprocessing/QALY/long_COVID\r\n\r\n\"\"\" \r\n\r\n__author__ = \"Wolf Demuynck\"\r\n__copyright__ = \"Copyright (c) 2022 by W. Demuynck, BIOMATH, Ghent University. All Rights Reserved.\"\r\n\r\nfrom covid19_DTM.models.utils import output_to_visuals\r\nfrom covid19_DTM.models.utils import initialize_COVID19_SEIQRD_hybrid_vacc\r\nfrom covid19_DTM.visualization.output import _apply_tick_locator \r\nfrom covid19_DTM.models.QALY import lost_QALYs\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport os\r\n\r\nif __name__ == '__main__':\r\n ################################\r\n ## Define simulation settings ##\r\n ################################\r\n\r\n # Number of simulations\r\n N=10\r\n # Number of neg. binomial draws/ simulation\r\n K=20\r\n # Number of cpu's\r\n processes=18\r\n # Number of age groups\r\n age_stratification_size=10\r\n # End of simulation\r\n end_sim='2021-07-01'\r\n # Confidence level used to visualise model fit\r\n conf_int=0.05\r\n\r\n ##########################\r\n ## Initialize the model ##\r\n ##########################\r\n\r\n print('\\n1) Initialize model')\r\n\r\n model, BASE_samples_dict, initN = initialize_COVID19_SEIQRD_hybrid_vacc(age_stratification_size=age_stratification_size, start_date='2020-03-15')\r\n\r\n warmup = float(BASE_samples_dict['warmup'])\r\n dispersion = float(BASE_samples_dict['dispersion'])\r\n start_sim = BASE_samples_dict['start_calibration']\r\n\r\n #########################\r\n ## Perform simulations ##\r\n #########################\r\n\r\n from covid19_DTM.models.draw_functions import draw_fnc_COVID19_SEIQRD_hybrid_vacc as draw_function\r\n\r\n print('\\n2) Simulating COVID19_SEIQRD_hybrid_vacc '+str(N)+' times')\r\n out = model.sim([start_sim,end_sim], warmup=warmup, processes=processes, N=N, samples=BASE_samples_dict, draw_function=draw_function)\r\n\r\n #######################\r\n ## QALY calculations ##\r\n #######################\r\n\r\n print('\\n3) Calculating QALYs')\r\n out_AD = lost_QALYs(out,AD_non_hospitalised=True)\r\n out_no_AD = lost_QALYs(out,AD_non_hospitalised=False)\r\n\r\n ####################\r\n ## Visualisations ##\r\n ####################\r\n\r\n print('\\n4) Visualise results')\r\n\r\n abs_dir = os.path.dirname(__file__)\r\n result_folder = '../../results/covid19_DTM/analysis/QALY/long_COVID'\r\n\r\n states = ['QALY_NH', 'QALY_C', 'QALY_ICU','QALY_D']\r\n titles = ['Non-hospitalised', 'Cohort', 'ICU','Deaths']\r\n colors = ['green','yellow','red','black']\r\n\r\n for scenario,out in zip(['no_AD','AD'],[out_no_AD,out_AD]):\r\n\r\n # With confidence interval\r\n df_2plot = output_to_visuals(out, states, alpha=dispersion, n_draws_per_sample=K, UL=1-conf_int*0.5, LL=conf_int*0.5)\r\n simtime = out['date'].values\r\n\r\n fig,axs = plt.subplots(nrows=4,ncols=1,sharex=True,figsize=(12,10))\r\n axs=axs.reshape(-1)\r\n for ax, QALYs, title, color in zip(axs, states,titles,colors):\r\n\r\n ax.plot(df_2plot[QALYs,'mean'],'--', color=color)\r\n ax.fill_between(simtime, df_2plot[QALYs,'lower'], df_2plot[QALYs,'upper'],alpha=0.20, color = color)\r\n\r\n ax = _apply_tick_locator(ax)\r\n ax.set_title(title,fontsize=20)\r\n ax.set_ylabel('lost QALYs')\r\n ax.grid(False)\r\n\r\n plt.subplots_adjust(hspace=0.5)\r\n 
fig.savefig(os.path.join(abs_dir,result_folder,f'QALY_losses_{scenario}.png'))\r\n\r\n # QALYS per age group\r\n Palette=cm.get_cmap('tab10_r', initN.size).colors\r\n age_group=['0-12','12-18','18-25','25-35','35-45','45-55','55-65','65-75','75-85','85+']\r\n\r\n fig, axs = plt.subplots(4,figsize=(10,10),sharex=True)\r\n axs=axs.reshape(-1)\r\n for ax, QALYs, title, color in zip(axs,states,titles,colors):\r\n\r\n ax.stackplot(simtime,np.transpose(out[QALYs].mean(dim=\"draws\").sum(dim='doses').values),linewidth=3, labels=age_group, colors=Palette, alpha=0.8)\r\n ax.set_title(title,fontsize=20)\r\n ax.set_ylabel('lost QALYs')\r\n ax = _apply_tick_locator(ax) \r\n ax.grid(False)\r\n axs[0].legend(fancybox=True, frameon=True, framealpha=1, fontsize=15,title='Age Group', loc=\"upper left\", bbox_to_anchor=(1,1)) \r\n\r\n plt.subplots_adjust(hspace=0.5)\r\n fig.savefig(os.path.join(abs_dir,result_folder,f'QALY_losses_per_age_group_{scenario}.png'), dpi=600)","sub_path":"notebooks/analysis/woldmuyn_long_COVID.py","file_name":"woldmuyn_long_COVID.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"195059597","text":"from core import metrics, controls\r\nfrom core.enums import DeviceType, MetricType\r\nimport time\r\n\r\n#min_temperature = 12\r\n\r\nkW_event_threshold = 0.5 # 2.0 kilowatts\r\nfall_events = []\r\npower_events = []\r\n\r\ninactivity_alarm = 3 # 180 minutes\r\ninactivity_concern = 2 # 120 minutes\r\ninactivity_events = []\r\n\r\nimmobility_alarm = 300 # 60 seconds\r\ntimer_mins = time.localtime()[4]\r\ntimer_secs = time.localtime()[5]\r\n\r\nalarm_raised = False\r\nconcern_raised = False\r\n\r\ndef urgent_alarm(message):\r\n global alarm_raised\r\n alarm_raised = True\r\n\r\n controls.set_device_state(DeviceType.RED_LAMP, True)\r\n controls.set_device_state(DeviceType.BLUE_LAMP, False)\r\n controls.set_device_state(DeviceType.GREEN_LAMP, False)\r\n \r\n print(message)\r\n print('red lamp on, blue and green lamps off') #testing\r\n\r\n return\r\n\r\ndef countdown(instruction = 'count'):\r\n global timer_mins, timer_secs\r\n if instruction != 'count': # reset countdown timer\r\n timer_mins = time.localtime()[4]\r\n timer_secs = time.localtime()[5]\r\n else: # calculate time elapsed\r\n mins = time.localtime()[4] - timer_mins\r\n secs = time.localtime()[5] - timer_secs\r\n if mins < 0: # correct for minutes resetting on next hour\r\n mins += 60\r\n interval = mins*60 + secs\r\n if interval > immobility_alarm:\r\n urgent_alarm('No heartrate or respiration detected')\r\n return\r\n\r\ndef raise_alarm(message):\r\n global alarm_raised\r\n alarm_raised = True\r\n\r\n controls.set_device_state(DeviceType.RED_LAMP, True)\r\n controls.set_device_state(DeviceType.BLUE_LAMP, False)\r\n controls.set_device_state(DeviceType.GREEN_LAMP, False)\r\n \r\n print(message)\r\n print('red lamp on, blue and green lamps off') #testing\r\n return\r\n\r\ndef handle_all(metric):\r\n #print(metric)\r\n #sleep(10)\r\n pass\r\n\r\ndef handle_temperature(temperature):\r\n if temperature.value <18:\r\n controls.set_device_state(DeviceType.HEATER, True)\r\n elif temperature.value >21:\r\n controls.set_device_state(DeviceType.HEATER, False) \r\n return\r\n\r\ndef handle_co2(co2):\r\n #print(f\"CO2: {co2.value}\")\r\n pass\r\n\r\n#myeventhandlers\r\ndef handle_vibration(vibration):\r\n\r\n if vibration.value > 0:\r\n print(f\"Vibration: {vibration.value}\")\r\n \r\n if vibration.value == 0:\r\n #set countdown alarm for cardiorespiratory arrest\r\n pass\r\n elif vibration.value <= 5 and not alarm_raised:\r\n controls.set_device_state(DeviceType.GREEN_LAMP, True) \r\n print('green lamp on') # testing\r\n elif vibration.value > 10:\r\n fall_events.append(time.localtime()) #date & exact time\r\n raise_alarm('Fall detected, please call to check')\r\n\r\n return\r\n\r\ndef handle_meter(kilowatts):\r\n global concern_raised\r\n #print(kilowatts.value) #testing\r\n\r\n if not power_events: #set initial value\r\n print('tested True')\r\n power_events.append(((time.localtime()[3],\r\n time.localtime()[4]),\r\n kilowatts.value))\r\n print(power_events) # testing\r\n \r\n #retrieve last recorded significant change in power use\r\n last_time, last_power = power_events[-1] \r\n\r\n if kilowatts.value > kW_event_threshold:\r\n if kilowatts.value > 2*last_power:\r\n #log significant increase in power consumption \r\n power_events.append(((time.localtime()[3], #hours\r\n time.localtime()[4]), #mins\r\n kilowatts.value))\r\n print(power_events) #testing\r\n\r\n elif kilowatts.value < last_power/2:\r\n #log significant drop in power consumption\r\n power_events.append(((time.localtime()[3], #hours\r\n 
time.localtime()[4]), #mins\r\n kilowatts.value))\r\n print(power_events) #testing\r\n\r\n else:\r\n #calculate interval since last significant power change\r\n interval_hours = time.localtime()[3] - last_time[0]\r\n interval_minutes = time.localtime()[4] - last_time[1]\r\n #correct negative hours if passed midnight\r\n if interval_hours < 0:\r\n interval_hours += 24\r\n interval = interval_hours*60 + interval_minutes\r\n\r\n #respond to time interval since eg kettle last boiled\r\n if interval > inactivity_alarm and not alarm_raised:\r\n raise_alarm(f'No activity for over {inactivity_alarm} mins!'\r\n f'\\nPlease check urgently')\r\n elif interval > inactivity_concern and concern_raised == False:\r\n concern_raised = True\r\n controls.set_device_state(DeviceType.BLUE_LAMP, True)\r\n print('blue lamp on') #testing\r\n \r\n return\r\n \r\ndef run():\r\n metrics.connect()\r\n\r\n print(\"Successfully connected to metric stream\")\r\n print(f'Started at {time.localtime()[3]}:{time.localtime()[4]}')\r\n\r\n controls.set_device_state(DeviceType.RED_LAMP, False) \r\n controls.set_device_state(DeviceType.BLUE_LAMP, False) \r\n controls.set_device_state(DeviceType.GREEN_LAMP, False) \r\n\r\n\r\n metrics.handle_all(handle_all)\r\n metrics.handle(MetricType.IFM_TEMPERATURE, handle_temperature)\r\n metrics.handle(MetricType.EPC_CO2, handle_co2)\r\n\r\n #mycode\r\n metrics.handle(MetricType.IFM_VIBRATION, handle_vibration)\r\n metrics.handle(MetricType.METER_KW, handle_meter)\r\n #metrics.handle(MetricType.METER_AMPS, handle_meter)\r\n\r\n\r\n # Wait for the program to exit\r\n while True:\r\n pass\r\n\r\nif __name__ == '__main__':\r\n run()\r\n","sub_path":"health_app1.py","file_name":"health_app1.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"230376370","text":"#!/usr/bin/env python\n# Written by Greg Ver Steeg\n# See readme.pdf for documentation\n# Or go to http://www.isi.edu/~gregv/npeet.html\n\nimport scipy.spatial as ss\nfrom scipy.special import digamma\nfrom math import log\nimport numpy.random as nr\nimport numpy as np\nimport random\nfrom numpy import linalg as la\ntry:\n import multiprocessing as processing\nexcept:\n import processing\n\n\n\n\ndef __remote_process_query(rank, qin, qout, tree, K, leafsize):\n while 1:\n # read input queue (block until data arrives)\n nc, data = qin.get()\n # search for the distance to the k nearest points; the blank is their locations\n knn,_ = tree.query(data, K, p=float('inf'))\n # write to output queue\n qout.put((nc,knn))\n\ndef __remote_process_ball(rank, qin, qout, tree, leafsize):\n while 1:\n # read input queue (block until data arrives)\n nc, data, eps = qin.get()\n assert len(eps) == data.shape[0]\n knn = []\n #listOfPoints = tree.query_ball_point(data, eps, p=float('inf'))\n for i in range(len(eps)):\n dist = eps[i]-1e-15\n knn += [len(tree.query_ball_point(data[i,:], dist, p=float('inf')))+1]\n # write to output queue\n qout.put((nc,knn))\n\ndef knn_search_parallel(data, K, qin=None, qout=None, tree=None, t0=None, eps=None, leafsize=None, copy_data=False):\n \"\"\" find the K nearest neighbours for data points in data,\n using an O(n log n) kd-tree, exploiting all logical\n processors on the computer. if eps <= 0, it returns the distance to the kth point. On the other hand, if eps > 0 \"\"\"\n # print(\"starting the parallel search\")\n if eps is not None:\n assert data.shape[0]==len(eps)\n # build kdtree\n if copy_data:\n dataCopy = data.copy()\n # print('copied data')\n else:\n dataCopy = data\n if tree is None and leafsize is None:\n tree = ss.cKDTree(dataCopy)\n elif tree is None:\n tree = ss.cKDTree(dataCopy, leafsize=leafsize)\n if t0 is not None:\n print('time to tree formation: %f' %(clock()-t0))\n ndata = data.shape[0]\n nproc = 20\n # print('made the tree')\n # compute chunk size\n chunk_size = int(data.shape[0] / (4*nproc))\n chunk_size = 100 if chunk_size < 100 else chunk_size\n if qin==None or qout==None:\n # set up a pool of processes\n qin = processing.Queue(maxsize=int(ndata/chunk_size))\n qout = processing.Queue(maxsize=int(ndata/chunk_size))\n if eps is None:\n pool = [processing.Process(target=__remote_process_query,\n args=(rank, qin, qout, tree, K, leafsize))\n for rank in range(nproc)]\n else:\n pool = [processing.Process(target=__remote_process_ball,\n args=(rank, qin, qout, tree, leafsize))\n for rank in range(nproc)]\n for p in pool: p.start()\n # put data chunks in input queue\n cur, nc = 0, 0\n while 1:\n _data = data[cur:cur+chunk_size, :]\n if _data.shape[0] == 0: break\n if eps is None:\n qin.put((nc,_data))\n else:\n _eps = eps[cur:cur+chunk_size]\n qin.put((nc,_data,_eps))\n cur += chunk_size\n nc += 1\n # read output queue\n knn = []\n while len(knn) < nc:\n knn += [qout.get()]\n # avoid race condition\n _knn = [n for i,n in sorted(knn)]\n knn = []\n for tmp in _knn:\n knn += [tmp]\n # terminate workers\n for p in pool: p.terminate()\n\n if eps is None:\n output = np.zeros((sum([ x.shape[0] for x in knn]),knn[0].shape[1]))\n else:\n output = np.zeros(sum([ len(x) for x in knn]))\n outputi = 0\n for x in knn:\n if eps is None:\n nextVal = x.shape[0]\n else:\n nextVal = len(x)\n output[outputi:(outputi+nextVal)] = x\n outputi += nextVal\n return output\n\n# CONTINUOUS ESTIMATORS\n\ndef entropy(x, k=3, base=2.0,printing=False, 
qin=None, qout=None):\n    \"\"\" The classic K-L k-nearest neighbor continuous entropy estimator\n    x should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]\n    if x is a one-dimensional scalar and we have four samples\n    \"\"\"\n    assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n    d = len(x[0])\n    N = len(x)\n    intens = 1e-10 # small noise to break degeneracy, see doc.\n    x = x + intens*nr.rand(x.shape[0],x.shape[1])\n    nn = knn_search_parallel(x, k+1, qin=qin, qout=qout)[:,-1] # distance to the kth neighbour; the query set contains each point itself, hence k+1\n    #tree = ss.cKDTree(x)\n    # [tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]\n    const = digamma(N) - digamma(k) + d * log(2)\n    h = (const + d * np.mean(np.log(nn))) / log(base)\n    if printing:\n        print(h)\n    return h\n\ndef centropy(x, y, k=3, base=2):\n    \"\"\" The classic K-L k-nearest neighbor continuous entropy estimator for the\n    entropy of X conditioned on Y.\n    \"\"\"\n    hxy = entropy([xi + yi for (xi, yi) in zip(x, y)], k, base)\n    hy = entropy(y, k, base)\n    return hxy - hy\n\ndef column(xs, i):\n    return [[x[i]] for x in xs]\n\ndef tc(xs, k=3, base=2):\n    xis = [entropy(column(xs, i), k, base) for i in range(0, len(xs[0]))]\n    return np.sum(xis) - entropy(xs, k, base)\n\ndef ctc(xs, y, k=3, base=2):\n    xis = [centropy(column(xs, i), y, k, base) for i in range(0, len(xs[0]))]\n    return np.sum(xis) - centropy(xs, y, k, base)\n\ndef corex(xs, ys, k=3, base=2):\n    cxis = [mi(column(xs, i), ys, k, base) for i in range(0, len(xs[0]))]\n    return np.sum(cxis) - mi(xs, ys, k, base)\n\ndef mi(xp, yp, k=3, base=2,normalize=True,qin=None, qout=None):\n    \"\"\" Mutual information of x and y\n    x, y should either be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]] or a matrix with (#examples x dimension). Their types should match\n    \"\"\"\n    assert xp.shape[0] == yp.shape[0], \"Lists should have same length\"\n    assert type(xp[0])==type(yp[0]), \"Should be the same kind of list\"\n    assert k <= len(xp) - 1, \"Set k smaller than num. samples - 1. Way smaller.\"\n    x = (xp-np.mean(xp,axis=0))/np.std(xp, axis=0)\n    y = (yp-np.mean(yp,axis=0))/np.std(yp, axis=0)\n    intens = 1e-10\n    points = np.append(x,y,axis=1) + intens*nr.rand(x.shape[0],x.shape[1]+y.shape[1])\n    # Find nearest neighbors in joint space, p=inf means max-norm\n    dvec = knn_search_parallel(points, k+1, qin=qin, qout=qout)\n    dvec = dvec[:,-1]\n    #tree = ss.cKDTree(points)\n    #dvec = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in points]\n    a, b, c, d = avgdigamma(x, dvec,qin=qin, qout=qout), avgdigamma(y, dvec, qin=qin, qout=qout), digamma(k), digamma(points.shape[0])\n    mmi = (-a - b + c + d) / log(base)\n    return mmi\n\n\ndef cmi(x, y, z, k=3, base=2, qin=None, qout=None):\n    \"\"\" Mutual information of x and y, conditioned on z\n    x, y, z should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]\n    if x is a one-dimensional scalar and we have four samples\n    \"\"\"\n    assert len(x) == len(y), \"Lists should have same length\"\n    assert k <= len(x) - 1, \"Set k smaller than num. 
samples - 1\"\n intens = 1e-10 # small noise to break degeneracy, see doc.\n x = [list(p + intens * nr.rand(len(x[0]))) for p in x]\n y = [list(p + intens * nr.rand(len(y[0]))) for p in y]\n z = [list(p + intens * nr.rand(len(z[0]))) for p in z]\n points = zip2(x, y, z)\n # Find nearest neighbors in joint space, p=inf means max-norm\n dvec = knn_search_parallel(points,k,qin=qin, qout=qout)[:,-1]\n #tree = ss.ccKDTree(points)\n #[tree.query(point, k + 1, p=float('inf'))[0][k] for point in points]\n a, b, c, d = avgdigamma(zip2(x, z), dvec,qin=qin, qout=qout), avgdigamma(zip2(y, z), dvec), avgdigamma(z, dvec), digamma(k)\n return (-a - b + c + d) / log(base)\n\n\ndef kldiv(x, xp, k=3, base=2, qin=None, qout=None):\n \"\"\" KL Divergence between p and q for x~p(x), xp~q(x)\n x, xp should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]\n if x is a one-dimensional scalar and we have four samples\n \"\"\"\n assert k <= len(x) - 1, \"Set k smaller than num. samples - 1\"\n assert k <= len(xp) - 1, \"Set k smaller than num. samples - 1\"\n assert len(x[0]) == len(xp[0]), \"Two distributions must have same dim.\"\n d = len(x[0])\n n = len(x)\n m = len(xp)\n const = log(m) - log(n - 1)\n tree = ss.ccKDTree(x)\n treep = ss.ccKDTree(xp)\n nn = knn_search_parallel(x, k, tree=tree, qin=qin, qout=qout)\n #[tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]\n nnp = knn_search_parallel(x, k-1, tree=treep, qin=qin, qout=qout)\n #nnp = [treep.query(point, k, p=float('inf'))[0][k - 1] for point in x]\n return (const + d * np.mean(map(log, nnp)) - d * np.mean(map(log, nn))) / log(base)\n\n\n# DISCRETE ESTIMATORS\ndef entropyd(sx, base=2):\n \"\"\" Discrete entropy estimator\n Given a list of samples which can be any hashable object\n \"\"\"\n return entropyfromprobs(hist(sx), base=base)\n\n\ndef midd(x, y, base=2):\n \"\"\" Discrete mutual information estimator\n Given a list of samples which can be any hashable object\n \"\"\"\n return -entropyd(zip(x, y), base) + entropyd(x, base) + entropyd(y, base)\n\ndef cmidd(x, y, z):\n \"\"\" Discrete mutual information estimator\n Given a list of samples which can be any hashable object\n \"\"\"\n return entropyd(zip(y, z)) + entropyd(zip(x, z)) - entropyd(zip(x, y, z)) - entropyd(z)\n\ndef centropyd(x, y, base=2):\n \"\"\" The classic K-L k-nearest neighbor continuous entropy estimator for the\n entropy of X conditioned on Y.\n \"\"\"\n return entropyd(zip(x, y), base) - entropyd(y, base)\n\ndef tcd(xs, base=2):\n xis = [entropyd(column(xs, i), base) for i in range(0, len(xs[0]))]\n hx = entropyd(xs, base)\n return np.sum(xis) - hx\n\ndef ctcd(xs, y, base=2):\n xis = [centropyd(column(xs, i), y, base) for i in range(0, len(xs[0]))]\n return np.sum(xis) - centropyd(xs, y, base)\n\ndef corexd(xs, ys, base=2):\n cxis = [midd(column(xs, i), ys, base) for i in range(0, len(xs[0]))]\n return np.sum(cxis) - midd(xs, ys, base)\n\ndef hist(sx):\n sx = discretize(sx)\n # Histogram from list of samples\n d = dict()\n for s in sx:\n if type(s) == list:\n s = tuple(s)\n d[s] = d.get(s, 0) + 1\n return map(lambda z: float(z) / len(sx), d.values())\n\n\ndef entropyfromprobs(probs, base=2):\n # Turn a normalized list of probabilities of discrete outcomes into entropy (base 2)\n return -sum(map(elog, probs)) / log(base)\n\n\ndef elog(x):\n # for entropy, 0 log 0 = 0. but we get an error for putting log 0\n if x <= 0. 
or x >= 1.:\n        return 0\n    else:\n        return x * log(x)\n\n# MIXED ESTIMATORS\ndef micd(x, y, k=3, base=2, warning=True, qin=None,qout=None):\n    \"\"\" If x is continuous and y is discrete, compute mutual information\n    \"\"\"\n    overallentropy = entropy(x, k, base, qin=qin,qout=qout)\n\n    if type(y)==np.ndarray:\n        y = y.tolist()\n    n = len(y)\n    word_dict = dict()\n    for i in range(len(y)):\n        if type(y[i]) == list:\n            y[i] = tuple(y[i])\n    for sample in y:\n        word_dict[sample] = word_dict.get(sample, 0) + 1. / n\n    yvals = list(set(word_dict.keys()))\n\n    mi = overallentropy\n    for yval in yvals:\n        xgiveny = x[[yi == yval for yi in y],:] # select the rows of x whose label equals yval\n        if k <= len(xgiveny) - 1:\n            mi -= word_dict[yval] * entropy(xgiveny, k, base,qin=qin,qout=qout)\n        else:\n            if warning:\n                print(\"Warning, after conditioning, on y=\", yval, \" insufficient data. Assuming maximal entropy in this case.\")\n            mi -= word_dict[yval] * overallentropy\n    return np.abs(mi) # units already applied\n\ndef midc(x, y, k=3, base=2, warning=True, qin=None,qout=None):\n    return micd(y, x, k, base, warning, qin=qin, qout=qout)\n\ndef centropydc(x, y, k=3, base=2, warning=True):\n    return entropyd(x, base) - midc(x, y, k, base, warning)\n\ndef centropycd(x, y, k=3, base=2, warning=True):\n    return entropy(x, k, base) - micd(x, y, k, base, warning)\n\ndef ctcdc(xs, y, k=3, base=2, warning=True):\n    xis = [centropydc(column(xs, i), y, k, base, warning) for i in range(0, len(xs[0]))]\n    return np.sum(xis) - centropydc(xs, y, k, base, warning)\n\ndef ctccd(xs, y, k=3, base=2, warning=True):\n    xis = [centropycd(column(xs, i), y, k, base, warning) for i in range(0, len(xs[0]))]\n    return np.sum(xis) - centropycd(xs, y, k, base, warning)\n\ndef corexcd(xs, ys, k=3, base=2, warning=True):\n    cxis = [micd(column(xs, i), ys, k, base, warning) for i in range(0, len(xs[0]))]\n    return np.sum(cxis) - micd(xs, ys, k, base, warning)\n\ndef corexdc(xs, ys, k=3, base=2, warning=True):\n    #cxis = [midc(column(xs, i), ys, k, base, warning) for i in range(0, len(xs[0]))]\n    #joint = midc(xs, ys, k, base, warning)\n    #return np.sum(cxis) - joint\n    return tcd(xs, base) - ctcdc(xs, ys, k, base, warning)\n\n# UTILITY FUNCTIONS\ndef vectorize(scalarlist):\n    \"\"\" Turn a list of scalars into a list of one-d vectors\n    \"\"\"\n    return [[x] for x in scalarlist]\n\n\ndef shuffle_test(measure, x, y, z=False, ns=200, ci=0.95, **kwargs):\n    \"\"\" Shuffle test\n    Repeatedly shuffle the x-values and then estimate measure(x, y, [z]).\n    Returns the mean and conf. interval ('ci=0.95' default) over 'ns' runs.\n    'measure' could be mi, cmi, e.g. Keyword arguments can be passed.\n    Mutual information and CMI should have a mean near zero.\n    \"\"\"\n    xp = x[:] # A copy that we can shuffle\n    outputs = []\n    for i in range(ns):\n        random.shuffle(xp)\n        if z:\n            outputs.append(measure(xp, y, z, **kwargs))\n        else:\n            outputs.append(measure(xp, y, **kwargs))\n    outputs.sort()\n    return np.mean(outputs), (outputs[int((1. - ci) / 2 * ns)], outputs[int((1. 
+ ci) / 2 * ns)])\n\n\n# INTERNAL FUNCTIONS\n\ndef avgdigamma(points, dvec, qin=None, qout=None):\n    # This part finds number of neighbors in some radius in the marginal space\n    # returns the expectation value of digamma(n_x), where n_x is the neighbor count\n    N = points.shape[0]\n    avg = 0.\n    num_points = knn_search_parallel(points, 3, eps=dvec-1e-15, qin=qin, qout=qout)\n    avg = sum([digamma(x)/N for x in num_points])\n    #tree = ss.cKDTree(points)\n\n    #for i in range(N):\n        #dist = dvec[i]\n        # subtlety, we don't include the boundary point,\n        # but we are implicitly adding 1 to kraskov def bc center point is included\n\n        #num_points = len(tree.query_ball_point(points[i,:], dist - 1e-15, p=float('inf')))\n        #avg += digamma(num_points) / N\n    return avg\n\n\ndef zip2(*args):\n    # zip2(x, y) takes the lists of vectors and makes it a list of vectors in a joint space\n    # E.g. zip2([[1], [2], [3]], [[4], [5], [6]]) = [[1, 4], [2, 5], [3, 6]]\n    return [sum(sublist, []) for sublist in zip(*args)]\n\ndef discretize(xs):\n    def discretize_one(x):\n        if len(x) > 1:\n            return tuple(x)\n        else:\n            return x[0]\n    # discretize(xs) takes a list of vectors and makes it a list of tuples or scalars\n    return [discretize_one(x) for x in xs]\n\n#if __name__ == \"__main__\":\n#    print(\"NPEET: Non-parametric entropy estimation toolbox. See readme.pdf for details on usage.\")\n\nfrom time import clock\nfrom numpy import random as rand\ndef test(t0):\n    return knn_search_parallel(data, K, t0=t0)\n\nif __name__ == '__main__':\n    dY, dX = (3072,3072)\n    ndata = 6000\n    A = np.block([[10*nr.randn(dX,dX), nr.randn(dX,dY)], [nr.randn(dY,dX), 10*nr.randn(dY,dY)]])\n    A = A.T@A\n    AX = A[0:dX,0:dX]\n    AY = A[dX:,dX:]\n    data = 10*nr.multivariate_normal(np.zeros(dX+dY), A, ndata)\n    eps = 3*nr.rand(ndata)\n    ss.cKDTree(data.copy())\n    #print('avgdigamma with random eps: %f' % test3)\n    print('Testing mutual information calculation:')\n    mi(data[:,0:dX],data[:,dX:])\n    print('Theoretical Value')\n    print((la.slogdet(AX)[1]+la.slogdet(AY)[1]-la.slogdet(A)[1])/2)\n","sub_path":"entropy_estimators.py","file_name":"entropy_estimators.py","file_ext":"py","file_size_in_byte":15756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"112529376","text":"''' \n# or better yet...\n\ndef get_primes(input_list):\n return (element for element in input_list if is_prime(element))\n''' \n\ndef is_prime(number):\n if number > 1:\n if number == 2:\n return True\n if number % 2 == 0:\n return False\n for current in range(3, number-1, 2):\n if number % current == 0: \n return False\n return True\n return False\n\ndef get_primes_with_yield(input_list):\n for element in input_list:\n if is_prime(element):\n yield element\n\ndef get_primes(input_list):\n result_list = list()\n for element in input_list:\n if is_prime(element):\n result_list.append(element)\n\n return result_list\n\nnum_list = [2, 5, 7, 8, 11, 15, 23, 29]\n\nresult = get_primes(num_list)\nprint (result)\nprint (\"type of result \", type(result))\n\nresult = get_primes_with_yield(num_list)\nprint (result)\nprint (\"type of result \", type(result))\n\nfor num in result:\n print (num)\n\nfor num in get_primes_with_yield(num_list):\n print (num)\n\n","sub_path":"Assain/More_data_type/A-gen-yield.py","file_name":"A-gen-yield.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"219400295","text":"from bs4 import BeautifulSoup\nimport requests\nfrom dateutil.parser import parse\nimport datetime\nfrom blogposts import Post, Comment\n\n\ndef main():\n source = requests.get('http://inspiringscience.net').text\n soup = BeautifulSoup(source, 'lxml')\n\n posts = []\n\n articles = soup.find('div', id='content').find_all('article')\n\n for article in articles:\n headline = article.header.h1.a.text\n\n summarySource = requests.get(article.header.h1.a['href']).text\n summarySoup = BeautifulSoup(summarySource, 'lxml')\n summaryList = summarySoup.find('div', class_='post-entry').find_all('p')\n summary = \"\"\n for s in summaryList:\n summary = summary + \" \" + s.text\n\n try:\n comments = summarySoup.find('ol', class_='commentlist').find_all('li')\n except Exception as e:\n comments = []\n\n id = 0\n\n commList = []\n\n anonymous = 1\n\n for comment in comments:\n id = id + 1\n\n refid = 1\n\n user = comment.find('p', class_='comment-author').span.text\n if user == 'Unknown' or user == 'Anonymous':\n user = 'Anonymous_user_' + str(anonymous)\n anonymous = anonymous + 1\n\n date = comment.find('p', class_='comment-date').a.time.text\n parsed_date = parse(date)\n timestamp = datetime.datetime.timestamp(parsed_date)\n\n msg = ''\n msgList = comment.find('div', class_='comment-text').find_all('p')\n del msgList[-1]\n for m in msgList:\n msg = msg + m.text + \" \"\n\n comm = Comment(id, refid, timestamp, user, msg)\n commList.append(comm)\n\n fail_condition = True\n while fail_condition:\n try:\n replies = comment.find('ul', class_='children').find_all('li')\n\n refid = id\n\n for reply in replies:\n id = id + 1\n\n user = reply.find('p', class_='comment-author').span.text\n if user == 'Unknown' or user == 'Anonymous':\n user = 'Anonymous_user_' + str(anonymous)\n anonymous = anonymous + 1\n\n date = reply.find('p', class_='comment-date').a.time.text\n parsed_date = parse(date)\n timestamp = datetime.datetime.timestamp(parsed_date)\n\n msg = ''\n msgList = reply.find('div', class_='comment-text').find_all('p')\n del msgList[-1]\n for m in msgList:\n msg = msg + m.text + \" \"\n\n comm = Comment(id, refid, timestamp, user, msg)\n commList.append(comm)\n\n fail_condition = False\n except Exception as e:\n fail_condition = False\n\n posts.append(Post(headline, summary, commList))\n\n for post in posts:\n print(post)\n","sub_path":"scrapper/blogs/inspiringscience.py","file_name":"inspiringscience.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"585370029","text":"import tensorflow as tf\nfrom tensorflow.python.keras import layers\n\ndef make_discriminator_model():\n model = tf.keras.Sequential()\n\n model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[32, 128, 1]))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding='same'))\n model.add(layers.LeakyReLU())\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(1))\n\n return model\n\n# class Discriminator(tf.keras.Model):\n# def __init__(self):\n# super(Discriminator, self).__init__()\n#\n# self.conv1 = layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1])\n# self.batchnorm1 = layers.BatchNormalization()\n# self.activation1 = layers.LeakyReLU()\n# self.dropout1 = layers.Dropout(0.3)\n#\n# self.conv2 = layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')\n# self.batchnorm2 = layers.BatchNormalization()\n# self.activation2 = layers.LeakyReLU()\n# self.dropout2 = layers.Dropout(0.3)\n#\n# # self.conv3 = layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')\n# # self.batchnorm3 = layers.BatchNormalization()\n# # self.activation3 = layers.LeakyReLU()\n# #\n# # self.conv4 = layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')\n# # self.batchnorm4 = layers.BatchNormalization()\n# # self.activation4 = layers.LeakyReLU()\n#\n# self.flatten5 = layers.Flatten()\n# self.dense5 = layers.Dense(1)\n# self.activation5 = layers.Activation('sigmoid')\n#\n# def call(self, images, **kwargs):\n# training = kwargs.get(\"training\")\n#\n# x = self.conv1(images)\n# x = self.batchnorm1(x, training=training)\n# x = self.activation1(x)\n# x = self.dropout1(x, training=training)\n#\n# x = self.conv2(x)\n# x = self.batchnorm2(x, training=training)\n# x = self.activation2(x)\n# x = self.dropout2(x, training=training)\n#\n# # x = self.conv3(x)\n# # x = self.batchnorm3(x, training=training)\n# # x = self.activation3(x)\n# #\n# # x = self.conv4(x)\n# # x = self.batchnorm4(x, training=training)\n# # x = self.activation4(x)\n#\n# x = self.flatten5(x)\n# x = self.dense5(x)\n# x = self.activation5(x)\n#\n# return x","sub_path":"ganny/models/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"103125413","text":"# 地図データの入力\ns = input()\nwhile ( len(s) < 1 or len(s) > 100 ) or ( ( s.count('0') + s.count('1')) > len(s) ):\n s = input()\n\n# 体力の入力\nt = int(input())\nwhile ( t < 1 or t > 100):\n t = int(input())\n\nfor i in range(len(s)):\n if(s[i] == '1'):\n t -= 1\n\nif(t > 0):\n print(str(t))\nelse:\n print('No')\n\n\n\n\n","sub_path":"Paiza_Prac/LogicSummoner/Rank-D/ダメージ床/D-DamageFloor.py","file_name":"D-DamageFloor.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"268001857","text":"def make_album(artist, album_title, number_songs = None):\n \"\"\"Returns the given album info in a dictionary\"\"\"\n\n result = {}\n\n if number_songs != None:\n result['title'] = album_title.title()\n result['artist'] = artist.title()\n result['songs'] = number_songs\n else:\n result['title'] = album_title.title()\n result['artist'] = artist.title()\n\n return result\n\nprint(make_album(artist = 'jimi hendrix', album_title = 'johny b. good',\\\nnumber_songs = 5))\nprint(make_album(artist = 'red hot chili peppers',\\\nalbum_title = 'blood sugar sex magic'))\nprint(make_album(artist = 'red hot chili peppers',\\\nalbum_title = 'californication', number_songs = 18))\n\n","sub_path":"ex8-/8-7.py","file_name":"8-7.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"599020599","text":"from seleniumbase import BaseCase\n\nclass NavigationTest(BaseCase):\n\n # задаем базовый урл и переменную-словарь\n def setup_class(self):\n self.base_url = \"https://www.babyshop.com\"\n self.menu_dict = {\"Brands\":['//a[@data-class=\"brand\"]', self.base_url+\"/brands/s/618\"],\n \"Сlothing\":['//a[@data-class=\"babyclothes\"]', self.base_url+\"/clothing/s/619\"],\n \"Footwear\":['//a[@data-class=\"babyshoes\"]',self.base_url+\"/footwear/s/620\"]\n }\n def test_basket(self):\n # Идем на страницу товаров\n self.get(self.base_url+'/dolce-gabbana/s/1495')\n # выбор товара\n self.hover_and_click('//article[1]', '//article[1]//div[@class=\"quickshop-button \"]')\n # выбор размера\n self.click('//div[@id=\"id-slct\"]')\n self.click('//div[@id=\"id-slct\"]/ul/li[2]')\n #\n \n self.click('//button[@class=\"add-to-cart green large\"]')\n self.sleep(0.1)\n self.click('//a[@class=\"to-checkout white button\"]')\n ","sub_path":"test_basket.py","file_name":"test_basket.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"383932967","text":"from django import forms\nfrom django.forms import CharField, Form, IntegerField,ChoiceField, MultipleChoiceField\nfrom cookbook.models import Ingredient\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\n\n\nDISH_TYPE_CHOICES = (\n (\"breakfast\", \"Breakfast\"),\n (\"salad\", \"Salad\"),\n (\"appetizer\", \"Appetizer\"),\n (\"soup\", \"Soup\"),\n (\"main course\", \"Main course\"),\n (\"dessert\", \"Dessert\"),\n (\"\", \"All Types\")\n)\n\nresult = []\nfor ingredient in Ingredient.objects.all():\n result.append((ingredient.ingredient_name, ingredient.ingredient_name))\n\nINGREDIENT_CHOICES = result\n\n\nclass MyIntegerField(IntegerField):\n default_error_messages = {\n 'invalid': 'Enter the maximum preparation time in whole minutes!',\n }\n\n\nclass RecipeSearchForm(Form):\n ingredients = MultipleChoiceField(widget=forms.SelectMultiple, choices=INGREDIENT_CHOICES)\n dish_type = ChoiceField(choices=DISH_TYPE_CHOICES, required=False)\n max_preparation_time = MyIntegerField(help_text='minutes', required=False,\n widget=forms.TextInput(attrs={'placeholder': 'Field not required'}))\n\n","sub_path":"cookbook/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"625864922","text":"def canAdd(a, b):\n \"\"\"\n проверка на возможность добавления новой строки\n требуется для фукнции перемножения матирц multBoolMatrix()\n \"\"\"\n for i in range(len(a)):\n if ((a[i] == 0 and b[i] == 1) or (a[i] == 1 and b[i] == 0)):\n return False\n return True\n\n\ndef addRow(a, b):\n \"\"\"\n перемножение троичной строки\n требуется для фукнции перемножения матирц multBoolMatrix()\n \"\"\"\n c = []\n for i in range(len(a)):\n # если 2 * 2\n if(a[i] * b[i] == 4):\n c.append(2)\n # при умнажении на 2 * 1\n elif(a[i] * b[i] == 2):\n c.append(1)\n # просто перемножение\n else:\n c.append(a[i] * b[i])\n return c\n\n\ndef multBoolMatrix(a, b):\n \"\"\"перемножение троичных матриц\"\"\"\n # результирующая матрица\n c = []\n # цикл для первой матрицы\n for i in range(len(a)):\n # сохранение текущей строки в первой матрице\n aRow = a[i]\n # цикл для второй матрицы\n for j in range(len(b)):\n # сохранение текущей строки во второй матирце\n bRow = b[j]\n\n # добавляем новую строку в матрицу С если это возможно\n if canAdd(aRow, bRow):\n c.append(addRow(aRow, bRow))\n # выход если перемножение 0 * 1 или 1 * 0\n else:\n continue\n return c\n\n\ndef beautiPrint(a):\n \"\"\"красивый вывод в консоль троичной матрицы\"\"\"\n for i in a:\n for j in i:\n if j == 2:\n print('-', end=\" \")\n else:\n print(j, end=\" \")\n print()\n\n\ndef minTooLine(a, b):\n \"\"\"\n минификация двух строк, если строки можно сжать то сжимает, иначе false\n требуется для функции минификации троичной матрицы minifyMatrix()\n \"\"\"\n # результат\n c = []\n \n # минимизация двух строк\n for i in range(len(a)):\n if a[i] == b[i]:\n c.append(a[i])\n else:\n c.append(2)\n\n # количетсво \"2\" в строке\n countTheToo = 0\n for i in range(len(c)):\n if c[i] == 2:\n countTheToo += 1\n\n # если количество \"2\" больше 1 то возвращаем сложение строк, иначе возвращаем false\n if countTheToo <= 1:\n return c\n else:\n False\n\n\ndef minifyMatrix(a):\n \"\"\"минификация троичной матрицы\"\"\"\n # копия с входной матрицы, не хорошо перезаписывать входные данные\n dublicate = a.copy()\n # результат минимизации\n result = []\n\n firstRow = 0\n while firstRow <= len(dublicate) - 1:\n # полу��аем текущую строку с которой будем работать\n currentLine = dublicate[firstRow]\n\n # обнуляем переменную для второго цикла\n sekondRow = 0\n while sekondRow < len(dublicate) - firstRow - 1:\n # индекс для прохода по остальным строкам матирцы\n index = firstRow + 1 + sekondRow\n\n # если мы можем объединить текущую строку и следующую строку, то делаем это\n if(minTooLine(currentLine, dublicate[index])):\n # объединяем результаты\n currentLine = minTooLine(currentLine, dublicate[index])\n # удаляем ненужное значение\n dublicate.pop(index)\n\n # если ни чего не поменялось то идем к сделующему элементу\n else:\n sekondRow += 1\n\n # добавляем текущую строку, если она была изменена, а если нет то тоже добавляем\n result.append(currentLine)\n firstRow += 1\n return result\n\n\nif __name__ == \"__main__\":\n # тест перемножения троичных матриц\n a = [[1, 2, 2, 0],\n [2, 0, 2, 1],\n [2, 1, 0, 0]]\n\n b = [[0, 2, 0, 2],\n [2, 2, 1, 1]]\n\n c = multBoolMatrix(a, b)\n beautiPrint(c)\n\n print('\\n==========\\n')\n\n # тест минимизации троичной матрицы\n q = [\n [2, 1, 0],\n [1, 1, 0],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 1]\n ]\n beautiPrint(q)\n print('\\nafter minification\\n')\n beautiPrint(minifyMatrix(q))\n\n print('\\n==========\\n')\n\n # тест из методички\n # страница 35\n a = [\n [1, 2, 2],\n [2, 0, 2]\n ]\n\n b = [\n [0, 2, 
2],\n [2, 2, 1]\n ]\n beautiPrint(multBoolMatrix(a, b))","sub_path":"multBullMatrix.py","file_name":"multBullMatrix.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"617461115","text":"'''\r\nCreated on Mar 28, 2017\r\n\r\n@author: jason\r\n'''\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\n\r\nfrom application import db\r\nfrom application.models import ProductReserve, GrowWeek, PlantGrowSupply, PlantGrow,LastUpdate\r\n\r\ndef set_reserves():\r\n lstup = LastUpdate.get_last_update(\"PlantReserves\")\r\n ProductReserve.set_for_update(lstup.last_updated)\r\n lstup.update()\r\n\r\ndef get_reserves(update=True):\r\n #lstup = LastUpdate.get_last_update(\"PlantReserves\")\r\n #prs = ProductReserve.get_lastupdated(lstup.last_updated)\r\n prs = ProductReserve.get_for_update()\r\n summ_d = []\r\n print(\"Processing {} reserves\".format(len(prs)))\r\n for pr in prs:\r\n presv = ProductReserve.get_db_reserve(pr.id)\r\n summ_d.append({'week_id':presv['week_id'],'plant_id':presv['plant_id']})\r\n add_reserves(presv,update=update)\r\n pr.reset_dw_sync()\r\n \r\n for summ in summ_d:\r\n #getadd_summary(summ['week_id'], summ['plant_id'])\r\n pg = PlantGrow.get_plant_wp(summ['week_id'], summ['plant_id'])\r\n if pg:\r\n pg.set_dw_sync()\r\n \r\n return 0\r\n\r\ndef add_reserves(presv,update=True):\r\n instance = db.session.query(PlantReserves).filter_by(id=presv['_id']).first()\r\n if instance:\r\n del presv['_id']\r\n if update:\r\n instance.update(**presv)\r\n db.session.commit()\r\n else:\r\n instance = PlantReserves(**presv)\r\n db.session.add(instance)\r\n db.session.commit()\r\n \r\n\r\nclass PlantReserves(db.Model):\r\n __tablename__ = 'plant_reserves'\r\n id = db.Column(db.String(150), primary_key=True)\r\n plant = db.Column(db.String(150))\r\n product = db.Column(db.String(150))\r\n plant_id = db.Column(db.String(150))\r\n product_id = db.Column(db.String(150))\r\n num_reserved = db.Column(db.String(150))\r\n week_id = db.Column(db.String(150))\r\n customer = db.Column(db.String(150))\r\n customer_id = db.Column(db.String(150))\r\n sales_rep = db.Column(db.String(150))\r\n add_date = db.Column(db.String(150))\r\n soft_delete = db.Column(db.String(1))\r\n \r\n def update(self, plant, product, plant_id, product_id, num_reserved, week_id, customer, customer_id, sales_rep, add_date, soft_delete):\r\n self.plant=plant\r\n self.product=product\r\n self.plant_id=plant_id\r\n self.product_id=product_id\r\n self.num_reserved=0 if not num_reserved else num_reserved\r\n self.week_id=week_id\r\n self.customer=customer\r\n self.customer_id=customer_id\r\n self.sales_rep=sales_rep\r\n self.add_date=add_date.date()\r\n self.soft_delete=soft_delete\r\n \r\n def __init__(self, _id, plant, product, plant_id, product_id, num_reserved, week_id, customer, customer_id, sales_rep, add_date,soft_delete):\r\n self.id = _id\r\n self.plant=plant\r\n self.product=product\r\n self.plant_id=plant_id\r\n self.product_id=product_id\r\n self.num_reserved=0 if not num_reserved else num_reserved\r\n self.week_id=week_id\r\n self.customer=customer\r\n self.customer_id=customer_id\r\n self.sales_rep=sales_rep\r\n self.add_date=add_date.date()\r\n self.soft_delete=soft_delete\r\n\r\ndef set_next_2yrs():\r\n dt = datetime.now()\r\n dt2 = dt + timedelta(days=(365*2))\r\n dt1 = dt\r\n while dt1 <= dt2:\r\n add_date(dt1)\r\n dt1 = dt1 + timedelta(days=1) \r\n\r\ndef add_date(dt_entry):\r\n dwdb = {}\r\n dwdb['_id'] = str(dt_entry.timetuple().tm_yday).zfill(3)+str(dt_entry.year)\r\n dwdb['date_entry'] = dt_entry\r\n week = GrowWeek.create_week(dt_entry)\r\n dwdb['week_id'] = week.id\r\n instance = db.session.query(DateWeek).filter_by(id=dwdb['_id']).first()\r\n if not 
instance:\r\n instance = DateWeek(**dwdb)\r\n db.session.add(instance)\r\n db.session.commit()\r\n\r\nclass DateWeek(db.Model):\r\n __tablename__ = 'date_week'\r\n id = db.Column(db.String(150), primary_key=True)\r\n date_entry = db.Column(db.String(150))\r\n week_id = db.Column(db.String(150))\r\n \r\n def update(self, date_entry, week_id):\r\n self.date_entry = date_entry\r\n self.week_id = week_id\r\n \r\n def __init__(self, _id, date_entry, week_id):\r\n self.id = _id\r\n self.date_entry = date_entry.date()\r\n self.week_id = week_id\r\n\r\ndef set_supply():\r\n lstup = LastUpdate.get_last_update(\"PlantSupplies\")\r\n PlantGrowSupply.set_for_update(lstup.last_updated)\r\n lstup.update()\r\n\r\ndef get_supply(update=True):\r\n #lstup = LastUpdate.get_last_update(\"PlantSupplies\")\r\n summ_d = []\r\n #pgss = PlantGrowSupply.get_lastupdated(lstup.last_updated)\r\n pgss = PlantGrowSupply.get_for_update()\r\n print(\"Processing {} supplies\".format(len(pgss)))\r\n for pgs in pgss:\r\n pgsdb = PlantGrowSupply.get_supply(pgs.id)\r\n summ_d.append({'week_id':pgsdb['week_id'],'plant_id':pgsdb['plant_id']})\r\n add_supply(pgsdb,update=update)\r\n pgs.reset_dw_sync()\r\n \r\n for summ in summ_d:\r\n #getadd_summary(summ['week_id'], summ['plant_id'])\r\n PlantGrow.get_plant_wp(summ['week_id'], summ['plant_id']).set_dw_sync()\r\n \r\n return 0\r\n\r\ndef add_supply(pgsdb,update=True):\r\n instance = db.session.query(PlantSupplies).filter_by(id=pgsdb['_id']).first()\r\n if instance:\r\n del pgsdb['_id']\r\n if update:\r\n instance.update(**pgsdb)\r\n db.session.commit()\r\n else:\r\n instance = PlantSupplies(**pgsdb)\r\n db.session.add(instance)\r\n db.session.commit()\r\n \r\nclass PlantSupplies(db.Model):\r\n __tablename__ = 'plant_supplies'\r\n id = db.Column(db.String(150), primary_key=True)\r\n supplier = db.Column(db.String(150))\r\n supplier_id = db.Column(db.String(150))\r\n forecast = db.Column(db.String(150))\r\n week_id = db.Column(db.String(150))\r\n add_date = db.Column(db.String(150))\r\n plant = db.Column(db.String(150))\r\n plant_id = db.Column(db.String(150))\r\n soft_delete = db.Column(db.String(1))\r\n \r\n def update(self,supplier, supplier_id, forecast, week_id, add_date, plant, plant_id,soft_delete):\r\n self.supplier=supplier\r\n self.supplier_id=supplier_id\r\n self.forecast=0 if not forecast else forecast\r\n self.week_id=week_id\r\n self.add_date=add_date.date()\r\n self.plant=plant\r\n self.plant_id=plant_id\r\n self.soft_delete=soft_delete\r\n \r\n def __init__(self, _id, supplier, supplier_id, forecast, week_id, add_date, plant, plant_id,soft_delete):\r\n self.id = _id\r\n self.supplier=supplier\r\n self.supplier_id=supplier_id\r\n self.forecast=0 if not forecast else forecast\r\n self.week_id=week_id\r\n self.add_date=add_date.date()\r\n self.plant=plant\r\n self.plant_id=plant_id\r\n self.soft_delete=soft_delete\r\n\r\ndef set_summary():\r\n lstup = LastUpdate.get_last_update(\"PlantSummary\")\r\n PlantGrow.set_for_update(lstup.last_updated)\r\n lstup.update()\r\n \r\n\r\ndef get_summary(update=True):\r\n #lstup = LastUpdate.get_last_update(\"PlantSummary\")\r\n pgs = PlantGrow.get_for_update()\r\n print(\"Processing {} summaries\".format(len(pgs)))\r\n for pg in pgs:\r\n if pg:\r\n add_summary(pg.pg_summary(),update=update)\r\n pg.reset_dw_sync()\r\n #lstup.update()\r\n return 0\r\n\r\ndef getadd_summary(week_id, plant_id):\r\n pg = PlantGrow.plant_summary(week_id, plant_id)\r\n if pg:\r\n add_summary(pg)\r\n \r\ndef add_summary(pg,update=True):\r\n instance = 
db.session.query(PlantSummary).filter_by(id=pg['_id']).first()\r\n if instance:\r\n del pg['_id']\r\n if update:\r\n instance.update(**pg)\r\n db.session.commit()\r\n else:\r\n instance = PlantSummary(**pg)\r\n db.session.add(instance)\r\n db.session.commit()\r\n \r\nclass PlantSummary(db.Model):\r\n __tablename__ = 'plant_summary'\r\n id = db.Column(db.String(150), primary_key=True)\r\n plant = db.Column(db.String(150))\r\n plant_id = db.Column(db.String(150))\r\n week_id = db.Column(db.String(150))\r\n num_reserved = db.Column(db.String(150))\r\n forecast = db.Column(db.String(150))\r\n actual = db.Column(db.String(150))\r\n \r\n def update(self, plant, plant_id, week_id, num_reserved, forecast, actual):\r\n self.plant=plant\r\n self.plant_id=plant_id\r\n self.week_id=week_id\r\n self.num_reserved=0 if not num_reserved else num_reserved\r\n self.forecast=0 if not forecast else forecast\r\n self.actual=0 if not actual else actual\r\n \r\n def __init__(self, _id, plant, plant_id, week_id, num_reserved, forecast, actual):\r\n self.id=_id\r\n self.plant=plant\r\n self.plant_id=plant_id\r\n self.week_id=week_id\r\n self.num_reserved=0 if not num_reserved else num_reserved\r\n self.forecast=0 if not forecast else forecast\r\n self.actual=0 if not actual else actual\r\n\r\ndef set_date():\r\n lstup = LastUpdate.get_last_update(\"Weeks\")\r\n GrowWeek.set_for_update(lstup.last_updated)\r\n lstup.update()\r\n\r\ndef get_date():\r\n #lstUp = LastUpdate.get_last_update('Weeks')\r\n #wks = GrowWeek.get_lastupdated(lstUp.last_updated)\r\n wks = GrowWeek.get_for_update()\r\n for wk in wks:\r\n add_week(wk)\r\n wk.reset_dw_sync()\r\n \r\n return 0\r\n\r\ndef add_week(gw):\r\n #gw = GrowWeek.get_by_id(week_id)\r\n week={}\r\n week['_id'] = gw.id\r\n week['week_number'] = gw.week_number\r\n week['year'] = gw.year\r\n week['monday_date'] = gw.week_monday\r\n instance = db.session.query(Weeks).filter_by(id=week['_id']).first()\r\n if instance:\r\n del week['_id']\r\n #instance.update(**week)\r\n #db.session.commit()\r\n else:\r\n instance = Weeks(**week)\r\n db.session.add(instance)\r\n db.session.commit()\r\n\r\nclass Weeks(db.Model):\r\n __tablename__ = 'weeks'\r\n id = db.Column(db.String(150), primary_key=True)\r\n week_number = db.Column(db.String(150))\r\n year = db.Column(db.String(150))\r\n monday_date = db.Column(db.DateTime())\r\n \r\n def update(self, week_number, year, monday_date):\r\n self.week_number=week_number\r\n self.year=year\r\n self.monday_date=monday_date\r\n \r\n def __init__(self, _id, week_number, year, monday_date):\r\n self.id=_id\r\n self.week_number=week_number\r\n self.year=year\r\n self.monday_date=monday_date","sub_path":"sales-inv-corchids/application/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":10417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
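The add_reserves, add_supply and add_summary helpers in the database.py record above all follow one SQLAlchemy upsert idiom: query by primary key, update in place when a row exists, otherwise insert. A minimal self-contained sketch of that idiom follows; the Item model, the in-memory SQLite engine and the field names are illustrative stand-ins, not part of the original module.

# Hypothetical model and session, standing in for application.db and the
# PlantReserves/PlantSupplies/PlantSummary models above.
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Item(Base):
    __tablename__ = 'item'
    id = Column(String(150), primary_key=True)
    value = Column(String(150))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def add_item(rec, update=True):
    # Same shape as add_reserves/add_supply: look up by the document id,
    # update in place when found, otherwise insert a new row.
    instance = session.query(Item).filter_by(id=rec['_id']).first()
    if instance:
        if update:
            instance.value = rec['value']
    else:
        instance = Item(id=rec['_id'], value=rec['value'])
        session.add(instance)
    session.commit()

add_item({'_id': 'r1', 'value': 'first'})   # inserts a new row
add_item({'_id': 'r1', 'value': 'second'})  # updates the same row in place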
+{"seq_id":"596489957","text":"import numpy as np\r\nimport warnings\r\nimport os\r\nimport time\r\n\r\nfrom sklearn.model_selection import RepeatedKFold\r\nfrom sklearn.utils import check_array, check_X_y, gen_batches\r\nfrom sklearn.utils.multiclass import unique_labels\r\nfrom sklearn.utils.extmath import _incremental_mean_and_var\r\nfrom sklearn import neighbors, datasets\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\nclass IncrementalLDA:\r\n \"\"\"Incremental Linear Discriminant Analysis (ILDA).\r\n\r\n Incremental Linear Discriminant Analysis is a discriminant model that can be updated as datasets arrive.\r\n Alternatively it can also be used to improve regular Linear Discriminant Analysis by splitting the inputs\r\n into batches using the parameter batch_size\r\n \"\"\"\r\n def __init__(self, shrinkage=None, priors=None, n_components=None, batch_size=None):\r\n self.shrinkage = shrinkage\r\n self.priors = priors\r\n self.n_components = n_components\r\n self.batch_size = batch_size\r\n\r\n def predict(self, X):\r\n X = check_array(X, dtype=[np.float64, np.float32])\r\n n_inputs, n_features = X.shape\r\n # Have to improve\r\n # check_is_fitted(self)\r\n if not hasattr(self, 'within_scatter'):\r\n raise Exception(\"Model has not been trained yet\")\r\n\r\n print(self.within_scatter)\r\n print(self.between_scatter)\r\n\r\n lda_matrix = np.dot(np.linalg.pinv(self.within_scatter), self.between_scatter)\r\n eigVals, eigVecs = np.linalg.eigh(lda_matrix)\r\n ldaVec = eigVecs[:, np.argsort(eigVals)[::-1]]\r\n ldaVec /= np.linalg.norm(ldaVec, axis=0)\r\n # print(\"X, eigVecs and ldaVec shapes: \", X.shape, eigVecs.shape, ldaVec.shape)\r\n updatedX = np.dot(X, ldaVec)\r\n updated_class_means = np.dot(self.class_mean_, ldaVec)\r\n\r\n yVals = np.zeros((n_inputs,))\r\n\r\n # print( \"X :::: \" , X)\r\n # print( \"Means:::: \", self.means)\r\n # print( \"ldaVec::: \", ldaVec)\r\n # print(\"updatedX: \", updatedX.shape)\r\n # print(\"updatedMean: \", updated_class_means.shape)\r\n\r\n for i in np.arange(n_inputs):\r\n currX = np.reshape(updatedX[i, :], (1, n_features))\r\n # print('updated_class_means.shape', updated_class_means.shape)\r\n # print('currX.shape', currX.shape)\r\n distVal = np.linalg.norm(np.subtract(updated_class_means, currX), axis=1)\r\n\r\n # print(\"distVal: \", distVal)\r\n # print(\"amin Value: \", np.amin(distVal, axis=0))\r\n # print(\"amin\", distVal == np.amin(distVal, axis=0))\r\n\r\n yVals[i] = self.classes_[distVal == distVal.min()]\r\n\r\n print(\"The given point(s) belong to the following class(es) in the same order: \", yVals)\r\n # print(\"Given number of points: \", X.shape[0], \" and yVals shape: \", yVals.shape)\r\n return yVals\r\n\r\n def fit(self, X, y):\r\n # Test if single fit or multiple fit\r\n X, y = check_X_y(X, y, estimator=self, ensure_min_samples=1)\r\n self.classes_ = np.sort(unique_labels(y))\r\n\r\n if self.priors is None: # estimate priors from sample\r\n _, y_t = np.unique(y, return_inverse=True) # non-negative ints\r\n self.priors_ = np.bincount(y_t) / float(len(y))\r\n else:\r\n self.priors_ = np.asarray(self.priors)\r\n\r\n if (self.priors_ < 0).any():\r\n raise ValueError(\"priors must be non-negative\")\r\n if not np.isclose(self.priors_.sum(), 1.0):\r\n warnings.warn(\"The priors do not sum to 1. 
Renormalizing\",\r\n UserWarning)\r\n self.priors_ = self.priors_ / self.priors_.sum()\r\n\r\n # Get the maximum number of components\r\n if self.n_components is None:\r\n self._max_components = len(self.classes_) - 1\r\n else:\r\n self._max_components = min(len(self.classes_) - 1,\r\n self.n_components)\r\n\r\n # LDA Logic begins here\r\n n_samples, n_features = X.shape\r\n\r\n if self.batch_size is None:\r\n self.batch_size = 5 * n_features\r\n\r\n for batch in gen_batches(n_samples, self.batch_size):\r\n self.partial_fit(X[batch], y[batch])\r\n\r\n return self\r\n\r\n def partial_fit(self, X, y, check_input=False):\r\n n_samples, n_features = X.shape\r\n\r\n print('X.shape', X.shape)\r\n # This is the first partial_fit\r\n if not hasattr(self, 'n_samples_seen_'):\r\n self.n_samples_seen_ = 0\r\n self.class_n_samples_seen_ = np.zeros(self.classes_.shape)\r\n\r\n self.mean_ = np.zeros((1, n_features))\r\n self.class_mean_ = np.zeros((np.size(self.classes_), n_features))\r\n\r\n self.var_ = .0\r\n\r\n self.between_scatter = np.zeros((n_features, n_features))\r\n self.within_scatter = np.zeros((n_features, n_features))\r\n self.class_within_scatter = np.zeros((n_features, n_features, self.classes_.size))\r\n\r\n # If the number of samples is more than 1, we use a batch fit algorithm as in the reference paper Pang et al.\r\n if n_samples > 1:\r\n self._batch_fit(X, y, check_input)\r\n # Else if there is only 1 sample, we use a single fit algorithm as in the reference paper Pang et al.\r\n else:\r\n self._single_fit(X, y, check_input)\r\n return self\r\n\r\n\r\n def _single_fit(self, X, y, check_input=False):\r\n print('Single Fit')\r\n if check_input:\r\n X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)\r\n\r\n # if self.means.size > 0:\r\n # totalMean = np.add(np.multiply(self.means, self.numbers), X) / (np.sum(self.numbers) + 1)\r\n # else:\r\n # totalMean = X\r\n # # New data belongs to Existing Class\r\n # if y in self.classes:\r\n # # Update Mean and Number of datasets in the class\r\n # indicesVal, = np.where(self.classes == Y)[0]\r\n # yClassMean = self.means[indicesVal, :]\r\n # yNums = self.numbers[indicesVal]\r\n # yClassMean = np.add((np.asarray(yClassMean) * yNums), X) / (yNums + 1)\r\n # yClassMean = np.asarray(yClassMean)[0,:]\r\n # self.means[indicesVal,:] = yClassMean\r\n # self.numbers[indicesVal] = yNums + 1\r\n #\r\n # # Update S_b\r\n # updatedS_b = np.zeros((X.shape[1], X.shape[1]))\r\n # idx = 0\r\n # for classVal in self.classes:\r\n # if classVal == y:\r\n # thisClassDiff = np.subtract(yClassMean, totalMean)\r\n # else:\r\n # thisClassDiff = np.subtract(self.means[idx, :], totalMean)\r\n # updatedS_b += self.numbers[idx] * np.dot(thisClassDiff, thisClassDiff.T)\r\n # idx += 1\r\n # self.s_b = updatedS_b\r\n #\r\n # # Update S_w\r\n # interClassDiff = np.subtract(X, yClassMean)\r\n # if (self.s_w.size > 0):\r\n # self.s_w = np.add(self.s_w, ((yNums / (yNums + 1)) *\r\n # np.dot(interClassDiff.T,\r\n # interClassDiff)))\r\n # else:\r\n # self.s_w = (yNums / (yNums + 1)) * \\\r\n # np.dot(interClassDiff.T, interClassDiff)\r\n # # New data belongs to New Class\r\n # else:\r\n # # Update formulae\r\n # if self.means.size > 0:\r\n # self.means = np.vstack((self.means, X))\r\n # self.numbers = np.append(self.numbers, 1)\r\n # self.s_b = self.s_b + np.dot(X - totalMean, (X - totalMean).T)\r\n # self.classes = np.append(self.classes, unique_labels(y))\r\n # # Create formulae\r\n # else:\r\n # self.means = X\r\n # self.numbers = np.array([1])\r\n # self.s_b = 
np.dot(X - totalMean, (X - totalMean).T)\r\n # self.classes = unique_labels(y)\r\n\r\n def _batch_fit(self, X, y, check_input=False):\r\n print('Batch fit')\r\n if check_input:\r\n X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)\r\n\r\n current_n_samples, n_features = X.shape\r\n # Update stats - they are 0 if this is the first step\r\n updated_mean, updated_var, updated_n_samples_seen_ = _incremental_mean_and_var(X, last_mean=self.mean_,\r\n last_variance=self.var_,\r\n last_sample_count=self.n_samples_seen_)\r\n # Whitening\r\n if self.n_samples_seen_ == 0:\r\n # If it is the first step, simply whiten X\r\n X = np.subtract(X, updated_mean)\r\n else:\r\n col_batch_mean = np.mean(X, axis=0)\r\n X = np.subtract(X, col_batch_mean)\r\n\r\n # Updating algorithm\r\n # First update class means\r\n updated_class_mean = self.class_mean_\r\n updated_class_n_samples_seen_ = self.class_n_samples_seen_\r\n # print('updated_class_n_samples_seen_', updated_class_n_samples_seen_)\r\n # print('updated_class_mean', updated_class_mean)\r\n for i, current_class in enumerate(self.classes_):\r\n current_class_samples = X[y == current_class, :]\r\n n_current_class_samples = current_class_samples.shape[0]\r\n previous_n_class_samples = updated_class_n_samples_seen_[i]\r\n if n_current_class_samples > 0 and previous_n_class_samples > 0:\r\n previous_class_sum_current_class = updated_class_mean[i, :] * updated_class_n_samples_seen_[i]\r\n current_class_sum_current_class = np.sum(current_class_samples, axis=0)\r\n\r\n # print('previous_class_sum_current_class.shape', previous_class_sum_current_class.shape)\r\n # print('current_class_sum_current_class.shape', current_class_sum_current_class.shape)\r\n # print('updated_class_mean.shape', updated_class_mean.shape)\r\n # print('updated_class_n_samples_seen_.shape', updated_class_n_samples_seen_[i])\r\n\r\n updated_class_n_samples_seen_[i] += n_current_class_samples\r\n updated_class_mean[i, :] = (previous_class_sum_current_class + current_class_sum_current_class) /\\\r\n previous_n_class_samples\r\n elif n_current_class_samples > 0:\r\n updated_class_mean[i, :] = np.mean(current_class_samples, axis=0)\r\n updated_class_n_samples_seen_[i] = n_current_class_samples\r\n\r\n # Then update between class scatter\r\n updated_between_scatter = self.between_scatter\r\n for i, current_class_mean in enumerate(updated_class_mean):\r\n n = X[y == self.classes_[i], :].shape[0]\r\n current_class_mean = current_class_mean.reshape(1, n_features)\r\n updated_mean = updated_mean.reshape(1, n_features)\r\n if n > 0:\r\n updated_between_scatter += n * (current_class_mean - updated_mean).T.dot(\r\n current_class_mean - updated_mean)\r\n\r\n # if np.any(np.isnan(updated_between_scatter)):\r\n # print('Reached nan:::: ', n)\r\n # print('Updatec class mean:::', updated_class_mean)\r\n # print('updated mean::::', updated_mean)\r\n\r\n updated_class_within_scatter = self.class_within_scatter\r\n for i, current_class_mean in enumerate(updated_class_mean):\r\n current_class_samples = X[y == self.classes_[i], :]\r\n n_current_class_samples = current_class_samples.shape[0]\r\n l_c = current_class_samples.shape[0]\r\n n_c = self.class_n_samples_seen_[i]\r\n mean_y_c = np.reshape(np.mean(current_class_samples, axis=0), (n_features, 1))\r\n\r\n if n_current_class_samples > 0 and n_c > 0:\r\n # print('current_class_samples.shape', current_class_samples.shape)\r\n mean_x_c = np.reshape(self.class_mean_[i, :], (n_features, 1))\r\n\r\n D_c = (mean_y_c - mean_x_c).dot((mean_y_c - 
mean_x_c).T)\r\n\r\n                E_c = np.zeros(D_c.shape)\r\n                for j, current_samples in enumerate(current_class_samples):\r\n                    E_c += (current_samples - mean_x_c).dot((current_samples - mean_x_c).T)\r\n\r\n                F_c = np.zeros(D_c.shape)\r\n                for j, current_samples in enumerate(current_class_samples):\r\n                    F_c += (current_samples - mean_y_c).dot((current_samples - mean_y_c).T)\r\n\r\n                updated_class_within_scatter[:, :, i] += ((n_c * l_c * l_c) * D_c / np.square(n_c + l_c)) + \\\r\n                                                         ((np.square(n_c) * E_c) / np.square(n_c + l_c)) + \\\r\n                                                         ((l_c * (l_c + (2 * n_c)) * F_c) / np.square(n_c + l_c))\r\n            elif n_current_class_samples > 0:\r\n                updated_class_within_scatter[:, :, i] = (current_class_samples - mean_y_c).dot(\r\n                    (current_class_samples - mean_y_c).T)\r\n        updated_within_scatter = np.sum(updated_class_within_scatter, axis=2)\r\n\r\n        # Final values after computation\r\n        self.n_samples_seen_ = updated_n_samples_seen_\r\n        self.class_n_samples_seen_ = updated_class_n_samples_seen_\r\n        self.mean_ = updated_mean\r\n        self.class_mean_ = updated_class_mean\r\n        self.var_ = updated_var\r\n        self.between_scatter = updated_between_scatter\r\n        self.within_scatter = updated_within_scatter\r\n        self.class_within_scatter = updated_class_within_scatter\r\n\r\n\r\ndef run_on_iris():\r\n    # Load the dataset\r\n    data = datasets.load_iris()\r\n    #\r\n    X = data.data\r\n    y = data.target\r\n    start_time = time.time()\r\n    path = \"Datasets/AR/\"\r\n    # X = np.load(os.path.join(path, 'X.npy'))\r\n    # y = np.load(os.path.join(path, 'Y.npy'))\r\n\r\n    n_splits = 5\r\n    n_repeats = 1\r\n    kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats)\r\n    accuracy_values = []\r\n\r\n    count_val = 0\r\n    for train_index, test_index in kf.split(X):\r\n        X_train, X_test = X[train_index], X[test_index]\r\n        y_train, y_test = y[train_index], y[test_index]\r\n\r\n        # Fit and predict using LDA\r\n        ilda = IncrementalLDA(batch_size=25)\r\n        print(\"Fitting\")\r\n        ilda.fit(X_train, y_train)\r\n\r\n        print(\"Predicting\")\r\n        y_train_predicted = ilda.predict(X_train)\r\n        y_test_predicted = ilda.predict(X_test)\r\n\r\n        accuracy = accuracy_score(y_train, y_train_predicted)\r\n        print(\"Train Accuracy:\", accuracy)\r\n\r\n        accuracy = accuracy_score(y_test, y_test_predicted)\r\n        print(\"Test Accuracy:\", accuracy)\r\n\r\n        accuracy_values.append(accuracy)\r\n        count_val = count_val + 1\r\n\r\n    print('Accuracy Values:')\r\n    print(accuracy_values)\r\n\r\n    average_accuracy = np.average(accuracy_values)\r\n    print('Average accuracy:')\r\n    print(average_accuracy)\r\n\r\n    f = open(os.path.join(path, 'average_accuracy_iris.txt'), 'w')\r\n    f.write('Average Accuracy on IRIS data= ' + str(average_accuracy) + '\\n')\r\n    f.close()\r\n    print(\"--- Total time taken %s seconds ---\" % (time.time() - start_time))\r\n    # pca = PCA()\r\n    # pca.plot_in_2d(X_test, y_pred, title=\"LDA\", accuracy=accuracy)\r\n\r\n\r\ndef run_on_data(path):\r\n    start_time = time.time()\r\n    X = np.load(os.path.join(path, 'X.npy'))\r\n    y = np.load(os.path.join(path, 'Y.npy'))\r\n\r\n    n_splits = 5\r\n    n_repeats = 1\r\n    kf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats)\r\n    accuracy_values = []\r\n\r\n    if X.shape[1] > 10000:\r\n        pca = PCA()\r\n        X = pca.fit_transform(X)\r\n\r\n    count_val = 0\r\n    for train_index, test_index in kf.split(X):\r\n        X_train, X_test = X[train_index], X[test_index]\r\n        y_train, y_test = y[train_index], y[test_index]\r\n\r\n        # Fit and predict using LDA\r\n        ilda = IncrementalLDA(batch_size=50)\r\n        print(\"Fitting\")\r\n        ilda.fit(X_train, y_train)\r\n\r\n        print(\"Predicting\")\r\n        
y_train_predicted = ilda.predict(X_train)\r\n y_test_predicted = ilda.predict(X_test)\r\n\r\n accuracy = accuracy_score(y_train, y_train_predicted)\r\n print(\"Train Accuracy:\", accuracy)\r\n\r\n accuracy = accuracy_score(y_test, y_test_predicted)\r\n print(\"Test Accuracy:\", accuracy)\r\n\r\n accuracy_values.append(accuracy)\r\n count_val = count_val + 1\r\n\r\n print('Accuracy Values:')\r\n print(accuracy_values)\r\n\r\n average_accuracy = np.average(accuracy_values)\r\n print('Average accuracy:')\r\n print(average_accuracy)\r\n\r\n f = open(os.path.join(path, 'ilda_accuracy.txt'), 'w')\r\n f.write('Average Accuracy on AR data = ' + str(average_accuracy) + '\\n')\r\n f.close()\r\n print(\"--- Total time taken %s seconds ---\" % (time.time() - start_time))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ar_path = \"Datasets/AR/\"\r\n # cacd_path = \"Datasets/CACD/\"\r\n run_on_iris()\r\n # run_on_data(ar_path)\r\n # run_on_data(cacd_path)\r\n # ar_data_test_pca()\r\n # iris_data()\r\n","sub_path":"ilda.py","file_name":"ilda.py","file_ext":"py","file_size_in_byte":17187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
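The predict method in the ilda.py record above classifies by projecting onto the leading eigenvectors of pinv(S_w)·S_b and taking the nearest projected class mean. A NumPy-only sketch of that linear-algebra core on toy data; np.linalg.eig is used here because pinv(S_w)·S_b is not symmetric in general (the class itself calls eigh), and every name below is local to the sketch.

import numpy as np

# Toy data: three well-separated classes in 4 dimensions.
rng = np.random.default_rng(0)
X = rng.normal(size=(60, 4))
y = np.repeat([0, 1, 2], 20)
X[y == 1] += 3.0
X[y == 2] -= 3.0

# Within- and between-class scatter, the quantities the class accumulates incrementally.
mean = X.mean(axis=0)
S_w = np.zeros((4, 4))
S_b = np.zeros((4, 4))
for c in np.unique(y):
    Xc = X[y == c]
    mc = Xc.mean(axis=0)
    S_w += (Xc - mc).T @ (Xc - mc)
    d = (mc - mean).reshape(-1, 1)
    S_b += len(Xc) * (d @ d.T)

# Discriminant directions: leading eigenvectors of pinv(S_w) @ S_b.
vals, vecs = np.linalg.eig(np.linalg.pinv(S_w) @ S_b)
W = vecs[:, np.argsort(vals.real)[::-1][:2]].real  # keep n_classes - 1 = 2 directions

# Nearest-projected-class-mean rule, as in predict().
class_means = np.stack([X[y == c].mean(axis=0) for c in np.unique(y)]) @ W
pred = np.argmin(np.linalg.norm(X @ W - class_means[:, None, :], axis=2), axis=0)
print((pred == y).mean())  # ~1.0 on this well-separated toy set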
+{"seq_id":"143284226","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport emcee\nimport corner\nimport random\nimport math\nimport subprocess\nfrom astropy.io import ascii\nimport pickle\nfrom matplotlib.ticker import MaxNLocator\nimport sys\nimport idlsave\nfrom scipy.stats.kde import gaussian_kde\nimport scipy.stats as stats\nimport matplotlib.mlab as mlab\nimport tables\nfrom scipy.interpolate import interp1d\nfrom chainconsumer import ChainConsumer\nfrom multiprocessing import Pool\nimport os\nimport time\n\n# -------------------------------------------------------------------------#\n## load local modules\nfrom settle import settle\nfrom burstrain import *\nfrom run_model import runmodel\nfrom get_data import get_obs\nfrom mrprior import mr_prior\nfrom get_data import *\nfrom initialise import init\n\n## Now we define the functions that emcee requires\n# define likelihood as a function of theta, x, y and yerr as this is what emcee expects as the inputs\n\nndim, nwalkers, nsteps, run_id, theta, x, y, yerr, tref, bstart, pflux, pfluxe, tobs, numburstssim, numburstsobs, bc, ref_ind, gti_checking, fluen, restart = init()\n\ndef lnlike(theta, x, y, yerr):\n\n # define y = \"data\" parameters\n\n for x, i in zip(\n [x for x in range(0, len(bstart) - 1) if x != ref_ind],\n [i for i in range(0, len(bstart) - 1) if i != ref_ind],\n ):\n globals()[\"t%s\" % i] = y[x]\n for x, i in zip(\n range(len(bstart) - 1, len(fluen) + len(bstart) - 1), range(0, len(bstart))\n ):\n globals()[\"Eb%s\" % i] = y[x]\n for x, i in zip(\n range(len(fluen) + len(bstart) - 1, len(y)), range(0, len(bstart - 1))\n ):\n globals()[\"a%s\" % i] = y[x]\n\n # define yerr as variance terms (errors) for our data parameters (listed in same order as for y)\n # *note that we have to enter three time errors for the code to work however in reality the error should be the same for all of them (st0, st2 and st3 are really dummy parameters)\n\n for x, i in zip(\n [x for x in range(0, len(bstart) - 1) if x != ref_ind],\n [i for i in range(0, len(bstart) - 1) if i != ref_ind],\n ):\n globals()[\"st%s\" % i] = yerr[x]\n for x, i in zip(\n range(len(bstart) - 1, len(fluen) + len(bstart) - 1), range(0, len(bstart))\n ):\n globals()[\"sEb%s\" % i] = yerr[x]\n for x, i in zip(\n range(len(fluen) + len(bstart) - 1, len(y)), range(0, len(bstart - 1))\n ):\n globals()[\"sa%s\" % i] = yerr[x]\n\n # define theta = model parameters, which we define priors for\n\n X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius = theta\n\n # Instead of treating s_t as a parameter, we just hardwire it here\n\n s_t = 10.0 / 1440.0\n\n # call model from IDL code defined as modeldata(base, z, x, r1, r2 ,r3)\n if gti_checking == 1:\n model, valid = runmodel(\n theta, y, tref, bstart, pflux, pfluxe, tobs, numburstssim, ref_ind, gti_checking,gti_start=st, gti_end=et\n )\n else:\n model, valid = runmodel(\n theta, y, tref, bstart, pflux, pfluxe, tobs, numburstssim, ref_ind, gti_checking\n )\n\n if not valid:\n return -np.inf, model\n\n # multiplying by scaling factors to match with the data\n model[len(bstart) - 1 : len(fluen) + len(bstart) - 1] *= r3\n model[len(fluen) + len(bstart) - 1 : len(y)] *= r2\n\n # To simplify final likelihood expression we define inv_sigma2 for each data parameter that describe the error.\n # The variance (eg sEb0) is underestimated by some fractional amount, f, for each set of parameters.\n\n sEb = yerr[len(bstart) - 1 : len(fluen) + len(bstart) - 1]\n sa = yerr[len(fluen) + len(bstart) - 1 : len(yerr)]\n\n inv_sigma2 = []\n 
for i in range(0, len(bstart) - 1):\n inv_sigma2.append(1.0 / (s_t ** 2))\n for i in range(0, len(bstart)):\n inv_sigma2.append(1.0 / ((sEb[i] * f_E) ** 2))\n for i in range(0, len(bstart) - 1):\n inv_sigma2.append(1.0 / ((sa[i] * f_a) ** 2))\n\n # Final likelihood expression\n cpts = (y - (model)) ** 2 * inv_sigma2 - (np.log(inv_sigma2))\n\n # Test if the result string is defined here. It is, so we return the selected elements of result\n # instead of the downselection in model\n\n base = Q_b\n z = Z\n x = X\n r1 = r1\n r2 = r2\n r3 = r3\n mass = mass\n radius = radius\n\n model2 = generate_burst_train(\n base,\n z,\n x,\n r1,\n r2,\n r3,\n mass,\n radius,\n bstart,\n pflux,\n pfluxe,\n tobs,\n numburstssim,\n run=run,\n double=double,\n debug=debug,\n )\n\n #model2 = np.string_(model2, dtype='S1000')\n model2 = str(model2).encode('ASCII')\n\n # Now also return the model\n return -0.5 * np.sum(cpts), model2\n\n\n# -------------------------------------------------------------------------#\n# This is the expression for the prior on the model parameters.\n\n# Define priors for theta. mr prior function is located in mrprior.py\n\n\ndef lnZprior(z):\n # This beta function for the metallicity prior is from Andy Casey and is an approximation of the metallicity of a mock galaxy\n # at 2.5-4.5 kpc for the location of 1808. Assuming ZCNO = 0.01 is average value.\n from scipy import stats\n import numpy as np\n\n beta = stats.beta\n ZCNO = 0.01\n\n return np.log(\n beta(10, 3).pdf((np.log10(z / ZCNO) + 3) / 3.75) / (3.75 * np.log(10) * z)\n )\n\n\ndef lnprior(theta):\n import numpy as np\n\n X, Z, Q_b, f_a, f_E, r1, r2, r3, mass, radius = theta\n\n if (\n 0.00001 < X < 0.76\n and 0.00001 < Z < 0.056\n and 0.000001 <= Q_b < 5.0\n and 0 < f_a < 100\n and 0 < f_E < 100\n and 0.005 < r1 < 1.0\n and 0.005 < r2 < 3.0\n and 0 < r3 * 1e3 < 1000\n and 1.15 < mass < 2.5\n and 9 < radius < 17\n ): # upper bound and lower bounds of each parameter defined here. Bounds were found by considering an estimated value for each parameter then giving reasonable limits.\n return 0.0 + lnZprior(Z) + mr_prior(mass, radius)\n return -np.inf\n\n\n# -------------------------------------------------------------------------#\n# Finally we combine the likelihood and prior into the overall lnprob function, called by emcee\n\n# define lnprob, which is the full log-probability function\ndef lnprob(theta, x, y, yerr):\n import numpy as np\n\n lp = lnprior(theta)\n\n # Now also returns the model, to accumulate along with the likelihoods\n\n like, model = lnlike(theta, x, y, yerr)\n\n if (not np.isfinite(lp)) or (not np.isfinite(like)):\n return -np.inf, -np.inf, model\n\n # we return the logprobability as well as the theta parameters at this point so we can extract results later\n return lp + like, lp, model\n \n\n\n# -------------------------------------------------------------- #","sub_path":"beans/likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
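lnprob in the likelihood.py record above returns the log-posterior plus two extra values (the prior term and the model realisation); under emcee's convention those extras are stored per sample as "blobs". A hedged sketch of how such a function is typically driven — the Gaussian-ball start and the discard count are assumptions, with ndim, nwalkers, nsteps, theta, x, y and yerr as produced by init() above.

import emcee
import numpy as np

# Walkers start in a tight Gaussian ball around the initial theta from init().
pos = np.array(theta) + 1e-4 * np.random.randn(nwalkers, ndim)

sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps, progress=True)

flat_samples = sampler.get_chain(discard=nsteps // 10, flat=True)
blobs = sampler.get_blobs()  # the (lp, model) extras returned alongside the log-probability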
+{"seq_id":"298451852","text":"# -*- coding: utf-8 -*-\n\"\"\"\nYOUR HEADER COMMENT HERE\n\n@author: Sarah Barden\n\n\"\"\"\n\nimport random\nimport math\nfrom load import load_seq\nrandom.seed(5845)\n\nfrom amino_acids import aa, codons, aa_table # you may find these useful\n\n\ndef shuffle_string(s):\n \"\"\"Shuffles the characters in the input string\n NOTE: this is a terhelper function, you do not\n have to modify this in any way \"\"\"\n return ''.join(random.sample(s, len(s)))\n\n\n# YOU WILL START YOUR IMPLEMENTATION FROM HERE DOWN ###\ndef get_complement(nucleotide):\n \"\"\" Returns the complementary nucleotide\n nucleotide: a nucleotide (A, C, G, or T) represented as a string\n returns: the complementary nucleotide\n >>> get_complement('A')\n 'T'\n >>> get_complement('C')\n 'G'\n \"\"\"\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'T':\n return 'A'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'\n\n\ndef get_reverse_complement(dna):\n \"\"\" Computes the reverse complementary sequence of DNA for the specfied DNA\n sequence\n\n dna: a DNA sequence represented as a string\n returns: the reverse complementary DNA sequence represented as a string\n >>> get_reverse_complement(\"ATGCCCGCTTT\")\n 'AAAGCGGGCAT'\n >>> get_reverse_complement(\"CCGCGTTCA\")\n 'TGAACGCGG'\n \"\"\"\n rev_comp = ''\n for i in range(0, len(dna)):\n nucleo = dna[i]\n comp = get_complement(nucleo)\n rev_comp = comp + rev_comp\n return rev_comp\n\n\ndef split_into_codons(dna):\n \"\"\" Takes a DNA sequence (a string) and splits it into a list of codons\n as a list of strings.\n\n dna: a DNA sequence\n returns: DNA sequence split into codons\n >>> split_into_codons(\"ATGTGATAG\")\n ['ATG', 'TGA', 'TAG']\n >>> split_into_codons(\"ATGTGATAGCC\")\n ['ATG', 'TGA', 'TAG', 'CC']\n \"\"\"\n dna_split = []\n length = math.ceil(len(dna)/3)\n for i in range(0, length):\n j = 3*i\n codon = dna[j:j+3]\n dna_split += [codon]\n return dna_split\n\n\ndef rest_of_ORF(dna):\n \"\"\" Takes a DNA sequence that is assumed to begin with a start\n codon and returns the sequence up to but not including the\n first in frame stop codon (TGA, TAA, or TAG). If there is\n no in frame stop codon, returns the whole string.\n\n dna: a DNA sequence\n returns: the open reading frame represented as a string\n >>> rest_of_ORF(\"ATGTGAA\")\n 'ATG'\n >>> rest_of_ORF(\"ATGAGATAGG\")\n 'ATGAGA'\n \"\"\"\n index = -1\n dna_split = split_into_codons(dna)\n\n for i, j in enumerate(dna_split):\n if j == \"TAG\" or j == \"TGA\" or j == \"TAA\":\n index = i\n if index == -1:\n return ''.join(dna_split) # if there is no stop codon\n return ''.join(dna_split[:index])\n\n\ndef find_all_ORFs_oneframe(dna):\n \"\"\" Finds all non-nested open reading frames in the given DNA\n sequence and returns them as a list. This function should\n only find ORFs that are in the default frame of the sequence\n (i.e. 
they start on indices that are multiples of 3).\n By non-nested we mean that if an ORF occurs entirely within\n another ORF, it should not be included in the returned list of ORFs.\n\n dna: a DNA sequence\n returns: a list of non-nested ORFs\n >>> find_all_ORFs_oneframe(\"ATGCATGAATGTAGATAGATGTGCCC\")\n ['ATGCATGAATGTAGA', 'ATGTGCCC']\n >>> find_all_ORFs_oneframe(\"ATGCCCATGTTTTAG\")\n ['ATGCCCATGTTT']\n \"\"\"\n dna_split = split_into_codons(dna)\n i = 0\n all_ORFs_oneframe = []\n length_of_orf = 0\n while i < len(dna_split):\n if dna_split[i] == \"ATG\":\n orf = rest_of_ORF(dna[int(i)*3:])\n all_ORFs_oneframe += [orf]\n length_of_orf = math.ceil(len(orf)/3)\n i += length_of_orf\n else:\n i += 1\n return all_ORFs_oneframe\n\n\ndef find_all_ORFs(dna):\n \"\"\" Finds all non-nested open reading frames in the given DNA sequence in\n all 3 possible frames and returns them as a list. By non-nested we\n mean that if an ORF occurs entirely within another ORF and they are\n both in the same frame, it should not be included in the returned list\n of ORFs.\n\n dna: a DNA sequence\n returns: a list of non-nested ORFs\n\n >>> find_all_ORFs(\"ATGCATGAATGTAG\")\n ['ATGCATGAATGTAG', 'ATGAATGTAG', 'ATG']\n \"\"\"\n all_ORFs = []\n for i in range(0, 3):\n dna_new = dna[i:]\n all_ORFs += find_all_ORFs_oneframe(dna_new)\n return all_ORFs\n\n\ndef find_all_ORFs_both_strands(dna):\n \"\"\" Finds all non-nested open reading frames in the given DNA sequence on both\n strands.\n\n dna: a DNA sequence\n returns: a list of non-nested ORFs\n >>> find_all_ORFs_both_strands(\"ATGCGAATGTAGCATCAAA\")\n ['ATGCGAATG', 'ATGCTACATTCGCAT']\n \"\"\"\n orfs = []\n orfs = find_all_ORFs(dna) + find_all_ORFs(get_reverse_complement(dna))\n return orfs\n\n\ndef longest_ORF(dna):\n \"\"\" Finds the longest ORF on both strands of the specified DNA and returns it\n as a string\n >>> longest_ORF(\"ATGCGAATGTAGCATCAAA\")\n 'ATGCTACATTCGCAT'\n \"\"\"\n orfs = find_all_ORFs_both_strands(dna)\n longest = max(orfs, key=len)\n return longest\n\n\ndef longest_ORF_noncoding(dna, num_trials):\n \"\"\" Computes the maximum length of the longest ORF over num_trials shuffles\n of the specfied DNA sequence\n\n dna: a DNA sequence\n num_trials: the number of random shuffles\n returns: the maximum length longest ORF\n \"\"\"\n i = 0\n longest_each_trial = []\n while i < num_trials:\n shuffled_dna = shuffle_string(dna)\n longest_each_trial.append(longest_ORF(shuffled_dna))\n i += 1\n\n longest_longest = max(longest_each_trial, key=len)\n return len(longest_longest)\n\n\ndef coding_strand_to_AA(dna):\n \"\"\" Computes the Protein encoded by a sequence of DNA. 
This function\n    does not check for start and stop codons (it assumes that the input\n    DNA sequence represents a protein coding region).\n\n    dna: a DNA sequence represented as a string\n    returns: a string containing the sequence of amino acids encoded by\n             the input DNA fragment\n\n    >>> coding_strand_to_AA(\"ATGCGA\")\n    'MR'\n    >>> coding_strand_to_AA(\"ATGCCCGCTTT\")\n    'MPA'\n    \"\"\"\n    dna_codons = split_into_codons(dna)\n    i = 0\n    aa_string = ''\n    while i < len(dna_codons):\n        if len(dna_codons[i]) == 3:\n            amino_acid = aa_table[dna_codons[i]]\n            aa_string += amino_acid\n        i += 1\n    return aa_string\n\n\ndef gene_finder(dna):\n    \"\"\" Returns the amino acid sequences that are likely coded by the specified dna\n\n    dna: a DNA sequence\n    returns: a list of all amino acid sequences coded by the sequence dna.\n    \"\"\"\n    orfs = find_all_ORFs_both_strands(dna)\n    print(orfs)\n    threshold = longest_ORF_noncoding(dna, 1000)\n    print('threshold is', threshold)\n    print('number of orfs:', len(orfs))\n    aa_sequences = []\n    i = 0\n    while i < len(orfs):\n        print(len(orfs[i]))\n        if len(orfs[i]) > threshold:\n            print('if')\n            aa_sequences += [coding_strand_to_AA(orfs[i])]\n        i += 1\n    print(aa_sequences)\n    return aa_sequences\n\n\nif __name__ == \"__main__\":\n    import doctest\n    dna = load_seq(\"./data/X73525.fa\")\n    gene_finder(dna)\n    # doctest.testmod(verbose=True)\n    # doctest.run_docstring_examples(coding_strand_to_AA, globals())\n","sub_path":"gene_finder.py","file_name":"gene_finder.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
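The doctests embedded in gene_finder.py already pin down the expected behaviour; the same calls work as plain usage, assuming the load and amino_acids helper modules are importable (expected outputs are copied verbatim from the doctests):

from gene_finder import rest_of_ORF, find_all_ORFs_both_strands, coding_strand_to_AA

print(rest_of_ORF("ATGAGATAGG"))                          # ATGAGA
print(find_all_ORFs_both_strands("ATGCGAATGTAGCATCAAA"))  # ['ATGCGAATG', 'ATGCTACATTCGCAT']
print(coding_strand_to_AA("ATGCCCGCTTT"))                 # MPA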
+{"seq_id":"293871428","text":"#!/usr/bin/env python3\nimport main\nimport numpy as np\nimport mnist_parser\nfrom math import *\nfrom PIL import Image\n\ntestNet = main.NeuralNet (784, [30, 10])\ntestNet.importNet(\"bigTestNet784_30_10.txt\")\n\nsample_number = 502\n\nmp = mnist_parser.mnistParser ('../mnist/t10k-labels.idx1-ubyte', '../mnist/t10k-images.idx3-ubyte')\nmp.open()\nlbl = mp.parse_labels()[sample_number]\nimage_vector = mp.parse_img()[sample_number]\n\narr = np.zeros ((mp.imfrows, mp.imfcoll), dtype = 'uint8')\nfor i in range(mp.imfrows):\n for j in range(mp.imfcoll):\n arr[i][j] = int(image_vector[i*mp.imfcoll+j]*255.0)\nimg = Image.fromarray(arr, mode = 'L')\nimg.show()\nmp.close()\n\ntestNet.input(image_vector)\ntestNet.update()\n\nprint('Input value')\nprint (lbl)\nprint (\"Error\")\nprint (testNet.verify(image_vector, lbl))\nprint (\"Given value\")\nprint(testNet.output())\n","sub_path":"src/usertest.py","file_name":"usertest.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"249304183","text":"import sys\nf1 = open(sys.argv[2]+\"/justice/\"+sys.argv[1]+\"/data.log\",\"r\")\nf2 = open(sys.argv[2]+\"/nojustice/\"+sys.argv[1]+\"/data.log\",\"r\")\nf3 = open(sys.argv[1].lower(), \"w\")\n\ncontent1 = f1.readlines()\ncontent2 = f2.readlines()\n\nf3.write(\"comjustica\\tsemjustica\\n\")\n\nfor i in range(0, len(content1)):\n\tf3.write(content1[i].replace(\"\\n\", \"\") + '\\t' + content2[i].replace(\"\\n\", \"\") +\"\\n\")\n","sub_path":"scripts/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"401147534","text":"import cv2\nimport sys\nimport config as cfg\nimport numpy as np\nimport PCA as pca\n\npredict_name = ''\nimage_width = cfg.img_width\nxml_path = cfg.xml_path\n\n# 初始化分类器\ncascade = cv2.CascadeClassifier(xml_path)\n\n# 加载训练好的数据\nmean_value = pca.read_mean_value()\neigen_vectors_k = pca.read_eigen_vectors_k()\nface_name_train_list, feature_data_train_list = pca.read_feature_data()\n\n# 摄像头\ncap = cv2.VideoCapture(0)\nret, frame = cap.read()\n\n\nwhile True:\n # 识别人脸\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rect = cascade.detectMultiScale(gray, 1.3, 5)\n\n face_name_list = []\n face_num = 0\n face_test_data_array = np.zeros((1, image_width * image_width))\n font=cv2.FONT_HERSHEY_SIMPLEX\n\n for x, y, w, h in rect:\n\n cv2.putText(frame, predict_name, (x, y - 50), font, 4, (0,255,0), 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n roi = gray[y:y + h, x:x + h]\n roi = cv2.resize(roi, (image_width, image_width))\n roi = cv2.equalizeHist(roi)\n\n face_num += 1\n roi_vector = roi.reshape(1, image_width * image_width)\n face_test_data_array = np.vstack((face_test_data_array, roi_vector))\n face_name_list.append(' ')\n\n # 如果图片中包含人脸\n if len(rect) != 0:\n\n face_test_data_array = face_test_data_array[1:, :]\n\n data_mat = face_test_data_array - mean_value\n feature_data = np.dot(data_mat, eigen_vectors_k.T)\n\n face_name_predict_list = []\n predict_variance_list = []\n\n # 遍历测试图像集,与训练好的样本逐一比对,挑选偏差的方差最小者为预测结果\n for i in range(0, face_num):\n face_name_predict_list.append([])\n predict_variance_list.append([])\n face_name_predict_list[i].append(face_name_train_list[0])\n predict_variance_list[i].append(1.23e+10)\n\n # 与训练好的样本逐一比对\n for j in range(0, face_name_train_list.__len__()):\n if face_name_predict_list[i][-1] != face_name_train_list[j]:\n face_name_predict_list[i].append(face_name_train_list[j])\n predict_variance_list[i].append(1.23e+10)\n variance = 0\n bias = feature_data[0] - feature_data_train_list[j]\n for x in bias:\n variance += x * x\n if predict_variance_list[i][-1] > variance:\n predict_variance_list[i][-1] = int(variance)\n\n # 挑选偏差的方差最小者为预测结果\n variance_min = 1.23e+10\n predict_name = ''\n for j in range(0, face_name_predict_list[i].__len__()):\n if variance_min > predict_variance_list[i][j]:\n variance_min = predict_variance_list[i][j]\n predict_name = face_name_predict_list[i][j]\n print(predict_name)\n\n cv2.imshow('frame', frame)\n ret, frame = cap.read()\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"camera_test.py","file_name":"camera_test.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"479831855","text":"from utils import *\nimport tensorflow as tf\nimport numpy as np\nfrom loadData import DataSet\nimport random\n\nclass abpNet(object):\n\n def __init__(self, category='Mnist', prior=1, vis_step=10, Train_Epochs=200, batch_size=128, z_size=100,\n langevin_num=20, lr=0.001, theta=1, delta=0.001, history_dir='./', checkpoint_dir='./', logs_dir='./',\n recon_dir='./', gen_dir='./'):\n self.test = False\n self.prior = prior\n self.category = category\n self.epoch = Train_Epochs\n self.img_size = 28 if (category == 'Fashion-Mnist' or category == 'Mnist') else 64\n self.batch_size = batch_size\n self.z_size = z_size\n self.langevin_num = langevin_num\n self.vis_step = vis_step\n\n self.lr = lr\n self.theta = theta\n self.delta = delta\n self.channel = 1 if (category == 'Fashion-Mnist' or category == 'Mnist') else 3\n\n self.history_dir = history_dir\n self.checkpoint_dir = checkpoint_dir\n self.logs_dir = logs_dir\n self.recon_dir = recon_dir\n self.gen_dir = gen_dir\n\n self.z = tf.placeholder(tf.float32, shape=[self.batch_size, self.z_size], name='latent')\n\n self.x = tf.placeholder(tf.float32, shape=[self.batch_size, self.img_size, self.img_size, self.channel],\n name='image')\n\n def build_Model(self):\n self.gen = self.Generator(self.z, reuse=False)\n self.langevin, grad = self.langevin(self.z, self.x)\n\n \"\"\"\n Loss and Optimizer\n \"\"\"\n self.gen_loss = self.l2loss(self.gen, self.x)\n self.var = [var for var in tf.trainable_variables() if var.name.startswith('Gen')]\n\n self.optimizer = tf.train.AdamOptimizer(self.lr)\n self.grad = self.optimizer.compute_gradients(self.gen_loss, var_list=self.var)\n self.compute_grad = self.optimizer.apply_gradients(self.grad)\n \"\"\"\n Logs\n \"\"\"\n tf.summary.scalar('gen_loss', tf.reduce_mean(self.gen_loss))\n # TODO showing specifically\n # tf.summary.histogram('hyper params', self.hyper_var)\n self.summary_op = tf.summary.merge_all()\n\n\n def langevin(self, z_arg, x):\n def cond(i, z, grad):\n return tf.less(i, self.langevin_num)\n\n def body(i, z, grad):\n noise = tf.random_normal(shape=[self.batch_size, self.z_size], name='noise')\n gen = self.Generator(z, reuse=True)\n gen_loss = self.l2loss(gen, x)\n grad = tf.gradients(gen_loss, z, name='gen_grad')[0]\n\n z = z - 0.5 * self.delta * self.delta * (grad + self.prior*z) + self.delta*noise\n # x = x - 0.5 * self.config.delta2 * self.config.delta2 * (prior + grad) + 0.001*noise\n\n return tf.add(i, 1), z, grad\n\n with tf.name_scope(\"langevin_dynamic\"):\n i = tf.constant(0)\n grad = tf.constant(0, shape=(list(z_arg.shape)), dtype=tf.float32)\n i, z, grad = tf.while_loop(cond, body, [i, z_arg, grad])\n\n return z, tf.reduce_mean(grad)\n\n def Generator(self, z, reuse=False):\n with tf.variable_scope('Gen', reuse=reuse):\n if self.category == 'Fashion-Mnist' or self.category == 'Mnist':\n z = tf.reshape(z, [-1, self.z_size])\n\n fc1 = tf.layers.dense(inputs=z, units=1024, name='fc1')\n\n fc1 = tf.nn.leaky_relu(tf.contrib.layers.batch_norm(fc1, is_training=True))\n\n fc2 = tf.layers.dense(inputs=fc1, units=1568, name='fc2')\n\n fc2 = tf.nn.leaky_relu(tf.contrib.layers.batch_norm(fc2, is_training=True))\n\n fc2 = tf.reshape(fc2, [self.batch_size, 7, 7, -1])\n\n dc1 = deconv2d(fc2, (self.batch_size, self.img_size // 2, self.img_size // 2, 128), kernal=(5, 5),\n name='dc1')\n dc1 = tf.contrib.layers.batch_norm(dc1, is_training=True)\n dc1 = tf.nn.leaky_relu(dc1)\n\n dc2 = deconv2d(dc1, (self.batch_size, self.img_size // 1, self.img_size // 1, 1), kernal=(5, 5),\n 
name='dc2')\n\n output = tf.nn.tanh(dc2)\n\n return output\n\n def l2loss(self, syn, obs):\n a = 1.0 / (2 * self.theta * self.theta) * tf.square(syn - obs)\n # a = tf.reduce_sum(a, axis=-1)\n # a = tf.reduce_sum(a, axis=-1)\n l2loss = tf.reduce_mean(tf.reduce_sum(a,axis=(1,2,3)), axis=0)\n # l2loss = tf.reduce_mean(1.0 / (2 * self.theta * self.theta) * tf.square(syn - obs), axis=0)\n return l2loss\n\n def train(self, sess):\n self.build_Model()\n\n data = DataSet(img_size=self.img_size, batch_size=self.batch_size, category=self.category)\n\n sess.run(tf.global_variables_initializer())\n\n\n saver = tf.train.Saver(max_to_keep=10)\n\n writer = tf.summary.FileWriter(self.logs_dir, sess.graph)\n\n start = 0\n latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)\n\n if latest_checkpoint:\n latest_checkpoint.split('-')\n start = int(latest_checkpoint.split('-')[-1])\n saver.restore(sess, latest_checkpoint)\n print('Loading checkpoint {}.'.format(latest_checkpoint))\n\n tf.get_default_graph().finalize()\n\n latent_gen = np.random.normal(size=(len(data), self.z_size))\n\n for epoch in range(start + 1, self.epoch):\n num_batch = int(len(data) / self.batch_size)\n losses = []\n for step in range(num_batch):\n obs = data.NextBatch(step)\n z = latent_gen[step * self.batch_size: (step + 1) * self.batch_size].copy()\n z = sess.run(self.langevin, feed_dict={self.z: z, self.x: obs})\n\n l2loss, summary, _ = sess.run([self.gen_loss, self.summary_op, self.compute_grad],\n feed_dict={self.z: z, self.x: obs})\n latent_gen[step * self.batch_size: (step + 1) * self.batch_size] = z\n losses.append(l2loss)\n writer.add_summary(summary, global_step=epoch)\n\n print(epoch, \": Loss : \", np.mean(losses))\n if epoch % self.vis_step == 0:\n self.visualize(saver, sess, len(data), epoch, latent_gen, data)\n\n def visualize(self, saver, sess, num_data, epoch, latent_gen, data):\n saver.save(sess, \"%s/%s\" % (self.checkpoint_dir, 'model.ckpt'), global_step=epoch)\n idx = random.randint(0, int(num_data / self.batch_size) - 1)\n z = latent_gen[idx * self.batch_size: (idx + 1) * self.batch_size]\n \"\"\"\n Recon\n \"\"\"\n obs = data.NextBatch(idx)\n z = sess.run(self.langevin, feed_dict={self.z: z, self.x: obs})\n sys = sess.run(self.gen, feed_dict={self.z: z})\n sys = np.array((sys + 1) * 127.5, dtype=np.float)\n path = self.recon_dir + 'epoch' + str(epoch) + 'recon.jpg'\n # show_z_and_img(epoch, path, z, sys, self.row, self.col)\n show_in_one(path, sys, column=16, row=8)\n\n \"\"\"\n Generation\n \"\"\"\n # obs = data.NextBatch(idx, test=True)\n z = np.random.normal(size=(self.batch_size, self.z_size))\n # z = sess.run(self.langevin, feed_dict={self.z: z, self.x: obs})\n sys = sess.run(self.gen, feed_dict={self.z: z})\n sys = np.array((sys + 1) * 127.5, dtype=np.float)\n path = self.gen_dir + 'epoch' + str(epoch) + 'gens.jpg'\n show_in_one(path, sys, column=16, row=8)","sub_path":"Tensorflow/abp_net.py","file_name":"abp_net.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
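The tf.while_loop body in langevin() of abp_net.py implements the update z ← z − (δ²/2)(∇_z L + prior·z) + δ·ε with ε ~ N(0, I). One inference loop written out in plain NumPy, with a random stand-in for the gradient that tf.gradients supplies in the real code:

import numpy as np

delta, prior = 0.001, 1.0
z = np.random.normal(size=(128, 100))      # one latent vector per batch element

for _ in range(20):                        # langevin_num steps
    grad = np.random.normal(size=z.shape)  # stand-in for d(gen_loss)/dz
    noise = np.random.normal(size=z.shape)
    z = z - 0.5 * delta * delta * (grad + prior * z) + delta * noise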
+{"seq_id":"530458999","text":"import networkx as nx\nfrom networkx.utils.random_sequence import (powerlaw_sequence)\nimport numpy as np\nfrom tqdm import tqdm\nimport sys\nimport copy\nfrom multiprocessing import Pool\n\n\ndef create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):\n \"\"\" Attempt to create a valid degree sequence of length n using\n specified function sfunction(n,**kwds).\n\n Parameters\n ----------\n n : int\n Length of degree sequence = number of nodes\n sfunction: function\n Function which returns a list of n real or integer values.\n Called as \"sfunction(n,**kwds)\".\n max_tries: int\n Max number of attempts at creating valid degree sequence.\n \n source: https://networkx.github.io/documentation/networkx-1.10\n \"\"\"\n tries=0\n max_deg=n\n while tries < max_tries:\n trialseq=sfunction(n,**kwds)\n # round to integer values in the range [0,max_deg]\n seq=[min(max_deg, max( int(round(s)),0 )) for s in trialseq]\n # if graphical return, else throw away and try again\n if nx.is_graphical(seq):\n return seq\n tries+=1\n raise nx.NetworkXError(\"Exceeded max (%d) attempts at a valid sequence.\"%max_tries)\n\ndef power_law_graph(exp, num, seed=1234, plot=False):\n \"\"\"\n Function which creates power law graph based on a number of nodes (num),\n an exponent (exp) and a seed. \n \n source: http://nbviewer.jupyter.org/gist/Midnighter/248f1a5d8c21b39525ae\n \"\"\"\n \n print(\"Power law exponent = -{0:.2f}\".format(exp))\n \n # create graph\n sequence = create_degree_sequence(num, powerlaw_sequence, exponent=exp)\n graph = nx.configuration_model(sequence, seed=seed)\n\n # count parallel edges and avoid counting A-B as well as B-A\n num_par = sum(\n len(graph[node][neigh]) for node in graph\n for neigh in graph.neighbors(node)) // 2\n print(\"Power law graph has {0:d} parallel edges\".format(num_par))\n loops = graph.selfloop_edges()\n\n # remove them\n graph = nx.Graph(graph)\n graph.remove_edges_from(loops)\n \n # get largest connected component\n # unfortunately, the iterator over the components is not guaranteed to be sorted by size\n components = sorted(nx.connected_components(graph), key=len, reverse=True)\n lcc = graph.subgraph(components[0])\n print(\"Size of largest connected component = {0:d}\".format(len(lcc)))\n \n if plot == True:\n\n # new degree sequence\n simple_seq = [deg for (node, deg) in lcc.degree()]\n\n # create histograms\n counts = np.bincount(sequence)\n mask = (counts > 0)\n plt.figure()\n plt.plot(\n np.arange(len(counts))[mask],\n counts[mask] / counts.sum(),\n \"o\",\n label=\"MultiGraph\")\n simple_counts = np.bincount(simple_seq)\n mask = (simple_counts > 0)\n\n # distribution is shifted for visibility\n plt.plot(\n np.arange(len(simple_counts))[mask],\n simple_counts[mask] / simple_counts.sum() / 10.0,\n \"o\",\n label=\"Simple LCC\")\n\n # plot distribution\n x = np.arange(1, len(counts))\n plt.plot(x, np.power(x, -exp))\n plt.xlabel(r\"Degree $k$\")\n plt.xscale(\"log\")\n plt.ylabel(r\"Probability $P(k)$\")\n plt.yscale(\"log\")\n plt.title(r\"$N = {0:d}, \\\\quad \\\\lambda = {1:.2f}$\".format(num, exp))\n plt.legend(loc=\"best\")\n plt.show()\n nx.powerlaw_cluster_graph\n\n # visualize network\n plt.figure()\n pos = nx.spring_layout(graph)\n nx.draw(graph, pos, node_color='b', node_size=10, with_labels=False)\n plt.show()\n return graph\n\ndef random_network(p, num, seed=1234, plot=False):\n \"\"\"\n Function which creates random network based on edge probability (p),\n a given number of nodes (num) and a seed. 
\n \"\"\"\n \n # create graph\n graph = nx.fast_gnp_random_graph(num, p, seed=seed, directed=False)\n\n # plot\n if plot == True:\n plt.figure()\n pos = nx.spring_layout(graph)\n nx.draw(graph, pos, node_color='b', node_size=5, with_labels=False)\n plt.show()\n\n plt.figure()\n degree_sequence = sorted([d for n, d in graph.degree()], reverse=True)\n # print \"Degree sequence\", degree_sequence\n dmax = max(degree_sequence)\n\n plt.semilogy(degree_sequence, 'b-', marker='o')\n plt.title(\"Degree rank plot\")\n plt.ylabel(\"degree\")\n plt.xlabel(\"rank\")\n plt.show()\n return graph\n\nclass Individual():\n \"\"\" \n Contains data about status of infection for each person.\n \n time_since_infection equals -1 if person is not infected.\n \n The disease status is 0 for no disease, 1 for the sensitive\n strain and 2 for the resistant strain.\n \"\"\"\n \n def __init__(self,i):\n self.identifier = i\n self.disease_status = 0\n self.time_since_infection = -1\n\n\ndef network_model(beta, tau, nu, mu, init, num_steps, graph, doInit = False, disable_progress=False):\n \"\"\"\n Function which runs disease spreading model on specified network.\n \n beta = transmission probability\n tau = probability of treatment\n nu = probability of recovering spontaneously\n init = initial number of infecteds (with sensitive strain)\n num_steps = number of iterations to run model\n graph = an initialized graph, e.g. random or scale-free\n doInit = boolean specifying to initialize or not\n \"\"\"\n \n # arrays/set containing number of diseased\n num_infected = np.zeros(num_steps)\n num_res = np.zeros(num_steps)\n infected = set()\n \n # initialization of infected individuals\n if doInit:\n for i in range(len(graph)):\n graph.node[i][\"Data\"].disease_status = 0\n for i in init:\n graph.node[i][\"Data\"].disease_status = 1\n infected.add(i)\n else:\n for i in range(len(graph)):\n if graph.node[i][\"Data\"].disease_status:\n infected.add(i)\n \n # iterate over time\n for t in tqdm(range(num_steps), position = 0, disable=disable_progress):\n infected_copy = infected.copy()\n\n # iterate over infecteds \n for i in infected_copy:\n\n # prob of recovering\n if np.random.rand() < nu:\n graph.node[i][\"Data\"].disease_status = 0\n infected.remove(i)\n\n # prob of treatment\n elif graph.node[i][\"Data\"].disease_status == 1 and np.random.rand() < tau*(1-mu):\n graph.node[i][\"Data\"].disease_status = 0\n infected.remove(i)\n\n # prob of getting the resistant strain\n elif graph.node[i][\"Data\"].disease_status == 1 and np.random.rand() < tau*mu:\n graph.node[i][\"Data\"].disease_status = 2\n\n # spreading of disease to neigbours\n if graph.node[i][\"Data\"].disease_status:\n NEIGHBOR = [n for n in graph.neighbors(i)]\n for neighbor in NEIGHBOR:\n if graph.node[neighbor][\"Data\"].disease_status == 0 and np.random.rand() < beta:\n highest_disease = max(\n graph.node[i][\"Data\"].disease_status,\n graph.node[neighbor][\"Data\"].disease_status)\n graph.node[i][\"Data\"].disease_status = highest_disease\n graph.node[neighbor][\"Data\"].disease_status = highest_disease\n infected.add(i)\n infected.add(neighbor)\n\n # keep track of total number of resistant infecteds\n tot = 0\n for i in infected:\n if graph.node[i][\"Data\"].disease_status == 2:\n tot += 1\n num_res[t] = tot \n num_infected[t] = len(infected)\n return (num_res, num_infected, 
graph)\n","sub_path":"Code/network_model.py","file_name":"network_model.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
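A hedged sketch of wiring the network_model.py pieces together: build a graph, attach an Individual to every node under the "Data" key (the attribute network_model reads), seed a few infecteds and run. The parameter values are illustrative only, and graph.node[...] assumes the same pre-2.4 networkx API the module itself uses.

import numpy as np

graph = random_network(p=0.01, num=500)
for i in graph.nodes():
    graph.node[i]["Data"] = Individual(i)

seed_infected = list(np.random.choice(len(graph), size=5, replace=False))
num_res, num_infected, graph = network_model(
    beta=0.05, tau=0.1, nu=0.05, mu=0.01,
    init=seed_infected, num_steps=100, graph=graph, doInit=True)
print(num_infected[-1], num_res[-1])  # totals infected / resistant at the last step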
+{"seq_id":"613756616","text":"import inotify.adapters\nimport pandas as pd\nimport time\nfrom datetime import datetime\nimport os\n\n#list of action present in inotify\nlist = ['IN_MOVED_TO', \"IN_DELETE\" , 'IN_CREATE', 'IN_MODIFY', 'IN_CLOSE_WRITE','IN_MOVED_FROM']#actions to register\ncolumnsNames = [\"path\",\"actions\",\"time\"] #columns names of the CSV files\n\n# exported main method\ndef files(path1,path2, q, b):\n outputPath = path1#path of the CSV output file\n while True:\n i = inotify.adapters.InotifyTree(path2)\n for event in i.event_gen(yield_nones=False):\n (_, type_names, path, filename) = event\n #Look if it is and interested event and change the output format \n if any(elem in list for elem in type_names):\n if type_names[0] == 'IN_MOVED_TO':\n eventType = \"moved\"\n elif type_names[0] == 'IN_DELETE':\n eventype = \"deleted\"\n elif type_names[0] == 'IN_CREATE':\n eventype = \"created\"\n elif type_names[0] == 'IN_MODIFY':\n eventype = \"modified\"\n elif type_names[0] == 'IN_CLOSE_WRITE':\n eventype = \"modified\"\n else:\n eventype = \"moved\"\n date_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n pattern = \"%Y-%m-%d %H:%M:%S\"\n date = int(time.mktime(time.strptime(date_time, pattern)))\n event = {\"eventType\": 3, \"date\":date, \"path\": os.path.join(path, filename), \"type\": eventype}\n q.put(event)\n \n #Only if CSV option is active\n if b:\n fileEvent = [os.path.join(path, filename), type_names, datetime.now()]\n df1 = pd.DataFrame([fileEvent], columns = columnsNames)\n if not os.path.isfile(outputPath):\n df1.to_csv(outputPath, index=None, header=True)\n else:\n df1.to_csv(outputPath, index=None, mode='a', header=False)\n\n\n\n","sub_path":"uba/src/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"531168656","text":"import numpy as np\nimport pandas\n\nfrom modin.data_management.utils import compute_chunksize\nfrom modin.engines.base.io.file_reader import FileReader\n\n\nclass ColumnStoreReader(FileReader):\n @classmethod\n def call_deploy(cls, fname, col_partitions, **kwargs):\n from modin.pandas import DEFAULT_NPARTITIONS\n\n return np.array(\n [\n cls.deploy(\n cls.parse,\n DEFAULT_NPARTITIONS + 2,\n dict(\n fname=fname,\n columns=cols,\n num_splits=DEFAULT_NPARTITIONS,\n **kwargs\n ),\n )\n for cols in col_partitions\n ]\n ).T\n\n @classmethod\n def build_partition(cls, partition_ids, row_lengths, column_widths):\n return np.array(\n [\n [\n cls.frame_partition_cls(\n partition_ids[i][j],\n length=row_lengths[i],\n width=column_widths[j],\n )\n for j in range(len(partition_ids[i]))\n ]\n for i in range(len(partition_ids))\n ]\n )\n\n @classmethod\n def build_index(cls, partition_ids):\n from modin.pandas import DEFAULT_NPARTITIONS\n\n index_len = cls.materialize(partition_ids[-2][0])\n index = pandas.RangeIndex(index_len)\n index_chunksize = compute_chunksize(\n pandas.DataFrame(index=index), DEFAULT_NPARTITIONS, axis=0\n )\n if index_chunksize > index_len:\n row_lengths = [index_len] + [0 for _ in range(DEFAULT_NPARTITIONS - 1)]\n else:\n row_lengths = [\n index_chunksize\n if i != DEFAULT_NPARTITIONS - 1\n else index_len - (index_chunksize * (DEFAULT_NPARTITIONS - 1))\n for i in range(DEFAULT_NPARTITIONS)\n ]\n return index, row_lengths\n\n @classmethod\n def build_columns(cls, columns):\n from modin.pandas import DEFAULT_NPARTITIONS\n\n column_splits = (\n len(columns) // DEFAULT_NPARTITIONS\n if len(columns) % DEFAULT_NPARTITIONS == 0\n else len(columns) // DEFAULT_NPARTITIONS + 1\n )\n col_partitions = [\n columns[i : i + column_splits]\n for i in range(0, len(columns), column_splits)\n ]\n column_widths = [len(c) for c in col_partitions]\n return col_partitions, column_widths\n\n @classmethod\n def build_dtypes(cls, partition_ids, columns):\n # Compute dtypes concatenating the results from each of the columns splits\n # determined above. This creates a pandas Series that contains a dtype for every\n # column.\n dtypes = pandas.concat(cls.materialize(list(partition_ids)), axis=0)\n dtypes.index = columns\n return dtypes\n\n @classmethod\n def build_query_compiler(cls, path, columns, **kwargs):\n col_partitions, column_widths = cls.build_columns(columns)\n partition_ids = cls.call_deploy(path, col_partitions, **kwargs)\n index, row_lens = cls.build_index(partition_ids)\n remote_parts = cls.build_partition(partition_ids[:-2], row_lens, column_widths)\n dtypes = cls.build_dtypes(partition_ids[-1], columns)\n new_query_compiler = cls.query_compiler_cls(\n cls.frame_cls(\n remote_parts, index, columns, row_lens, column_widths, dtypes=dtypes,\n )\n )\n return new_query_compiler\n","sub_path":"modin/engines/base/io/column_stores/column_store_reader.py","file_name":"column_store_reader.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"589664694","text":"from parsimonious.grammar import Grammar\nimport ast\n\n\nclass Mini(object):\n def __init__(self, env={}):\n self.env = env\n # Add built-in functions\n self.env['sum'] = lambda *args: sum(args)\n self.text = ''\n\n def __line(self, node):\n \"\"\"Return the 1-based line number\"\"\"\n return self.text.count('\\n', 0, node.start) + 1\n\n def __column(self, node):\n \"\"\"Return the 1-based column\"\"\"\n try:\n return node.start - self.text.rindex('\\n', 0, node.start)\n except ValueError:\n return node.start + 1\n\n def parse(self, source, grammar_rule=\"program\"):\n # Extract grammar rules from doc strings\n grammar = '\\n'.join(v.__doc__ for k, v in vars(self.__class__).items()\n if '__' not in k and hasattr(v, '__doc__') and v.__doc__)\n return Grammar(grammar)[grammar_rule].parse(source)\n\n def _eval(self, node):\n method = getattr(self, node.expr_name, lambda node, children: children)\n return method(node, [self._eval(n) for n in node])\n\n def compile(self, source):\n # Compile an expression\n parsed = self.get_ast(source, 'program')\n fixed = ast.fix_missing_locations(parsed)\n codeobj = compile(fixed, '', 'exec')\n eval(codeobj, self.env)\n\n def eval(self, source):\n # Evaluate an expression\n parsed = self.get_ast(source, 'expressions')\n fixed = ast.fix_missing_locations(parsed)\n codeobj = compile(fixed, '', 'eval')\n return eval(codeobj, self.env)\n\n def get_ast(self, source, grammar_rule):\n self.text = source\n node = self.parse(source, grammar_rule)\n return self._eval(node)\n\n # --------------------------------------------------------------------------------------------------------\n # Grammar methods\n #\n\n def program(self, node, children):\n 'program = statement+'\n return ast.Module(body=children,\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def statement(self, node, children):\n 'statement = _ (func / assignment) _'\n return children[1][0]\n\n def expressions(self, node, children):\n 'expressions = expr*'\n exp = ast.Expression(\n ast.List(\n elts=children,\n ctx=ast.Load(),\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n )\n return exp\n\n def func(self, node, children):\n 'func = name _ \"=\" _ \"(\" parameters \")\" _ \"->\" _ expr'\n name, _, _equals, _, lbrace, params, rbrace, _, _arrow, _, expr = children\n\n # Name will return us an AST node called name, we just need it's name\n name = name.id\n\n funcdef = ast.FunctionDef(name=name,\n args=ast.arguments(\n args=params,\n vararg=None, kwarg=None, defaults=[],\n lineno=self.__line(node),\n col_offset=self.__column(node)\n\n ),\n body=[ast.Return(value=expr,\n lineno=expr.lineno,\n col_offset=expr.col_offset\n )],\n decorator_list=[],\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n return funcdef\n\n def parameters(self, node, children):\n 'parameters = parameter*'\n return children\n\n def parameter(self, node, children):\n 'parameter = ~\"[a-z]+\" _ '\n return ast.Name(id=node.text.strip(),\n ctx=ast.Param(),\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def expr(self, node, children):\n 'expr = _ (ifelse / call / infix / number / name) _'\n return children[1][0]\n\n def ifelse(self, node, children):\n 'ifelse = \"if\" _ expr _ \"then\" _ expr _ \"else\" _ expr'\n _if, _, cond, _, _then, _, cons, _, _else, _, alt = children\n return ast.IfExp(test=cond, body=cons, orelse=alt,\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def call(self, node, children):\n 'call = name 
\"(\" arguments \")\"'\n name, lbrace, args, rbrace = children\n\n return ast.Call(\n func=name,\n args=args,\n keywords=[],\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def arguments(self, node, children):\n 'arguments = argument*'\n return children\n\n def argument(self, node, children):\n 'argument = expr _'\n return children[0]\n\n def infix(self, node, children):\n 'infix = \"(\" _ expr _ operator _ expr _ \")\"'\n _lbrace, _, expr1, _, op, _, expr2, _, rbrace = children\n return ast.BinOp(\n expr1,\n op,\n expr2,\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def operator(self, node, children):\n 'operator = \"+\" / \"-\" / \"*\" / \"/\"'\n operators = {\"+\": ast.Add, \"-\": ast.Sub, \"*\": ast.Mult, \"/\": ast.Div}\n return operators[node.text.strip()](lineno=self.__line(node), col_offset=self.__column(node))\n\n def number(self, node, children):\n 'number = ~\"[0-9]+\"'\n return ast.Num(int(node.text),\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def name(self, node, children):\n 'name = ~\"[a-z]+\" _ '\n return ast.Name(id=node.text.strip(),\n ctx=ast.Load(),\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def assignment(self, node, children):\n 'assignment = lvalue _ \"=\" _ expr'\n lvalue, _, equals, _, expr = children\n return ast.Assign(targets=[lvalue],\n value=expr,\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def lvalue(self, node, children):\n 'lvalue = ~\"[a-z]+\" _ '\n return ast.Name(id=node.text.strip(),\n ctx=ast.Store(),\n lineno=self.__line(node),\n col_offset=self.__column(node)\n )\n\n def _(self, node, children):\n '_ = ~\"\\s*\"'\n","sub_path":"mini.py","file_name":"mini.py","file_ext":"py","file_size_in_byte":7042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"418463441","text":"#!/usr/bin/env python\n\nimport time\nimport fourletterphat as flp\nfrom subprocess import Popen, PIPE\n\ntemps = []\n\nwhile True:\n temp = Popen([\"vcgencmd\", \"measure_temp\"], stdout=PIPE)\n temp = temp.stdout.read().decode('utf-8')\n temp = temp[5:].replace(\".\", \"\").replace(\"'\",\"\").strip()\n flp.clear()\n flp.print_str(temp)\n flp.set_decimal(1, 1)\n flp.show()\n time.sleep(1)\n","sub_path":"examples/cpu-temp.py","file_name":"cpu-temp.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"154958905","text":" #!/usr/bin/env python\n\n\"\"\"Project module.\n\nThis module defines:\n\n* the :class:`flasker.project.Project` class which contains\n all the logic between the Flask and Celery applications and the SQLAlchemy\n sessions.\n\n* the ``current_project`` proxy\n\nFor convenience, both these variables are also available directly in the\n``flasker`` namespace.\n\n.. note::\n\n In most cases :class:`flasker.project.Project` will not need to be\n instantiated explicitely (the console tool handles the setup) and will only\n be accessed via the ``current_project`` proxy. In some cases however the\n constructor can be called to create a project (for example from an IPython\n notebook or to use a separate WSGI server).\n\n .. code:: python\n\n from flasker import Project\n\n # instantiating the project\n pj = Project('path/to/config.cfg')\n\n # the application that would be passed to a WSGI server\n application = pj.flask\n\n\"\"\"\n\nfrom logging import getLogger, NullHandler, StreamHandler, DEBUG\nfrom os.path import abspath, dirname, join, sep, split, splitext\nfrom sys import path\nfrom werkzeug.local import LocalProxy\n\nfrom .util.helpers import parse_config\n\n\nclass ProjectImportError(Exception):\n\n pass\n\n\nclass Project(object):\n\n \"\"\"Project class.\n\n :param conf_path: path to the configuration file. The following sections\n are special: ``PROJECT``, ``ENGINE``, ``FLASK``, ``CELERY``. All but \n ``PROJECT`` are optional. See below for a list of available options in each\n section.\n :type conf_path: str\n\n The following options are available each section of the configuration file\n (as a convenience, parameters stored as JSON strings are also accepted,\n which can be useful for example to configure Celery queues):\n\n * ``PROJECT``\n\n * ``MODULES``: comma separated list of the project's modules. They must be\n importable from the configuration file's folder.\n * ``DEBUG``\n * ``DISABLE_FLASK``\n * ``DISABLE_CELERY``\n\n * ``ENGINE``\n\n * ``URL``: the url to the database\n * any valid arguments to ``sqlalchemy.create_engine``\n\n * ``SESSION``\n\n * ``SMARTCOMMIT``: if ``True`` (default), all database transactions\n will be committed after each Flask app request and Celery task\n completion. 
If ``False`` the session will simply be removed.\n    * any valid arguments to ``sqlalchemy.orm.session_maker``\n\n  * ``FLASK``\n\n    * ``ROOT_FOLDER``: path to the Flask application's root folder\n      relative to the configuration file (defaults to ``app``).\n    * ``STATIC_FOLDER``: the application's ``static_folder`` relative to\n      the application's root folder (defaults to ``static``).\n    * ``TEMPLATE_FOLDER``: the application's ``template_folder`` relative\n      to the application's root folder (defaults to ``templates``).\n    * any valid Flask configuration option\n\n  * ``CELERY``\n\n    * ``MAIN``\n    * any valid Celery configuration option\n\n  \"\"\"\n\n  #: Default configuration\n  default_conf = {\n    'PROJECT': {\n      'MODULES': '',\n      'DEBUG': False,\n      'DISABLE_FLASK': False,\n      'DISABLE_CELERY': False,\n    },\n    'ENGINE': {\n      'URL': 'sqlite://',\n    },\n    'SESSION': {\n      'SMARTCOMMIT': True,\n    },\n    'FLASK': {\n      'NAME': 'app',\n      'ROOT_FOLDER': 'app',\n      'STATIC_FOLDER': 'static',\n      'TEMPLATE_FOLDER': 'templates',\n    },\n    'CELERY': {\n      'MAIN': '__main__',\n    },\n  }\n\n  #: Dictionary of configuration values\n  conf = None\n\n  #: Path to current configuration file\n  conf_path = None\n\n  #: Logger\n  logger = None\n\n  _flask = None\n  _celery = None\n  _session = None\n\n  __state = {}\n\n  def __init__(self, conf_path=None):\n\n    self.__dict__ = self.__state\n\n    if conf_path is None:\n\n      if not self.conf_path:\n        raise ProjectImportError('Project instantiation outside the Flasker '\n                                 'command line tool requires a '\n                                 'configuration file path.')\n\n    else:\n\n      if self.conf_path and abspath(conf_path) != self.conf_path:\n        raise ProjectImportError('Cannot instantiate projects for different '\n                                 'configuration files in the same process.')\n\n      elif not self.conf_path:\n\n        # load configuration\n        self.conf = parse_config(\n          conf_path,\n          default=self.default_conf,\n          case_sensitive=True\n        )\n        self.conf_path = abspath(conf_path)\n\n        # load logger\n        self.logger = getLogger(__name__)\n        if self.conf['PROJECT']['DEBUG']:\n          self.logger.setLevel(DEBUG)\n          self.logger.addHandler(StreamHandler())\n        else:\n          self.logger.addHandler(NullHandler())\n\n        # load all project modules\n        self._funcs = []\n        path.append(dirname(self.conf_path))\n        project_modules = [\n          module_name.strip()\n          for module_name in self.conf['PROJECT']['MODULES'].split(',')\n        ]\n        for module_name in project_modules:\n          __import__(module_name)\n        self.logger.debug(\n          '%s modules imported (%s)' % (\n            len(project_modules),\n            ', '.join(project_modules),\n          )\n        )\n        for func in self._funcs:\n          func(self)\n        self.logger.debug(\n          '%s handlers found and run (%s)' % (\n            len(self._funcs),\n            ', '.join(func.__name__ for func in self._funcs),\n          )\n        )\n\n  def __repr__(self):\n    # the angle-bracketed repr string was stripped during extraction ('' % ... raises\n    # a TypeError); reconstructed with a plausible format\n    return '<Project %r>' % (self.conf_path, )\n\n  @property\n  def flask(self):\n    \"\"\"Flask application.\n\n    Lazily initialized.\n\n    \"\"\"\n    if self._flask is None and not self.conf['PROJECT']['DISABLE_FLASK']:\n\n      from flask import Flask\n\n      flask_app = Flask(\n        self.conf['FLASK']['NAME'],\n        static_folder=self.conf['FLASK']['STATIC_FOLDER'],\n        template_folder=self.conf['FLASK']['TEMPLATE_FOLDER'],\n        instance_path=join(\n          dirname(self.conf_path),\n          self.conf['FLASK']['ROOT_FOLDER'],\n        ),\n        instance_relative_config=True,\n      )\n\n      flask_app.config.update({\n        k: v\n        for k, v in self.conf['FLASK'].items()\n        if not k in self.default_conf['FLASK']\n      })\n\n      self.logger.debug('flask app loaded')\n      self._flask = flask_app\n    return self._flask\n\n  @property\n  def celery(self):\n    \"\"\"Celery application.\n\n    Lazily initialized.\n\n    \"\"\"\n    if self._celery 
is None and not self.conf['PROJECT']['DISABLE_CELERY']:\n\n      from celery import Celery\n      from celery.task import periodic_task\n\n      celery_app = Celery(self.conf['CELERY']['MAIN'])\n\n      celery_app.conf.update({\n        k: v\n        for k, v in self.conf['CELERY'].items()\n        if not k in self.default_conf['CELERY']\n      })\n\n      # proxy for easy access\n      celery_app.periodic_task = periodic_task\n\n      # maybe not required with lazy session initialization\n      # TODO: check this\n      # @worker_process_init.connect\n      # def create_worker_connection(*args, **kwargs):\n      #   self._create_session()\n\n      self.logger.debug('celery app loaded')\n      self._celery = celery_app\n    return self._celery\n\n  @property\n  def session(self):\n    \"\"\"SQLAlchemy scoped sessionmaker.\n\n    Lazily initialized.\n\n    \"\"\"\n    if self._session is None:\n\n      from celery.signals import task_postrun\n      from flask.signals import request_tearing_down\n\n      from sqlalchemy import create_engine\n      from sqlalchemy.exc import InvalidRequestError\n      from sqlalchemy.orm import scoped_session, sessionmaker\n\n      engine = create_engine(\n        self.conf['ENGINE']['URL'],\n        **{\n          k.lower(): v\n          for k, v in self.conf['ENGINE'].items()\n          if not k in self.default_conf['ENGINE']\n        }\n      )\n      session = scoped_session(\n        sessionmaker(\n          bind=engine,\n          **{\n            k.lower(): v\n            for k, v in self.conf['SESSION'].items()\n            if not k in self.default_conf['SESSION']\n          }\n        )\n      )\n\n      task_postrun.connect(_remove_session)\n      request_tearing_down.connect(_remove_session)\n\n      self.logger.debug('session loaded')\n      self._session = session\n    return self._session\n\n  def run_after_module_imports(self, func):\n    \"\"\"Hook to run a function right after all project modules are imported.\n\n    :param func: the function to be called right before startup. It will be\n      passed the project as its single argument.\n    :type func: callable\n\n    This decorator can be used to run functions after all the components of\n    the project have been created.\n\n    \"\"\"\n    self._funcs.append(func)\n\n  def _remove_session(self):\n    \"\"\"Remove database connections.\"\"\"\n    # imported locally here as well: the import inside the session property is not\n    # in scope, so catching InvalidRequestError below would raise a NameError\n    from sqlalchemy.exc import InvalidRequestError\n    try:\n      if self.conf['SESSION']['SMARTCOMMIT']:\n        self.session.commit()\n        self.logger.debug('session committed')\n    except InvalidRequestError as e:\n      self.session.rollback()\n      self.session.expunge_all()\n      raise e\n    finally:\n      self.session.remove()\n      self.logger.debug('session removed')\n\n  def _reset(self):\n    \"\"\"Reset current project.\"\"\"\n    self.__class__.__state = {}\n\n\n#: Proxy to the current project\ncurrent_project = LocalProxy(Project)\n\ndef _remove_session(*args, **kwargs):\n  \"\"\"Globally namespaced function for signals to work.\"\"\"\n  current_project._remove_session()\n\n","sub_path":"venv/Lib/site-packages/flasker/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":9336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"195125410","text":"import pickle\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom yamlparams.utils import Hparam\n\nfrom . import detection\n\n\nPIC_SIZE = 150\nPROBABILITY_THRESHOLD = 0.8\n\n\n# Model\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\nclass ConvModel(nn.Module):\n def __init__(self, cat_n):\n super(ConvModel, self).__init__()\n \n def conv_block(in_ch, out_ch):\n return nn.Sequential(nn.Conv2d(in_ch, out_ch, 3),\n nn.MaxPool2d(2),\n nn.BatchNorm2d(out_ch))\n \n base = 10\n # (N, 1, 150, 150) -> (N, 512)\n self.conv = nn.Sequential(conv_block(1, base*3),\n conv_block(base*3, base*2),\n conv_block(base*2, base*3),\n conv_block(base*3, base*2),\n conv_block(base*2, base*3),\n Flatten())\n \n def linear_block(in_dim, out_dim):\n return nn.Sequential(nn.Linear(in_dim, out_dim),\n nn.LeakyReLU(0.3),\n nn.Dropout(0.05))\n \n self.fc = nn.Sequential(linear_block(base*3*4, 256),\n linear_block(256, cat_n))\n \n\n def forward(self, batch):\n \n def conv_pics(pic_list):\n return list(map(self.conv, list(map(lambda t: torch.unsqueeze(t, dim=0), pic_list))))\n \n conved = list(map(conv_pics, batch))\n conved = list(map(lambda lt: torch.stack(lt).sum(dim=0), conved))\n conved = torch.stack(conved)\n \n clss = self.fc(conved).squeeze(dim=1)\n \n return torch.sigmoid(clss)\n \n \ndef pilpaper2operations(pil_img):\n cv_img = detection.pil2cv(pil_img)\n cv_imgs = detection.crop_conturs(cv_img)\n pil_imgs = list(map(detection.cv2pil, cv_imgs))\n tensor_imgs = list(map(transform, pil_imgs))\n pred = model([tensor_imgs])\n probs = pred[0, [ixs]].detach().numpy()\n detail_ops = confident_ops[probs[0] > PROBABILITY_THRESHOLD]\n return list(detail_ops)\n\n\n\nconfig = Hparam('./config.yml')\n\nif config.run.models.operations:\n # all operations\n all_ops = np.array(pickle.load(open('./models/predict_operations/all_ops.pkl', 'rb')))\n # Confident predictable operations\n ixs = [3, 6, 10, 12, 24, 34, 35, 36, 37, 39, 40, 41, 43, 44, 48, 54]\n confident_ops = all_ops[ixs]\n\n\n model = ConvModel(len(all_ops)).eval()\n model.load_state_dict(torch.load('./models/predict_operations/conv_model.pt', map_location='cpu'))\n transform = transforms.Compose([transforms.Grayscale(),\n transforms.Resize((PIC_SIZE, PIC_SIZE)),\n transforms.ToTensor()])\nelse:\n print(\"Operations prediction havent loaded due config settings\")","sub_path":"service/models/predict_operations/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"374035318","text":"#!/usr/bin/env python\n\nimport sys\nimport argparse\nimport plantcv.utils\n\n\n# Parse command-line arguments\n###########################################\ndef options():\n \"\"\"Parse command line options.\n \"\"\"\n\n # Create an argument parser\n parser = argparse.ArgumentParser(description=\"A collection of utilities for PlantCV.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # Create subcommand parsers\n subparsers = parser.add_subparsers()\n\n # Create the Naive Bayes subcommand\n json2csv_cmd = subparsers.add_parser(\"json2csv\", help=\"Convert PlantCV output JSON files to CSV.\")\n json2csv_cmd.add_argument(\"-j\", \"--json\", help=\"Input PlantCV JSON filename.\", required=True)\n json2csv_cmd.add_argument(\"-c\", \"--csv\", help=\"Output CSV filename prefix.\", required=True)\n json2csv_cmd.set_defaults(func=run_json2csv)\n\n # If no arguments are given, print the help menu\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n # Parse command-line options\n args = parser.parse_args()\n # Execute the selected method\n args.func(args)\n\n###########################################\n\n\n# run_json2csv\n###########################################\ndef run_json2csv(args):\n plantcv.utils.json2csv(json_file=args.json, csv_file=args.csv)\n###########################################\n\n\n# Main\n###########################################\ndef main():\n \"\"\"Main program.\n \"\"\"\n # Parse command-line options and run the selected method\n options()\n###########################################\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"plantcv-utils.py","file_name":"plantcv-utils.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"614382995","text":"import argparse\nimport sys\nimport os\nimport csv\nfrom figure.figure_set import FigureSet\nfrom figure.figure import Figure\nfrom figure.misc import map_label\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description='Convert PanelSeg data set to CSV format.')\n parser.add_argument('--list_path',\n help='The path to the list file.',\n default='\\\\Users\\\\jie\\\\projects\\\\PanelSeg\\\\programs\\\\PanelSeg_Keras\\\\exp\\\\all.txt',\n type=str)\n parser.add_argument('--annotation_path',\n help='The output annotation CSV file.',\n default='\\\\Users\\\\jie\\\\projects\\\\PanelSeg\\\\programs\\\\PanelSeg_Keras\\\\exp\\\\all.csv',\n type=str)\n # parser.add_argument('mapping_path',\n # help='The output class to ID mapping CSV file.',\n # default='Z:\\\\Users\\\\jie\\\\projects\\\\PanelSeg\\\\ExpKeras\\\\mapping.csv',\n # type=str)\n return parser.parse_args(args)\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n figure_set = FigureSet()\n figure_set.load_list(args.list_path)\n with open(args.annotation_path, 'w', newline='') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter=',')\n for idx, file in enumerate(figure_set.files):\n # if '1465-9921-6-55-4' not in file:\n # continue\n print('Processing Image {:d}: {:s}.'.format(idx, file))\n figure = Figure(file)\n figure.load_image()\n xml_path = os.path.join(figure.image_path.replace('.jpg', '_data.xml'))\n figure.load_annotation_iphotodraw(xml_path)\n\n # write to CSV file\n # The format is:\n # image_path,panel_x1,panel_y1,panel_x2,panel_y2,label_x1,label_y1,label_x2,label_y2,label\n # if there is no label, the format becomes:\n # image_path,panel_x1,panel_y1,panel_x2,panel_y2,,,,,\n for panel in figure.panels:\n row = list()\n row.append(figure.image_path) # add image_path\n row.append(str(panel.panel_rect[0]))\n row.append(str(panel.panel_rect[1]))\n row.append(str(panel.panel_rect[2]))\n row.append(str(panel.panel_rect[3]))\n row.append('panel')\n if panel.label_rect is None:\n row.append('')\n row.append('')\n row.append('')\n row.append('')\n row.append('')\n else:\n label = map_label(panel.label)\n row.append(str(panel.label_rect[0]))\n row.append(str(panel.label_rect[1]))\n row.append(str(panel.label_rect[2]))\n row.append(str(panel.label_rect[3]))\n row.append(label)\n csv_writer.writerow(row)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"PanelSeg_Keras/panel_seg/bin/convert_annotation_to_csv.py","file_name":"convert_annotation_to_csv.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"115686231","text":"import pygame as pg\nfrom math import *\nimport time\nimport threading\n\n### VARIABLES TO TWEAK\nscreenw, screenh = (1920, 1080)\n# radius\nr = 500\n# number of points\np = 500\n# the first number of table in sequence\nn = 2\n\n\nwin = pg.display.set_mode((screenw, screenh))\n\ndef Distance(a, b):\n return sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2)\n\n\ndef Calc():\n\n pg.draw.circle(win, (255, 255, 255), (screenw//2, screenh//2), r, 2)\n\n pts = []\n\n for i in range(pts):\n pt = (r * cos(i / pts * 2 * pi) + screenw / 2, r * sin(i / pts * 2 * pi) + screenh / 2)\n pts.append(pt)\n\n for i in range(len(pts)):\n pg.draw.line(win, (255, 255, 255), pts[i % pts], pts[n * i % pts])\n\n\n\nupdate = True\n\ndef Repeat():\n while update:\n win.fill((0, 0, 0))\n Calc()\n global n\n n += 1\n time.sleep(1)\n pg.display.update()\n time.sleep(0.1)\n\n# make a thread for the function to repeat and main thread is for event listening\nnewThread = threading.Thread(target=Repeat)\nnewThread.start()\nwhile update:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n update = False\n newThread.join()\n\n","sub_path":"Cardioid.py","file_name":"Cardioid.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"296519252","text":"import adaptive_learning as al\nfrom collections import Counter\n\nimport time\nimport timeit\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\n#from pegasospak import pegasos\nfrom sklearn import preprocessing\nfrom sklearn import cross_validation as cv\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.datasets import load_digits\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nimport pandas as pd\n\n\n\nprint('hello world!')\nacc_headers = ['XMAX', 'XMIN', 'XSTND', 'XAVG', 'XOFFSET', 'XFRQ', 'XENERGYSTND','X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'X9','XQUANTILE20', 'XQUANTILE40', 'XQUANTILE60', 'XQUANTILE80', 'YMAX','YMIN', 'YSTND', 'YAVG', 'YOFFSET', 'YFRQ', 'YENERGYSTND', 'Y0','Y1', 'Y2', 'Y3', 'Y4', 'Y5', 'Y6', 'Y7', 'Y8', 'Y9', 'YQUANTILE20','YQUANTILE40', 'YQUANTILE60', 'YQUANTILE80', 'ZMAX', 'ZMIN','ZSTND', 'ZAVG', 'ZOFFSET', 'ZFRQ', 'ZENERGYSTND', 'Z0', 'Z1', 'Z2','Z3', 'Z4', 'Z5', 'Z6',\n 'Z7', 'Z8', 'Z9', 'ZQUANTILE20','ZQUANTILE40', 'ZQUANTILE60', 'ZQUANTILE80', 'ACC_MAGNITUDE_MAX','ACC_MAGNITUDE_MIN', 'ACC_MAGNITUDE_STND', 'ACC_MAGNITUDE_AVG','ACC_MAGNITUDE_OFFSET', 'ACC_MAGNITUDE_FRQ','ACC_MAGNITUDE_ENERGYSTND', 'ACC_MAGNITUDE_0', 'ACC_MAGNITUDE_1','ACC_MAGNITUDE_2', 'ACC_MAGNITUDE_3', 'ACC_MAGNITUDE_4','ACC_MAGNITUDE_5', 'ACC_MAGNITUDE_6', 'ACC_MAGNITUDE_7','ACC_MAGNITUDE_8', 'ACC_MAGNITUDE_9', 'ACC_MAGNITUDE_QUANTILE20','ACC_MAGNITUDE_QUANTILE40',\n 'ACC_MAGNITUDE_QUANTILE60','ACC_MAGNITUDE_QUANTILE80']\nrotation_headers = ['X_Rotation_MAX', 'X_Rotation_MIN', 'X_Rotation_STND', 'X_Rotation_AVG', 'X_Rotation_OFFSET','X_Rotation_FRQ', 'X_Rotation_ENERGYSTND', 'X_Rotation_0', 'X_Rotation_1', 'X_Rotation_2', 'X_Rotation_3', 'X_Rotation_4','X_Rotation_5', 'X_Rotation_6', 'X_Rotation_7', 'X_Rotation_8','X_Rotation_9', 'X_Rotation_QUANTILE20', 'X_Rotation_QUANTILE40', 'X_Rotation_QUANTILE60', 'X_Rotation_QUANTILE80', 'Y_RotationMAX','Y_RotationMIN', 'Y_RotationSTND',\n 'Y_RotationAVG','Y_RotationOFFSET', 'Y_RotationFRQ', 'Y_RotationENERGYSTND', 'Y_Rotation0', 'Y_Rotation1', 'Y_Rotation2', 'Y_Rotation3','Y_Rotation4', 'Y_Rotation5', 'Y_Rotation6', 'Y_Rotation7','Y_Rotation8', 'Y_Rotation9', 'Y_RotationQUANTILE20','Y_RotationQUANTILE40', 'Y_RotationQUANTILE60','Y_RotationQUANTILE80', 'Z_RotationMAX', 'Z_RotationMIN','Z_RotationSTND', 'Z_RotationAVG', 'Z_RotationOFFSET','Z_RotationFRQ', 'Z_RotationENERGYSTND', 'Z_Rotation0',\n 'Z_Rotation1', 'Z_Rotation2', 'Z_Rotation3', 'Z_Rotation4','Z_Rotation5', 'Z_Rotation6', 'Z_Rotation7', 'Z_Rotation8','Z_Rotation9', 'Z_RotationQUANTILE20', 'Z_RotationQUANTILE40','Z_RotationQUANTILE60', 'Z_RotationQUANTILE80']\nlight_headers = ['Screen_brightness_MAX', 'Screen_brightness_MIN','Screen_brightness_STND', 'Screen_brightness_AVG']\npressure_headers = ['pressure_MAX','pressure_MIN', 'pressure_STND', 'pressure_AVG', 'magEnergy_MAX']\nmag_headers = ['magEnergy_MIN', 'magEnergy_STND', 'magEnergy_AVG','magEnergy_FREQ', 'magEnergy_OFFSET']\n\ndf_header = acc_headers + rotation_headers + light_headers + pressure_headers + mag_headers + ['UID','class']\n\ndef prepareTravelData():\n android_winter_data_file = '/Users/Xing/Dropbox/TravelData/data/Android_winter_allmode_segLen8_lag2_arbpm_normalize_uid_2017-03-04.csv'\n android_summer_data_file = '/Users/Xing/Dropbox/TravelData/data/Android_summer_allmode_segLen8_lag2_arbpm_normalize_uid_2017-03-04.csv'\n iphone_data_file = 
'/Users/Xing/Dropbox/TravelData/data/iPhone_summer_allmode_segLen8_lag2_arbpm_normalize_uid_2017-03-02.csv'\n df_iphone = pd.read_csv(iphone_data_file,header = None)\n df_android_summer = pd.read_csv(android_summer_data_file,header = None)\n df_android_winter = pd.read_csv(android_winter_data_file,header = None)\n df_all = pd.concat([df_iphone, df_android_summer,df_android_winter])\n df_all.columns = df_header\n df_android_winter.columns = df_header\n df_android_summer.columns = df_header\n df_iphone.columns = df_header\n return df_all,df_android_winter,df_android_summer,df_iphone\n\n\n\ndef similar_data_weight_prepare(uid,topn,C,df,xi,ratio,ns_similarity_matrix,cz = False):\n print('current uid is:',uid)\n scaler = preprocessing.StandardScaler().fit(df.as_matrix()[:,:-2].astype(float))\n ## for test data\n print('prepare T')\n test_df = df.loc[df['UID'] == int(uid)]\n test = test_df.as_matrix()\n test_x = test[:,:-2]\n test_y = test[:,-1]\n print('Test data summary:')\n print(Counter(test_y))\n test_x_scaled = scaler.transform(test_x.astype(float))\n test_x_scaled, train_x_addin, test_y,train_y_addin = cv.train_test_split(test_x_scaled,test_y,test_size = ratio,random_state = 42)\n #get data of similarity, as well as non_similar_data.\n top_n_learning_data, non_similar_data = al.get_similar_data_from_mode_uid_pair_rank(uid,topn,df,xi,train_x_addin,ns_similarity_matrix,size_control = cz)\n cm = pd.unique(df.loc[df['UID'] == int(uid),'class'])\n\n ## for training data:\n train = top_n_learning_data.as_matrix()\n train_x = train[:,:-2] #because #2 is the uid\n train_y = train[:,-1]\n print('Top n similar learning data summary:')\n print(Counter(train_y))\n\n\n ### normalize (is there a better way to normalize?):\n print('************************************************************************')\n train_x_scaled = scaler.transform(train_x.astype(float))\n ##### Here we split test data into two different sets: one part uses as labeled data, for sample reweigting. The other part is for testing.\n ### the test_size in train_test_split is correspond to train_x_addin (which is the ratio in T we use for transfer learning)\n #weight = al.get_theoretical_weights(train_x_scaled,train_y,train_x_addin,train_y_addin,C)\n train_y = np.append(train_y, train_y_addin,0)\n train_x_scaled = np.append(train_x_scaled,train_x_addin,0)\n #max_w_s = max(weight) + 1\n #weight += [max_w_s for i in range(len(train_y_addin))]\n return train_x_scaled, train_y, test_x_scaled, test_y#,weight, max_w_s\n\n\n### for every batch, first verify the model on it, and then update the model with this batch , and test on next batch. 
and so forth.\ndef online_offline_learning(uid,topn,C,df,xi,ratio,ns_similarity_matrix,cz = False):\n train_x_scaled, train_y,test_x_scaled, test_y = similar_data_weight_prepare(uid,topn,C,df,xi,ratio,ns_similarity_matrix,cz)\n online_acc_list = []\n offline_acc_list = []\n ##### SGD data from top 3 similar uids:\n random_state = np.random.RandomState(0)\n clf_online = SGDClassifier(loss = 'hinge',penalty = 'l2',shuffle = True,n_iter = 100,random_state = random_state)#,class_weight = 'balanced')\n clf_offline = SGDClassifier(loss = 'hinge',penalty = 'l2',shuffle = True,n_iter = 100,random_state = random_state)#,class_weight = 'balanced')\n clf_online.partial_fit(train_x_scaled, train_y,classes = np.unique(train_y))#,sample_weight = weight)\n clf_offline.fit(train_x_scaled,train_y)#,sample_weight=weight)\n batch_size = 50\n batch_start_index = 0\n print(\"total length of the available data:\",len(test_x_scaled))\n while batch_start_index + batch_size < len(test_x_scaled):\n print(\"Current batch starts at:\",batch_start_index)\n batch_end_index = batch_start_index + batch_size\n online_predict = clf_online.predict(test_x_scaled[batch_start_index:batch_end_index,:])\n online_acc = sum(online_predict == test_y[batch_start_index:batch_end_index])*1.0/len(test_y[batch_start_index:batch_end_index])\n print(\"\\033[1;31;40m top similar data accuracy: %f\\033[0;32;40m\" % online_acc)\n\n offline_predict = clf_offline.predict(test_x_scaled[batch_start_index:batch_end_index])\n offline_acc = sum(offline_predict == test_y[batch_start_index:batch_end_index])*1.0/len(test_y[batch_start_index:batch_end_index])\n print(\"\\033[1;31;40m top similar data accuracy: %f\\033[0;32;40m\" % offline_acc)\n online_acc_list.append(online_acc)\n offline_acc_list.append(offline_acc)\n #### online updating with new data\n clf_online.partial_fit(test_x_scaled[batch_start_index:batch_end_index,:], test_y[batch_start_index:batch_end_index])#,sample_weight = max_w_s)\n #### prepare new data for offline training\n train_y = np.append(train_y, test_y[batch_start_index:batch_end_index],0)\n train_x_scaled = np.append(train_x_scaled,test_x_scaled[batch_start_index:batch_end_index,:],0)\n #weight += [max_w_s for i in range(batch_size)]\n #### offline retrain with updated data\n clf_offline.fit(train_x_scaled,train_y)#,sample_weight = weight)\n batch_start_index = batch_end_index\n return online_acc_list, offline_acc_list\n\n### separate the train_x_scaled into inital_training and add_up training.\ndef online_offline_learning2(uid,topn,C,df,xi,ratio,ns_similarity_matrix,cz = False):\n train_x_scaled, train_y,test_x_scaled, test_y = similar_data_weight_prepare(uid,topn,C,df,xi,ratio,ns_similarity_matrix,cz)\n train_x_scaled_initial,train_x_addup,train_y_initial,train_y_addup = cv.train_test_split(train_x_scaled,train_y,test_size = 0.5, random_state = 1)\n online_acc_list = []\n offline_acc_list = []\n ##### SGD data from top 3 similar uids:\n random_state = np.random.RandomState(0)\n clf_online = SGDClassifier(loss = 'hinge',penalty = 'l2',shuffle = True,n_iter = 100,random_state = random_state)#,class_weight = 'balanced')\n clf_offline = SGDClassifier(loss = 'hinge',penalty = 'l2',shuffle = True,n_iter = 100,random_state = random_state)#,class_weight = 'balanced')\n clf_online.partial_fit(train_x_scaled_initial, train_y_initial,classes = np.unique(train_y))#,sample_weight = weight)\n clf_offline.fit(train_x_scaled_initial,train_y_initial)#,sample_weight=weight)\n batch_size = 20\n batch_start_index = 0\n print(\"total length of 
the available data:\",len(test_x_scaled))\n while batch_start_index + batch_size < len(test_x_addup):\n print(\"Current batch starts at:\",batch_start_index)\n batch_end_index = batch_start_index + batch_size\n online_predict = clf_online.predict(test_x_scaled)\n online_acc = sum(online_predict == test_y)*1.0/len(test_y)\n print(\"\\033[1;31;40m top similar data accuracy: %f\\033[0;32;40m\" % online_acc)\n\n offline_predict = clf_offline.predict(test_x_scaled)\n offline_acc = sum(offline_predict == test_y)*1.0/len(test_y)\n print(\"\\033[1;31;40m top similar data accuracy: %f\\033[0;32;40m\" % offline_acc)\n online_acc_list.append(online_acc)\n offline_acc_list.append(offline_acc)\n #### online updating with new data\n clf_online.partial_fit(train_x_addup[batch_start_index:batch_end_index,:], train_y_addup[batch_start_index:batch_end_index])#,sample_weight = max_w_s)\n #### prepare new data for offline training\n train_y = np.append(train_y_initial, train_y_addup[batch_start_index:batch_end_index],0)\n train_x_scaled_initial = np.append(train_x_scaled_initial,train_x_addup[batch_start_index:batch_end_index,:],0)\n #weight += [max_w_s for i in range(batch_size)]\n #### offline retrain with updated data\n clf_offline.fit(train_x_scaled,train_y)#,sample_weight = weight)\n batch_start_index = batch_end_index\n return online_acc_list, offline_acc_list\n\ndef test_online_offline_similarity(topn,C,xi,ratio,merge_minor = True,hierarchy = False):\n df_all,df_android_winter,df_android_summer,df_iphone = prepareTravelData()\n acc = []\n if merge_minor:\n df_all.loc[df_all['UID'] == 4,['UID']] = 1\n df_all.loc[df_all['UID'] == 6,['UID']] = 2\n df_all.loc[df_all['UID'] == 7,['UID']] = 3\n uids = sorted(pd.unique(df_all['UID']))\n uu = [1,2,3,5,8,9,10,21,22,23,31,32,33]\n if hierarchy:\n for u in uids:\n test_usr_mode_similarity_hierarchical(u,topn,C,df_all,cz = False)\n else:\n for u in uu:\n acc.append(test_usr_mode_similarity(u,topn,C,df_all,xi,ratio,ns_similarity_matrix,cz=True))\n tops_acc = [e[0] for e in acc]\n random_acc = [e[1] for e in acc]\n weighted_tops_acc = [e[2] for e in acc]\n rest_uid_acc = [e[3] for e in acc]\n rest_uid_weighted_acc = [e[4] for e in acc]\n ratio_compare_acc = [e[5] for e in acc]\n xi_text = str(xi*10)\n ratio_text = str(int(ratio*100))\n file_name = '../data/T_add_weight1_usr_mode_ratio_' + ratio_text+'_top_'+ str(topn) +'_performance_weighted_vs_random_C0dot003_xi'+xi_text+'.csv'\n result = pd.DataFrame({'uid':uu,'tops_acc':tops_acc,'random_acc':random_acc,'weighted_tops_acc':weighted_tops_acc,'rest_uid_acc':rest_uid_acc,'weighted_rest_uid_acc':rest_uid_weighted_acc,'ratio_compare_acc':ratio_compare_acc})\n result.to_csv(file_name, sep=',')\n print('finish generating file %s' % file_name)\n\n\ndef usr_online_offline_learning_comparision():\n df_all,df_android_winter,df_android_summer,df_iphone = prepareTravelData()\n mergeMinor = True\n #uid = 8\n #paramter_performance_tour(uid)\n ratio = 0.02\n ratio_text = str(int(ratio*100))\n xi = 0.2\n xi_text = str(xi*10)\n C = 0.003\n topn = 3\n ns_similarity_matrix = al.similarity_calc.calculate_ns_similarity(xi)\n for u in [1]:\n online_acc_list,offline_acc_list = online_offline_learning(u,topn,C,df_all,xi,ratio,ns_similarity_matrix,cz = False)\n file_name = '../data/usr_' + str(u) + '_weighted_ratio_' + ratio_text+'_top_'+ str(topn) +'_online_offline_performance_C0dot003_xi'+xi_text+'.csv'\n result = pd.DataFrame({'online_acc':online_acc_list,'offline_acc':offline_acc_list})\n result.to_csv(file_name, sep=',')\n 
print('finish generating file %s' % file_name)\n\n\nif __name__ == '__main__':\n usr_online_offline_learning_comparision()\n\n\n","sub_path":"online_offline_compare.py","file_name":"online_offline_compare.py","file_ext":"py","file_size_in_byte":13873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"474853687","text":"import unittest\n\n\n\nclass TestHandlerCase(unittest.TestCase):\n\n def test_response(self):\n print(\"testing response.\")\n #result = index.handler(None, None)\n #print(result)\n #self.assertEqual(result['statusCode'], 200)\n #self.assertEqual(result['headers']['Content-Type'], 'application/json')\n #self.assertIn('Hello World', result['body'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"28717077","text":"blog_1 = 'I am awesome'\nblog_2 = 'Cars are cool'\nblog_3 = 'Cat pics!'\n\nsite_title = 'My Blog title'\n\n\ndef blog_posts(title, *args):\n print(title)\n for post in args:\n print(post)\n\n\nblog_posts(site_title, blog_1, blog_2, blog_3)\n\n\ndef blog_title_posts(title, *args, **kwargs):\n print(title)\n for i in args:\n print(i)\n for p_title, posts in kwargs.items():\n print(p_title, posts)\n\n\nblog_title_posts(site_title, 1, 2, 3, blog_1='I am awesome', blog_2 = 'Cars are cool', blog_3 = 'Cat pics!')\n\n\ndef graph_op(x, y):\n print('function that graphs {} and {}'.format(str(x), str(y)))\n\n\nx1 = [1, 2, 3]\ny1 = [2, 3, 1]\n\ngraph_me = [x1, y1]\n\ngraph_op(*graph_me)\n\n","sub_path":"__intermediate/sentdex/intermediate/09argskwargs.py","file_name":"09argskwargs.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"444543849","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2005-2007 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import get\n\nWorkDir=\"util-linux-ng-%s\" % get.srcVERSION().replace(\"_\",\"-\")\n\ndef setup():\n shelltools.export(\"SUID_CFLAGS\", \"-fpie\")\n shelltools.export(\"SUID_LDFLAGS\", \"-pie\")\n\n autotools.configure('--prefix=/ \\\n --enable-nls \\\n --enable-agetty \\\n --enable-cramfs \\\n --enable-partx \\\n --enable-raw \\\n --enable-rdev \\\n --enable-rename \\\n --enable-write \\\n --with-fsprobe=blkid \\\n --with-audit \\\n --disable-init \\\n --disable-kill \\\n --disable-last \\\n --disable-mesg \\\n --disable-reset \\\n --disable-login-utils \\\n --disable-wall \\\n --disable-static')\n\ndef build():\n autotools.make()\n\ndef install():\n autotools.rawInstall(\"DESTDIR=%s\" % get.installDIR())\n\n pisitools.dodoc(\"HISTORY\", \"MAINTAINER\", \"README\", \"VERSION\", \"example.files/*\")\n","sub_path":"pardus/tags/2008.1/system/base/util-linux/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"339101105","text":"import json\nimport psycopg2 as pg\nimport os\n\nclass Database:\n \"\"\" Handles the main connection to the database of the app setting \"\"\"\n def __init__(self, db_url):\n self.db_url = db_url\n self.cur = None\n self.conn = None\n\n def connect(self):\n self.conn = pg.connect(self.db_url)\n self.cur = self.conn.cursor()\n\n def execute_insert(self,query,name):\n try:\n self.cur.execute(query, (name,))\n self.conn.commit()\n print(\" Added {} to group_members table.\".format(name))\n except Exception as e:\n self.conn.rollback()\n raise e\n\n def close(self):\n self.cur.close()\n self.conn.close()\n \n def execute_select(self,query):\n self.cur.execute(query)\n return self.cur.fetchall()\n\n \"\"\" Add/Alter/Drop DB Tables \"\"\" \n def create_group_table(self):\n \"\"\" creates group members tables \"\"\"\n query = \"\"\"CREATE TABLE group_members(Id serial PRIMARY KEY,name varchar(100) NOT NULL)\"\"\"\n self.cur.execute(query)\n self.conn.commit()\n\n def drop_group_table(self):\n \"\"\" Deletes group table in the app \"\"\"\n query = \"\"\"DROP TABLE IF EXISTS group_members;\"\"\"\n self.cur.execute(query)\n self.conn.commit\n\ndb = os.getenv('DATABASE_URL')\ndbConn = Database(db)\ndbConn.connect()","sub_path":"dbOperations.py","file_name":"dbOperations.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"176479961","text":"def intersection(arrays):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n occurrences = {}\n\n # print(arrays)\n\n for array in arrays:\n\n for number in array:\n if number in occurrences:\n occurrences[number] += 1\n else:\n occurrences[number] = 1\n\n # for data in occurrences.items():\n # # data[0] == key\n # # data[1] == value\n # if data[1] == len(arrays):\n # return data\n return [data[0] for data in occurrences.items() if data[1] == len(arrays)]\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"197178214","text":"import sys\nimport socket\nfrom time import sleep\nfrom datetime import datetime\nimport json\nimport threading\n\nfrom PyQt5.QtCore import pyqtSignal, Qt\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QStackedWidget, QLabel, QLineEdit, QPlainTextEdit, QSpinBox, QTextBrowser, QPushButton, QVBoxLayout,QSplitter, QSpacerItem, QSizePolicy)\n\n\nclass LoginWidget(QWidget):\n onLogin = pyqtSignal(str, str, int)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._initUi()\n self._initSignals()\n self._initLayouts()\n\n def _initUi(self):\n #всплывающее сообщение\n self.flashMessage = QLabel(self)\n self.flushMessage()\n\n self.usernameEdit = QLineEdit(self)\n self.hostEdit = QLineEdit('127.0.0.1', self)\n\n self.portEdit = QSpinBox(self)\n self.portEdit.setMaximum(65535)\n self.portEdit.setMinimum(100)\n self.portEdit.setValue(6666)\n\n self.loginBtn = QPushButton('Войти', self)\n\n def showMessage(self, message):\n self.flashMessage.setText(message)\n self.flashMessage.show()\n\n def flushMessage(self):\n self.flashMessage.clear()\n self.flashMessage.hide()\n\n\n def _initSignals(self):\n self.loginBtn.clicked.connect(self.onClickLoginBtn)\n\n def onClickLoginBtn(self):\n username = self.usernameEdit.text()\n\n if username:\n self.onLogin.emit(username, self.hostEdit.text(), self.portEdit.value())\n else:\n self.showMessage('введите логин')\n\n\n\n def _initLayouts(self):\n self.spacer_1 = QSpacerItem(0, 0, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.mainLayout = QVBoxLayout(self)\n self.mainLayout.addItem(self.spacer_1)\n\n self.mainLayout.addWidget(self.flashMessage)\n self.mainLayout.addWidget(self.usernameEdit)\n self.mainLayout.addWidget(self.hostEdit)\n self.mainLayout.addWidget(self.portEdit)\n self.mainLayout.addWidget(self.loginBtn)\n\n\n self.spacer_2 = QSpacerItem(0, 0, QSizePolicy.Minimum, QSizePolicy.Expanding)\n self.mainLayout.addItem(self.spacer_2)\n\n\n\n\nclass ChatWidget(QWidget):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._initUi()\n self._initSignals()\n self._initLayouts()\n\n def _initUi(self):\n pass\n\n def _initSignals(self):\n pass\n\n def _initLayouts(self):\n pass\n\n\nclass ChatClient(QMainWindow):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__clientSocket = None\n\n self._initUi()\n self._initSignals()\n self._initLayouts()\n\n def _initUi(self):\n self.loginWidget = LoginWidget(self)\n self.chatWidget = ChatWidget(self)\n\n self.stackedWidget = QStackedWidget(self)\n self.stackedWidget.addWidget(self.loginWidget)\n self.stackedWidget.addWidget(self.chatWidget)\n self.stackedWidget.setCurrentWidget(self.loginWidget)\n\n self.setCentralWidget(self.stackedWidget)\n\n self.resize(600, 400)\n\n def _initSignals(self):\n self.loginWidget.onLogin.connect(self.connect)\n\n def _initLayouts(self):\n pass\n\n def connect(self, username, host, port):\n self.__clientSocket = sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n sock.connect((host, port))\n #создаём поток\n thread = threading.Thread(target=self.wait, daemon = True)\n thread.start()\n\n self.stackedWidget.setCurrentWidget(self.chatWidget)\n except:\n self.loginWidget.showMessage('Сервер не доступен')\n return False\n\n def disconnect(self):\n if self.__clientSocket:\n self.__clientSocket.close()\n self.__clientSocket = None\n\n def wait(self):\n if self.__clientSocket is None:\n return\n while 1:\n message = 
self.__clientSocket.recv(4096)\n\n if not message:\n self.disconnect()\n break\n\n #\n\n sleep(1)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = ChatClient()\n w.show()\n\n sys.exit(app.exec_())","sub_path":"QT/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"197536917","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/fabrom/workspace/jability-python-package/jabilitypyup/test_common.py\n# Compiled at: 2013-05-25 04:38:30\nimport datetime, os, shutil, time\nmodule_dir = os.path.dirname(__file__)\ntests_dir = os.path.join(module_dir, 'tests/temp/')\ntest_filename_template = 'test_%d.tmp'\ntest_qty = 10\n\ndef now():\n return datetime.datetime.now()\n\n\ndef now_str(format='%Y%m%d%H%M%S'):\n return now().strftime(format)\n\n\ndef def_times(qty, daytimedelta=True):\n today = now()\n tlist = list()\n if daytimedelta:\n for i in range(qty):\n pastday = today - datetime.timedelta(days=i)\n atime = int(time.mktime(pastday.timetuple()))\n mtime = atime\n times = (atime, mtime)\n tlist.append(times)\n\n else:\n for i in range(qty):\n pastday = today - datetime.timedelta(minutes=i * 5)\n atime = int(time.mktime(pastday.timetuple()))\n mtime = atime\n times = (atime, mtime)\n tlist.append(times)\n\n return tlist\n\n\ndef init_dir(daydelta=True):\n shutil.rmtree(tests_dir)\n if not os.path.exists(tests_dir):\n os.makedirs(tests_dir)\n i = 0\n flist = list()\n for times in def_times(test_qty, daydelta):\n fpath = os.path.join(tests_dir, test_filename_template % i)\n open(fpath, 'w').close()\n os.utime(fpath, times)\n i += 1\n flist.append(fpath)\n\n return flist\n\n\ndef clean_dir(flist, delete_base_dir=False):\n for file in flist:\n if os.path.exists(file):\n os.remove(file)\n\n if delete_base_dir:\n os.rmdir(tests_dir)\n\n\ndef count_existing_files(flist):\n i = 0\n for file in flist:\n if os.path.exists(file):\n i += 1\n\n return i\n\n\ndef test_tests_functions():\n files = init_dir()\n assert count_existing_files(files) == test_qty\n clean_dir(files)\n assert count_existing_files(files) == 0\n\n\nif __name__ == '__main__':\n import doctest, nose\n doctest.testmod()\n nose.main()","sub_path":"pycfiles/jability-pyup-1.1.7.tar/test_common.py","file_name":"test_common.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"581601711","text":"from difflib import get_close_matches\nimport os\nfrom pathlib import Path\nfrom thefuck.utils import sudo_support\nfrom thefuck.shells import thefuck_alias, get_aliases\n\n\ndef _safe(fn, fallback):\n try:\n return fn()\n except OSError:\n return fallback\n\n\ndef _get_all_callables():\n tf_alias = thefuck_alias()\n return [exe.name\n for path in os.environ.get('PATH', '').split(':')\n for exe in _safe(lambda: list(Path(path).iterdir()), [])\n if not _safe(exe.is_dir, True)] + [\n alias for alias in get_aliases() if alias != tf_alias]\n\n\n@sudo_support\ndef match(command, settings):\n return 'not found' in command.stderr and \\\n bool(get_close_matches(command.script.split(' ')[0],\n _get_all_callables()))\n\n\n@sudo_support\ndef get_new_command(command, settings):\n old_command = command.script.split(' ')[0]\n new_command = get_close_matches(old_command,\n _get_all_callables())[0]\n return ' '.join([new_command] + command.script.split(' ')[1:])\n\n\npriority = 3000\n","sub_path":"thefuck/rules/no_command.py","file_name":"no_command.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"490520661","text":"import csv\nimport sqlite3\nimport pandas\nimport os\n\npath = r\"D:\\code\\MRparse\\MR_parser\\get_point\\road.csv\"\n# conn = sqlite3.connect(r'E:\\mr\\MDT\\mdt+\\db_grid.db',check_same_thread=False)\nconn = sqlite3.connect(r':memory:',check_same_thread=False)\ndf = pandas.read_csv(path, encoding='utf-8')\ndf.to_sql('mdt', conn, if_exists='append', index=False)\n#\n# path1 = r\"E:\\mr\\MDT\\cellname.csv\"\n# df1 = pandas.read_csv(path1, encoding='utf-8')\n# df1.to_sql('cellname', conn, if_exists='append', index=False)\n\n\nf = open(r\"D:\\code\\MRparse\\MR_parser\\get_point\\sql_DT_潮安.sql\", encoding='utf-8-sig')\nsql_scr = f.read()\ncu = conn.cursor()\ncu.execute(sql_scr)\nf = open(r'E:\\mr\\MDT\\mdt+\\DT_潮安.csv','w', newline='')\nf_csv = csv.writer(f)\nf_csv.writerows(cu.fetchall())\n","sub_path":"get_point/get_point.py","file_name":"get_point.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"177987712","text":"import typing\r\nfrom NHentaidesu.scaffold import Scaffold\r\nfrom NHentaidesu import types\r\nfrom NHentaidesu.errors import NotFound\r\n\r\nfrom typing import List\r\nfrom datetime import datetime\r\n\r\nclass GetTagInfo(Scaffold):\r\n async def get_tag(\r\n self,\r\n tag_id: int,\r\n page: int = 1,\r\n offset: int = 0,\r\n limit: int = 10\r\n ) -> List[\"types.DoujinInfo\"]:\r\n if offset > limit:\r\n offset = 0\r\n \r\n res = await self.request_api(\r\n f\"/galleries/tagged\",\r\n params={\r\n 'tag_id': str(tag_id),\r\n 'page': str(page)\r\n }\r\n )\r\n\r\n if res['result'] == []:\r\n raise NotFound(\"Query not found.\")\r\n\r\n pages = types.List()\r\n doujins = types.List()\r\n split_tag = {}\r\n merge_tag = types.List()\r\n \r\n for res in res['result'][offset:limit]:\r\n\r\n for img, num in zip(res['images']['pages'], range(len(res['images']['pages']))):\r\n if img['t'] == 'j':\r\n ext = 'jpg'\r\n else:\r\n ext = 'png'\r\n pages.append(f\"{self.IMG_URL}/galleries/{res['media_id']}/{num+1}.{ext}\")\r\n\r\n for tag in res['tags']:\r\n tp = tag['type']\r\n if tp not in split_tag:\r\n split_tag[tp] = []\r\n tag['url'] = f\"{self.HOME_URL}{tag['url']}\"\r\n split_tag[tp].append(tag)\r\n\r\n merge_tag.append(\r\n types.MergedTags._parse(\r\n self,\r\n id=tag['id'],\r\n type=tag['type'],\r\n name=tag['name'],\r\n url=tag['url'],\r\n count=tag['count'],\r\n )\r\n )\r\n\r\n if res['images']['cover'] == 'j':\r\n ext = '.jpg'\r\n else:\r\n ext = '.png'\r\n images = types.DoujinImage._parse(\r\n self,\r\n pages=pages,\r\n cover=f\"{self.THUMB_URL}/galleries/{res['media_id']}/cover{ext}\",\r\n thumbnail=f\"{self.THUMB_URL}/galleries/{res['media_id']}/1{ext}\"\r\n )\r\n\r\n doujins.append(\r\n types.DoujinInfo._parse(\r\n self,\r\n id=res['id'],\r\n media_id=res['media_id'],\r\n title=res['title'],\r\n images=images,\r\n scanlator=None if res['scanlator'] == \"\" else res['scanlator'],\r\n released=datetime.fromtimestamp(res['upload_date']),\r\n tags=types.DoujinTags._parse(\r\n self,\r\n split=split_tag,\r\n merge=merge_tag\r\n ),\r\n pages=res['num_pages'],\r\n favorites=res['num_favorites']\r\n )\r\n )\r\n return doujins\r\n","sub_path":"NHentaidesu/methods/general/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"160292019","text":"# Bézier curves, elliptical arcs, lines, ellipses and paths\n# Parcly Taxel / Jeremy Tan, 2017\n# https://parclytaxel.tumblr.com\nimport numpy as np\nfrom cmath import rect, polar\nfrom collections import deque\nfrom .matrices import mapp, pmatrix, ptranslate, pscale, protate, svg2mat\nfrom .algebra import ccquad, newton\nfrom .regexes import pcomm_re, num_re, flt\nPol, tau = np.polynomial.Polynomial, 2 * np.pi\n\nclass ellipt:\n def __init__(self, v, endpoints = []):\n \"\"\"Constructs the ellipse from its transformation from the unit circle and (optionally)\n a pair of endpoints representing t-parameters on that unit circle (0 at start, 1 at end, radians).\n The curve runs in the positive-t direction iff the second endpoint is larger than the first.\"\"\"\n self.mat = v.copy()\n self.endpoints = list(endpoints)\n self.lenf = lambda t: np.hypot(*(self.mat[:2,:2] @ np.stack([-np.sin(t), np.cos(t)])))\n def __str__(self): return(f\"ellipt({self.mat},{self.endpoints})\")\n def __repr__(self): return str(self)\n \n def getends(self): return self.endpoints if self.endpoints else (0, tau)\n def __call__(self, t):\n start, end = self.getends()\n u = start * (np.ones_like(t) - t) + np.array(end) * t\n return self.mat @ (np.cos(u) + 1j * np.sin(u))\n def __getitem__(self, z):\n if type(z) == slice:\n start, end = self.getends()\n t0 = 0 if z.start == None else z.start\n t1 = 1 if z.stop == None else z.stop\n return ellipt(self.mat, [start * (1 - t0) + end * t0, start * (1 - t1) + end * t1])\n return self(z)\n def __neg__(self): return ellipt(self.mat, self.endpoints[::-1])\n def __rmatmul__(self, m): return ellipt(m @ self.mat, self.endpoints)\n def wrap(self, p):\n \"\"\"Wraps p into the ellipse's endpoints by adding integer multiples of tau. 
Returns the smallest possible t-parameter.\"\"\"\n        start, end = self.getends()\n        return (p - start) % (tau * (-1) ** (end < start)) / (end - start)\n\n    def fromsvg(head, data, cursor):\n        \"\"\"Constructs the corresponding elliptical arc from the given SVG path data.\"\"\"\n        end = complex(data[5], data[6]) + (cursor if head == \"a\" else 0)\n        if cursor == end: return None\n        if data[0] == 0 or data[1] == 0: return bezier(cursor, end)\n        rx, ry, theta, large, sweep = abs(data[0]), abs(data[1]), data[2], bool(data[3]), bool(data[4])\n        endprime = pscale(1 / rx, 1 / ry) @ protate(-theta) @ (end - cursor)\n        middle = endprime / 2\n        midabs, midarg = polar(middle)\n        if midabs >= 1: # Unit circle is too small to span the endpoints, so we scale it up until the endpoints are a diameter (fixed: the condition used the undefined name middleabs)\n            res = ellipt(pmatrix(midabs, 0, 0, midabs, middle.real, middle.imag), [midarg + (-1) ** sweep * np.pi, midarg])\n        else:\n            centre = rect(1, midarg + (-1) ** (large == sweep) * np.arccos(midabs))\n            t = [np.angle(-centre), np.angle(endprime - centre)]\n            if t[0] > t[1] == sweep: t[min(t) > 0 != sweep] += (-1) ** (min(t) > 0) * tau\n            res = ellipt(pmatrix(1, 0, 0, 1, centre.real, centre.imag), t)\n        res = protate(theta) @ pscale(rx, ry) @ res\n        return res, end\n    def orthogonal(self):\n        \"\"\"Returns a visually identical ellipse or elliptical arc with orthogonal direction vectors.\"\"\"\n        u, sigma, v = np.linalg.svd(self.mat[:2,:2])\n        if u[0,1] * u[1,0] > 0: u[:,1], v[1] = -u[:,1], -v[1] # u is a reflection matrix, right-multiply it and left-multiply v by [[1, 0], [0, -1]] so u becomes a rotation\n        return ellipt(np.block([[u * sigma, self.mat[:2,2:]], [0, 0, 1]]), [np.arcsin(v[1,0]) + t * (-1) ** (v[0,1] * v[1,0] > 0) for t in self.endpoints])\n\n    def orthogonalise(tag, attrs):\n        \"\"\"Ignoring style, orthogonalises the given circle/ellipse (represented by its tag and attribute dictionary) so only a simple rotation is used.\n        Returns the new tag and attribute dictionary.\"\"\"\n        if tag.endswith(\"circle\"): rs = pscale(float(attrs[\"r\"]))\n        else: rs = pscale(float(attrs[\"rx\"]), float(attrs[\"ry\"]))\n        m = ellipt(svg2mat(attrs.get(\"transform\", \"\")) @ ptranslate(float(attrs.get(\"cx\", \"0\")), float(attrs.get(\"cy\", \"0\"))) @ rs).orthogonal().mat\n        rx, theta, ry, centre = *polar(complex(*m[:2,0])), abs(complex(*m[:2,1])), complex(*m[:2,2])\n        rx, ry = flt(rx), flt(ry)\n        if rx == ry: tagout, attrout = \"circle\", {\"r\": rx}\n        else:\n            tagout, attrout = \"ellipse\", {\"rx\": rx, \"ry\": ry}\n            centre = centre * rect(1, -theta)\n            theta = flt(np.degrees(theta))\n            if theta != \"0\": attrout[\"transform\"] = f\"rotate({theta})\"\n        cx, cy = flt(centre.real), flt(centre.imag)\n        if cx != \"0\": attrout[\"cx\"] = cx\n        if cy != \"0\": attrout[\"cy\"] = cy\n        return tagout, attrout\n\n    def quadric(self):\n        \"\"\"Return the quadric (Cartesian) representation of this ellipse [a, b, c, d, e, f] with ax²+bxy+cy²+dx+ey+f=0.\"\"\"\n        xv, yv = (~self.mat)[:2]\n        zz = np.outer(xv, xv) + np.outer(yv, yv)\n        return [zz[0,0], 2 * zz[0,1], zz[1,1], 2 * zz[0,2], 2 * zz[1,2], zz[2,2] - 1]\n    def fromquadric(q):\n        \"\"\"Return the ellipse with the given quadric representation and (since many affine forms map to the same quadric) orthogonal direction vectors.\"\"\"\n        qm = np.array([[q[0], q[1] / 2, q[3] / 2], [q[1] / 2, q[2], q[4] / 2], [q[3] / 2, q[4] / 2, q[5]]])\n        mqf = qm[:2,:2]\n        detqm, detmqf = np.linalg.det(qm), np.linalg.det(mqf)\n        if detmqf <= 0 or (q[0] + q[2]) * detqm >= -1e-12: raise ValueError(\"not a real ellipse\")\n        eigenvalues, 
eigenvectors = np.linalg.eig(mqf)\n dirvecs = (-detmqf / detqm * eigenvalues) ** -0.5 * eigenvectors\n centre = np.linalg.inv(mqf) @ -qm[:2,2:]\n return ellipt(np.block([[dirvecs, centre], [0, 0, 1]]))\n def frompoints_circle(pts):\n \"\"\"Returns the least-squares-fitted circle to the given sequence of at least three points.\"\"\"\n # Algorithm from I.D. Coope (1993), \"Circle fitting by linear and nonlinear least squares\", Journal of Optimization Theory and Applications, vol. 76 (2), p. 381\n if len(pts) < 3: raise TypeError(\"three points define a circle\")\n x, y, c = np.linalg.lstsq(np.stack([z.real, z.imag, 1] for z in pts), [abs(z) ** 2 for z in pts])[0]\n r = np.sqrt(c + (x ** 2 + y ** 2) / 4)\n return ellipt(pmatrix(r, 0, 0, r, x / 2, y / 2))\n def frompoints(pts):\n \"\"\"Returns the least-squares-fitted ellipse to the given sequence of at least five points.\"\"\"\n # Algorithm from R. Halíř and J. Fluser (1998), \"Numerically Stable Direct Least Squares Fitting of Ellipses\", Winter School of Computer Graphics, vol. 6\n # http://wscg.zcu.cz/wscg1998/papers98/Halir_98.ps.gz\n if len(pts) < 5: raise TypeError(\"five points define an ellipse\")\n d1, d2 = np.stack([z.real * z.real, z.real * z.imag, z.imag * z.imag] for z in pts), np.stack([z.real, z.imag, 1] for z in pts)\n s1, s2, s3 = d1.T @ d1, d1.T @ d2, d2.T @ d2\n lqfactor = -np.linalg.inv(s3) @ s2.T\n evs = np.linalg.eig([[0.5], [-1], [0.5]] * (s1 + s2 @ lqfactor)[::-1])[1]\n qpart = evs[:,4 * evs[0] * evs[2] > evs[1] ** 2].flatten()\n return ellipt.fromquadric(np.concatenate([qpart, lqfactor @ qpart]))\n \n def projection(self, p):\n \"\"\"t-value of projection of p.\"\"\"\n pass # TODO\n def length(self, end_t = 1, start_t = 0, raw = False):\n \"\"\"Arc length of curve. The raw parameter if true does not linearly transform the input t-parameters.\"\"\"\n if raw: return abs(ccquad(self.lenf, start_t, end_t))\n start, end = self.getends()\n return abs(ccquad(self.lenf, start * (1 - start_t) + end * start_t, start * (1 - end_t) + end * end_t))\n def t_length(self, target):\n \"\"\"Computes t with self.length(t) = target.\"\"\"\n L = self.length()\n if target <= 0: return 0\n if target >= L: return 1\n start, end = self.getends()\n factor, post = np.arccos(np.linalg.svd(self.mat[:2,:2])[2][0,0]), start < end\n flip = (-1) ** (not post)\n brange = np.arange((start - factor) // (np.pi / 2) + post, (end - factor) // (np.pi / 2) + post, flip) * np.pi / 2 + factor\n b = np.stack([[start, 0], *([t, self.length(start, t, True)] for t in brange), [end, L]])\n i = np.searchsorted(b[:,1], target)\n x0 = b[i - (self.lenf(b[i - 1,0]) > self.lenf(b[i,0])),0]\n x = newton(lambda t: self.length(start, t, True), lambda t: flip * self.lenf(t), x0, target)\n return (x - start) / (end - start)\n\nclass bezier:\n topol_matrices = (np.array([[1, 0], [-1, 1]]),\n np.array([[1, 0, 0], [-2, 2, 0], [1, -2, 1]]),\n np.array([[1, 0, 0, 0], [-3, 3, 0, 0], [3, -6, 3, 0], [-1, 3, -3, 1]]))\n topoint_matrices = (np.array([[1, 1], [0, 1]]),\n np.array([[1, 1, 1], [0, 1/2, 1], [0, 0, 1]]),\n np.array([[1, 1, 1, 1], [0, 1/3, 2/3, 1], [0, 0, 1/3, 1], [0, 0, 0, 1]]))\n # These two functions convert control points to coordinate polynomials and back\n def topols(points): return [Pol(r) for r in (bezier.topol_matrices[len(points) - 2] @ np.stack([z.real, z.imag] for z in points)).T]\n def topoints(pols): return [complex(*r) for r in (np.stack(pol.coef for pol in pols) @ bezier.topoint_matrices[len(pols[0]) - 2]).T]\n # Polynomial root-finding may return complex 
numbers; we filter out the real ones in [0, 1] – points on the curve – using this\n def realroots(pol): return [t.real for t in pol.roots() if abs(t.imag) < 1e-14 and 0 <= t <= 1]\n \n def __init__(self, *v):\n if len(v) == 1: # v is a pair of linear/quadratic/cubic polynomials (x(t), y(t))\n self.pols = [v[0][0], v[0][1]]\n self.points = bezier.topoints(self.pols)\n if 1 < len(v) < 5: # v is a sequence of complex numbers representing the control points\n self.points = list(v)\n self.pols = bezier.topols(self.points)\n self.integrand2 = self.pols[0].deriv() ** 2 + self.pols[1].deriv() ** 2\n self.lenf = lambda t: np.sqrt(self.integrand2(t)) # Integrate this to get arc length; this gives the name to integrand2\n def __str__(self): return \"bezier({})\".format(\",\".join([str(z).strip(\"()\") for z in self.points]))\n def __repr__(self): return str(self)\n def __call__(self, t):\n if t == 0: return self.points[0]\n if t == 1: return self.points[-1]\n return self.pols[0](np.array(t)) + 1j * self.pols[1](np.array(t))\n def __getitem__(self, z): # Curve segmentation; this can be done by substituting for t a linear function mapping 0 to start and 1 to stop\n if type(z) == slice:\n begin = 0 if z.start == None else z.start\n end = 1 if z.stop == None else z.stop\n return bezier([pol(Pol([begin, end - begin])) for pol in self.pols])\n return self(z)\n def __neg__(self): return bezier(*self.points[::-1])\n def __rmatmul__(self, m): return bezier(*mapp(m, self.points))\n def fromsvg(head, data, cursor, rpoint):\n \"\"\"Constructs the corresponding Bézier curve from the given arguments.\n head is a character in LHVCSQT and their lowercases; data is a list of floats.\n cursor is self-explanatory; rpoint is the absolute reflected control point used by the S/T commands.\n Returns the Bézier object and the new cursor/rpoint.\"\"\"\n if head in \"Hh\": pts = [complex(data[0] + (cursor.real if head == \"h\" else 0), cursor.imag)]\n elif head in \"Vv\": pts = [complex(cursor.real, data[0] + (cursor.imag if head == \"v\" else 0))]\n else:\n pts = [complex(*z) for z in zip(*[iter(data)] * 2)]\n if head.islower(): pts = [pt + cursor for pt in pts]\n if head in \"SsTt\": pts = [rpoint] + pts\n pts = [cursor] + pts\n return bezier(*pts), pts[-1], 2 * pts[-1] - pts[-2] if head in \"CcSsQqTt\" else pts[-1]\n \n def inflections(self):\n \"\"\"t-values of inflection points, if any. This entails solving x'y'' - x''y' = 0.\"\"\"\n if len(self.points) < 4: return []\n return bezier.realroots(self.pols[0].deriv() * self.pols[1].deriv(2) - self.pols[0].deriv(2) * self.pols[1].deriv())\n def selfintersections(self):\n \"\"\"Returns t-values of self-intersection, if any.\"\"\"\n # Is there one at all? 
(generate canonical curve)\n if len(self.points) < 4: return []\n vx, vy, vz = self.points[2] - self.points[1], self.points[1] - self.points[0], self.points[3] - self.points[0]\n try: x, y = np.linalg.solve([[vx.real, vy.real], [vx.imag, vy.imag]], [vz.real, vz.imag])\n except np.linalg.LinAlgError: return [] # The only way to get a \"self-intersection\" here is for points 3 and 4 to be the same, but visually there wouldn't be a loop\n # Boundaries according to https://pomax.github.io/bezierinfo/#canonical:\n # | cusp | t = 1 loop | t = 0 loop\n if x > 1 or 4 * y > (x + 1) * (3 - x) or x > 0 and 2 * y + x < np.sqrt(3 * x * (4 - x)) or 3 * y < x * (3 - x): return []\n # Consider the coordinate polynomials x(t) = at³+bt²+ct+p, y(t) = dt³+et²+ft+q and parameters of self-intersection as λ and μ.\n # x(λ) - x(μ) = a(λ³-μ³)+b(λ²-μ²)+c(λ-μ) = 0 and y(λ) - y(μ) = d(λ³-μ³)+e(λ²-μ²)+f(λ-μ) = 0.\n # Dividing by the trivial solution λ = μ and expanding we get a(λ²+λμ+μ²)+b(λ+μ)+c = 0 and d(λ²+λμ+μ²)+e(λ+μ)+f = 0.\n # Yet we only need to find the parameters for the canonical curve, since we took it through an affine transformation.\n # Thus we have (x-3)(λ²+λμ+μ²)+3(λ+μ) = 0 and y(λ²+λμ+μ²)-3(λ+μ)+3 = 0.\n # Eliminating λ²+λμ+μ² gives (-3-3y/(x-3))(λ+μ) + 3 = 0, rearranging to λ+μ = 1 - y/(x+y-3).\n rootsum = 1 - y / (x + y - 3)\n # Adding (x-3)λμ to the first equation above and rearranging gives λμ = (λ+μ)²+3(λ+μ)/(x-3).\n rootprod = rootsum * (rootsum + 3 / (x - 3))\n # Viète's formulas then give λ and μ themselves. We divided by x+y-3 and x-3, but they are guaranteed to be non-zero.\n return bezier.realroots(Pol([rootprod, -rootsum, 1]))\n def projection(self, p):\n \"\"\"t-value of projection of p. This entails solving (x-p)x' + (y-p)y' = 0 and then comparing those parameters alongside 0 and 1.\"\"\"\n cands = bezier.realroots((self.pols[0] - p.real) * self.pols[0].deriv() + (self.pols[1] - p.imag) * self.pols[1].deriv()) + [0, 1]\n return cands[np.argmin([abs(self(t) - p) for t in cands])]\n def length(self, end_t = 1, start_t = 0):\n \"\"\"Arc length of curve.\"\"\"\n if len(self.points) == 2: return abs(self.points[1] - self.points[0]) * (end_t - start_t)\n return ccquad(self.lenf, start_t, end_t)\n def t_length(self, target):\n \"\"\"Computes t with self.length(t) = target.\"\"\"\n L = self.length()\n if target <= 0: return 0\n if target >= L: return 1\n if len(self.points) == 2: return target / L\n # To guarantee convergence of Newton's method, compute extrema of integrand2, which correspond to inflections in lenf\n b = np.stack([[0, 0], *([t, self.length(t)] for t in bezier.realroots(self.integrand2.deriv())), [1, L]])\n i = np.searchsorted(b[:,1], target)\n x0 = b[i - (self.integrand2(b[i - 1,0]) > self.integrand2(b[i,0])),0]\n return newton(self.length, self.lenf, x0, target)\n\ndef intersect_ee(e1, e2):\n \"\"\"Elliptical–elliptical arc intersection.\"\"\"\n pass # TODO\n\ndef intersect_be(bez, ell):\n \"\"\"Bézier–elliptical arc intersection by implicitisation.\"\"\"\n out, tbez = [], ~ell.mat @ bez\n cands = bezier.realroots(tbez.pols[0] ** 2 + tbez.pols[1] ** 2 - 1)\n for cand in cands:\n ell_t = ell.wrap(np.angle(tbez(cand)))\n if 0 <= ell_t <= 1: out.append((cand, ell_t))\n return out\n\ndef intersect_bb(b1, b2):\n \"\"\"Bézier–Bézier intersection. 
Implicitises when lines, collinear curves or quadratics are present;\n uses Newton's method otherwise splitting into quasi-parabolic segments.\"\"\"\n lin1, lin2 = len(b1.points) == 2, len(b2.points) == 2\n if lin1 and lin2: # Line–line intersection can be handled directly\n l1p1, l1p2, l2p1, l2p2 = *b1.points, *b2.points\n delta1, delta2, delta3 = l1p2 - l1p1, l2p1 - l2p2, l2p1 - l1p1\n try: t, u = np.linalg.solve([[delta1.real, delta2.real], [delta1.imag, delta2.imag]], [delta3.real, delta3.imag])\n except np.linalg.LinAlgError: return []\n return [(t, u)] if 0 <= t <= 1 and 0 <= u <= 1 else []\n \n out = []\n if lin1 or lin2: b1coll, b2coll = lin1, lin2 # Handle lines and collinear curves\n else:\n collm1 = np.linalg.svd(np.stack([z.real, z.imag, 1] for z in b1.points), compute_uv=0)\n b1coll = collm1[0] * 1e-12 > collm1[-1]\n collm2 = np.linalg.svd(np.stack([z.real, z.imag, 1] for z in b2.points), compute_uv=0)\n b2coll = collm2[0] * 1e-12 > collm2[-1]\n if b1coll or b2coll:\n coll, other = (b1, b2) if b1coll else (b2, b1)\n o, span = coll.points[0], np.max(coll.points) - np.min(coll.points)\n invmat = ~pmatrix(span.real, span.imag, -span.imag, span.real, o.real, o.imag)\n tcoll, tother = invmat @ coll, invmat @ other\n cands = bezier.realroots(tother.pols[1])\n for cand in cands:\n corrs = bezier.realroots(tcoll.pols[0] - tother.pols[0](cand))\n for corr in corrs: out.append((corr, cand) if b1coll else (cand, corr))\n return out\n \n quad1, quad2 = len(b1.points) == 3, len(b2.points) == 3\n if quad1 or quad2: # For quadratics, transform to y = x² with x in [0, 1]\n parab, curve = (b1, b2) if quad1 else (b2, b1)\n p1, p2, p3 = parab.points\n delta, gamma = 2 * (p2 - p1), p3 - 2 * p2 + p1\n tcurve = ~pmatrix(delta.real, delta.imag, gamma.real, gamma.imag, p1.real, p1.imag) @ curve\n cands = bezier.realroots(tcurve.pols[0] ** 2 - tcurve.pols[1])\n for cand in cands:\n parab_t = tcurve.pols[0](cand)\n if 0 <= parab_t <= 1: out.append((parab_t, cand) if quad1 else (cand, parab_t))\n return out\n # A quasi-parabolic segment displays monotone, non-zero curvature within and turns less than 90°\n # Two such segments intersect in at most two points\n b1x, b1y, b2x, b2y = b1.pols[0], b1.pols[1], b2.pols[0], b2.pols[1]\n d1x, d1y, d2x, d2y = b1x.deriv(), b1y.deriv(), b2x.deriv(), b2y.deriv()\n cross1, cross2 = d1x * d1y.deriv() - d1x.deriv() * d1y, d2x * d2y.deriv() - d2x.deriv() * d2y\n s1 = [0, 1] + bezier.realroots(cross1.deriv() * b1.integrand2 - 1.5 * cross1 * b1.integrand2.deriv()) + bezier.realroots(cross1)\n s2 = [0, 1] + bezier.realroots(cross2.deriv() * b2.integrand2 - 1.5 * cross2 * b2.integrand2.deriv()) + bezier.realroots(cross2)\n queue = deque((t0, u0) for t0 in s1 for u0 in s2)\n cands = []\n while queue:\n t, u = queue.popleft()\n for q in range(24):\n try: dt, du = np.linalg.solve([[d1x(t), -d2x(u)], [d1y(t), -d2y(u)]], [b2x(u) - b1x(t), b2y(u) - b1y(t)])\n except np.linalg.LinAlgError:\n # (t, u) is in the curve pair's Julia set. Perturb the iterate in four directions and try again.\n # The step sizes in positive and negative directions are in the golden ratio to prevent lattice formation,\n # i.e. 
returning to the Julia set and falling into an infinite loop.\n queue.extend([(t + 0.016180339887498948, u), (t - 0.01, u), (t, u + 0.016180339887498948), (t, u - 0.01)]); break\n if np.hypot(dt, du) < 1e-12:\n if 0 <= t <= 1 and 0 <= u <= 1: cands.append(np.array([t, u]))\n break\n t, u = t + dt, u + du\n for cand in cands:\n add = True\n for cent in out:\n count, centroid = cent\n if np.hypot(*(centroid - cand)) < 1e-7 * (1 + 1 / count):\n cent[1] = (centroid * count + cand) / (count + 1)\n cent[0] += 1; add = False; break\n if add: out.append([1, cand])\n return sorted([tuple(cent[1]) for cent in out])\n\ndef intersect_segment(s1, s2):\n \"\"\"Intersection of two segments (either Bézier curves or elliptical arcs). Wrapper for the intersect_{bb, be, ee} functions.\"\"\"\n s1e, s2e = type(s1) == ellipt, type(s2) == ellipt\n if s1e and s2e: return intersect_ee(s1, s2)\n if s2e: return intersect_be(s1, s2)\n if s1e: return sorted([pair[::-1] for pair in intersect_be(s2, s1)])\n return intersect_bb(s1, s2)\n\n# The path format is a list of closing indices followed by the segments themselves (numbered from one).\n# Positive integers mean ends of an open path, negative ones that of a closed path.\n# e.g. [[-4, 7, -8, 9], s1, ..., s9] <=> 1-2-3-4-close 5-6-7 8-close 9\n# The lineto implied by a z on a path with distant endpoints is made explicit here.\nclass path:\n strides = {'M': 2, 'Z': 0, 'L': 2, 'H': 1, 'V': 1, 'C': 6, 'S': 4, 'Q': 4, 'T': 2, 'A': 7}\n def __init__(self, d):\n self.segs, cursor, rpoint = [[0]], 0, 0\n for command in pcomm_re.finditer(d):\n head, data = command.groups()\n datas = zip(*[iter([float(n) for n in num_re.findall(data)])] * path.strides[head.upper()])\n if head in \"Mm\":\n cursor = complex(*next(datas)) + (cursor if head == 'm' else 0)\n if len(self.segs) - 1 != abs(self.segs[0][-1]): self.segs[0].append(len(self.segs) - 1)\n ll = chr(ord(head) - 1)\n for data in datas:\n mint = bezier.fromsvg(ll, data, cursor, rpoint)\n self.segs.append(mint[0])\n cursor, rpoint = mint[1], mint[2]\n elif head in \"Zz\":\n start, end = self.segs[abs(self.segs[0][-1]) + 1](0), self.segs[-1](1)\n if abs(start - end) > 1e-9 * abs(start): self.segs.append(bezier(end, start))\n self.segs[0].append(1 - len(self.segs))\n cursor = rpoint = start\n elif head in \"Aa\": # The elliptical arc fromsvg may return None, indicating no segment, so this has to be checked for\n for data in datas:\n mint = ellipt.fromsvg(head, data, cursor)\n if mint:\n self.segs.append(mint[0])\n cursor = rpoint = mint[1]\n else:\n for data in datas:\n mint = bezier.fromsvg(head, data, cursor, rpoint)\n self.segs.append(mint[0])\n cursor, rpoint = mint[1], mint[2]\n self.segs = self.segs[1:]\n\ndef parsepath(p):\n out = \"\"\n for headload in pcomm_re.finditer(p):\n head, load = headload.groups()\n load = flt(*[float(n) for n in num_re.findall(load)])\n out += head + load\n print(out)\n","sub_path":"kinback/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":23090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
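The self-intersection comments in kinback/paths.py above reduce the problem to a quadratic through λ+μ and λμ. A minimal numeric check of that reduction, using only numpy; the canonical point (x, y) = (-1, -0.5) is a hypothetical value inside the loop region, not taken from the source.

import numpy as np

x, y = -1.0, -0.5                      # hypothetical canonical point with a loop
s = 1 - y / (x + y - 3)                # rootsum: lambda + mu
p = s * (s + 3 / (x - 3))              # rootprod: lambda * mu
lam, mu = np.roots([1, -s, p])         # Viete: roots of z^2 - s*z + p
# Both parameters satisfy the two canonical-curve equations quoted above:
q = lam**2 + lam*mu + mu**2
assert np.isclose((x - 3) * q + 3 * (lam + mu), 0)
assert np.isclose(y * q - 3 * (lam + mu) + 3, 0)
print(lam, mu)                         # two t-values, both in [0, 1]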
+{"seq_id":"374180497","text":"from tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import cross_validate\nfrom platypus import GeneticAlgorithm, Problem, Binary\nfrom radiomics_all_svm import specificity_loss_func, read_data, validate, get_model\nimport pickle\n \nclass SVM(Problem):\n def __init__(self):\n super(SVM, self).__init__(1, 1)\n self.X, self.Y = read_data('deep_radiomics.csv')\n self.types[:] = Binary(self.X.shape[1])\n self.model = get_model()\n self.directions[:] = Problem.MAXIMIZE\n\n def evaluate(self, solution):\n columns = solution.variables[:]\n\n # Selecting the columns\n X = self.X[:, columns[0]]\n\n scores = {'AUC': 'roc_auc', 'ACC': 'accuracy', 'F1': 'f1', 'Sensitivity': 'recall',\n 'Precision': 'precision', 'Specificity': make_scorer(specificity_loss_func, greater_is_better=True)}\n results = cross_validate(\n self.model, X, self.Y, scoring=scores, cv=3, return_estimator=True, n_jobs=-1)\n solution.objectives[:] = np.mean(results['test_AUC']) \n #print(solution.objectives)\n\nif __name__ == \"__main__\":\n experiment='deep_radiomics_ga_svm'\n X, Y = read_data('deep_radiomics.csv')\n \n num_iter, generations, pop = 5, 5, 5\n \n gen_scores = [[] for i in range(num_iter)]\n gen_std = []\n gen_mean = []\n \n x = np.arange(1, generations+1, 1)\n \n results = {'acc_mean': [],'acc_std': [], 'spec_mean': [], 'spec_std': [], 'sens_mean': [], 'sens_std': [], 'f1_score_mean': [], 'f1_score_std': [], 'auc_mean': [], 'auc_std': []}\n \n for i in range(num_iter):\n #Reset alogirthm each iteration\n algorithm = GeneticAlgorithm(SVM(), population_size=pop)\n \n for j in tqdm(range(generations), desc=\"Iteration \" + str(i + 1)):\n algorithm.step()\n gen_scores[i].append(algorithm.fittest.objectives[:])\n \n best_solution = algorithm.fittest\n features = best_solution.variables[0]\n model = get_model(probability=True)\n result = validate(model, X[:, features], Y, plot=False)\n \n results['acc_mean'].append(np.mean(result['acc']))\n results['acc_std'].append(np.std(result['acc']))\n results['spec_mean'].append(np.mean(result['spec']))\n results['spec_std'].append(np.std(result['spec']))\n results['sens_mean'].append(np.mean(result['sens']))\n results['sens_std'].append(np.std(result['sens']))\n results['f1_score_mean'].append(np.mean(result['f1_score']))\n results['f1_score_std'].append(np.std(result['f1_score']))\n results['auc_mean'].append(np.mean(result['auc']))\n results['auc_std'].append(np.std(result['auc']))\n\n\n gen_mean = np.mean(gen_scores,axis=0)\n gen_std = np.std(gen_scores,axis=0)\n \n dict_results = {'mean':gen_mean,\n 'std':gen_std}\n \n outfile = open('results/' + experiment + '.pickle','wb') \n pickle.dump(dict_results, outfile)\n outfile.close()\n \n # Generations values\n df = pd.DataFrame(results)\n df.to_csv('results/' + experiment + '.csv')","sub_path":"deep_radiomics_ga_svm .py","file_name":"deep_radiomics_ga_svm .py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"426879965","text":"# Copyright 2013-2014 MongoDB, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test Motor, an asynchronous driver for MongoDB and Tornado.\"\"\"\n\nimport asyncio\nimport functools\nimport greenlet\nimport random\nimport unittest\n\nimport pymongo.errors\nfrom test.asyncio_tests import asyncio_test, AsyncIOTestCase\n\nimport test\nfrom test import assert_raises, SkipTest\nfrom test.utils import delay, one\n\n\nclass AIOMotorPoolTest(AsyncIOTestCase):\n @asyncio_test\n def test_max_size_default(self):\n yield from self.cx.open()\n pool = self.cx._get_primary_pool()\n\n # Current defaults\n self.assertEqual(100, pool.max_size)\n self.assertEqual(None, pool.wait_queue_timeout)\n self.assertEqual(None, pool.wait_queue_multiple)\n\n @asyncio_test(timeout=30)\n def test_max_size(self):\n if not test.env.v8:\n raise SkipTest(\"Need multithreaded Javascript in mongod for test\")\n\n max_pool_size = 5\n cx = self.asyncio_client(max_pool_size=max_pool_size)\n\n # Lazy connection.\n self.assertEqual(None, cx._get_primary_pool())\n yield from cx.motor_test.test_collection.remove()\n pool = cx._get_primary_pool()\n self.assertEqual(max_pool_size, pool.max_size)\n self.assertEqual(1, len(pool.sockets))\n self.assertEqual(1, pool.motor_sock_counter)\n\n # Grow to max_pool_size.\n ops_completed = asyncio.Future(loop=self.loop)\n nops = 100\n results = []\n\n def callback(i, result, error):\n self.assertFalse(error)\n self.assertFalse(pool.motor_sock_counter > max_pool_size)\n results.append(i)\n if len(results) == nops:\n ops_completed.set_result(None)\n\n collection = cx.motor_test.test_collection\n yield from collection.insert({}) # Need a document.\n\n for i in range(nops):\n # Introduce random delay, avg 5ms, just to make sure we're async.\n collection.find_one(\n {'$where': delay(random.random() / 10)},\n callback=functools.partial(callback, i))\n\n yield from ops_completed\n\n # All ops completed, but not in order.\n self.assertEqual(list(range(nops)), sorted(results))\n self.assertNotEqual(list(range(nops)), results)\n\n self.assertEqual(max_pool_size, len(pool.sockets))\n self.assertEqual(max_pool_size, pool.motor_sock_counter)\n cx.close()\n\n @asyncio_test(timeout=30)\n def test_force(self):\n cx = self.asyncio_client(max_pool_size=2, waitQueueTimeoutMS=100)\n yield from cx.open()\n pool = cx._get_primary_pool()\n\n def get_socket():\n s = pool.get_socket(force=True)\n self.loop.call_later(0, functools.partial(future.set_result, s))\n self.addCleanup(s.close)\n\n future = asyncio.Future(loop=self.loop)\n greenlet.greenlet(get_socket).switch()\n socket_info = yield from future\n self.assertEqual(1, pool.motor_sock_counter)\n\n future = asyncio.Future(loop=self.loop)\n greenlet.greenlet(get_socket).switch()\n socket_info2 = yield from future\n self.assertEqual(2, pool.motor_sock_counter)\n\n future = asyncio.Future(loop=self.loop)\n greenlet.greenlet(get_socket).switch()\n forced_socket_info = yield from future\n self.assertEqual(3, 
pool.motor_sock_counter)\n\n future = asyncio.Future(loop=self.loop)\n greenlet.greenlet(get_socket).switch()\n forced_socket_info2 = yield from future\n self.assertEqual(4, pool.motor_sock_counter)\n\n # First returned sockets are closed, since our outstanding sockets\n # exceed max_pool_size.\n pool.maybe_return_socket(socket_info)\n self.assertTrue(socket_info.closed)\n self.assertEqual(0, len(pool.sockets))\n self.assertEqual(3, pool.motor_sock_counter)\n\n pool.maybe_return_socket(socket_info2)\n self.assertTrue(socket_info2.closed)\n self.assertEqual(0, len(pool.sockets))\n self.assertEqual(2, pool.motor_sock_counter)\n\n # Closed socket isn't pooled, but motor_sock_counter is decremented.\n forced_socket_info.close()\n pool.maybe_return_socket(forced_socket_info)\n self.assertEqual(0, len(pool.sockets))\n self.assertEqual(1, pool.motor_sock_counter)\n\n # Returned socket is pooled, motor_sock_counter not decremented.\n pool.maybe_return_socket(forced_socket_info2)\n self.assertFalse(forced_socket_info2.closed)\n self.assertEqual(1, len(pool.sockets))\n self.assertEqual(1, pool.motor_sock_counter)\n\n cx.close()\n\n @asyncio_test(timeout=30)\n def test_wait_queue_timeout(self):\n # Do a find_one that takes 1 second, and set waitQueueTimeoutMS to 500,\n # 5000, and None. Verify timeout iff max_wait_time < 1 sec.\n where_delay = 1\n yield from self.collection.insert({})\n for waitQueueTimeoutMS in (500, 5000, None):\n cx = self.asyncio_client(\n max_pool_size=1, waitQueueTimeoutMS=waitQueueTimeoutMS)\n\n yield from cx.open()\n pool = cx._get_primary_pool()\n if waitQueueTimeoutMS:\n self.assertEqual(\n waitQueueTimeoutMS, pool.wait_queue_timeout * 1000)\n else:\n self.assertTrue(pool.wait_queue_timeout is None)\n\n collection = cx.motor_test.test_collection\n future = collection.find_one({'$where': delay(where_delay)})\n if waitQueueTimeoutMS and waitQueueTimeoutMS < where_delay * 1000:\n with assert_raises(pymongo.errors.ConnectionFailure):\n yield from collection.find_one()\n else:\n # No error\n yield from collection.find_one()\n yield from future\n cx.close()\n\n @asyncio_test(timeout=30)\n def test_wait_queue_multiple(self):\n cx = self.asyncio_client(max_pool_size=2,\n waitQueueTimeoutMS=100,\n waitQueueMultiple=3)\n yield from cx.open()\n pool = cx._get_primary_pool()\n\n def get_socket_on_greenlet(future):\n try:\n s = pool.get_socket()\n future.set_result(s)\n except Exception as e:\n future.set_exception(e)\n\n def get_socket():\n future = asyncio.Future(loop=self.loop)\n fn = functools.partial(get_socket_on_greenlet, future)\n greenlet.greenlet(fn).switch()\n return future\n\n s1 = yield from get_socket()\n self.assertEqual(1, pool.motor_sock_counter)\n\n yield from get_socket()\n self.assertEqual(2, pool.motor_sock_counter)\n\n start = self.loop.time()\n\n with self.assertRaises(pymongo.errors.ConnectionFailure):\n yield from get_socket()\n\n # 100-millisecond timeout.\n self.assertAlmostEqual(0.1, self.loop.time() - start, places=1)\n self.assertEqual(2, pool.motor_sock_counter)\n\n # Give a socket back to the pool, a waiter receives it.\n s1_future = get_socket()\n pool.maybe_return_socket(s1)\n self.assertEqual(s1, (yield from s1_future))\n self.assertEqual(2, pool.motor_sock_counter)\n\n # max_pool_size * waitQueueMultiple = 6 waiters are allowed.\n for _ in range(6):\n get_socket()\n\n start = self.loop.time()\n with self.assertRaises(pymongo.errors.ConnectionFailure):\n yield from get_socket()\n\n # Fails immediately.\n self.assertAlmostEqual(0, self.loop.time() - 
start, places=3)\n self.assertEqual(2, pool.motor_sock_counter)\n cx.close()\n yield from asyncio.sleep(0, loop=self.loop)\n\n @asyncio_test\n def test_connections_unacknowledged_writes(self):\n # Verifying that unacknowledged writes don't open extra connections\n collection = self.cx.motor_test.test_collection\n yield from collection.drop()\n pool = self.cx._get_primary_pool()\n self.assertEqual(1, pool.motor_sock_counter)\n\n nops = 10\n for i in range(nops - 1):\n collection.insert({'_id': i}, w=0)\n\n # We have only one socket open, and it's already back in the pool\n self.assertEqual(1, pool.motor_sock_counter)\n self.assertEqual(1, len(pool.sockets))\n\n # Acknowledged write; uses same socket and blocks for all inserts\n yield from collection.insert({'_id': nops - 1})\n self.assertEqual(1, pool.motor_sock_counter)\n\n # Socket is back in the idle pool\n self.assertEqual(1, len(pool.sockets))\n\n # All ops completed\n docs = yield from collection.find().sort('_id').to_list(length=100)\n self.assertEqual(list(range(nops)), [doc['_id'] for doc in docs])\n\n @asyncio_test\n def test_check_socket(self):\n # Test that MotorPool._check(socket_info) replaces a closed socket\n # and doesn't leak a counter.\n yield from self.cx.open()\n pool = self.cx._get_primary_pool()\n pool._check_interval_seconds = 0 # Always check.\n counter = pool.motor_sock_counter\n sock_info = one(pool.sockets)\n sock_info.sock.close()\n pool.maybe_return_socket(sock_info)\n\n # New socket replaces closed one.\n yield from self.cx.server_info()\n sock_info2 = one(pool.sockets)\n self.assertNotEqual(sock_info, sock_info2)\n\n # Counter isn't leaked.\n self.assertEqual(counter, pool.motor_sock_counter)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/asyncio_tests/test_asyncio_pool.py","file_name":"test_asyncio_pool.py","file_ext":"py","file_size_in_byte":10163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
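The get_socket helpers in the pool tests above all follow one pattern: run a blocking call on a greenlet and hand its result back to the asyncio loop through a Future. A minimal sketch of that bridge in isolation, with a trivial blocking_call standing in for pool.get_socket():

import asyncio
import functools
import greenlet

loop = asyncio.new_event_loop()
future = loop.create_future()

def blocking_call():
    return 42                      # stands in for pool.get_socket()

def worker():
    result = blocking_call()       # runs synchronously on the greenlet
    loop.call_soon(functools.partial(future.set_result, result))

greenlet.greenlet(worker).switch() # same bridge as get_socket() in the tests
print(loop.run_until_complete(future))  # 42
loop.close()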
+{"seq_id":"52666064","text":"import numpy as np\nimport random as rand\nimport os\n\nclass Yard():\n def __init__(self, file, fromFile = True):\n #From the file name, we set the size.\n\n #### Loads the Yard ####\n if \"optimo\" in os.path.basename(file.name):\n return False\n\n #Gets file yard size from file name.\n yardInfo = os.path.basename(file.name).split(\"_\")\n self.x,self.y = int(yardInfo[1]), int(yardInfo[2])\n #print(yardInfo)\n self.opt = int(yardInfo[3])\n\n self.state = np.zeros(shape=(self.x,self.y), dtype=np.int)\n\n #Loads the data from the file.\n lines = file.readlines()\n for i in range(len(lines)):\n pos = 0\n #Preparing the line reading.\n for num in lines[i].replace(\"\\n\",\"\").split(\" \"):\n if num.isdigit():\n self.state[i][pos] = int(num)\n pos = pos + 1\n\n #######END#####################\n\n self.max = np.amax(self.state)\n self.min = np.amin(self.state)\n\n\n def getTop(self,i):\n for pos in range(self.y-1, -1, -1):\n if self.state[i][pos] > 0:\n return self.state[i][pos]\n return 0\n\n def isSorted(self,i):\n lastNum = 999999\n for num in np.array(self.state[i]):\n if num == 0:\n break\n if num > lastNum:\n return False\n lastNum = num\n return True\n\n def isStackEmpty(self, i):\n return self.state[i][0] == 0\n\n def isStackFull(self, i):\n return self.state[i][self.y-1] != 0\n\n def moveStack(self, src, dest):\n value = 0\n \n #---> Primero, verificamos que la accion se pueda realizar.\n if self.isStackFull(dest) or self.isStackEmpty(src):\n return False\n\n #---> Segundo, conseguimos y eliminamos el valor en top.\n for pos in range(self.y-1, -1, -1):\n if self.state[src][pos] > 0:\n value = self.state[src][pos]\n self.state[src][pos] = 0\n break\n\n #---> Dejamos el valor \n for pos in range(self.y):\n if self.state[dest][pos] == 0:\n self.state[dest][pos] = value\n break\n return True\n\n def toArray(self):\n return np.asarray(self.state).reshape(-1)\n\n def render(self):\n rend = np.rot90(self.state, k=1)\n print(rend)\n\n def countStackBlocks(self,i):\n count=0\n for num in self.state[i]:\n if num != 0:\n count = count + 1\n else:\n break\n return count\n\n def getAllTops(self):\n tops = np.zeros(self.x)\n for i in range(self.x):\n tops[i] = self.getTop(i)\n return tops\n\n def getAllSorts(self):\n sorted = np.zeros(self.x, dtype=np.bool)\n for i in range(self.x):\n sorted[i] = self.isSorted(i)\n return sorted\n\n def isDone(self):\n for sort in self.getAllSorts():\n if not sort:\n return False\n return True\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"DQN-Optimizer/containeryard/yard.py","file_name":"yard.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"89028351","text":"import pickle\nfrom sklearn.linear_model import LogisticRegression\n\ndef train(sen_list, ids):\n x,y = sen_list \n lo = LogisticRegression()\n lo.fit(x,y)\n return lo\n\nif __name__ == '__main__':\n sen_list_file = 'sen_list.feature'\n ids_file = 'feature.ids'\n with open(sen_list_file,'rb') as sen_list_data, open(ids_file, 'rb') as ids_data:\n sen_list = pickle.load(sen_list_data)\n ids = pickle.load(ids_data)\n lo_ = train(sen_list, ids)\n with open('model.log', 'wb') as model:\n pickle.dump(lo_, model)\n","sub_path":"kurosawa/chapter08/knock73.py","file_name":"knock73.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"317612139","text":"f_apc = configdir + 'aperture_correction.dat'\napc = ascii.read(f_apc)\nidx = apc['GalName'] == GalName\napc_v = apc[idx]['vc'][0]\napc_i = apc[idx]['ic'][0]\n# inf_vega_v = 0.843 # http://www.stsci.edu/hst/wfc3/analysis/uvis_zpts/uvis1_infinite\n# inf_vega_i = 0.712 - 1.\nap10_vega_v = 0.737 # http://www.stsci.edu/hst/wfc3/analysis/uvis_zpts/uvis1_infinite#r10\nap10_vega_i = 0.598 - 1.\nH16GalList = ['n1015', 'n1448', 'n2442', 'n3447', 'n3972', 'n4424', 'n5584', 'n5917', 'n7250', 'u9391']\nif GalName in H16GalList: # For comparison purpose\n ap10_vega_v = 0.741 # http://www.stsci.edu/hst/wfc3/documents/ISRs/WFC3-2016-03.pdf\n ap10_vega_i = 0.603 - 1.\nvband_zp = apc_v + ap10_vega_v ## aperture correction & vega zp\niband_zp = apc_i + ap10_vega_i\n","sub_path":"vpc2/par_apcload.py","file_name":"par_apcload.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"248086233","text":"from .kartu_keluarga_model import KartuKeluarga\nfrom .model import *\n\n\nclass Penduduk(BaseModel):\n id = FixedCharField(primary_key=True, max_length=17) # nomor ktp\n nama = CharField()\n tempat_lahir = CharField()\n tanggal_lahir = DateField()\n jenis_kelamin = CharField(max_length=5) # L or P\n darah = CharField(max_length=5, null=True)\n alamat = TextField()\n kecamatan = CharField()\n kelurahan = CharField()\n rt = IntegerField()\n rw = IntegerField()\n agama = CharField()\n perkawinan = CharField()\n kewarganegaraan = CharField()\n status_hidup = CharField()\n kartukeluarga_id = ForeignKeyField(KartuKeluarga, backref='penduduk')\n","sub_path":"models/penduduk_model.py","file_name":"penduduk_model.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"107690826","text":"#!/usr/bin/env python3\n\nfrom OutilsCrypto import *\n\n\ntxt = 'bonjour'\npaq = 1\nclef = 3\n\n#txt = input(\"Entrez votre texte ici : \")\n#paq = int(input(\"Entrez nobre de paquet : \"))\n#clef = int(input(\"Entrez votre clef : \"))\n\n#transformer le dictionnaire en list\ndef dicLit(dic):\n list = []\n for value in dic.values():\n list.append(value)\n return list\n\n#ajouter la clef a la liste\ndef addclef(intList, clef):\n compteur=0\n for value in intList:\n intList[compteur]+=clef\n compteur+=1\n\n#transformer les int de la liste en str et stocker dans une autre liste \"liste1\"\ndef intStr(intList):\n list1 = []\n for value in intList:\n list1.append(str(value))\n return list1\n\n#ajouter les 0 devant un nombre pour creer les paquet\ndef addpaq(strlist, paq):\n compteur=0\n for value in strlist:\n if len(value) < paq*2:\n a=paq*2-len(value)\n strlist[compteur]=(a*'0')+value\n compteur+=1\n\n#separer les chiffre 2 par 2 et retourne une liste int\ndef separater(strlist, paq):\n list2 = []\n for value in strlist:\n compteur1 = 0\n compteurL = 0\n\n while compteur1 < paq:\n a=''\n compteur = 0\n while compteur < 2:\n a+=value[compteurL]\n compteur+=1\n compteurL+=1\n list2.append(int(a))\n compteur1+=1\n return list2\n\n#transformer les chiffre regroupe 2 par 2 en lettre\ndef chiffreLettre(listInt):\n motcrypte = ''\n for element in listInt:\n motcrypte+=xedoc(element)\n return motcrypte\n\n return motcrypte\n\n\ndef Ecesar(txt, paq, clef) :\n\n #transformer le texte en chiffre par paquet de paq et stocker dans un dictionnaire\n dic=paquet(txt, paq)\n\n #transformer le dictionnaire en list\n list = dicLit(dic)\n\n #ajouter la clef a la liste \"list\"\n addclef(list, clef)\n\n #transformer les int de la liste en str et stocker dans une autre liste str \"list1\"\n list1 = intStr(list)\n\n #ajouter les 0 devant un nombre pour creer les paquet\n addpaq(list1, paq)\n\n\n #separer les chiffre 2 par 2 et returne une list int\n list2 = separater(list1, paq)\n\n #transformer les chiffre regroupe 2 par 2 en lettre\n return chiffreLettre(list2)\n\n\nprint(Ecesar(txt, paq, clef ))\n","sub_path":"crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"263370669","text":"import heapq\nimport logging\nimport threading\nimport time\nfrom collections import defaultdict\n\nfrom redis import Redis\n\nfrom colored_bitcoins import setup\nfrom util import hashEncode\nimport util\n\n\nclass ColorsProcessor(object):\n def __init__(self):\n self.r = Redis(**setup.redis_setup)\n self.q = []\n self.log = logging.getLogger()\n logging.basicConfig(level=logging.INFO)\n self.color_lock = threading.RLock()\n\n def genesis(self, tx, color_id=None):\n \"\"\"\n Marks the tx transaction as genesis transaction for the color specified. If no color is specified,\n creates a new color for it.\n\n :param tx: Transaction in encoded format\n :param color_id: The id of the color for which the transaction is a genesis transaction\n\n :return: A tuple (transaction, the color ID, sum of the outputs of the transaction)\n \"\"\"\n outputs = self.r.hgetall(tx)\n total = sum(map(int, [outputs[k] for k in outputs if \":v\" in k]))\n if color_id is None:\n color_id = self.r.incr(\"color_id\")\n self.log.debug(\"Using color %d\", color_id)\n\n return tx, color_id, total\n\n def load_txs(self, queue, cache):\n \"\"\"\n Loads the transaction details to the cache.\n\n :param queue: A collection of the transactions to be loaded\n :param cache: A dictionary in which the data is stored. Format : transaction -> details\n \"\"\"\n neededTxs = [x for x in queue if x not in cache]\n self.log.debug(\"loading %d transactions\", len(neededTxs))\n if neededTxs:\n pipe = self.r.pipeline()\n for tx in neededTxs:\n pipe.hgetall(tx)\n result = pipe.execute()\n for k, v in zip(neededTxs, result):\n cache[k] = v\n\n def color(self, tx=None, init_balances=None):\n \"\"\"\n Performs the actual coloring of the transaction given. Also propagates all balances from\n init_balances.\n\n :param tx: Raw transaction hash\n :param init_balances: A dictionary of initial balances for the transactions and colors. Format :\n transaction -> color -> balance\n\n :return: A 2-tuple (new color ID, dictionary of updated transactions)\n \"\"\"\n self.log.debug(\"Waiting for lock...\")\n with self.color_lock:\n self.log.debug(\"Lock acquired.\")\n begin = time.clock()\n cache = {}\n keyscache = {}\n balances = defaultdict(lambda: defaultdict(int))\n mentioned_txs = set()\n if init_balances is not None:\n # Copy the appropriate balances to local structures\n for d in init_balances:\n for k in init_balances[d]:\n balances[d][k] += init_balances[d][k]\n mentioned_txs.add(d)\n if tx is not None:\n # set the initial genesis transaction\n t, id, balance = self.genesis(hashEncode(tx))\n balances[t][id] += balance\n mentioned_txs.add(t)\n else:\n balance = 0\n id = None\n # load details about the transactions\n self.load_txs(mentioned_txs, cache)\n\n # initialize the queue\n # The queue is ordered by block numbers. This rapidly speeds up the execution as each\n # transaction is ideally processed only once and its balance doesn't change later.\n q = [(int(cache[t][\"b\"]), t) for t in mentioned_txs]\n heapq.heapify(q)\n enqueued = mentioned_txs\n while q:\n _, t = heapq.heappop(q)\n enqueued.remove(t)\n self.log.debug(\"Processing tx %s\", util.hashDecode(t))\n outs = cache[t]\n outsget = outs.get\n # holds the valid output keys, i.e. 
those with available uncolored balance\n if t in keyscache:\n validkeys = keyscache[t]\n else:\n validkeys = [k for k in outs if \":v0\" in k and outs[k] != 0 and outs[k] != \"0\"]\n validkeys.sort(key=lambda z: int(z.partition(\":\")[0]))\n keyscache[t] = validkeys\n deleteto = 0\n toadd = []\n # process the colors in order (first come, first served)\n colorids = sorted(balances[t].keys())\n for colorid in colorids:\n value = balances[t][colorid]\n for i in xrange(deleteto, len(validkeys)):\n if value == 0:\n break\n key = validkeys[i]\n capacity = outs[key]\n capacity = int(capacity)\n if capacity > value:\n # the capacity of the outtx is bigger than the amount of colored coins\n deleteto = i\n moving = value\n else:\n # the colored balance is bigger\n deleteto = i+1\n moving = capacity\n # Transfer the colored coins\n prefix = key.partition(\":\")[0]\n newkey = prefix + \":v\" + str(colorid)\n outs[key] = int(outs[key]) - moving\n outs[newkey] = int(outsget(newkey, 0)) + moving\n value -= moving\n successor = prefix + \":s\"\n outsucc = outsget(successor, None)\n if outsucc is not None:\n # there is a successor for the coin - add it to the queue if needed\n balances[outsucc][colorid] += moving\n toadd.append(outsucc)\n self.log.debug(\"Moving %s to %s\", moving, util.hashDecode(outsucc))\n else:\n self.log.debug(\"Moving %s - no successor\", moving)\n balances[t][colorid] = value\n self.load_txs(toadd, cache)\n for x in toadd:\n if not x in enqueued:\n heapq.heappush(q, (int(cache[x][\"b\"]), x))\n enqueued.add(x)\n del validkeys[0:deleteto]\n mid = time.clock()\n\n sumremains = 0\n for d in balances.values():\n sumremains += sum(d.values())\n\n self.log.info(\"visited %d transaction(s). It took %f. Lost %d of %d satoshi.\", len(cache), mid-begin, sumremains,\n balance)\n\n # Finally, commit the changes to the database and release the lock\n pipe = self.r.pipeline()\n for t, d in cache.iteritems():\n for dk in d.keys():\n if \":v\" in dk:\n if d[dk] != 0 and d[dk] != \"0\":\n pipe.hset(t, dk, d[dk])\n else:\n pipe.hdel(t, dk)\n pass\n pipe.execute()\n end = time.clock()\n self.log.info(\"saved, it took %f\", end-mid)\n return id, cache\n\n","sub_path":"colored_bitcoins/colors_processor.py","file_name":"colors_processor.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
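The heap in color() above orders work by block number so that a transaction's incoming balances settle before the transaction itself is processed. A toy sketch of that propagation pattern in isolation; the block numbers and successor links are made up for illustration:

import heapq
from collections import defaultdict

block = {'a': 1, 'b': 2, 'c': 3}            # tx -> block number (toy values)
succ = {'a': ['b', 'c'], 'b': ['c']}        # tx -> successor txs
balances = defaultdict(int, {'a': 10})      # genesis balance enters at 'a'

q, enqueued = [(block['a'], 'a')], {'a'}
while q:
    _, t = heapq.heappop(q)
    enqueued.discard(t)
    outs = succ.get(t, [])
    if not outs:
        continue                            # terminal tx keeps its balance
    share, balances[t] = balances[t] // len(outs), 0
    for s in outs:                          # pass an equal share downstream
        balances[s] += share
        if s not in enqueued:
            heapq.heappush(q, (block[s], s))
            enqueued.add(s)

print(dict(balances))                       # {'a': 0, 'b': 0, 'c': 10}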
+{"seq_id":"485167006","text":"import os\n\nfrom flask_babel import Babel\nfrom flask_babel import gettext as _\nfrom flask import g\n\nfrom .core import socketio\nfrom .core.actor import Actor\nfrom .core.actor import ActorNotFound\nfrom .core.exceptions import AuthPermsDataError\nfrom .core.exceptions import BaseArgumentsError\nfrom .core.managers import DatabaseManager\nfrom .core.routes import auth_submodule as auth_submodule\nfrom .core.utils import get_session_token\n\nLOCALIZATION_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'localization')\n\n\ndef set_actor():\n \"\"\"\n Function that save actor in flask g.\n \"\"\"\n session_token = get_session_token()\n if session_token and not hasattr(g, 'actor'):\n try:\n actor = Actor.objects.get_by_session(session_token=session_token)\n except ActorNotFound:\n return\n\n setattr(g, 'actor', actor)\n\n return\n\n\nclass AuthPerms:\n \"\"\"\n Submodule configuration class. Needs to configure submodule variables and save it in correct places for usage in\n submodule.\n \"\"\"\n\n def __init__(self, app, settings_module, config_mode='PRODUCTION', database_credentials=None,\n database_credentials_dsn=None, is_manager=False, **kwargs):\n self.app = app\n self.config_mode = config_mode\n self.database_credentials = database_credentials\n self.database_credentials_dsn = database_credentials_dsn\n self.base_args = kwargs if not isinstance(kwargs, dict) else {}\n self.settings_module = settings_module\n self.is_manager = is_manager\n self.register_pybabel(app)\n\n self.parse_variables()\n self.configure_db(app=app)\n # Check if this is manager command\n if not self.table_exists(table_name='actor') or self.is_manager:\n self.set_base_args(app=app)\n return\n\n self.validate_base_args()\n self.set_base_args(app=app)\n self.app.register_blueprint(auth_submodule)\n socketio.init_app(app, async_mode=app.config.get('SOCKET_ASYNC_MODE'),\n path=self.base_args.get('SOCKET_PATH'))\n self.set_before_request_functions(app=app)\n\n def parse_variables(self):\n \"\"\"\n Parse variables from settings file with names which are in list.\n SERVICE_PRIVATE_KEY - service private key\n SERVICE_PUBLIC_KEY - service public key\n AUTH_PUB_KEY - auth service public key\n SERVICE_UUID - service uuid that could be received from auth database\n SOCKET_ASYNC_MODE - socket message broker. With runserver command set None, with daemon - gevent or eventlet\n PRIMARY_KEY_ONLY - when there is actor signature verification, check signature with initial key only\n DEFAULT_GROUP_NAME - group name where service need to add actor after registration by default\n SESSION_STORAGE - place where session stores. 
Could be SESSION or HEADERS or None\n SOCKET_PATH - path where socket server will work\n DATABASE - database credentials in dict\n DATABASE_URI - database URI\n BABEL_TRANSLATION_DIRECTORIES - directories where translations are stored\n BABEL_DEFAULT_LOCALE - default language code\n LANGUAGES - list of language codes which have translations\n LANGUAGE_COOKIE_KEY - cookie key where language code is stored\n LANGUAGES_INFORMATION - list of dicts with language information like name, code, block.\n DB_MINIMUM_CONNECTIONS - count of minimum connections in pool\n DB_MAXIMUM_CONNECTIONS - count of maximum connections in pool\n \"\"\"\n for key in dir(self.settings_module):\n if key.isupper() and key in ['SERVICE_PRIVATE_KEY', 'SERVICE_PUBLIC_KEY', 'AUTH_PUB_KEY', 'SERVICE_UUID',\n 'SOCKET_ASYNC_MODE', 'PRIMARY_KEY_ONLY', 'DEFAULT_GROUP_NAME',\n 'SESSION_STORAGE', 'SOCKET_PATH', 'DATABASE', 'DATABASE_URI',\n 'BABEL_TRANSLATION_DIRECTORIES', 'BABEL_DEFAULT_LOCALE', 'LANGUAGES',\n 'LANGUAGE_COOKIE_KEY', 'LANGUAGES_INFORMATION', 'DB_MINIMUM_CONNECTIONS',\n 'DB_MAXIMUM_CONNECTIONS', 'DEPENDED_SERVICES', 'REDIRECT_URL',\n 'AUTH_STANDALONE']:\n self.base_args[key] = getattr(self.settings_module, key)\n\n def validate_base_args(self):\n \"\"\"\n Validation of parsed variables\n \"\"\"\n if not self.base_args.get('SERVICE_PRIVATE_KEY'):\n raise AuthPermsDataError('SERVICE_PRIVATE_KEY is required parameter for configuring auth permission part. '\n '\\n SERVICE_PRIVATE_KEY - your service private key.')\n\n if not self.base_args.get('AUTH_STANDALONE'):\n if not self.base_args.get('AUTH_PUB_KEY'):\n raise AuthPermsDataError('AUTH_PUB_KEY is required parameter for configuring auth permission part. '\n '\\n AUTH_PUB_KEY - auth public key where your service registered.')\n\n if not self.base_args.get('AUTH_STANDALONE'):\n query = \"\"\"SELECT EXISTS(SELECT 1 FROM actor WHERE initial_key=%s)\"\"\"\n if not self.app.db.fetchone(query, [self.base_args.get('AUTH_PUB_KEY')]).get('exists'):\n raise AuthPermsDataError('Auth service is not registered in your database. \\n Please register it.')\n\n if not self.base_args.get('SERVICE_UUID'):\n service_uuid = self.get_current_service_uuid()\n if not service_uuid:\n raise AuthPermsDataError('SERVICE_UUID or SERVICE_PUBLIC_KEY is required parameter for configuring '\n 'auth permission part. \\n If set SERVICE_PUBLIC_KEY you should add row with your '\n 'service information in database.'\n '\\n SERVICE_UUID - your service uuid that you received on the auth service during '\n 'registration.')\n\n self.base_args['SERVICE_UUID'] = service_uuid\n\n query = \"\"\"SELECT EXISTS(SELECT 1 FROM actor WHERE uuid=%s)\"\"\"\n if not self.app.db.fetchone(query, [self.base_args.get('SERVICE_UUID')]).get('exists'):\n raise AuthPermsDataError('Your service is not registered in your database. \\n Please register it.')\n\n query = \"\"\"SELECT EXISTS(SELECT 1 FROM actor WHERE actor_type='group' AND uinfo->>'group_name' = 'DEFAULT')\"\"\"\n if not self.app.db.fetchone(query).get('exists'):\n raise AuthPermsDataError('There is no DEFAULT group in your database. \\n Please create it based on auth '\n 'information.')\n\n query = \"\"\"SELECT EXISTS(SELECT 1 FROM actor WHERE actor_type='group' AND uinfo->>'group_name' = 'ADMIN')\"\"\"\n if not self.app.db.fetchone(query).get('exists'):\n raise AuthPermsDataError('There is no ADMIN group in your database. 
\\n Please create it based on auth '\n 'information.')\n\n query = \"\"\"SELECT EXISTS(SELECT 1 FROM actor WHERE actor_type='group' AND uinfo->>'group_name' = 'BAN')\"\"\"\n if not self.app.db.fetchone(query).get('exists'):\n raise AuthPermsDataError('There is no BAN group in your database. \\n Please create it based on auth '\n 'information.')\n\n if not self.base_args.get('SOCKET_ASYNC_MODE') and self.config_mode == 'PRODUCTION':\n raise AuthPermsDataError('SOCKET_ASYNC_MODE is required parameter for configuring auth permission part. '\n '\\n SOCKET_ASYNC_MODE - should be set in gevent or eventlet.')\n else:\n print('NOTICE! IMPORTANT ! NOTICE! \\n You have not set SOCKET_ASYNC_MODE option. '\n 'Set this option in gevent or eventlet or stay None if you are using runserver.')\n\n if not self.base_args.get('PRIMARY_KEY_ONLY'):\n print('NOTICE! You have not set PRIMARY_KEY_ONLY option. This option is used for signature verification '\n 'on authentication with only first time generated keys pair.')\n\n if not self.base_args.get('DEFAULT_GROUP_NAME'):\n print('NOTICE! You have not set DEFAULT_GROUP_NAME option. This option is used for automatically adding '\n 'actor in specified group during registration. ! IMPORTANT ! Auth service will set default group '\n 'which is set on auth service.')\n\n if not self.base_args.get('SESSION_STORAGE'):\n print('NOTICE! You have not set SESSION_STORAGE option. This option is used for adding default js '\n 'configuration on base template. Set it in SESSION value if you need base js scripts.')\n\n if not self.base_args.get('SOCKET_PATH') or not self.base_args.get('SOCKET_PATH', '').startswith('/'):\n print('NOTICE! You have not pass SOCKET_PATH variable. This variable is used to set custom socket path. '\n 'Default set in /socket. This variable should start with /.')\n self.base_args['SOCKET_PATH'] = '/socket'\n\n if not self.base_args.get('DEPENDED_SERVICES'):\n print('NOTICE! You have not pass DEPENDED_SERVICES variable. This variable is used to set depended services. '\n 'This variable should be a dictionary.')\n self.base_args['DEPENDED_SERVICES'] = {}\n\n if not self.base_args.get('REDIRECT_URL'):\n print('NOTICE! You have not pass REDIRECT_URL variable.'\n 'This variable is used to set first page after login, if you request login doesn\\'t have referrer. ')\n self.base_args['REDIRECT_URL'] = None\n\n if not self.base_args.get('BABEL_DEFAULT_LOCALE'):\n print('NOTICE! You have not pass BABEL_DEFAULT_LOCALE variable. This variable is used to set default '\n 'language locale. Default set in en.')\n self.base_args['BABEL_DEFAULT_LOCALE'] = 'en'\n\n if not self.base_args.get('AUTH_STANDALONE'):\n self.base_args['AUTH_STANDALONE'] = False\n\n if not self.base_args.get('LANGUAGES'):\n self.base_args['LANGUAGES'] = []\n\n if not self.base_args.get('LANGUAGE_COOKIE_KEY'):\n self.base_args['LANGUAGE_COOKIE_KEY'] = 'language'\n\n if not self.base_args.get('LANGUAGES_INFORMATION'):\n self.base_args['LANGUAGE_INFORMATION'] = []\n if self.base_args.get('LANGUAGES'):\n for code in self.base_args.get('LANGUAGES'):\n if code == 'ru':\n name = _('Russian')\n elif code == 'cn':\n name = _('Chinese')\n elif code == 'en':\n name = _('English')\n else:\n print('Unknown language code in LANGUAGES variable, please check it - %s. '\n 'Add language information in list LANGUAGES_INFORMATION in settings file like\\n '\n 'LANGUAGES_INFORMATION=[{\"code\": \"en\", \"name\": \"English\"}, ...]' % code)\n raise BaseArgumentsError(message=\"LANGUAGE_INFORMATION error. 
Unknown code.\")\n\n self.base_args['LANGUAGE_INFORMATION'].append({\"code\": code, \"name\": name})\n else:\n for language in self.base_args.get('LANGUAGES_INFORMATION'):\n if not language.get('code'):\n print('There is no language code for %s.' % language)\n raise BaseArgumentsError(message=\"LANGUAGE_INFORMATION code error.\")\n\n if not language.get('name'):\n print('There is no language name for %s.' % language)\n raise BaseArgumentsError(message=\"LANGUAGE_INFORMATION name error.\")\n\n if not self.base_args.get('DB_MINIMUM_CONNECTIONS'):\n self.base_args['DB_MINIMUM_CONNECTIONS'] = 1\n else:\n if not isinstance(self.base_args.get('DB_MINIMUM_CONNECTIONS'), int):\n try:\n self.base_args['DB_MINIMUM_CONNECTIONS'] = int(self.base_args.get('DB_MINIMUM_CONNECTIONS'))\n except Exception as e:\n self.base_args['DB_MINIMUM_CONNECTIONS'] = 1\n\n if not self.base_args.get('DB_MAXIMUM_CONNECTIONS'):\n self.base_args['DB_MAXIMUM_CONNECTIONS'] = 10\n else:\n if not isinstance(self.base_args.get('DB_MAXIMUM_CONNECTIONS'), int):\n try:\n self.base_args['DB_MAXIMUM_CONNECTIONS'] = int(self.base_args.get('DB_MAXIMUM_CONNECTIONS'))\n except Exception as e:\n self.base_args['DB_MAXIMUM_CONNECTIONS'] = 10\n\n def set_base_args(self, app):\n \"\"\"\n Save parsed arguments in app config.\n \"\"\"\n if not self.base_args.get('BABEL_TRANSLATION_DIRECTORIES') or LOCALIZATION_PATH not in \\\n self.base_args.get('BABEL_TRANSLATION_DIRECTORIES'):\n if not self.base_args.get('BABEL_TRANSLATION_DIRECTORIES'):\n self.base_args['BABEL_TRANSLATION_DIRECTORIES'] = LOCALIZATION_PATH + ';'\n else:\n self.base_args['BABEL_TRANSLATION_DIRECTORIES'] += LOCALIZATION_PATH \\\n if self.base_args['BABEL_TRANSLATION_DIRECTORIES'].endswith(';') else ';' + LOCALIZATION_PATH + ';'\n\n if not self.base_args.get('SERVICE_UUID') and self.table_exists(table_name=\"actor\"):\n self.base_args['SERVICE_UUID'] = self.get_current_service_uuid()\n\n app.config.update(self.base_args)\n\n def configure_db(self, app):\n \"\"\"\n Configure database manager and add it as app attribute named db.\n \"\"\"\n min_connections = app.config.get('DB_MINIMUM_CONNECTIONS', self.base_args.get('DB_MINIMUM_CONNECTIONS', 1))\n max_connections = app.config.get('DB_MAXIMUM_CONNECTIONS', self.base_args.get('DB_MAXIMUM_CONNECTIONS', 10))\n if self.database_credentials:\n DatabaseManager(database=self.database_credentials, min_connection=min_connections,\n max_connections=max_connections).init_app(app=app)\n elif self.database_credentials_dsn:\n DatabaseManager(dsn=self.database_credentials_dsn, min_connection=min_connections,\n max_connections=max_connections).init_app(app=app)\n elif self.base_args.get('DATABASE'):\n DatabaseManager(database=self.base_args.get('DATABASE'), min_connection=min_connections,\n max_connections=max_connections).init_app(app=app)\n elif self.base_args.get('DATABASE_URI'):\n DatabaseManager(dsn=self.base_args.get('DATABASE_URI'), min_connection=min_connections,\n max_connections=max_connections).init_app(app=app)\n else:\n raise AuthPermsDataError('There was no database information passed. You can pass in database_credentials '\n 'dict with database information or database_credentials_dsn or DATABASE from '\n 'kwargs or DATABASE_URI from kwargs.'\n '\\n database_credentials - argument with database credentials in dict. '\n '\\n database_credentials_dsn - argument with database credentials in string. '\n '\\n DATABASE - kwargs argument with database credentials in dict. 
'\n '\\n DATABASE_URI - kwargs argument with database credentials in string.')\n\n if self.base_args.get('DATABASE'):\n self.base_args.pop('DATABASE')\n\n if self.base_args.get('DATABASE_URI'):\n self.base_args.pop('DATABASE_URI')\n\n def table_exists(self, table_name):\n \"\"\"\n Check if table with received table_name exists in database.\n :param table_name: str. Required. Table name.\n :return: True or False.\n \"\"\"\n query = \"SELECT EXISTS(SELECT 1 FROM information_schema.tables \" \\\n \"WHERE table_schema='public' AND table_name=%s)\"\n values = [table_name]\n return self.app.db.fetchone(query, values).get('exists')\n\n def get_current_service_uuid(self):\n \"\"\"\n Get service uuid from database with SERVICE_PUBLIC_KEY variable. Searching in initial key or in secondary keys.\n :return: UUID or None.\n \"\"\"\n if not self.base_args.get('SERVICE_PUBLIC_KEY'):\n return None\n\n query = \"\"\"SELECT uuid AS uuid FROM actor WHERE actor_type='service' AND (initial_key=%s OR \n %s = ANY(SELECT value FROM jsonb_each_text(secondary_keys))) LIMIT 1\"\"\"\n result = self.app.db.fetchone(query, [self.base_args.get('SERVICE_PUBLIC_KEY'),\n self.base_args.get('SERVICE_PUBLIC_KEY')])\n if not result:\n return None\n\n return result.get('uuid')\n\n @staticmethod\n def register_pybabel(app):\n \"\"\"\n Initialize pybabel extension\n \"\"\"\n Babel(app=app)\n\n @staticmethod\n def set_before_request_functions(app):\n \"\"\"\n Create before request function for saving actor in g.\n \"\"\"\n app.before_request(set_actor)\n","sub_path":"auth_perms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
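parse_variables above copies a whitelist of upper-case names off the settings module with dir()/getattr(). The same pattern in miniature, with SimpleNamespace standing in for a real settings module and a made-up two-name whitelist:

from types import SimpleNamespace

settings = SimpleNamespace(SERVICE_UUID='abc-123', DEBUG=True, lower_case=1)
WHITELIST = {'SERVICE_UUID', 'SESSION_STORAGE'}    # small subset of the real list

base_args = {key: getattr(settings, key)
             for key in dir(settings)
             if key.isupper() and key in WHITELIST}
print(base_args)                                   # {'SERVICE_UUID': 'abc-123'}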
+{"seq_id":"323561453","text":"import csv\n\nVERSION = '1.2.1'\nUPDATE_DATE = '2021/09/09'\n\n\n# listに楽曲データを追加\ndef add_data_to_list(data_list, title, genre, difficulty, version, const, score):\n new_data = {}\n new_data['title'] = title\n new_data['genre'] = genre\n new_data['difficulty'] = difficulty\n new_data['version'] = version\n new_data['const'] = const\n new_data['score'] = score\n\n # スコアから倍率計算\n if score == 1000000:\n ratio = 4.0\n elif score >= 940000:\n ratio = (score - 940000) // 10000 * 0.25 + 2.75\n elif score >= 900000:\n ratio = (score - 900000) // 20000 * 0.5 + 2\n elif score >= 800000:\n ratio = (score - 800000) // 50000 * 0.5 + 1\n elif score > 0:\n ratio = score // 100000 * 0.1 + 0.1\n else:\n ratio = 0\n\n if const != 'NA':\n new_data['rating'] = round(const * ratio, 2)\n else:\n new_data['rating'] = 0\n\n data_list.append(new_data)\n\n\n# 定数・スコアを読み込んでlistを作成\ndef make_list():\n # wacca_const.csvの読み込み\n # CSV: title, version, genre, h_const, e_const, i_const, i_version\n # これを、以下の構造のリストに整形して格納\n # [title, genre, [version, h_const], [version, e_const], [version, i_const]]\n const_file = open('wacca_const.csv', 'r', encoding='utf-8')\n fc = csv.reader(const_file)\n header = next(fc)\n const_list = []\n for row in fc:\n new_data = [row[0], row[2]]\n new_data.append([row[1], row[3]])\n new_data.append([row[1], row[4]])\n if row[5] != 'NA':\n if row[6] != 'NA':\n new_data.append([row[6], row[5]])\n else:\n new_data.append([row[1], row[5]])\n else:\n new_data.append('NA')\n const_list.append(new_data)\n\n # wacca_score.csvの読み込み\n # CSV: title, n_score, h_score, e_score, i_score\n # この順のままリストにして格納\n score_file = open('wacca_score.csv', 'r', encoding='utf-8')\n fs = csv.reader(score_file)\n header = next(fs)\n score_list = []\n for row in fs:\n score_list.append(row)\n\n data_list = []\n\n for const in const_list:\n is_played = False\n for score in score_list:\n if const[0] == score[0]:\n # プレイ済なので要素を追加\n is_played = True\n if const[2] != 'NA': # hard\n add_data_to_list(data_list, const[0], const[1], 'HRD', const[2][0], float(const[2][1]), int(score[2]))\n if const[3] != 'NA': # expert\n add_data_to_list(data_list, const[0], const[1], 'EXP', const[3][0], float(const[3][1]), int(score[3]))\n if const[4] != 'NA': # inferno\n add_data_to_list(data_list, const[0], const[1], 'INF', const[4][0], float(const[4][1]), int(score[4]))\n score_list.remove(score)\n break\n if not is_played:\n # 未プレイなのでスコアを0にして要素を追加\n if const[2] != 'NA': # hard\n add_data_to_list(data_list, const[0], const[1], 'HRD', const[2][0], float(const[2][1]), 0)\n if const[3] != 'NA': # expert\n add_data_to_list(data_list, const[0], const[1], 'EXP', const[3][0], float(const[3][1]), 0)\n if const[4] != 'NA': # inferno\n add_data_to_list(data_list, const[0], const[1], 'INF', const[4][0], float(const[4][1]), 0)\n\n # 定数データのないスコアデータを定数を'NA'にして追加\n no_const_num = 0\n for score in score_list:\n if score[2] != '0': # hard\n no_const_num += 1\n add_data_to_list(data_list, score[0], 'NA', 'HRD', 'NA', 'NA', int(score[2]))\n if score[3] != '0': # expert\n no_const_num += 1\n add_data_to_list(data_list, score[0], 'NA', 'EXP', 'NA', 'NA', int(score[3]))\n if score[4] != '0': # inferno\n no_const_num += 1\n add_data_to_list(data_list, score[0], 'NA', 'INF', 'NA', 'NA', int(score[4]))\n if no_const_num > 0:\n print(\"[NOTICE] {} song have no const data. Rating may be incorrect value. 
Please check if your 'wacca_const.csv' is the newest version.\\n\".format(no_const_num))\n\n return data_list\n\n\n# listの中身を整形して出力\ndef print_list(list):\n for row in list:\n if row['difficulty'] == 'HRD':\n print(row['title'] + ' (HRD)')\n elif row['difficulty'] == 'INF':\n print(row['title'] + ' (INF)')\n else:\n print(row['title'])\n print(\"\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(row['version'], row['genre'], row['difficulty'], row['const'], row['score'], row['rating']))\n\n\n# レート対象曲・候補曲を表示\ndef show_ratings(data_list):\n # プレイ済みの楽曲データを単曲レート降順でソート\n tmp_list = [data for data in data_list if data['rating'] > 0]\n tmp_list = sorted(tmp_list, key=lambda x: (int)(x['rating']/0.1)*0.1, reverse=True)\n\n new_value = 0\n old_value = 0\n new_list = []\n old_list = []\n new_list_len = 0\n old_list_len = 0\n new_candidate_list = []\n old_candidate_list = []\n\n # 新枠対象曲を追加\n for data in tmp_list[:]:\n if data['version'] == 'Re':\n if new_list_len < 15:\n new_value += data['rating']\n new_list.append(data)\n tmp_list.remove(data)\n new_list_len += 1\n else:\n break\n \n # 旧枠対象曲を追加\n for data in tmp_list[:]:\n if data['version'] != 'Re':\n if old_list_len < 35:\n old_value += data['rating']\n old_list.append(data)\n tmp_list.remove(data)\n old_list_len += 1\n else:\n break\n\n if (new_list_len == 15):\n # 新枠候補曲を追加\n for data in tmp_list:\n if data['version'] == 'Re' and data['score'] < 990000:\n if len(new_candidate_list) < 10 and data['const'] * 4.0 >= new_list[14]['rating']:\n new_candidate_list.append(data)\n else:\n break\n else:\n # 新枠が埋まりきっていないので空データを追加\n for i in range(len(new_list), 15):\n add_data_to_list(new_list, \"Not played\", 'NA', 'NA', 'NA', 0, 0)\n\n if (old_list_len == 35):\n # 旧枠候補曲を追加\n for data in tmp_list:\n if data['version'] != 'Re' and data['score'] < 990000:\n if len(old_candidate_list) < 10 and data['const'] * 4.0 >= old_list[34]['rating']:\n old_candidate_list.append(data)\n else:\n break\n else:\n # 旧枠が埋まりきっていないので空データを追加\n for i in range(len(old_list), 35):\n add_data_to_list(old_list, \"Not played\", 'NA', 'NA', 'NA', 0, 0)\n\n print(\"Rating: {:.3f}\\n\".format(new_value + old_value))\n if new_list_len != 0:\n print(\"--- 新枠 対象曲 (average: {:.3f})\\n\".format(new_value / new_list_len))\n print_list(new_list)\n else:\n print(\"--- 新枠 対象曲 (average: --)\\n\")\n if old_list_len != 0:\n print(\"--- 旧枠 対象曲 (average: {:.3f})\\n\".format(old_value / old_list_len))\n print_list(old_list)\n else:\n print(\"--- 旧枠 対象曲 (average: --)\\n\")\n print(\"--- 新枠 候補曲\\n\")\n if (new_list_len == 15):\n print_list(new_candidate_list)\n print(\"--- 旧枠 候補曲\\n\")\n if (old_list_len == 35):\n print_list(old_candidate_list)\n\n\n\ndef main():\n print(\"WACCA Tool ver.{} (released on {})\\n\".format(VERSION, UPDATE_DATE))\n data_list = make_list()\n show_ratings(data_list)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
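# Worked example of the score-to-multiplier mapping in add_data_to_list()
# above: a 970,000 score falls in the 940,000+ bracket, so
# ratio = (970000 - 940000) // 10000 * 0.25 + 2.75 = 3.5, and on a chart
# constant of 13.0 that gives a single-chart rating of 45.5.
score, const = 970000, 13.0
ratio = (score - 940000) // 10000 * 0.25 + 2.75
assert ratio == 3.5
assert round(const * ratio, 2) == 45.5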
+{"seq_id":"231702456","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\ndef recursion(n):\n return recursion_iter(n,1)\n\ndef recursion_iter(num,lastres):\n if num == 1:\n return lastres\n return recursion_iter(num - 1, num * lastres)\n\n# print(recursion(101))\n\n# 汉诺塔移动,它接收参数n,表示3个柱子A、B、C中第1个柱子A的盘子数量,然后打印出把所有盘子从A借助B移动到C的方法,例如:\ndef move(n,a='A',b='B',c='C'):\n if n == 1:\n print('%s->%s'%(a,c))\n return None\n else:\n move(n-1,a,c,b)\n print('n是:%s,把%s->%s'%(n-1,a,c))\n move(n-1,b,a,c)\n return None\n\nmove(4)\n\n\nfor x, y in [(1, 1), (2, 4), (3, 9)]:\n print(x, y)","sub_path":"demo/demo_recursion.py","file_name":"demo_recursion.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"13439215","text":"import copy\nfrom io import StringIO\nimport os\nfrom unittest.mock import patch\n\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.core.management import call_command\n\nfrom blast_ncbi.utils import BLASTNcbi\n\n\nTEST_PATH = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(TEST_PATH, \"CP100-10_COI-begin.xml\"), \"r\") as handle:\n ncbi_return_handle1 = StringIO(handle.read())\n ncbi_return_handle2 = copy.copy(ncbi_return_handle1)\n\n\nclass TestNcbiBlast(TestCase):\n def setUp(self):\n args = []\n opts = {'dumpfile': settings.MEDIA_ROOT + 'test_data.xml', 'verbosity': 0}\n cmd = 'migrate_db'\n call_command(cmd, *args, **opts)\n\n self.blast = BLASTNcbi(blast_type=\"remote\", voucher_code=\"CP100-10\",\n gene_code=\"COI-begin\")\n\n @patch(\"Bio.Blast.NCBIWWW.qblast\", return_value=ncbi_return_handle1)\n def test_blast_with_accession_number_in_header(self, mock_qblast):\n self.blast.save_query_to_file()\n self.blast.do_blast()\n result = self.blast.parse_blast_output()\n self.blast.delete_query_output_files()\n self.assertTrue(len(result) > 0)\n\n @patch(\"Bio.Blast.NCBIWWW.qblast\", return_value=ncbi_return_handle2)\n def test_index(self, mock_blast):\n response = self.client.get('/blast_ncbi/CP100-10/COI-begin/')\n self.assertEqual(200, response.status_code)\n","sub_path":"blast_ncbi/tests/tests_blast_ncbi.py","file_name":"tests_blast_ncbi.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"189153644","text":"# coding=utf-8\n# Author: Wonder \n\n\"\"\"\nSimple HyperText Markup Language document tree Writer.\n\nThe output conforms to the XHTML version 1.0 Transitional DTD\n(*almost* strict). The output contains a minimum of formatting\ninformation. The cascading style sheet \"html4css1.css\" is required\nfor proper viewing with a modern graphical browser.\n\"\"\"\n\n__docformat__ = 'reStructuredText'\n\nimport sys, os, os.path, time, re\nfrom docutils import nodes\n\ntry:\n import Image # check for the Python Imaging Library\nexcept ImportError:\n Image = None\n\nimport docutils\nfrom docutils import frontend, nodes, utils, writers, languages, io\nfrom docutils.transforms import writer_aux\nfrom docutils.writers import html4css1\n\nclass Writer(html4css1.Writer): # 我的Writer基于html4css1\n supported = ('html', 'xhtml', '''Formats this writer supports.''')\n \n default_stylesheets = ['pretty.css']\n default_stylesheet_path = ','.join(\n [os.path.join(os.path.dirname(__file__), stylesheet)\n for stylesheet in default_stylesheets])\n\n config_section = 'prettyhtml writer'\n config_section_dependencies = ('writers', 'html4css1 writer')\n\n settings_spec = frontend.filter_settings_spec(\n html4css1.Writer.settings_spec,\n 'field_name_limit', 'option_limit',\n stylesheet_path = (\n 'Specify comma separated list of stylesheet paths. '\n 'With --link-stylesheet, '\n 'the path is rewritten relative to the output HTML file. '\n 'Default: \"%s\"' % default_stylesheet_path,\n ['--stylesheet-path'],\n {'metavar': '', 'overrides': 'stylesheet',\n 'default': default_stylesheet_path}))\n\n def __init__(self):\n writers.Writer.__init__(self)\n self.translator_class = HTMLTranslator\n\nclass HTMLTranslator(html4css1.HTMLTranslator):\n def __init__(self, document):\n html4css1.HTMLTranslator.__init__(self, document)\n\n def namespaced_id(self, node_or_id):\n '''\n 为避免名字冲突, 给文档内部ID加上修饰,\n '''\n ns = 'rstns-'\n if isinstance(node_or_id, nodes.Node):\n return ns + node_or_id['refid']\n else:\n return ns + node_or_id\n\n def starttag(self, node, tagname, suffix='\\n', empty=0, **attributes):\n \"\"\"\n Construct and return a start tag given a node (id & class attributes\n are extracted), tag name, and optional attributes.\n \"\"\"\n tagname = tagname.lower()\n prefix = []\n atts = {}\n for (name, value) in attributes.items():\n atts[name.lower()] = value\n \n #Handle class attribute\n final_classes = node.get('classes', [])\n if 'class' in atts:\n final_classes.append(atts['class'])\n if final_classes:\n atts['class'] = ' '.join(final_classes)\n\n # Handle id attribute\n assert 'id' not in atts\n final_ids = []\n final_ids.extend(node.get('ids', []))\n if 'ids' in atts:\n final_ids.extend(atts['ids'])\n del atts['ids']\n if final_ids:\n # Added by Wonder:\n # add namespace to internal ids\n final_ids = [ self.namespaced_id(i) for i in final_ids ]\n atts['id'] = final_ids[0]\n for id in final_ids[1:]:\n # Add empty \"span\" elements for additional IDs. Note\n # that we cannot use empty \"a\" elements because there\n # may be targets inside of references, but nested \"a\"\n # elements aren't allowed in XHTML (even if they do\n # not all have a \"href\" attribute).\n if empty:\n # Empty tag. Insert target right in front of element.\n prefix.append('' % id)\n else:\n # Non-empty tag. 
Place the auxiliary tag\n # *inside* the element, as the first child.\n suffix += '' % id\n attlist = atts.items()\n attlist.sort()\n parts = [tagname]\n for name, value in attlist:\n # value=None was used for boolean attributes without\n # value, but this isn't supported by XHTML.\n assert value is not None\n if isinstance(value, list):\n values = [unicode(v) for v in value]\n parts.append('%s=\"%s\"' % (name.lower(),\n self.attval(' '.join(values))))\n else:\n parts.append('%s=\"%s\"' % (name.lower(),\n self.attval(unicode(value))))\n if empty:\n infix = ' /'\n else:\n infix = ''\n return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix\n\n def visit_citation_reference(self, node):\n href = '#' + self.namespaced_id(node)\n self.body.append(self.starttag(\n node, 'a', '[', CLASS='citation-reference', href=href))\n\n def footnote_backrefs(self, node):\n backlinks = []\n #backrefs = node['backrefs']\n # Added by wonder:\n backrefs = [ self.namespaced_id(i) for i in node['backrefs'] ]\n if self.settings.footnote_backlinks and backrefs:\n if len(backrefs) == 1:\n self.context.append('')\n self.context.append('')\n self.context.append(''\n % backrefs[0])\n else:\n i = 1\n for backref in backrefs:\n backlinks.append('%s'\n % (backref, i))\n i += 1\n self.context.append('(%s) ' % ', '.join(backlinks))\n self.context += ['', '']\n else:\n self.context.append('')\n self.context += ['', '']\n # If the node does not only consist of a label.\n if len(node) > 1:\n # If there are preceding backlinks, we do not set class\n # 'first', because we need to retain the top-margin.\n if not backlinks:\n node[1]['classes'].append('first')\n node[-1]['classes'].append('last')\n\n def visit_footnote_reference(self, node):\n href = '#' + self.namespaced_id(node)\n format = self.settings.footnote_references\n if format == 'brackets':\n suffix = '['\n self.context.append(']')\n else:\n assert format == 'superscript'\n suffix = ''\n self.context.append('')\n self.body.append(self.starttag(node, 'a', suffix,\n CLASS='footnote-reference', href=href))\n\n def visit_problematic(self, node):\n if node.hasattr('refid'):\n self.body.append('' % self.namespaced_id(node))\n self.context.append('')\n else:\n self.context.append('')\n self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))\n\n def visit_reference(self, node):\n atts = {'class': 'reference'}\n if 'refuri' in node:\n atts['href'] = node['refuri']\n if ( self.settings.cloak_email_addresses\n and atts['href'].startswith('mailto:')):\n atts['href'] = self.cloak_mailto(atts['href'])\n self.in_mailto = 1\n atts['class'] += ' external'\n else:\n assert 'refid' in node, \\\n 'References must have \"refuri\" or \"refid\" attribute.'\n atts['href'] = '#' + self.namespaced_id(node)\n atts['class'] += ' internal'\n if not isinstance(node.parent, nodes.TextElement):\n assert len(node) == 1 and isinstance(node[0], nodes.image)\n atts['class'] += ' image-reference'\n self.body.append(self.starttag(node, 'a', '', **atts))\n\n def visit_system_message(self, node):\n self.body.append(self.starttag(node, 'div', CLASS='system-message'))\n self.body.append('')\n backref_text = ''\n if len(node['backrefs']):\n # Edited by Wonder:\n #backrefs = node['backrefs']\n backrefs = [ self.namespaced_id(i) for i in node['backrefs'] ]\n if len(backrefs) == 1:\n backref_text = ('; backlink'\n % backrefs[0])\n else:\n i = 1\n backlinks = []\n for backref in backrefs:\n backlinks.append('%s' % (backref, i))\n i += 1\n backref_text = ('; backlinks: %s'\n % ', 
'.join(backlinks))\n if node.hasattr('line'):\n line = ', line %s' % node['line']\n else:\n line = ''\n self.body.append('System Message: %s/%s '\n '(%s%s)%s
\\n'\n % (node['type'], node['level'],\n self.encode(node['source']), line, backref_text))\n\n ##def visit_target(self, node):\n ## if not ('refuri' in node or 'refid' in node\n ## or 'refname' in node):\n ## self.body.append(self.starttag(node, 'span', '', CLASS='target'))\n ## self.context.append('')\n ## else:\n ## self.context.append('')\n\n def visit_title(self, node):\n \"\"\"Only 6 section levels are supported by HTML.\"\"\"\n check_id = 0\n close_tag = '\\n'\n if isinstance(node.parent, nodes.topic):\n self.body.append( # 目录\n self.starttag(node, 'div', '', CLASS='topic-title first'))\n elif isinstance(node.parent, nodes.sidebar):\n self.body.append(\n self.starttag(node, 'div', '', CLASS='sidebar-title'))\n elif isinstance(node.parent, nodes.Admonition):\n self.body.append(\n self.starttag(node, 'div', '', CLASS='admonition-title'))\n elif isinstance(node.parent, nodes.table):\n self.body.append(\n self.starttag(node, 'caption', ''))\n close_tag = '\\n'\n elif isinstance(node.parent, nodes.document):\n self.body.append( # 文档级标题\n self.starttag(node, 'h1', '', CLASS='doctitle'))\n close_tag = '\\n'\n self.in_document_title = len(self.body)\n else:\n assert isinstance(node.parent, nodes.section)\n h_level = self.section_level + self.initial_header_level - 1\n atts = {}\n if (len(node.parent) >= 2 and\n isinstance(node.parent[1], nodes.subtitle)):\n atts['CLASS'] = 'with-subtitle'\n self.body.append(\n self.starttag(node, 'h%s' % h_level, CLASS=\"sectitle\", **atts))\n atts = {}\n if node.hasattr('refid'):\n atts['class'] = 'toc-backref'\n atts['title'] = 'back to content'\n atts['href'] = '#' + self.namespaced_id(node)\n if atts:\n self.body.append(self.starttag({}, 'a', '', **atts))\n close_tag = '\\n' % (h_level)\n else:\n close_tag = '\\n' % (h_level)\n self.context.append(close_tag)\n \n def depart_title(self, node):\n self.body.append(self.context.pop())\n if self.in_document_title:\n self.title = self.body[self.in_document_title:-1]\n self.in_document_title = 0\n self.body_pre_docinfo.extend(self.body)\n self.html_title.extend(self.body)\n del self.body[:]\n # Added by Wonder:\n if( isinstance(node.parent, nodes.section)\n and not isinstance(node, nodes.subtitle) ):\n # BEGIN OF SECTION_BODY\n self.body.append('\\n')\n\n def visit_section(self, node):\n self.section_level += 1\n # Edited by Wonder:\n class_name = 'section section%d' % self.section_level\n self.body.append(\n self.starttag(node, 'div', CLASS=class_name))\n\n def depart_section(self, node):\n self.section_level -= 1\n # Edited by Wonder:\n # END OF SECTION_BODY and SECTION\n self.body.append('
\\n\\n')\n\n def visit_field_name(self, node):\n atts = {}\n if self.in_docinfo:\n atts['class'] = 'docinfo-name'\n else:\n atts['class'] = 'field-name'\n if ( self.settings.field_name_limit\n and len(node.astext()) > self.settings.field_name_limit):\n atts['colspan'] = 2\n self.context.append('\\n| | ')\n else:\n self.context.append('')\n self.body.append(self.starttag(node, 'th', '', **atts))\n\n def visit_admonition(self, node):\n self.body.append(self.starttag(node, 'div', '', CLASS='admonition'))\n self.set_first_last(node)\n\n def depart_admonition(self, node=None):\n self.body.append('\\n')\n\n","sub_path":"wonder/rst2html/prettywriter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
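# A minimal driver for the custom Writer above (Python 2 era, matching the
# module; the import path follows the record's sub_path, and the reST source
# here is made up for illustration).
from docutils.core import publish_string
from wonder.rst2html import prettywriter

rst_source = """\
Title
=====

A paragraph with a footnote [#]_.

.. [#] Footnote text.
"""

html = publish_string(source=rst_source, writer=prettywriter.Writer())
print(html)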
+{"seq_id":"36882325","text":"#!/usr/bin/env python2.7\n\n\"\"\"\nColumbia's COMS W4111.001 Introduction to Databases\nExample Webserver\n\nTo run locally:\n\n python server.py\n\nGo to http://localhost:8111 in your browser.\n\nA debugger such as \"pdb\" may be helpful for debugging.\nRead about it online.\n\"\"\"\n\nimport os\nfrom sqlalchemy import create_engine, text\nfrom sqlalchemy.pool import NullPool\nfrom flask import Flask, request, render_template, g, redirect, Response, abort, jsonify\n\ntmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\napp = Flask(__name__, template_folder=tmpl_dir)\n\n\n#\n# The following is a dummy URI that does not connect to a valid database. You will need to modify it to connect to your Part 2 database in order to use the data.\n#\n# XXX: The URI should be in the format of:\n#\n# postgresql://USER:PASSWORD@104.196.18.7/w4111\n#\n# For example, if you had username biliris and password foobar, then the following line would be:\n#\n# DATABASEURI = \"postgresql://biliris:foobar@104.196.18.7/w4111\"\n#\n# DATABASEURI = \"postgresql://user:password@104.196.18.7/w4111\"\n# DATABASEURI = \"postgresql://postgres:postgres@127.0.0.1/HW2\"\nDATABASEURI = \"postgresql://uf2110:pzdpzd@34.74.207.68/proj1part2\"\n\n#\n# This line creates a database engine that knows how to connect to the URI above.\n#\nengine = create_engine(DATABASEURI)\n\n#\n# Example of running queries in your database\n# Note that this will probably not work if you already have a table named 'test' in your database, containing meaningful data. This is only an example showing you how to run queries in your database using SQLAlchemy.\n#\n\n# Preload the list of domains\nDomainList = [domain[0] for domain in engine.execute(\"SELECT name FROM domain\")]\nOrganizationList = [org[0] for org in engine.execute(\"SELECT name FROM Organization\")]\nIPList = [ip[0] for ip in engine.execute(\"SELECT ip FROM Endpoint\")]\n\n\n@app.before_request\ndef before_request():\n \"\"\"\n This function is run at the beginning of every web request \n (every time you enter an address in the web browser).\n We use it to setup a database connection that can be used throughout the request.\n\n The variable g is globally accessible.\n \"\"\"\n try:\n g.conn = engine.connect()\n except:\n print(\"uh oh, problem connecting to database\")\n import traceback; traceback.print_exc()\n g.conn = None\n\n@app.teardown_request\ndef teardown_request(exception):\n \"\"\"\n At the end of the web request, this makes sure to close the database connection.\n If you don't, the database could run out of memory!\n \"\"\"\n try:\n g.conn.close()\n except Exception as e:\n pass\n\n#\n@app.route('/')\ndef index():\n \"\"\"\n request is a special object that Flask provides to access web request information:\n\n request.method: \"GET\" or \"POST\"\n request.form: if the browser submitted a form, this contains the data in the form\n request.args: dictionary of URL arguments, e.g., {a:1, b:2} for http://localhost?a=1&b=2\n\n See its API: http://flask.pocoo.org/docs/0.10/api/#incoming-request-data\n \"\"\"\n #\n # render_template looks in the templates/ folder for files.\n # for example, the below file reads template/index.html\n #\n return render_template(\"index.html\")\n\n\n@app.route('/getDomainList')\ndef getDomainlist():\n return jsonify(DomainList)\n\n\n@app.route('/getDomainData')\ndef getDomainData():\n domain = request.args.get('domain')\n result = engine.execute(text(\"SELECT A.ip FROM AssociatedDomain A WHERE 
A.DomainName = :domain\"), domain=domain)\n  endpoint_ips = [row[0] for row in result]\n\n  endpoints = []\n  for ip in endpoint_ips:\n    endpoints.append(getEndpointInfo(ip))\n\n  domainData = {'name': domain, 'endpoints': endpoints}\n  return jsonify(domainData)\n\n\ndef getOpenPortsForIP(ip):\n  result = engine.execute(text(\"SELECT Ex.PortNumber, Ex.PortType, I.ServiceName FROM ExposesPort Ex, Implements I\"\n                               \" WHERE Ex.IP = :ip AND I.PortNumber = Ex.PortNumber AND I.PortType = Ex.PortType\"), ip=ip)\n  result = list(result)\n  openPorts = [\n    {\n      'number': row[0],\n      'type': row[1],\n      'serviceName': row[2]\n    } for row in result\n  ]\n\n  return openPorts\n\ndef getEndpointInfo(ip):\n  result = engine.execute(text(\"SELECT O.ip, O.OrgName, A.domainname, L.city, L.country, L.latitude, L.longitude FROM OwnsEndpoint O, LocatedIn L, AssociatedDomain A\"\n                               \" WHERE A.ip = :ip AND A.ip = O.ip AND L.ip = O.ip\"), ip=ip)\n  result = list(result)[0]\n\n  endpointInfo = {\n    'IP': result[0],\n    'org': result[1],\n    'domain': result[2],\n    'location': {'city': result[3], 'country': result[4], 'latitude': result[5], 'longitude': result[6]}\n  }\n\n  # Get open ports for the Endpoint\n  endpointInfo['openPorts'] = getOpenPortsForIP(ip)\n  return endpointInfo\n\n# export interface Endpoint {\n#   IP : string\n#   location : Location\n#   openPorts : Port[]\n#   domain : string\n# }\n@app.route('/getEndpointData')\ndef getEndpointData():\n  # Get Endpoint Information\n  ip = request.args.get('ip')\n  endpointInfo = getEndpointInfo(ip)\n\n  return jsonify(endpointInfo)\n\n@app.route('/getOrgData')\ndef getOrgData():\n  orgname = request.args.get('org')\n  result = engine.execute(text(\"SELECT O.ip FROM OwnsEndpoint O WHERE O.OrgName = :name\"), name=orgname)\n  endpoint_ips = [row[0] for row in result]\n\n  endpoints = []\n  for ip in endpoint_ips:\n    endpoints.append(getEndpointInfo(ip))\n\n  orgData = {'name': orgname, 'endpoints': endpoints}\n  return jsonify(orgData)\n\n@app.route('/getByService')\ndef getByService():\n  service = request.args.get('service')\n  result = engine.execute(text(\"SELECT Ex.IP FROM ExposesPort Ex, Implements I WHERE Ex.PortType = I.PortType AND Ex.PortNumber = I.PortNumber AND I.ServiceName = :name\"), name=service)\n  endpoint_ips = [row[0] for row in result]\n  endpoints = []\n  for ip in endpoint_ips:\n    endpoints.append(getEndpointInfo(ip))\n\n  orgData = {'name': service, 'endpoints': endpoints}\n  return jsonify(orgData)\n\n@app.route('/getOrgList')\ndef getOrgList():\n  return jsonify(OrganizationList)\n\n@app.route('/getIPList')\ndef getIPList():\n  return jsonify(IPList)\n\nif __name__ == \"__main__\":\n  import click\n\n  @click.command()\n  @click.option('--debug', is_flag=True)\n  @click.option('--threaded', is_flag=True)\n  @click.argument('HOST', default='0.0.0.0')\n  @click.argument('PORT', default=8111, type=int)\n  def run(debug, threaded, host, port):\n    \"\"\"\n    This function handles command line parameters.\n    Run the server using:\n\n        python server.py\n\n    Show the help text using:\n\n        python server.py --help\n\n    \"\"\"\n\n    HOST, PORT = host, port\n    print(\"running on %s:%d\" % (HOST, PORT))\n    app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)\n\n\n  run()\n","sub_path":"webserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
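# A client-side sketch of the /getDomainData route defined above (assumes the
# server is running locally on port 8111, as in its __main__ block; the domain
# value is made up).
import requests

resp = requests.get("http://localhost:8111/getDomainData",
                    params={"domain": "example.com"})
data = resp.json()
for endpoint in data["endpoints"]:
    print(endpoint["IP"], endpoint["location"]["country"])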
+{"seq_id":"475132283","text":"from random import randint\nfrom math import log, pi \nimport time\n\nstart = time.time() #время начала работы программы\nN = 10**100 #крайняя правая граница выбора двух чисел а и b\nM = 10000 #кол-во испытаний\n\n#сравнение остатка от деления с половиной делителя\ndef comparison(b, N): \n if N%b < b/2:\n return 1\n else:\n return 0\n\ni = 0\nitter = 0\nwhile i < M:\n b = randint(1, N) #генерация случайного числа а\n itter += comparison(b, N) #складываем кол-во итераций, когда остаток от деления меньше половины делителя\n i += 1\n\nprint(\"Кол-во итераций, когда остаток от деления меньше половины делителя: \", itter)\nprint(\"Вероятность, что остаток от деления N на b меньше половины делителя: \", itter/M)\nprint(\"Теоретическая оценка: \", 2-2*log(2))\n\nif (2-2*log(2)) > (itter/M):\n print(\"Теоретическая оценка выше экспериментальной.\")\nelse:\n print(\"Экспериментальная оценка выше теоретической.\")\nprint(\"Время выполнения: \", time.time() - start, \"секунд.\")\n","sub_path":"lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"644347182","text":"import json\n\nclass Logger:\n def __init__(self, nrx_windows, output=\"stdout\"):\n self.total_uplinks = 0\n self.effective_uplinks = 0\n self.ineffective_uplinks = 0\n self.ratio = -1\n self.output = output\n self.has_logged = False\n self.seen = set()\n self.nrx_windows = nrx_windows\n\n def write(self):\n self.calc_ratio()\n log_dict = {\n \"total_uplinks\": self.effective_uplinks + self.ineffective_uplinks,\n \"effective_uplinks\": self.effective_uplinks,\n \"ineffective_uplinks\": self.ineffective_uplinks,\n \"ratio\": self.ratio\n }\n\n if self.output == \"stdout\":\n print(json.dumps(log_dict))\n else:\n with open(self.output, \"w+\") as f:\n f.write(json.dumps(log_dict) + \"\\n\")\n\n def uplink_rcvd(self, inds):\n self.total_uplinks += 1\n for ind in inds:\n if ind not in self.seen:\n self.effective_uplinks += 1\n self.ratio += 1\n self.seen.add(ind)\n else:\n self.ineffective_uplinks += 1\n\n # def effective_uplink(self):\n # self.total_uplinks += 1\n # self.effective_uplinks += 1\n\n def ineffective_uplink(self):\n self.total_uplinks += 1\n self.ineffective_uplinks += 1\n self.effective_uplinks -= 1\n\n # def ratio_up(self):\n # self.ratio += 1\n\n # def ratio_down(self):\n # self.ratio -= 1 # Account for the resend, which will increment the ratio\n\n def calc_ratio(self):\n self.ratio = self.ratio / self.total_uplinks\n self.ratio += 1","sub_path":"device-emulation/kevins-protocol-classB-extension/class-b-device/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"83064003","text":"from tweepy.streaming import StreamListener\nfrom tweepy.streaming import Stream\nimport logging\n\nfrom twitter_api import twitter_api\n\nimport rabbit\n\nlogger = logging.getLogger(__name__)\n\n\nclass stream_listener(StreamListener):\n def on_status(self, status):\n rabbit_dict = {\n 'user': status.user.screen_name,\n 'follow_request_sent': status.user.follow_request_sent,\n 'userfollows_count': status.user.followers_count,\n 'tweet_text': status.text,\n 'tweet_id': status.id,\n 'created_time': str(status.created_at),\n 'in_reply_to_user': status.in_reply_to_status_id, ## if None, run regular query, if \"x (ie. @INeedAVerse Thanks!)\" do something\n }\n logger.info(\"queuing tweet: {}\".format(rabbit_dict))\n rabbit.enqueue_incoming_tweet(rabbit_dict)\n\n def on_error(self, status_code):\n if status_code == 420:\n return False\n\n\ndef listen_for_tweets():\n listener = stream_listener()\n stream = Stream(auth=twitter_api().auth, listener=listener)\n stream.filter(track=[\"@INeedAVerse\"])\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s:%(levelname)s:%(name)s:%(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n listen_for_tweets()\n","sub_path":"python/listen_for_tweets_loop.py","file_name":"listen_for_tweets_loop.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"494841940","text":"\nfrom labstep.entities.experimentSignatureRequest.model import ExperimentSignatureRequest\nimport labstep.generic.entity.repository as entityRepository\nfrom labstep.constants import UNSPECIFIED\n\n\ndef newExperimentSignatureRequest(user, experiment_id, user_id, message=UNSPECIFIED):\n fields = {\n \"experiment_workflow_id\": experiment_id,\n \"message\": message,\n \"user_id\": user_id\n }\n return entityRepository.newEntity(user, ExperimentSignatureRequest, fields=fields)\n\n\ndef getExperimentSignatureRequests(user, experiment_id):\n params = {\n \"experiment_workflow_id\": experiment_id,\n \"search\": None,\n }\n return entityRepository.getEntities(user, ExperimentSignatureRequest, filterParams=params, count=UNSPECIFIED)\n","sub_path":"labstep/entities/experimentSignatureRequest/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"189550086","text":"from flask import Flask , render_template , request\nfrom flask import jsonify\nimport subprocess # nosec #pylint-disable type: ignore\nimport os\nimport json_config\nimport pafy\nimport vlc\nfrom modules import youtube_videos\nfrom modules import coverpy\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\nInstance = vlc.Instance('--no-video')\nplayer = Instance.media_player_new()\nurl = ''\nyoutube = youtube_videos.youtube_results()\ncoverpy = coverpy.CoverPy()\n\nclass search_play_recommend:\n def search(self, search_query):\n search = youtube.youtube_search(search_query)\n art = coverpy.art(search_query)\n result = dict([('title', search[0][0]), ('id', search[0][1]), ('album art', art)])\n return(result)\n\n def play(self, video_id):\n url = 'https://www.youtube.com/watch?v=' + video_id\n video = pafy.new(url)\n streams = video.audiostreams\n best = streams[3]\n playurl = best.url\n # print(playurl)\n return playurl\n\n def recommend(self, video_id):\n related_result = youtube.youtube_related(video_id)\n items = []\n for video in related_result:\n items.append(dict([('title', video[0]), ('id', video[1])]))\n return items\n\nsong = search_play_recommend()\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/search', methods=['GET'])\ndef w_youtube():\n search_query = request.args.get('vid')\n res = song.search(search_query)\n resp = jsonify(res)\n resp.status_code = 200\n return resp\n\n@app.route('/recommend', methods=['GET'])\ndef recommended_songs():\n video_id = request.args.get('vid')\n recommended = song.recommend(video_id)\n res = {\"items\" : recommended}\n resp = jsonify(res)\n resp.status_code = 200\n return resp\n\n@app.route('/play', methods=['GET'])\ndef wo_youtube():\n video_id = request.args.get('vid')\n url = song.play(video_id)\n print(url)\n display_message = {\"status\":\"song started\",\"url\":url}\n resp = jsonify(display_message)\n resp.status_code = 200\n return resp\n\n@app.route('/pause')\ndef pause():\n \"\"\" Rn, doing nothing but expecting a post request to for user activity\n \"\"\"\n display_message = {\"status\":\"song paused\"}\n resp = jsonify(display_message)\n resp.status_code = 200\n return resp\n\n@app.route('/stop')\ndef stop():\n \"\"\" Rn, doing nothing but expecting a post request to for user activity\n \"\"\"\n display_message = {\"status\":\"song stopped\"}\n resp = jsonify(display_message)\n resp.status_code = 200\n return resp\n\n@app.route('/restart')\ndef restart():\n \"\"\" Rn, doing nothing but expecting a post request to for user activity\n \"\"\"\n display_message = {\"status\":\"song restarted\"}\n resp = jsonify(display_message)\n resp.status_code = 200\n return resp\n\n@app.route('/resume')\ndef play():\n \"\"\" Rn, doing nothing but expecting a post request to for user activity\n \"\"\"\n display_message = {\"status\":\"song resumed\"}\n resp = jsonify(display_message)\n resp.status_code = 200\n return resp\n\nif __name__ == '__main__':\n app.run(debug=True,port=7070,host= '0.0.0.0')\n\n\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"32452521","text":"import tensorflow as tf\nfrom util import myCallbacks as c\nfrom util import formaters as f\nimport tensorflow_datasets as tfds\n\n\nucf101_dataset, ucf101_info = tfds.load(name=\"ucf101\", with_info=True)\nucf101_train , ucf101_test = ucf101_dataset[\"train\"], ucf101_dataset[\"test\"]\nprint(type(ucf101_info))\nassert isinstance(ucf101_train, tf.data.Dataset)\nassert isinstance(ucf101_test, tf.data.Dataset)\n\ntrain = ucf101_train.map(f.format_videos)\ntrain = train.map(f.select_first_frame)\ntrain = train.map(f.convert_to_tuple)\n\ntest = ucf101_test.map(f.format_videos)\ntest = test.map(f.select_first_frame)\ntest = test.map(f.convert_to_tuple)\n\nBATCH_SIZE = 32\nSHUFFLE_BUFFER_SIZE = 20 #1000\ntrain_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)\ntest_batches = test.batch(BATCH_SIZE)\n# import tensorflow_datasets as tfds\n\nIMG_SIZE = 170\nIMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n\ncallbacks = c.highAccCallback()\n\ncallbacks = c.highAccCallback()\nbase_model = tf.keras.applications.vgg19.VGG19(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')\n\nbase_model.trainable = False\nbase_model.summary()\n\nglobal_average_layer = tf.keras.layers.GlobalAveragePooling2D()\n\nprediction_layer = tf.keras.layers.Dense(101, activation='softmax')\n\nmodel = tf.keras.Sequential([\n base_model,\n tf.keras.layers.Dropout(0.5),\n global_average_layer,\n prediction_layer\n])\n\nbase_learning_rate = 0.0001\nmodel.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\nmodel.summary()\n\nmodel.fit(train_batches,\n epochs=1,verbose=1,\n callbacks=[callbacks] )\n\nprint('\\n# Evaluate on test data')\nresults = model.evaluate(test_batches)\nprint('test loss, test acc:', results)\n","sub_path":"code/cluster_items/msc-project/models/vGG19.py","file_name":"vGG19.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"463503442","text":"######## CRAWLER ##########\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--incognito\")\nbrowser = webdriver.Chrome(chrome_options=chrome_options)\nbrowser2 = webdriver.Chrome(chrome_options=chrome_options)\n\nMain_Page = \"https://tabelog.com/\"\nLocations = {\n \"Shibuya_Station\":\"tokyo/A1303/A130301/R4698/\"\n}\nCafe_Label = \"rstLst/CC03/\"\n\ndf = pd.DataFrame(columns=[\"Location\",\"Name\",\"Rst_Page\",\"Rank\",\"Lunch_Budget\",\"Dinner_Budget\"])\nbudget_map = {\"~999\":0, \"1000~1999\":1, \"2000~2999\":2, \"3000:3999\":3,\"4000~4999\":4}\nbudget_map = {{\"{}~{}\".format(1000*k,1000*(k+1)-1):j*(j+1)/2+k for k in range(j)} for j in range(4)}\nindice = 0\nfor location,location_path in Locations.items():\n # for area in location:\n url = Main_Page+location_path+Cafe_Label\n browser.get(url)\n parsed_page = BeautifulSoup(browser.page_source,\"html.parser\")\n rst_wraps = parsed_page.find_all(\"div\",{\"class\":\"list-rst__wrap\"})\n\n for rst_wrap in rst_wraps:\n Name = \"\"\n Rst_page = \"\"\n Rank = 0\n Lunch_Budget = 0\n Dinner_Budget = 0\n try:\n ## from search page\n Name = rst_wrap.find(\"a\",{\"class\":\"list-rst__rst-name-target\"}).get_text()\n Rst_Page = rst_wrap.find(\"a\",{\"class\":\"list-rst__rst-name-target\"})[\"href\"]\n Rank = rst_wrap.find(\"span\",{\"class\":\"list-rst__rating-val\"}).get_text()\n Lunch_Budget = rst_wrap.find(\"span\",{\"class\":\"cpy-lunch-budget-val\"}).get_text()\n Dinner_Budget = rst_wrap.find(\"span\",{\"class\":\"cpy-dinner-budget-val\"}).get_text()\n ## from hotel page\n\n\n ## from google\n df.loc[indice] = [location,Name,Rst_Page,Rank,Lunch_Budget,Dinner_Budget]\n indice+=1\n except:\n print(\"Skip\")\n continue\nbrowser.quit()\ndf.to_csv(\"O_Data/data_1.csv\")\n","sub_path":"O_Crawler/o_crawl_by_rst.py","file_name":"o_crawl_by_rst.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"93663974","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport math\nimport numpy as np\nimport scipy as sp\nimport pandas\nimport matplotlib.pyplot as plt\nfrom progressbar import ProgressBar\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import linalg as sparse_linalg\nimport sys\nfile_dir = '/localhome/pykb/physics_code/Exact_Diagonalization/Classes/'\nsys.path.append(file_dir)\nfile_dir = '/localhome/pykb/physics_code/Exact_Diagonalization/functions/'\nsys.path.append(file_dir)\n\nfrom Hamiltonian_Classes import Hamiltonian,H_table,clock_Hamiltonian,spin_Hamiltonian\nfrom System_Classes import unlocking_System\nfrom Symmetry_Classes import translational,parity,model_sym_data,charge_conjugation\n# from Plotting_Classes import eig_overlap,fidelity,entropy,energy_basis\nfrom Non_observables import zm\nfrom Construction_functions import bin_to_int_base_m,int_to_bin_base_m,cycle_bits_state\nfrom Search_functions import find_index_bisection\nfrom State_Classes import zm_state,sym_state,prod_state,bin_state,ref_state\nfrom rw_functions import save_obj,load_obj\nfrom Calculations import level_stats,fidelity,eig_overlap,entropy,site_precession,site_projection,time_evolve_state\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Computer Modern'],'size':26})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n# matplotlib.rcParams['figure.dpi'] = 400\n\ndef krylov(v,H,krylov_size):\n #generate subspace by repeated application of H\n temp = v\n for n in range(0,krylov_size):\n temp = np.dot(H,temp)\n v = np.vstack((v,temp))\n\n #orthonormalize basis using QR decomposition\n v = np.transpose(v)\n #orthornormalize with qr decomp\n x,r = np.linalg.qr(v)\n return x\n\ndef find_hamming_sectors(state_bits,system):\n #organize states via hamming distance from Neel\n hamming_sectors = dict()\n for n in range(0,system.N+1):\n hamming_sectors[n] = []\n for n in range(0,system.dim):\n h = 0\n for m in range(0,system.N,1):\n if system.basis[n][m] != state_bits[m]:\n h = h+1\n hamming_sectors[int(h)] = np.append(hamming_sectors[int(h)],system.basis_refs[n])\n return hamming_sectors\n\ndef sublattice_state(v1,v2,system):\n state_bits = np.zeros(system.N)\n for n in range(0,np.size(v1)):\n state_bits[2*v1[n]] = 1\n for n in range(0,np.size(v2)):\n state_bits[2*v2[n]+1] = 1\n return bin_state(state_bits,system)\n\n#pxp cube\nN=10\npxp = unlocking_System([0],\"periodic\",2,N)\npxp.gen_basis()\npxp_syms = model_sym_data(pxp,[translational(pxp)])\n\n#create dictionary + hash table storing refs of all states in perm sector |n,m>\nbasis_perm_labels = np.zeros((pxp.dim,2))\nperm_sector_refs = dict()\n\ndef perm_key(v):\n return bin_to_int_base_m(v,int(pxp.N/2)+1)\nfor n in range(0,int(pxp.N/2)+1):\n for m in range(0,int(pxp.N/2)+1):\n perm_sector_refs[perm_key([n,m])] = []\nfor n in range(0,np.size(pxp.basis_refs,axis=0)):\n c1=0\n c2=0\n for m in range(0,pxp.N):\n if pxp.basis[n][m] == 1:\n if m % 2 == 0:\n c1 = c1 + 1\n elif m % 2 != 0:\n c2 = c2 + 1\n basis_perm_labels[n] = np.array([c1,c2])\n perm_sector_refs[perm_key(np.array((c1,c2)))] = np.append(perm_sector_refs[perm_key(np.array((c1,c2)))],pxp.basis_refs[n])\n\n#identify all root states\n# find perm sectors root states live in\nperm_labels = np.zeros(((int(pxp.N/2))**2,2))\ntemp = np.arange(0,pxp.N/2)\nfrom itertools import *\ntemp = product(temp,temp)\nc=0\nfor label in temp:\n perm_labels[c] = 
label\n c=c+1\n\nedge_vertex_sectors = np.zeros(2)\nfor n in range(0,np.size(perm_labels,axis=0)):\n temp_sum = np.sum(perm_labels[n])\n if temp_sum == int(pxp.N/2-1):\n edge_vertex_sectors = np.vstack((edge_vertex_sectors,perm_labels[n]))\nedge_vertex_sectors = np.delete(edge_vertex_sectors,0,axis=0)\n\nroot_sectors = np.zeros(2)\nfor n in range(0,np.size(edge_vertex_sectors,axis=0)-1,2):\n temp = np.zeros(2)\n temp[0] = edge_vertex_sectors[n][0]\n temp[1] = edge_vertex_sectors[n+1][1]\n root_sectors = np.vstack((root_sectors,temp))\nroot_sectors = np.delete(root_sectors,0,axis=0)\n\nedge_vertex_sectors = np.delete(edge_vertex_sectors,0,axis=0)\nedge_vertex_sectors = np.delete(edge_vertex_sectors,np.size(edge_vertex_sectors,axis=0)-1,axis=0)\nprint(edge_vertex_sectors)\n\nroot_sectors = np.unique(np.vstack((root_sectors,np.flip(root_sectors,axis=1))),axis=0)\nprint(root_sectors)\n \nH = spin_Hamiltonian(pxp,\"x\")\nH.gen()\n#find root states as those connected to edge vertex sectors while also residing in root sector\nroot_refs = dict()\nroot_ref_from= dict()\nroot_ref_from_sector= dict()\nfor n in range(0,np.size(root_sectors,axis=0)):\n root_refs[perm_key(root_sectors[n])] = []\n root_ref_from[perm_key(root_sectors[n])] = []\n root_ref_from_sector[perm_key(root_sectors[n])] = np.zeros(2)\n\nfor n in range(0,np.size(edge_vertex_sectors,axis=0)):\n refs = perm_sector_refs[perm_key(edge_vertex_sectors[n])]\n for m in range(0,np.size(refs,axis=0)):\n state = np.zeros(pxp.dim)\n state[pxp.keys[refs[m]]] = 1\n\n #find new product states H|psi> maps to\n state = np.dot(H.sector.matrix(),state)\n acted_refs = []\n for i in range(0,np.size(state,axis=0)):\n if state[i] == 1:\n acted_refs = np.append(acted_refs,pxp.basis_refs[i])\n\n #check those in root sector, these are root states\n in_root_sector = np.zeros(np.size(acted_refs))\n for i in range(0,np.size(acted_refs,axis=0)):\n for j in range(0,np.size(root_sectors,axis=0)):\n if acted_refs[i] in perm_sector_refs[perm_key(root_sectors[j])]:\n print(edge_vertex_sectors[n],root_sectors[j])\n root_refs[perm_key(root_sectors[j])] = np.append(root_refs[perm_key(root_sectors[j])],acted_refs[i])\n root_ref_from[perm_key(root_sectors[j])] = np.append(root_ref_from[perm_key(root_sectors[j])],refs[m])\n root_ref_from_sector[perm_key(root_sectors[j])] = np.vstack((root_ref_from_sector[perm_key(root_sectors[j])],edge_vertex_sectors[n]))\n\n #track which sector maps from, to find 0/1 locations to fill from subcube basis\n break\n \nfor n in range(0,np.size(root_sectors,axis=0)):\n refs = root_refs[perm_key(root_sectors[n])]\n print(\"\\n\")\n for m in range(0,np.size(refs,axis=0)):\n print(pxp.basis[pxp.keys[refs[m]]],root_sectors[n],root_ref_from[perm_key(root_sectors[n])][m],pxp.basis[pxp.keys[root_ref_from[perm_key(root_sectors[n])][m]]])\n\n#for subcube basis from roots + smaller cube basis\n# smaller hypercubes + hamming, for subcube identification\n\nsub_cube_systems = dict()\nsub_cube_hamming = dict()\nfor n in range(int(pxp.N/2),1,-1):\n sub_cube_systems[n] = unlocking_System([0,1],\"open\",2,n)\n sub_cube_systems[n].gen_basis()\n z_temp = np.ones(sub_cube_systems[n].N)\n if n != int(pxp.N/2):\n z_temp[np.size(z_temp,axis=0)-1] = 0\n z_temp = bin_state(z_temp,sub_cube_systems[n])\n sub_cube_hamming[n] = find_hamming_sectors(z_temp.bits,sub_cube_systems[n])\n\nsubcube_basis = dict()\nfor n in range(0,np.size(root_sectors,axis=0)):\n subcube_basis[perm_key(root_sectors[n])] = dict()\n refs = root_refs[perm_key(root_sectors[n])]\n refs_from = 
root_ref_from[perm_key(root_sectors[n])]\n refs_from_sector = root_ref_from_sector[perm_key(root_sectors[n])]\n refs_from_sector = np.delete(refs_from_sector,0,axis=0)\n for m in range(0,np.size(refs)):\n subcube_basis[perm_key(root_sectors[n])][m] = np.zeros(pxp.dim)\n cube_dim = int(np.sum(refs_from_sector[m]))\n state_bits = pxp.basis[pxp.keys[refs[m]]]\n state_from_bits = pxp.basis[pxp.keys[refs_from[m]]]\n bit_loc = np.zeros(cube_dim)\n c=0\n\n #get 1 bit loc from orig state\n for i in range(0,np.size(state_bits,axis=0)):\n if state_bits[i] == 1:\n bit_loc[c] = i\n c = c +1\n\n # get loc of flippable 0 to 1 from connected sector in hypercube graph\n for i in range(0,np.size(state_from_bits)):\n if state_from_bits[i] == 1:\n if i not in bit_loc:\n bit_loc[c] = i\n break\n\n for i in range(0,len(sub_cube_hamming[cube_dim])):\n sub_refs = sub_cube_hamming[cube_dim][i]\n new_basis_state = np.zeros(pxp.dim)\n for j in range(0,np.size(sub_refs,axis=0)):\n state_bits = sub_cube_systems[cube_dim].basis[sub_cube_systems[cube_dim].keys[sub_refs[j]]]\n temp_bits=np.zeros(pxp.N)\n for k in range(0,np.size(state_bits,axis=0)):\n temp_bits[int(bit_loc[k])] = state_bits[k]\n temp_ref = bin_to_int_base_m(temp_bits,pxp.base)\n new_basis_state = new_basis_state + ref_state(temp_ref,pxp).prod_basis()\n new_basis_state = new_basis_state / np.power(np.vdot(new_basis_state,new_basis_state),0.5)\n subcube_basis[perm_key(root_sectors[n])][m] = np.vstack((subcube_basis[perm_key(root_sectors[n])][m],new_basis_state))\n subcube_basis[perm_key(root_sectors[n])][m] = np.transpose(np.delete(subcube_basis[perm_key(root_sectors[n])][m],0,axis=0))\n\n#hypercube from Neel/AntiNeel\nz0=zm_state(2,1,pxp)\nz1=zm_state(2,1,pxp,1)\ncube_dim = int(pxp.N/2)\ncube_basisL = np.zeros((pxp.dim,len(sub_cube_hamming[cube_dim])))\ncube_basisR = np.zeros((pxp.dim,len(sub_cube_hamming[cube_dim])))\nfor n in range(0,len(sub_cube_hamming[cube_dim])):\n refs = sub_cube_hamming[cube_dim][n]\n temp_stateL = np.zeros(pxp.dim)\n temp_stateR = np.zeros(pxp.dim)\n one_locL = np.arange(0,pxp.N-1,2)\n one_locR = np.arange(1,pxp.N,2)\n for m in range(0,np.size(refs,axis=0)):\n state_bits = sub_cube_systems[cube_dim].basis[sub_cube_systems[cube_dim].keys[refs[m]]]\n temp_bitsL = np.zeros(pxp.N)\n temp_bitsR = np.zeros(pxp.N)\n for i in range(0,np.size(state_bits,axis=0)):\n temp_bitsL[int(one_locL[i])] = state_bits[i]\n temp_bitsR[int(one_locR[i])] = state_bits[i]\n temp_refL = bin_to_int_base_m(temp_bitsL,pxp.base)\n temp_refR = bin_to_int_base_m(temp_bitsR,pxp.base)\n temp_stateL = temp_stateL + ref_state(temp_refL,pxp).prod_basis()\n temp_stateR = temp_stateR + ref_state(temp_refR,pxp).prod_basis()\n temp_stateL = temp_stateL / np.power(np.vdot(temp_stateL,temp_stateL),0.5)\n temp_stateR = temp_stateR / np.power(np.vdot(temp_stateR,temp_stateR),0.5)\n cube_basisL[:,n] = temp_stateL\n cube_basisR[:,n] = temp_stateR\n\n# combine subcube basis (translational symm)\nsubcube_combined_basis=dict()\nfor n in range(0,np.size(root_sectors,axis=0)):\n subcube_combined_basis[perm_key(root_sectors[n])] = subcube_basis[perm_key(root_sectors[n])][0]\n for m in range(1,len(subcube_basis[perm_key(root_sectors[n])])):\n subcube_combined_basis[perm_key(root_sectors[n])] = subcube_combined_basis[perm_key(root_sectors[n])] + subcube_basis[perm_key(root_sectors[n])][m]\n\nfor n in range(0,np.size(root_sectors,axis=0)):\n for m in range(0,np.size(subcube_combined_basis[perm_key(root_sectors[n])],axis=1)):\n temp = 
subcube_combined_basis[perm_key(root_sectors[n])][:,m]\n temp = temp / np.power(np.vdot(temp,temp),0.5)\n subcube_combined_basis[perm_key(root_sectors[n])][:,m] = temp\n\n\n#combine basis\nbasis = cube_basisL\nbasis = np.hstack((basis,cube_basisR))\nfor n in range(0,np.size(root_sectors,axis=0)):\n print(root_sectors[n])\n basis = np.hstack((basis,subcube_combined_basis[perm_key(root_sectors[n])]))\nbasis = np.hstack((basis,cube_basisR))\nbasis = np.unique(basis,axis=1)\nprint(np.size(basis,axis=1))\nbasis,temp = np.linalg.qr(basis)\nfrom Diagnostics import print_wf\nfor n in range(0,np.size(basis,axis=1)):\n print(\"\\n\")\n print_wf(basis[:,n],pxp,1e-2)\n \n\n\nH.sector.find_eig()\n\nH_rot = np.dot(np.conj(np.transpose(basis)),np.dot(H.sector.matrix(),basis))\ne,u = np.linalg.eigh(H_rot)\nplt.matshow(np.abs(H_rot))\nplt.show()\n\noverlap = np.log10(np.abs(u[0,:])**2)\nplt.scatter(e,overlap,marker=\"s\",color=\"red\",s=200,alpha=0.5,label=\"Subcube basis\")\n\neigenvalues = np.copy(H.sector.eigvalues())\nz=zm_state(2,1,pxp)\noverlap = eig_overlap(z,H).eval()\n# to_del=[]\n# for n in range(0,np.size(overlap,axis=0)):\n # if overlap[n] <-5:\n # to_del = np.append(to_del,n)\n# for n in range(np.size(to_del,axis=0)-1,-1,-1):\n # overlap=np.delete(overlap,to_del[n])\n # eigenvalues=np.delete(eigenvalues,to_del[n])\n# plt.scatter(eigenvalues,overlap,label=\"Exact\")\nplt.legend()\nplt.show()\n\nu_comp_basis = np.dot(basis,u)\nexact_overlap = np.zeros(np.size(e))\nfor n in range(0,np.size(u_comp_basis,axis=1)):\n max_overlap = 0\n for m in range(0,pxp.dim):\n temp = np.abs(np.vdot(u_comp_basis[:,n],H.sector.eigvectors()[:,m]))**2\n if temp > max_overlap:\n max_overlap = temp\n exact_overlap[n] = max_overlap\nplt.scatter(e,exact_overlap)\nplt.show()\n","sub_path":"projects/hypercube,random_cuts/cube_stitch/subcube_basis_cube_node.py","file_name":"subcube_basis_cube_node.py","file_ext":"py","file_size_in_byte":13125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"486504175","text":"from zipfile import ZipFile\n\nfrom PythonClientAPI.libs.Game.Enums import Direction\n\n\nclass NavigationCache:\n def __init__(self):\n self.navigation_data = []\n self.loaded = False\n\n def deserialize_nav_data(self, array):\n d1 = array[0]\n d2 = array[1]\n d3 = array[2]\n d4 = array[3]\n\n data = []\n dir_list = [Direction.NOWHERE] + list(Direction._rotation_list.keys())\n for i1 in range(d1):\n data.append([])\n for i2 in range(d2):\n data[i1].append([])\n for i3 in range(d3):\n data[i1][i2].append([])\n for i4 in range(d4):\n index = 4 + i1 * d2 * d3 * d4 + i2 * d3 * d4 + i3 * d4 + i4\n c_byte = array[index]\n c_dir = dir_list[c_byte]\n data[i1][i2][i3].append(c_dir)\n\n print(\"Loaded navigation cache\")\n return data\n\n def load_compiled_data(self, file):\n with ZipFile(file) as zip_file:\n info = zip_file.getinfo(\"data\")\n\n expected_size = info.file_size\n\n data = zip_file.read('data')\n\n if len(data) != expected_size:\n raise EOFError(\"Expected \" + str(expected_size) + \" bytes, got \" + str(len(data)))\n\n self.navigation_data = self.deserialize_nav_data(data)\n self.loaded = True\n\n def get_next_direction_in_path(self, position, target):\n return self.navigation_data[position[0]][position[1]][target[0]][target[1]]\n\nnavigation_cache = NavigationCache()\n","sub_path":"Cyber Team Zero/Bots/PythonAI/PythonClientAPI/libs/Navigation/NavigationCache.py","file_name":"NavigationCache.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"42737311","text":"from django.db import models\nimport uuid\n\n# Create your models here.\n\n\nclass Book(models.Model):\n _id = models.UUIDField(\n primary_key=True, auto_created=True, default=uuid.uuid4, editable=False)\n name = models.CharField(max_length=30, default='', help_text=u'书名')\n price = models.DecimalField(max_digits=5, decimal_places=2, help_text=u'单价')\n","sub_path":"django/django-rest-swagger/quickstart/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"214484953","text":"import time\n\nimport cv2\nfrom configuration_global.logger_factory import LoggerFactory\nfrom dataLayer.entities.recognition_result import RecognitionResult\nfrom dataLayer.repositories.recognition_result_repository import RecognitionResultRepository\nfrom domain.face_detection.face_detectors_manager import FaceDetectorsManager\nfrom opencv_client.image_converters.image_converter import ImageConverter\n\n\nclass OpenCvFaceRecognizer():\n def __init__(self):\n self.logger = LoggerFactory()\n self.faceDetectorManager = FaceDetectorsManager()\n self.imageConverter = ImageConverter()\n self.recognitionResultRepo = RecognitionResultRepository()\n\n def recognize_face_from_image(self, request_id, recognizers, image_path):\n detection_start = time.time()\n image = cv2.imread(image_path)\n detected_faces = self.faceDetectorManager.get_face_by_dnn(image)\n detection_end = time.time()\n detection_time = detection_end - detection_start\n for face_recognizer, file_id in recognizers:\n start_time = time.time()\n self.logger.info(f\"Using {face_recognizer} recognizer created from {file_id} file id\")\n if len(detected_faces) is 0:\n self.__add_empty_result__(file_id, request_id, start_time, detection_time)\n for (startX, startY, endX, endY) in detected_faces:\n predict_image = self.imageConverter.convert_to_np_array(image[startY:endY, startX:endX])\n nbr_predicted, confidence = face_recognizer.predict(predict_image)\n self.__add_result__(confidence, file_id, nbr_predicted, request_id, start_time, detection_time)\n\n def recognize_with_single_recognizer(self, face_recognizer, image_path):\n detection_start = time.time()\n image = cv2.imread(image_path)\n detected_faces = self.faceDetectorManager.get_face_by_haar(image)\n detection_end = time.time()\n detection_time = detection_end - detection_start\n start_time = time.time()\n self.logger.info(f\"Using {face_recognizer} recognizer on {image_path}\")\n if len(detected_faces) is 0:\n return 0\n (startX, startY, endX, endY)= detected_faces[0]\n predict_image = self.imageConverter.convert_to_np_array(image[startY:endY, startX:endX])\n nbr_predicted, confidence = face_recognizer.predict(predict_image)\n return nbr_predicted\n\n def __add_result__(self, confidence, file_id, nbr_predicted, request_id, start_time, detection_time):\n self.logger.info(f\"Recognized identity: {nbr_predicted} confidence:{confidence}\")\n end_time = time.time()\n process_time = end_time - start_time + detection_time\n result = RecognitionResult(nbr_predicted, request_id, confidence, file_id, str(process_time))\n self.recognitionResultRepo.add_recognition_result(result)\n\n def __add_empty_result__(self, azure_file, request_id, start_time, detection_time):\n end_time = time.time()\n process_time = end_time - start_time + detection_time\n result = RecognitionResult(0, request_id, 0, azure_file.id, str(process_time), \"No faces detected\")\n self.recognitionResultRepo.add_recognition_result(result)\n","sub_path":"FaceRecognition/opencv_client/face_recognition/open_cv_face_recognizer.py","file_name":"open_cv_face_recognizer.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"537627599","text":"def calc(octet):\n\tprint(\"calcul de %s\" % (octet))\n\tfor n in range(0, 10000):\n\t\tfor n2 in range(0, 10000):\n\t\t\taddition = n + n2\n\t\t\t#return(\"%s + %s = %s\" % (str(n), str(n2), str(addition)))\n\t\t\t#input('')\n\t\t\tsoustraction = n - n2\n\t\t\tsoustraction2 = n2 - n\n\t\t\tmultiplication = n * n2\n\t\t\ttry:\n\t\t\t\tdivision = int(n / n2)\n\t\t\t\tdivision2 = int(n2 / n)\n\t\t\t\tif division == int(octet):\n\t\t\t\t\treturn(\"%s/%s=\" % (str(n), str(n2)))\n\t\t\t\tif division2 == int(octet):\n\t\t\t\t\treturn(\"%s/%s=\" % (str(n2), str(n)))\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t\n\t\t\tif addition == int(octet):\n\t\t\t\treturn(\"%s+%s=\" % (str(n), str(n2)))\n\t\t\tif soustraction == int(octet):\n\t\t\t\treturn(\"%s-%s=\" % (str(n), str(n2)))\n\t\t\tif soustraction2 == int(octet):\n\t\t\t\treturn(\"%s-%s=\" % (str(n2), str(n)))\n\t\t\tif multiplication == int(octet):\n\t\t\t\treturn(\"%s*%s=\" % (str(n), str(n2)))\n\t\t\t\t\ndef alphabet_convert(calcbin):\n\talphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\talphabet = list(alphabet)\n\t\n\ttext_crypt = \"\"\n\t\n\tfor caractere in calcbin:\n\t\ttry:\n\t\t\tcar = int(caractere)\n\t\t\tpos = alphabet[car]\n\t\t\ttext_crypt += pos\n\t\t\t\n\t\texcept:\n\t\t\ttext_crypt += caractere\n\t\t\t\n\treturn(text_crypt)\n\t\ndef text_to_bin(texte):\n\tlist_octet = [ bin(ord(ch))[2:].zfill(8) for ch in texte ]\n\tliste = []\n\t\n\tfor octet in list_octet:\n\t\tseg_1 = octet[:3]\n\t\tseg_2 = octet[3:]\n\t\t\n\t\tliste.append(seg_1)\n\t\tliste.append(seg_2)\n\t\t\n\treturn(liste)\n\ncalc_text = \"\"\n\ntexte = input(\"crypt> \")\n\nbin_list = text_to_bin(texte)\n\nfor octet in bin_list:\n\tcalcul = calc(octet)\n\tcalc_text += calcul\n\nfinal_crypt = alphabet_convert(calc_text)\nprint(\"HASH: \"+final_crypt)\n\t\n","sub_path":"Crypt.py","file_name":"Crypt.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"106773597","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Clientes, Tramites, Gastos, Notas, TipoTramite\nfrom django.db import transaction\nfrom django.http import HttpResponse, JsonResponse\nfrom .forms import NuevoClienteForm, NuevoTramiteForm, NuevoTipoTramiteForm, NuevoGastoForm, EditarTramiteForm, NuevaNotaForm, NuevaDescripcion\nfrom django.template.loader import render_to_string\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\n\n\n@login_required\ndef home(request):\n return render(request, 'home.html')\n\ndef busqueda(request):\n query = request.GET.get('q')\n entorno = request.GET.get('e')\n print(entorno)\n if query is not None and query != ' ' and request.is_ajax():\n if entorno == 'Tramites':\n items = Tramites.objects.filter(Q(cliente__apellido__icontains=query)|Q(cliente__nombre__icontains=query)).order_by(\"fecha_presentacion\")\n return render(request, 'buscar_tramite.html',{'items': items})\n elif entorno == 'Clientes':\n items = Clientes.objects.filter(Q(apellido__icontains=query)|Q(nombre__icontains=query)).order_by(\"nombre\")\n return render(request, 'buscar_cliente.html',{'items': items})\n\n\n\n\ndef paginacion(request,modelo):\n paginator = Paginator(modelo, 5)\n page = request.GET.get('page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n\n index = items.number - 1\n max_index = len(paginator.page_range)\n start_index = index - 5 if index >= 5 else 0\n end_index = index + 5 if index <= max_index - 5 else max_index\n page_range = paginator.page_range[start_index:end_index]\n request.session['page_range'] = page_range\n request.session['items'] = items\n\n@login_required\ndef clientes(request):\n if request.method == 'GET':\n clientes = Clientes.objects.all().order_by(\"nombre\")\n paginator = Paginator(clientes, 5)\n page = request.GET.get('page')\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n\n index = items.number - 1\n max_index = len(paginator.page_range)\n start_index = index - 5 if index >= 5 else 0\n end_index = index + 5 if index <= max_index - 5 else max_index\n page_range = paginator.page_range[start_index:end_index]\n return render(request, 'clientes.html', {'clientes': clientes, 'items': items, 'page_range': page_range})\n\n\n@login_required\ndef nuevoCliente(request):\n if request.method=='POST':\n form = NuevoClienteForm(request.POST)\n if form.is_valid():\n cliente = form.save(commit=False)\n cliente.save()\n return redirect('clientes')\n else:\n form = NuevoClienteForm()\n return render(request, 'nuevo_cliente.html', {'form': form})\n\n\n@login_required\ndef editarCliente(request,cliente_pk):\n if request.method=='GET':\n cliente = Clientes.objects.get(pk=cliente_pk)\n form = NuevoClienteForm(instance=cliente)\n return render(request, \"editar_cliente.html\", {'cliente': form, \"id\": cliente_pk})\n elif request.method == 'POST':\n cliente = Clientes.objects.get(pk=cliente_pk)\n form = NuevoClienteForm(request.POST, instance=cliente)\n try:\n if form.is_valid():\n form.save()\n cliente = Clientes.objects.get(pk=cliente_pk)\n form = NuevoClienteForm(instance=cliente)\n return redirect('clientes')\n else:\n cliente = Clientes.objects.get(pk=cliente_pk)\n 
form = NuevoClienteForm(instance = cliente)\n                return render(request, \"editar_cliente.html\", {'cliente': form,\"id\":cliente_pk})\n        except Exception as e:\n            cliente = Clientes.objects.get(pk=cliente_pk)\n            form = NuevoClienteForm(instance = cliente)\n            return render(request, \"editar_cliente.html\", {'cliente': form,\"id\":cliente_pk})\n\n\n@login_required\ndef tramites(request):\n    if request.method=='GET':\n        tramites = Tramites.objects.filter(entregado=False).order_by(\"fecha_presentacion\")\n        paginator = Paginator(tramites, 5)\n        page = request.GET.get('page')\n        try:\n            items = paginator.page(page)\n        except PageNotAnInteger:\n            items = paginator.page(1)\n        except EmptyPage:\n            items = paginator.page(paginator.num_pages)\n\n        index = items.number - 1\n        max_index = len(paginator.page_range)\n        start_index = index - 5 if index >= 5 else 0\n        end_index = index + 5 if index <= max_index - 5 else max_index\n        page_range = paginator.page_range[start_index:end_index]\n        return render(request, 'tramites.html', {'tramites': tramites, 'items': items, 'page_range': page_range})\n\n\n@login_required\ndef tramitesFinalizados(request):\n    if request.method=='GET':\n        tramites = Tramites.objects.filter(entregado=True).order_by(\"fecha_presentacion\")\n        return render(request, 'tramites_finalizados.html', {'tramites': tramites})\n\n\n@login_required\ndef finalizarTramite(request, tramite_pk):\n    if request.method=='POST':\n        tramite = Tramites.objects.get(pk=tramite_pk)\n        tramite.entregado = True\n        tramite.save()\n        return redirect('tramites')\n\n\n\n@login_required\ndef editarTramite(request,tramite_pk):\n    if request.method=='GET':\n        tramite = Tramites.objects.get(pk=tramite_pk)\n        form = EditarTramiteForm(instance=tramite)\n\n        return render(request, \"editar_tramite.html\", {'tramite': form, \"id\": tramite_pk})\n\n    elif request.method == 'POST':\n        tramite = Tramites.objects.get(pk=tramite_pk)\n        form = EditarTramiteForm(request.POST, instance=tramite)\n\n        try:\n            if form.is_valid():\n                form.save()\n                tramite = Tramites.objects.get(pk=tramite_pk)\n                form = EditarTramiteForm(instance=tramite)\n                return redirect('tramites')\n            else:\n                tramite = Tramites.objects.get(pk=tramite_pk)\n                form = EditarTramiteForm(instance = tramite)\n                return render(request, \"editar_tramite.html\", {'tramite': form,\"id\":tramite_pk})\n        except Exception as e:\n            tramite = Tramites.objects.get(pk=tramite_pk)\n            form = EditarTramiteForm(instance = tramite)\n            return render(request, \"editar_tramite.html\", {'tramite': form,\"id\":tramite_pk})\n\n\n\n@login_required\ndef nuevoTramite(request):\n    if request.method=='POST':\n        form = NuevoTramiteForm(request.POST)\n        if form.is_valid():\n            tramite = form.save(commit=False)\n            tramite.save()\n            return redirect('tramites')\n    else:\n        form = NuevoTramiteForm()\n    return render(request, 'nuevo_tramite.html', {'form': form})\n\n\n@login_required\ndef nuevoTipoTramite(request):\n    if request.method=='GET':\n        tiposTramites = TipoTramite.objects.all()\n        return render(request, 'nuevo_tipo_tramite.html', {'tiposTramites': tiposTramites})\n\n    if request.method=='POST':\n        form = NuevoTipoTramiteForm(request.POST)\n\n        if form.is_valid():\n            tipo = form.save(commit=False)\n            tipo.save()\n            return redirect('tramites')\n    else:\n        form = NuevoTipoTramiteForm()\n    return render(request, 'nuevo_tipo_tramite.html', {'form': form})\n\n\n@login_required\ndef nuevaDescripcion(request):\n    if request.method=='POST':\n        form = NuevaDescripcion(request.POST)\n\n        if form.is_valid():\n            desc = form.save(commit=False)\n            desc.save()\n            return redirect('tramites')\n    
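# any non-POST request falls through and renders an empty form\n    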
else:\n        form = NuevaDescripcion()\n    return render(request, 'nueva_descripcion.html', {'form': form})\n\n\n@login_required\ndef gastos(request,tramite_pk):\n    total = 0\n    tramite = Tramites.objects.get(pk=tramite_pk)\n    gastos = Gastos.objects.filter(tramite__id=tramite_pk)\n\n    if request.method == 'GET':\n        for gasto in gastos:\n            total += gasto.monto\n        tramite.total = total\n        tramite.save()\n        return render(request, 'gastos_modal.html', {'tramite': tramite, 'gastos': gastos, 'total': total})\n\n\n    if request.method=='POST':\n        form = NuevoGastoForm(request.POST)\n        if form.is_valid():\n            gasto = form.save(commit=False)\n            gasto.tramite = tramite\n            gasto.save()\n            return render(request, 'gastos_modal.html', {'tramite': tramite, 'gastos': gastos, 'total': total})\n        else:\n            form = NuevoGastoForm()\n        return render(request, 'gastos_modal.html', {'tramite': tramite,'gastos': gastos,'form': form})\n\n\n@login_required\ndef nuevoGasto(request,tramite_pk):\n    tramite = get_object_or_404(Tramites, pk=tramite_pk)\n    if request.method=='POST':\n        form = NuevoGastoForm(request.POST)\n\n        if form.is_valid():\n            gasto = form.save(commit=False)\n            gasto.tramite = tramite\n            gasto.save()\n            return redirect('gastos', tramite_pk)\n    else:\n        form = NuevoGastoForm()\n    return render(request, 'nuevo_gasto.html', {'tramite': tramite,'form': form})\n\n\n@login_required\ndef notas(request,tramite_pk):\n    tramite = Tramites.objects.get(pk=tramite_pk)\n    notas = Notas.objects.filter(tramite__id=tramite_pk)\n    if request.method == 'GET':\n        return render(request, 'notas_modal.html', {'tramite': tramite, 'notas': notas})\n\n    if request.method == 'POST':\n        try:\n            with transaction.atomic():\n                nota = request.POST.get('nota')\n                n = Notas()\n                n.descripcion = nota\n                n.tramite = tramite\n                n.save()\n                return render(request, 'notas_modal.html', {'tramite': tramite, 'notas': notas})\n\n        except Exception as e:\n            print ('error')\n            print (e)\n\n\n@login_required\ndef editarNota(request,nota_pk,tramite_pk):\n    tramite = get_object_or_404(Tramites, pk=tramite_pk)\n    if request.method=='GET':\n        nota = Notas.objects.get(pk=nota_pk)\n        form = NuevaNotaForm(instance=nota)\n        return render(request, \"nueva_nota.html\", {'nota': form, \"id\": nota_pk, \"tramite\": tramite})\n    elif request.method == 'POST':\n        nota = Notas.objects.get(pk=nota_pk)\n        form = NuevaNotaForm(request.POST, instance=nota)\n        try:\n            if form.is_valid():\n                form.save()\n                nota = Notas.objects.get(pk=nota_pk)\n                form = NuevaNotaForm(instance=nota)\n                return redirect('notas', tramite_pk)\n            else:\n                nota = Notas.objects.get(pk=nota_pk)\n                form = NuevaNotaForm(instance=nota)\n                return render(request, \"nueva_nota.html\", {'nota': form,\"id\":nota_pk, \"tramite\": tramite})\n        except Exception as e:\n            nota = Notas.objects.get(pk=nota_pk)\n            form = NuevaNotaForm(instance=nota)\n            return render(request, \"nueva_nota.html\", {'nota': form,\"id\":nota_pk, \"tramite\": tramite})\n","sub_path":"Tramites/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"544653791","text":"# Benjamin Ugaz\r\n# AM CLASS OF 2021\r\n# 4-27-2021\r\n# Final ver 5/7/2021\r\nfrom tkinter import *\r\nimport pyaudio\r\nimport pyttsx3\r\nimport speech_recognition as sr\r\nimport threading\r\nimport time\r\nimport os\r\nimport datetime\r\nimport wikipedia\r\nimport subprocess\r\nimport webbrowser\r\nfrom selenium import webdriver\r\nimport cv2\r\nimport imageio\r\nfrom PIL import Image, ImageTk\r\nfrom playsound import playsound\r\n\r\nsttime = datetime.datetime.now().strftime(\"%D\")\r\nstarttime = open(\"runtime.txt\",\"a\")\r\nstarttime.write(\"Started: \" + sttime +\"\\n\")\r\nstarttime.write(\"______________\\n\" )\r\nstarttime.close()\r\nroot = Tk()\r\nroot.configure(bg='black')\r\nroot.title(\"JRVA\")\r\nroot.protocol(\"WM_DELETE_WINDOW\", root.iconify)\r\nprint(root.protocol)\r\n\r\n# make Esc exit the program\r\n\r\nroot.bind('', lambda e: root.destroy())\r\n\r\nphoto = PhotoImage(file = \"Icons/logo.png\")\r\ncl = PhotoImage(file = \"Icons/classlogo.png\")\r\n#frames of the animation\r\nframes = [PhotoImage(file='Icons/Background.gif',format = 'gif -index %i' %(i)) for i in range(60)]\r\n\r\n\r\n#speed of the animation\r\nspeed = 50\r\nwith open('Commands.txt', 'r') as file:\r\n data = file.read()\r\n#speed rate of the voice\r\nnewVoiceRate = 175\r\n#speach engine properties\r\nJRVA = pyttsx3.init()\r\nvoices =JRVA.getProperty('voices')\r\nJRVA.setProperty('voice',voices[0].id)\r\nJRVA.setProperty('rate',newVoiceRate)\r\n\r\n#How jrva will intorduce himself\r\nintroduction = 'Im Jrva. Your virtual assistant.'\r\n\r\n#mic on image\r\nmic_status_on = [PhotoImage(file='Icons/mic_1.png')]\r\n\r\n#mic off image\r\nmic_status_off = [PhotoImage(file='Icons/mic_0.png')]\r\n\r\n#a function that tells jrva what to say\r\ndef say(audio):\r\n write(\"Jrva said: \" + audio)\r\n label.configure(text=audio,compound='center')\r\n JRVA.say(audio)\r\n JRVA.runAndWait()\r\n\r\ndef write(say):\r\n time = datetime.datetime.now().strftime(\"%H:%M:%S\\n\")\r\n runtime = open(\"runtime.txt\",\"a\")\r\n runtime.write('\\n')\r\n runtime.write('Time: ' + time)\r\n runtime.write(str(say) + '\\n')\r\n\r\n runtime.close()\r\n\r\n#gets the current time of day \r\ndef saytime():\r\n hour=int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<12:\r\n label.configure(text=\"Good morning, \" + introduction)\r\n say(\"Good morning, \" + introduction)\r\n elif hour>=12 and hour<18:\r\n say(\"Good afternoon. \" + introduction)\r\n label.configure(text=\"Good afternoon. \" + introduction)\r\n else:\r\n say(\"Good Evenning. \" + introduction)\r\n label.configure(text=\"Good Evenning. 
\" + introduction)\r\n\r\n#gets the command and activates it\r\ndef results():\r\n try:\r\n query=takeCommand().lower()\r\n write('User said: ' + query)\r\n if 'who are you' in query:\r\n say(introduction)\r\n\r\n elif 'james' in query:\r\n say('opening James Rumsey home page')\r\n webbrowser.open('jamesrumsey.com')\r\n\r\n elif 'adult programs' in query:\r\n say('opening James Rumsey adult programs')\r\n webbrowser.open('https://www.jamesrumsey.com/category/adult-programs/')\r\n\r\n elif 'high school progrmas' in query:\r\n say('opening James Rumsey high school programs')\r\n webbrowser.open('https://www.jamesrumsey.com/category/high-school-programs/')\r\n\r\n elif 'nursing' in query:\r\n say('opening James Rumsey nursing program')\r\n webbrowser.open('https://www.jamesrumsey.com/academics/adult-programs/practical-nursing/')\r\n\r\n elif 'adult education' in query:\r\n say('opening adult education')\r\n webbrowser.open('https://www.jamesrumsey.com/academics/adult-basic-education/')\r\n\r\n elif 'youtube' in query:\r\n say('Opening Youtube')\r\n webbrowser.open(\"youtube.com\")\r\n\r\n elif 'google' in query:\r\n say('Opening Google')\r\n webbrowser.open(\"google.com\")\r\n\r\n elif 'time' in query:\r\n strTime=datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n say(f\"the time is {strTime}\")\r\n\r\n elif 'how are you' in query:\r\n say(\"Great, Thanks for asking.\")\r\n\r\n elif 'spotify' in query:\r\n say(\"Opening Spotify\")\r\n subprocess.call('Apps/Spotify/Spotify.exe')\r\n\r\n elif 'grades' in query:\r\n say(\"Opening Gradebook\")\r\n webbrowser.open('https://igradeplus.com/index.jsp')\r\n \r\n elif 'career' in query:\r\n say(\"Opening Career Center\")\r\n webbrowser.open('http://mcc.jamesrumsey.com/')\r\n\r\n elif 'resources' in query:\r\n say(\"Opening Student Resources\")\r\n webbrowser.open('https://www.jamesrumsey.com/knowledge-base/electronic-resources/')\r\n \r\n elif 'calender' in query:\r\n say('Opening Calender')\r\n webbrowser.open('https://www.jamesrumsey.com/calendars/')\r\n\r\n elif query == 'none':\r\n say(\"Sorry, i didnt get that. please try again.\")\r\n write('Microphone didnt Hear anything')\r\n readon()\r\n\r\n else:\r\n say('Here is a Google search for, ' + query + '.' 
)\r\n            webbrowser.open('https://www.google.com/search?q=' + query)\r\n    except Exception as e:\r\n        write(e)\r\n        readoff()\r\n\r\n\r\n#listens for a command\r\ndef takeCommand():\r\n    try:\r\n        r=sr.Recognizer()\r\n        with sr.Microphone() as source:\r\n            try:\r\n                r.pause_threshold=1\r\n                say(\"Listening..\")\r\n                r.dynamic_energy_threshold = False\r\n                audio=r.listen(source,timeout=5.0,phrase_time_limit=5)\r\n            except Exception as e:\r\n                write(e)\r\n                audio = ''\r\n        try:\r\n            readoff()\r\n            say(\"One second\")\r\n            query=r.recognize_google(audio,language='en-in')\r\n        except Exception as e:\r\n            write(e)\r\n            return \"None\"\r\n        return query\r\n    except Exception as e:\r\n        write(e)\r\n        readoff()\r\n        say('No Microphone was Detected')\r\n\r\n\r\n#activates the mic and updates the image\r\ndef readon():\r\n    write('Mic On Activated')\r\n    button.config(image=mic_status_on,command=lambda: threading.Thread(target=readoff, daemon=True).start())\r\n    threading.Thread(target=results, daemon=True).start()\r\n\r\n\r\ndef playsounds():\r\n    playsound('Videos/jriv.mp3')\r\n\r\ndef video():\r\n    video_name = \"Videos/jriv.mp4\" #This is your video file path\r\n    video = imageio.get_reader(video_name)\r\n\r\n    videowin = Toplevel(root)\r\n\r\n    # sets the title of the\r\n    # Toplevel widget\r\n    videowin.title(\"INTRO\")\r\n\r\n    videowin.resizable(False, False)\r\n\r\n    re = Label(videowin,background=\"black\")\r\n    re.pack()\r\n    threading.Thread(target=playsounds, daemon=True).start()\r\n    for image in video.iter_data():\r\n        frame_image = ImageTk.PhotoImage(Image.fromarray(image))\r\n        re.config(image=frame_image)\r\n        re.image = frame_image\r\n    video.close()  # release the reader once playback finishes\r\n\r\n\r\n\r\n# function to open a new window\r\n# on a button click\r\ndef openNewWindow():\r\n\r\n    # Toplevel object which will\r\n    # be treated as a new window\r\n    newWindow = Toplevel(root)\r\n\r\n    # sets the title of the\r\n    # Toplevel widget\r\n    newWindow.title(\"Jrva\")\r\n\r\n    # sets the geometry of toplevel\r\n    newWindow.geometry(\"350x350\")\r\n\r\n    newWindow.resizable(False, False)\r\n    # A Label widget to show in toplevel\r\n    Label(newWindow,text =data,relief=RIDGE,justify=LEFT,bd=2,font=('Bookman',8)).pack()\r\n\r\n\r\n#turns off the mic\r\ndef readoff():\r\n    write('Mic Off Activated')\r\n    button.config(image=mic_status_off,command=lambda: threading.Thread(target=readon, daemon=True).start())\r\n\r\n#updates the animation\r\ndef update(ind):\r\n    frame = frames[ind]\r\n    ind += 1\r\n    if ind == 60:\r\n        ind = 0\r\n    label.configure(image=frame,compound='center',font='impact',foreground=\"orange\")\r\n    root.after(speed, update, ind)\r\n\r\nbutton= Button(root,activebackground=\"black\",borderwidth=0,background=\"black\", width=50, height=50,image=mic_status_off,command=lambda: threading.Thread(target=readon, daemon=True).start())\r\nc = Button(root,background='#DCAE96', text=\"Commands\",command = openNewWindow, width=9, )\r\ntag = Button(root,image=cl,activebackground=\"black\",borderwidth=0,bg='black',height=100,relief=SUNKEN, state=ACTIVE,pady=50,command=lambda: threading.Thread(target=video, daemon=True).start())\r\ntext= Label(root)\r\nlabel = Label(root,background=\"black\")\r\nlabel.pack()\r\n\r\nbutton.pack()\r\ntag.pack(side=RIGHT)\r\nroot.iconphoto(False, photo)\r\nroot.resizable(False, False)\r\nc.pack(side=LEFT)\r\nroot.geometry('600x485')\r\nroot.after(0, update, 0)\r\nthreading.Thread(target=saytime, 
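\r\n    # speak the greeting from a daemon thread so the Tk mainloop starts without waiting\r\n    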
daemon=True).start()\r\n\r\nroot.mainloop()\r\nsttime = datetime.datetime.now().strftime(\"%D\")\r\nstarttime = open(\"runtime.txt\",\"a\")\r\nstarttime.write('\\n')\r\nstarttime.write(\"closed: \"+ sttime + '\\n')\r\nstarttime.write(\"______________\\n\" )\r\nstarttime.close()","sub_path":"JRVA/JRVA.py","file_name":"JRVA.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"291755905","text":"\n\nfrom conductor.native.lib.dependency_list import DependencyList\nimport glob\n\"\"\"Test DependencyList. Assume run on posix filesystem\"\"\"\n\nimport os\nimport sys\nimport unittest\nimport mock\n\n\nfrom conductor.native.lib.sequence import Sequence\n\n\nNATIVE_MODULE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif NATIVE_MODULE not in sys.path:\n sys.path.insert(0, NATIVE_MODULE)\n\nsys.modules['glob'] = __import__(\n 'conductor.native.lib.mocks.glob', fromlist=['dummy'])\n\n\n@mock.patch.dict(os.environ, {\n \"HOME\": \"/users/joebloggs\",\n \"SHOT\": \"/metropolis/shot01\"\n})\nclass DepListTest(unittest.TestCase):\n\n def test_init_empty(self):\n d = DependencyList()\n self.assertEqual(list(d), [])\n\n def test_adds_files(self):\n d = DependencyList()\n d.add(\"file1\", \"file2\", must_exist=False)\n self.assertEqual(len(d), 2)\n\n def test_expand_tilde(self):\n d = DependencyList()\n d.add(\"~/file1\", \"~/file2\", must_exist=False)\n self.assertIn(\"/users/joebloggs/file1\", d)\n\n def test_expand_envvar(self):\n d = DependencyList()\n d.add(\"$SHOT/file1\", \"$HOME/file2\", must_exist=False)\n self.assertIn(\"/metropolis/shot01/file1\", d)\n self.assertIn(\"/users/joebloggs/file2\", d)\n\n def test_dedup_same_filenames(self):\n d = DependencyList()\n d.add(\"/file1\", \"/file2\", \"/file2\", must_exist=False)\n self.assertEqual(len(d), 2)\n self.assertIn(\"/file1\", d)\n self.assertIn(\"/file2\", d)\n\n def test_dedup_contained_file(self):\n d = DependencyList()\n d.add(\n \"/dir1/\",\n \"/dir1/file1\",\n \"/dir2/file1\",\n \"file2\",\n must_exist=False)\n self.assertEqual(len(d), 3)\n\n def test_dedup_dirtied_on_add(self):\n d = DependencyList()\n d.add(\"/file1\", must_exist=False)\n self.assertFalse(d._clean)\n\n def test_dedup_cleaned_on_access_iter(self):\n d = DependencyList()\n d.add(\"/file1\", must_exist=False)\n ls = list(d)\n self.assertTrue(d._clean)\n\n def test_dedup_cleaned_on_access_len(self):\n d = DependencyList()\n d.add(\"/file1\", must_exist=False)\n ls = len(d)\n self.assertTrue(d._clean)\n\n def test_dedup_cleaned_on_access_next(self):\n d = DependencyList()\n d.add(\"/file1\", \"/file2\", \"/file3\", must_exist=False)\n n = next(d)\n self.assertTrue(d._clean)\n\n def test_next(self):\n d = DependencyList()\n d.add(\"/file1\", \"/file2\", \"/file3\", must_exist=False)\n self.assertEqual(next(d), \"/file1\")\n self.assertEqual(next(d), \"/file2\")\n\n def test_next_fails_after_last(self):\n d = DependencyList()\n d.add(\"/file1\", \"/file2\", \"/file3\", must_exist=False)\n next(d)\n next(d)\n next(d)\n with self.assertRaises(StopIteration):\n next(d)\n\n def test_next_reset_after_add(self):\n d = DependencyList()\n d.add(\"/file1\", \"/file2\", \"/file3\", must_exist=False)\n next(d)\n next(d)\n d.add(\"/file4\")\n self.assertEqual(next(d), \"/file1\")\n\n def test_common_path_when_common_prefix_in_filename(self):\n d = DependencyList()\n files = [\"/users/joebloggs/tmp/dissention/perfect\",\n \"/users/joebloggs/tmp/disagreement/crimson\",\n \"/users/joebloggs/tmp/diatribe/belew\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/users/joebloggs/tmp\")\n\n def test_common_path(self):\n d = DependencyList()\n files = [\"/users/joebloggs/tmp/foobar/test\",\n \"/users/joebloggs/tmp/baz/fripp\",\n \"/users/joebloggs/tmp/elephant/corner\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/users/joebloggs/tmp\")\n\n def 
test_common_path_when_one_path_is_the_common_path(self):\n d = DependencyList()\n files = [\n \"/users/joebloggs/tmp\",\n \"/users/joebloggs/tmp/bolly/operation\",\n \"/users/joebloggs/tmp/stay/go\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/users/joebloggs/tmp\")\n\n def test_common_path_when_lowest_path_is_the_common_path(self):\n d = DependencyList()\n files = [\n \"/users/joebloggs/tmp/foo.txt\",\n \"/users/joebloggs/tmp/modelman.jpg\",\n \"/users/joebloggs/tmp/ration.cpp\",\n \"/users/joebloggs/tmp/bill.project\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/users/joebloggs/tmp\")\n\n def test_common_path_when_single_path(self):\n d = DependencyList()\n files = [\"/users/joebloggs/tmp/foo.txt\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/users/joebloggs/tmp/foo.txt\")\n\n def test_common_path_when_duplicate_entries_of_single_path(self):\n d = DependencyList()\n files = [\n \"/users/joebloggs/tmp/foo.txt\",\n \"/users/joebloggs/tmp/foo.txt\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/users/joebloggs/tmp/foo.txt\")\n\n def test_common_path_is_none_when_no_entries(self):\n d = DependencyList()\n self.assertIsNone(d.common_path())\n\n def test_common_path_is_slash_when_root(self):\n d = DependencyList()\n files = [\n \"/users/joebloggs/tmp/foo.txt\",\n \"/dev/joebloggs/tmp/foo.txt\"]\n d.add(*files, must_exist=False)\n self.assertEqual(d.common_path(), \"/\")\n\n def test_self_glob_when_files_match(self):\n glob.populate(Sequence.create(\"1-20\").expand(\"/some/file.####.exr\"))\n d = DependencyList()\n file = \"/some/file.*.exr\"\n d.add(file, must_exist=False)\n d.glob()\n self.assertEqual(len(d), 20)\n\n def test_self_glob_dedups_when_many_files_match(self):\n glob.populate(Sequence.create(\"1-20\").expand(\"/some/file.####.exr\"))\n d = DependencyList()\n files = [\"/some/file.*.exr\", \"/some/*.exr\"]\n d.add(*files, must_exist=False)\n d.glob()\n self.assertEqual(len(d), 20)\n\n def test_self_glob_when_files_dont_match(self):\n glob.populate(Sequence.create(\"1-20\").expand(\"/other/file.####.exr\"))\n d = DependencyList()\n file = \"/some/file.*.exr\"\n d.add(file, must_exist=False)\n d.glob()\n self.assertEqual(len(d), 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"conductor/native/tests/test_dependency_list.py","file_name":"test_dependency_list.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"78763263","text":"#!/usr/bin/env python3\n\nimport os\nimport textwrap\nimport glob\nimport re\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom spiops.utils.utils import replace\nfrom spiops.utils.files import update_former_versions\n\n\ndef main(test=False, log=False):\n execution_dir = os.getcwd()\n\n with open(os.path.dirname(__file__) + '/config/version',\n 'r') as f:\n for line in f:\n version = line\n\n parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,\n description=textwrap.dedent('''\\\n\n SPIOPS -- Version {}, SPICE Operational Procedures for ESA Missions\n\n SPIOPS is a library aimed to help scientists and engineers that deal \n with Solar System Geometry for ESA planetary science missions. More \n information is available here:\n\n https://github.com/esaSPICEservice/spiops\n\n\n'''.format(version)),\n epilog='''\n __ __ __ __ __ __ ___ __ ___ __ __ ___\n /__\\ /__` '__\\ /__` |__) | / ` |__ /__` |__ |__) \\ / | / ` |__\n \\__, .__/ \\__/ .__/ | | \\__, |___ .__/ |___ | \\ \\/ | \\__, |___\n\n esa_spice@sciops.esa.int\n http://spice.esac.esa.int\n\n''')\n parser.add_argument('-v', '--version',\n help='Display the version of SPIOPS',\n action='store_true')\n parser.add_argument('-m', '--metakernel',\n help='Generate local meta-kernels from {mission}_{type}.tm',\n action='store_true')\n parser.add_argument('-a', '--all',\n help='Generate local meta-kernels from all files',\n action='store_true')\n parser.add_argument('-c', '--clean',\n help='Remove local meta-kernels',\n action='store_true')\n parser.add_argument('-f', '--former',\n help='Update the meta-kernels in the former_versions directory.',\n action='store_true')\n args = parser.parse_args()\n\n if args.version:\n print(version)\n return\n\n if args.clean:\n\n cwd = os.getcwd()\n mks_in_dir = glob.glob('*local*.tm')\n mks_in_dir += glob.glob('*LOCAL*.TM')\n\n for mk_in_dir in mks_in_dir:\n os.remove(cwd + os.sep + mk_in_dir)\n\n if args.metakernel:\n\n cwd = os.getcwd()\n local_mks = []\n mks_in_dir = glob.glob('*.tm')\n mks_in_dir += glob.glob('*.TM')\n\n\n for mk_in_dir in mks_in_dir:\n if 'local' not in mk_in_dir.lower():\n if args.all:\n replace(mk_in_dir, \"'..'\",\n \"'\" + cwd.rsplit('/kernels', 1)[0] + \"/kernels'\")\n local_mks.append(mk_in_dir)\n else:\n not_append = re.search(r\".*_v[0-9]{3}_[0-9]{8}_[0-9]{3}.tm\", mk_in_dir.lower())\n if not_append == None:\n replace(mk_in_dir, \"'..'\",\n \"'\" + cwd.rsplit('/kernels', 1)[0] + \"/kernels'\")\n local_mks.append(mk_in_dir)\n\n if local_mks:\n print('SPIOPS -- Meta-Kernel Update\\nThe following meta-kernels have been generated/replaced to local:')\n for mk in local_mks:\n print(f' {mk}')\n else:\n print(\n 'SPIOPS -- Meta-Kernel Update -- No meta-kernels have been updated.')\n\n if args.former:\n cwd = os.getcwd()\n if 'mk/former_versions' in cwd:\n mk_dir = os.sep.join(cwd.split(os.sep)[:-1])\n kernels_dir = os.sep.join(cwd.split(os.sep)[:-2])\n else:\n mk_dir = cwd\n kernels_dir = os.sep.join(cwd.split(os.sep)[:-1])\n try:\n updated_mks = update_former_versions(mk_dir, kernels_dir)\n if updated_mks:\n print(\n 'SPIOPS -- Meta-Kernel Update\\nThe following former_versions meta-kernels have been updated:')\n for mk in updated_mks:\n print(f' {mk}')\n else:\n print(\n 'SPIOPS -- Meta-Kernel Update -- No meta-kernels have been updated.')\n except Exception as e: print(e)\n\n\n 
return\n\n","sub_path":"spiops/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"596119203","text":"from qtpyvcp_conversational_gcode.ops.face_ops import FaceOps\n\nfrom qtpyvcp_conversational_gcode.widgets.base_widget import ConversationalBaseWidget\n\nclass FacingWidget(ConversationalBaseWidget):\n def __init__(self, parent=None):\n super(FacingWidget, self).__init__(parent, 'facing.ui')\n\n self.step_down_input.editingFinished.connect(self._validate_step_down)\n self.step_over_input.editingFinished.connect(self._validate_step_over)\n self.x_start_input.editingFinished.connect(self._validate_x_positions)\n self.x_end_input.editingFinished.connect(self._validate_x_positions)\n self.y_start_input.editingFinished.connect(self._validate_y_positions)\n self.y_end_input.editingFinished.connect(self._validate_y_positions)\n\n self._validators.extend([self._validate_step_down,\n self._validate_step_over,\n self._validate_x_positions,\n self._validate_y_positions])\n\n def step_over(self):\n return self.step_over_input.value()\n\n def step_down(self):\n return self.step_down_input.value()\n\n def x_start(self):\n return self.x_start_input.value()\n\n def x_end(self):\n return self.x_end_input.value()\n\n def y_start(self):\n return self.y_start_input.value()\n\n def y_end(self):\n return self.y_end_input.value()\n\n def create_op(self):\n f = FaceOps()\n self._set_common_fields(f)\n\n f.tool_diameter = self.tool_diameter()\n f.x_start = self.x_start()\n f.x_end = self.x_end()\n f.y_start = self.y_start()\n f.y_end = self.y_end()\n\n if self.step_down() == 0:\n f.step_down = abs(self.z_end() - self.z_start())\n self.step_down_input.setText('{0:.3f}'.format(f.step_down))\n else:\n f.step_down = self.step_down()\n\n if self.step_over() == 0:\n f.step_over = self.tool_diameter() * 0.9\n self.step_over_input.setText('{0:.3f}'.format(f.step_over))\n else:\n f.step_over = self.step_over()\n\n return f.face()\n\n def _validate_step_over(self):\n if self.step_over() < 0:\n self.step_over_input.setStyleSheet('background-color: rgb(205, 141, 123)')\n error = 'Step over cannot be negative.'\n self.step_over_input.setToolTip(error)\n return False, error\n else:\n self.step_over_input.setStyleSheet('')\n return True, None\n\n def _validate_step_down(self):\n if self.step_down() < 0:\n self.step_down_input.setStyleSheet('background-color: rgb(205, 141, 123)')\n error = 'Step down cannot be negative.'\n self.step_down_input.setToolTip(error)\n return False, error\n else:\n self.step_down_input.setStyleSheet('')\n return True, None\n\n def _validate_x_positions(self):\n if self.x_start() < self.x_end():\n self.x_start_input.setStyleSheet('')\n self.x_end_input.setStyleSheet('')\n return True, None\n else:\n self.x_start_input.setStyleSheet('background-color: rgb(205, 141, 123)')\n self.x_end_input.setStyleSheet('background-color: rgb(205, 141, 123)')\n error = 'X start position must be less than end position.'\n self.x_start_input.setToolTip(error)\n self.x_end_input.setToolTip(error)\n return False, error\n\n def _validate_y_positions(self):\n if self.y_start() > self.y_end():\n self.y_start_input.setStyleSheet('')\n self.y_end_input.setStyleSheet('')\n return True, None\n else:\n self.y_start_input.setStyleSheet('background-color: rgb(205, 141, 123)')\n self.y_end_input.setStyleSheet('background-color: rgb(205, 141, 123)')\n error = 'Y start position must be greater than end position.'\n self.y_start_input.setToolTip(error)\n self.y_end_input.setToolTip(error)\n return False, 
error\n","sub_path":"qtpyvcp_conversational_gcode/widgets/facing.py","file_name":"facing.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"180097045","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Make sure our version of stringtemplate3 is imported. Using\n# PYTHONPATH=../.. python TestStringTemplate.py\n# does not work as expected, if the stringtemplate3 egg is installed, because\n# that gets inserted into the path even before PYTHONPATH!\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../..'))\n\nimport cgi\nimport codecs\nimport unittest\nimport warnings\nimport stringtemplate3\n\nwarnings.simplefilter('error', Warning)\nstringtemplate3.crashOnActionParseError = True\n\n\nclass TestEncoding(unittest.TestCase):\n def runTest(self):\n datadir = os.path.join(os.path.dirname(__file__), 'testencoding-data')\n pages_group = stringtemplate3.StringTemplateGroup(\n name='pages',\n rootDir=datadir)\n\n group = stringtemplate3.StringTemplateGroup(\n fileName=os.path.join(datadir, 'page.stg'),\n lexer='default',\n superGroup=pages_group)\n\n class EscapeRenderer(stringtemplate3.AttributeRenderer):\n def toString(self, o, formatName=None):\n if formatName is None:\n # no formatting specified -> escape it\n return cgi.escape(o)\n\n if formatName == \"noescape\":\n return o\n else:\n raise ValueError(\"Unsupported format name\")\n\n pages_group.registerRenderer(unicode, EscapeRenderer())\n\n menu = group.getInstanceOf('menu')\n\n menuItem = group.getInstanceOf('menuItem')\n menuItem['url'] = 'http://www.stringtemplate.org/'\n menuItem['text'] = u\"Îñţérñåţîöñåļîžåţîöñ\"\n menu['items'] = menuItem\n\n menuItem = group.getInstanceOf('menuItem')\n menuItem['url'] = 'http://www.google.com/'\n menuItem['text'] = \"\"\n menu['items'] = menuItem\n\n body = group.getInstanceOf('page_index')\n\n page = group.getInstanceOf('page')\n page['title'] = u\"This is a Îñţérñåţîöñåļîžåţîöñ demo\"\n page['menu'] = menu\n page['body'] = body\n\n fp = codecs.open(os.path.join(datadir, 'expected'), 'r', 'utf-8')\n expected = fp.read()\n fp.close()\n\n self.assertEqual(page.toString(), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/release/PyStringTemplate-3.2b1/stringtemplate3/test/testencoding.py","file_name":"testencoding.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"405844469","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 25 13:21:18 2020\r\n\r\n@author: mrudula\r\n\"\"\"\r\n\r\n#Write a program to check whether a number is even or odd using conditional operator.\r\n\r\nn=int(input(\"enter a number\"))\r\nstatus=\"even\" if n%2==0 else \"odd\"\r\nprint(n,\"is\",status)\r\n\r\n\r\n\r\n\r\n","sub_path":"2nd assign/odd or even, 119.py","file_name":"odd or even, 119.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"571451119","text":"import urllib.request, sys\nfrom bs4 import BeautifulSoup\n\nsearch = sys.argv[1]\nurl = 'https://eksisozluk.com/'+search\nresponse = urllib.request.urlopen(url)\nsoup = BeautifulSoup(response.read(), 'html.parser')\npage_count = soup.find(class_='pager')['data-pagecount']\n\nwith open(search+\".txt\", \"w\") as file:\n for i in range(1,int(page_count)+1):\n response = urllib.request.urlopen(url+'?p='+str(i))\n soup = BeautifulSoup(response.read(), 'html.parser')\n\n file.write('PAGE ' + str(i) + '\\n')\n file.write('----------------\\n')\n file.writelines([item.text.strip('\\n') for item in soup.find_all(class_='content')])\n file.write('\\n')","sub_path":"entry_scraper.py","file_name":"entry_scraper.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"606828592","text":"###########################################################\n# QWOP with Farts PROTOTYPE\n#\n# Yeah\n###########################################################\n\n###########################################################\n# Imported Libraries #\n###########################################################\n\nimport simplegui\n\n###########################################################\n# Load Assets #\n###########################################################\n\n###########################################################\n# Literal Constants #\n###########################################################\n\n###########################################################\n# Global Variables #\n###########################################################\n\ng_game_running = True\n\ng_e_timer_down = None\ng_e_timer_up = None\ng_f_timer_down = None\ng_f_timer_up = None\n\ng_e_instruct = 'Up'\ng_f_instruct = 'Up'\n\ng_e_state = 'Up'\ng_f_state = 'Up'\n\n\ng_e_health = 100\ng_f_health = 100\n\n###########################################################\n# Defined Classes #\n###########################################################\n\n###########################################################\n# Event Handlers #\n###########################################################\n \n \ndef keydwn_hndlr( p_key ):\n \"\"\"\n Handles key presses.\n \"\"\"\n \n # Import globals\n global g_e_state, g_f_state\n \n # Perform key function\n if p_key == simplegui.KEY_MAP['E']:\n g_e_state = \"Down\"\n elif p_key == simplegui.KEY_MAP['F']:\n g_f_state = \"Down\"\n \n \ndef keyup_hndlr( p_key ):\n \"\"\"\n Handles key releases.\n \"\"\"\n \n # Import globals\n global g_e_state, g_f_state\n \n # Perform key function\n if p_key == simplegui.KEY_MAP['E']:\n g_e_state = \"Up\"\n elif p_key == simplegui.KEY_MAP['F']:\n g_f_state = \"Up\"\n\n# Handler to draw on canvas\ndef draw_hndlr( p_canvas ):\n \n # Import globals\n global g_game_running, g_e_health, g_f_health, g_e_instruct, g_f_instruct, g_e_state, g_f_state\n \n if g_game_running:\n p_canvas.draw_text( 'E:', [60, 50], 20, \"White\" )\n p_canvas.draw_text( 'F:', [60, 150], 20, \"White\" )\n p_canvas.draw_text( g_e_state, [100, 50], 20, \"White\" )\n p_canvas.draw_text( g_f_state, [100,150], 20, \"White\" )\n p_canvas.draw_text( g_e_instruct, [150, 50], 20, \"Red\" )\n p_canvas.draw_text( g_f_instruct, [150,150], 20, \"Red\" )\n \n p_canvas.draw_polygon( [ [200, 30], [200, 50],\n [(200 + g_e_health), 50],\n [(200 + g_e_health), 30] ],\n 1, \"White\", \"White\" )\n p_canvas.draw_polygon( [ [200,130], [200,150],\n [(200 + g_f_health),150],\n [(200 + g_f_health),130] ],\n 1, \"White\", \"White\" )\n else:\n p_canvas.draw_text( 'YOU A DEAD', [20, 100], 40, \"Red\" )\n \ndef e_timer_down_hndlr():\n \n # Import globals\n global g_e_instruct, g_e_timer_up\n \n g_e_instruct = \"Up\"\n g_e_timer_down.stop()\n g_e_timer_up.start()\n \ndef e_timer_up_hndlr():\n \n # Import globals\n global g_e_instruct, g_e_timer_down\n \n g_e_instruct = \"Down\"\n g_e_timer_up.stop()\n g_e_timer_down.start()\n \ndef f_timer_down_hndlr():\n \n # Import globals\n global g_f_instruct, g_f_timer_up\n \n g_f_instruct = \"Up\"\n g_f_timer_down.stop()\n g_f_timer_up.start()\n \ndef f_timer_up_hndlr():\n \n # Import globals\n global g_f_instruct, g_f_timer_down\n \n g_f_instruct = \"Down\"\n g_f_timer_up.stop()\n g_f_timer_down.start()\n \ndef health_timer_hndlr():\n \n # Import globals\n global g_game_running, g_e_health, g_f_health, g_e_instruct, g_f_instruct, 
g_e_state, g_f_state\n \n if g_e_instruct != g_e_state:\n g_e_health -= 2\n else:\n g_e_health = (g_e_health+1) if g_e_health < 100 else 100\n \n if g_f_instruct != g_f_state:\n g_f_health -= 2\n else:\n g_f_health = (g_f_health+1) if g_f_health < 100 else 100\n \n if g_f_health < 0 or g_e_health < 0:\n g_game_running = False\n \n\n###########################################################\n# Helper Functions #\n###########################################################\n\n###########################################################\n# Main Function #\n###########################################################\n\n# Create a frame and assign callbacks to event handlers\nl_frame = simplegui.create_frame( \"Home\", 300, 200 )\nl_frame.set_draw_handler( draw_hndlr )\nl_frame.set_keydown_handler( keydwn_hndlr )\nl_frame.set_keyup_handler( keyup_hndlr )\n\ng_e_timer_down = simplegui.create_timer( 1000.0, e_timer_down_hndlr )\ng_e_timer_up = simplegui.create_timer( 1000.0, e_timer_up_hndlr )\ng_f_timer_down = simplegui.create_timer( 2000.0, f_timer_down_hndlr )\ng_f_timer_up = simplegui.create_timer( 2000.0, f_timer_up_hndlr )\nl_timer_health = simplegui.create_timer( 90.0, health_timer_hndlr )\n\ng_e_timer_up.start()\ng_f_timer_up.start()\nl_timer_health.start()\n\n# Start the frame animation\nl_frame.start()\n\n######################## End Program ######################\n","sub_path":"fartqwop_prototype.py","file_name":"fartqwop_prototype.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"161942864","text":"#Program napisany w jezyku Python 3\n#sprawdzony przy uzyciu interpretera \"Python (v3.4m)\"\n\n#***************************************************************************\nprint(\"Zakladam ze wierzcholkami moga byc liczby naturalne z przedzialu <0;n)\")\nprint(\"Np dla liczby wierzcholkow 5 wierzcholkami moga byc liczby <0;4>\\n\")\n#*******Tworzenie listy sasiedztwa**********************************\nliczba_wierzcholkow=int(input(\"Prosze podac liczbe wierzcholkow\"))\nliczba_krawedzi=int(input(\"Prosze podac liczbe krawedzi\"))\nlista=[None]*(liczba_wierzcholkow)\nkolor=[\"bialy\"]*(liczba_wierzcholkow)\npi=[-1]*(liczba_wierzcholkow)\nfor x in range(0,int(liczba_wierzcholkow)): lista[x]=[]\nfor y in range(0,int(liczba_krawedzi)):\n print(\"Prosze podac poczatek krawedzi nr\",y)\n a=int(input())\n print(\"Prosze podac koniec krawedzi nr\",y)\n b=int(input())\n lista[a].append(b)\n lista[b].append(a)\n#***********************************\nQ=[]\nkolor[0]=\"szary\"\nQ.append(0)\nwhile Q :\n u=Q[0]\n for v in lista[u]:\n if kolor[v]==\"bialy\":\n kolor[v]=\"szary\"\n pi[v]=u\n Q.append(v)\n\n Q.pop(0)\n kolor[u]=\"czarny\"\nfor x in range(liczba_wierzcholkow):\n if pi[x] != -1:\n print(pi[x],\" jest poprzednikiem \",x)\n","sub_path":"Pythony/poprzednicy_bfs.py","file_name":"poprzednicy_bfs.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"441379846","text":"from os import name\nfrom app.main.model.domain_tasks_model import DomainTasksModel\nfrom app.main.model.special_skills_model import SpecialSkillsModel\nfrom app.main.util.response import response_object\nfrom app.main.model.job_domain_model import JobDomainModel\nfrom app.main import db\n\ndef get_all_domain(name):\n if not name:\n domains = JobDomainModel.query.all()\n else:\n domains = JobDomainModel.query.filter(JobDomainModel.name.contains(name))\n domains = [ d.to_json() for d in domains ]\n\n return response_object(code=200, message=\"Get list domain success|Lấy danh sách domain thành công\", data=domains)\n\ndef add_new_skill_to_domain(data): \n domain = JobDomainModel.query.get(data['domain_id'])\n if not domain:\n return response_object(200, \"Domain not found|Domain không tồn tại\", data=None)\n\n skill = SpecialSkillsModel.query.get(data['skill_id'])\n if not skill:\n return response_object(200, \"Skill not found|Kỹ năng không tồn tại\", data=None)\n\n try:\n domain.skills.append(skill)\n db.session.add(domain)\n db.session.commit()\n return response_object(200, \"Add skill success|Thêm skill thành công\", data=skill.to_json())\n except Exception as ex:\n return response_object(200, \"Add skill fail|Thêm skill thất bại\", data=None)\n\ndef add_new_task_to_domain(data): \n domain = JobDomainModel.query.get(data['domain_id'])\n if not domain:\n return response_object(200, \"Domain not found|Domain không tồn tại\", data=None)\n\n content = data['content']\n if not content or content == \"\":\n return response_object(200, \"Content not empty|Nội dung trống!\", data=None)\n\n try:\n task = DomainTasksModel(\n name = content,\n job_domain_id = domain.id\n )\n db.session.add(task)\n db.session.commit()\n return response_object(200, \"Add task success|Thêm task thành công\", data=task.to_json())\n except Exception as ex:\n return response_object(200, \"Add task fail|Thêm task thất bại\", data=None)","sub_path":"app/main/service/job_domain_service.py","file_name":"job_domain_service.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"626101805","text":"from __future__ import absolute_import\nfrom six import reraise as _reraise\n\nfrom wrapt import decorator as _decorator\nfrom sys import exc_info as _exc_info\nfrom flytekit.common.exceptions import system as _system_exceptions, user as _user_exceptions, base as _base_exceptions\nfrom flytekit.models.core import errors as _error_model\nfrom traceback import format_tb as _format_tb\n\n\nclass FlyteScopedException(Exception):\n\n def __init__(self, context, exc_type, exc_value, exc_tb, top_trim=0, bottom_trim=0, kind=None):\n self._exc_type = exc_type\n self._exc_value = exc_value\n self._exc_tb = exc_tb\n self._top_trim = top_trim\n self._bottom_trim = bottom_trim\n self._context = context\n self._kind = kind\n super(FlyteScopedException, self).__init__(str(self.value))\n\n @property\n def verbose_message(self):\n tb = self.traceback\n to_trim = self._top_trim\n while to_trim > 0 and tb.tb_next is not None:\n tb = tb.tb_next\n\n top_tb = tb\n limit = 0\n while tb is not None:\n limit += 1\n tb = tb.tb_next\n limit = max(0, limit - self._bottom_trim)\n\n lines = _format_tb(top_tb, limit=limit)\n lines = [line.rstrip() for line in lines]\n lines = ('\\n'.join(lines).split('\\n'))\n traceback_str = '\\n '.join([\"\"] + lines)\n\n format_str = (\n \"Traceback (most recent call last):\\n\"\n \"{traceback}\\n\"\n \"\\n\"\n \"Message:\\n\"\n \"\\n\"\n \" {message}\")\n return format_str.format(traceback=traceback_str, message=str(self.value))\n\n def __str__(self):\n return str(self.value)\n\n @property\n def value(self):\n if isinstance(self._exc_value, FlyteScopedException):\n return self._exc_value.value\n return self._exc_value\n\n @property\n def traceback(self):\n if isinstance(self._exc_value, FlyteScopedException):\n return self._exc_value.traceback\n return self._exc_tb\n\n @property\n def type(self):\n if isinstance(self._exc_value, FlyteScopedException):\n return self._exc_value.type\n return self._exc_type\n\n @property\n def error_code(self):\n \"\"\"\n :rtype: Text\n \"\"\"\n if isinstance(self._exc_value, FlyteScopedException):\n return self._exc_value.error_code\n\n if hasattr(type(self._exc_value), \"error_code\"):\n return type(self._exc_value).error_code\n return \"{}:Unknown\".format(self._context)\n\n @property\n def kind(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if self._kind is not None:\n # If kind is overriden, return it.\n return self._kind\n elif isinstance(self._exc_value, FlyteScopedException):\n # Otherwise, go lower in the scope to find the kind of exception.\n return self._exc_value.kind\n elif isinstance(self._exc_value, _base_exceptions.FlyteRecoverableException):\n # If it is an exception that is recoverable, we return it as such.\n return _error_model.ContainerError.Kind.RECOVERABLE\n else:\n # The remaining exceptions are considered unrecoverable.\n return _error_model.ContainerError.Kind.NON_RECOVERABLE\n\n\nclass FlyteScopedSystemException(FlyteScopedException):\n\n def __init__(self, exc_type, exc_value, exc_tb, **kwargs):\n super(FlyteScopedSystemException, self).__init__(\n \"SYSTEM\", exc_type, exc_value, exc_tb, **kwargs\n )\n\n @property\n def verbose_message(self):\n \"\"\"\n :rtype: Text\n \"\"\"\n base_msg = super(FlyteScopedSystemException, self).verbose_message\n base_msg += \"\\n\\nSYSTEM ERROR! 
Contact platform administrators.\"\n return base_msg\n\n\nclass FlyteScopedUserException(FlyteScopedException):\n\n def __init__(self, exc_type, exc_value, exc_tb, **kwargs):\n super(FlyteScopedUserException, self).__init__(\n \"USER\", exc_type, exc_value, exc_tb, **kwargs\n )\n\n @property\n def verbose_message(self):\n \"\"\"\n :rtype: Text\n \"\"\"\n base_msg = super(FlyteScopedUserException, self).verbose_message\n base_msg += \"\\n\\nUser error.\"\n return base_msg\n\n\n_NULL_CONTEXT = 0\n_USER_CONTEXT = 1\n_SYSTEM_CONTEXT = 2\n\n# Keep the stack with a null-context so we never have to range check when peeking back.\n_CONTEXT_STACK = [_NULL_CONTEXT]\n\n\ndef _is_base_context():\n return _CONTEXT_STACK[-2] == _NULL_CONTEXT\n\n\n@_decorator\ndef system_entry_point(wrapped, instance, args, kwargs):\n \"\"\"\n Decorator for wrapping functions that enter a system context. This should decorate every method a user might\n call. This will allow us to add differentiation between what is a user error and what is a system failure.\n Furthermore, we will clean the exception trace so as to make more sense to the user--allowing them to know if they\n should take action themselves or pass on to the platform owners. We will dispatch metrics and such appropriately.\n \"\"\"\n try:\n _CONTEXT_STACK.append(_SYSTEM_CONTEXT)\n if _is_base_context():\n try:\n return wrapped(*args, **kwargs)\n except FlyteScopedException as ex:\n _reraise(ex.type, ex.value, ex.traceback)\n else:\n try:\n return wrapped(*args, **kwargs)\n except FlyteScopedException:\n # Just pass-on the exception that is already wrapped and scoped\n _reraise(*_exc_info())\n except _user_exceptions.FlyteUserException:\n # Re-raise from here.\n _reraise(\n FlyteScopedUserException,\n FlyteScopedUserException(*_exc_info()),\n _exc_info()[2])\n except:\n # System error, raise full stack-trace all the way up the chain.\n _reraise(\n FlyteScopedSystemException,\n FlyteScopedSystemException(*_exc_info(), kind=_error_model.ContainerError.Kind.RECOVERABLE),\n _exc_info()[2])\n finally:\n _CONTEXT_STACK.pop()\n\n\n@_decorator\ndef user_entry_point(wrapped, instance, args, kwargs):\n \"\"\"\n Decorator for wrapping functions that enter into a user context. This will help us differentiate user-created\n failures even when it is re-entrant into system code.\n\n Note: a user_entry_point can ONLY ever be called from within a @system_entry_point wrapped function, therefore,\n we can always ensure we will hit a system_entry_point to correctly reformat our exceptions. 
Also, any exception\n we create here will only be handled within our system code so we don't need to worry about leaking weird exceptions\n to the user.\n \"\"\"\n try:\n _CONTEXT_STACK.append(_USER_CONTEXT)\n if _is_base_context():\n try:\n return wrapped(*args, **kwargs)\n except FlyteScopedException as ex:\n _reraise(ex.type, ex.value, ex.traceback)\n else:\n try:\n return wrapped(*args, **kwargs)\n except FlyteScopedException:\n # Just pass on the already wrapped and scoped exception\n _reraise(*_exc_info())\n except _user_exceptions.FlyteUserException:\n _reraise(\n FlyteScopedUserException,\n FlyteScopedUserException(*_exc_info()),\n _exc_info()[2])\n except _system_exceptions.FlyteSystemException:\n _reraise(\n FlyteScopedSystemException,\n FlyteScopedSystemException(*_exc_info()),\n _exc_info()[2])\n except:\n # Any non-platform raised exception is a user exception.\n # This will also catch FlyteUserException re-raised by the system_entry_point handler\n _reraise(\n FlyteScopedUserException,\n FlyteScopedUserException(*_exc_info()),\n _exc_info()[2])\n finally:\n _CONTEXT_STACK.pop()\n","sub_path":"flytekit/common/exceptions/scopes.py","file_name":"scopes.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"293623205","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: c:\\Users\\mastromatteo\\Progetti\\flows\\flows\\Actions\\BufferAction.py\n# Compiled at: 2017-03-23 05:15:25\n# Size of source mod 2**32: 1369 bytes\n\"\"\"\nBufferAction.py\n----------------------------\n\nCopyright 2016 Davide Mastromatteo\n\"\"\"\nimport re\nfrom flows.Actions.Action import Action\n\nclass BufferAction(Action):\n __doc__ = '\\n BufferAction Class\\n '\n type = 'buffer'\n buffer = None\n regex = ''\n\n def on_init(self):\n super().on_init()\n if 'regex_new_buffer' not in self.configuration:\n raise ValueError(str.format('The buffer action {0} is not properly configured.The regex_new_buffer parameter is missing', self.name))\n self.buffer = []\n self.regex = self.configuration['regex_new_buffer']\n\n def on_input_received(self, action_input=None):\n super().on_input_received(action_input)\n match = re.search(self.regex, action_input.message)\n if match is None:\n self.buffer.append(action_input.message)\n return (None, '*')\n else:\n if len(self.buffer) > 0:\n return_value = ''.join(self.buffer)\n self.buffer.clear()\n self.buffer.append(action_input.message)\n self.send_message(return_value)\n else:\n self.buffer.append(action_input.message)","sub_path":"pycfiles/flows-1.2.1-py3-none-any/BufferAction.cpython-36.py","file_name":"BufferAction.cpython-36.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"42"}
+{"seq_id":"541405227","text":"import os\nimport fcntl \nfrom datetime import datetime\ndef log_access(log_path,store_path):\n\twith open(log_path,'rw') as log_file:\n\t\tfcntl.flock(log_file, fcntl.LOCK_EX)\n\t\twith open('%s/access%i%i%i%i'%(store_path,month,day,year,hour),'w') as store_log:\n\t\t\tstore_log.write(log_file.read())\n\t\t\tlog_file.write('')\n\t\tfcntl.flock(log_file, fcntl.LOCK_UN)\nif __name__ == '__main__':\n\ttime=datetime.now()\n\thour=time.hour\n\twhile 1:\n\t\tif hour!=time.hour:\n\t\t\tlog_access('/var/log', '/var/logs/')\n\t\thour=time.hour()\n\t\n\t\t","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"594708683","text":"# O(n * k * log(k)) k is max length of a string in strs\nimport collections\n# def group_anagrams(strs):\n# dic = collections.defaultdict(list)\n# for s in strs:\n# dic[tuple(sorted(s))].append(s)\n#\n# res = []\n# for val in dic.values():\n# res.append(val)\n#\n# return res\n\n\n# O(n * k)\ndef group_anagrams(strs):\n dic = collections.defaultdict(list)\n for s in strs:\n count = [0] * 26\n for char in s:\n count[ord(char) - ord('a')] += 1\n\n dic[tuple(count)].append(s)\n\n return [x for x in dic.values()]\n\n\nprint(group_anagrams([\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))\n","sub_path":"49_group_anagrams.py","file_name":"49_group_anagrams.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"71122965","text":"#!/usr/bin/env python3\nfrom PIL import Image\nfrom os.path import join\nfrom os import makedirs\nfrom shutil import copyfile, copytree, rmtree\nimport json\n\ndestination = \"..\"\nsource = \"src\"\ntheme = \"Mnml.ReaperTheme\"\nrtconfig = \"rtconfig.txt\"\ndir_img = join(destination, \"Mnml\")\ndir_150 = join(dir_img, \"150\")\ndir_200 = join(dir_img, \"200\")\n\nratio_200_150 = 0.75\nratio_200_100 = 0.5\nratio_150_100 = 0.66\n\n# Create the assets folders\ntry:\n rmtree(dir_img)\nexcept:\n pass\n\nmakedirs(dir_img)\nmakedirs(dir_200)\nmakedirs(dir_150)\n\n# Copy the theme into destination\ncopyfile(join(source, theme), join(destination, theme))\ncopyfile(join(source, rtconfig), join(dir_img, rtconfig))\n\n# Read data file\nwith open(\"combined.json\", \"r\") as f:\n data = json.loads(f.read())\n\n# Read combined image\nimg = Image.open(\"combined.png\")\n\n\nparts = data[\"parts\"]\n\nfor name in parts:\n part = parts[name]\n\n # Crop each image\n part_cropped = img.crop(\n (part[\"x\"], part[\"y\"], part[\"x\"] + part[\"w\"], part[\"y\"] + part[\"h\"],)\n )\n\n # Save only in the img dir\n part_cropped.save(join(dir_img, name))\n","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"506400790","text":"# set global variables\n\n# nb: this script now requires python3\n\nimport csv\nfrom os import path, popen, sys\nimport json\n\n\ndef getParms(parmFile, prmz):\n try:\n f = open(parmFile, 'r')\n csvfile = csv.reader(f, delimiter=\"\\t\")\n except IOError:\n raise\n message = 'Expected to be able to read %s, but it was not found or unreadable' % parmFile\n return message, -1\n except:\n raise\n\n try:\n rows = []\n for row, values in enumerate(csvfile):\n rows.append(values)\n\n f.close()\n\n return parseRows(rows, prmz)\n\n except IOError:\n message = 'Could not read (or maybe parse) rows from %s' % parmFile\n return message, -1\n except:\n raise\n\n\ndef parseRows(rows, prmz):\n prmz.PARMS = {}\n prmz.HEADER = {}\n labels = {}\n prmz.FIELDS = {}\n prmz.DEFAULTSORTKEY = 'None'\n\n prmz.SEARCHCOLUMNS = 0\n prmz.SEARCHROWS = 0\n prmz.CSRECORDTYPE = 'cataloging' # default\n\n prmz.LOCATION = ''\n prmz.DROPDOWNS = []\n\n functions = 'Search,Facet,bMapper,listDisplay,fullDisplay,gridDisplay,mapDisplay,inCSV'.split(',')\n for function in functions:\n prmz.FIELDS[function] = []\n\n fieldkeys = 'label fieldtype suggestions solrfield name X order searchtarget'.split(' ')\n\n for rowid, row in enumerate(rows):\n rowtype = row[0]\n\n if rowtype == 'header':\n for i, r in enumerate(row):\n prmz.HEADER[i] = r\n labels[r] = i\n\n elif rowtype == 'server':\n prmz.SOLRSERVER = row[1]\n\n elif rowtype == 'csrecordtype':\n prmz.CSRECORDTYPE = row[1]\n\n elif rowtype == 'core':\n prmz.SOLRCORE = row[1]\n\n elif rowtype == 'title':\n prmz.TITLE = row[1]\n\n elif rowtype == 'field':\n\n needed = [row[labels[i]] for i in 'Label Role Suggestions SolrField Name Search SearchTarget'.split(' ')]\n if row[labels['Suggestions']] != '':\n # suggestname = '%s.%s' % (row[labels['Suggestions']], row[labels['Name']])\n suggestname = row[labels['Name']]\n else:\n suggestname = row[labels['Name']]\n needed[4] = suggestname\n prmz.PARMS[suggestname] = needed\n needed.append(rowid)\n if 'sortkey' in row[labels['Role']]:\n prmz.DEFAULTSORTKEY = row[labels['SolrField']]\n\n for function in functions:\n if len(row) > labels[function] and row[labels[function]] != '':\n fieldhash = {}\n for n, v in enumerate(needed):\n if n == 5 and function == 'Search': # 5th item in needed is search field x,y coord for layout\n if v == '':\n continue\n searchlayout = (v + ',1').split(',')\n fieldhash['column'] = int('0' + searchlayout[1])\n fieldhash['row'] = int('0' + searchlayout[0])\n prmz.SEARCHCOLUMNS = max(prmz.SEARCHCOLUMNS, int('0' + searchlayout[1]))\n prmz.SEARCHROWS = max(prmz.SEARCHROWS, int('0' + searchlayout[0]))\n else:\n fieldhash[fieldkeys[n]] = v\n fieldhash['order'] = int(row[labels[function]].split(',')[0])\n fieldhash['style'] = '' # temporary hack!\n fieldhash['type'] = 'text' # temporary hack!\n prmz.FIELDS[function].append(fieldhash)\n\n prmz.FIELDS[function] = sorted(prmz.FIELDS[function], key=lambda x: x['order'])\n\n if prmz.SEARCHROWS == 0: prmz.SEARCHROWS = 1\n if prmz.SEARCHCOLUMNS == 0: prmz.SEARCHCOLUMNS = 1\n\n for p in prmz.PARMS:\n if 'dropdown' in prmz.PARMS[p][1]:\n prmz.DROPDOWNS.append(prmz.PARMS[p][4])\n if 'location' in prmz.PARMS[p][1]:\n prmz.LOCATION = prmz.PARMS[p][3]\n\n prmz.FACETS = [f['solrfield'] for f in prmz.FIELDS['Search'] if 'dropdown' in f['fieldtype']]\n\n return prmz\n\n\ndef getversion():\n try:\n version = popen(\"/usr/bin/git describe --always\").read().strip()\n if version == '': # try alternate location for git (this is the usual Mac location)\n version 
= popen(\"/usr/local/bin/git describe --always\").read().strip()\n except:\n version = 'Unknown'\n return version\n\ndef check_use(field_to_check, bl_field, used_so_far):\n if field_to_check in used_so_far[bl_field]:\n return True\n else:\n used_so_far[bl_field][field_to_check] = True\n return False\n\nif __name__ == \"__main__\":\n\n # holder for global variables and other parameters\n class prmz:\n pass\n\n\n prmz = getParms(sys.argv[1], prmz)\n pass\n\n bl_fields = 'facet search show gallery index sort'.split(' ')\n\n '''\n facet_field \"objproddate_begin_dt\", :label => \"Production Date\", :partial => \"blacklight_range_limit/range_limit_panel\", :range => {\n :input_label_range_begin => \"from year\",\n :input_label_range_end => \"to year\"\n }\n '''\n\n used_so_far = {'sort': {}}\n bl_config = {key: {} for key in bl_fields}\n\n for i, fieldtype in enumerate('Facet Search fullDisplay gridDisplay listDisplay'.split(' ')):\n for fields in prmz.FIELDS[fieldtype]:\n solr_field = fields['solrfield']\n label_field = fields['label']\n bl_field = bl_fields[i]\n if bl_field == 'gallery':\n continue\n if bl_field not in used_so_far:\n used_so_far[bl_field] = {}\n limit = ''\n # the catch-all field 'text' is already included\n if solr_field == 'text':\n continue\n if 'mainentry' in fields['fieldtype']:\n bl_config['sort'][ \"config.index.title_field = '%s'\" % solr_field] = True\n bl_config['sort'][ \"config.show.title_field = '%s'\" % solr_field] = True\n if check_use(solr_field, 'sort', used_so_far): continue\n bl_config['sort'][ \"config.add_sort_field '%s asc', label: '%s'\" % (solr_field, label_field)] = True\n if 'sortkey' in fields['fieldtype'] or 'musno' in fields['fieldtype']:\n if check_use(solr_field, 'sort', used_so_far): continue\n bl_config['sort'][ \"config.add_sort_field '%s asc', label: '%s'\" % (solr_field, label_field)] = True\n continue\n if 'blob' in fields['fieldtype']:\n bl_config[bl_field][ \"config.index.thumbnail = '%s'\" % solr_field] = True\n bl_config[bl_field][ \"config.show.thumbnail = '%s'\" % solr_field] = True\n continue\n if bl_field == 'facet':\n limit = ', limit: true'\n if '_dt' in solr_field:\n bl_config[bl_field][ '''\n config.add_facet_field \"%s\", :label => \"%s\", :partial => \"blacklight_range_limit/range_limit_panel\", :range => {\n :input_label_range_begin => \"from year\",\n :input_label_range_end => \"to year\"\n }\n ''' % (solr_field, label_field)] = True\n else:\n bl_config[bl_field][\"config.add_%s_field '%s', label: '%s'%s\" % (bl_field, solr_field, label_field, limit)] = True\n\nfor section in bl_config:\n print('# %s' % section)\n for c in sorted(bl_config[section]):\n print(c)\n\nprint('''\n end\nend\n''')\n","sub_path":"ucb_bl.py","file_name":"ucb_bl.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"303777564","text":"import requests\nimport voluptuous as vol\nimport homeassistant.helpers.config_validation as cv\nimport logging\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_ROKID_SN = 'sn'\nCONF_WEBHOOK_ID = 'webhook_id'\n\nATTR_MESSAGE = 'message'\n\nDOMAIN = 'rokid_tts'\n\nSERVICE_SCHEMA = vol.Schema({\n vol.Required(ATTR_MESSAGE): cv.string,\n})\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n vol.Required(CONF_ROKID_SN): cv.string,\n vol.Required(CONF_WEBHOOK_ID): cv.string,\n }),\n}, extra=vol.ALLOW_EXTRA)\n\ndef setup(hass, config):\n conf = config.get(DOMAIN, {})\n rokid_sn = conf.get(CONF_ROKID_SN)\n webhook_id = conf.get(CONF_WEBHOOK_ID) \n \n def send_message(call):\n message = call.data.get(ATTR_MESSAGE) \n client = rokid_tts(rokid_sn, webhook_id) \n try:\n message = client.speak(message)\n except Exception as e:\n _LOGGER.error(e)\n\n hass.services.register(DOMAIN, 'speak', send_message, schema=SERVICE_SCHEMA)\n return True\n\nclass rokid_tts:\n\n def __init__(self, rokid_sn=None, webhook_id=None): \n self._rokid_sn = rokid_sn\n self._headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n self._tts_url = \"https://homebase.rokid.com/trigger/with/{}\".format(webhook_id)\n\n def _text_to_speech(self, text):\n try:\n data = {\"type\":\"tts\",\"devices\":{\"sn\": self._rokid_sn},\"data\": {\"text\": text}}\n r = requests.post(self._tts_url, headers=self._headers, json=data)\n if r.status_code == 200:\n return\n except Exception as e:\n _LOGGER.error(e)\n return False\n\n def speak(self, text):\n if text:\n self._text_to_speech(text)\n else:\n _LOGGER.error('Please provide message to speak!')","sub_path":"custom_components/rokid_s_tts.py","file_name":"rokid_s_tts.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
+{"seq_id":"121959835","text":"\"\"\"\n-------------------------------------------------------\nAlicia's Profit Library\n-------------------------------------------------------\nAuthor: Minesh Varu\nID: 110814300\nEmail: varu4300@mylaurier.ca\nVersion: Jun 4, 2012\n-------------------------------------------------------\n\"\"\"\nrent = 600\nexpenses = 410\nemployees = 1920\n\n\"\"\"\n-------------------------------------------------------\nAlicia's Profit function\n-------------------------------------------------------\nPreconditions:\n    purchase: How many computers Alicia purchased (int > 0)\n    cost: The amount it cost per computer (float > 0)\n    commission: The wholesaler's commission (float > 0)\n    sold: the amount of computers that are sold (int > 0)\n    charges: the amount that she charges per computer (float > 0)\nPostconditions:\n    returns:\n    p: The amount Alicia paid for the computers (float)\n    pc: The amount Alicia paid including commission (float)\n    s: The amount Alicia sold the computers for (float)\n    profit: The profit Alicia makes\n-------------------------------------------------------\n\"\"\"\ndef Alicia_profit(purchase, cost, commission, sold, charges):\n    assert purchase > 0, \"purchase must be >0\"\n    assert cost > 0, \"cost must be >0\"\n    assert commission > 0, \"commission must be >0\"\n    assert sold > 0, \"sold must be >0\"\n    assert charges > 0, \"charges must be >0\"\n    p = purchase * cost\n    # purchase price plus the wholesaler's commission\n    pc = p * (1 + commission)\n    s = sold * charges\n    # revenue minus the cost of goods (with commission) and fixed overhead\n    profit = s - pc - (rent + expenses + employees)\n    return p, pc, s, profit\n","sub_path":"CP104/Assignements/varu4300_a6/src/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
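+A quick check of the corrected formulas in Alicia_profit above; the argument values are made up for illustration:
+
+    p, pc, s, profit = Alicia_profit(10, 500.0, 0.05, 8, 700.0)
+    # p = 5000.0, pc = 5000 * 1.05 = 5250.0, s = 5600.0
+    # profit = 5600 - 5250 - (600 + 410 + 1920) = -2580.0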
+{"seq_id":"598356755","text":"import os\nimport json\nimport collections\nimport numpy as np\nimport config as cfg\n\n\nclass TextConverter(object):\n    def __init__(self):\n        file_name = cfg.file_name.split('.')[0]\n        words_file = os.path.join(cfg.file_path, file_name+'_sorted.txt')\n        text_for_train = os.path.join(cfg.file_path, file_name+'_for_train.txt')\n        text_for_test = os.path.join(cfg.file_path, file_name + '_for_test.txt')\n        word_to_int_file = os.path.join(cfg.file_path, file_name + '_w_to_i.txt')\n        int_to_word_file = os.path.join(cfg.file_path, file_name + '_i_to_w.txt')\n        if os.path.exists(words_file):\n            self.vocab = self.load_file(words_file)\n        else:\n            text = self.read_text(os.path.join(cfg.file_path, cfg.file_name))\n            all_words = [word for line in text for word in line]  # all characters in the corpus\n            counter = collections.Counter(all_words)  # frequency of each character\n            self.vocab = sorted(counter.keys(), key=lambda x: counter[x], reverse=True)  # characters sorted by frequency, descending\n            self.word_to_int_table = {c: i for i, c in enumerate(self.vocab)}  # character -> index\n            self.int_to_word_table = dict(enumerate(self.vocab))  # index -> character\n\n            # split into training and test sets\n            np.random.shuffle(text)\n            split_index = int(len(text) * cfg.train_precent)\n            train_text = text[0: split_index]\n            test_text = text[split_index:]\n\n            self.save_to_file(words_file, self.vocab)\n            self.save_to_file(text_for_train, [self.word_to_int(word) for line in train_text for word in line])\n            self.save_to_file(text_for_test, [self.word_to_int(word) for line in test_text for word in line])\n            self.save_to_file(int_to_word_file, self.int_to_word_table)\n            self.save_to_file(word_to_int_file, self.word_to_int_table)\n\n    @property\n    def vocab_size(self):\n        return len(self.vocab) + 1\n\n    def read_text(self, filepath):\n        with open(filepath, 'r', encoding='utf-8') as f:\n            return f.readlines()\n\n    def load_file(self, filepath):\n        return json.load(open(filepath, 'r', encoding='utf-8'))\n\n    def word_to_int(self, word):\n        if word in self.word_to_int_table:\n            return self.word_to_int_table[word]\n        else:\n            return len(self.vocab)\n\n    def int_to_word(self, index):\n        if index == len(self.vocab):\n            return ''\n        elif index < len(self.vocab):\n            return self.int_to_word_table[index]\n        else:\n            raise Exception('Unknown index!')\n\n    def arr_to_text(self, arr):\n        words = []\n        for index in arr:\n            words.append(self.int_to_word(index))\n        return \"\".join(words)\n\n    def save_to_file(self, filename, content):\n        with open(filename, 'w', encoding='utf-8') as f:\n            f.write(json.dumps(content))\n\n\nclass GenerateBatch(object):\n    def __init__(self):\n        self.epoch = 0\n        self.counter = 0\n        self.counter_t = 0\n        file_name = cfg.file_name.split('.')[0]\n        words_file = os.path.join(cfg.file_path, file_name + '_sorted.txt')\n        text_for_train = os.path.join(cfg.file_path, file_name + '_for_train.txt')\n        text_for_test = os.path.join(cfg.file_path, file_name + '_for_test.txt')\n        if os.path.exists(words_file) and os.path.exists(text_for_train) and os.path.exists(text_for_test):\n            self.vocab = self.load_file(words_file)\n            self.text_for_train = self.load_file(text_for_train)\n            self.text_for_test = self.load_file(text_for_test)\n        else:\n            pass\n\n    def load_file(self, filepath):\n        return json.load(open(filepath, 'r', encoding='utf-8'))\n\n    def next_batch(self):\n        x = np.zeros([cfg.batch_size, cfg.char_length])\n        y = np.zeros([cfg.batch_size, cfg.char_length])\n        last_char = []\n        start_index = len(last_char)\n        for num in range(cfg.batch_size):\n            s_ind = num * cfg.char_length - start_index\n            e_ind = (num + 1) * cfg.char_length - start_index\n            if s_ind < 0:\n                s_ind = 0\n            x[num:] = last_char + self.text_for_train[s_ind: e_ind]\n            last_char = []\n            self.counter += 1\n            if (self.counter * cfg.char_length) >= len(self.text_for_train):\n                last_char = self.text_for_train[(self.counter + 1) * cfg.char_length:]\n                start_index = len(last_char)\n                self.counter = 0\n                self.epoch += 1\n\n        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]\n        return x, y\n\n    def next_batch_test(self):\n        x = np.zeros([cfg.batch_size, cfg.char_length])\n        y = np.zeros([cfg.batch_size, cfg.char_length])\n        last_char = []\n        start_index = len(last_char)\n        for num in range(cfg.batch_size):\n            s_ind = num * cfg.char_length - start_index\n            e_ind = (num + 1) * cfg.char_length - start_index\n            if s_ind < 0:\n                s_ind = 0\n            x[num:] = last_char + self.text_for_test[s_ind: e_ind]\n            last_char = []\n            # the test batcher tracks its own cursor, counter_t\n            self.counter_t += 1\n            if (self.counter_t * cfg.char_length) >= len(self.text_for_test):\n                last_char = self.text_for_test[(self.counter_t + 1) * cfg.char_length:]\n                start_index = len(last_char)\n                self.counter_t = 0\n\n        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]\n        return x, y\n\n\nif __name__ == '__main__':\n    # s = TextConverter()\n    t = GenerateBatch()\n    x, y = t.next_batch_test()\n    print(np.shape(x))\n","sub_path":"read_utils.py","file_name":"read_utils.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"40"}
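+The TextConverter above builds its vocabulary by sorting characters by frequency; a minimal standalone sketch of that step (the sample lines are made up):
+
+    from collections import Counter
+
+    lines = ["hello world", "hello there"]
+    all_chars = [ch for line in lines for ch in line]
+    counter = Counter(all_chars)
+    vocab = sorted(counter, key=counter.get, reverse=True)
+    word_to_int = {c: i for i, c in enumerate(vocab)}  # character -> index
+    int_to_word = dict(enumerate(vocab))               # index -> character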
+{"seq_id":"218333868","text":"import requests,csv\n\nurl = \"https://api.github.com/orgs/mspmac/repos\"\n\nquerystring = {\"access_token\":\"token-here\", \"per_page\":\"200\", \"page\":\"1\"}\n\nheaders = {\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"665d9900-d33c-3238-281e-34baa3b083e0\"\n }\n\nresponse = requests.request(\"GET\", url, headers=headers, params=querystring)\nr_json = response.json()\n# for r in r_json:\n# print(\"git clone \" + r['html_url'])\ngit_data = open('git-data.csv','w')\ncsvwriter = csv.writer(git_data)\nheaders = {\"App\":\"\",\"description\":\"\" ,\"url\":\"\", \"criticality\":\"\"}\ncsvwriter.writerow(headers.keys())\nfor r in r_json:\n csvwriter.writerow([r['name'], r['description'], r['html_url'], \"\"])\n","sub_path":"pull_all_repos.py","file_name":"pull_all_repos.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
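+One caveat on the script above: the GitHub REST API caps per_page at 100, so requesting 200 still returns at most 100 repos, and passing access_token as a query parameter is deprecated in favor of an Authorization header. A hedged pagination sketch (the token value is a placeholder):
+
+    headers = {"Authorization": "token <token-here>"}
+    page, repos = 1, []
+    while True:
+        resp = requests.get(url, headers=headers, params={"per_page": "100", "page": str(page)})
+        batch = resp.json()
+        if not batch:
+            break
+        repos.extend(batch)
+        page += 1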
+{"seq_id":"130637530","text":"import json\nimport uuid\n\n\n\ndef execute(request):\n content_length = int(request.headers['Content-Length'])\n data = request.rfile.read(content_length)\n data = json.loads(data)\n data['id'] = str(uuid.uuid1())\n data['connected'] = False\n\n with open('./data.json') as f:\n content = f.read()\n\n if content:\n content = json.loads(content)\n else:\n content = []\n\n content.append(data)\n\n with open('./data.json', 'w') as f:\n f.write(json.dumps(content, indent=4))\n\n return 'success'\n","sub_path":"src/handlers/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"415079484","text":"\"\"\"Computes the partial trace of a matrix.\"\"\"\nfrom typing import Union, List\nimport numpy as np\nfrom toqito.perms.permute_systems import permute_systems\nfrom toqito.helper.cvxpy_helper import expr_as_np_array, np_array_as_expr\n\n\ndef partial_trace_cvx(rho, sys=None, dim=None):\n \"\"\"\n Perform the partial trace on a cvxpy variable.\n\n References:\n [1] Adapted from:\n https://github.com/cvxgrp/cvxpy/issues/563\n\n :param rho: A square matrix.\n :param sys: Scalar or vector specifying the size of the subsystems.\n :param dim: Dimension of the subsystems. If `None`, all dimensions\n are assumed to be equal.\n :return: The partial trace of matrix `input_mat`.\n \"\"\"\n rho_np = expr_as_np_array(rho)\n traced_rho = partial_trace(rho_np, sys, dim)\n traced_rho = np_array_as_expr(traced_rho)\n return traced_rho\n\n\ndef partial_trace(\n input_mat: np.ndarray,\n sys: Union[int, List[int]] = 2,\n dim: Union[int, List[int]] = None,\n):\n \"\"\"\n Compute the partial trace of a matrix.\n\n Gives the partial trace of the matrix X, where the dimensions of the\n (possibly more than 2) subsystems are given by the vector `dim` and the\n subsystems to take the trace on are given by the scalar or vector `sys`.\n\n References:\n [1] Wikipedia: Partial trace\n https://en.wikipedia.org/wiki/Partial_trace\n\n :param input_mat: A square matrix.\n :param sys: Scalar or vector specifying the size of the subsystems.\n :param dim: Dimension of the subsystems. If `None`, all dimensions\n are assumed to be equal.\n :return: The partial trace of matrix `input_mat`.\n \"\"\"\n if dim is None:\n dim = np.array([np.round(np.sqrt(len(input_mat)))])\n if isinstance(dim, int):\n dim = np.array([dim])\n if isinstance(dim, list):\n dim = np.array(dim)\n\n if sys is None:\n sys = 2\n\n num_sys = len(dim)\n\n # Allow the user to enter a single number for dim.\n if num_sys == 1:\n dim = np.array([dim[0], len(input_mat) / dim[0]])\n if (\n np.abs(dim[1] - np.round(dim[1]))\n >= 2 * len(input_mat) * np.finfo(float).eps\n ):\n raise ValueError(\n \"Invalid: If `dim` is a scalar, `dim` must evenly \"\n \"divide `len(input_mat)`.\"\n )\n dim[1] = np.round(dim[1])\n num_sys = 2\n\n prod_dim = np.prod(dim)\n if isinstance(sys, list):\n prod_dim_sys = np.prod(dim[sys])\n elif isinstance(sys, int):\n prod_dim_sys = np.prod(dim[sys - 1])\n else:\n raise ValueError(\n \"Invalid: The variable `sys` must either be of type \"\n \"int or of a list of ints.\"\n )\n\n sub_prod = prod_dim / prod_dim_sys\n sub_sys_vec = prod_dim * np.ones(int(sub_prod)) / sub_prod\n\n if isinstance(sys, int):\n sys = [sys]\n set_diff = list(set(list(range(1, num_sys + 1))) - set(sys))\n\n perm = set_diff\n perm.extend(sys)\n\n a_mat = permute_systems(input_mat, perm, dim)\n\n ret_mat = np.reshape(\n a_mat,\n [int(sub_sys_vec[0]), int(sub_prod), int(sub_sys_vec[0]), int(sub_prod)],\n order=\"F\",\n )\n permuted_mat = ret_mat.transpose((1, 3, 0, 2))\n permuted_reshaped_mat = np.reshape(\n permuted_mat,\n [int(sub_prod), int(sub_prod), int(sub_sys_vec[0] ** 2)],\n order=\"F\",\n )\n\n pt_mat = permuted_reshaped_mat[\n :, :, list(range(0, int(sub_sys_vec[0] ** 2), int(sub_sys_vec[0] + 1)))\n ]\n pt_mat = np.sum(pt_mat, axis=2)\n\n return pt_mat\n","sub_path":"toqito/super_operators/partial_trace.py","file_name":"partial_trace.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
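+A quick numerical sanity check of what partial_trace above computes, in plain NumPy: tracing out the second subsystem of a Kronecker product rho_a ⊗ rho_b should recover rho_a times trace(rho_b). The matrices here are made up:
+
+    import numpy as np
+
+    rho_a = np.array([[0.7, 0.2], [0.2, 0.3]])
+    rho_b = np.array([[0.5, 0.1], [0.1, 0.5]])  # trace(rho_b) == 1
+    rho = np.kron(rho_a, rho_b)
+
+    # reshape so T[i, j, k, l] = rho_a[i, k] * rho_b[j, l], then sum over the j == l pair
+    pt = np.einsum('ijkj->ik', rho.reshape(2, 2, 2, 2))
+    assert np.allclose(pt, rho_a)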
+{"seq_id":"316936008","text":"# Copyright (C) 2017-2019 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport argparse\nfrom bench import parse_args, time_mean_min, print_header, print_row\nfrom daal4py import linear_regression_training, linear_regression_prediction\nfrom daal4py.sklearn.utils import getFPType\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='daal4py linear regression '\n 'benchmark')\nparser.add_argument('--no-fit-intercept', dest='fit_intercept', default=True,\n action='store_false',\n help=\"Don't fit intercept (assume data already centered)\")\nparser.add_argument('--method', default='normEqDense',\n choices=('normEqDense', 'qrDense'),\n help=\"Training method used by DAAL. 'normEqDense' selects\"\n \"the normal equations method, while 'qrDense' selects\"\n \"the method based on QR decomposition.\")\nparams = parse_args(parser, size=(1000000, 50), dtypes=('f8', 'f4'),\n loop_types=('fit', 'predict'), prefix='daal4py')\n\n# Generate random data\nX = np.random.rand(*params.shape).astype(params.dtype)\nXp = np.random.rand(*params.shape).astype(params.dtype)\ny = np.random.rand(*params.shape).astype(params.dtype)\n\n\n# Create our regression objects\ndef test_fit(X, y):\n regr_train = linear_regression_training(fptype=getFPType(X),\n method=params.method,\n interceptFlag=params.fit_intercept)\n return regr_train.compute(X, y)\n\n\ndef test_predict(Xp, model):\n regr_predict = linear_regression_prediction(fptype=getFPType(X))\n return regr_predict.compute(Xp, model)\n\n\ncolumns = ('batch', 'arch', 'prefix', 'function', 'threads', 'dtype', 'size',\n 'method', 'time')\nprint_header(columns, params)\n\n# Time fit\nfit_time, res = time_mean_min(test_fit, X, y,\n outer_loops=params.fit_outer_loops,\n inner_loops=params.fit_inner_loops)\nprint_row(columns, params, function='Linear.fit', time=fit_time)\n\n# Time predict\npredict_time, yp = time_mean_min(test_predict, Xp, res.model,\n outer_loops=params.predict_outer_loops,\n inner_loops=params.predict_inner_loops)\nprint_row(columns, params, function='Linear.predict', time=predict_time)\n","sub_path":"daal4py/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"78133600","text":"from datetime import datetime\n\n# for plotters\nPLOT = False\nFIGSIZE = (12, 9)\n\n# for date\nFIRST_HISTORICAL_FLOOD_DATE = datetime(1961, 1, 1)\nMIN_DATE = datetime(1961, 1, 1)\nMAX_DATE = datetime(2018, 12, 31)\nFUTURE_MONTH = 0 # January\nDAYS_ROLLBACK = 40 # total days are 41, 40+start_date\n\n# for location:\nBOUNDS_LONDON = [[-1, 51], [1, 52]] # greater London\nBOUNDS_UK = [[-8.4, 49.8], [1.9, 58.8]] # UK incl. Northern Ireland\n\n# for raster\nRESOLUTION = 2.5\nRADIUS = 1\n","sub_path":"handler/constants/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"592140989","text":"\r\nname = \"Modupe Ariyo\"\r\nemail = \"ariyocedar@gmail.com\"\r\nslackUsername = \"@Modupe\"\r\ntwitter_handle = \"@modeee\"\r\ndef hammingDistance(str1,str2):\r\n    # Hamming distance: number of positions at which two equal-length strings differ\r\n    count = 0 # loop index\r\n    ham = 0 # running count of mismatched positions\r\n    while (count < len(str1)):\r\n        if str1[count] != str2[count]:\r\n            ham+=1\r\n        count+=1\r\n    return ham\r\n\r\nhamming_distance = hammingDistance(slackUsername, twitter_handle)\r\n\r\n\r\n\r\nprint(name,email,slackUsername,twitter_handle, hamming_distance, sep=\"\\n\")\r\n","sub_path":"stage_0_modupe.py","file_name":"stage_0_modupe.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
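+For reference, the same Hamming distance in one idiomatic line, using the two equal-length strings from the record above:
+
+    hamming_distance = sum(a != b for a, b in zip(slackUsername, twitter_handle))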
+{"seq_id":"577376400","text":"# Perform the bitwise logical operations AND, OR, etc. on the numbers 5 and 6.\n# Shift the number 5 two bits to the right and to the left.\n\n# first_num = 5\n# second_num = 6\n# left_two = first_num << 2\n# right_two = first_num >> 2\n# res_bit_and = first_num & second_num\n# res_bit_or = first_num | second_num\n# res_bit_xor = first_num ^ second_num\n# print(bin(first_num))\n# print(bin(second_num))\n# print(res_bit_and, bin(res_bit_and))\n# print(res_bit_or, bin(res_bit_or))\n# print(res_bit_xor, bin(res_bit_xor))\n# print(left_two, bin(left_two))\n# print(right_two, bin(right_two))\n\nalphabet = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z')\n\nwhile True:\n    try:\n        first_letter = (input(\"\")).lower()\n        second_letter = (input(\"\")).lower()\n        # 1-based positions of the two letters in the alphabet\n        print(alphabet.index(first_letter) + 1)\n        print(alphabet.index(second_letter) + 1)\n        # number of letters strictly between the two\n        print(len(alphabet[alphabet.index(first_letter):alphabet.index(second_letter) - 1]))\n        break\n    except Exception as e:\n        print(e)\n        continue\n\n","sub_path":"parser_2/bit_op.py","file_name":"bit_op.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"122669515","text":"#!/usr/bin/python3\n\nimport game_engine as ge\nimport character as ch\nfrom os import system as sys\n\n\ndef main():\n \"\"\"Initial start point for Dungeon dudes. Creates a Hero(), the Map(),\n the Engine(), and then starts the game\"\"\"\n sys('clear')\n print(\"\\n\\t -- Welcome to Dungeon Dudes. --\\n\")\n name = input(\"\\tWhat's your name? \")\n gender = input(\"\\tAre you a man or a woman? \")\n sys('clear')\n hero = ch.Hero(name, gender, 'Human')\n\n welcome = \"\\n\\tPleased to meet you\"\n if gender.lower() == \"man\":\n welcome += \" Mr. \"\n elif gender.lower() == \"woman\":\n welcome += \" Miss \"\n else:\n welcome += \" 'Whatever you are' \"\n welcome += name\n welcome += \". You currently find yourself \\n\"\n welcome += \"standing in the living room of your small\"\n welcome += \" apartment, unable to connect to the \\n\"\n welcome += \"internet. You find a letter on the floor \"\n welcome += \"that reads 'I stole your router and \\n\"\n welcome += \"hid it away in a dark cavern in a cold dungeon'.\"\n\n print(welcome)\n input(\"\\n\")\n sys('clear')\n print(\"\\nTo reconnect to the Internet you have to retrieve the router.\")\n input(\"\\n\")\n sys('clear')\n\n game_map = ge.Map(hero, 'Living Room')\n game = ge.Engine(game_map)\n game.play()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dungeon_dudes.py","file_name":"dungeon_dudes.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"120469871","text":"'''\nCreated on Feb 24, 2018\n\n@author: brian\n'''\n\nimport logging, os, pickle, threading, traceback, sys\n\nfrom envisage.ui.tasks.api import TasksApplication\nfrom envisage.ui.tasks.tasks_application import TasksApplicationState\nfrom pyface.api import error\nfrom pyface.tasks.api import TaskWindowLayout\nfrom traits.api import Bool, Instance, List, Property, Str\n\nlogger = logging.getLogger(__name__)\n\nfrom dptrp1.gui.preferences import Preferences\n\ndef log_notification_handler(_, trait_name, old, new):\n \n (exc_type, exc_value, tb) = sys.exc_info()\n logging.debug('Exception occurred in traits notification '\n 'handler for object: %s, trait: %s, old value: %s, '\n 'new value: %s.\\n%s\\n' % ( object, trait_name, old, new,\n ''.join( traceback.format_exception(exc_type, exc_value, tb) ) ) )\n\n err_string = traceback.format_exception_only(exc_type, exc_value)[0]\n err_loc = traceback.format_tb(tb)[-1]\n err_ctx = threading.current_thread().name\n \n logging.error(\"Error: {0}\\nLocation: {1}Thread: {2}\" \\\n .format(err_string, err_loc, err_ctx) )\n \ndef log_excepthook(typ, val, tb):\n tb_str = \"\".join(traceback.format_tb(tb))\n logging.debug(\"Global exception: {0}\\n{1}: {2}\"\n .format(tb_str, typ, val))\n \n tb_str = traceback.format_tb(tb)[-1]\n logging.error(\"Error: {0}: {1}\\nLocation: {2}Thread: Main\"\n .format(typ, val, tb_str))\n\n\ndef gui_handler_callback(msg, app):\n app.application_error = msg\n \n\nclass CallbackHandler(logging.Handler):\n def __init__(self, callback):\n logging.Handler.__init__(self)\n self._callback = callback\n \n def emit(self, record):\n self._callback(record.getMessage())\n\n\nclass DPTApplication(TasksApplication):\n\n # The application's globally unique identifier.\n id = 'dptrp1.app'\n\n # The application's user-visible name.\n name = 'DPT-RP1 Manager'\n\n # The default window-level layout for the application.\n default_layout = List(TaskWindowLayout)\n \n # if there's an ERROR-level log message, drop it here \n application_error = Str\n \n # Whether to restore the previous application-level layout when the\n # applicaton is started.\n always_use_default_layout = Property(Bool)\n\n preferences_helper = Instance(Preferences)\n\n def _default_layout_default(self):\n active_task = self.preferences_helper.default_task\n tasks = [ factory.id for factory in self.task_factories ]\n return [ TaskWindowLayout(*tasks,\n active_task = active_task,\n size = (800, 600)) ]\n\n def _preferences_helper_default(self):\n return Preferences(preferences = self.preferences)\n\n def _get_always_use_default_layout(self):\n return self.preferences_helper.always_use_default_layout\n \n def show_error(self, error_string):\n error(None, error_string)\n \n def _load_state(self):\n \"\"\" \n Loads saved application state, if possible. 
Overload the envisage-\n defined one to fix a py3k bug and increment the TasksApplicationState\n version.\n \n \"\"\"\n state = TasksApplicationState(version = 2)\n filename = os.path.join(self.state_location, 'application_memento')\n if os.path.exists(filename):\n # Attempt to unpickle the saved application state.\n try:\n with open(filename, 'rb') as f:\n restored_state = pickle.load(f)\n if state.version == restored_state.version:\n state = restored_state\n else:\n logger.warn('Discarding outdated application layout')\n except:\n # If anything goes wrong, log the error and continue.\n logger.exception('Had a problem restoring application layout from %s',\n filename)\n \n self._state = state\n\n def _save_state(self):\n \"\"\"\n Saves the application state -- ONLY IF THE CYTOFLOW TASK IS ACTIVE\n \n \"\"\"\n\n # Grab the current window layouts.\n window_layouts = [w.get_window_layout() for w in self.windows]\n self._state.previous_window_layouts = window_layouts\n \n # Attempt to pickle the application state.\n filename = os.path.join(self.state_location, 'application_memento')\n try:\n with open(filename, 'wb') as f:\n pickle.dump(self._state, f)\n except:\n # If anything goes wrong, log the error and continue.\n logger.exception('Had a problem saving application layout')\n\n\n\ndef main(argv):\n \n logging.getLogger().setLevel(logging.DEBUG)\n \n ## send the log to STDERR\n try:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s:%(name)s:%(message)s\"))\n logging.getLogger().addHandler(console_handler)\n except:\n # if there's no console, this fails\n pass\n \n # install a global (gui) error handler for traits notifications\n from traits.api import push_exception_handler\n push_exception_handler(handler = log_notification_handler,\n reraise_exceptions = True, \n main = True)\n \n\n from envisage.core_plugin import CorePlugin\n from envisage.ui.tasks.tasks_plugin import TasksPlugin\n from dptrp1.gui.gui_task import GUITaskPlugin\n \n app = DPTApplication(plugins = [CorePlugin(), TasksPlugin(), GUITaskPlugin()])\n \n ## and display gui messages for exceprions\n gui_handler = CallbackHandler( lambda msg, app = app: gui_handler_callback(msg, app))\n gui_handler.setLevel(logging.ERROR)\n logging.getLogger().addHandler(gui_handler) \n \n # must redirect to the gui thread\n app.on_trait_change(app.show_error, 'application_error', dispatch = 'ui')\n\n sys.excepthook = log_excepthook \n\n def _size_hint_wrapper(f, ui):\n \"\"\"Wrap an existing sizeHint method with sizes from a UI object.\n \"\"\"\n def sizeHint():\n size = f()\n if ui.view is not None and ui.view.width > 0:\n size.setWidth(ui.view.width)\n if ui.view is not None and ui.view.height > 0:\n size.setHeight(ui.view.height)\n return size\n return sizeHint\n \n import traitsui.qt4.ui_panel\n traitsui.qt4.ui_panel._size_hint_wrapper = _size_hint_wrapper\n\n def _tree_hash(self):\n return id(self)\n \n def _tree_eq(self, other):\n return id(self) == id(other)\n \n from PyQt4.QtGui import QTreeWidgetItem\n QTreeWidgetItem.__hash__ = _tree_hash\n QTreeWidgetItem.__eq__ = _tree_eq\n \n app.run()\n\nif __name__ == '__main__':\n import sys\n main(sys.argv)\n","sub_path":"dptrp1/gui/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"13560141","text":"pair = {'}': '{', ')': '(', ']': '['}\n\n\ndef check_brackets(stream):\n stack = []\n\n for br in stream:\n if stack and pair.get(br, None) == stack[-1]:\n stack.pop()\n else:\n stack.append(br)\n\n if stack:\n return False\n else:\n return True\n","sub_path":"python/bracket-push/bracket_push.py","file_name":"bracket_push.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
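+A few illustrative checks of check_brackets above:
+
+    assert check_brackets("{[()]}")    # balanced
+    assert not check_brackets("([)]")  # interleaved, not balanced
+    assert not check_brackets("((")    # unclosed opener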
+{"seq_id":"457804113","text":"import os\n\n# Ask which file to delete\nbestand = input(\"Which file do you want to delete? \")\n\nif len(bestand) > 0:\n    # check that the file actually exists with os.path.exists()\n    if os.path.exists(bestand):\n        # delete the file\n        os.remove(bestand)\n        print(\"The file \" + bestand + \" has been deleted. Too bad.\")\n    else:\n        print(\"That file does not exist, sorry.\")\nelse:\n    print(\"No input given, the script will stop.\")\n","sub_path":"02-FilesFolders/verwijder_bestand.py","file_name":"verwijder_bestand.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
+{"seq_id":"388264856","text":"from app.global_data.global_data import g\nfrom app.domain.description import Description\n\n\ndef create_description(description):\n sql = '''\n INSERT INTO description (\n solution_uuid,\n author_login,\n content\n ) VALUES (\"{}\", \"{}\", \"{}\")\n '''.format(\n description.solutionUuid,\n description.authorLogin,\n description.content\n )\n\n conn = g.db.pool.connection()\n with conn.cursor() as cursor:\n cursor.execute(sql)\n conn.commit()\n conn.close()\n\n\ndef get_descriptions(solution_uuid):\n sql = 'SELECT * FROM description WHERE solution_uuid = \"{}\"'.format(solution_uuid)\n\n conn = g.db.pool.connection()\n with conn.cursor() as cursor:\n cursor.execute(sql)\n records = cursor.fetchall()\n conn.close()\n\n description_list = []\n for record in records:\n description = Description()\n description.from_record(record)\n description_list.append(description.__dict__)\n\n return description_list\n\n\ndef get_description(id):\n sql = 'SELECT * FROM description WHERE id = \"{}\" limit 1'.format(id)\n\n conn = g.db.pool.connection()\n with conn.cursor() as cursor:\n cursor.execute(sql)\n records = cursor.fetchall()\n conn.close()\n\n description_list = []\n for record in records:\n description = Description()\n description.from_record(record)\n description_list.append(description.__dict__)\n\n return description_list[0]\n\n\ndef update_description_content(description):\n sql = r'''\n UPDATE description SET \n content = '{}'\n WHERE id = {}\n '''.format(\n description.content,\n description.id\n )\n\n conn = g.db.pool.connection()\n with conn.cursor() as cursor:\n cursor.execute(sql)\n conn.commit()\n conn.close()\n\n\ndef delete_description(id):\n sql = 'DELETE FROM description WHERE id = \"{}\"'.format(id)\n\n conn = g.db.pool.connection()\n with conn.cursor() as cursor:\n cursor.execute(sql)\n conn.commit()\n conn.close()\n","sub_path":"umm-python/app/database/description_db.py","file_name":"description_db.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"41"}
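+Note that the queries above splice values in with str.format, which is open to SQL injection; a safer parameterized variant of get_description (the %s placeholder style assumes a MySQL driver such as PyMySQL):
+
+    sql = 'SELECT * FROM description WHERE id = %s LIMIT 1'
+    conn = g.db.pool.connection()
+    with conn.cursor() as cursor:
+        cursor.execute(sql, (id,))
+        record = cursor.fetchone()
+    conn.close()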
+{"seq_id":"66963317","text":"from vnpy.trader.object import BarData\n\nfrom vnpy.trader.utility import ArrayManager\nfrom pytz import timezone\nimport datetime\nimport math\n\n\nclass ClockManager:\n eastern = timezone('US/Eastern')\n\n def __init__(self, start_time: datetime.time, end_time: datetime.time,\n interval: datetime.timedelta = None, on_time_check=None):\n self.time_offset = datetime.timedelta(hours=start_time.hour, minutes=start_time.minute,\n seconds=start_time.second)\n et = datetime.timedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second)\n self.total_time = et - self.time_offset\n self.on_time_check = on_time_check\n self.interval = interval\n\n # def convert_time(self, unix_time):\n # return datetime.datetime.fromtimestamp(unix_time, self.eastern)\n\n def market_start(self):\n now = datetime.datetime.now(tz=self.eastern)\n return datetime.datetime(now.year,now.month,now.day) + self.time_offset\n\n def get_run_time(self, now:datetime.datetime):\n now_delay = datetime.timedelta(hours=now.hour, minutes=now.minute, seconds=now.second)\n rt = now_delay - self.time_offset\n return rt\n\n def on_bar(self, bar: BarData):\n if self.interval is None:\n return\n t = bar.datetime\n now = datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)\n rt = now - self.time_offset\n if rt.total_seconds() % self.interval.total_seconds() == 0:\n if self.on_time_check is not None:\n scale = self.get_time_scale(t)\n for callback in self.on_time_check:\n callback(scale, rt.total_seconds() / 60)\n\n def get_time_scale(self, t: datetime.datetime):\n now = datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)\n rt = now - self.time_offset\n\n scale = rt.total_seconds() / self.total_time.total_seconds()\n return scale\n","sub_path":"vnpy/app/cta_strategy/strategies/ma_trend/time_manager.py","file_name":"time_manager.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
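+A worked example of ClockManager.get_time_scale above, assuming the class is importable: with a 09:30-16:00 session (6.5 hours), 12:45 lies 3.25 hours in, so the scale is 0.5.
+
+    import datetime
+
+    cm = ClockManager(datetime.time(9, 30), datetime.time(16, 0))
+    t = datetime.datetime(2021, 1, 4, 12, 45)
+    print(cm.get_time_scale(t))  # 3.25 / 6.5 = 0.5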
+{"seq_id":"270264048","text":"\"\"\"\nDefines the executors, programs, train entry, test entry and save/load models\n\"\"\"\n#!/usr/bin/env python\n# coding=utf8\n# File: train.py\nfrom __future__ import print_function\nimport sys\nimport copy\nimport os\nfrom os.path import exists, join\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n\nPARL_DIR = os.environ.get('PARL_DIR', '')\nassert exists(PARL_DIR), ('PARL_DIR', PARL_DIR, 'not exists')\nsys.path.append(PARL_DIR)\n\nimport parl.layers as layers\nfrom paddle import fluid\nfrom paddle.fluid.framework import Variable\n\nfrom utils import TracebackWrapper, save_pickle\nfrom fluid_utils import executor_run_with_fetch_dict, parallel_executor_run_with_fetch_dict\n\n\nclass GenComputationTask(object):\n \"\"\"\n For generation tasks\n \"\"\"\n def __init__(self, algorithm, model_dir='', mode='single', mode_args=None, scope=None):\n \"\"\"\n Args:\n algorithm: Algorithm object in PARL\n \"\"\"\n self.alg = algorithm\n self.model_dir = model_dir\n self.mode = mode\n self.mode_args = mode_args\n self.ckp_step = -1\n self.use_cuda = True if self.alg.gpu_id >= 0 else False\n\n self.scope = scope\n with fluid.scope_guard(self.scope):\n self._define_program()\n self._define_executor(mode)\n\n def _define_program(self):\n \"\"\"\n Use fluid.unique_name to make sure train \n and test are using the same params \n if the model is not base on PARL.\n \"\"\"\n self.train_program = fluid.Program()\n self.startup_program = fluid.Program()\n self.test_program = fluid.Program()\n self.inference_program = fluid.Program() # only consider single mode\n self.eps_greedy_sampling_program = fluid.Program() # only consider single mode\n self.softmax_sampling_program = fluid.Program() # only consider single mode\n\n with fluid.program_guard(self.train_program, self.startup_program):\n with fluid.unique_name.guard():\n self.train_outputs = self.alg.train()\n\n with fluid.program_guard(self.test_program, fluid.Program()):\n with fluid.unique_name.guard():\n self.test_outputs = self.alg.test()\n\n with fluid.program_guard(self.inference_program, fluid.Program()):\n with fluid.unique_name.guard():\n self.inference_outputs = self.alg.inference()\n\n with fluid.program_guard(self.eps_greedy_sampling_program, fluid.Program()):\n with fluid.unique_name.guard():\n self.eps_greedy_sampling_outputs = self.alg.eps_greedy_sampling()\n\n with fluid.program_guard(self.softmax_sampling_program, fluid.Program()):\n with fluid.unique_name.guard():\n self.softmax_sampling_outputs = self.alg.softmax_sampling()\n\n def _define_executor(self, mode):\n \"\"\"\n define executors, run startup, and load saved models\n \"\"\"\n if mode == 'single':\n place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()\n self.base_exe = fluid.Executor(place)\n self.base_exe.run(self.startup_program)\n self.ckp_step = self.load_model(self.model_dir)\n self.train_exe = self.base_exe\n self.test_exe = self.base_exe\n\n elif mode == 'parallel':\n place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()\n self.base_exe = fluid.Executor(place)\n self.base_exe.run(self.startup_program)\n self.ckp_step = self.load_model(self.model_dir)\n self._define_parallel_executor(self.train_program, self.test_program)\n\n def _define_parallel_executor(self, train_program, test_program):\n strategy = fluid.ExecutionStrategy()\n if self.use_cuda:\n strategy.num_threads = 1 # otherwise it will crash in GPU 
mode. \n # strategy.allow_op_delay = False\n build_strategy = fluid.BuildStrategy()\n loss = self.train_outputs['fetch_dict']['loss']\n self.train_exe = fluid.ParallelExecutor(use_cuda=self.use_cuda, \n loss_name=loss.name,\n main_program=train_program,\n exec_strategy=strategy,\n build_strategy=build_strategy,\n scope=self.scope)\n self.test_exe = fluid.ParallelExecutor(use_cuda=self.use_cuda, \n share_vars_from=self.train_exe,\n main_program=test_program,\n exec_strategy=strategy,\n build_strategy=build_strategy,\n scope=self.scope)\n \n ###################\n ### main functions\n ###################\n\n def train(self, list_feed_dict):\n \"\"\"train\"\"\"\n if self.mode == 'single':\n assert len(list_feed_dict) == 1\n return executor_run_with_fetch_dict(self.train_exe, \n program=self.train_program,\n fetch_dict=self.train_outputs['fetch_dict'],\n feed=list_feed_dict[0],\n return_numpy=False,\n scope=self.scope)\n\n elif self.mode == 'parallel' or self.mode == 'pserver':\n return parallel_executor_run_with_fetch_dict(self.train_exe,\n fetch_dict=self.train_outputs['fetch_dict'],\n feed=list_feed_dict,\n return_numpy=False)\n\n def test(self, list_feed_dict):\n \"\"\"test\"\"\"\n if self.mode == 'single':\n assert len(list_feed_dict) == 1\n return executor_run_with_fetch_dict(self.test_exe, \n program=self.test_program,\n fetch_dict=self.test_outputs['fetch_dict'],\n feed=list_feed_dict[0],\n return_numpy=False,\n scope=self.scope)\n\n elif self.mode == 'parallel' or self.mode == 'pserver':\n return parallel_executor_run_with_fetch_dict(self.test_exe,\n fetch_dict=self.test_outputs['fetch_dict'],\n feed=list_feed_dict,\n return_numpy=False)\n\n def inference(self, feed_dict):\n \"\"\"inference\"\"\"\n return executor_run_with_fetch_dict(self.base_exe, \n program=self.inference_program,\n fetch_dict=self.inference_outputs['fetch_dict'],\n feed=feed_dict,\n return_numpy=False,\n scope=self.scope)\n\n def eps_greedy_sampling(self, feed_dict):\n \"\"\"sampling\"\"\"\n return executor_run_with_fetch_dict(self.base_exe, \n program=self.eps_greedy_sampling_program,\n fetch_dict=self.eps_greedy_sampling_outputs['fetch_dict'],\n feed=feed_dict,\n return_numpy=False,\n scope=self.scope)\n\n def softmax_sampling(self, feed_dict):\n \"\"\"sampling\"\"\"\n return executor_run_with_fetch_dict(self.base_exe, \n program=self.softmax_sampling_program,\n fetch_dict=self.softmax_sampling_outputs['fetch_dict'],\n feed=feed_dict,\n return_numpy=False,\n scope=self.scope)\n\n ##############\n ### utils\n ##############\n\n def print_var_shapes(self):\n for param in self.train_program.global_block().all_parameters():\n array = np.array(self.scope.find_var(param.name).get_tensor())\n if len(array.shape) == 2:\n print (param.name, array.shape, array[0, :4])\n elif len(array.shape) == 1:\n print (param.name, array.shape, array[:4])\n\n def save_model(self, path, checkpoint_step):\n \"\"\"save network model\"\"\"\n if not exists(path):\n os.makedirs(path)\n with fluid.scope_guard(self.scope):\n fluid.io.save_persistables(executor=self.base_exe,\n dirname=path,\n main_program=self.train_program,\n filename='model-%d.ckp' % checkpoint_step)\n logging.info('==> Model saved to %s' % path)\n\n def load_model(self, path, ckp_step=None):\n \"\"\"\n return -1 if not found\n \"\"\"\n def _load_model(path, ckp_step):\n file = join(path, 'model-%d.ckp' % ckp_step)\n assert exists(file), file\n with fluid.scope_guard(self.scope):\n fluid.io.load_persistables(executor=self.base_exe,\n dirname=path,\n 
main_program=self.train_program,\n filename='model-%d.ckp' % ckp_step)\n logging.info('==> Model loaded from %s (step = %d)' % (path, ckp_step))\n\n if ckp_step is None:\n ckp_step = self.get_lastest_checkpoint(path)\n if ckp_step is None:\n logging.info('==> Model loaded from %s (not found, skipped)' % path)\n return -1\n else:\n _load_model(path, ckp_step)\n return ckp_step\n \n def get_lastest_checkpoint(self, path):\n \"\"\"\n search lastest checkpoint with model-*.ckp under the give path\n return None if not found\n \"\"\"\n if not exists(path):\n return None\n\n files = os.listdir(path)\n prefix = 'model-'\n suffix = '.ckp'\n last_ckp_step = None\n for f in files:\n if not (f.startswith(prefix) and f.endswith(suffix)):\n continue\n ckp_step = f[len(prefix):-len(suffix)]\n if not ckp_step.isdigit():\n continue\n ckp_step = int(ckp_step)\n if last_ckp_step is None:\n last_ckp_step = ckp_step\n else:\n last_ckp_step = max(last_ckp_step, ckp_step)\n return last_ckp_step\n\n\n\n\n\n\n\n","sub_path":"src/gen_computation_task.py","file_name":"gen_computation_task.py","file_ext":"py","file_size_in_byte":11159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"23839900","text":"from flask import render_template,request,\\\n jsonify,session,redirect,url_for\nfrom flask.templating import render_template_string\nfrom . import main\nfrom .routes import connection\nfrom functools import wraps\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not 'USER_LOGGED_IN' in session:\n return render_template('loginspec.html', next=request.url)\n return f(*args, **kwargs)\n return decorated_function\n\n\n@main.route('/q/',methods=['GET'])\n@login_required\ndef result(username):\n cursor = connection.cursor()\n sqlko = \"select doctor_fullname from doctor where doctor_username = %s\"\n datako = (username)\n connection.ping(reconnect=True)\n cursor.execute(sqlko, datako)\n out = cursor.fetchone()\n name = session['USER_DISPLAY_NAME']\n return render_template('result.html',doctor_name=out[0],name=name)\n ","sub_path":"app/main/redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"277527379","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\n# import requests\n# from xml.etree import ElementTree\nimport materials_commons.api as mcapi\nimport yaml\nfrom urllib.parse import urlparse\nimport posixpath\nimport OpenVisus as ov\nfrom matplotlib import pyplot as plt\nfrom slugify import slugify\n\n\ndef create_ds_name_from_url(url):\n # Take off filename\n p = posixpath.dirname(url.path)\n\n # Now iterate over path peeling off each part of path\n # to create the dataset name. End when the basename\n # is equal to 1mb\n ds_name = \"\"\n while True:\n name = posixpath.basename(p)\n if name == \"1mb\":\n break\n\n if ds_name == \"\":\n ds_name = name\n else:\n ds_name += f\"-{name}\"\n p = posixpath.dirname(p)\n return slugify(ds_name)\n\n\nif __name__ == \"__main__\":\n c = mcapi.Client(os.getenv(\"MCAPI_KEY\"), base_url=\"http://localhost:8000/api\")\n proj = c.get_project(77)\n c.set_debug_on()\n with open('/home/gtarcea/Dropbox/transfers/datasets.yaml') as f:\n try:\n ds = yaml.safe_load(f)\n i = 0\n for ds_entry in ds:\n if i == 1:\n break\n\n remote = ds_entry[\"remote\"]\n if remote is None:\n continue\n\n with open(\"ov-template.ipynb\", \"r\") as t:\n data = t.read()\n data = data.replace(\"{remote-here}\", remote)\n with open(\"dataset.ipynb\", \"w\") as out:\n out.write(data)\n url = urlparse(remote)\n key = remote[len(\"s3://\"):]\n profile = \"sealstorage\"\n s3_url = f\"https://maritime.sealstorage.io/api/v0/s3/{key}?profile={profile}\"\n print(f\"s3_url = {s3_url}\")\n db = ov.LoadDataset(s3_url)\n data = db.read()\n # Remove idx file\n ds_name = create_ds_name_from_url(url)\n plt.imsave(f\"{ds_name}.png\", data)\n ds_dir = c.create_directory(77, ds_name, proj.root_dir.id)\n c.upload_file(77, ds_dir.id, f\"./{ds_name}.png\")\n c.upload_file(77, ds_dir.id, \"./dataset.ipynb\")\n description = f\"OpenVisus Dataset\\n\"\n for key, value in ds_entry.items():\n if key != \"source\" and key != \"remote\":\n if value is not dict:\n description += f\"{key}: {value}\\n\"\n else:\n description += \"{key}:\\n\"\n for k, v in value.items():\n description += f\" {k}: {v}\\n\"\n print(f\"Creating dataset {ds_name}\")\n ds_request = mcapi.CreateDatasetRequest(description=description,\n license=\"Open Database License (ODC-ODbL)\")\n # tags=[{\"value\": \"OpenVisus\"}])\n created_ds = c.create_dataset(77, ds_name, ds_request)\n file_selection = {\n \"include_files\": [f\"/{ds_name}/{ds_name}.png\", f\"/{ds_name}/dataset.ipynb\"],\n \"exclude_files\": [],\n \"include_dirs\": [],\n \"exclude_dirs\": []\n }\n c.change_dataset_file_selection(77, created_ds.id, file_selection)\n os.remove(f\"{ds_name}.png\")\n i = 1\n except yaml.YAMLError as e:\n print(e)\n i = 1\n\n # c.set_debug_on()\n # r = requests.get(\"http://atlantis.sci.utah.edu/mod_visus?action=list\")\n\n # tree = ElementTree.fromstring(r.content)\n # group = \"\"\n # for elem in tree.iter():\n # if elem.tag == \"group\":\n # group = elem.attrib[\"name\"]\n # elif elem.tag == \"dataset\":\n # ds = elem.attrib[\"name\"]\n # print(f\"Dataset {ds} in group {group}\")\n # mod_visus = requests.get(\"https://atlantis.sci.utah.edu/mod_visus?action=readdataset&dataset=\" + ds)\n # data = mod_visus.text.split('\\n')[5]\n # ds_request = mcapi.CreateDatasetRequest(description=\"OpenVisus Dataset\\n\" + \"Group: \" + group + \"\\n\" + data,\n # license=\"Open Database License (ODC-ODbL)\")\n # # tags=[{\"value\": \"OpenVisus\"}])\n # created_ds = c.create_dataset(77, ds, ds_request)\n\n # group = 
child.attrib[\"name\"]\n # for ds in child:\n # dsname = child.attrib[\"name\"]\n # print(\"Dataset \" + dsname + \" in group \" + group)\n","sub_path":"ovds.py","file_name":"ovds.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"325345535","text":"from __future__ import print_function, absolute_import\nimport argparse\nimport os.path as osp\n\nimport numpy as np\nimport sys\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\n\nfrom reid import datasets\nfrom reid import models\nfrom reid.dist_metric import DistanceMetric\nfrom reid.trainers import Trainer\nfrom reid.evaluators import Evaluator\nfrom reid.utils.data import transforms as T\nfrom reid.utils.data.preprocessor import Preprocessor\nfrom reid.utils.logging import Logger\nfrom reid.utils.serialization import load_checkpoint, save_checkpoint\n\ndef get_data(name, split_id, data_dir, height, width, batch_size, workers,\n combine_trainval):\n root = osp.join(data_dir, name)\n\n dataset = datasets.create(name, root, split_id=split_id)\n\n normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_set = dataset.trainval if combine_trainval else dataset.train\n num_classes = dataset.num_trainval_ids \n\n train_transformer = T.Compose([\n T.RandomSizedRectCrop(height, width),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n normalizer,\n ])\n\n test_transformer = T.Compose([\n T.RectScale(height, width),\n T.ToTensor(),\n normalizer,\n ])\n\n train_loader = DataLoader(\n Preprocessor(train_set, root=dataset.images_dir,\n transform=train_transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=True, pin_memory=True, drop_last=True)\n\n val_loader = DataLoader(\n Preprocessor(dataset.val, root=dataset.images_dir,\n transform=test_transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n test_loader = DataLoader(\n Preprocessor(list(set(dataset.query) | set(dataset.gallery)),\n root=dataset.images_dir, transform=test_transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n return dataset, num_classes, train_loader, val_loader, test_loader\n\ndef get_state_dict(dict1,dict2):\n state_dict = {k:v for k,v in dict1.items() if k in dict2}\n return state_dict\n\ndef main(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.benchmark = True\n\n # Redirect print to both console and log file\n if not args.evaluate:\n sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))\n\n # Create data loaders\n if args.height is None or args.width is None:\n args.height, args.width = (144, 56) if args.arch == 'inception' else \\\n (256, 128)\n dataset, num_classes, train_loader, val_loader, test_loader = \\\n get_data(args.dataset, args.split, args.data_dir, args.height,\n args.width, args.batch_size, args.workers,\n args.combine_trainval)\n\n # Create model\n model = models.create(args.arch, num_features=args.features,\n dropout=args.dropout, num_classes=num_classes)\n start_epoch = best_top1 = 0\n model = nn.DataParallel(model).cuda()\n if args.resume:\n #checkpoint = load_checkpoint(args.resume)\n #state_dict = get_state_dict(checkpoint['state_dict'],model.state_dict())\n #model.load_state_dict(state_dict)\n #start_epoch = checkpoint['epoch']\n #best_top1 = checkpoint['best_top1']\n #print(\"=> Start epoch {} best top1 {:.1%}\"\n # .format(start_epoch, best_top1))\n state_dict = torch.load(args.resume)\n state_dict = get_state_dict(state_dict,model.state_dict())\n model.load_state_dict(state_dict)\n # Distance metric\n metric = DistanceMetric(algorithm=args.dist_metric)\n\n # Evaluator\n evaluator = Evaluator(model)\n\n if args.evaluate:\n metric.train(model, 
train_loader)\n print(\"Validation:\")\n evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)\n print(\"Test:\")\n evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Softmax loss classification\")\n # data\n parser.add_argument('-d', '--dataset', type=str, default='cuhk03',\n choices=datasets.names())\n parser.add_argument('-b', '--batch-size', type=int, default=256)\n parser.add_argument('-j', '--workers', type=int, default=4)\n parser.add_argument('--split', type=int, default=0)\n parser.add_argument('--height', type=int,\n help=\"input height, default: 256 for resnet*, \"\n \"144 for inception\")\n parser.add_argument('--width', type=int,\n help=\"input width, default: 128 for resnet*, \"\n \"56 for inception\")\n parser.add_argument('--combine-trainval', action='store_true',\n help=\"train and val sets together for training, \"\n \"val set alone for validation\")\n # model\n parser.add_argument('-a', '--arch', type=str, default='resnet50',\n choices=models.names())\n parser.add_argument('--features', type=int, default=128)\n parser.add_argument('--dropout', type=float, default=0.5)\n # optimizer\n parser.add_argument('--lr', type=float, default=0.1,\n help=\"learning rate of new parameters, for pretrained \"\n \"parameters it is 10 times smaller than this\")\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--weight-decay', type=float, default=5e-4)\n # training configs\n parser.add_argument('--resume', type=str, default='', metavar='PATH')\n parser.add_argument('--evaluate', action='store_true',\n help=\"evaluation only\")\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--start_save', type=int, default=0,\n help=\"start saving checkpoints after specific epoch\")\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--print-freq', type=int, default=1)\n # metric learning\n parser.add_argument('--dist-metric', type=str, default='euclidean',\n choices=['euclidean', 'kissme'])\n # misc\n working_dir = osp.dirname(osp.abspath(__file__))\n parser.add_argument('--data-dir', type=str, metavar='PATH',\n default=osp.join(working_dir, 'data'))\n parser.add_argument('--logs-dir', type=str, metavar='PATH',\n default=osp.join(working_dir, 'logs'))\n main(parser.parse_args())\n","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"637216967","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n# 每個批次的大小\nbatch_size = 100\n\n# 計算一共有多少個批次\nn_batch = mnist.train.num_examples // batch_size\n\n\n# 初始化權值\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1) # 生成一個截斷的常態分配\n return tf.Variable(initial)\n\n\n# 初始化偏量\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\n# 卷積層\ndef conv2d(x, W):\n # x input tensor of shape '[batch, in_height, in_width, in_channels]'\n # W filter /kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]\n # strides[0] = strides[3]=1, strides[1]代表x方向的布長, strides[0]代表y方向的布長\n # padding: A string from: 'SAME', 'VALID'\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\n# 池化層\ndef max_pool_2x2(x):\n # ksize [1,x,y,1]\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\n# 定義兩個placeholder\nx = tf.placeholder(tf.float32, [None, 784]) # 28x28\ny = tf.placeholder(tf.float32, [None, 10])\n\n# 改變x的格式轉為4D的向量[batch, in_height, in_width, in_channels]\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\n# 初始化第一個卷積層的權值和偏量\nW_conv1 = weight_variable([5, 5, 1, 32]) # 採用5x5的採樣窗口,32個卷積核從1個平面抽取特徵\nb_conv1 = bias_variable([32]) # 每一個卷積核一個偏量\n\n# 把x_image和權值向量進行卷積,再加上偏量值,然後應用於relu激活函數\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1) # 執行max-pooling\n\n# 初始化第二個卷積層的權值和偏量\nW_conv2 = weight_variable([5, 5, 32, 64]) # 採用5x5的採樣窗口,64個卷積核從32個平面抽取特徵\nb_conv2 = bias_variable([64]) # 每一個卷積核一個偏量\n\n# 把x_image和權值向量進行卷積,再加上偏量值,然後應用於relu激活函數\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2) # 執行max-pooling\n\n# 28x28 的圖片第一次卷積後還是28x28,第一次池化後變為14x14\n# 第二次卷積後為14x14,第二次池化後變為7x7\n# 透過上面操作後得到64張7x7的平面\n\n# 初始化第一個全聯接層的權值\nW_fc1 = weight_variable([7 * 7 * 64, 1024]) # 上一張有7*7*64個神經元,全連結層有1024個神經元\nb_fc1 = bias_variable([1024]) # 1024個節點\n\n# 把池化層2的輸出層平化為1維\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n# 求第一個全連接層的輸出\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n# keep_prob用來表示神經元的輸出機率\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n# 初始化第二個全聯接層的權值\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\n# 計算輸出\nprediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n# 交叉熵代價函數\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=prediction))\n# 使用AdamOptimizer進行優化\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n# 結果存放在一個布爾列表中\ncorrect_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1)) # argmax返回一維張亮中最大的值所在的位置\n# 求準確率\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(21):\n for batch in range(n_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})\n\n acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})\n print(\"Iter \" + str(epoch) + \". 
Testing Accuracy= \" + str(acc))\n","sub_path":"Artificial_Intelligence_Related/TensorflowEx/TestBookExample/youtube/lesson6/tf1-cnn.py","file_name":"tf1-cnn.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
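+The spatial sizes quoted in the comments of the CNN above follow from SAME padding: stride-1 convolution preserves size, and 2x2 max-pooling with stride 2 computes ceil(size / 2). A tiny check of that arithmetic:
+
+    import math
+
+    def same_pool_out(size, stride=2):
+        return math.ceil(size / stride)
+
+    assert same_pool_out(28) == 14
+    assert same_pool_out(14) == 7
+    assert 7 * 7 * 64 == 3136  # flattened size feeding the first fully connected layer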
+{"seq_id":"31886842","text":"import pygame, random, sys\n\npygame.init()\n\nWW, WH = 800, 600\nscreen = pygame.display.set_mode((WW, WH))\npygame.display.set_caption('DANCING BALLS!')\n\nclock = pygame.time.Clock()\n\n# font = pygame.font.Font('Roboto-Thin.ttf', 32)\n\nG = 1 # u can edit this ;)\n\nclass Ball:\n def __init__(self, x, y, radius, vx, vy, damping, color):\n self.vx = vx\n self.vy = vy\n self.damping = damping\n self.color = color\n self.rect = pygame.Rect(x, y, radius, radius)\n\n def draw(self):\n self.rect.x += int(self.vx)\n self.rect.y += int(self.vy)\n\n self.vy += G\n\n if self.rect.top <= 0 or self.rect.bottom >= WH:\n self.vy *= -(1 - self.damping)\n if self.rect.left <= 0 or self.rect.right >= WW:\n self.vx *= -(1 - self.damping)\n\n pygame.draw.ellipse(screen, self.color, self.rect)\n\n def theEdgeCase(self):\n if self.rect.top <= 0:\n self.rect.top = 0 + 1\n if self.rect.bottom >= WH:\n self.rect.bottom = WH - 1\n if self.rect.left <= 0:\n self.rect.left = 0 + 1\n if self.rect.right >= WW:\n self.rect.right = WW - 1\n\nballs = []\nfor i in range(250): # u can edit this ;)\n x, y = random.randint(25, WW - 25), random.randint(25, WH - 25)\n r, g, b = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n s = random.randint(25, 51)\n vx = random.randint(-15, 16)\n vy = random.randint(-15, 16)\n balls.append(Ball(x, y, s, vx, vy, 0.045, (r, g, b))) # u can edit this ;)\n\nr, g, b = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n\nrunning = True\n\nwhile running:\n clock.tick(120)\n ran = range(-3, 4, 1)\n \n r += random.choice(ran)\n g += random.choice(ran)\n b += random.choice(ran)\n if r > 255 or r < 0:\n r = random.randint(0, 255)\n if g > 255 or g < 0:\n g = random.randint(0, 255)\n if b > 255 or b < 0:\n b = random.randint(0, 255)\n screen.fill((r, g, b))\n \n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n running = False\n\n for i in balls:\n i.draw()\n i.theEdgeCase()\n\n pygame.display.update()\n\n clock.tick(60)\n \npygame.quit()","sub_path":"Ping Pong/physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"4"}
+{"seq_id":"149451746","text":"# -*- coding: utf-8 -*- \n'''\nNo.598\nGiven an m * n matrix M initialized with all 0's and several update operations.\nOperations are represented by a 2D array, and each operation is represented by an array with two positive integers a and b, \nwhich means M[i][j] should be added by one for all 0 <= i < a and 0 <= j < b.\nYou need to count and return the number of maximum integers in the matrix after performing all the operations.\n'''\nclass Solution(object):\n def maxCount(self, m, n, ops):\n \"\"\"\n :type m: int\n :type n: int\n :type ops: List[List[int]]\n :rtype: int\n 对于每次操作,0 <= i < a and 0 <= j < b.的元素肯定都要受到影响,对于每次都影响左上角的元素,\n 只要求出所有操作中的最小的共同影响的行数和列数再相乘即可\n \"\"\"\n maxm = m\n maxn = n\n for i in range(len(ops)):\n a = ops[i][0]\n b = ops[i][1]\n if a\\d+/(\\d+)')\n shopid_pettern = re.compile(r'shopId:\\'(\\d*)\\',')\n venderid_pettern = re.compile(r'venderId:(\\d*),')\n brand_pettern = re.compile(r'brand: (\\d*),')\n skuids_pettern = re.compile(r'{.*?\"skuId\":(\\d+).*?}')\n shop_name_pettern = re.compile(r'target=\"_blank\" title=\"(\\S*?)\" clstag=\"shangpin')\n ziying_pettern = re.compile(r'