\n self._extra_info = {}\n for key_str in self._extra_info_key:\n tag = self.soup.find(\"span\", class_=key_str)\n if tag is not None:\n self._extra_info[key_str] = tag[\"title\"]\n\n\n def __str__(self):\n #print类的实例打印的字符串\n out_str = \"User \" + self._name + \" agree: \" + str(self._agree_cnt) + \", \" \\\n \"thank: \" + str(self._thank_cnt) + \" \" + self._gender + \" \"\n\n for key_str in self._extra_info_key:\n if key_str in self._extra_info:\n out_str += \" \" + key_str + \": \" + self._extra_info[key_str]\n\n return out_str\n\nclass ZhihuCommon(object):\n \"\"\"ZhihuCrawler, ZhihuTopic, ZhihuUser三个类的共用代码, 包含一些服务于debug的函数, 共用的网页获取函数, 等。\"\"\"\n\n root_topic = 19776749 # 19776749 根话题 19776751 未归类 19778298 形而上\n unclassed_topic = 19776751\n my_header = {\n 'Connection': 'Keep-Alive',\n 'Accept': 'text/html, application/xhtml+xml, */*',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Accept-Encoding': 'gzip,deflate,sdch',\n 'Host': 'www.zhihu.com',\n 'DNT': '1'\n }\n\n \"\"\"运行参数\"\"\"\n debug_fast_crawler = False #快速模式是否打开,当此模式打开时,不会遍历所有同类的信息,用于调试。\n traversal_level_max = 3 #深度优化遍历最大层数限制\n user_json_file = \"user.json\"\n answer_json_file = \"answer.json\"\n topic_json_file = \"topic.json\"\n config_json_file = \"config.json\"\n\n _last_get_page_fail = False #上一次调用get_page是失败的?\n _xsrf = None\n _session = None\n\n @staticmethod\n def set_xsrf(xsrf):\n ZhihuCommon._xsrf = xsrf\n\n @staticmethod\n def get_xsrf():\n return ZhihuCommon._xsrf\n\n @staticmethod\n def session_init():\n ZhihuCommon._session = requests.Session()\n\n @staticmethod\n def get_session():\n return ZhihuCommon._session\n\n @staticmethod\n def get(url):\n try_time = 0\n\n while try_time < 5:\n #上一次get页面失败,暂停10秒\n if ZhihuCommon._last_get_page_fail:\n time.sleep(10)\n\n try:\n try_time += 1\n response = ZhihuCommon.get_session().get(url, headers = ZhihuCommon.my_header, timeout = 30)\n #, cert = 'F:\\Programs\\Class-3-Public-Primary-Certification-Authority.pem')\n soup = BeautifulSoup(response.text, \"html.parser\")\n ZhihuCommon._last_get_page_fail = False\n return response.text, soup\n except Exception as e:\n print(\"fail to get \" + url + \" error info: \" + str(e) + \" try_time \" + str(try_time))\n ZhihuCommon._last_get_page_fail = True\n else:\n raise #当前函数不知道应该怎么处理该错误,所以,最恰当的方式是继续往上抛,让顶层调用者去处理\n\n @staticmethod\n def post(url, post_dict):\n try_time = 0\n\n while try_time < 5:\n #上一次get页面失败,暂停10秒\n if ZhihuCommon._last_get_page_fail:\n time.sleep(10)\n\n try:\n try_time += 1\n response = ZhihuCommon.get_session().post(url, headers = ZhihuCommon.my_header, data = post_dict, timeout = 30)\n #, cert = 'F:\\Programs\\Class-3-Public-Primary-Certification-Authority.pem')\n ZhihuCommon._last_get_page_fail = False\n return response\n except Exception as e:\n print(\"fail to post \" + url + \" error info: \" + str(e) + \" try_time \" + str(try_time))\n ZhihuCommon._last_get_page_fail = True\n else:\n raise #当前函数不知道应该怎么处理该错误,所以,最恰当的方式是继续往上抛,让顶层调用者去处理\n\n @staticmethod\n def get_and_save_page(url, path):\n try:\n response = ZhihuCommon.get_session().get(url, headers = ZhihuCommon.my_header, verify = False)\n with codecs.open(path, 'w', response.encoding) as fp:\n fp.write(response.text)\n return\n except Exception as e:\n print(\"fail to get \" + url + \" error info: \" + str(e))\n return\n\ndef main():\n z = ZhihuCrawler()\n z.init_xsrf()\n login_sucess = z.login()\n if not login_sucess:\n print(\"fail to login.\")\n return\n z.do_crawler()\n\n print(\"ok\\n\")\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"crawler_user.py","file_name":"crawler_user.py","file_ext":"py","file_size_in_byte":11841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"575245313","text":"\"\"\"\nHTTP client for api requests. This is pluggable into the IPFS Api client and\ncan/will eventually be supplemented with an asynchronous version.\n\"\"\"\nimport requests\nimport contextlib\n\nfrom . import encoding\n\n\n\nclass HTTPClient(object):\n\n def __init__(self, host, port, base, default_enc):\n self.host = host\n self.port = port\n self.base = 'http://%s:%s/%s' % (host, port, base)\n\n self.default_enc = encoding.get_encoding(default_enc)\n self._session = None\n\n\n def request(self, path,\n args=[], opts={}, files=[],\n decoder=None, post_hook=None,\n **kwargs):\n \n url = self.base + path\n \n params = []\n params.append(('stream-channels', 'true'))\n for opt in opts.items():\n params.append(opt)\n for arg in args:\n params.append(('arg', arg))\n\n method = 'post' if (files or kwargs.has_key('data')) else 'get'\n \n if self._session:\n res = self._session.request(method, url,\n params=params, files=files, **kwargs)\n else:\n res = requests.request(method, url,\n params=params, files=files, **kwargs)\n\n if not decoder:\n try:\n ret = self.default_enc.parse(res.text)\n except:\n ret = res.text\n else:\n enc = encoding.get_encoding(decoder)\n try:\n ret = enc.parse(res.text)\n except:\n ret = res.text\n \n if post_hook:\n return post_hook(ret)\n return ret\n\n\n @contextlib.contextmanager\n def session(self):\n self._session = requests.session()\n yield\n self._session.close()\n self._session = None\n","sub_path":"ipfsApi/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"398736136","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 5 11:04:17 2019\n\n@author: jonahcullen\n\"\"\"\n\nimport argparse\nimport os\n\n\ndef make_arg_parser():\n parser = argparse.ArgumentParser(\n prog=\"GeneratePBS.py\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"-d\", \"--data\",\n default=argparse.SUPPRESS,\n metavar=\"\",\n required=True,\n help=\"Path to dir containing the pbs submission scripts files [required]\")\n return parser\n\n\nif __name__ == '__main__':\n \n parser = make_arg_parser()\n args = parser.parse_args()\n\n data = os.path.abspath(args.data)\n\n pbs = os.path.join(os.getcwd(), \"submit_pbs_scripts.sh\")\n\n with open(pbs, \"w\") as f:\n for file_name in os.listdir(data):\n if file_name.endswith(\".pbs\"):\n print(f\"qsub {data}/\", file_name, file = f, sep = \"\")\n","sub_path":"RANDOM_REQUESTS/Ted_K/python_scripts/Generate_pbs_submission_shell_script.py","file_name":"Generate_pbs_submission_shell_script.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"471915070","text":"\"\"\"\nSeparate Chaining Hash Table based functions, timings and statistics\n\n@author Mark Diedericks 30572738\n@since 21/10/2019\n@modified 21/10/2019\n\"\"\"\n\nimport bst\nfrom task1 import HashTable as HashTableLinear\nfrom task3 import load_dictionary_statistics\n\nclass BinarySearchTree(bst.BinarySearchTree):\n def insert(self, key, value):\n \"\"\"\n Will attempt to insert at a value with a given key\n\n @param key: The key to search for and get it's value\n @return The depth at which the key-value pair was inserted\n @complexity O(log n) for both best and worst case, where n is the depth of the binary search tree\n @postcondition The binary search tree will contain the given key-value par\n \"\"\"\n if self.root is None:\n self.root = bst.BinaryTreeNode(key, value)\n return 0 # It was the root node, so depth of 0\n\n depth = 0 # Start at root, so depth of 0\n current_node = self.root\n while True:\n if key < current_node.key:\n depth += 1 # Increment depth at where the insert is\n if current_node.left is None:\n current_node.left = bst.BinaryTreeNode(key, value)\n break\n else:\n current_node = current_node.left\n elif key > current_node.key:\n depth += 1 # Increment depth at where the insert is\n if current_node.right is None:\n current_node.right = bst.BinaryTreeNode(key, value)\n break\n else:\n current_node = current_node.right\n else:\n assert current_node.key == key\n current_node.item = value\n break\n\n # Return set/insertion depth\n return depth\n\nclass HashTable(HashTableLinear): \n ### Override only the methods which directly implement linear probing ###\n ### Implement separate chaining instead. ###\n \n def __getitem__(self, key):\n \"\"\"\n Will attempt to get the value associoated with the given key\n\n @param key: The key to search for and get it's value\n @return The value of associated with the given key\n @raises KeyError: key does not exist in the hash table\n @complexity O(log n) for both best and worst case, where n is the length of the binary search tree,\n @precondition The parameter key is of type string\n @postcondition The value for the key will be returned if the key exists within the hash table\n \"\"\"\n \n # assert preconditions\n assert isinstance(key, str)\n\n # Get starting index and table size\n i = self.hash(key)\n\n # If slot is not empty, attempt to find key\n # BinarySearchTree will raise KeyError if not found.\n if self.table[i] is not None:\n assert isinstance(self.table[i], BinarySearchTree)\n return self.table[i][key]\n\n # Key wasn't found\n raise KeyError('Key does not exist in table.')\n\n def __setitem__(self, key, item):\n \"\"\"\n Will set value of existing key-value pair, insert new key-value pair if not existent within dictionary. \n Will also rehash the hash table if it is full, inserting the key-value pair afterwards.\n\n @param key: The key of the key value pair, hashed to find index\n @param item: The value associated with the key\n @return None\n @complexity O(log n) for both best and worst case, where n is the length of the binary search tree,\n @precondition The parameter key is of type string\n @postcondition The hash table will contain the the item at for the given key\n \"\"\"\n \n # assert preconditions\n assert isinstance(key, str)\n\n # Get starting index and table size\n i = self.hash(key)\n\n # If a pair is where this is meant to be, we have a collision\n # If that pair has the same key we are setting, not inserting\n # thus it cannot count as a collision.\n if self.table[i] is not None:\n self.collisions += 1\n else:\n self.table[i] = BinarySearchTree()\n \n assert isinstance(self.table[i], BinarySearchTree)\n\n # Insert/set and get probe length stat\n depth = self.table[i].insert(key, item)\n self.count += 1\n\n # We're adding a new pair, so consider probe length\n self.probe_len += depth\n if depth > self.probe_max:\n self.probe_max = depth\n\n def __contains__(self, key):\n \"\"\"\n Determines whether or not the hash table contains a specified key\n\n @param key: the key to search for\n @return Whether or not the key exists within the hash table\n @complexity O(1) for best case - no BST. O(log n) for worst case, where n is the length of the binary search tree,\n @precondition The parameter key is of type string\n \"\"\"\n \n # assert preconditions\n assert isinstance(key, str)\n\n # Get starting index and table size\n i = self.hash(key)\n if self.table[i] is not None:\n assert isinstance(self.table[i], BinarySearchTree)\n return key in self.table[i]\n\n # Key wasn't found\n return False\n\n\ndef table_load_dictionary_statistics(max_time):\n \"\"\"\n Will execute load_dictionary_time on a combination of files, sizes and bases. Saving the data, along with timing and words\n to a file. Uses separate chaining hash table..\n \n @param max_time: how long load_dictionary operates before timing out, if none the function wont time out\n @return None\n @complexity O(nm) for both best and worst case. Where n is cost of load_dictionary and m is the number of size-base-file combinations\n @postcondition A file, 'output_task5.csv', will contain the filename, table, base, words, collisions, probe length, max probe length \n and rehash count time data for each combination.\n \"\"\"\n\n TABLE_BASE = [1, 27183, 250726]\n TABLE_SIZE = [250727, 402221, 1000081]\n FILE_NAMES = [\"english_small.txt\", \"english_large.txt\", \"french.txt\"]\n\n # Get output file handle\n f = open(\"output_task5.csv\", 'w+', encoding=\"UTF-8\")\n\n # Create headers\n f.write('File Name,Table Size,Table Base,Words,Time,Collisions,Probe Total,Probe Max, Rehashes\\n')\n\n # Loop through each combination\n for file in FILE_NAMES:\n for size in TABLE_SIZE:\n for base in TABLE_BASE:\n # Run combination with quadratic probing hash table\n res = load_dictionary_statistics(base, size, file, max_time, HashTable(size, base))\n\n words = res[0]\n time = res[1] if res[1] is not None else \"TIMEOUT\"\n col = res[2]\n pro = res[3]\n promax = res[4]\n rehashes = res[5]\n\n # Print results to file\n f.write('{0},{1},{2},{3},{4},{5},{6},{7},{8}\\n'.format(file, size, base, words, time, col, pro, promax, rehashes))\n print('{0},{1},{2},{3},{4},{5},{6},{7},{8}'.format(file, size, base, words, time, col, pro, promax, rehashes))\n\n # Close file\n f.close()\n\n # Ensure file is closed\n if not f.closed:\n raise IOError('File is not closed.')\n\n\nif __name__ == '__main__':\n table_load_dictionary_statistics(120)\n\n","sub_path":"Interview Prac 3/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"238801809","text":"\n\nfrom xai.brain.wordbase.nouns._llama import _LLAMA\n\n#calss header\nclass _LLAMAS(_LLAMA, ):\n\tdef __init__(self,): \n\t\t_LLAMA.__init__(self)\n\t\tself.name = \"LLAMAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"llama\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_llamas.py","file_name":"_llamas.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"308490753","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 31 14:37:11 2016\nModule which computes a the 2D spatial power spectrum of a given image. Used for VCA analysis. \n@author: npingel\n\"\"\"\n\n#imports\nimport numpy as np\nfrom astropy.io import fits\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as pyplot\nimport matplotlib\n\nmatplotlib.rc('font', family='sans-serif')\nmatplotlib.rc('font', serif='Helvetica Neue')\nmatplotlib.rc('text', usetex='false')\nmatplotlib.rcParams.update({'font.size': 14})\n\n\n##method to compute relevent gridding paramters and coordinates; returns pixel boundaries in modulus image for lower,\n##upper, and corresponding physical scale taken as the mid-point between the lower and upper boundaries\n##numPaddedPix is the number of pixels along one axis in padded image\n##angRes is the angular resolution of single pixel in original image (arcmin)\n##origAngExt is the original angular extent of the integrated image\ndef scale(numPaddedPix,angRes,origAngExt): \n distance = 300. ##pc\n totAngularCoverage = numPaddedPix*angRes/60.\n maxPhysScale = 2*distance*np.tan(np.deg2rad(totAngularCoverage/2.))\n modRes = (distance)/maxPhysScale ## pixel resolution of modulus image\n maxSpatialFreq = (numPaddedPix/2.)*modRes \n ##pixel radius of annuli boundaries \n lowerRadius = []\n midRadius = []\n upperRadius = [] \n minSpatialFreq = distance/(2*distance*np.tan(np.deg2rad(origAngExt/2.)))\n logInc = (np.log(maxSpatialFreq)-np.log(minSpatialFreq))/18. \n initLower = np.log(minSpatialFreq)\n for i in range(0,18):\n lowerRadius.append(initLower+(i*logInc))\n midRadius.append(lowerRadius[i]+logInc/2.)\n upperRadius.append(lowerRadius[i]+logInc)\n ##corresponding physical extent taken as midpoint between annuli\n midRadiusArr=np.array(midRadius,dtype='float32')\n lowerRadiusArr = np.array(lowerRadius, dtype='float32')\n upperRadiusArr = np.array(upperRadius, dtype='float32') \n spatialScale = distance/np.exp(midRadiusArr)\n return np.exp(lowerRadiusArr)/modRes,np.exp(upperRadiusArr)/modRes, spatialScale\n\n##function for fitting\ndef linFunc(x,slope,b):\n return x*slope+b\n \ndef gaussFunc(x,a,mu,sig):\n return a*np.exp(-(x-mu)**2/(2*sig**2))\n\ndef computePS(intImage,angResolution,numPadPix,origAngExt):\n intImage_Scaled = intImage[:,:]#/1.82e18\n where_are_NaNs = np.isnan(intImage_Scaled)\n intImage_Scaled[where_are_NaNs] = 0.\n plotPS = True\n print('Creating Modulus Image...')\n modulusImage = np.abs(np.fft.fftshift(np.fft.fft2(intImage_Scaled,[int(numPadPix),int(numPadPix)])))**2\n\n lowerRadius, upperRadius, spatialScale = scale(numPadPix,angResolution,origAngExt)\n\n ##arrays to hold results\n medianList = []\n errList = []\n for ring in range(0,18):\n print('Computing median value within annulus: '+np.str(ring+1)) \n colDenDist = []\n for i in range(0,int(numPadPix)):\n for j in range(0,int(numPadPix)):\n radius = np.sqrt((i-(numPadPix/2-1))**2+(j-(numPadPix/2-1))**2)\n if lowerRadius[ring] <= radius <= upperRadius[ring]:\n colDenDist.append(modulusImage[i,j])\n colDenDistArr = np.array(colDenDist, dtype='float32')\n medianList.append(np.median(colDenDistArr)) \n errList.append(np.median(np.abs(colDenDistArr-medianList[ring])))\n\n\n print('Fitting spectra...')\n errArr = np.array(errList, dtype='float32')\n medianArr = np.array(medianList, dtype='float32')\n errArr_Log = errArr/(medianArr*np.log(10))\n spatialScaleLog = np.log10(spatialScale)\n\n coeffs,matcov = curve_fit(linFunc,spatialScaleLog,np.log10(medianList),[1,1],sigma=errArr_Log)\n\n fitList = []\n for i in range(0,18):\n fitList.append(coeffs[0]*spatialScaleLog[i]+coeffs[1])\n error = np.sqrt(np.diag(matcov))\n print('Slope: '+np.str(coeffs[0]))\n print('Error: '+np.str(error[0]))\n if plotPS == False:\n pyplot.errorbar(spatialScale,np.log10(medianArr),yerr=errArr_Log, fmt='o')\n pyplot.plot(spatialScale,fitList, color='black', linewidth=2,label=r'${}^{13}$CO Slope: $-$%.2f' % coeffs[0]+'+/$-$'+'%.2f' % error[0])\n pyplot.xlim(100,0.5)\n pyplot.xscale('log')\n pyplot.ylabel(r'Log$_{10}$(Power)')\n pyplot.xlabel(r'Linear Scale [pc]')\n pyplot.legend(loc=0, fontsize=14)\n #pyplot.savefig('/Users/npingel/Desktop/Perseus_intPS_13CO_MADErrors', bbox_inches='tight')\n pyplot.show()\n pyplot.clf()\n return coeffs[0], error[0]\n \n\n\n","sub_path":"VCA/SPSModule.py","file_name":"SPSModule.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"277299855","text":"import webvtt\nimport sys\n\nif len(sys.argv) < 4:\n print(\"need file names\")\n sys.exit(1)\n\ndef rreplace(s, old, new):\n li = s.rsplit(old, 1)\n return new.join(li)\n\ndef convert_file(file_name, main_lang, sub_lang):\n file_name_sub = rreplace(file_name, main_lang, sub_lang)\n #print(file_name + '\\n' + file_name_sub)\n #return\n vtt_main = webvtt.read(file_name)\n vtt_sub = webvtt.read(file_name_sub)\n\n # while loop all korean time captions\n index_main = 0\n index_sub = 0\n while index_main < len(vtt_main):\n while index_sub < len(vtt_sub):\n caption_main = vtt_main[index_main]\n caption_sub = vtt_sub[index_sub]\n\n if (caption_main.start <= caption_sub.start):\n #print(\"##### \" + caption_main.text.replace(\"\",\"\").replace(\"\\n\",\"\\n##### \"))\n print(\"\" + caption_main.text.replace(\"\",\"\") + \"
\")\n break\n else:\n print(\"\" + caption_sub.text.replace(\"\", \"\") + \"
\")\n print(\"\")\n index_sub += 1\n index_main += 1\n\n # finish final z index\n while index_sub < len(vtt_sub):\n print(caption_sub.text)\n index_sub += 1\n\n #print('end of ' + file_name)\n\n# main\nprint(\"\")\nlang_main = sys.argv[1]\nlang_sub = sys.argv[2]\nfor i in range(3, len(sys.argv)):\n print('Episode' + str(i - 2) + '')\n print('======================')\n print('
Episode ' + str(i - 2))\n print('
======================')\n print('')\n convert_file(sys.argv[i], lang_main, lang_sub)\n\nprint(\"\")\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"205647782","text":"from math import exp\n\ndef f1(x: float, u1: float, u2: float):\n value = -500.005 * u1 + 499.995 * u2\n return value\n\ndef f2(x: float, u1: float, u2: float):\n value = 499.995 * u1 - 500.005 * u2\n return value\n\ndef f1_true(x: float):\n value = 10 * exp(-0.01 * x) - 3 * exp(-1000 * x)\n return value\n\ndef f2_true(x: float):\n value = 10 * exp(-0.01 * x) + 3 * exp(-1000 * x)\n return value\n\ndef calculate_true(x: float):\n return f1_true(x), f2_true(x)\n\ndef rk2euler(x_curr: float, v1_curr: float, v2_curr: float, h: float):\n x_euler = x_curr + h\n v1_euler = v1_curr + h * f1(x_curr, v1_curr, v2_curr)\n v2_euler = v2_curr + h * f2(x_curr, v1_curr, v2_curr)\n\n x_next = x_euler\n v1_next = v1_curr + 0.5 * h * (f1(x_curr, v1_curr, v2_curr) + f1(x_euler, v1_euler, v2_euler))\n v2_next = v2_curr + 0.5 * h * (f2(x_curr, v1_curr, v2_curr) + f2(x_euler, v1_euler, v2_euler))\n return x_next, v1_next, v2_next\n\ndef print_header():\n print(\"|Итерация |Время |Численное решение |Точное решение |Глобальная погрешность |\")\n print(\"|---------|----------|---------------------------------------------------|---------------------------------------------------|---------------------------------------------------|\")\n print(\"|{0:9}|{1:10.5}|{2:25}|{3:25}|{4:25}|{5:25}|{6:25}|{7:25}|\".format(\"n\", \"Xn\", \"V1\", \"V2\", \"U1\", \"U2\", \"E1\", \"E2\"))\n\ndef print_string(iter: int, x: float, v1: float, v2: float, u1: float, u2: float, e1: float, e2: float):\n print(\"|{0:9}|{1:10.5}|{2:25}|{3:25}|{4:25}|{5:25}|{6:25}|{7:25}|\".format(iter, x, v1, v2, u1, u2, e1, e2))\n\ndef print_table(x_list: list, v1_list: list, v2_list: list, u1_list: list, u2_list: list):\n print_header()\n number_iters = len(x_list)\n if number_iters < 150:\n for i in range(number_iters):\n print_string(i, x_list[i], v1_list[i], v2_list[i], u1_list[i], u2_list[i], u1_list[i] - v1_list[i], u2_list[i] - v2_list[i])\n else:\n for i in range(101):\n print_string(i, x_list[i], v1_list[i], v2_list[i], u1_list[i], u2_list[i], u1_list[i] - v1_list[i], u2_list[i] - v2_list[i])\n for i in range(30, 0, -1):\n print_string(number_iters - i, x_list[-i], v1_list[-i], v2_list[-i], u1_list[-i], u2_list[-i], u1_list[-i] - v1_list[-i], u2_list[-i] - v2_list[-i])\n\ndef calculate(x_start: float, v1_start: float, v2_start: float, h_start: float, epsilon: float):\n x_list = [x_start]\n v1_list = [v1_start]\n v2_list = [v2_start]\n u1_list = [v1_start]\n u2_list = [v2_start]\n\n x_curr, v1_curr, v2_curr, h = x_start, v1_start, v2_start, h_start\n for i in range(1, number_iter + 1):\n if right_break <= x_curr:\n break\n while(True):\n x_next, v1_next, v2_next = rk2euler(x_curr, v1_curr, v2_curr, h)\n x05, v1_05, v2_05 = rk2euler(x_curr, v1_curr, v2_curr, h / 2)\n _, v12, v22 = rk2euler(x05, v1_05, v2_05, h / 2)\n e = max(abs(v1_next - v12), abs(v2_next - v22)) / 3\n if epsilon < e:\n h /= 2\n continue\n if epsilon / 3 <= e <= epsilon:\n break\n if e < epsilon / 3:\n h *= 2\n break\n u1, u2 = calculate_true(x_next)\n x_curr, v1_curr, v2_curr = x_next, v1_next, v2_next\n x_list.append(x_curr)\n v1_list.append(v1_curr)\n v2_list.append(v2_curr)\n u1_list.append(u1)\n u2_list.append(u2)\n return x_list, v1_list, v2_list, u1_list, u2_list\n\nnumber_iter = 250000\nright_break = 500\nx_start, v1_start, v2_start = 0.0, 7.0, 13.0\nh_start = 0.01\nepsilon = 0.0000001\n\ndef main():\n print_table(*calculate(x_start, v1_start, v2_start, h_start, epsilon))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"hard_system/hard_system.py","file_name":"hard_system.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"640647774","text":"\"\"\"\nFrites\n======\n\nFramework of Information Theory for Electrophysiological data and Statistics\n\"\"\"\nimport logging\n\nfrom frites import (io, core, conn, stats, utils, workflow, simulations, # noqa\n estimator)\n\n__version__ = \"0.3.8\"\n\n# -----------------------------------------------------------------------------\n# Set 'info' as the default logging level\nlogger = logging.getLogger('frites')\nio.set_log_level('info')\n\n# -----------------------------------------------------------------------------\n# get / set config\n\n\ndef get_config():\n \"\"\"Get the global configuration of frites.\"\"\"\n from frites.config import CONFIG\n return CONFIG\n\n\ndef set_config(key, value, verbose=None):\n \"\"\"Change the global config of frites.\n\n Parameters\n ----------\n key : string\n Entry of the config\n value : dict / list\n The new value for the selected key. The type should be the same as the\n default one\n \"\"\"\n io.set_log_level(verbose)\n assert isinstance(key, str)\n CONFIG = get_config() # noqa\n assert key in CONFIG.keys(), f\"The key {key} doesn't exist.\"\n CONFIG[key] = value\n logger.info(f\"The key {key} has been updated\")\n","sub_path":"frites/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"358997534","text":"import os\r\nfrom ASP_Classes import *\r\n\r\n\r\ndef line2class(character):\r\n switcher = {\r\n '*': 0,\r\n 'G': 1,\r\n 'T': 2,\r\n 'LT': 3,\r\n 'Vb': 4,\r\n 'Sb': 5,\r\n }\r\n return switcher.get(character,0)\r\n\r\ndef initCircuit(lineSplit):\r\n global circ1\r\n\r\n if lineSplit[0][0] == 'G': # init new generator\r\n tempGen = Generator(lineSplit)\r\n circ1.generators.append(tempGen)\r\n elif lineSplit[0][0] == 'T':\r\n tempTrans = Transformer(lineSplit)\r\n circ1.transformers.append(tempTrans)\r\n elif lineSplit[0][0] == 'L':\r\n if lineSplit[0][1] == 'T':\r\n tempLine = TransmissionLine(lineSplit)\r\n circ1.transmissionLines.append(tempLine)\r\n elif lineSplit[0][0] == 'C':\r\n pass\r\n\r\ndef main():\r\n global circ1\r\n\r\n with open(\"Circ1.txt\") as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n lineSplit = line.split(\" \")\r\n initCircuit(lineSplit)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n circ1 = Circuit(\"Circ1.txt\")\r\n main()\r\n\r\n\r\n","sub_path":"ASP_simulator/ASP.py","file_name":"ASP.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"585208748","text":"from database_files import models\nfrom database_files.module_settings import DBF_SETTINGS\nfrom django.core.files.storage import Storage\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nimport os\n\nclass DatabaseStorage(Storage):\n\n\tdef __init__(self, encrypt=DBF_SETTINGS[\"DATABASE_FILES_ENCRYPTION\"], compress=DBF_SETTINGS[\"DATABASE_FILES_COMPRESSION\"], *args, **kwargs):\n\t\tself.encrypt = encrypt\n\t\tself.compress = compress\n\t\tsuper(DatabaseStorage, self).__init__(*args, **kwargs)\n\n\tdef _open(self, name, mode='rb'):\n\t\ttry:\n\t\t\tf = models.DatabaseFile.objects.get(filepath=name)\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\t\treturn f.retreive()\n\n\tdef _save(self, name, content):\n\t\tnewname = name\n\t\tnewpath, newfilename = os.path.split(name)\n\t\tnewfilenamestem, newfilenameext = os.path.splitext(newfilename)\n\t\tpostpend = 0\n\t\twhile self.exists(newname):\n\t\t\tpostpend += 1\n\t\t\tnewname = newpath + newfilenamestem + \"-\" + unicode(postpend) + newfilenameext\n\t\tf = models.DatabaseFile.objects.create(\n\t\t\t\tfilepath=newname,\n\t\t\t\t)\n\t\tf.store(content,\n\t\t\t\tencrypt=self.encrypt,\n\t\t\t\tcompress=self.compress,\n\t\t\t\t)\n\t\treturn newname\n\n\tdef exists(self, name):\n\t\treturn models.DatabaseFile.objects.filter(filepath=name).exists()\n\n\tdef delete(self, name):\n\t\ttry:\n\t\t\tmodels.DatabaseFile.objects.get(filepath=name).delete()\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\tpass\n\n\tdef url(self, name):\n\t\ttry:\n\t\t\tfile = models.DatabaseFile.objects.get(filepath=name)\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\t\treturn reverse('database_file', kwargs={'file_id': file.pk})\n\n\tdef size(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).size\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn 0\n\n\tdef modified_time(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).modified_time\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\n\tdef accessed_time(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).accessed_time\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n\n\tdef created_time(self, name):\n\t\ttry:\n\t\t\treturn models.DatabaseFile.objects.get(filepath=name).created_time\n\t\texcept models.DatabaseFile.DoesNotExist:\n\t\t\treturn None\n","sub_path":"database_files/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"32957213","text":"from custom.math import (triangle_number as tri, square_number as squ,\n pentagonal_number as pen, hexagonal_number as hex,\n heptagonal_number as hep, octagonal_number as oct)\n\ndef poly_n(n):\n return (3, tri(n)), (4, squ(n)), (5, pen(n)), (6, hex(n)), (7, hep(n)), (8, oct(n))\n\n\ndef next(types, value):\n if len(types) == 6 and value[0] // 100 == value[-1] % 100: # First two 0 == last two 6\n print(value, sum(value))\n else:\n for t, n in dic.get((types[-1], value[-1]), []):\n if t not in types:\n next(types+[t], value+[n])\n\npolys = []\nstart = 19 # 19 oct is first > 999 so start here\nend = 141 # 141 tri is first > 9999 so cut off here\n\nfor n in range(start, end):\n for type, value in poly_n(n):\n if 1000 <= value <= 9999 and value % 100 > 9:\n polys.append((type, value))\n\ndic = {}\n\nfor type_1, value_1 in polys:\n for type_2, value_2 in polys:\n if type_1 != type_2 and value_1 % 100 == value_2 // 100:\n dic[type_1, value_1] = dic.get((type_1, value_1), []) + [(type_2, value_2)]\n\nfor type, value in dic:\n next([type], [value])\n","sub_path":"Problems 051 - 100/Problem 061.py","file_name":"Problem 061.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"78440091","text":"import bs4\n\nindex = open('index.html')\nindexSoup = bs4.BeautifulSoup(index, \"html.parser\")\n\ndef displayMatches(selected):\n \"\"\"Displays the text of selected elements\"\"\"\n for ix, el in enumerate(selected):\n print(\"\\t\" + str(ix+1) + \".) \" + el.getText())\n print(\"\")\n\nprint(\"Title:\", indexSoup.select('title')[0].getText())\n\npars = indexSoup.select('p')\nprint(\"Paragraphs:\")\ndisplayMatches(pars)\n \nlinks = indexSoup.select('a')\nprint(\"Links:\")\ndisplayMatches(links)\n\nbigtext = indexSoup.select('.bigtext')\nprint(\"With class 'bigtext':\")\ndisplayMatches(bigtext)\n\nscraem = indexSoup.select('#scraem')\nprint(\"With ID 'scraem':\")\ndisplayMatches(scraem)\n ","sub_path":"043-beautiful-soup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"546860083","text":"import clr\nclr.AddReference('System.Drawing')\nclr.AddReference('System.Windows.Forms')\n\nfrom System.Drawing import *\nfrom System.Windows.Forms import *\n\nclass MyForm(Form):\n def __init__(self):\n # Create child controls and initialize form\n\n #hello btn\n self.btn = Button()\n self.btn.Text = 'Hello'\n self.btn.AutoSize = True\n self.btn.Location = Point(5,5)\n self.btn.Click += self.btn_Click\n #hello lable\n self.lbl = Label()\n self.lbl.Text = \"this is a word\"\n self.lbl.Location = Point(50,5)\n \n \n # add Controls \n self.Controls.Add(self.btn)\n self.Controls.Add(self.lbl)\n #define event \n def btn_Click(self,sender,e):\n MessageBox.Show(\"hello\")\n \n\nApplication.EnableVisualStyles()\nApplication.SetCompatibleTextRenderingDefault(False)\n\nform = MyForm()\nApplication.Run(form)\n\n","sub_path":"Python/Project/WindowsApplication1/WindowsApplication1/WindowsApplication1.py","file_name":"WindowsApplication1.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"623984660","text":"linha = list()\ncoluna = list()\nmatriz = list()\npares = 0\nsoma = 0\n\n#lina\nfor poslin in range(0, 3):\n for poscol in range(0, 3):\n valor = int(input(f'Digite um valor para [{poslin}, {poscol}]: '))\n coluna.insert(poscol, valor)\n \n \n linha.insert(poslin, coluna[:])\n coluna.clear()\n \nmatriz = linha[:]\n\nprint('=-'*30)\nfor linmat in matriz:\n for colmat in linmat:\n print(f'[{colmat:^5}]',end='')\n\n #soma dos valores pares\n if colmat % 2 == 0:\n pares += colmat\n \n print('')\nprint('=-'*30)\n\n\n\nprint(f'A soma dos valores pares é {pares}')\n\nfor c in range(0,3):\n soma += matriz[c][2]\nprint(f'A soma dos valores da terceira coluna é {soma}')\n\n\nprint(f'O maior valor da segunda linha é {max(matriz[1])}')\n","sub_path":"ex087maissobrematriz.py","file_name":"ex087maissobrematriz.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"123015140","text":"input = 1358\n\ndef isOpen(x, y):\n number = (x * x) + (3 * x) + (2 * x * y) + y + (y * y)\n number += input\n binary = bin(number)\n one = 0\n for b in binary:\n if b == \"1\":\n one += 1\n return one % 2 == 0\n\nvisited = {}\npossible = []\ndestination = False\n\ndef rec(x, y, steps):\n global destination\n\n # dont visit multiple times, which could create loops\n if (x,y) in visited or x < 0 or y < 0:\n return\n\n if steps == 51:\n destination = True\n return\n \n # add to visited\n visited[(x,y)] = True\n\n steps += 1\n # check if possible to set a step for every direction\n if isOpen(x+1, y):\n possible.append([x+1, y, steps])\n if isOpen(x, y+1):\n possible.append([x, y+1, steps])\n if isOpen(x-1, y):\n possible.append([x-1, y, steps])\n if isOpen(x, y-1):\n possible.append([x, y-1, steps])\n\nrec(1, 1, 0) # starting point\nwhile not destination:\n for i in possible:\n rec(i[0], i[1], i[2])\n\nprint(len(visited))\n\n# part 2: 141\n","sub_path":"2016/13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"340176702","text":"import json\n\nfrom klein import Klein\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nfrom paradrop.base import pdutils\nfrom paradrop.base.output import out\nfrom . import cors\n\n\nclass ChangeApi(object):\n routes = Klein()\n\n def __init__(self, update_manager):\n self.update_manager = update_manager\n\n @routes.route('/', methods=['GET'])\n def get_changes(self, request):\n \"\"\"\n Get list of active and queued changes.\n\n Note: we use the term \"change\" even though, internally, the objects are\n referred to as \"updates\". The word \"update\" has become so overloaded it\n causes much confusion. A \"change\" is an atomic and self-contained\n alteration to the running state of the system. A \"change\" could install\n a chute, remove a chute, change the host configuration, etc.\n \"\"\"\n cors.config_cors(request)\n request.setHeader('Content-Type', 'application/json')\n\n changes = []\n\n update = self.update_manager.active_change\n if update is not None:\n changes.append({\n 'id': update.change_id,\n 'updateClass': update.updateClass,\n 'updateType': update.updateType,\n 'name': getattr(update, 'name', None),\n 'version': getattr(update, 'version', None),\n 'status': 'processing'\n })\n\n for update in self.update_manager.updateQueue:\n changes.append({\n 'id': update.change_id,\n 'updateClass': update.updateClass,\n 'updateType': update.updateType,\n 'name': getattr(update, 'name', None),\n 'version': getattr(update, 'version', None),\n 'status': 'queued'\n })\n\n return json.dumps(changes)\n","sub_path":"paradrop/daemon/paradrop/backend/change_api.py","file_name":"change_api.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"101089229","text":"from string import ascii_letters\nfrom nltk.tokenize import sent_tokenize\nfrom wordcloud import WordCloud\nfrom PIL import Image\nfrom wordcloud import ImageColorGenerator\nfrom gensim.models import word2vec\nfrom sklearn.manifold import TSNE\n\n\nimport nltk\nimport math\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\nfrom konlpy.tag import Okt\n\n'''\n불러오는 파일 목록\n update_word_remake.csv : 업데이트 날짜, 업데이트 분류 키워드 항목\n update_review/update_review_YYYY년MM월DD일.csv : 업데이트 기간별 리뷰 데이터\n noun stopword.txt \n YYYY년MM월DD일_category.txt\n \n \n저장하는 파일 목록\n'''\n\n\n\nplt.rc('font', family='Malgun Gothic')\nplt.rcParams[\"figure.figsize\"] = (15,6)\n\n# 감정별 이미지\nimage_path = {'긍정':'positive.png','부정':'negative.jpg','복합':'middle.png'}\n\nfilename = 'update_word_remake.csv'\nforFrame = pd.read_csv(filename,index_col=0,usecols=['update_id','year','month','day','word'])\n\n# fd 전체 날짜\nforFrame['fd'] = forFrame['year'].apply(lambda x : str(x))\nforFrame['fd'] += forFrame['month'].apply(lambda x : '-'+str(x))\nforFrame['fd'] += forFrame['day'].apply(lambda x : '-'+str(x))\n\n\ndef analyData(forFrame):\n # 내가 사용할 형태소 분석기 불러오기\n okt = Okt()\n review_dict = {}\n\n\n # 불용어 처리\n Stopword = open('noun_stopword.txt','r',encoding='utf-8').read().split(',')\n # 각 업데이트별 데이터 분석\n for itrow in forFrame.iterrows():\n # 해당 업데이트에 관련된 리뷰데이터 파일 불러오기\n reviewframe = pd.read_csv(f'update_review/update_review_{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일.csv')\n\n # 리뷰프레임을 Label로 구별하여 review_dict 사전에 넣기\n # review_dict['긍정'] = reviewframe\n review_dict['긍정'] = reviewframe[reviewframe['LABEL'] =='긍정']\n review_dict['복합'] = reviewframe[reviewframe['LABEL'] == '복합']\n review_dict['부정'] = reviewframe[reviewframe['LABEL'] == '부정']\n # review_dict['긍정'] = pd.concat([review_dict['긍정'],review_dict['복합']])\n\n\n # 감정별 데이터 분석\n for label,f in review_dict.items(): # items(감정, 감정 프레임)\n print(label)\n\n frame_row = [] # 명 : 키워드 , ��: 키워드 , 동 : 키워드\n\n noun_tokens = set() # score에 사용할 명사 데이터\n adjec_s = set()\n verb_s = set()\n unk_s = set()\n freq_dict = {}\n all_sentense_list = []\n\n # 리뷰 개수 설정하기\n item_cnt = 30000\n text_id_list = list(f['ID'][:item_cnt])\n print(text_id_list)\n # 각 감정 프레임에서 리뷰 100개만 형태소 분석기 돌리기\n # 한 리뷰에 해당하는 단어들 집합 넣을 사전 : TDM 행렬을 만드는데 사용\n review_tokens_dict = dict()\n for textid,text in enumerate(f['CONTENTS'][:item_cnt]): # contents iter\n # 해당 리뷰의 토큰은 textid key로 value list에 다 담는다.\n review_tokens_dict[text_id_list[textid]] = list()\n\n # 리뷰 문자 토큰화\n sent_tokens = sent_tokenize(text)\n # print(sent_tokens)\n # print('sent_l',len(sent_l))\n\n\n # 각 문장 토큰 단어 토큰화하기\n for sent_token in sent_tokens:\n\n word_tokens = [] # 각 문자의 단어 토큰\n\n # okt 형태소 분석기\n pos = okt.pos(sent_token,norm=True)\n # print(pos)\n\n # 명사,동사,형용사 만 추출하기\n pos = [x for x in pos\n if x[0] not in('롤러','지금','다른') and\n x[1] in ['Noun','Verb','Adjective','Unknown'] and\n len(x[0])>1]\n print(pos)\n # word2vec에 학습시킬 데이터 (명사,형용사,동사)\n word_tokens.extend([ x[0] for x in pos ])\n\n # word2vct 학습후 가중치행렬롤 만든 키워드 ( 명사)\n noun_tokens.update([ x[0] for x in pos if x[1] in ['Noun'] ]) # 중복제거 있음\n\n review_tokens_dict[text_id_list[textid]].extend([ x[0] for x in pos if x[1] in ['Noun'] ]) # 중복제거 없음 : 빈도수 행렬\n\n # 학습데이터 집합에 넣기\n all_sentense_list.append(word_tokens)\n # end Okt 형태소 분석기\n \"\"\" \n end result :\n all_sentense_list\n review_tokens_dict\n noun_tokens\n \"\"\"\n # end sent_tokenizer 문장 분석기\n\n\n # 분류한 토큰으로 word2vec 학습 시키기\n model = word2vec.Word2Vec(all_sentense_list,\n size=100,\n window=3,\n iter=5,\n min_count=1,\n hs=1,\n sg=1,\n workers=6)\n\n # size 100 concat_data dimesion 2로 줄이기\n tsne = TSNE(n_components=2) # 2차원 설정\n\n # noun_vocab : 학습된 명사 모록\n noun_vocab = [ w for w in model.wv.vocab if w in noun_tokens and w not in Stopword]\n W_data = model.wv[noun_vocab]\n # tsne\n W_tsne = tsne.fit_transform(W_data)\n\n # 차원 축소한 데이터 dataframe으로 만들기\n tsneFrame = pd.DataFrame(W_tsne,index=noun_vocab,columns=['x','y'])\n tsneFrame.to_csv(f'uclid_data/tsneFrame_{label}.csv')\n # print(tsneFrame)\n\n ################################\n # plt.figure()\n # # tsne프레임으로 좌표 그리기\n #\n # # fig.set_size_inches(100, 80)\n # # ax = fig.subplots()\n #\n # plt.scatter(tsneFrame['x'], tsneFrame['y'])\n # plt.title(f'{label}의 명사 관계도')\n # for word, pos in tsneFrame.iterrows():\n # plt.annotate(word, pos, fontsize=5)\n # ### 좌표 그리고 그 표를 파일로 저장하기\n # plt.savefig(f'uclid_data/{label}_noun_scatter.png', dpi=600, bbox_inches='tight')\n ###############################################\n\n # 거리 행렬 구하기 : noun_vocab * noun_vocab\n # 거리 행렬 프레임 데이터\n data_n = len(noun_vocab)**2\n print('data_n','\\n',data_n)\n uclid_data_list = list()\n\n sum_distance = 0\n for e1,row in tsneFrame.iterrows():\n frame_row_dict = dict()\n\n mean = 0\n\n # 기준 단어 좌표\n e1x = row.x\n e1y = row.y\n\n # 상대 단어 좌표와의 유클리드 거리 계산\n for e2 in noun_vocab:\n e2x = tsneFrame.loc[e2].x\n e2y = tsneFrame.loc[e2].y\n # 유클리드 거리 계산\n distance = math.sqrt((e1x-e2x)**2+(e1y-e2y)**2)\n sum_distance += distance\n frame_row_dict[e2] = distance\n # end row distance calculation\n uclid_data_list.append(frame_row_dict)\n uclidFrame = pd.DataFrame(uclid_data_list,index=noun_vocab)\n uclidFrame.to_csv(f'uclid_data/uclidFrame_{label}.csv')\n\n mean_distance = sum_distance/data_n\n\n v = 0\n\n for idx,row in uclidFrame.iterrows():\n for distance in row:\n v += (distance - mean_distance)**2\n\n v = v/data_n\n\n # var = uclidFrame.var()\n # print('var','\\n',var)\n # 가중치 행렬 구하기 : exp(-(거리 제곱)/(2*분산))\n weight_data_list = list()\n for w in noun_vocab:\n one_row_dict = dict()\n #가중치 계산\n # v = var[w]\n for wid,dis in uclidFrame[w].items():\n weight = math.exp(-(dis**2)/(2*v))\n one_row_dict[wid] = weight\n weight_data_list.append(one_row_dict)\n\n weightFrame = pd.DataFrame(weight_data_list,index=noun_vocab)\n weightFrame.to_csv(f'uclid_data/weightFrame_{label}.csv')\n\n # 가중치 행렬에서 업데이트 관련 키워드만 추출(명사)\n # 실제 채점할 단어 목록 리스트\n # print(list(itrow[1]['word']))\n up_kwd_list = itrow[1]['word'].split(',')\n # up_kwd_list = ['태블릿', '토큰', '조이스틱', '기기',\\\n # '매칭', '찾기', '트로피', '오락실', '테이크다운', \\\n # '바이러스', '비비', '닌자', '큐피트', '에이전트', \\\n # '길거리', '코알라', '히로인', '보석', '핫존', '미스터', '로봇', '깡통', '가방']\n category_noun_list = [w for w in weightFrame.index if w in up_kwd_list]\n # print('category_noun_list',category_noun_list)\n categoryFrame = weightFrame.loc[category_noun_list,:]\n # print('categoryFrame',categoryFrame)\n\n\n\n # TDM 행렬 구하기 : 단어 : 빈도수\n TDM_data_list = []\n for textid,review_token in review_tokens_dict.items():\n one_row_dict = {x:0 for x in noun_vocab}\n for t in review_token:\n if t in noun_vocab:\n one_row_dict[t] = one_row_dict[t]+1\n TDM_data_list.append(one_row_dict)\n\n TdmFrame = pd.DataFrame(TDM_data_list,index = review_tokens_dict.keys()).T\n TdmFrame.to_csv(f'uclid_data/tdmFrame_{label}.csv')\n\n # print(TdmFrame)\n\n score_arr = np.dot(categoryFrame,TdmFrame)\n\n # print('score_arr',score_arr)\n scoreFrame = pd.DataFrame(score_arr,index=category_noun_list,columns=review_tokens_dict.keys())\n # print(scoreFrame)\n scoreFrame.to_csv(f'uclid_data/scoreFrame_{label}.csv')\n # print(scoreFrame)\n # 분류된 단어 리스트\n top_word_list = []\n for textid, row in scoreFrame.T.iterrows():\n sort_key = row.sort_values(ascending=False)[:1].index\n # print(sort_key)\n top_word_list.append(list(sort_key)[0])\n print(top_word_list)\n # 분류된 단어 목록 파일에 저장\n with open(f'top_word_list_{label}.txt','w',encoding='utf-8') as file:\n file.write(','.join(top_word_list))\n\n # 분류된 단어가 속한 카테고리로 카운트:\n # txt file format\n # category1:kwd1,kwd2,kwd3....\n # category2:kwd1,kwd2,kwd3....\n imsi_list = [x.strip() for x in open(f'reference_category/{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_category.txt', 'r', encoding='utf-8').readlines()]\n # 카테고리별 단어 집합 사전\n\n Category_dict = {i.split(':')[0]: i.split(':')[1].split(',') for i in imsi_list}\n Category_detail = {i.split(':')[0]:dict() for i in imsi_list}\n # 카테고리 : []\n Category_frequence_dict = { x:0 for x in Category_dict }\n for word in top_word_list:\n for k,v_list in Category_dict.items():\n if word in v_list:\n Category_frequence_dict[k] += 1\n if word in Category_detail[k]:\n Category_detail[k][word] += 1\n else:\n Category_detail[k][word] = 1\n detail_val_list = []\n detail_index_list = []\n for k1,indict in Category_detail.items():\n for k2,val in indict.items():\n detail_val_list.append(val)\n detail_index_list.append(k2)\n\n detailFrame = pd.DataFrame(detail_val_list,index=detail_index_list)\n detailFrame.columns = ['개수']\n\n CategoryFrame = pd.DataFrame(Category_frequence_dict,index=[0])\n CategoryFrame = CategoryFrame.T\n CategoryFrame.columns = ['개수']\n CategoryFrame = CategoryFrame.loc[CategoryFrame['개수']>0]\n\n # 관심 카테고리 비중 그래프 그리기 ########################\n plt.figure()\n CategoryFrame['개수'].plot(kind='pie',autopct='%.2f%%')\n plt.title(f'{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_업데이트_{label}_관심카테고리')\n\n category_file_name = f'pie_graph_category_{label}.png'\n plt.savefig(category_file_name, dpi=600, bbox_inches='tight')\n\n print(CategoryFrame.index)\n\n # 키워드별 빈도 막대 그래프 :\n plt.figure()\n barFrame = pd.DataFrame(top_word_list,columns=['kwd'])\n barGroup = barFrame.groupby(by='kwd')['kwd']\n bardata = barGroup.count()\n\n\n bardata.plot(kind='barh')\n plt.title(f'{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_{label}_카테고리_세부내역')\n bar_file_name = f'bar_graph_category_{label}.png'\n plt.savefig(bar_file_name, dpi=600, bbox_inches='tight')\n # plt.show()\n\n fig, ax = plt.subplots()\n\n size = 0.3\n vals = np.array([[60., 32.], [37., 40.], [29., 10.]])\n\n cmap = plt.get_cmap(\"tab20c\")\n outer_colors = cmap(np.arange(3) * 4)\n inner_colors = cmap(np.array([1, 2, 5, 6, 9, 10]))\n\n ax.pie(CategoryFrame['개수'],labeldistance=1.1, labels=CategoryFrame.index,radius=1, colors=outer_colors,\n wedgeprops=dict(width=size, edgecolor='w'))\n\n ax.pie(detailFrame['개수'],labeldistance=0.7,labels=detailFrame.index, radius=1 - size, colors=inner_colors,\n wedgeprops=dict(width=size, edgecolor='w'))\n\n ax.set(aspect=\"equal\", title='Pie plot with `ax.pie`')\n plt.show()\n\n # top_Series = pd.Series(top_10_list)\n # result = top_Series.value_counts()\n # top_dict = dict()\n # for x in result.items():\n # top_dict[x[0]] = x[1]\n\n # 워드 클라우드에 사용할 top 10\n # top_10_list = []\n # for textid,row in scoreFrame.T.iterrows():\n # sort_key = row.sort_values(ascending=False)[:10].index\n # print(sort_key)\n # top_10_list.extend(sort_key)\n #\n # top_Series = pd.Series(top_10_list)\n # result = top_Series.value_counts()\n # top_dict = dict()\n # for x in result.items():\n # top_dict[x[0]] = x[1]\n\n ########## 워드 클라우드###########\n # plt.figure()\n # mask = np.array(Image.open(image_path[label]))\n #\n # image_color = ImageColorGenerator(mask)\n #\n # wc = WordCloud(font_path='malgun.ttf', max_words=100, mask=mask,\n # background_color='rgba(255,255,255,0)', mode='RGBA', random_state=43)\n # wc.generate_from_frequencies(top_dict)\n # newwc = wc.recolor(color_func=image_color)\n # plt.imshow(wc)\n # plt.title(f'update_{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일')\n # plt.axis('off')\n #\n # # 워드클라우드 이미지로 저장하기\n # wcimgfilename = f'keyword_wordcloud/wcd_{itrow[1][0]}년{itrow[1][1]}월{itrow[1][2]}일_{label}.png'\n # plt.savefig(wcimgfilename, dpi=600, bbox_inches='tight')\n # print(wcimgfilename + '파일이 저장되었습니다.')\n # plt.close()\n ##########\n # end 감정별 데이터 분석\n\n # 각 업데이트별 데이터 분석\n\n\n# 업데이트 하나만 가지고 테스트 해본다.\nprint(forFrame.iloc[[0],:])\nanalyData(forFrame.iloc[[1],:])","sub_path":"업데이트별 리뷰 가중치행렬 카테고리 분석/uclid_matrix_keyword.py","file_name":"uclid_matrix_keyword.py","file_ext":"py","file_size_in_byte":16498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"355386238","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule `chatette.parsing.parser_utils`\nContains utility functions that are specific to\nthe parsing of template files.\n\"\"\"\n\n\nimport re\nfrom enum import Enum\n\nfrom chatette import deprecations\nimport chatette.modifiers.representation as mods\n\nCOMMENT_SYM_DEPRECATED = ';'\nCOMMENT_MARKER = '//'\nESCAPE_SYM = '\\\\'\n\nALIAS_SYM = '~'\nSLOT_SYM = '@'\nINTENT_SYM = '%'\nUNIT_OPEN_SYM = '[' # This shouldn't be changed\nUNIT_CLOSE_SYM = ']' # id.\n\nANNOTATION_OPEN_SYM = '('\nANNOTATION_CLOSE_SYM = ')'\nANNOTATION_SEP = ','\nANNOTATION_ASSIGNMENT_SYM = ':'\nANNOTATION_IGNORED_SYM = \"'\"\n\nCHOICE_OPEN_SYM = r'{'\nCHOICE_CLOSE_SYM = r'}'\nCHOICE_SEP = '/' # TODO: deprecate and rather use '|'\n\nVARIATION_SYM = '#'\nRAND_GEN_SYM = '?' # This shouldn't be changed\nPERCENT_GEN_SYM = '/'\nCASE_GEN_SYM = '&'\nARG_SYM = '$' # This shouldn't be changed\n\nALT_SLOT_VALUE_NAME_SYM = '='\nALT_SLOT_VALUE_FIRST_SYM = '/'\n\nINCLUDE_FILE_SYM = '|'\n\n# TODO add special characters at the beginning of those to prevent people from\n# using them by chance\nRESERVED_VARIATION_NAMES = [\"all-variations-aggregation\", \"rules\",\n \"nb-gen-asked\", \"arg\"]\n\n\nPATTERN_COMMENT_DEPRECATED = re.compile(r\"(?= length:\n return None\n\n end_index = starting_index\n nb_closing_brackets_expected = 1\n while end_index < length and nb_closing_brackets_expected > 0:\n if tokens[end_index] == UNIT_OPEN_SYM:\n nb_closing_brackets_expected += 1\n elif tokens[end_index] == UNIT_CLOSE_SYM:\n nb_closing_brackets_expected -= 1\n end_index += 1\n end_index -= 1\n if end_index == starting_index:\n return None\n\n return tokens[starting_index:end_index]\n\ndef get_annotation_interior(tokens):\n \"\"\"\n Returns a list of tokens that represent the inside of the annotation\n that is present on this line.\n Returns `None` if there is no annotation in `tokens`.\n \"\"\"\n length = len(tokens)\n starting_index = 0\n while starting_index < length and tokens[starting_index] != ANNOTATION_OPEN_SYM:\n starting_index += 1\n starting_index += 1\n if starting_index >= length:\n return None\n\n end_index = starting_index\n nb_closing_brackets_expected = 1\n while end_index < length and nb_closing_brackets_expected > 0:\n if tokens[end_index] == ANNOTATION_OPEN_SYM:\n nb_closing_brackets_expected += 1\n elif tokens[end_index] == ANNOTATION_CLOSE_SYM:\n nb_closing_brackets_expected -= 1\n end_index += 1\n end_index -= 1\n if end_index == starting_index:\n return None\n\n return tokens[starting_index:end_index]\n\n\ndef check_declaration_validity(tokens_unit_inside):\n \"\"\"\n Check that the interior of a declaration is syntactically legal.\n Raises a `SyntaxError` if the declaration is invalid.\n The constraints checked are:\n - there is only one modifier of each type\n - there are no randgen or percentgen modifiers\n - `&` is at the beginning of the declaration (or nowhere)\n - there is a name after `#`\n - there is a value after `$`\n - there is a name either after `&` or at the beginning\n - the variation names are not reserved\n \"\"\"\n casegen_count = tokens_unit_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a unit declaration.\")\n if casegen_count == 1 and tokens_unit_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a unit declaration.\")\n\n if casegen_count == 0 and is_special_sym(tokens_unit_inside[0]):\n raise SyntaxError(\"Unit declarations must be named.\")\n elif casegen_count == 1 and len(tokens_unit_inside) <= 1:\n raise SyntaxError(\"Unit declarations must be named.\")\n elif casegen_count == 1 and is_special_sym(tokens_unit_inside[1]):\n raise SyntaxError(\"Unit declarations must be named.\")\n\n variation_count = tokens_unit_inside.count(VARIATION_SYM)\n if variation_count > 1:\n raise SyntaxError(\"There can be only one variation modifier \"+\n \"in a unit declaration.\")\n if variation_count == 1:\n variation_name_index = tokens_unit_inside.index(VARIATION_SYM)+1\n if variation_name_index >= len(tokens_unit_inside) \\\n or is_special_sym(tokens_unit_inside[variation_name_index]):\n raise SyntaxError(\"Variations must be named.\")\n variation_name = tokens_unit_inside[variation_name_index]\n if variation_name in RESERVED_VARIATION_NAMES:\n raise SyntaxError(\"The following variation names are reserved: \"+\n str(RESERVED_VARIATION_NAMES)+\". Please don't \"+\n \"use them.\")\n\n argument_count = tokens_unit_inside.count(ARG_SYM)\n if argument_count > 1:\n raise SyntaxError(\"There can be only one argument modifier \"+\n \"per unit declaration.\")\n if argument_count == 1:\n argument_name_index = tokens_unit_inside.index(ARG_SYM)+1\n if argument_name_index >= len(tokens_unit_inside) \\\n or is_special_sym(tokens_unit_inside[argument_name_index]):\n raise SyntaxError(\"Arguments must be named.\")\n\n # TODO remove the following because you should allow ? and / in declarations?\n # or the tokenizer should not consider them special characters in this\n # case\n randgen_count = tokens_unit_inside.count(RAND_GEN_SYM)\n if randgen_count > 0:\n raise SyntaxError(\"Unit declarations cannot take a random generation \"+\n \"modifier.\")\n percentgen_count = tokens_unit_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 0:\n raise SyntaxError(\"Unit declarations cannot take a percentage for \"+\n \"the random generation modifier.\")\n\n\ndef check_reference_validity(tokens_unit_inside):\n \"\"\"\n Check that the interior of a reference is syntactically legal.\n Raises a `SyntaxError` if the reference is invalid.\n The constraints checked are:\n - there is only one modifier of each type\n - `/` is not there unless `?` is there\n - there is a number between 0 and 100 if `/` is present\n - `&` is at the beginning of the declaration (or nowhere)\n - there is a name after `#`\n - there is a name either after `&` or at the beginning\n \"\"\"\n casegen_count = tokens_unit_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a unit reference.\")\n if casegen_count == 1 and tokens_unit_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a unit reference.\")\n\n if casegen_count == 0 and is_special_sym(tokens_unit_inside[0]):\n raise SyntaxError(\"Unit references must be named.\")\n elif casegen_count == 1 and len(tokens_unit_inside) <= 1:\n raise SyntaxError(\"Unit references must be named.\")\n elif casegen_count == 1 and is_special_sym(tokens_unit_inside[1]):\n raise SyntaxError(\"Unit references must be named.\")\n\n variation_count = tokens_unit_inside.count(VARIATION_SYM)\n if variation_count > 1:\n raise SyntaxError(\"There can be only one variation modifier \"+\n \"in a unit reference.\")\n if variation_count == 1:\n variation_name_index = tokens_unit_inside.index(VARIATION_SYM)+1\n if variation_name_index >= len(tokens_unit_inside) \\\n or is_special_sym(tokens_unit_inside[variation_name_index]):\n raise SyntaxError(\"Variations must be named.\")\n variation_name = tokens_unit_inside[variation_name_index]\n if variation_name in RESERVED_VARIATION_NAMES:\n raise SyntaxError(\"The following variation names are reserved: \"+\n str(RESERVED_VARIATION_NAMES)+\". Please don't \"+\n \"use them.\")\n\n argument_count = tokens_unit_inside.count(ARG_SYM)\n if argument_count > 1:\n raise SyntaxError(\"There can be only one argument modifier \"+\n \"per unit reference.\")\n # if argument_count == 1:\n # argument_name_index = tokens_unit_inside.index(ARG_SYM)+1\n # if argument_name_index >= len(tokens_unit_inside) \\\n # or is_special_sym(tokens_unit_inside[argument_name_index]):\n # raise SyntaxError(\"Arguments must be named.\")\n\n randgen_count = tokens_unit_inside.count(RAND_GEN_SYM)\n if randgen_count > 1:\n raise SyntaxError(\"There can be only one random generation modifier \"+\n \"per unit reference.\")\n percentgen_count = tokens_unit_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 1:\n raise SyntaxError(\"There can be only one percentage for generation \"+\n \"modifier per unit reference.\")\n if percentgen_count == 1 and randgen_count == 0:\n raise SyntaxError(\"There cannot be a percentage for generation \"+\n \"modifier if there is no random generation modifier \"+\n \"(did you mean to escape '\"+PERCENT_GEN_SYM+\"'?)\")\n if percentgen_count == 1:\n index_randgen = tokens_unit_inside.index(RAND_GEN_SYM)\n index_percentgen = tokens_unit_inside.index(PERCENT_GEN_SYM)\n if index_randgen > index_percentgen:\n raise SyntaxError(\"A percentage for generation modifier must \"+\n \"always be right after the random generation \"+\n \"modifier.\")\n if index_percentgen == len(tokens_unit_inside)-1:\n raise SyntaxError(\"No percentage found after the special symbol \"+\n \"for percentage modifier.\")\n try:\n percentgen = int(tokens_unit_inside[index_percentgen+1])\n except ValueError:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"an integer.\")\n if percentgen < 0 or percentgen > 100:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"between 0 and 100.\")\n\ndef check_choice_validity(tokens_choice_inside):\n \"\"\"\n Check that the interior of a choice is syntactically legal.\n Deals with word groups as well.\n Raises a `SyntaxError` if the choice is invalid.\n As any sub-rules can be inside choices, we cannot check anything except\n that the last tokens is not a separator (or the two-to-last one if the\n last one is a random generation modifier).\n \"\"\"\n # TODO: deprecate `/` as choice separators AND percentgen\n # percentgen_count = tokens_choice_inside.count(PERCENT_GEN_SYM)\n # if percentgen_count > 0:\n # raise SyntaxError(\"Choices cannot take a percentage for generation \"+\n # \"modifier.\")\n if len(tokens_choice_inside) > 0:\n if tokens_choice_inside[-1] == CHOICE_SEP:\n raise SyntaxError(\"Choice cannot end with a choice separator. \" +\n \"Did you forget to escape the last character?\")\n if ( len(tokens_choice_inside) > 1\n and tokens_choice_inside[-1] == RAND_GEN_SYM\n and tokens_choice_inside[-2] == CHOICE_SEP):\n raise SyntaxError(\"Choice ends with an empty choice item. \" +\n \"Did you forget to escape the choice separator?\")\n\n\ndef check_word_group_validity(tokens_word_group_inside):\n \"\"\"\n Check that the interior of a choice is syntactically legal.\n Deals with word groups as well.\n Raises a `SyntaxError` if the choice is invalid.\n The constraints checked are:\n - there is only one modifier of each type\n - `/` and `#` are not there\n - `&` is at the beginning of the declaration (or nowhere)\n - choices are separated by '/' (not checked as there can be 0 or 1 choice)\n \"\"\"\n casegen_count = tokens_word_group_inside.count(CASE_GEN_SYM)\n if casegen_count > 1:\n raise SyntaxError(\"There can be only one case generation modifier \"+\n \"in a word group.\")\n if casegen_count == 1 and tokens_word_group_inside.index(CASE_GEN_SYM) != 0:\n raise SyntaxError(\"Case generation modifiers have to be at the start \"+\n \"of a word group.\")\n\n variation_count = tokens_word_group_inside.count(VARIATION_SYM)\n if variation_count > 0:\n raise SyntaxError(\"Word groups cannot take variation modifiers.\")\n\n argument_count = tokens_word_group_inside.count(ARG_SYM)\n if argument_count > 0:\n raise SyntaxError(\"Word groups cannot take arguments.\")\n\n randgen_count = tokens_word_group_inside.count(RAND_GEN_SYM)\n if randgen_count > 1:\n raise SyntaxError(\"There can be only one random generation modifier \"+\n \"per word group.\")\n percentgen_count = tokens_word_group_inside.count(PERCENT_GEN_SYM)\n if percentgen_count > 1:\n raise SyntaxError(\"There can be only one percentage for generation \"+\n \"modifier per word group.\")\n if percentgen_count == 1 and randgen_count == 0:\n raise SyntaxError(\"There cannot be a percentage for generation \"+\n \"modifier if there is no random generation modifier \"+\n \"(did you mean to escape '\"+PERCENT_GEN_SYM+\"'?)\")\n if percentgen_count == 1:\n index_randgen = tokens_word_group_inside.index(RAND_GEN_SYM)\n index_percentgen = tokens_word_group_inside.index(PERCENT_GEN_SYM)\n if index_randgen > index_percentgen:\n raise SyntaxError(\"A percentage for generation modifier must \"+\n \"always be right after the random generation \"+\n \"modifier.\")\n if index_percentgen == len(tokens_word_group_inside)-1:\n raise SyntaxError(\"No percentage found after the special symbol \"+\n \"for percentage modifier.\")\n try:\n percentgen = int(tokens_word_group_inside[index_percentgen+1])\n except ValueError:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"an integer.\")\n if percentgen < 0 or percentgen > 100:\n raise SyntaxError(\"Percentage for generation modifiers need to be \"+\n \"between 0 and 100.\")\n\n\ndef find_name(tokens_inside_unit):\n \"\"\"\n Finds the name of the unit from the tokens that represent the interior of\n a unit declaration or reference (inside the brackets (excluded)).\n @pre: there is no syntax error in this part.\n \"\"\"\n start_index = 0\n if tokens_inside_unit[0] == CASE_GEN_SYM:\n start_index = 1\n name = \"\"\n while ( start_index < len(tokens_inside_unit)\n and not is_special_sym(tokens_inside_unit[start_index])):\n name += tokens_inside_unit[start_index]\n start_index += 1\n return remove_escapement(name)\n\ndef find_words(tokens_inside_word_group):\n \"\"\"\n Finds the words in the tokens that represent the interior of a word group.\n Returns the list of those words in sequence.\n @pre: there is no syntax error in this part.\n \"\"\"\n words = []\n for token in tokens_inside_word_group:\n if token == CASE_GEN_SYM:\n continue\n if token in (RAND_GEN_SYM, VARIATION_SYM, ARG_SYM):\n return words\n words.append(token)\n return words\n\n\ndef find_modifiers_decl(tokens_inside_decl):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a unit declaration. Returns the representation.\n If the percentage of generation was present but couldn't be\n @pre: there is no syntax error in this part (except possibly for\n percentage of generation).\n \"\"\"\n modifiers = mods.UnitDeclarationModifiersRepr()\n\n i = 0\n if tokens_inside_decl[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_variation = False\n expecting_argument = False\n while i < len(tokens_inside_decl):\n if tokens_inside_decl[i] == VARIATION_SYM:\n modifiers.variation_name = \"\"\n expecting_variation = True\n expecting_argument = False\n elif tokens_inside_decl[i] == ARG_SYM:\n modifiers.argument_name = \"\"\n expecting_variation = False\n expecting_argument = True\n elif expecting_variation:\n modifiers.variation_name += tokens_inside_decl[i]\n elif expecting_argument:\n modifiers.argument_name += tokens_inside_decl[i]\n i += 1\n\n modifiers.variation_name = remove_escapement(modifiers.variation_name)\n modifiers.argument_name = remove_escapement(modifiers.argument_name)\n\n return modifiers\n\ndef find_modifiers_reference(tokens_inside_reference):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a reference. Returns the representation.\n @pre: there is no syntax error in this part.\n \"\"\"\n modifiers = mods.ReferenceModifiersRepr()\n\n i = 0\n if tokens_inside_reference[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_randgen_name = False\n expecting_percentgen = False\n expecting_variation = False\n expecting_argument = False\n while i < len(tokens_inside_reference):\n if tokens_inside_reference[i] == RAND_GEN_SYM:\n modifiers.randgen_name = \"\"\n expecting_randgen_name = True\n expecting_percentgen = False\n expecting_variation = False\n expecting_argument = False\n elif tokens_inside_reference[i] == PERCENT_GEN_SYM:\n expecting_randgen_name = False\n expecting_percentgen = True\n expecting_variation = False\n expecting_argument = False\n elif tokens_inside_reference[i] == VARIATION_SYM:\n modifiers.variation_name = \"\"\n expecting_randgen_name = False\n expecting_percentgen = False\n expecting_variation = True\n expecting_argument = False\n elif tokens_inside_reference[i] == ARG_SYM:\n modifiers.argument_value = \"\"\n expecting_randgen_name = False\n expecting_percentgen = False\n expecting_variation = False\n expecting_argument = True\n elif expecting_randgen_name:\n modifiers.randgen_name += tokens_inside_reference[i]\n elif expecting_percentgen:\n modifiers.percentage_randgen = int(tokens_inside_reference[i])\n expecting_percentgen = False\n elif expecting_variation:\n modifiers.variation_name += tokens_inside_reference[i]\n elif expecting_argument:\n modifiers.argument_value += tokens_inside_reference[i]\n i += 1\n\n modifiers.randgen_name = remove_escapement(modifiers.randgen_name)\n modifiers.variation_name = remove_escapement(modifiers.variation_name)\n modifiers.argument_value = remove_escapement(modifiers.argument_value)\n\n return modifiers\n\ndef find_modifiers_word_group(tokens_inside_word_group):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a word group. Returns the representation.\n @pre: there is no syntax error in this part.\n \"\"\"\n modifiers = mods.WordGroupModifiersRepr()\n\n i = 0\n if tokens_inside_word_group[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n i += 1\n\n expecting_randgen_name = False\n expecting_percentgen = False\n while i < len(tokens_inside_word_group):\n if tokens_inside_word_group[i] == RAND_GEN_SYM:\n modifiers.randgen_name = \"\"\n expecting_randgen_name = True\n expecting_percentgen = False\n elif tokens_inside_word_group[i] == PERCENT_GEN_SYM:\n expecting_percentgen = True\n expecting_randgen_name = False\n elif expecting_randgen_name:\n modifiers.randgen_name += tokens_inside_word_group[i]\n elif expecting_percentgen:\n modifiers.percentage_randgen = int(tokens_inside_word_group[i])\n expecting_percentgen = False\n i += 1\n\n modifiers.randgen_name = remove_escapement(modifiers.randgen_name)\n\n return modifiers\n\ndef find_modifiers_choice(tokens_inside_choice):\n \"\"\"\n Finds and create a representation of the modifiers from a list of tokens\n representing the inside of a choice. Returns the representation.\n @pre: there is no syntax error in this part.\n \"\"\"\n modifiers = mods.ChoiceModifiersRepr()\n\n if tokens_inside_choice[0] == CASE_GEN_SYM:\n modifiers.casegen = True\n if tokens_inside_choice[-1] == RAND_GEN_SYM:\n modifiers.randgen = True\n\n return modifiers\n\n\ndef find_nb_examples_asked(annotation_interior):\n \"\"\"\n Returns the training and testing number of examples asked for an intent\n declaration as a tuple. Returns `None` if the numbers given are not numbers.\n @pre: there is no syntax error in the annotation.\n \"\"\"\n if len(annotation_interior) == 0:\n return None\n nb_train = None\n nb_test = None\n\n if len(annotation_interior) == 1:\n nb_train = annotation_interior[0]\n else:\n expecting_train = False\n expecting_test = False\n for token in annotation_interior:\n if ( token not in (ANNOTATION_ASSIGNMENT_SYM, ANNOTATION_SEP)\n and not token.isspace()):\n if PATTERN_NB_TRAIN_EX_KEY.match(token):\n expecting_train = True\n elif PATTERN_NB_TEST_EX_KEY.match(token):\n expecting_test = True\n elif expecting_train:\n nb_train = token\n expecting_train = False\n elif expecting_test:\n nb_test = token\n expecting_test = False\n\n if nb_train is None and nb_test is None:\n return None\n\n if nb_train is not None:\n nb_train = nb_train.replace(ANNOTATION_IGNORED_SYM, \"\")\n if nb_test is not None:\n nb_test = nb_test.replace(ANNOTATION_IGNORED_SYM, \"\")\n\n try:\n nb_train = int(nb_train)\n if nb_test is None:\n nb_test = 0\n else:\n nb_test = int(nb_test)\n except ValueError:\n return None\n return (nb_train, nb_test)\n\n\ndef find_alt_slot_and_index(slot_rule_tokens):\n \"\"\"\n Returns the index of the equal sign and the alt slot value as a 2-tuple,\n from the tokens representing a slot rule. Returns `None` if no alt slot\n value was found.\n @pre: there is no syntax error in this part.\n \"\"\"\n try:\n index = slot_rule_tokens.index(ALT_SLOT_VALUE_NAME_SYM)\n except ValueError:\n return None\n if index+1 < len(slot_rule_tokens):\n i = index+1\n alt_slot_val = slot_rule_tokens[i]\n if alt_slot_val == ' ':\n alt_slot_val = \"\"\n i += 1\n while i < len(slot_rule_tokens):\n alt_slot_val += slot_rule_tokens[i]\n i += 1\n return (index, remove_escapement(alt_slot_val))\n return None\n\n\ndef next_choice_tokens(choice_interior_tokens):\n \"\"\"\n Yields the next choice as a list of tokens in `choice_interior_tokens`.\n @pre: there is no syntax error in this part.\n \"\"\"\n current_choice = []\n for (i, token) in enumerate(choice_interior_tokens):\n if token == CASE_GEN_SYM:\n continue\n elif token == RAND_GEN_SYM:\n if i == len(choice_interior_tokens)-1: # Random generation symbol\n # NOTE: this should be changed if named randgen or percentgen\n # is supported in the future.\n break\n else: # Not a random generation symbol\n current_choice.append(token)\n elif token == CHOICE_SEP:\n yield current_choice\n current_choice = []\n else:\n current_choice.append(token)\n yield current_choice\n\n\n\ndef next_sub_rule_tokens(tokens):\n \"\"\"\n Yields the next sub-rule from a rule\n represented as tokens (i.e. a list of str).\n @pre: `tokens` represents a valid rule.\n \"\"\"\n current_sub_rule = []\n stop_with_char = None\n reading_sub_rule = False\n for token in tokens:\n if reading_sub_rule:\n if token == stop_with_char:\n current_sub_rule.append(token)\n yield current_sub_rule\n current_sub_rule = []\n stop_with_char = None\n reading_sub_rule = False\n else:\n current_sub_rule.append(token)\n else: # Looking for the start of a sub-rule\n if is_start_unit_sym(token): # Unit reference starting point\n current_sub_rule.append(token)\n reading_sub_rule = True\n stop_with_char = UNIT_CLOSE_SYM\n elif token == UNIT_OPEN_SYM: # Word group starting point\n current_sub_rule.append(token)\n reading_sub_rule = True\n stop_with_char = UNIT_CLOSE_SYM\n elif token == CHOICE_OPEN_SYM: # Word group starting point\n current_sub_rule.append(token)\n reading_sub_rule = True\n stop_with_char = CHOICE_CLOSE_SYM\n else: # Word\n yield [token]\n\n\ndef is_sub_rule_word(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens` represents a word.\n @pre: considers `sub_rule_tokens` is never a single space.\n \"\"\"\n return len(sub_rule_tokens) == 1\ndef is_sub_rule_word_group(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents a word group.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == UNIT_OPEN_SYM\ndef is_sub_rule_choice(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents a choice.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == CHOICE_OPEN_SYM\ndef is_sub_rule_alias_ref(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents an alias reference.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == ALIAS_SYM\ndef is_sub_rule_slot_ref(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents a slot reference.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == SLOT_SYM\ndef is_sub_rule_intent_ref(sub_rule_tokens):\n \"\"\"\n Returns `True` if the list of str `sub_rule_tokens`\n represents an intent reference.\n @pre: considers `sub_rule_tokens` to be a valid sub-rule.\n \"\"\"\n return sub_rule_tokens[0] == INTENT_SYM\n","sub_path":"chatette/parsing/parser_utils.py","file_name":"parser_utils.py","file_ext":"py","file_size_in_byte":36916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"646559958","text":"from ConnectDataBase import ConnectDataBase\n\n#class for insert elements in data base\nclass HandlerQuery:\n\n\t#method for insert, delete or update element in table\n\tdef insertToTable(self, query, ConnectionDB):\n\t\tConnectionDB.cursor.execute(query)\n\t\tConnectionDB.Conex.commit()\n\t\t#self.ConnectionDB.cursor.close()\n\n\t#method for generate basic query to data base\n\tdef queryBasicDataBase(self, query, ConnectionDB):\n\t\tConnectionDB.cursor.execute(query)\n\n\t\tcollection_id = []\n\t\tfor element in ConnectionDB.cursor:\n\n\t\t\tcollection_id.append(element)\n\n\t\treturn collection_id\n","sub_path":"view/view/admin/pythonScripts/CrudDataBase.py","file_name":"CrudDataBase.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"620487960","text":"import sys\r\nfrom args import Args, print_exit\r\nfrom fsm import FSM\r\nfrom minimalize import Minimize\r\n\r\n# ----------------------------------< --analyze-string >---------------------------------------#\r\ndef analyze_string(string, automata):\r\n for char in string:\r\n if char not in automata[\"alphabet\"]:\r\n print_exit('0', 1)\r\n\r\n state = automata[\"start\"]\r\n char_cnt = 0\r\n str_len = len(string)\r\n\r\n for char in string:\r\n found = False\r\n \r\n for rule in automata[\"rules\"]:\r\n if rule.c == char and rule.s1 == state:\r\n state = rule.s2\r\n char_cnt += 1\r\n found = True\r\n break;\r\n \r\n if found is False:\r\n return '0'\r\n\r\n if rule.s2 not in automata[\"finals\"]:\r\n return '0'\r\n else:\r\n return '1'\r\n\r\n# -------------------------------------< FINAL OUTPUT >-----------------------------------------#\r\ndef print_FSM(fsm):\r\n str = \"(\\n{\"\r\n\r\n # all states\r\n for state in fsm[\"states\"]:\r\n str += state+', '\r\n str = str[0:-2]+'},\\n{'\r\n\r\n # alphabet\r\n for char in fsm[\"alphabet\"]:\r\n str += '\\''+char+'\\', '\r\n str = str[0:-2]+'},\\n{\\n'\r\n\r\n # rules\r\n for rule in fsm[\"rules\"]:\r\n str += rule+',\\n'\r\n\r\n # start state\r\n str = str[0:-2] + '\\n},\\n' + fsm[\"start\"] + ',\\n{'\r\n\r\n # final states\r\n for state in fsm[\"finals\"]:\r\n str += state+', '\r\n str = str[0:-2]+'}\\n)\\n'\r\n\r\n return str\r\n\r\n# ----------------------------------------------------------------------------------------------#\r\n#Parse arguments\r\nargs = Args()\r\n#Check arguments\r\nargs.check_args()\r\n\r\n#Print help if exists switch\r\nif args.argv.help is True:\r\n if len(sys.argv) != 2:\r\n print_exit(\"Wrong arguments1.\", 1)\r\n print(args.print_help())\r\n sys.exit(0)\r\n\r\n#Use stdin if --input switch does not exist\r\nif args.argv.input is None:\r\n inputFile = sys.stdin\r\nelse:\r\n try:\r\n inputFile = open(args.argv.input, mode=\"r\", newline=\"\", encoding=\"utf-8\")\r\n except IOError:\r\n print_exit(\"Opening input file failed.\", 2)\r\n\r\n#Use stdout if --output switch does not exist\r\nif args.argv.output is None:\r\n outputFile = sys.stdout\r\nelse:\r\n try:\r\n outputFile = open(args.argv.output, mode=\"w\", newline=\"\", encoding=\"utf-8\")\r\n except IOError:\r\n print_exit(\"Opening output file failed.\", 3)\r\n\r\n#Let the scanner do the work and get us all the tokens.\r\nWSFA = FSM(inputFile.read(), args)\r\n\r\n# final output based on command line options\r\nif args.argv.analyze is not None:\r\n ret = analyze_string(args.argv.analyze, WSFA.automata)\r\n outputFile.write(ret)\r\n\r\nelif args.argv.f is True:\r\n outputFile.write(WSFA.nonFinState)\r\n\r\nelif args.argv.m is True:\r\n min = Minimize(WSFA)\r\n outputFile.write(print_FSM( min.automata ))\r\n\r\nelse:\r\n WSFA.create_sorted_automata()\r\n outputFile.write(print_FSM( WSFA.automata ))\r\n\r\ninputFile.close()\r\noutputFile.close()\r\n\r\nexit(0)\r\n","sub_path":"minimize_automata/mka.py","file_name":"mka.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"535608855","text":"\n#coding:utf-8\nimport time\nfrom scrapy.http import Request\nfrom scrapy.spiders import Spider\nfrom urlparse import urljoin\nfrom ..DaiLi import DailiItem\nfrom ..DaiLi import DailiItem1\nfrom urlparse import urljoin\n\nclass Daili(Spider):\n\tname = \"daili\"\n\tallowd_domains = \".proxy360.cn\"\n\tstart_urls = [\"http://www.proxy360.cn/Proxy\"]\n\n\tdef parse(self, response):\n\t\t# items = []\n\t\tip = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[1]/text()''').extract()\n\t\tport = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[2]/text()''').extract()\n\t\tzone = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[4]/text()''').extract()\n\t\ttime = response.xpath('''//div[@class=\"proxylistitem\"]/div[1]/span[last()]/text()''').extract()\n\t\tfor i in zip(ip, port, zone, time):\n\t\t\t# print i[0], i[1], i[2], i[3]\n\t\t\titem = DailiItem()\n\t\t\titem[\"ip\"] = ''.join(i[0]).strip()\n\t\t\titem['port'] = ''.join(i[1]).strip()\n\t\t\titem['zone'] = ''.join(i[2]).strip()\n\t\t\titem['time'] = ''.join(i[3]).strip()\n\t\t\t# items.append(item)\n\t\t\tyield item\n\nclass Daili1(Spider):\n\n\tname = \"daili1\"\n\tallowd_domains = \"youdaili.net\"\n\tstart_urls = [\"http://www.youdaili.net/Daili/http/\"]\n\n\tdef parse(self, response):\n\t\turls = response.url\n\t\tmeta = {\n\t\t\"urls\":urls,\n\t\t}\n\t\tlink = response.xpath('''//ul[@class=\"newslist_line\"]/li/a/@href''').extract()\n\t\tfor lin in link:\n\t\t\t# print lin,\"SSSSSSSSSSSSSSSSSSSSSSSsss\"\n\t\t\tyield Request(lin, callback=self.inof, meta=meta)\n\t\tnext_link = response.xpath('''//ul[@class=\"pages_ulstyle\"]/li[last()-2]/a/@href''').extract()\n\t\tif next_link:\n\t\t\t# print next_link, \"dddddddddddddddddddd\"\n\t\t\tyield Request(urljoin(urls, ''.join(next_link)), callback=self.parse)\n\n\tdef inof(self, response):\n\t\t#urls = response.meta[\"urls\"]\n\t\turls = \"http://www.youdaili.net/Daili/http/\"\n\t\tip = response.xpath('''//div[@class=\"cont_font\"]//p//text()[position()>0 and position()<151]''').extract()\n\t\taddtime = ''.join(response.xpath('''//div[@class=\"cont_time\"]/text()[1]''').re(u'''发布时间:(\\S+)'''))\n\t\titem = DailiItem1()\n\t\tfor i in ip:\n\t\t\titem[\"ip\"] = ''.join(i).split(\":\")[0].strip()\n\t\t\titem['port'] = ''.join(i).split(\":\")[1].split(\"@\")[0].strip()\n\t\t\titem['zone'] = ''.join(i).split(\"@\")[1].strip()\n\t\t\titem['spidertime'] = time.strftime(\"%Y-%m-%d\", time.localtime())\n\t\t\titem['addtime'] = addtime\n\t\t\tyield item \n\t\tlink = response.xpath('''//ul[@class=\"pagelist\"]/li[last()]/a/@href''').extract()\n\t\tif link:\n\t\t\tyield Request(urljoin(urls, ''.join(link)),callback=self.inof)\n","sub_path":"daili/spiders/daili.py","file_name":"daili.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"329338360","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 25 15:01:46 2018\r\n\r\n@author: 47532\r\n\"\"\"\r\n\r\nimport re\r\nimport regex\r\nimport tensorflow as tf\r\nimport pandas as pd\r\nimport numpy as np\r\nimport copy\r\nfrom tensorflow.contrib import rnn\r\nfrom tqdm import tqdm\r\n\r\ndef get_data():\r\n with open('comment1', encoding='utf8', mode='r') as rfile:\r\n words = []\r\n sentences = []\r\n def repl(m):\r\n inner_word = list(m.group(0))\r\n return \" \" + ''.join(inner_word) + \" \"\r\n for line in rfile:\r\n line = line.lower()\r\n line = re.sub(r'<.*>', ' ', line)\r\n line = re.sub('[\\s+\\.\\!\\?\\,\\/_,$%^*(+\\\"\\:\\-\\@\\#\\&)]+', \" \", line)\r\n sentence = regex.sub(r'\\p{So}\\p{Sk}*', repl, line)\r\n word = sentence.split() \r\n if len(word) > 1:\r\n if \"'\" in word:\r\n word.remove(\"'\")\r\n else: \r\n word = word\r\n words.extend(word)\r\n sentences.append(word) \r\n else:\r\n continue \r\n words_sort = pd.DataFrame(words)[0].value_counts() \r\n #words_sort = words_sort[words_sort>1] \r\n word_bank = list(words_sort.index)\r\n #word_bank = list(set(words))\r\n word2id = {} # word => id 的映射\r\n for i in range(len(word_bank)):\r\n word2id[word_bank[i]] = i+1 \r\n word2id['EOS'] = len(word_bank)+1 # Word2id中增加‘EOS'\r\n inputs = [] \r\n for sent in sentences: # 输入是多个句子,这里每个循环处理一个句子\r\n input_sent = []\r\n for i in range(sent.__len__()): # 处理单个句子中的每个单词\r\n input_id = word2id.get(sent[i])\r\n if not input_id: # 如果单词不在词典中,则跳过\r\n continue\r\n input_sent.append(input_id)\r\n input_sent.append(len(word_bank)+1) # 每个句子末尾添加'EOS'\r\n if len(input_sent) > 21:\r\n input_sent = [input_sent[i:i+20] for i in range(0,len(input_sent),20)]\r\n inputs.extend(input_sent)\r\n else:\r\n inputs.append(input_sent)\r\n #pad = np.mean([len(x) for x in inputs])\r\n pad = len(max(inputs, key=len))\r\n inputs = [i + [0]*(pad-len(i)) for i in inputs]\r\n return word_bank, pad, inputs, len(word2id)+1, word2id\r\nword_bank, sen_length, inputs, vocab_size, word2id = get_data()\r\n\r\nclass TrainData:\r\n def __init__(self, inputs, batch_size, sen_length):\r\n self.inputs = inputs\r\n self.batch_size = batch_size\r\n self.sen_length = sen_length\r\n self.n = len(inputs) \r\n def get_batch_data(self, batch):\r\n global batch_size\r\n start_pos = batch * self.batch_size\r\n end_pos = min((batch + 1) * self.batch_size, self.n)\r\n xdata = self.inputs[start_pos:end_pos]\r\n # target data 左移一位\r\n ydata = copy.deepcopy(self.inputs[start_pos:end_pos])\r\n for row in ydata:\r\n b = row.pop(0)\r\n row.append(b) \r\n x_batch = np.array(xdata, dtype=np.int32)\r\n y_batch = np.array(ydata, dtype=np.int32)\r\n return x_batch, y_batch\r\n def get_num_batches(self):\r\n return max(self.n - 1, 0) // self.batch_size \r\n \r\nbatch_size = 128\r\ntrain_data = TrainData(inputs, batch_size, sen_length) #inputs, batch_size, sen_length\r\nprint('Train size: %s' % (train_data.get_num_batches()*batch_size))\r\nprint('Vocab_size: %s' % vocab_size) \r\n\r\ndef build_inputs(num_steps):\r\n # num_seqs: 每个batch中的序列个数\r\n # num_steps: 每个序列包含的字符数\r\n inputs = tf.placeholder(tf.int32, shape=[None, num_steps], name='inputs')\r\n targets = tf.placeholder(tf.int32, shape=[None, num_steps], name='targets')\r\n return inputs, targets\r\n\r\ndef build_lstm(hidden_dim, num_layers, batch_size, dropout_rate,sampling):\r\n # hidden_dim: lstm隐层中结点数目\r\n # num_layers: lstm的隐层数目\r\n # lstm cell\r\n lstm_cell_fw = rnn.BasicLSTMCell(hidden_dim, forget_bias=1.0, state_is_tuple=True)\r\n lstm_cell_bw = rnn.BasicLSTMCell(hidden_dim, forget_bias=1.0, state_is_tuple=True) \r\n # 添加dropout\r\n if sampling == False:\r\n lstm_cell_fw = rnn.DropoutWrapper(lstm_cell_fw, output_keep_prob=(1 - dropout_rate))\r\n lstm_cell_bw = rnn.DropoutWrapper(lstm_cell_bw, output_keep_prob=(1 - dropout_rate))\r\n # 堆叠\r\n lstm_cell_fw = rnn.MultiRNNCell([lstm_cell_fw] * num_layers, state_is_tuple=True)\r\n lstm_cell_bw = rnn.MultiRNNCell([lstm_cell_bw] * num_layers, state_is_tuple=True)\r\n initial_state_fw = lstm_cell_fw.zero_state(batch_size, tf.float32)\r\n initial_state_bw = lstm_cell_bw.zero_state(batch_size, tf.float32) \r\n return lstm_cell_fw, lstm_cell_bw, initial_state_fw, initial_state_bw\r\n\r\ndef build_output(lstm_output, hidden_dim, vocab_size, lambd, num_batches):\r\n outputs = tf.reshape(tf.concat(lstm_output,1), [-1, hidden_dim * 2])\r\n # 将lstm层与softmax层全连接\r\n with tf.variable_scope('softmax'):\r\n softmax_w = tf.get_variable(\"softmax_w\", shape = [hidden_dim * 2, vocab_size], \r\n regularizer=tf.contrib.layers.l2_regularizer(scale=lambd / num_batches), \r\n initializer = tf.random_uniform_initializer(-1,1,seed=1))\r\n softmax_b = tf.get_variable(\"softmax_b\", initializer = tf.zeros([vocab_size])) \r\n # 计算logits\r\n logits = tf.matmul(outputs, softmax_w) + softmax_b \r\n # softmax层返回概率分布\r\n preds = tf.nn.softmax(logits, name='predictions') \r\n return preds, logits\r\n\r\ndef build_loss(logits, targets): \r\n # Softmax cross entropy loss \r\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels = tf.reshape(targets, [-1]), logits = logits)) \r\n return loss\r\n\r\ndef build_optimizer(loss, learning_rate):\r\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss) \r\n return optimizer\r\n\r\nclass Bi_LSTM: \r\n def __init__(self, vocab_size, batch_size, \r\n num_batches, num_steps=sen_length, embedding_size=32, hidden_dim=30, \r\n num_layers=1, learning_rate=0.01, lambd=0.01, dropout_rate=0.5, sampling=True):\r\n self.lr = learning_rate\r\n self.lambd = lambd\r\n self.dropout_rate = dropout_rate \r\n self.num_layers = num_layers \r\n self.num_batches = num_batches\r\n self.embedding_size = embedding_size\r\n self.hidden_dim = hidden_dim \r\n self.vocab_size = vocab_size\r\n if sampling == True:\r\n self.batch_size, self.num_steps = 1, 1\r\n else:\r\n self.batch_size, self.num_steps = batch_size, num_steps\r\n \r\n tf.reset_default_graph() \r\n # 输入层\r\n self.inputs = tf.placeholder(tf.int32, shape=[None, self.num_steps], name='inputs')\r\n self.targets = tf.placeholder(tf.int32, shape=[None, self.num_steps], name='targets')\r\n # LSTM层\r\n lstm_cell_fw, lstm_cell_bw, self.initial_state_fw, self.initial_state_bw = build_lstm(self.hidden_dim, self.num_layers, self.batch_size, self.dropout_rate, sampling)\r\n # 对输入进行one-hot编码\r\n self.embedding = tf.get_variable(\"embedding\", shape=[self.vocab_size, self.embedding_size],\r\n regularizer=tf.contrib.layers.l2_regularizer(scale=self.lambd / self.num_batches),\r\n initializer=tf.random_uniform_initializer(-1,1,seed=1))\r\n self.inputs_emb = tf.nn.embedding_lookup(self.embedding, self.inputs)\r\n self.inputs_emb = tf.unstack(self.inputs_emb, self.num_steps, 1)\r\n # 运行RNN\r\n outputs, self.final_state_fw, self.final_state_bw = rnn.static_bidirectional_rnn(lstm_cell_fw, lstm_cell_bw, self.inputs_emb, \r\n initial_state_fw = self.initial_state_fw, initial_state_bw = self.initial_state_bw, dtype=tf.float32)\r\n # softmax prediction probability\r\n self.prediction, self.logits = build_output(outputs, self.hidden_dim, self.vocab_size, self.lambd, self.num_batches)\r\n # Loss 和 optimizer (with gradient clipping)\r\n self.loss = build_loss(self.logits, self.targets)\r\n self.optimizer = build_optimizer(self.loss, self.lr)\r\n \r\n#%% ===================================训练数据 ============================= \r\nnum_batches = train_data.get_num_batches()\r\nmodel = Bi_LSTM(vocab_size, batch_size, num_batches, sampling=False)\r\n\r\nepochs = 20\r\nwith tf.Session() as sess:\r\n sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\r\n writer = tf.summary.FileWriter('./comments_model/bi_LSTM', sess.graph) # self.global_step.eval(session=sess)\r\n step = 0\r\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\r\n new_state_fw, new_state_bw = sess.run([model.initial_state_fw, model.initial_state_bw])\r\n for index in range(epochs): \r\n total_loss = 0.0\r\n for batch in tqdm(range(num_batches)):\r\n batch_inputs,batch_targets = train_data.get_batch_data(batch)\r\n # 生成供tensorflow训练用的数据\r\n feed = {model.inputs: batch_inputs, model.targets: batch_targets, model.initial_state_fw: new_state_fw, model.initial_state_bw: new_state_bw}\r\n batch_loss, new_state_fw, new_state_bw, _ = sess.run([model.loss, model.final_state_fw, model.final_state_bw, model.optimizer], feed_dict=feed)\r\n total_loss += batch_loss\r\n step += 1\r\n if step % 100 == 0:\r\n saver.save(sess, './comments_model/bi_LSTM/', global_step=step)\r\n if step % 10000 == 0:\r\n print(step)\r\n print('Train Loss at step {}: {:5.6f}'.format(index+1, total_loss / num_batches))\r\n\r\n#%% ===================================生成句子 ============================= \r\ndef pick_top_n(preds, vocab_size, top_n=10):\r\n # 从预测结果中选取前top_n个最可能的字符\r\n p = np.squeeze(preds)\r\n #p1 = list(p)\r\n #c = p1.index(max(p1))\r\n p = p[1:]\r\n # 将除了top_n个预测值的位置都置为0\r\n p[np.argsort(p)[:-top_n]] = 0\r\n # 归一化概率\r\n p = p / np.sum(p)\r\n # 随机选取一个字符\r\n c = np.random.choice(vocab_size-1, 1, p=p)[0]+1\r\n return c\r\n \r\ndef sample(n_words, vocab_size, batch_size, num_batches, prime):\r\n #prime: 起始文本 \r\n samples=[prime]\r\n # sampling=True意味着batch的size=1 x 1\r\n model = Bi_LSTM(vocab_size, batch_size, num_batches, sampling=True)\r\n saver = tf.train.Saver()\r\n with tf.Session() as sess:\r\n # 加载模型参数,恢复训练\r\n checkpoint_file = tf.train.latest_checkpoint('./comments_model/bi_LSTM')\r\n saver.restore(sess, checkpoint_file)\r\n new_state_fw, new_state_bw = sess.run([model.initial_state_fw, model.initial_state_bw]) \r\n # 不断生成字符,直到达到指定数目\r\n c = word2id.get(prime)\r\n for i in range(n_words):\r\n test_word_id = c\r\n if test_word_id == word2id.get('EOS'):\r\n break\r\n else:\r\n feed = {model.inputs: [[test_word_id]],\r\n model.initial_state_fw: new_state_fw, model.initial_state_bw: new_state_bw}\r\n #preds= sess.run([model.prediction], feed_dict=feed)\r\n preds, new_state_fw, new_state_bw = sess.run([model.prediction, model.final_state_fw, model.final_state_bw], feed_dict=feed)\r\n c = pick_top_n(preds, vocab_size)\r\n samples.extend(x for x,v in word2id.items() if v==c) \r\n print(' '.join(samples))\r\n \r\nfor i in range(5): \r\n #sample(10, vocab_size, batch_size, num_batches, prime = \"you\")\r\n for j in [\"thank\", \"beautiful\", \"very\", \"bro\", \"this\",\"you\" ]:\r\n sample(20, vocab_size, batch_size, num_batches, prime = j)\r\n\r\n","sub_path":"language models/Bi-LSTM.py","file_name":"Bi-LSTM.py","file_ext":"py","file_size_in_byte":12012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"235838956","text":"import socket\nimport sys\nimport util\n\nfrom sympy.crypto.crypto import rsa_private_key, rsa_public_key\n\n\n\ndef start():\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Bind the socket to the port\n server_address = (util.TTP_address, util.TTP_listen_port)\n print('starting up on {} port {}'.format(*server_address))\n sock.bind(server_address)\n\n # Listen for incoming connections\n while True:\n sock.listen(1)\n while True:\n # Wait for a connection\n print('TTP: waiting for a connection')\n connection, client_address = sock.accept()\n client_msg = bytearray()\n\n print('TTP: connection from', client_address)\n\n # Receive the data in small chunks and retransmit it\n while True:\n data = connection.recv(1024)\n print('TTP: received {!r}'.format(data))\n client_msg = \"\".join(util.bytesToStringArr(data))\n if(client_msg==util.signRequest):\n connection.sendall(util.numbersToByteArr([\"OK\"]))\n data = connection.recv(1024) # get len(name)(4 byte)|name|PK(128 byte)\n ln = int.from_bytes(data[:4], byteorder='big')\n name = int.from_bytes(data[4:4+ln], byteorder='big')\n pk = int.from_bytes(data[4+ln:4+ln+128], byteorder='big')\n sig = util.RSA_decrypt(util.H512([name,pk])% util.rsa_N, util.rsa_prk)\n signedCert = util.rsa_N.to_bytes(128,byteorder='big')+sig.to_bytes(128,byteorder='big')\n connection.sendall(signedCert)\n elif(client_msg==util.keyRequest):\n response = util.rsa_N.to_bytes(128,byteorder='big')+util.rsa_e.to_bytes(128,byteorder='big')\n connection.sendall(response)\n connection.close()\n break\n\n\ndef main():\n util.RSA_keyGeneration(17)\n start()\n\nmain()\n","sub_path":"TTP.py","file_name":"TTP.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"177953754","text":"\n\"\"\"Script for creating a secret key\n\nAttributes\n----------\nCHARS : str\n Valid characters for a django secret key: a-z, 0-9, !@#$%^&*(-_=+)\n (No capitals)\n\"\"\"\n\nimport secrets\nimport json\n\n\nCHARS = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n\n\ndef new_key(target=\"key.json\"):\n \"\"\"Generate a new key file, in the form of a JSON with one entry (``key``).\n\n Parameters\n ----------\n target : str\n Name of the file to generate; defaults to key.json\n \"\"\"\n\n key = \"\".join([secrets.choice(CHARS) for i in range(50)])\n print(\"Created new secret key: \" + key)\n\n with open(target, \"w+\") as keyfile:\n keyfile.write(json.dumps({\"key\": key}))\n\n\nif __name__ == '__main__':\n new_key()\n","sub_path":"checkin/new_key.py","file_name":"new_key.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"534350102","text":"from django.conf import settings\nfrom django.conf.urls import *\nfrom django.urls import path\nfrom django.contrib import admin\nfrom django.views.generic.base import TemplateView\nfrom djoser.views import UserViewSet, TokenCreateView\nfrom rest_framework import routers\n\nimport manabi.views\nfrom manabi.apps.flashcards.api_views import (\n DeckViewSet,\n SynchronizedDeckViewSet,\n SharedDeckViewSet,\n SuggestedSharedDecksViewSet,\n ManabiReaderFactViewSet,\n FactViewSet,\n CardViewSet,\n)\nfrom manabi.apps.manabi_auth.api_views import (\n AppleLoginView,\n exchange_token,\n)\nfrom manabi.apps.review_results.api_views import ReviewResultsView\n\n\napi_router = routers.DefaultRouter()\napi_router.register(r'flashcards/decks',\n DeckViewSet,\n basename='deck')\napi_router.register(r'flashcards/synchronized_decks',\n SynchronizedDeckViewSet,\n basename='synchronized-deck')\napi_router.register(r'flashcards/suggested_shared_decks',\n SuggestedSharedDecksViewSet,\n basename='suggested-shared-deck')\napi_router.register(r'flashcards/shared_decks',\n SharedDeckViewSet,\n basename='shared-deck')\napi_router.register(r'flashcards/facts',\n FactViewSet,\n basename='fact')\napi_router.register(r'flashcards/manabi_reader_facts',\n ManabiReaderFactViewSet,\n basename='fact')\napi_router.register(r'flashcards/cards',\n CardViewSet,\n basename='card')\n\nurlpatterns = [\n url(r'^apple-app-site-association$', TemplateView.as_view(\n template_name='apple_app_site_association.json',\n content_type='application/json',\n )),\n\n url(r'^ios-required/', TemplateView.as_view(\n template_name='ios_required.html'), name='ios-required'),\n\n url(r'^accounts/', include('allauth.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'impersonate/', include('impersonate.urls')),\n url(r'^rq/', include('django_rq.urls')),\n\n url(r'^$', manabi.views.homepage, name='homepage'),\n url(r'^flashcards/', include('manabi.apps.flashcards.urls')),\n url(r'^reader_feeds/', include('manabi.apps.reader_feeds.urls')),\n url(r'^users/', include('manabi.apps.profiles.urls')),\n\n url(r'^terms-of-service/$', TemplateView.as_view(\n template_name='tos.html'), name='terms_of_service'),\n url(r'^privacy-policy/$', TemplateView.as_view(\n template_name='privacy.html'), name='privacy_policy'),\n url(r'^credits/$', TemplateView.as_view(\n template_name='credits.html'), name='credits'),\n\n # API URLs.\n url(r'^api/', include((api_router.urls, 'api'))),\n\n path('api/dj-rest-auth/', include('dj_rest_auth.urls')),\n # path('api/dj-rest-auth/registration/', include('dj_rest_auth.registration.urls'))\n path('api/dj-rest-auth/apple/', AppleLoginView.as_view()),\n\n url(r'^api/auth/social_login/(?P\\S+)/$', exchange_token),\n url(r'^api/auth/users/create/', UserViewSet.as_view({'post': 'create'})),\n url(r'^api/auth/token/create/', TokenCreateView.as_view()),\n url(r'^api/auth/', include('djoser.urls')),\n url(r'^api/auth/', include('djoser.urls.authtoken')),\n\n url(r'^api/flashcards/', include('manabi.apps.flashcards.api_urls')),\n url(r'^api/flashcards/review_results/',\n include('manabi.apps.review_results.api_urls')),\n url(r'^api/subscriptions/', include('manabi.apps.subscriptions.api_urls')),\n url(r'^api/furigana/', include('manabi.apps.furigana.urls')),\n url(r'^api/twitter_usages/', include('manabi.apps.twitter_usages.urls')),\n url(r'^api/word_tracking/', include('manabi.apps.word_tracking.api_urls')),\n]\n\n# if not settings.LIVE_HOST:\n# urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]\nif 'silk' in settings.INSTALLED_APPS:\n urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]\n","sub_path":"manabi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"579346961","text":"from signal import *\nimport atexit\nimport helpers\nimport resources\nimport sys\nimport temperature_sensor\nimport time\n\nimport part1\nimport part2\nimport part3\nimport part4\n\n# Type in your Raspberry Pi's name, description, and location here\nname = \"Example Raspberry Pi Name (dont use me!)\"\ndescription = \"This is my raspberry pi!\"\nposition_x = -1\nposition_y = -1\n\n# (Set up some helpers to clean up when we finish running):\npermit_cleanup = True\natexit.register(lambda: helpers.perform_part5_cleanup(name, permit_cleanup))\nfor sig in (SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGHUP):\n signal(sig, lambda x, y: helpers.perform_part5_cleanup(name, permit_cleanup))\n\nif __name__ == \"__main__\":\n # First, we need to create a PI Point:\n pipoint_response = part1.create_pipoint(\n name,\n resources.base_url,\n resources.dataserver_web_id)\n\n # (We check that the PI Point was successfully created before continuing):\n if pipoint_response.text != \"\":\n permit_cleanup = False\n print(pipoint_response.text)\n\n # (We need the PI Point WebId later, so we get it now):\n pipoint_web_id = helpers.get_web_id(pipoint_response)\n\n # Second, we need to create an AF Element:\n element_response = part2.create_af_element(\n name,\n description,\n resources.base_url,\n resources.parent_af_element_web_id)\n \n # (Here, we retrieve the WebIds for the X and Y coordinate attributes):\n coordinate_web_ids = helpers.get_coordinate_locations(element_response)\n\n # Third, we update the X and Y coordinates to match our location:\n part3.update_af_attribute(\n position_x,\n resources.base_url,\n coordinate_web_ids.x_web_id)\n part3.update_af_attribute(\n position_y,\n resources.base_url,\n coordinate_web_ids.y_web_id)\n\n # (Here, we retrieve the WebId for the Temperature attribute):\n value_web_id = helpers.get_attribute_web_id_by_name(\n \"Temperature\",\n resources.base_url,\n helpers.get_web_id(element_response))\n\n # Finally, we read the temperature from the temperature sensor, and POST\n # the value to the PI Point:\n sensor = temperature_sensor.TemperatureSensor()\n while True:\n current_temperature = sensor.read_temp()\n print(\"Sending: \" + str(current_temperature))\n part4.post_pi_value(\n current_temperature,\n pipoint_web_id,\n resources.base_url)\n print(\"Received: \" + str(helpers.get_attribute_field(\n value_web_id,\n lambda x: x[\"Value\"])))\n time.sleep(5)\n\n \n \n","sub_path":"Instructor Copies/Exercise1/part5.py","file_name":"part5.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"416449580","text":"import pandas as pd\nimport scipy.stats as st\nimport numpy as np\n\ndef ta_chip(high, low, close, volume, window):\n price_dist = (close-(high+low)/2)**2\n new_vol = price_dist * volume\n chip_avg = pd.Series(np.nan, index=close.index)\n chip_score = pd.Series(np.nan, index=close.index)\n #\n for i in range(window-1, close.shape[0]):\n newvol_col = new_vol.iloc[i-window+1:i+1]\n weight = newvol_col / newvol_col.sum()\n price_weight = close.iloc[i-window+1:i+1] * weight\n chip_avg.iloc[i] = price_weight.sum()\n z_score = (close.iloc[i] - chip_avg.iloc[i]) / close.iloc[i-window+1:i+1].std()\n p_values = st.norm.cdf(z_score)\n chip_score.iloc[i] = p_values\n #\n return chip_avg, chip_score","sub_path":"dev/yfissue_1/lambda_new_talib.py","file_name":"lambda_new_talib.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"405427457","text":"import marshmallow\nfrom marshmallow import INCLUDE, fields\nfrom marshmallow_enum import EnumField\nfrom sqlalchemy import func\n\nfrom crc import db, ma\nfrom crc.api.common import ApiErrorSchema\nfrom crc.models.file import FileModel, SimpleFileSchema, FileSchema\nfrom crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy\nfrom crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \\\n WorkflowModel\n\n\nclass StudyModel(db.Model):\n __tablename__ = 'study'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String)\n last_updated = db.Column(db.DateTime(timezone=True), default=func.now())\n protocol_builder_status = db.Column(db.Enum(ProtocolBuilderStatus))\n primary_investigator_id = db.Column(db.String, nullable=True)\n sponsor = db.Column(db.String, nullable=True)\n hsr_number = db.Column(db.String, nullable=True)\n ind_number = db.Column(db.String, nullable=True)\n user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=False)\n investigator_uids = db.Column(db.ARRAY(db.String), nullable=True)\n requirements = db.Column(db.ARRAY(db.Integer), nullable=True)\n on_hold = db.Column(db.Boolean, default=False)\n\n def update_from_protocol_builder(self, pbs: ProtocolBuilderStudy):\n self.hsr_number = pbs.HSRNUMBER\n self.title = pbs.TITLE\n self.user_uid = pbs.NETBADGEID\n self.last_updated = pbs.DATE_MODIFIED\n\n self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE\n if pbs.HSRNUMBER:\n self.protocol_builder_status = ProtocolBuilderStatus.OPEN\n if self.on_hold:\n self.protocol_builder_status = ProtocolBuilderStatus.HOLD\n\n\nclass WorkflowMetadata(object):\n def __init__(self, id, name, display_name, description, spec_version, category_id, state: WorkflowState, status: WorkflowStatus,\n total_tasks, completed_tasks, display_order):\n self.id = id\n self.name = name\n self.display_name = display_name\n self.description = description\n self.spec_version = spec_version\n self.category_id = category_id\n self.state = state\n self.status = status\n self.total_tasks = total_tasks\n self.completed_tasks = completed_tasks\n self.display_order = display_order\n\n\n @classmethod\n def from_workflow(cls, workflow: WorkflowModel):\n instance = cls(\n id=workflow.id,\n name=workflow.workflow_spec.name,\n display_name=workflow.workflow_spec.display_name,\n description=workflow.workflow_spec.description,\n spec_version=workflow.spec_version(),\n category_id=workflow.workflow_spec.category_id,\n state=WorkflowState.optional,\n status=workflow.status,\n total_tasks=workflow.total_tasks,\n completed_tasks=workflow.completed_tasks,\n display_order=workflow.workflow_spec.display_order\n )\n return instance\n\n\nclass WorkflowMetadataSchema(ma.Schema):\n state = EnumField(WorkflowState)\n status = EnumField(WorkflowStatus)\n class Meta:\n model = WorkflowMetadata\n additional = [\"id\", \"name\", \"display_name\", \"description\",\n \"total_tasks\", \"completed_tasks\", \"display_order\"]\n unknown = INCLUDE\n\n\nclass Category(object):\n def __init__(self, model: WorkflowSpecCategoryModel):\n self.id = model.id\n self.name = model.name\n self.display_name = model.display_name\n self.display_order = model.display_order\n\n\nclass CategorySchema(ma.Schema):\n workflows = fields.List(fields.Nested(WorkflowMetadataSchema), dump_only=True)\n class Meta:\n model = Category\n additional = [\"id\", \"name\", \"display_name\", \"display_order\"]\n unknown = INCLUDE\n\n\nclass Study(object):\n\n def __init__(self, title, last_updated, primary_investigator_id, user_uid,\n id=None,\n protocol_builder_status=None,\n sponsor=\"\", hsr_number=\"\", ind_number=\"\", categories=[],\n files=[], approvals=[], **argsv):\n self.id = id\n self.user_uid = user_uid\n self.title = title\n self.last_updated = last_updated\n self.protocol_builder_status = protocol_builder_status\n self.primary_investigator_id = primary_investigator_id\n self.sponsor = sponsor\n self.hsr_number = hsr_number\n self.ind_number = ind_number\n self.categories = categories\n self.approvals = approvals\n self.warnings = []\n self.files = files\n\n @classmethod\n def from_model(cls, study_model: StudyModel):\n id = study_model.id # Just read some value, in case the dict expired, otherwise dict may be empty.\n args = dict((k, v) for k, v in study_model.__dict__.items() if not k.startswith('_'))\n instance = cls(**args)\n return instance\n\n def update_model(self, study_model: StudyModel):\n for k,v in self.__dict__.items():\n if not k.startswith('_'):\n study_model.__dict__[k] = v\n\n def model_args(self):\n \"\"\"Arguments that can be passed into the Study Model to update it.\"\"\"\n self_dict = self.__dict__.copy()\n del self_dict[\"categories\"]\n del self_dict[\"warnings\"]\n return self_dict\n\n\nclass StudySchema(ma.Schema):\n\n id = fields.Integer(required=False, allow_none=True)\n categories = fields.List(fields.Nested(CategorySchema), dump_only=True)\n warnings = fields.List(fields.Nested(ApiErrorSchema), dump_only=True)\n protocol_builder_status = EnumField(ProtocolBuilderStatus)\n hsr_number = fields.String(allow_none=True)\n sponsor = fields.String(allow_none=True)\n ind_number = fields.String(allow_none=True)\n files = fields.List(fields.Nested(FileSchema), dump_only=True)\n approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True)\n\n class Meta:\n model = Study\n additional = [\"id\", \"title\", \"last_updated\", \"primary_investigator_id\", \"user_uid\",\n \"sponsor\", \"ind_number\", \"approvals\", \"files\"]\n unknown = INCLUDE\n\n @marshmallow.post_load\n def make_study(self, data, **kwargs):\n \"\"\"Can load the basic study data for updates to the database, but categories are write only\"\"\"\n return Study(**data)\n\n","sub_path":"crc/models/study.py","file_name":"study.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"492111725","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = '04 多进程并发_服务端_scoket.py'\n__author__ = 'yangyang'\n__mtime__ = '2018.03.08'\n\"\"\"\n\nimport socket\nfrom multiprocessing import Process\n\ndef talk(conn):\n\twhile True:\n\t\ttry:\n\t\t\tres = conn.recv(1024)\n\t\t\tif not res:continue\n\t\t\tprint(\"recv:\",res)\n\t\t\tconn.send(res)\n\t\texcept ConnectionResetError:\n\t\t\tbreak\n\tconn.close()\n\ndef server(ip_port):\n\tserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\tserver.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n\tserver.bind(ip_port)\n\tserver.listen(5)\n\n\twhile True:\n\t\tconn,client_addr = server.accept()\n\t\tp = Process(target=talk,args=(conn,))\n\t\tp.start()\n\tserver.close()\n\n\nif __name__ == '__main__':\n\tip_port = ('127.0.0.1',8090)\n\tserver(ip_port)\n\n\n","sub_path":"fourth_module/复习/04 多进程并发_服务端_scoket.py","file_name":"04 多进程并发_服务端_scoket.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"186306603","text":"import torchsparse\nimport torchsparse.nn as spnn\nimport torchsparse.nn.functional as spf\nfrom torchsparse.sparse_tensor import SparseTensor\nfrom torchsparse.point_tensor import PointTensor\nfrom torchsparse.utils.kernel_region import *\nfrom torchsparse.utils.helpers import *\n\n\n__all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point']\n\n\n# z: PointTensor\n# return: SparseTensor\ndef initial_voxelize(z, init_res, after_res):\n new_float_coord = torch.cat(\n [(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1)\n\n pc_hash = spf.sphash(torch.floor(new_float_coord).int())\n sparse_hash = torch.unique(pc_hash)\n idx_query = spf.sphashquery(pc_hash, sparse_hash)\n counts = spf.spcount(idx_query.int(), len(sparse_hash))\n\n inserted_coords = spf.spvoxelize(torch.floor(new_float_coord), idx_query,\n counts)\n inserted_coords = torch.round(inserted_coords).int()\n inserted_feat = spf.spvoxelize(z.F, idx_query, counts)\n\n new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)\n new_tensor.check()\n z.additional_features['idx_query'][1] = idx_query\n z.additional_features['counts'][1] = counts\n z.C = new_float_coord\n\n return new_tensor\n\n\n# x: SparseTensor, z: PointTensor\n# return: SparseTensor\ndef point_to_voxel(x, z):\n if z.additional_features is None or z.additional_features.get('idx_query') is None\\\n or z.additional_features['idx_query'].get(x.s) is None:\n #pc_hash = hash_gpu(torch.floor(z.C).int())\n pc_hash = spf.sphash(\n torch.cat([\n torch.floor(z.C[:, :3] / x.s).int() * x.s,\n z.C[:, -1].int().view(-1, 1)\n ], 1))\n sparse_hash = spf.sphash(x.C)\n idx_query = spf.sphashquery(pc_hash, sparse_hash)\n counts = spf.spcount(idx_query.int(), x.C.shape[0])\n z.additional_features['idx_query'][x.s] = idx_query\n z.additional_features['counts'][x.s] = counts\n else:\n idx_query = z.additional_features['idx_query'][x.s]\n counts = z.additional_features['counts'][x.s]\n\n inserted_feat = spf.spvoxelize(z.F, idx_query, counts)\n new_tensor = SparseTensor(inserted_feat, x.C, x.s)\n new_tensor.coord_maps = x.coord_maps\n new_tensor.kernel_maps = x.kernel_maps\n\n return new_tensor\n\n\n# # x: SparseTensor, z: PointTensor\n# # return: PointTensor\n# def voxel_to_point(x, z, nearest=False):\n# if z.idx_query is None or z.weights is None or z.idx_query.get(\n# x.s) is None or z.weights.get(x.s) is None:\n# kr = KernelRegion(2, x.s, 1)\n# off = kr.get_kernel_offset().to(z.F.device)\n# #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)\n# old_hash = spf.sphash(\n# torch.cat([\n# torch.floor(z.C[:, :3] / x.s).int() * x.s,\n# z.C[:, -1].int().view(-1, 1)\n# ], 1), off)\n# pc_hash = spf.sphash(x.C.to(z.F.device))\n# idx_query = spf.sphashquery(old_hash, pc_hash)\n# weights = spf.calc_ti_weights(z.C, idx_query,\n# scale=x.s).transpose(0, 1).contiguous().float()\n# idx_query = idx_query.transpose(0, 1).contiguous()\n# if nearest:\n# weights[:, 1:] = 0.\n# idx_query[:, 1:] = -1\n# new_feat = spf.spdevoxelize(x.F, idx_query, weights)\n# new_tensor = PointTensor(new_feat,\n# z.C,\n# idx_query=z.idx_query,\n# weights=z.weights)\n# new_tensor.additional_features = z.additional_features\n# new_tensor.idx_query[x.s] = idx_query\n# new_tensor.weights[x.s] = weights\n# z.idx_query[x.s] = idx_query\n# z.weights[x.s] = weights\n\n# else:\n# new_feat = spf.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s))\n# new_tensor = PointTensor(new_feat,\n# z.C,\n# idx_query=z.idx_query,\n# weights=z.weights)\n# new_tensor.additional_features = z.additional_features\n\n# return new_tensor\n\n\ndef calc_ti_weights(coords, idx_query):\n mask = torch.cuda.FloatTensor(\n [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n frac = coords - torch.floor(coords)\n frac = frac[:, 0:3]\n frac = torch.cuda.FloatTensor([1, 1, 1]) - mask - torch.unsqueeze(frac, dim=1)\n weights = torch.abs(torch.prod(frac, dim=2)).t()\n weights[idx_query == -1] = 0\n weights /= weights.sum(0) + 1e-8\n return weights\n\n# x: SparseTensor, z: PointTensor\n# return: PointTensor\ndef voxel_to_point(x, z, nearest=False):\n #print(x.s)\n #print(z.C.shape)\n h = x.C.shape[0]\n npt = z.C.shape[0] \n if z.idx_query is None or z.weights is None or z.idx_query.get(\n x.s) is None or z.weights.get(x.s) is None:\n kr = KernelRegion(2, x.s, 1)\n off = kr.get_kernel_offset().to(z.F.device)\n #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)\n old_hash = spf.sphash(\n torch.cat([\n torch.floor(z.C[:, :3] / x.s).int() * x.s,\n z.C[:, -1].int().view(-1, 1)\n ], 1), off)\n pc_hash = spf.sphash(x.C.to(z.F.device))\n idx_query = spf.sphashquery(old_hash, pc_hash)\n weights = calc_ti_weights(z.C, idx_query).transpose(0, 1).contiguous()\n\n idx_query = idx_query.transpose(0, 1).contiguous()\n #print(idx_query[idx_query==-1])\n\n ids = torch.arange(npt).view(npt, 1).cuda()\n ids = ids.repeat(1, 8).view(-1)\n idx = idx_query.view(-1)\n flgs = idx > -1\n ids = ids[flgs]\n idx = idx[flgs]\n weights = weights.view(-1)[flgs].float()\n \n indices = torch.cat([torch.unsqueeze(ids, dim=1), torch.unsqueeze(idx, dim=1)], dim=1).long()\n\n mat = torch.sparse.FloatTensor(indices.t(), weights, torch.Size([npt, h])).cuda()\n\n new_feat = torch.sparse.mm(mat, x.F)\n\n new_tensor = PointTensor(new_feat,\n z.C,\n idx_query=z.idx_query,\n weights=z.weights)\n new_tensor.additional_features = z.additional_features\n new_tensor.idx_query[x.s] = idx_query\n new_tensor.weights[x.s] = weights\n z.idx_query[x.s] = idx_query\n z.weights[x.s] = weights\n\n else:\n weights = z.weights.get(x.s)\n idx_query = z.idx_query.get(x.s)\n \n ids = torch.arange(npt).view(npt, 1).cuda()\n ids = ids.repeat(1, 8).view(-1)\n idx = idx_query.view(-1)\n flgs = idx > -1\n ids = ids[flgs]\n idx = idx[flgs]\n weights = weights.view(-1)[flgs]\n indices = torch.cat([torch.unsqueeze(ids, dim=1), torch.unsqueeze(idx, dim=1)], dim=1).long()\n\n mat = torch.sparse.FloatTensor(indices.t(), weights, torch.Size([npt, h])).cuda()\n new_feat = torch.sparse.mm(mat, x.F)\n \n new_tensor = PointTensor(new_feat,\n z.C,\n idx_query=z.idx_query,\n weights=z.weights)\n new_tensor.additional_features = z.additional_features\n\n return new_tensor\n\n\ndef nearest_voxel(x, z):\n #print(x.s)\n #print(z.C.shape)\n #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)\n old_hash = spf.sphash(\n torch.cat([\n torch.floor(torch.round(z.C[:, :3]) / x.s).int() * x.s,\n z.C[:, -1].int().view(-1, 1)\n ], 1))\n pc_hash = spf.sphash(x.C.to(z.F.device))\n idx_query = spf.sphashquery(old_hash, pc_hash)\n assert((idx_query!=-1).all())\n new_feat = x.F[idx_query, :]\n new_tensor = PointTensor(new_feat,\n z.C)\n\n return new_tensor","sub_path":"spvnas_patch/spvnas/core/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"34207991","text":"##################################################\n## {Description}: Rename and resize the dataset by \n## numbering format e.g.: 000001.png\n##################################################\n## Author: Khairul Izwan Bin Kamsani\n## Version: {1}.{0}.{0}\n## Email: {wansnap@gmail.com}\n##################################################\n\n# import the necessary packages\nfrom imutils import paths\nimport argparse\nimport imutils\nimport cv2\nimport os\n\nfrom pyimagesearch.preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True, \n\thelp=\"path to input directory of images\")\nap.add_argument(\"-a\", \"--annot\", required=True, \n\thelp=\"path to output directory of annotations\")\nap.add_argument(\"-f\", \"--folder\", required=True, help=\"folder name\")\nap.add_argument(\"-ws\", \"--width\", required=False, default=200, \n\thelp=\"width size\")\nap.add_argument(\"-hs\", \"--height\", required=False, default=200, \n\thelp=\"height size\")\nargs = vars(ap.parse_args())\n\n# grab the image paths then initialize the dictionary of character\n# counts\nimagePaths = list(paths.list_images(args[\"input\"]))\ncount = 0\n\n# initiate aspectawarepreprocessor\naap = AspectAwarePreprocessor(args[\"width\"], args[\"height\"])\n\n# loop over the image paths\nfor (i, imagePath) in enumerate(imagePaths):\n\t# display an update to the user\n\tprint(\"[INFO] processing image {}/{}\".format(i + 1, len(imagePaths)))\n\n\ttry:\n\t\t# load the image\n\t\timage = cv2.imread(imagePath)\n\n\t\t# resize the image\n\t\timage = aap.preprocess(image)\n\n\t\t# construct the path the output directory\n\t\tdirPath = os.path.join(args[\"annot\"], args[\"folder\"])\n\n\t\t# if the output directory does not exist, create it\n\t\tif not os.path.exists(dirPath):\n\t\t\tos.makedirs(dirPath)\n\n\t\t# write the labeled character to file\n\t\tp = os.path.sep.join([dirPath, \"{}_W{}H{}.png\".format(\n\t\t\tstr(count).zfill(6), args[\"width\"], args[\"height\"])])\n\t\tcv2.imwrite(p, image)\n\t\n\t\t# increment the count for the current key\n\t\tcount = count + 1\n\n\t# we are trying to control-c out of the script, so break from the\n\t# loop (you still need to press a key for the active window to\n\t# trigger this)\n\texcept KeyboardInterrupt:\n\t\tprint(\"[INFO] manually leaving script\")\n\t\tbreak\n\n\t# an unknown error has occurred for this particular image\n\texcept:\n\t\tprint(\"[INFO] skipping image...\")\n","sub_path":"intelligent_robot_vision/vision_oil_palm_fruit_detection/scripts/resize_aspect_ratio_dataset.py","file_name":"resize_aspect_ratio_dataset.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"309868527","text":"import os\nimport sys\nimport lib_common\nfrom lib_properties import pc\n\nimport win32con\nimport win32api\n\nTypeLibRegistryKey = win32api.RegOpenKey(win32con.HKEY_CLASSES_ROOT, \"TypeLib\")\n\ndef ComKeyAllNameVersion(key, keyName):\n\tresult = {}\n\tsys.stderr.write(\"ComKeyAllNameVersion key=%s keyName=%s\\n\" % (key,keyName) )\n\n\ttry:\n\t\tsubKey = win32api.RegOpenKey(key, keyName)\n\texcept:\n\t\texc = sys.exc_info()\n\t\tlib_common.ErrorMessageHtml(\"ComKeyAllNameVersion key=%s keyName=%s. Error:%s\"%(key,keyName,str(exc)))\n\n\ttry:\n\t\t\tsubNum = 0\n\t\t\tbestVersion = 0.0\n\t\t\twhile 1:\n\t\t\t\t\ttry:\n\t\t\t\t\t\t\tversionStr = win32api.RegEnumKey(subKey, subNum)\n\t\t\t\t\texcept win32api.error:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tname = win32api.RegQueryValue(subKey, versionStr)\n\t\t\t\t\t# sys.stderr.write(\"name=%s\\n\" % name)\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\t\tversionFlt = float(versionStr)\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tversionFlt = 0 # ????\n\n\t\t\t\t\tresult[ versionFlt ] = name\n\t\t\t\t\tsubNum = subNum + 1\n\tfinally:\n\t\t\twin32api.RegCloseKey(subKey)\n\n\treturn result\n\ndef ComKeyLastName(result):\n\tbestVrs = -999.0\n\tbestNam = \"\"\n\n\tfor vers, name in list( result.items() ):\n\t\tif vers > bestVrs:\n\t\t\tbestVrs = vers\n\t\t\tbestNam = name\n\t\t\t\n\treturn ( bestNam, bestVrs )\n\ndef CreateComRegisteredTypeLibNode( grph, key, name, version ):\n\ttypelibNode = lib_common.gUriGen.ComRegisteredTypeLibUri( key )\n\tstrTypLibName = \"%s / %.1f\" % ( name , version )\n\tgrph.add( (typelibNode, pc.property_information, lib_common.NodeLiteral(strTypLibName) ) )\n\n\treturn typelibNode\n","sub_path":"survol/lib_com_type_lib.py","file_name":"lib_com_type_lib.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"131750603","text":"import logging\nimport time\nimport functools\n\nfrom hazelcast.errors import create_error_from_message, HazelcastInstanceNotActiveError, is_retryable_error, \\\n HazelcastTimeoutError, TargetDisconnectedError, HazelcastClientNotActiveError, TargetNotMemberError, \\\n EXCEPTION_MESSAGE_TYPE\nfrom hazelcast.future import Future\nfrom hazelcast.util import AtomicInteger\nfrom hazelcast import six\n\n\ndef _no_op_response_handler(_):\n pass\n\n\nclass Invocation(object):\n __slots__ = (\"request\", \"timeout\", \"partition_id\", \"uuid\", \"connection\", \"event_handler\",\n \"future\", \"sent_connection\", \"urgent\", \"response_handler\")\n\n def __init__(self, request, partition_id=-1, uuid=None, connection=None,\n event_handler=None, urgent=False, timeout=None, response_handler=_no_op_response_handler):\n self.request = request\n self.partition_id = partition_id\n self.uuid = uuid\n self.connection = connection\n self.event_handler = event_handler\n self.urgent = urgent\n self.timeout = timeout\n self.future = Future()\n self.timeout = None\n self.sent_connection = None\n self.response_handler = response_handler\n\n def set_response(self, response):\n try:\n result = self.response_handler(response)\n self.future.set_result(result)\n except Exception as e:\n self.future.set_exception(e)\n\n def set_exception(self, exception, traceback=None):\n self.future.set_exception(exception, traceback)\n\n\nclass InvocationService(object):\n logger = logging.getLogger(\"HazelcastClient.InvocationService\")\n\n def __init__(self, client, reactor, logger_extras):\n config = client.config\n if config.network.smart_routing:\n self.invoke = self._invoke_smart\n else:\n self.invoke = self._invoke_non_smart\n\n self._client = client\n self._reactor = reactor\n self._logger_extras = logger_extras\n self._partition_service = None\n self._connection_manager = None\n self._listener_service = None\n self._check_invocation_allowed_fn = None\n self._pending = {}\n self._next_correlation_id = AtomicInteger(1)\n self._is_redo_operation = config.network.redo_operation\n self._invocation_timeout = self._init_invocation_timeout()\n self._invocation_retry_pause = self._init_invocation_retry_pause()\n self._shutdown = False\n\n def start(self, partition_service, connection_manager, listener_service):\n self._partition_service = partition_service\n self._connection_manager = connection_manager\n self._listener_service = listener_service\n self._check_invocation_allowed_fn = connection_manager.check_invocation_allowed\n\n def handle_client_message(self, message):\n correlation_id = message.get_correlation_id()\n\n if message.start_frame.has_event_flag():\n self._listener_service.handle_client_message(message, correlation_id)\n return\n\n invocation = self._pending.pop(correlation_id, None)\n if not invocation:\n self.logger.warning(\"Got message with unknown correlation id: %s\", message, extra=self._logger_extras)\n return\n\n if message.get_message_type() == EXCEPTION_MESSAGE_TYPE:\n error = create_error_from_message(message)\n return self._handle_exception(invocation, error)\n\n invocation.set_response(message)\n\n def shutdown(self):\n self._shutdown = True\n for invocation in list(six.itervalues(self._pending)):\n self._handle_exception(invocation, HazelcastClientNotActiveError())\n\n def _invoke_on_partition_owner(self, invocation, partition_id):\n owner_uuid = self._partition_service.get_partition_owner(partition_id)\n if not owner_uuid:\n self.logger.debug(\"Partition owner is not assigned yet\", extra=self._logger_extras)\n return False\n return self._invoke_on_target(invocation, owner_uuid)\n\n def _invoke_on_target(self, invocation, owner_uuid):\n connection = self._connection_manager.get_connection(owner_uuid)\n if not connection:\n self.logger.debug(\"Client is not connected to target: %s\" % owner_uuid, extra=self._logger_extras)\n return False\n return self._send(invocation, connection)\n\n def _invoke_on_random_connection(self, invocation):\n connection = self._connection_manager.get_random_connection()\n if not connection:\n self.logger.debug(\"No connection found to invoke\", extra=self._logger_extras)\n return False\n return self._send(invocation, connection)\n\n def _invoke_smart(self, invocation):\n if not invocation.timeout:\n invocation.timeout = self._invocation_timeout + time.time()\n\n try:\n if not invocation.urgent:\n self._check_invocation_allowed_fn()\n\n connection = invocation.connection\n if connection:\n invoked = self._send(invocation, connection)\n if not invoked:\n self._handle_exception(invocation, IOError(\"Could not invoke on connection %s\" % connection))\n return\n\n if invocation.partition_id != -1:\n invoked = self._invoke_on_partition_owner(invocation, invocation.partition_id)\n elif invocation.uuid:\n invoked = self._invoke_on_target(invocation, invocation.uuid)\n else:\n invoked = self._invoke_on_random_connection(invocation)\n\n if not invoked:\n invoked = self._invoke_on_random_connection(invocation)\n\n if not invoked:\n self._handle_exception(invocation, IOError(\"No connection found to invoke\"))\n except Exception as e:\n self._handle_exception(invocation, e)\n\n def _invoke_non_smart(self, invocation):\n if not invocation.timeout:\n invocation.timeout = self._invocation_timeout + time.time()\n\n try:\n if not invocation.urgent:\n self._check_invocation_allowed_fn()\n\n connection = invocation.connection\n if connection:\n invoked = self._send(invocation, connection)\n if not invoked:\n self._handle_exception(invocation, IOError(\"Could not invoke on connection %s\" % connection))\n return\n\n if not self._invoke_on_random_connection(invocation):\n self._handle_exception(invocation, IOError(\"No connection found to invoke\"))\n except Exception as e:\n self._handle_exception(invocation, e)\n\n def _init_invocation_retry_pause(self):\n invocation_retry_pause = self._client.properties.get_seconds_positive_or_default(\n self._client.properties.INVOCATION_RETRY_PAUSE_MILLIS)\n return invocation_retry_pause\n\n def _init_invocation_timeout(self):\n invocation_timeout = self._client.properties.get_seconds_positive_or_default(\n self._client.properties.INVOCATION_TIMEOUT_SECONDS)\n return invocation_timeout\n\n def _send(self, invocation, connection):\n if self._shutdown:\n raise HazelcastClientNotActiveError()\n\n correlation_id = self._next_correlation_id.get_and_increment()\n message = invocation.request\n message.set_correlation_id(correlation_id)\n message.set_partition_id(invocation.partition_id)\n self._pending[correlation_id] = invocation\n\n if invocation.event_handler:\n self._listener_service.add_event_handler(correlation_id, invocation.event_handler)\n\n self.logger.debug(\"Sending %s to %s\", message, connection, extra=self._logger_extras)\n\n if not connection.send_message(message):\n if invocation.event_handler:\n self._listener_service.remove_event_handler(correlation_id)\n return False\n return True\n\n def _handle_exception(self, invocation, error, traceback=None):\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\"Got exception for request %s, error: %s\" % (invocation.request, error),\n extra=self._logger_extras)\n\n if not self._client.lifecycle_service.is_running():\n invocation.set_exception(HazelcastClientNotActiveError(), traceback)\n self._pending.pop(invocation.request.get_correlation_id(), None)\n return\n\n if not self._should_retry(invocation, error):\n invocation.set_exception(error, traceback)\n self._pending.pop(invocation.request.get_correlation_id(), None)\n return\n\n if invocation.timeout < time.time():\n self.logger.debug(\"Error will not be retried because invocation timed out: %s\", error,\n extra=self._logger_extras)\n invocation.set_exception(HazelcastTimeoutError(\"Request timed out because an error occurred after \"\n \"invocation timeout: %s\" % error, traceback))\n self._pending.pop(invocation.request.get_correlation_id(), None)\n return\n\n invoke_func = functools.partial(self.invoke, invocation)\n self._reactor.add_timer(self._invocation_retry_pause, invoke_func)\n\n def _should_retry(self, invocation, error):\n if invocation.connection and isinstance(error, (IOError, TargetDisconnectedError)):\n return True\n\n if invocation.uuid and isinstance(error, TargetNotMemberError):\n return False\n\n if isinstance(error, (IOError, HazelcastInstanceNotActiveError)) or is_retryable_error(error):\n return True\n\n if isinstance(error, TargetDisconnectedError):\n return invocation.request.retryable or self._is_redo_operation\n\n return False\n","sub_path":"hazelcast/invocation.py","file_name":"invocation.py","file_ext":"py","file_size_in_byte":9936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"255200325","text":"from flask import Flask, render_template, redirect, request, jsonify\nimport time\nimport sys\nimport logging\n#from scipy import signal\nimport __main__ as main\nimport Alarm\n\napp = Flask(__name__, static_url_path='/static')\nrooms = []\n\n#initialize global variables\ndef init():\n global rooms\n for key in main.rooms:\n if 'lights' in main.rooms[key]:\n rooms.append(key)\n return None\n\n#Make sure the browser does not cache any of the images.\n#Prevents non updating measurement graphs\n@app.after_request\ndef add_header(response):\n response.cache_control.max_age = 1\n return response\n\n@app.route('/')\ndef homePage():\n global rooms\n return render_template('index.html',\n rooms=rooms,\n page=\"home\")\n\n#handle to show the graphs of every room and the pi\n@app.route('/graphs')\ndef graphs():\n global rooms\n return render_template('index.html',\n rooms = rooms,\n page='graphs')\n\n#Here are the handles for alarms\n@app.route('/alarm')\ndef alarm():\n global rooms\n Alarms = [key for key in Alarm.AlarmDict.keys()]\n return render_template('index.html',\n rooms = rooms,\n alarms = Alarms,\n page = 'alarm')\n\n@app.route('/alarm/setAlarm', methods=[\"POST\"])\ndef setAlarm():\n JSON = request.get_json(force=True)\n if not ( (\"Hour\" in JSON) and (\"Minute\" in JSON) ):\n return (\"\", 204)\n Hour = JSON[\"Hour\"]\n Minute = JSON[\"Minute\"]\n PreWakeUp = JSON.get(\"PreWakeUp\", None)\n Alarm.setupAlarm(Hour, Minute, PreWakeUp=PreWakeUp)\n return(\"\", 418)\n\n@app.route('/alarm/disableAlarm', methods=[\"POST\"])\ndef disableAlarm():\n JSON = request.get_json(force=True)\n if not \"AlarmName\" in JSON:\n return(\"\",204)\n Alarm.AlarmDict[JSON[\"AlarmName\"]][\"Stop\"] = True\n return(\"\",204)\n\n#From here on all the light handles begin\n@app.route('/rooms/')\ndef ledPage(roomID):\n global rooms\n if (roomID in rooms):\n Lights = {}\n for Light in [*main.rooms[roomID]['lights']]:\n Lights[Light] = main.rooms[roomID][\"lights\"][Light].functdict.keys()\n print(Lights)\n return render_template('index.html',\n rooms = rooms,\n roomID = roomID,\n lights = [*main.rooms[roomID]['lights']],\n functdict = main.rooms[roomID]['lights']['LedStrip'].functdict,\n color_rgb = main.rooms[roomID]['lights']['LedStrip'].color_rgb,\n wait = main.rooms[roomID]['lights']['LedStrip'].wait,\n page='led')\n else:\n return('404 not found, you have not installed this light')\n\n\n\n@app.route('/rooms/color', methods=[\"POST\"])\n# expect JSON in the following format:\n# {'roomID':A rooms defined in main\n# 'lightIDs':A list of lightIDs defined in main,\n# 'r': 0, 'g':0, 'b':0}\ndef colorControl():\n color = {'r':0,'g':0,'b':0}\n json = request.get_json(force=True)\n for key in color:\n color[key] = int(json[key])\n lightIDs = json['lightIDs']\n roomID = json['roomID']\n for lightID in lightIDs:\n main.rooms[roomID]['lights'][lightID].color_rgb = color\n return(\"\",204)\n\n@app.route('/rooms/function', methods=[\"POST\"])\ndef functionControl():\n #expect JSON with following format\n #{'roomID': A room defined in main.\n # 'lightIDs': A list of lightIDs defined in main.\n # 'function': An accepted function for the light}\n function = \"\"\n json = request.get_json(force=True)\n roomID = json['roomID']\n lightIDs = json['lightIDs']\n function = json['function']\n for lights in lightIDs:\n main.rooms[roomID]['lights'][lights].function = function\n return(\"\",204)\n\n@app.route('/rooms/attrNum', methods=[\"POST\"])\ndef attributeControlNum():\n #expect JSON with following format\n # {'roomID': A room defined in main,\n # 'lightIDs': A list of lightIDs defined in main.\n # 'key':value -> key is the variable that needs to be set to value\n # }\n json = request.get_json(force=True)\n roomID = json['roomID']\n lightIDs = json['lightIDs']\n json.pop('roomID', None)\n json.pop('lightIDs', None)\n attributes={}\n for key,value in json.items():\n attributes[key] = value\n #try:\n for lights in lightIDs:\n for key,value in attributes.items():\n setattr(main.rooms[roomID]['lights'][lights],\n key,\n float(value))\n #except:\n # pass\n return(\"\",204)\n\n@app.route('/rooms/attrBool', methods=[\"POST\"])\ndef attributeControlBool():\n #expect JSON with following format\n # {'roomID': A room defined in main,\n # 'lightIDs': A list of lightIDs defined in main.\n # 'key':value -> key is the variable that needs to be set to value\n # }\n\n json = request.get_json(force=True)\n roomID = json['roomID']\n lightIDs = json['lightIDs']\n json.pop('roomID', None)\n json.pop('lightIDs', None)\n attributes={}\n for key,value in json.items():\n attributes[key] = value\n try:\n for lights in lightIDs:\n for key,value in attributes.items():\n setattr(main.rooms[roomID]['lights'][lights],\n key,\n bool(value))\n except:\n pass\n return(\"\",204)\n\n@app.route('/rooms/data', methods=[\"POST\"])\ndef returnData():\n # expect json with only the room ID in it, return with the data in json\n json = request.get_json(force=True)\n try:\n roomID = json['roomID']\n if roomID in main.rooms:\n response = {}\n response.update(main.rooms[roomID][\"room\"].json)\n if \"lightID\" in json:\n response.update({\"color\": main.rooms[roomID][\"lights\"][json[\"lightID\"]].color_rgb})\n return(jsonify(response), 200)\n else:\n return(\"\",418) #I'm a teapot\n except:\n return(jsonify({'error': 'invalid'}), 200)\n\n@app.route('/rooms/graphData', methods=[\"POST\"])\ndef returnGraphData():\n # expect json with roomID\n JSON = request.get_json(force = True)\n RoomID = JSON[\"RoomID\"]\n Items = int(JSON.get(\"Items\", 1000))\n print(\"Items: {}\".format(Items))\n print(\"RoomID: {}\".format(RoomID))\n Data, Time = main.rooms[RoomID][\"room\"].getDataFromDB(Items = Items)\n # Temperature = signal.savgol_filter(Data[\"temperature\"], 25, 3)\n # Humidity = signal.savgol_filter(Data[\"humidity\"], 25, 3)\n # LDR = signal.savgol_filter(Data[\"ldr\"], 25, 3)\n Temperature = Data[\"temperature\"]\n Humidity = Data[\"humidity\"]\n LDR = Data[\"ldr\"]\n Graphs = [\n dict(\n data=[\n dict(\n x = Time,\n y = Temperature,\n name = \"Temperature\",\n yaxis = \"Temperature\",\n type = \"lines\"\n ),\n dict(\n x = Time,\n y = Humidity,\n name = \"Humidity\",\n yaxis = \"y2\",\n type = \"lines\"\n )\n ],\n layout = dict(\n title = \"Temperature and Humidity\",\n yaxis = {\"title\": \"Degree Celsius\"},\n xaxis = {\"automargin\": True},\n yaxis2 = dict(\n title = \"Relative Humidity\",\n overlaying = \"y\",\n side = \"right\"\n )\n )\n ),\n dict(\n data=[\n dict(\n x = Time,\n y = LDR,\n name = \"LDR\",\n yaxis = \"LDR\",\n type = \"lines\"\n )\n ],\n layout = dict(\n title = \"LDR\",\n xaxis = {\"automargin\": True},\n yaxis = {\"title\": \"Relative light strength\"},\n showlegend = True\n )\n )\n ]\n\n GraphsJSON = jsonify(Graphs)\n return(GraphsJSON)\n\n@app.route('/ritregistration')\ndef ritRegistration():\n global rooms\n return render_template('index.html',\n rooms = rooms,\n page='ritRegistration')\n\n@app.route('/ritregistration/addNew', methods=[\"POST\"])\ndef addNewMileage():\n #expects json with \"KMStand\", \"BeginEindPunt\", \"Getankt\"\n json = request.get_json(force=True)\n try:\n KMStand = json[\"KMStand\"]\n BeginEndPoint = json[\"BeginEindPunt\"]\n Tanked = json[\"Getankt\"]\n main.RitReg.insertMileage(KMStand, BeginEndPoint, Tanked)\n return(\"\",418)\n except Exception as e:\n return(e,400)\n\n@app.route('/ritregistration/getData')\ndef returnRitData():\n DataList = main.RitReg.getAllData()\n DataList.reverse()\n return(jsonify(DataList), 200)\n\n@app.route('/ritregistration/getMileage')\ndef returnMileAge():\n MileAges, MileAge = main.RitReg.averageMileage()\n return(jsonify({\"Mileage\": MileAge, \"Mileages\": MileAges}), 200)\n","sub_path":"LedServer/Website/WEB.py","file_name":"WEB.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"188740978","text":"## 리눅스에서만 가능함\n## 파일명 : cron01.py\n\n#pip install apscheduler\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport time\n# apscheduler 를 install 하고 import 한다\n# 먼저 함수를 작성한 후, scheduler에 add.job을 통해 넣어준다\n# 넣어줄 때 인터벌과 분,초 등을 같이 넣어주면 함수를 통한 출력값을 인터벌, 분, 초 단위로 출력한다\n\n\n# 먼저 hello world를 출력하겠다고 하는 함수를 작성한다.\n# 이 외에도 print보다 복잡한 함수를 실행시키도 싶어도 될 것 같다.\ndef exec_interval(): \n print(\"hello world\")\n \n# 두 번째 함수를 작성한다.(해도 되고 안 해도 됨)\n# time.localtime이라는 함수가 아마 현재 시간을 출력하는 것 같다\ndef exec_cron(): \n str = time.strftime('%c', time.localtime(time.time()))\n print(\"cron\", str)\n\nsched = BlockingScheduler()\n# 5초 간격으로 exec_interval()함수 호출하기\n# 위에서 만든 함수를 추가하고\nsched.add_job(exec_interval, 'interval', seconds=10)\n\n# 예약 방식 (매시간 10초 30초 일 경우 구동)\n# 하나 더 추가하고\nsched.add_job(exec_cron, 'cron', minute=\"*\", second=\"20-30\")\nsched.start()","sub_path":"web_practice/crawling/cron01.py","file_name":"cron01.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"313731809","text":"from .core import exceptions\nfrom .core.logger import sql_logger\n\n\n\"\"\" Decorator used to monkey patch the Django SQLCompiler.\n\nArgs:\n Accepts and passes on all arguments, the only one being used is the first\n positional argument which is the `self` class reference, the Django SQLCompiler\nReturns:\n decorated Django SQLCompiler class.\nRaises:\n Nothing\n\n\"\"\"\ndef profiler_wrapper(func):\n def inner(*args, **kwargs):\n compiler = args[0]\n\n try:\n sql_string = compiler.as_sql()[0]\n except EmptyResultSet:\n sql_string = 'EmptyResultSet raised from Django'\n\n sql_logger(\n compiler.using,\n compiler.query.model,\n sql_string\n )\n\n return func(*args, **kwargs)\n return inner\n\ntry:\n from django.db.models.sql.compiler import SQLCompiler, SQLInsertCompiler, SQLUpdateCompiler\n from django.db.models.sql.datastructures import EmptyResultSet\n\n # decorate any SQL compilers that have an `execute_sql` method\n SQLCompiler.execute_sql = profiler_wrapper(SQLCompiler.execute_sql)\n SQLInsertCompiler.execute_sql = profiler_wrapper(SQLInsertCompiler.execute_sql)\n SQLUpdateCompiler.execute_sql = profiler_wrapper(SQLUpdateCompiler.execute_sql)\nexcept:\n raise exceptions.DjangoImportException('Could not import Django SQLCompiler')\n","sub_path":"django_orm_profiler/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"307012483","text":"#!/usr/bin/python\n# >>stt\n'''\nset GOOGLE_APPLICATION_CREDENTIALS=C:/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\nexport GOOGLE_APPLICATION_CREDENTIALS=\"/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\"\n'''\n# set GOOGLE_APPLICATION_CREDENTIALS=C:/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\n\n# gcloud auth activate-service-account --key-file=\"/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\"\n\n# export GOOGLE_APPLICATION_CREDENTIALS=\"/home/pi/Downloads/vast-verve-320303-b594822e4a04.json\"\n\n# >>tts\n\n# vast-verve-320303-5b57752cb55a // .json\n\n# /home/pi/Downloads/vast-verve-320303-5b57752cb55a.json\n\n# 가상환경 활성화 source test/bin/activate\n\nfrom __future__ import division\n\nimport pandas as pd\nimport threading\nimport alsaaudio\nimport pyaudio\nimport pyttsx3\nimport socket\nimport numpy\nimport time\nimport pygame\nimport sys\nimport cv2\nimport ast\nimport re\nimport os\nimport gspeech\nfrom google.cloud import speech\nfrom six.moves import queue\n\n# 상위 디렉토리 추가 (for utils.config)\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom utils.config import Config as cfg\n\nsys.path.append(cfg.OPENPIBO_PATH + '/lib')\nfrom vision.visionlib import cCamera\nfrom motion.motionlib import cMotion\nfrom audio.audiolib import cAudio\nfrom oled.oledlib import cOled\nfrom speech.speechlib import cSpeech\nfrom speech.speechlib import cDialog\n\noObj = cOled(conf=cfg)\nm = cMotion(conf=cfg)\n'''\ndef playSound(filename):\n pygame.mixer.music.load(filename)\n pygame.mixer.music.play()\n\ndef tts_f():\n pygame.mixer.init()\n playSound('5m.mp3')\n time.sleep(30.0)\n'''\ndata_pd = pd.read_excel('/home/pi/openpibo-data/proc/dialog.xls', header = None) # names = ['명령어', '대답', '종료여부', '모션'])\ncmdLists = pd.DataFrame.to_numpy(data_pd)\n\n# 이거 안 씀 motion_flag = 0 # 모션 동작 연속적으로 할 수 있게 하는 flag\n# 이거 안 씀 count_flag = 0\n# 이거 안 씀 wait = 0\n\n###변수 선언부###\nstatus_speak_mode = 2 # 전역 / 안내모드 : 1, 일상모드 : 0 / cmdLists[i][3] 2: 대기모드\nflag_action = 0\nmotion_once = 0\nr_arm = -70\nr_hand = -25\nl_arm = 70\nl_hand = 25\n\nR_ARM_P = 8\nR_ARM_D = 0.05\nrepeat = 0 # 안내모드 시 순차적 동작 실행하는데 쓰임 \n\nr_arm_p = R_ARM_P # 파이보 모션 제어 \nr_arm_d = R_ARM_D\nl_arm_p = R_ARM_P\nr_hand_p = 4\nl_hand_p = r_hand_p\nr_hand_d = 1\n\nglobal_vol = 0\n\ndef speak(msg,num,voice):\n tObj = cSpeech(conf=cfg)\n filename = cfg.TESTDATA_PATH+\"/test.mp3\"\n print(\"voice : \", voice)\n tObj.tts(\"\\\n \"+msg+\"\\\n \"\\\n , filename)\n aObj = cAudio()\n # audio.setvolume(voice)\n # current_volume = audio.getvolume() # Get the current Volume\n # print(\"current_volume :\", current_volume)\n\n aObj.play(filename, out='local', volume=global_vol)\n time.sleep(num)\n \ndef CommandProc(stt):\n global flag_action, repeat\n global status_speak_mode, global_vol\n # 문자 양쪽 공백 제거\n cmd = stt.strip()\n # 입력 받은 문자 화면에 표시\n \n print('나 : ' + str(cmd))\n for i in range(len(cmdLists)):\n if cmdLists[i][0] in str(cmd):\n \n status_speak_mode = int(cmdLists[i][3]) \n print ('구글 스피치 : ' + cmdLists[i][1])\n print(\"global_vol : \", global_vol)\n speak(cmdLists[i][1], len(cmdLists[i][1])/5, global_vol)\n \n \n print(\"\\n>>말해주세요~\")\n print(\"cmdLists[i][3] : \", cmdLists[i][3])\n gsp.resumeMic()\n return cmdLists[i][2]\n # 리스트에 없는 명령어일 경우 \n print ('구글 스피치 : 무슨 얘기하는 거니?')\n speak('무슨이야기 하는거니?', 1, global_vol)\n status_speak_mode = 2\n time.sleep(2)\n gsp.resumeMic()\n print(\"\\n>>말해주세요~\")\n return 1\n\n\n\ndef eye_tracking(r_arm, r_hand, motionData_x, motionData_y, l_arm, l_hand):\n MT = 300\n \n global flag_action, motion_once\n global status_speak_mode\n # print(\"------------\")\n # print(\"status_speak_mode(tracking): \", status_speak_mode)\n \n if status_speak_mode == 1 and motion_once == 0 : # 안내모드 일 때 \n motion_once += 1\n oObj.draw_image(cfg.TESTDATA_PATH +\"/i2.JPEG\")\n oObj.show() # oled 띄우는 것 \n m.set_motion(name=\"guide2\", cycle=1)\n motion_once = 0\n status_speak_mode = 2\n \n \n elif status_speak_mode == 0 and motion_once == 0 : # 일상모드 일 떄 \n motion_once += 1\n oObj.draw_image(cfg.TESTDATA_PATH +\"/conversation.png\")\n oObj.show() # oled 띄우는 것 \n m.set_motion(name=\"clapping2\", cycle=1)\n motion_once = 0\n status_speak_mode = 2\n\n elif status_speak_mode == 2: # 음성 입력 없을 때 그냥 트래킹만 하는 것. 숨쉬기 모드 \n # print(\"r_hand, l_hand: \", r_hand, l_hand)\n m.set_motors(positions=[0,0,-70,r_hand, motionData_x, motionData_y,0,0,70,l_hand], movetime=MT) \n oObj.draw_image(cfg.TESTDATA_PATH +\"/pibo_logo.png\") \n oObj.show() # oled 띄우는 것 \n \n \n\"\"\"구글 스피치 부분 함수 끝\"\"\"\n\n#연결할 서버(수신단)의 ip주소와 port번호\nTCP_IP = '192.168.0.79'\nTCP_PORT = 5001\n#송신을 위한 socket 준비\nsock = socket.socket()\nsock.connect((TCP_IP,TCP_PORT))\n#OpenCV를 이용해서 webcam으로 부터 이미지 추출\n\ncapture = cv2.VideoCapture(0)\n\nm.set_motors(positions=[0,0,-70,-25,0,0,0,0,70,25], movetime=500)\n\ntime.sleep(3)\nmotion_list = []\n\naudio = alsaaudio.Mixer()\ncurrent_volume = audio.getvolume() # Get the current Volume\naudio.setvolume(30) # Set the volume to 70%.\n\n# Audio recording parameters\nRATE = 16000\nCHUNK = int(RATE / 10) # 100ms\n\ntext = pyttsx3.init()\n\n'''\n# 말하는 속도\ntext.setProperty('rate', 150)\nrate = text.getProperty('rate')\n# 목소리\nvoices = text.getProperty('voices')\n# text.setProperty('voice', voices[0].id) # 남성\ntext.setProperty('voice', 'english+f1') # 여성\n# text.setProperty('voice', voices[1].id) # 여성\n'''\n\ngsp = gspeech.Gspeech()\ndef main(): \n while True:\n # 음성 인식 될때까지 대기 한다.\n stt = gsp.getText()\n if stt is None:\n break\n gsp.pauseMic()\n time.sleep(0.01)\n CommandProc(stt)\n\n #끝내자는 명령이 들어오면 프로그램 종료\n if ('끝내자' in stt):\n break\n \nSTT = threading.Thread(target = main) # 구글 스피치 thread \nSTT.start()\nwhile True :\n \n ret, frame = capture.read()\n frame = cv2.flip(frame,0)\n\n #추출한 이미지를 String 형태로 변환(인코딩)시키는 과정\n encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]\n result, imgencode = cv2.imencode('.jpg', frame, encode_param)\n data = numpy.array(imgencode)\n stringData = data.tostring()\n\n #String 형태로 변환한 이미지를 socket을 통해서 전송\n sock.send( str(len(stringData)).ljust(16).encode())\n sock.send( stringData )\n\n # 파이보로부터 motion data 수신했는지 판단 여부\n people = sock.recv(1).decode(\"utf8\")\n # print(\"motion_send : \", people)\n\n if people != '0' and people != '':\n \n # 파이보로부터 Motion data 수신\n motion_list = sock.recv(1024)\n # print(\"motion_list : \", motion_list)\n motion_list = eval(motion_list)\n motionData_x = int(motion_list[0])\n motionData_y = int(motion_list[1])\n vol = int(motion_list[2])\n global_vol = vol\n motion_list = []\n \n if status_speak_mode == 2 : \n # 숨쉬는 귀여운 파이보 가만히 있을 때 \n if l_hand < -20 or l_hand > 25 :\n l_hand_p = -1 * l_hand_p\n # print(\">>l_hand_p :\", l_hand_p)\n l_hand -= l_hand_p\n r_hand = -1 * l_hand\n \n\n eye_track = threading.Thread(target=eye_tracking, args=(r_arm, r_hand, motionData_x, motionData_y, l_arm, l_hand))\n eye_track.start()\n else :\n if status_speak_mode == 2 : \n # 숨쉬는 귀여운 파이보 가만히 있을 때 \n if l_hand < -20 or l_hand > 25 :\n l_hand_p = -1 * l_hand_p\n # print(\">>l_hand_p :\", l_hand_p)\n l_hand -= l_hand_p\n r_hand = -1 * l_hand\n\n eye_track = threading.Thread(target=eye_tracking, args=(r_arm, r_hand, 0, 0, l_arm, l_hand))\n eye_track.start()\n \ncv2.destroyAllWindows() \nsock.close()\n","sub_path":"openpibo-example/speech/restrict_new.py","file_name":"restrict_new.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"597123193","text":"\nimport lit.util\n\n\nclass CXXCompiler(object):\n def __init__(self, path, flags=[], compile_flags=[], link_flags=[], use_ccache=False):\n self.path = path\n self.flags = list(flags)\n self.compile_flags = list(compile_flags)\n self.link_flags = list(link_flags)\n self.use_ccache = use_ccache\n self.type = None\n self.version = (None, None, None)\n self._initTypeAndVersion()\n\n def _initTypeAndVersion(self):\n # Get compiler type and version\n macros = self.dumpMacros()\n if macros is None:\n return\n compiler_type = None\n major_ver = minor_ver = patchlevel = None\n if '__clang__' in macros.keys():\n compiler_type = 'clang'\n # Treat apple's llvm fork differently.\n if '__apple_build_version__' in macros.keys():\n compiler_type = 'apple-clang'\n major_ver = macros['__clang_major__']\n minor_ver = macros['__clang_minor__']\n patchlevel = macros['__clang_patchlevel__']\n elif '__GNUC__' in macros.keys():\n compiler_type = 'gcc'\n major_ver = macros['__GNUC__']\n minor_ver = macros['__GNUC_MINOR__']\n patchlevel = macros['__GNUC_PATCHLEVEL__']\n self.type = compiler_type\n self.version = (major_ver, minor_ver, patchlevel)\n\n def _basicCmd(self, infiles, out, is_link=False):\n cmd = []\n if self.use_ccache and not is_link:\n cmd += ['ccache']\n cmd += [self.path]\n if out is not None:\n cmd += ['-o', out]\n if isinstance(infiles, list):\n cmd += infiles\n elif isinstance(infiles, str):\n cmd += [infiles]\n else:\n raise TypeError('infiles must be a string or list')\n return cmd\n\n def preprocessCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out) + ['-x', 'c++', '-E']\n cmd += self.flags + self.compile_flags + flags\n return cmd\n\n def compileCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out) + ['-x', 'c++', '-c']\n cmd += self.flags + self.compile_flags + flags\n return cmd\n\n def linkCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out, is_link=True)\n cmd += self.flags + self.link_flags + flags\n return cmd\n\n def compileLinkCmd(self, infiles, out=None, flags=[]):\n cmd = self._basicCmd(infiles, out, is_link=True) + ['-x', 'c++']\n cmd += self.flags + self.compile_flags + self.link_flags + flags\n return cmd\n\n def preprocess(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.preprocessCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def compile(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.compileCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def link(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.linkCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def compileLink(self, infiles, out=None, flags=[], env=None, cwd=None):\n cmd = self.compileLinkCmd(infiles, out, flags)\n out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)\n return cmd, out, err, rc\n\n def dumpMacros(self, infiles=None, flags=[], env=None, cwd=None):\n if infiles is None:\n infiles = '/dev/null'\n flags = ['-dM'] + flags\n cmd, out, err, rc = self.preprocess(infiles, flags=flags, env=env,\n cwd=cwd)\n if rc != 0:\n return None\n parsed_macros = dict()\n lines = [l.strip() for l in out.split('\\n') if l.strip()]\n for l in lines:\n assert l.startswith('#define ')\n l = l[len('#define '):]\n macro, _, value = l.partition(' ')\n parsed_macros[macro] = value\n return parsed_macros\n\n def getTriple(self):\n cmd = [self.path] + self.flags + ['-dumpmachine']\n return lit.util.capture(cmd).strip()\n","sub_path":"test/libcxx/test/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"608476935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nReads lines of input.txt and writes whether brackets are nested correctly to output.txt \n\"\"\"\n__author__ = \"ElizabethS5\"\n\nimport sys\n\n\ndef get_lines(filename):\n \"\"\"Open and read txt file, return list of lines\"\"\"\n f = open(filename, \"r\")\n lines = f.read().split('\\n')\n f.close()\n return lines\n\n\ndef string_to_list(string):\n \"\"\"Takes a string and puts characters in a list\"\"\"\n line_list = []\n copy = string[:]\n while copy:\n if copy[:2] == '(*' or copy[:2] == '*)':\n line_list.append(copy[:2])\n copy = copy[2:]\n else:\n line_list.append(copy[0])\n copy = copy[1:]\n return line_list\n\n\ndef test_line_list(line_list):\n \"\"\"If line_list passes return 'Yes' else return 'No' and failing position\"\"\"\n copy = line_list[:]\n stack = []\n position = 1\n while copy:\n if copy[0][-1] in '>}])':\n if len(stack) == 0:\n return f\"NO {position}\"\n elif copy[0] == '>' and stack[-1] == '<':\n stack.pop()\n elif copy[0] == ']' and stack[-1] == '[':\n stack.pop()\n elif copy[0] == '}' and stack[-1] == '{':\n stack.pop()\n elif copy[0] == ')' and stack[-1] == '(':\n stack.pop()\n elif copy[0] == '*)' and stack[-1] == '(*':\n stack.pop()\n else:\n return f\"NO {position}\"\n elif copy[0][0] in '<{[(':\n if len(copy) == 1:\n return f\"NO {position}\"\n else:\n stack.append(copy[0])\n position += 1\n copy.pop(0)\n if len(stack) != 0:\n return f'NO {position}'\n else:\n return 'YES'\n\n\ndef write_output(string):\n \"\"\"Write string to file\"\"\"\n f = open('output.txt', 'w')\n f.write(string)\n f.close()\n\n\ndef main(args):\n \"\"\"Use input.txt to write output.txt\"\"\"\n lines_from_input = get_lines('input.txt')\n line_lists = [string_to_list(line) for line in lines_from_input]\n output = '\\n'.join([test_line_list(line_list)\n for line_list in line_lists])\n write_output(output)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"nested.py","file_name":"nested.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"231374509","text":"# python3\r\n\r\nimport sys\r\n\r\n\r\nclass Table:\r\n def __init__(self,id,rows):\r\n self._id = id\r\n self._rows = rows\r\n\r\n\r\ndef getParent(table):\r\n while table != parent[table]:\r\n parent[table] = getParent(parent[table])\r\n table = parent[table]\r\n\r\n return table\r\n\r\ndef merge(destination, source):\r\n global maxRows\r\n i_id = getParent(destination)\r\n j_id = getParent(source)\r\n\r\n if i_id == j_id:\r\n return\r\n\r\n if rank[i_id] > rank[j_id]:\r\n parent[j_id] = i_id\r\n tables[i_id]._rows += tables[j_id]._rows\r\n if maxRows < tables[i_id]._rows:\r\n maxRows = tables[i_id]._rows\r\n else:\r\n parent[i_id] = j_id\r\n tables[j_id]._rows += tables[i_id]._rows\r\n if maxRows < tables[j_id]._rows:\r\n maxRows = tables[j_id]._rows\r\n\r\n if rank[i_id] == rank[j_id]:\r\n rank[j_id] += 1\r\n\r\n\r\nn, m = map(int, sys.stdin.readline().split())\r\nlines = list(map(int, sys.stdin.readline().split()))\r\nrank = [1] * n\r\nparent = list(range(0, n))\r\ntables = [Table(i,rowCount) for i,rowCount in enumerate(lines)]\r\nmaxRows = max(lines)\r\nfor i in range(m):\r\n destination, source = map(int, sys.stdin.readline().split()) \r\n merge(destination - 1, source - 1)\r\n print(maxRows)\r\n \r\n","sub_path":"datastructures/Week3/merging_tables.py","file_name":"merging_tables.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"529911577","text":"from pymongo import MongoClient\nimport json\nHostName = '192.168.2.169'\n\ndef mongoconn():\n db = MongoClient(HostName,27017)\n my_set = db.MultiApp.test\n return my_set\n\ndef ReadData(my_set):\n j = 0\n #n = 0\n ViewName = None\n for i in my_set.find():\n ViewName = i.get(\"_id\")\n k=i.get('states').get('views')\n if(k!=[]):\n for n in range(len(k)):\n text = k[n].get('text')\n KeyNode = {\n '_id': ViewName,\n 'viewsnum':n,\n 'text':text\n }\n (KeyNode)\n #n+=1\n #break\n\n\n\n\nmyset = mongoconn()\nReadData(myset)","sub_path":"ExperiemtProject/ReadMongo.py","file_name":"ReadMongo.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"523610462","text":"\"\"\"Defines URL patterns for users\"\"\"\nfrom django.urls import path, include\n\nfrom . import views\n\napp_name = 'users'\n\nurlpatterns = [\n\t# Include default auth urls.\n # http://localhost:8000/users/login/ (users.urls.py & login view)\n\tpath('', include('django.contrib.auth.urls')),\n # Registration page.\n # http://localhost:8000/users/register/\n\tpath('register/', views.register, name='register'),\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"219368932","text":"import turtle\nwn = turtle.Screen()\nwn.bgcolor(\"white\")\nbob = turtle.Turtle()\n\n\ndef ex_thirteen(t, n):\n t.speed(0)\n for i in range(n):\n t.penup()\n deg = 360 / n\n t.lt(deg)\n t.fd(100)\n t.stamp()\n t.back(100)\n\n\ndef ex_fourteen():\n print(\"bale\", \"turn\", \"dole\", \"nest\")\n\n\ndef ex_fifteen():\n print(\"pythons,\", \"no it is a boa,\", \"No\")\n\n\n# ex_thirteen(bob, 1000)\n# ex_fourteen()\n# ex_fifteeen()\n\n\nwn.mainloop()\n","sub_path":"3.8_excercises.py","file_name":"3.8_excercises.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"73951805","text":"# Linked lists are structures where each node or object in the list is pointing\n# to another node in the list. Since they are referenced by pointers to other objects\n# in the list, they have some advantages/disadvantages over arrays. Nodes in the linked\n# list are not accessible through indices, so they have a linear lookup time. However,\n# this allows them to be stored apart in memory rather than as a sequential block.\n#\n# Built-in list functions and deque Python trivialize implementation of linked lists\n# in Python, this remains a good pedagogical exercise. This linked list is implemented\n# going doubly, with a head, tail, next, and previous pointers. - Luan Pham 2018\n\n# The linked list is comprised of nodes, an abstraction of a connected object.\nclass node:\n\n def __init__(self, data, next, prev): # initializer/constructor\n self.data = data # data stored in the node\n self.next = None # pointer to the next node\n self.prev = None # ponter to the previous node\n\n # some getters and setters, apparently not as important in python (no private constructors anyway)\n def get_data(self):\n return self.data\n\n def set_data(data):\n self.data = data\n\n def get_next(self):\n return self.next\n\n def get_prev(self):\n return self.prev\n\n def set_next(self, Node):\n self.next = Node\n\n def print_node(self):\n print(self.data)\n\nclass linked_list:\n\n def __init__(self): # initializer/constructor\n self.head = None\n self.tail = None\n self.size = 0\n\n def prepend(self, data): # adds a node to the beginning of the linked list\n # creates a node with parameterized data\n new_node = node(data, next = None, prev = None)\n if self.size == 0: # if the linked list is empty, everything points to the new node\n self.tail = self.head = new_node\n else:\n self.head.prev = new_node # former head prev points to new node\n new_node.next = self.head # new node next points to the former head\n self.head = new_node # assigns the head pointer to the new node\n self.size += 1\n\n def append(self, data): # adds a node to the end of the linked list\n # creates a node with parameterized data\n new_node = node(data, next = None, prev = None)\n if self.size == 0: # if the linked list is empty, everything points to the new node\n self.head = self.tail = new_node\n else:\n new_node.prev = self.tail # new node previous points to former tail\n self.tail.next = new_node # former tail next points to the new node\n self.tail = new_node # tail pointer reassigned\n self.size += 1\n\n def remove(self, data):\n if self.size == 0:\n raise IndexError(\"This list is empty!\")\n current = self.head\n counter = 0\n while current:\n if current.data == data:\n if self.size == 1: # exit when we get a last match to avoid pointer issues\n self.head = self.tail = current.next = current.prev = None\n self.size -=1\n return\n counter +=1\n self.size -=1\n if current.prev:\n current.prev.next = current.next\n current.next.prev = current.prev\n else:\n self.head = current.next\n current.next.prev = None\n\n current = current.next\n\n print(\"%s instance(s) of %s removed!\" % (counter, data))\n\n def contains(self, data):\n if self.size == 0:\n raise IndexError(\"This list is empty!\")\n current = self.head\n while current:\n if current.data == data:\n return True\n current = current.next\n return False\n\n\n def print_list(self):\n if self.size == 0:\n raise IndexError(\"This list is empty!\")\n current = self.head #start at the beginning\n while current:\n print(current.data, end = \" \")\n current = current.next #moves to the next node by reassigning pointer\n","sub_path":"python/linked-list.py","file_name":"linked-list.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"29423138","text":"from mcpi.minecraft import Minecraft\nimport shapes\nfrom math import ceil\n\ndef main(place):\n mc=Minecraft.create(place,4711)\n x,y,z=mc.player.getPos()\n mc.setBlocks(x-5,y,z-20,x+5,y+5,z-8,0)\n shapes.sphere(mc,x,y,z-15,5,5,5,80,0,4,4,4,0.5,1)\n shapes.cylinder(mc,x,y,z-10,3,3,1,shapes.XY,80)\n shapes.cylinder(mc,x,y,z-10,2,2,1,shapes.XY,0)\n height=0\n nx,ny,nz=x,y-1,z-15\n clear=True\n while clear:\n height+=1\n clearpos=0\n pos=[(0,0),(5,5),(-5,5),(-5,-5),(5,-5)]\n for xmod,zmod in pos:\n if mc.getBlock(xmod+nx,ny-height,zmod+nz) in [0,8,9,10,11,31,37,38,39,40,65,78,102,107,]:\n clearpos+=1\n if clearpos==0:\n clear=False\n shapes.cylinder(mc,x,y-1-ceil(height/2),z-15,5,ceil(height/2),5,shapes.XZ,80)\n mc.setBlocks(x-2,y-1-height,z-11,x+2,y-1,z-9,80)\n #mc.setBlocks(x-5,y-1,z-20,x+5,y-1,z-8,80)\n\nif __name__=='__main__':\n main(\"127.0.0.1\")\n","sub_path":"newigloo.py","file_name":"newigloo.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"58273595","text":"# Copyright 2018 Contributors to Hyperledger Sawtooth\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\"\"\"Implements the CONFIRM_ADD_ROLE_ADMIN message\nusage: rbac.role.admin.confirm.create()\"\"\"\nimport logging\nfrom rbac.common import addresser\nfrom rbac.common.crypto.keys import Key\nfrom rbac.common.proposal.proposal_message import ProposalMessage\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ConfirmAddRoleAdmin(ProposalMessage):\n \"\"\"Implements the CONFIRM_ADD_ROLE_ADMIN message\n usage: rbac.role.admin.confirm.create()\"\"\"\n\n @property\n def message_action_type(self):\n \"\"\"The action type performed by this message\"\"\"\n return addresser.MessageActionType.CONFIRM\n\n @property\n def message_subaction_type(self):\n \"\"\"The subsequent action performed or proposed by this message\"\"\"\n return addresser.MessageActionType.ADD\n\n @property\n def message_object_type(self):\n \"\"\"The object type this message acts upon\"\"\"\n return addresser.ObjectType.ROLE\n\n @property\n def message_relationship_type(self):\n \"\"\"The relationship type this message acts upon\"\"\"\n return addresser.RelationshipType.ADMIN\n\n def make_addresses(self, message, signer_keypair):\n \"\"\"Makes the appropriate inputs & output addresses for the message\"\"\"\n if not isinstance(message, self.message_proto):\n raise TypeError(\"Expected message to be {}\".format(self.message_proto))\n if not isinstance(signer_keypair, Key):\n raise TypeError(\"Expected signer_keypair to be provided\")\n\n signer_admin_address = addresser.role.admin.address(\n message.role_id, signer_keypair.public_key\n )\n\n relationship_address = addresser.role.admin.address(\n message.role_id, message.user_id\n )\n\n proposal_address = self.address(\n object_id=message.role_id, target_id=message.user_id\n )\n\n inputs = [signer_admin_address, proposal_address]\n outputs = [proposal_address, relationship_address]\n\n return inputs, outputs\n","sub_path":"rbac/common/role/confirm_admin.py","file_name":"confirm_admin.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"130043127","text":"from envs import env_for_training as env\nfrom simulator import simulate_Interceptor_V2 as sim_env\nfrom savers.debug_logger import create_logger\nfrom envs.env_for_training import Init, Draw, Game_step\n\n# import Interceptor_V2 as env\n# from Interceptor_V2 import Init, Draw, Game_step\n\nlogger = create_logger(\"smart_player\")\ndebug = logger.debug\n\n\ndef choose_action(steps_to_sim):\n SHOOT = 3\n WAIT = 1\n diff_score = simulate_shoot_score(steps_to_sim)\n # if it worth shooting, shoot:\n if diff_score > 0:\n debug(\"shoot!\")\n action_button = SHOOT\n else:\n debug(\"skip\")\n action_button = WAIT\n return action_button\n\n\ndef simulate_shoot_score(steps_to_sim):\n \"\"\"\n :param steps_to_sim: how many step until end of game (1000-stp)\n :return: predicted_shoot_score - predicted_wait_score\n \"\"\"\n SHOOT = 3\n WAIT = 1\n MAX_STEPS = 300\n actions = [SHOOT, WAIT]\n scores = []\n steps_to_sim = min(steps_to_sim, MAX_STEPS)\n for action_button in actions:\n # init new simulate game\n sim_env.Simulate(env.world, env.turret, env.rocket_list, env.interceptor_list, env.city_list,\n env.explosion_list)\n # act\n sim_env.simulate_game_step(action_button)\n\n # peace steps until end of game\n for i in range(steps_to_sim):\n _, _, _, _, score = sim_env.simulate_peace_step()\n # last step : save score in end of peace game\n scores.append(score)\n\n shoot_score = scores[0] - scores[1]\n # debug\n # if shoot_score != 0:\n # debug(f\"steps_to_simulate = {steps_to_sim}\\n diff={shoot_score}\")\n return shoot_score\n\n\nif __name__ == \"__main__\":\n Init()\n max_stp = 1000\n init_stp = 8\n\n # move turent to best angle\n for stp in range(init_stp):\n action_button = 2\n r_locs, i_locs, c_locs, ang, score = Game_step(action_button)\n\n # shoot only if it's worth it\n for stp in range(stp, max_stp):\n action_button = choose_action(max_stp - stp)\n r_locs, i_locs, c_locs, ang, score = Game_step(action_button)\n debug(f\"{stp}.score = {score}\")\n\n if action_button == 3 or stp % 1 == 0:\n Draw()\n","sub_path":"simulator/non_ai_smart_player.py","file_name":"non_ai_smart_player.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"541709167","text":"import numpy as np\nimport random\n\n\nclass SOM():\n\n def __init__(self, nNodes, inputDim, nClass, eta=0.2):\n self.nNodes = nNodes\n self.weights = None\n self.eta = eta\n self.inputDim = inputDim\n self.nClass = nClass\n\n def initWeights(self):\n self.weights = np.zeros((self.nNodes, self.inputDim))\n for i in range(self.nNodes):\n self.weights[i][:] = np.random.random(self.inputDim)\n return self.weights\n\n def euclidianDist(self, pattern):\n dBest = 10000\n for i in range(self.weights.shape[0]):\n d = np.transpose(\n pattern-self.weights[i][:])@(pattern-self.weights[i][:])\n if d < dBest:\n dBest = d\n iBest = i\n return iBest\n\n def neighbourhood(self, index, epoch, epochs):\n if epoch/epochs <= 0.1:\n dist = 25\n elif epoch/epochs <= 0.25:\n dist = 10\n elif epoch/epochs <= 0.75:\n dist = 5\n else:\n dist = 1\n\n neighbours = np.linspace(\n index-dist, index+dist, 2*dist+1)\n neighbours = np.where(neighbours < 0, neighbours + 100, neighbours)\n neighbours = np.where(neighbours > 99, neighbours - 100, neighbours)\n return neighbours\n\n def weightsUpdate(self, pattern, neighbours):\n for i in neighbours:\n self.weights[int(i)][:] = self.weights[int(i)][:] + \\\n self.eta*(pattern-self.weights[int(i)][:])\n return self.weights\n\n\ndef main():\n\n ######## Import animal data ############\n data = []\n with open('/home/andrej/school/ann-course/lab2/animals.dat', 'r') as f:\n d = f.readlines()\n for i in d:\n k = i.rstrip().split(\",\")\n data.append([int(i) for i in k])\n data = np.array(data, dtype='O')\n animalData = np.reshape(data, (32, 84))\n\n ######### Import animal names ############\n data = []\n with open('/home/andrej/school/ann-course/lab2/animalnames.txt', 'r') as f:\n d = f.readlines()\n for i in d:\n k = i.rstrip(\"'\").split()\n data.append([i for i in k])\n data = np.array(data, dtype='O')\n animalNames = data\n animalNames = np.squeeze(animalNames)\n ########################################\n\n ####### init som and weights ###########\n som = SOM(nNodes=100, inputDim=84, nClass=32)\n weights = som.initWeights()\n epochs = 20\n ######################################\n\n ######## Training ###################\n for epoch in range(epochs):\n for i in range(32):\n iBest = som.euclidianDist(animalData[i][:])\n # print(iBest)\n neighbours = som.neighbourhood(iBest, epoch, epochs)\n # print(neighbours)\n som.weightsUpdate(animalData[i][:], neighbours)\n ######################################\n\n ########## Testing ##################\n winnerIndexes = []\n for i in range(32):\n iBest = som.euclidianDist(animalData[i][:])\n winnerIndexes.append(iBest)\n ######################################\n\n animalNames = np.ndarray.tolist(animalNames)\n animalNames = [x for _, x in sorted(zip(winnerIndexes, animalNames))]\n print(animalNames)\n winnerIndexes = sorted(winnerIndexes)\n # print(winnerIndexes)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"RBF, CL, SOM/som_animals.py","file_name":"som_animals.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"12610386","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore\nimport os\n\nSERVER_PORT = 502\nCLIENT_ADDR = \"127.0.0.1\"\nCLIENT_PORT = 502\nSEGMAC = \"241.0.0.1\"\nSCMAC = \"0.0.0.241.0.0.0.1\"\nPROTO_ID = 0\n\nFREQ_ADDR = 0x00F5\nTEMP_UPPER = 0x4100\nTEMP_LOWER = 0x4101\nMOISTURE_UPPER = 0x4102\nMOISTURE_LOWER = 0x4103\nEARTH_TEMP_UPPER = 0x4104\nEARTH_TEMP_LOWER = 0x4105\nEARTH_MOISTURE_UPPER = 0x4106\nEARTH_MOISTURE_LOWER = 0x4107\nCO2_UPPER = 0x4108\nCO2_LOWER = 0x4109\nILLUM_UPPER = 0x410A\nILLUM_LOWER = 0x410B\nCOMMAND_ADDR = 0x2100\n\nINI_FILE = \".\" + os.sep + \"config.ini\"\n\ndef loadConfig():\n\tglobal SERVER_PORT, CLIENT_ADDR, CLIENT_PORT\n\tsettings = QtCore.QSettings(INI_FILE, QtCore.QSettings.IniFormat)\n\tSERVER_PORT = int(settings.value(\"server-port\", SERVER_PORT).toString())\n\tCLIENT_ADDR = settings.value(\"client-addr\", CLIENT_ADDR).toString()\n\tCLIENT_PORT = int(settings.value(\"client-port\", CLIENT_PORT).toString())","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"269087948","text":"memory = []\r\nmask = ''\r\n\r\ndef find(a, m):\r\n # address and mask \r\n a = bin(int(a))\r\n a = a.replace('b', '0' * (37 - len(a)))\r\n lm = list(m)\r\n la = list(a)\r\n for i in range(len(lm)):\r\n if lm[i] != '0':\r\n la[i] = lm[i]\r\n \r\n # number of binary numbers\r\n a = ''.join(la)\r\n bn = a.count('X')\r\n\r\n mx = ''\r\n for x in range(bn):\r\n mx += '1'\r\n \r\n mx = int(mx, 2)\r\n\r\n # all bi possibilities\r\n ab = []\r\n for i in range(mx + 1):\r\n bi = bin(int(i))[2:].zfill(bn)\r\n bi = list(bi)\r\n ab.append(bi)\r\n \r\n # all addresses\r\n aa = []\r\n for q in ab:\r\n ind = 0\r\n dup = list(a)\r\n a = list(a)\r\n for x in range(len(a)):\r\n if a[x] == 'X':\r\n dup[x] = q[ind]\r\n ind += 1\r\n aa.append(int((''.join(dup)), 2))\r\n return aa\r\n\r\nfor line in open(\"inputs/day14.txt\"):\r\n line = line.rstrip('\\n')\r\n key, val = line.split(' = ')\r\n if key == 'mask':\r\n mask = val\r\n print(mask)\r\n else:\r\n mem = key[4 : -1]\r\n add = find(mem, mask)\r\n dup = False\r\n loop = 0\r\n for x in memory:\r\n if x[0] in add:\r\n index = add.index(x[0])\r\n del add[index]\r\n memory[loop][1] = val\r\n loop += 1\r\n for x in add:\r\n memory.append([x, val])\r\n\r\nsums = 0\r\nfor x in memory:\r\n sums += int(x[1])\r\n\r\nprint(sums)\r\n\r\n# memory = []\r\n# mask = ''\r\n\r\n# for line in open(\"inputs/day14.txt\"):\r\n# line = line.rstrip('\\n')\r\n# key, val = line.split(' = ')\r\n# if key == 'mask':\r\n# mask = val\r\n# else:\r\n# val = bin(int(val))\r\n# val = val.replace('b', '0' * (37 - len(val)))\r\n# lm = list(mask)\r\n# lv = list(val)\r\n# for i in range(len(lm)):\r\n# if lm[i] != 'X':\r\n# lv[i] = lm[i]\r\n# mask = ''.join(lm)\r\n# val = ''.join(lv)\r\n# mem = key[4 : -1]\r\n# dup = False\r\n# loop = 0\r\n# for x in memory:\r\n# if x[0] == mem:\r\n# dup = True\r\n# memory[loop][1] = val\r\n# loop += 1\r\n# if dup == False:\r\n# memory.append([mem, val])\r\n\r\n# sums = 0\r\n# for x in memory:\r\n# sums += int(x[1], 2)\r\n \r\n# print(sums)","sub_path":"2020/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"344515906","text":"#!/usr/bin/env python\n\nimport zmarkdown\nfrom zmarkdown.util import etree\nfrom zmarkdown.blockprocessors import BlockProcessor\nimport re\n\n\nclass VideoExtension(zmarkdown.Extension):\n def __init__(self, js_support=False, **kwargs):\n zmarkdown.Extension.__init__(self)\n\n self.config = {\n 'dailymotion_width': ['480', 'Width for Dailymotion videos'],\n 'dailymotion_height': ['270', 'Height for Dailymotion videos'],\n 'vimeo_width': ['500', 'Width for Vimeo videos'],\n 'vimeo_height': ['281', 'Height for Vimeo videos'],\n 'yahoo_width': ['624', 'Width for Yahoo! videos'],\n 'yahoo_height': ['351', 'Height for Yahoo! videos'],\n 'youtube_width': ['560', 'Width for Youtube videos'],\n 'youtube_height': ['315', 'Height for Youtube videos'],\n 'ina_width': ['620', 'Width for INA videos'],\n 'ina_height': ['349', 'Height for INA videos'],\n 'jsfiddle': [False, ''],\n 'jsfiddle_width': ['560', 'Width for jsfiddle'],\n 'jsfiddle_height': ['560', 'Height for jsfiddle'],\n }\n\n self.config['youtube_short_width'] = self.config['youtube_width']\n self.config['youtube_short_height'] = self.config['youtube_height']\n\n # Override defaults with user settings\n for key, value in kwargs.items():\n self.setConfig(key, value)\n\n if js_support:\n self.setConfig(\"jsfiddle\", True)\n\n def add_inline(self, md, name, klass, pat):\n RE = r'(^|\\n)!\\(' + pat + r'\\)'\n md.parser.blockprocessors.add(\"video-\" + name,\n klass(md, RE,\n self.config[\"{}_width\".format(name)][0],\n self.config[\"{}_height\".format(name)][0]),\n \">reference\")\n\n def extendZMarkdown(self, md, md_globals):\n self.add_inline(md, 'dailymotion', Dailymotion,\n r'https?://www\\.dailymotion\\.com/video/(?P[a-z0-9]+)(_[\\w\\-]*)?')\n self.add_inline(md, 'vimeo', Vimeo,\n r'https?://(www.|)vimeo\\.com/(?P\\d+)\\S*')\n self.add_inline(md, 'yahoo', Yahoo,\n r'https?://screen\\.yahoo\\.com/.+/?')\n self.add_inline(md, 'youtube', Youtube,\n r'https?://(www\\.)?youtube\\.com/watch\\?\\S*v=(?P\\S[^&/]+)'\n r'(?P&ab_channel=[\\w%]+)?')\n self.add_inline(md, 'youtube_short', Youtube,\n r'https?://youtu\\.be/(?P\\S[^?&/]+)?')\n self.add_inline(md, 'ina', Ina,\n r'https?://www\\.ina\\.fr/video/(?P[A-Z0-9]+)/([\\w\\-]*)\\.html')\n if self.config[\"jsfiddle\"][0]:\n self.add_inline(md, 'jsfiddle', JsFiddle,\n r'https?://(www.|)jsfiddle\\.net(/(?P\\w+))?/'\n r'(?P\\w+)(/(?P[0-9]+)|)/?')\n\n\nclass VideoBProcessor(BlockProcessor):\n def __init__(self, md, patt, width, height):\n BlockProcessor.__init__(self, md.parser)\n self.md = md\n self.width = width\n self.height = height\n self.RE = re.compile(patt)\n\n def test(self, parent, block):\n return bool(self.RE.search(block))\n\n def run(self, parent, blocks):\n m = self.RE.search(blocks[0])\n\n el = self.handle_match(m)\n if el is None:\n return False\n\n block = blocks.pop(0)\n before = block[:m.start()]\n after = block[m.end():]\n\n if before: # pragma: no cover\n # This should never occur because regex require that the expression is starting the block.\n # Do not raise an exception because exception should never be generated.\n self.md.parser.parseBlocks(parent, [before])\n\n parent.append(el)\n\n if after:\n blocks.insert(0, after)\n\n @staticmethod\n def extract_url(_): # pragma: no cover\n # Should be overridden in sub-class\n return \"\"\n\n def handle_match(self, m):\n url = self.extract_url(m)\n if url is None:\n return None\n return self.render_iframe(url, self.width, self.height)\n\n @staticmethod\n def render_iframe(url, width, height):\n iframe = etree.Element('iframe')\n iframe.set('width', width)\n iframe.set('height', height)\n iframe.set('src', url)\n iframe.set('allowfullscreen', 'true')\n iframe.set('frameborder', '0')\n return iframe\n\n\nclass Dailymotion(VideoBProcessor):\n @staticmethod\n def extract_url(m):\n return 'https://www.dailymotion.com/embed/video/%s' % m.group('dailymotionid')\n\n\nclass Vimeo(VideoBProcessor):\n @staticmethod\n def extract_url(m):\n return 'https://player.vimeo.com/video/%s' % m.group('vimeoid')\n\n\nclass Yahoo(VideoBProcessor):\n @staticmethod\n def extract_url(m):\n return m.string + '?format=embed&player_autoplay=false'\n\n\nclass Youtube(VideoBProcessor):\n @staticmethod\n def extract_url(m):\n return 'https://www.youtube.com/embed/%s' % m.group('youtubeid')\n\n\nclass Ina(VideoBProcessor):\n @staticmethod\n def extract_url(m):\n return 'http://player.ina.fr/player/embed/%s/1/1b0bd203fbcd702f9bc9b10ac3d0fc21/560/315/1/148db8' % m.group(\n 'inaid')\n\n\nclass JsFiddle(VideoBProcessor):\n @staticmethod\n def extract_url(m):\n fields = (m.group('jsfiddleuser'), m.group('jsfiddleid'), m.group('jsfiddlerev'))\n if fields[0] is not None and fields[2] is None:\n # Only two part, revision could be in id pattern\n try:\n int(fields[1])\n # It is a revision !\n fields = (None, fields[0], fields[1])\n except ValueError:\n pass\n if fields[0] is not None and fields[1] is not None and fields[2] is None:\n # Base version link, should not be allowed because content can be changed externally\n return None\n base = \"https://jsfiddle.net/{}/embedded/result,js,html,css/\"\n return base.format(\"/\".join([t for t in fields if t is not None]))\n\n\ndef makeExtension(*args, **kwargs):\n return VideoExtension(*args, **kwargs)\n","sub_path":"zmarkdown/extensions/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"539100876","text":"import vtk\nimport numpy as np\nimport pydicom as dicom\nimport platform\nimport os\nimport time\nimport vtk.util.numpy_support as vtknp\nimport glob\nimport re\n\n\nminValGr = 0.0 # Skalierung der Grauwerte\nmaxValGr = 255.0\ndiffValGr = maxValGr - minValGr\n\ncountList = []\ncount = -1\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef close_window(iren):\n render_window = iren.GetRenderWindow()\n render_window.Finalize()\n iren.TerminateApp()\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef showAxes():\n axes = vtk.vtkAxesActor()\n widget = vtk.vtkOrientationMarkerWidget()\n widget.SetOrientationMarker(axes)\n widget.SetInteractor(iren)\n widget.SetEnabled(1)\n widget.InteractiveOn()\n\n return(axes, widget)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef timer_callback(obj, event):\n global updateVectors, numTimeSteps, numSlices, count, minDiffAtPos\n\n count = (count + 1) % numTimeSteps\n\n for actImage in range(numSlices):\n images[actImage].GetPointData().SetScalars(listOfVTKDataLists[actImage][updateVectors[actImage][count]])\n\n iren.Render()\n\n actMesh[...] = displacements[timeVectors[minDiffAtPos][updateVectors[minDiffAtPos][count]]]\n polydata.Modified()\n\n iren.GetRenderWindow().Render()\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef calcUpdateVector(timeVectors, pos):\n numSlices = len(timeVectors)\n numTimeSteps = len(timeVectors[pos])\n\n updateVectors = np.ndarray((numSlices, numTimeSteps), int)\n\n for i in range(numSlices):\n lowerIndex = 0\n for j in range(numTimeSteps):\n needVal = True\n if i == pos:\n updateVectors[i][j] = j\n continue\n else:\n actTime = timeVectors[pos][j]\n upperIndex = len(timeVectors[i])\n\n lastDiff = float('Inf')\n\n for k in range(lowerIndex, upperIndex):\n actDiff = int(abs(actTime - timeVectors[i][k]))\n\n if actDiff == 0:\n updateVectors[i][j] = k\n needVal = False\n break\n\n if actDiff < lastDiff:\n lastDiff = actDiff\n else:\n updateVectors[i][j] = k - 1\n needVal = False\n break\n\n lowerIndex = k\n\n if needVal:\n updateVectors[i][j] = k\n\n return updateVectors\n\n\n\ndef DTWcalcUpdateVector(timeVectors, pos):\n numSlices = len(timeVectors)\n numTimeSteps = len(timeVectors[pos])\n\n updateVectors = np.ndarray((numSlices, numTimeSteps), int)\n\n for i in range(numSlices):\n lowerIndex = 0\n for j in range(numTimeSteps):\n needVal = True\n if i == pos:\n updateVectors[i][j] = j\n continue\n else:\n actTime = timeVectors[pos][j]\n upperIndex = len(timeVectors[i])\n\n lastDiff = float('Inf')\n\n for k in range(lowerIndex, upperIndex):\n actDiff = int(abs(actTime - timeVectors[i][k]))\n\n if actDiff == 0:\n updateVectors[i][j] = k\n needVal = False\n break\n\n if actDiff < lastDiff:\n lastDiff = actDiff\n else:\n updateVectors[i][j] = k - 1\n needVal = False\n break\n\n lowerIndex = k\n\n if needVal:\n updateVectors[i][j] = k\n\n return updateVectors\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef readFilesToDicomArray(path, listOfSeries):\n listOfDicomArrays = []\n listOfPixelDims = []\n listOfPixelSpacings = []\n listOfPlaneShapes = []\n listOfMaxCounts = []\n listOfMatrices = []\n\n\n dictFilesDCM = {}\n\n for series in listOfSeries: # für jeden Ordner\n for dirName, subdirList, fileList in os.walk(path + series):\n for filename in fileList:\n if \".dcm\" in filename.lower():\n actDs = dicom.read_file(os.path.join(dirName, filename))\n pos = str(actDs.ImagePositionPatient + actDs.ImageOrientationPatient)\n\n if (pos not in dictFilesDCM):\n dictFilesDCM[pos] = {}\n dictFilesDCM[pos][actDs.InstanceNumber] = os.path.join(dirName, filename)\n\n minDiffAtPos = -1\n minDiff = float('Inf')\n\n timeVectors = []\n\n\n for actPos, actDict in dictFilesDCM.items(): # für jede Slice\n sortEntries = sorted(actDict)\n\n actTimeVector = []\n timeVectors.append(actTimeVector)\n\n first = True\n actIndex = 0\n\n for actFile in sortEntries: # für jedes einzelne Bild\n\n actDicom = dicom.read_file(actDict[actFile])\n\n if first: # organisiere Metadaten + ArrayDicom anlegen\n first = False\n\n winCen = actDicom.WindowCenter\n winWidth = actDicom.WindowWidth\n resIntercept = actDicom.RescaleIntercept\n resSlope = actDicom.RescaleSlope\n\n ConstPixelDims = (len(sortEntries),\n int(actDicom.Rows),\n int(actDicom.Columns))\n\n planeShape = (int(actDicom.Rows), int(actDicom.Columns), 1)\n\n ConstPixelSpacing = (float(actDicom.PixelSpacing[0]),\n float(actDicom.PixelSpacing[1]),\n float(actDicom.SliceThickness))\n\n position = actDicom.ImagePositionPatient\n orientation = actDicom.ImageOrientationPatient\n\n xdir = orientation[0:3]\n ydir = orientation[3:6]\n zdir = [0.0, 0.0, 0.0]\n\n vtk.vtkMath.Cross(xdir, ydir, zdir)\n\n matrix = vtk.vtkMatrix4x4()\n\n for i in range(3):\n matrix.SetElement(i, 0, xdir[i])\n matrix.SetElement(i, 1, ydir[i])\n matrix.SetElement(i, 2, zdir[i])\n matrix.SetElement(i, 3, position[i])\n\n ArrayDicom = np.zeros(ConstPixelDims, dtype = float)\n\n actTimeVector.append(int(actDicom.TriggerTime))\n\n ArrayDicom[actIndex, :, :] = actDicom.pixel_array\n actIndex += 1\n\n np.clip(resSlope * diffValGr / (winWidth - 1) * ArrayDicom + ((resIntercept - winCen) / (winWidth - 1) + 0.5) * diffValGr + minValGr,\n minValGr, maxValGr, out = ArrayDicom)\n\n listOfMaxCounts.append(len(sortEntries))\n listOfDicomArrays.append(ArrayDicom)\n listOfPixelDims.append(ConstPixelDims)\n listOfPixelSpacings.append(ConstPixelSpacing)\n listOfPlaneShapes.append(planeShape)\n listOfMatrices.append(matrix)\n\n\n for i in range(len(timeVectors)):\n actTimeVector = timeVectors[i]\n factor = 800.0 / actTimeVector[-1]\n\n for j in range(len(actTimeVector)):\n actTimeVector[j] = int(factor * actTimeVector[j])\n\n if len(actTimeVector) > 0:\n tempDiff = actTimeVector[1] - actTimeVector[0]\n if tempDiff < minDiff:\n minDiff = tempDiff\n minDiffAtPos = i\n\n return (listOfDicomArrays, listOfPixelDims, listOfPixelSpacings,\n listOfPlaneShapes, listOfMaxCounts, listOfMatrices, minDiffAtPos,\n timeVectors)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef getAllVTKDataLists(listOfDicomArrays):\n\n resultList = []\n\n for ArrayDicom in listOfDicomArrays:\n VTK_dataList = []\n\n for actImage in range(len(ArrayDicom)):\n VTK_dataList.append(vtknp.numpy_to_vtk(ArrayDicom[actImage].ravel(),deep=True, array_type=vtk.VTK_FLOAT))\n\n resultList.append(VTK_dataList)\n\n return resultList\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef readDynpt():\n f = open(pathIn + \"simulation/x.dynpt\", 'rb')\n header = dict(re.findall(r\"(\\w*):(\\w*)\", f.read(1024).decode('utf-8')))\n\n shapeTest = [int(header['t']), int(header['x']), 3]\n\n data = np.fromfile(f, dtype=np.float32)\n\n if header['unites_x'] == \"um\":\n data /= 1000\n\n return(header, data.reshape(shapeTest))\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef getModelPosition():\n minZPos = float(\"inf\")\n\n for dirName, subdirList, fileList in os.walk(pathIn + \"segmentation\"):\n for filename in fileList:\n if \".dcm\" in filename.lower():\n actDs = dicom.read_file(os.path.join(dirName, filename))\n actZPos = actDs.ImagePositionPatient[2]\n\n if actZPos < minZPos:\n minZPos = actZPos\n\n return [actDs.ImagePositionPatient[0], actDs.ImagePositionPatient[1], minZPos]\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\nif platform.platform()[0] == \"W\":\n print(\"OS: win\")\n pathIn = \"c:/users/vch/desktop/Bredies/CASE01/\"\n pathIn = \"c:/users/vch/Desktop/ILHgit/\"\n\nelse:\n print(\"OS: not win\")\n pathIn = \"/home/horakv/Desktop/Bredies/CASE01/\"\n pathIn = \"/home/horakv/Desktop/ILHgit/\"\n\n\nseriesList = []\n\nseriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9111835162.607/\") # 40\nseriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9111848390.608/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9111901895.609/\") # 25*16\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9112235900.610/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9112254187.611/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9112308236.612/\") # 25\nseriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9114136191.628/\") # 40\n#seriesList.append(\"cine/Visit_1___MRI_Data_and_Images_14d/Smart-1.3.6.1.4.1.16787.100.1.2.20160613.9114329783.631/\") # 40\n\n#t0 = time.time()\n\n\n\n\n\npathResult = \"/home/horakv/Desktop/results/withCushion\"\n\n\n\nDTWmatrix = np.load(\"{}matrix15.npy\".format(pathResult))\n\nn, m = DTWmatrix.shape\n\nprint(DTWmatrix.shape, n, m)\n\nDTW = np.full((n+1, m+1), np.inf)\n\n\n#s: array [1..n], t: array [1..m]) {\n#DTW := array [0..n, 0..m]\n\n\nDTW[0][0] = 0\n\nfor i in range(1, n):\n for j in range(1, m):\n DTW[i][j] = DTWmatrix[i][j] + min(DTW[i-1][j], DTW[i][j-1], DTW[i-1][j-1])\n\nb, a = n - 1, m - 1\nDTWpath = []\n\nwhile (b, a) != (0, 0):\n DTWpath.append((a, b))\n b, a = min((b - 1, a), (b, a - 1), (b - 1, a - 1), key=lambda x: DTW[x[0], x[1]])\n\nDTWpath.append((0, 0))\n\n\nDTWpath.sort()\n\n\n\nmin((i, DTWpath[i]) for i in range(len(DTWpath)) )\n\n#cost, path = DTW(matrix)\nprint(DTWpath)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n(listOfDicomArrays, listOfPixelDims, listOfPixelSpacings,\n listOfPlaneShapes, listOfMaxCounts, listOfMatrices, minDiffAtPos,\n timeVectors) = readFilesToDicomArray(pathIn, seriesList)\n\n#t1 = time.time()\n\n#print(\"Zeit:\", t1-t0)\n\n#updateVectors = calcUpdateVector(timeVectors, minDiffAtPos)\nupdateVectors = DTWcalcUpdateVector(timeVectors, minDiffAtPos)\n\nnumSlices = len(timeVectors)\nnumTimeSteps = len(timeVectors[minDiffAtPos])\n\nnumImages = len(listOfDicomArrays)\n\n###############################\n# place for data manipulation #\n###############################\n\n\nlistOfVTKDataLists = getAllVTKDataLists(listOfDicomArrays)\n\n#########################################################\n\nren = vtk.vtkRenderer()\nren.SetBackground(0.8, 0.8, 0.8)\n\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetSize(1000, 1000)\n\nrenWin.AddRenderer(ren)\n\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\n#############################\n# =============================================================================\n# axes = vtk.vtkAxesActor()\n# widget = vtk.vtkOrientationMarkerWidget()\n# widget.SetOrientationMarker(axes)\n# widget.SetInteractor(iren)\n# widget.SetEnabled( 1 )\n# widget.InteractiveOn()\n# =============================================================================\n#############################\n\n\nlookupTable = vtk.vtkLookupTable()\nlookupTable.SetNumberOfTableValues(256)\nlookupTable.SetRange(0.0, 255.0)\nfor j in range(256):\n lookupTable.SetTableValue(j, j/255.0, j/255.0, j/255.0, min(j/25.5, 1.0))\nlookupTable.Build()\n\nimages = []\n\nfor actImage in range(numImages):\n image = vtk.vtkImageData()\n image.SetDimensions(listOfPlaneShapes[actImage])\n image.SetSpacing(listOfPixelSpacings[actImage][0], listOfPixelSpacings[actImage][1], 0.0)\n\n image.AllocateScalars(vtk.VTK_FLOAT, 1)\n image.GetPointData().SetScalars(listOfVTKDataLists[actImage][0])\n\n images.append(image)\n\n mapTransparency = vtk.vtkImageMapToColors()\n mapTransparency.SetLookupTable(lookupTable)\n mapTransparency.PassAlphaToOutputOn()\n mapTransparency.SetInputData(image)\n\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputConnection(mapTransparency.GetOutputPort())\n mapper.SetColorModeToDirectScalars()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetInterpolationToFlat()\n actor.GetProperty().ShadingOff()\n actor.GetProperty().LightingOff()\n actor.SetUserMatrix(listOfMatrices[actImage])\n\n ren.AddActor(actor)\n\n\n\n###############################################################################\n# Struktur des Modells einlesen\n###############################################################################\n\nfilenames = glob.glob(pathIn + 'mesh/*.vtk')\n\nreader = vtk.vtkUnstructuredGridReader()\nreader.SetFileName(filenames[0])\n\ngeometryFilter = vtk.vtkGeometryFilter()\ngeometryFilter.SetInputConnection(reader.GetOutputPort())\ngeometryFilter.Update()\n\npolydata = geometryFilter.GetOutput()\nscalarRange = polydata.GetScalarRange()\n\nactMesh = vtknp.vtk_to_numpy(polydata.GetPoints().GetData())\nactMesh /= 1000 # Daten sind in um statt in mm gegeben -> Korrektur\n\n\n###############################################################################\n# Verschiebungen vorbereiten und Visualisierung\n###############################################################################\n\n(header, displacements) = readDynpt()\n\nmaxCount = len(displacements)\n\nmMapper = vtk.vtkPolyDataMapper()\nmMapper.SetInputData(polydata)\nmMapper.SetScalarRange(scalarRange)\n\n\n\nmActor = vtk.vtkActor()\nmActor.SetMapper(mMapper)\nmActor.SetPosition(getModelPosition())\n\nmActor.GetProperty().SetOpacity(0.2)\n\nif scalarRange == (0.0, 1.0):\n mMapper.ScalarVisibilityOff()\n mActor.GetProperty().SetColor(1,0,0)\n\nren.AddActor(mActor)\n\n###############################################################################\n\niren.Initialize()\niren.AddObserver('TimerEvent', timer_callback)\niren.CreateRepeatingTimer(10)\n\n\nrenWin.Render()\nprint(\"Start\")\niren.Start()\n\n\nif platform.platform()[0] != \"W\":\n close_window(iren)\n del renWin, iren\n","sub_path":"DTWdicomModel2position.py","file_name":"DTWdicomModel2position.py","file_ext":"py","file_size_in_byte":15408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"180054473","text":"import os\n\ntry:\n import _thread\nexcept ImportError:\n import _dummy_thread as _thread\n\nfrom Costants import PORT\nfrom ServerLaunchThread import ServerLaunchThread\n\nos.chdir('..')\nos.chdir('..')\nos.chdir('..')\n\nprint(os.getcwd())\n\nos.chdir('Binaries')\nos.chdir('Win32')\n\nNUM_SERVER = 10\n\nMAX_GAMESPEED = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nthreads = []\n\nfor i in range(NUM_SERVER):\n threads.append(ServerLaunchThread(i, \"Thread-\" + str(i), MAX_GAMESPEED[i], PORT[i], 2400))\n\nfor thread in threads:\n thread.start()","sub_path":"client/testing/TestSpeedServerLaunch.py","file_name":"TestSpeedServerLaunch.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"20026214","text":"try:\n import RPi.GPIO as GPIO\nexcept(ImportError):\n print(\"Failed to import RPI. Aborting.\")\n\nimport time\n\nclass IO:\n def __init__(self):\n GPIO.setmode(GPIO.BCM)\n self.OutputList = [[\"A\", 26],\n [\"B\", 24],\n [\"C\", 22],\n [\"D\", 23],\n [\"E\", 21],\n [\"F\", 19],\n [\"G\", 16],\n [\"H\", 18]]\n self.InputList = [[]]\n self.Initial = True\n if self.Initial == True:\n self.SetupIO()\n\n def SetupIO(self):\n for count in range(0, len(self.OutputList)):\n GPIO.setup(self.OutputList[count][1], GPIO.OUT, initial=GPIO.LOW)\n time.sleep(0.5)\n\n def InitialTest(self):\n print(\"Beginning initial output test...\")\n for count in range(0, len(self.OutputList)):\n for count in range(0,2):\n print(\"Output: {}\".format(self.OutputList[count][0]))\n GPIO.output(self.OutputList[count][1], GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(self.OutputList[count][1], GPIO.LOW)\n print(\"Complete.\")\n\n def ManualControl(self):\n print(\"\"\"\"\"\")\n\n\n\nIO = IO()\nIO.InitialTest()\n\n\n","sub_path":"AES_Master/AES 4.3/IOTestingUtility.py","file_name":"IOTestingUtility.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"619908299","text":"from src.consts import *\nfrom pygame.image import load\n\n\nclass ConverterBase:\n \"\"\"画像変換基底クラス\"\"\"\n _instance = None\n\n def __new__(cls, converter=None):\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n\n return cls._instance\n\n def __init__(self, converter):\n self.converter = converter\n\n def get_converter(self):\n return self.converter\n\n\nclass MapImage(ConverterBase):\n \"\"\"マップ画像変換クラス\"\"\"\n\n def __init__(self):\n converter = {SEA: load(PATH_SEA).convert(),\n SAND: load(PATH_SAND).convert(),\n GLASS: load(PATH_GLASS).convert(),\n FOREST: load(PATH_FOREST).convert(),\n MOUNTAIN: load(PATH_MOUNTAIN).convert(),\n RIVER: load(PATH_RIVER).convert()}\n super().__init__(converter)\n\n\nclass PlayerImage(ConverterBase):\n \"\"\"プレーヤー画像変換クラス\"\"\"\n\n def __init__(self):\n converter = {DIRECTION_UP: load(PATH_IMAGE_PLAYER_UP).convert_alpha(),\n DIRECTION_RIGHT: load(PATH_IMAGE_PLAYER_RIGHT).convert_alpha(),\n DIRECTION_DOWN: load(PATH_IMAGE_PLAYER_DOWN).convert_alpha(),\n DIRECTION_LEFT: load(PATH_IMAGE_PLAYER_LEFT).convert_alpha()}\n super().__init__(converter)\n\n\nclass NationImage(ConverterBase):\n \"\"\"国家画像変換クラス\"\"\"\n\n def __init__(self):\n converter = {NATION_LEVEL_VILLAGE: load(PATH_IMAGE_VILLAGE).convert_alpha(),\n NATION_LEVEL_TOWN: load(PATH_IMAGE_TOWN).convert_alpha(),\n NATION_LEVEL_CASTLE_TOWN: load(PATH_IMAGE_CASTLE_TOWN).convert_alpha(),\n NATION_LEVEL_CASTLE: load(PATH_IMAGE_CASTLE).convert_alpha()}\n super().__init__(converter)\n","sub_path":"src/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"414403145","text":"import csv\nimport re\n\n\ndef read():\n with open('aa.csv', 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n title,link,*rest=row\n host, href = link.rsplit('/', 1)\n chapter_num = re.search('第(\\d+)話', title).groups()[0]\n print(f'move {href} {chapter_num:0>6}')\n print(', '.join(row))\n\ndef write():\n # writer\n import csv\n with open('csv/some.csv', 'w') as csvfile:\n header_columns = ['word', 'count', 'timestamp']\n writer = csv.DictWriter(csvfile, fieldnames=header_columns)\n writer.writeheader()\n writer. writerow({\n \"count\": count,\n \"word\": word,\n \"timestamp\": timestamp\n })\n","sub_path":"python_01/csv_operate.py","file_name":"csv_operate.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"295760514","text":"import re\nimport time\nimport threading\nimport calendar\nfrom datetime import datetime, date, timedelta\nimport math\nimport collections\nimport common_definition as CMN_DEF\nfrom libs.common.common_variable import GlobalVar as GV\nimport common_function as CMN_FUNC\n\nMonthTuple = collections.namedtuple('MonthTuple', ('year', 'month'))\nQuarterTuple = collections.namedtuple('QuarterTuple', ('year', 'quarter'))\nTimeDurationTuple = collections.namedtuple('TimeDurationTuple', ('time_duration_start', 'time_duration_end'))\nScrapyClassTimeDurationTuple = collections.namedtuple('ScrapyClassTimeDurationTuple', ('scrapy_class_index', 'time_duration_type', 'time_duration_start', 'time_duration_end'))\nScrapyClassCompanyTimeDurationTuple = collections.namedtuple('ScrapyClassCompanyTimeDurationTuple', ('scrapy_class_index', 'company_code_number', 'time_duration_type', 'time_duration_start', 'time_duration_end'))\n\nsingleton_thread_lock = threading.Lock()\n\n\nclass Singleton:\n \"\"\"\n A non-thread-safe helper class to ease implementing singletons.\n This should be used as a decorator -- not a metaclass -- to the class that should be a singleton.\n\n The decorated class can define one `__init__` function that takes only the `self` argument. Other than that, there are\n no restrictions that apply to the decorated class.\n\n To get the singleton instance, use the `Instance` method. Trying to use `__call__` will result in a `TypeError` being raised.\n\n Limitations: The decorated class cannot be inherited from.\n\n \"\"\"\n\n def __init__(self, decorated):\n self._decorated = decorated\n\n\n def Instance(self, cfg=None):\n \"\"\"\n Returns the singleton instance. Upon its first call, it creates a\n new instance of the decorated class and calls its `__init__` method.\n On all subsequent calls, the already created instance is returned.\n\n \"\"\"\n try:\n return self._instance\n except AttributeError:\n with singleton_thread_lock:\n try:\n return self._instance\n except AttributeError:\n # import pdb; pdb.set_trace()\n self._instance = self._decorated() # Call __init__() of the class\n if hasattr(self._instance, \"initialize\"):\n if cfg is None:\n self._instance.initialize()\n else:\n self._instance.initialize(**cfg)\n return self._instance\n\n\n def __call__(self):\n raise TypeError('Singletons must be accessed through Instance()')\n\n\n def __instancecheck__(self, inst):\n return isinstance(inst, self._decorated)\n\n#############################################################################################\n\nclass FinanceTimeBase(object):\n\n def __init__(self):\n self.year = None\n self.republic_era_year = None\n pass\n\n\n def to_string(self):\n raise NotImplementedError\n\n\n def get_value(self):\n raise NotImplementedError\n\n\n def get_value_tuple(self):\n raise NotImplementedError\n\n\n def check_continous_time_duration(self, another_time_duration):\n raise NotImplementedError\n\n\n def get_year(self):\n assert (self.year is not None), \"year value should NOT be None\"\n return self.year\n\n\n def get_republic_era_year(self):\n assert (self.republic_era_year is not None), \"republic_era_year value should NOT be None\"\n return self.republic_era_year\n\n\n def setup_year_value(self, year_value):\n if CMN_FUNC.is_republic_era_year(year_value):\n self.republic_era_year = int(year_value)\n self.year = self.republic_era_year + CMN_DEF.REPUBLIC_ERA_YEAR_OFFSET\n else:\n self.year = int(year_value)\n self.republic_era_year = self.year - CMN_DEF.REPUBLIC_ERA_YEAR_OFFSET\n\n\n def check_continous_time_duration(self, another_time_duration):\n return CMN_FUNC.is_continous_time_duration(self, another_time_duration)\n\n\n @staticmethod\n def get_time_unit_type():\n # \"\"\"IMPORTANT: This is a static method, override it with @staticmethod !\"\"\"\n raise NotImplementedError\n\n\n @classmethod\n def from_string(cls, time_string):\n # \"\"\"IMPORTANT: This is a class method, override it with @classmethod !\"\"\"\n raise NotImplementedError\n\n\n @staticmethod\n def from_time_string(time_string, time_unit=None):\n time_obj = None\n # import pdb; pdb.set_trace()\n if time_unit is None:\n# Detect time unit from the time string format\n if CMN_FUNC.is_date_str_format(time_string):\n time_obj = FinanceDate.from_string(time_string)\n elif CMN_FUNC.is_week_str_format(time_string):\n time_obj = FinanceWeek.from_string(time_string)\n elif CMN_FUNC.is_month_str_format(time_string):\n time_obj = FinanceMonth.from_string(time_string)\n elif CMN_FUNC.is_quarter_str_format(time_string):\n time_obj = FinanceQuarter.from_string(time_string)\n elif CMN_FUNC.is_year_str_format(time_string):\n time_obj = FinanceYear.from_string(time_string)\n else:\n raise ValueError(\"Unknown time format: %s\" % time_string)\n else:\n if time_unit == CMN_DEF.DATA_TIME_UNIT_DAY:\n time_obj = FinanceDate(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_WEEK:\n time_obj = FinanceWeek(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_MONTH:\n time_obj = FinanceMonth(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_QUARTER:\n time_obj = FinanceQuarter(time_string)\n elif time_unit == CMN_DEF.DATA_TIME_UNIT_YEAR:\n time_obj = FinanceYear(time_string)\n else:\n raise ValueError(\"Unsupport time unit[%d] for transform\" % time_unit)\n return time_obj\n\n\n # @staticmethod\n # def date_str_to_time_obj(date_string, time_unit):\n # CMN_FUNC.check_date_str_format(time_string)\n # time_obj = None\n # # import pdb; pdb.set_trace()\n # if time_unit == CMN_DEF.DATA_TIME_UNIT_WEEK:\n # time_obj = CMN.CLS.FinanceMonth(time_str)\n # elif time_unit == CMN_DEF.DATA_TIME_UNIT_MONTH:\n # time_obj = CMN.CLS.FinanceMonth(time_str)\n # elif time_unit == CMN_DEF.DATA_TIME_UNIT_QUARTER:\n # time_obj = CMN.CLS.FinanceQuarter(time_str)\n # elif time_unit == CMN_DEF.DATA_TIME_UNIT_YEAR:\n # time_obj = CMN.CLS.FinanceYear(time_str)\n # else:\n # raise ValueError(\"Unsupport time unit[%d] for transform\" % time_unit)\n # return time_obj\n\n\n\n def __str__(self):\n return self.to_string()\n\n\n def __lt__(self, other):\n return self.get_value() < other.get_value()\n\n\n def __le__(self, other):\n return self.get_value() <= other.get_value()\n\n\n def __eq__(self, other):\n return self.get_value() == other.get_value()\n\n\n def __ne__(self, other):\n return self.get_value() != other.get_value()\n\n\n def __gt__(self, other):\n return self.get_value() > other.get_value()\n\n\n def __ge__(self, other):\n return self.get_value() >= other.get_value()\n\n\nclass FinanceDate(FinanceTimeBase):\n\n today_finance_date = None\n last_finance_date = None\n def __init__(self, *args):\n super(FinanceDate, self).__init__()\n self.month = None # range: 1 - 12\n self.day = None # range: 1 - last date of month\n self.date_str = None\n self.datetime_cfg = None\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_date_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n # self.year = mobj.group(1)\n self.month = int(mobj.group(2))\n self.day = int(mobj.group(3))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceDate):\n self.setup_year_value(args[0].year)\n # self.year = args[0].year\n self.month = args[0].month\n self.day = args[0].day\n else:\n format_unsupport = True\n elif len(args) == 3:\n for index in range(3):\n if type(args[index]) is not int:\n format_unsupport = True\n self.setup_year_value(args[0])\n # self.year = args[0]\n self.month = args[1]\n self.day = args[2]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceDate, due to: %s\" % str(e))\n# Check value range\n FinanceDate.check_value_range(self.year, self.month, self.day)\n\n\n @staticmethod\n def check_value_range(year, month, day):\n# Check Year Range\n CMN_FUNC.check_year_range(year)\n# Check Month Range\n CMN_FUNC.check_month_range(month)\n# Check Day Range\n CMN_FUNC.check_day_range(day, year, month)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_DAY\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n @classmethod\n def get_today_finance_date(cls):\n if cls.today_finance_date is None:\n cls.today_finance_date = FinanceDate(datetime.today())\n return cls.today_finance_date\n\n\n @classmethod\n def get_last_finance_date(cls):\n if cls.last_finance_date is None:\n today_data_exist_hour = CMN_DEF.TODAY_DATA_EXIST_HOUR # if GV.IS_FINANCE_MARKET_MODE else CMN_DEF.TODAY_STOCK_DATA_EXIST_HOUR\n today_data_exist_minute = CMN_DEF.TODAY_DATA_EXIST_MINUTE # if GV.IS_FINANCE_MARKET_MODE else CMN_DEF.TODAY_STOCK_DATA_EXIST_HOUR\n cls.last_finance_date = CMN_FUNC.get_last_url_data_date(today_data_exist_hour, today_data_exist_minute) \n return cls.last_finance_date\n\n\n def __add__(self, day_delta):\n # if not isinstance(delta, timedelta):\n # raise TypeError('The type[%s] of the other variable is NOT timedelta' % type(delta))\n if not isinstance(day_delta, int):\n raise TypeError('The type[%s] of the day_delta argument is NOT int' % type(day_delta))\n return FinanceDate(self.to_datetime() + timedelta(days = day_delta))\n\n\n def __sub__(self, day_delta):\n # if not isinstance(delta, timedelta):\n # raise TypeError('The type[%s] of the other variable is NOT timedelta' % type(delta))\n if not isinstance(day_delta, int):\n raise TypeError('The type[%s] of the day_delta argument is NOT int' % type(day_delta))\n return FinanceDate(self.to_datetime() - timedelta(days = day_delta))\n\n\n def to_string(self):\n if self.date_str is None:\n self.date_str = CMN_FUNC.transform_date_str(self.year, self.month, self.day)\n return self.date_str\n\n\n def get_value(self):\n return (self.year << 12 | self.month << 8 | self.day)\n\n\n def get_value_tuple(self):\n return (self.year, self.month, self.day)\n\n\n def to_datetime(self):\n if self.datetime_cfg is None:\n self.datetime_cfg = datetime(self.year, self.month, self.day)\n return self.datetime_cfg\n\n\n @staticmethod\n def is_same_month(finance_date1, finance_date2):\n return (True if FinanceMonth(finance_date1.year, finance_date1.month) == FinanceMonth(finance_date2.year, finance_date2.month) else False)\n\n\nclass FinanceWeek(FinanceTimeBase):\n\n @classmethod\n def date_to_weekofyear(cls, year, month, day):\n year, weekofyear, weekday = date(year, month, day).isocalendar()\n return (year, weekofyear, weekday)\n\n\n @classmethod\n def weekofyear_to_date(cls, year, weekofyear, weekday=0):\n# The first day of the week '0': Sunday \n# The second day of the week '1': Monday\n# ...\n # import pdb; pdb.set_trace()\n week_str = CMN_FUNC.transform_week_str(year, weekofyear) + '-%d' % weekday\n date_obj = datetime.strptime(week_str, \"%Yw%W-%w\")\n return (date_obj.year, date_obj.month, date_obj.day)\n\n\n @classmethod\n def get_finance_week_from_date(cls, *week_args):\n \"\"\" Find the finance week due to the specific finance date\"\"\"\n \n year = None\n month = None\n day = None\n if isinstance(week_args[0], FinanceDate):\n pass\n elif isinstance(week_args[0], int) and len(week_args) == 3:\n pass\n else:\n raise ValueError(\"UnSupport input argument: %s\" % week_args)\n return cls(*week_args)\n\n\n def __init__(self, *args):\n super(FinanceWeek, self).__init__()\n self.year = None # range: 2000 - 2099\n self.weekofyear = None\n self.weekday = 0\n self.week_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n if CMN_FUNC.is_date_str_format(args[0]):\n mobj = CMN_FUNC.check_date_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n month = int(mobj.group(2))\n day = int(mobj.group(3))\n _, self.weekofyear, self.weekday = self.date_to_weekofyear(self.year, month, day)\n elif CMN_FUNC.is_week_str_format(args[0]):\n mobj = CMN_FUNC.check_week_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n self.weekofyear = int(mobj.group(2)) \n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceDate):\n self.setup_year_value(args[0].year)\n _, self.weekofyear, self.weekday = self.date_to_weekofyear(self.year, args[0].month, args[0].day)\n elif isinstance(args[0], FinanceWeek): \n self.year = args[0].year\n self.weekofyear = args[0].weekofyear\n self.weekday = args[0].weekday\n else:\n format_unsupport = True\n elif len(args) == 2:\n if isinstance(args[0], int):\n self.setup_year_value(args[0])\n self.weekofyear = args[1]\n else:\n format_unsupport = True\n elif len(args) == 3:\n if isinstance(args[0], int):\n self.setup_year_value(args[0])\n _, self.weekofyear, self.weekday = self.date_to_weekofyear(self.year, args[1], args[2])\n else:\n format_unsupport = True\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceYear, due to: %s\" % str(e))\n# Check value range\n CMN_FUNC.check_year_range(self.year)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_WEEK\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __add__(self, week_delta):\n if not isinstance(week_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(week_delta))\n year, month, day = self.weekofyear_to_date(self.year, self.weekofyear, self.weekday)\n new_datetime = datetime(year, month, day) + timedelta(days = week_delta * 7)\n return FinanceWeek(new_datetime.year, new_datetime.month, new_datetime.day)\n\n\n def __sub__(self, week_delta):\n if not isinstance(week_delta, int):\n raise TypeError('The type[%s] of the week_delta argument is NOT int' % type(week_delta))\n year, month, day = self.weekofyear_to_date(self.year, self.weekofyear, self.weekday)\n new_datetime = datetime(year, month, day) - timedelta(days = week_delta * 7)\n return FinanceWeek(new_datetime.year, new_datetime.month, new_datetime.day)\n\n\n def to_string(self):\n if self.week_str is None:\n self.week_str = CMN_FUNC.transform_week_str(self.year, self.weekofyear)\n return self.week_str\n\n\n def get_value(self):\n return (self.year << 3 | self.weekofyear)\n\n\n def get_value_tuple(self):\n return (self.year, self.weekofyear,)\n\n\nclass FinanceMonth(FinanceTimeBase):\n\n @classmethod\n def get_finance_month_from_date(cls, *date_args):\n \"\"\" Find the finance month due to the specific finance date\"\"\"\n \n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n finance_date = FinanceDate(*date_args)\n\n return cls(finance_date.year, finance_date.month)\n\n\n def __init__(self, *args):\n super(FinanceMonth, self).__init__()\n self.month = None # range: 1 - 12\n self.month_str = None\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_month_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n # self.year = mobj.group(1)\n self.month = int(mobj.group(2))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceMonth):\n self.setup_year_value(args[0].year)\n # self.year = args[0].year\n self.month = args[0].month\n else:\n format_unsupport = True\n elif len(args) == 2:\n for index in range(2):\n if type(args[index]) is not int:\n format_unsupport = True\n self.setup_year_value(args[0])\n # self.year = args[0]\n self.month = args[1]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceMonth, due to: %s\" % str(e))\n# Check value range\n FinanceMonth.check_value_range(self.year, self.month)\n\n\n @staticmethod\n def check_value_range(year, month):\n# Check Year Range\n CMN_FUNC.check_year_range(year)\n# Check Month Range\n CMN_FUNC.check_month_range(month)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_MONTH\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __to_month_index(self):\n return self.year * 12 + self.month - 1\n\n\n def __from_month_index_to_value(self, month_index):\n # year = month_index / 12\n # month = month_index % 12 + 1\n return MonthTuple(month_index / 12, month_index % 12 + 1)\n\n\n def __add__(self, month_delta):\n if not isinstance(month_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(month_delta))\n\n new_month_index = self.__to_month_index() + month_delta\n new_month_tuple = self.__from_month_index_to_value(new_month_index)\n return FinanceMonth(new_month_tuple.year, new_month_tuple.month)\n\n\n def __sub__(self, month_delta):\n if not isinstance(month_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(month_delta))\n\n new_month_index = self.__to_month_index() - month_delta\n new_month_tuple = self.__from_month_index_to_value(new_month_index)\n return FinanceMonth(new_month_tuple.year, new_month_tuple.month)\n\n\n def to_string(self):\n if self.month_str is None:\n self.month_str = CMN_FUNC.transform_month_str(self.year, self.month)\n return self.month_str\n\n\n def get_value(self):\n return (self.year << 4 | self.month)\n\n\n def get_value_tuple(self):\n return (self.year, self.month)\n\n def get_last_date_of_month(self):\n return calendar.monthrange(self.year, self.month)[1]\n\n\nclass FinanceQuarter(FinanceTimeBase):\n\n ANNUAL_REPORT_MONTH = 3\n ANNUAL_REPORT_DAY = 31\n Q1_QUARTERLY_REPORT_MONTH = 5\n Q1_QUARTERLY_REPORT_DAY = 15\n Q2_QUARTERLY_REPORT_MONTH = 8\n Q2_QUARTERLY_REPORT_DAY = 14\n Q3_QUARTERLY_REPORT_MONTH = 11\n Q3_QUARTERLY_REPORT_DAY = 14\n\n @classmethod\n def __get_statement_release_date_list(cls, year):\n statement_release_date_list = [\n FinanceDate(year, cls.ANNUAL_REPORT_MONTH, cls.ANNUAL_REPORT_DAY),\n FinanceDate(year, cls.Q1_QUARTERLY_REPORT_MONTH, cls.Q1_QUARTERLY_REPORT_DAY),\n FinanceDate(year, cls.Q2_QUARTERLY_REPORT_MONTH, cls.Q2_QUARTERLY_REPORT_DAY),\n FinanceDate(year, cls.Q3_QUARTERLY_REPORT_MONTH, cls.Q3_QUARTERLY_REPORT_DAY), \n ]\n return statement_release_date_list\n\n\n @classmethod\n def get_start_finance_quarter_from_date(cls, *date_args):\n \"\"\" Find the nearest start finance qaurter due to the specific finance date\"\"\"\n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n finance_date = FinanceDate(*date_args)\n statement_release_date_list = cls.__get_statement_release_date_list(finance_date.year)\n finance_quarter = None\n if finance_date <= statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year - 1, 4)\n elif statement_release_date_list[1] >= finance_date > statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year, 1)\n elif statement_release_date_list[2] >= finance_date > statement_release_date_list[1]:\n finance_quarter = FinanceQuarter(finance_date.year, 2)\n elif statement_release_date_list[3] >= finance_date > statement_release_date_list[2]:\n finance_quarter = FinanceQuarter(finance_date.year, 3)\n elif finance_date >= statement_release_date_list[3]:\n finance_quarter = FinanceQuarter(finance_date.year, 4)\n else:\n raise ValueError(\"Fail to transform the finance date[%s] to quarter\" % finance_date)\n return finance_quarter\n\n\n @classmethod\n def get_end_finance_quarter_from_date(cls, *date_args):\n \"\"\" Find the nearest end finance qaurter due to the specific finance date\"\"\"\n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n finance_date = FinanceDate(*date_args)\n statement_release_date_list = cls.__get_statement_release_date_list(finance_date.year)\n finance_quarter = None\n if finance_date < statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year - 1, 3)\n elif statement_release_date_list[1] > finance_date >= statement_release_date_list[0]:\n finance_quarter = FinanceQuarter(finance_date.year - 1, 4)\n elif statement_release_date_list[2] > finance_date >= statement_release_date_list[1]:\n finance_quarter = FinanceQuarter(finance_date.year, 1)\n elif statement_release_date_list[3] > finance_date >= statement_release_date_list[2]:\n finance_quarter = FinanceQuarter(finance_date.year, 2)\n elif finance_date >= statement_release_date_list[3]:\n finance_quarter = FinanceQuarter(finance_date.year, 3)\n else:\n raise ValueError(\"Fail to transform the end finance date[%s] to quarter\" % finance_date)\n return finance_quarter\n\n\n def __init__(self, *args):\n super(FinanceQuarter, self).__init__()\n self.quarter = None\n self.quarter_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_quarter_str_format(args[0])\n self.setup_year_value(mobj.group(1))\n # self.year = mobj.group(1)\n self.quarter = int(mobj.group(2))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceQuarter):\n self.setup_year_value(args[0].year)\n # self.year = args[0].year\n self.quarter = (int)(math.ceil(args[0].month / 3.0))\n else:\n format_unsupport = True\n elif len(args) == 2:\n for index in range(2):\n if type(args[index]) is not int:\n format_unsupport = True\n self.year = args[0]\n self.quarter = args[1]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceQuarter, due to: %s\" % str(e))\n# Check value Range\n FinanceQuarter.check_value_range(self.year, self.quarter)\n\n\n @staticmethod\n def check_value_range(year, quarter):\n# Check Year Range\n CMN_FUNC.check_year_range(year)\n# Check Quarter Range\n CMN_FUNC.check_quarter_range(quarter)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_QUARTER\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __to_quarter_index(self):\n return self.year * 4 + self.quarter - 1\n\n\n def __from_quarter_index_to_value(self, quarter_index):\n return QuarterTuple(quarter_index / 4, quarter_index % 4 + 1)\n\n\n def __add__(self, quarter_delta):\n if not isinstance(quarter_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(quarter_delta))\n\n new_quarter_index = self.__to_quarter_index() + quarter_delta\n new_quarter_tuple = self.__from_quarter_index_to_value(new_quarter_index)\n return FinanceQuarter(new_quarter_tuple.year, new_quarter_tuple.quarter)\n\n\n def __sub__(self, quarter_delta):\n if not isinstance(quarter_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(quarter_delta))\n\n new_quarter_index = self.__to_quarter_index() - quarter_delta\n new_quarter_tuple = self.__from_quarter_index_to_value(new_quarter_index)\n return FinanceQuarter(new_quarter_tuple.year, new_quarter_tuple.quarter)\n\n\n def to_string(self):\n if self.quarter_str is None:\n self.quarter_str = CMN_FUNC.transform_quarter_str(self.year, self.quarter)\n return self.quarter_str\n\n\n def get_value(self):\n return (self.year << 3 | self.quarter)\n\n\n def get_value_tuple(self):\n return (self.year, self.quarter)\n\n\nclass FinanceYear(FinanceTimeBase):\n\n @classmethod\n def get_finance_year_from_date(cls, *date_args):\n \"\"\" Find the finance year due to the specific finance date\"\"\"\n \n finance_date = None\n if isinstance(date_args[0], FinanceDate):\n finance_date = date_args[0]\n else:\n raise ValueError(\"UnSupport input argument: %s\" % date_args)\n return cls(finance_date.year)\n\n\n def __init__(self, *args):\n super(FinanceYear, self).__init__()\n self.year = None # range: 2000 - 2099\n self.year_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n time_cfg = None\n if isinstance(args[0], str):\n mobj = CMN_FUNC.check_year_str_format(args[0])\n self.setup_year_value(mobj.group(0))\n elif isinstance(args[0], datetime) or isinstance(args[0], FinanceMonth):\n self.setup_year_value(args[0].year)\n else:\n format_unsupport = True\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceYear, due to: %s\" % str(e))\n# Check value range\n CMN_FUNC.check_year_range(self.year)\n\n\n @staticmethod\n def get_time_unit_type():\n return CMN_DEF.DATA_TIME_UNIT_YEAR\n\n\n @classmethod\n def from_string(cls, time_string):\n return cls(time_string)\n\n\n def __add__(self, year_delta):\n if not isinstance(year_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(year_delta))\n new_year = self.year + year_delta\n return FinanceYear(year)\n\n\n def __sub__(self, year_delta):\n if not isinstance(year_delta, int):\n raise TypeError('The type[%s] of the delta argument is NOT int' % type(year_delta))\n new_year = self.year - year_delta\n return FinanceYear(year)\n\n\n def to_string(self):\n if self.year_str is None:\n self.year_str = \"%d\" % self.year\n return self.year_str\n\n\n def get_value(self):\n return self.year\n\n\n def get_value_tuple(self):\n return (self.year,)\n\n\nclass FinanceTimeRange(object):\n\n def __init__(self, *args):\n self.time_start = None\n self.time_end = None\n self.time_range_str = None\n # import pdb; pdb.set_trace()\n try:\n format_unsupport = False\n if len(args) == 1:\n if isinstance(args[0], str):\n (self.time_start, self.time_end) = CMN_FUNC.parse_time_duration_range_str_to_object(args[0])\n else:\n format_unsupport = True\n elif len(args) == 2:\n for index in range(2):\n if not isinstance(args[index], FinanceTimeBase):\n format_unsupport = True\n self.time_start = args[0]\n self.time_end = args[1]\n else:\n format_unsupport = True\n if format_unsupport:\n raise ValueError(\"Unsupport argument format: %s\" % [type(data) for data in args])\n except ValueError as e:\n raise e\n except Exception as e:\n raise Exception(\"Exception occurs in FinanceTimeRange, due to: %s\" % str(e))\n\n\n def is_greater_than_time_start(self, finance_time):\n return False if ((self.time_start is not None) and (finance_time < self.time_start)) else True\n\n\n def is_less_than_time_end(self, finance_time):\n return False if ((self.time_end is not None) and (finance_time > self.time_end)) else True\n\n\n# class ParseURLDataType:\n\n# def __init__(self):\n# # self.parse_url_data_type = None\n# pass\n\n\n# def get_type(self):\n# raise NotImplementedError\n\n\n# class ParseURLDataByBS4(ParseURLDataType):\n\n# def __init__(self, encoding, select_flag):\n# # self.parse_url_data_type = CMN.PARSE_URL_DATA_BY_BS4\n# self.encoding = encoding\n# self.select_flag = select_flag\n\n\n# def get_type(self):\n# return CMN.PARSE_URL_DATA_BY_BS4\n\n\n# class ParseURLDataByJSON(ParseURLDataType):\n\n# def __init__(self, data_field_name):\n# # self.parse_url_data_type = CMN.PARSE_URL_DATA_BY_BS4\n# self.data_field_name = data_field_name\n\n\n# def get_type(self):\n# return CMN.PARSE_URL_DATA_BY_JSON\n\n\nclass FinanceTimerThread(threading.Thread):\n\n def __init__(self, **cfg):\n super(FinanceTimerThread, self).__init__()\n self.daemon = True\n self.xcfg = {\n \"func_ptr\": None,\n \"interval\": 30,\n }\n self.xcfg.update(cfg)\n # self.exit = False\n # if self.xcfg[\"func_ptr\"] is None:\n # raise ValueError(\"func_ptr should NOT be None\")\n self.exit_event = threading.Event()\n self.interval = self.xcfg[\"interval\"]\n self.func_ptr = None\n self.func_args = None\n self.func_kwargs = None\n self.start_time = None\n\n\n def start_timer(self, func_ptr, *args, **kwargs):\n self.func_ptr = func_ptr\n self.func_args = args\n self.func_kwargs = kwargs\n # self.start_time = time()\n # self.exit = True\n self.start()\n\n\n def stop_timer(self, timeout=5):\n # self.exit = True\n self.exit_event.set( )\n threading.Thread.join(self, timeout)\n\n\n def run(self):\n while not self.exit_event.isSet( ):\n self.func_ptr(*self.func_args, **self.func_kwargs)\n self.exit_event.wait(self.interval)\n\n\n#############################################################################################\n\nclass CSVTimeRangeUpdate(object):\n \n CSV_APPEND_NONE = 0 # No new web data to append\n CSV_APPEND_BEFORE = 1 # new web data will be appended in front of the old csv data\n CSV_APPEND_AFTER = 2 # new web data will be appended in back of the old csv data\n # CSV_APPEND_BOTH = 3 # new web data will be appended in front and back(both) of the old csv data\n\n @classmethod\n def get_init_csv_time_duration_update(cls, time_duration_start, time_duration_end):\n # import pdb; pdb.set_trace()\n# If it's time first time to write the data from web to CSV ......\n web2csv_time_duration_update = cls()\n web2csv_time_duration_update.NewCSVStart = web2csv_time_duration_update.NewWebStart = time_duration_start\n web2csv_time_duration_update.NewCSVEnd = web2csv_time_duration_update.NewWebEnd = time_duration_end\n web2csv_time_duration_update.AppendDirection = cls.CSV_APPEND_AFTER\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update.NewWebStart, web2csv_time_duration_update.NewWebEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update,),)\n\n\n @classmethod\n def get_extended_csv_time_duration_update(cls, time_duration_start, time_duration_end, csv_old_time_duration_tuple):\n # import pdb; pdb.set_trace()\n# Adjust the time duration, ignore the data which already exist in the finance data folder\n# I assume that the time duration between the csv data and new data should be consecutive\n# Two cases which the original time range can be extended successfully: \n# (1) The new time range overlaps the original one\n# (2) The new time range fully covers the original one\n overlap_case = CMN_FUNC.get_time_range_overlap_case(time_duration_start, time_duration_end, csv_old_time_duration_tuple.time_duration_start, csv_old_time_duration_tuple.time_duration_end)\n new_csv_extension_time_duration = None\n web2csv_time_duration_update_before = None\n web2csv_time_duration_update_after = None\n if overlap_case == CMN_DEF.TIME_OVERLAP_COVERED:\n# # All csv data already exists, no need to update the new data\n# g_logger.debug(\"The time duration[%s:%s] of the CSV data[%s] already exist ......\" % (time_duration_start, time_duration_end, CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX]))\n# new_csv_extension_time_duration = None\n# return None\n return (new_csv_extension_time_duration, None,)\n elif overlap_case == CMN_DEF.TIME_OVERLAP_BEFORE:\n# The new time range is extended before the start side of the original time range\n web2csv_time_duration_update_before = cls()\n web2csv_time_duration_update_before.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_before.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_before.NewWebStart = time_duration_start\n web2csv_time_duration_update_before.NewWebEnd = web2csv_time_duration_update_before.OldCSVStart - 1\n web2csv_time_duration_update_before.AppendDirection = cls.CSV_APPEND_BEFORE\n # g_logger.debug(\"Extend the time duration before the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_before.OldCSVStart, web2csv_time_duration_update_before.OldCSVEnd, web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_before.NewWebEnd))\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_before.OldCSVEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update_before,),)\n elif overlap_case == CMN_DEF.TIME_OVERLAP_AFTER:\n# The new time range is extended after the end side of the original time range\n web2csv_time_duration_update_after = cls()\n web2csv_time_duration_update_after.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_after.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_after.NewWebStart = web2csv_time_duration_update_after.OldCSVEnd + 1\n web2csv_time_duration_update_after.NewWebEnd = time_duration_end\n web2csv_time_duration_update_after.AppendDirection = cls.CSV_APPEND_AFTER\n # g_logger.debug(\"Extend the time duration after the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_after.OldCSVStart, web2csv_time_duration_update_after.OldCSVEnd, web2csv_time_duration_update_after.NewWebStart, web2csv_time_duration_update_after.NewWebEnd))\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update_after.OldCSVStart, web2csv_time_duration_update_after.NewWebEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update_after,),)\n elif overlap_case == CMN_DEF.TIME_OVERLAP_COVER:\n# The new time range covers the original time range and extended before/after the start/end side of the original time range\n web2csv_time_duration_update_before = cls()\n web2csv_time_duration_update_before.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_before.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_before.NewWebStart = time_duration_start\n web2csv_time_duration_update_before.NewWebEnd = web2csv_time_duration_update_before.OldCSVStart - 1\n web2csv_time_duration_update_before.AppendDirection = cls.CSV_APPEND_BEFORE\n # g_logger.debug(\"Extend the time duration before the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_before.OldCSVStart, web2csv_time_duration_update_before.OldCSVEnd, web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_before.NewWebEnd))\n web2csv_time_duration_update_after = cls()\n web2csv_time_duration_update_after.OldCSVStart = csv_old_time_duration_tuple.time_duration_start\n web2csv_time_duration_update_after.OldCSVEnd = csv_old_time_duration_tuple.time_duration_end\n web2csv_time_duration_update_after.NewWebStart = web2csv_time_duration_update_after.OldCSVEnd + 1\n web2csv_time_duration_update_after.NewWebEnd = time_duration_end\n web2csv_time_duration_update_after.AppendDirection = cls.CSV_APPEND_AFTER\n # g_logger.debug(\"Extend the time duration after the original CSV data[%s %s:%s]: %s:%s\" % (CMN_DEF.SCRAPY_METHOD_DESCRIPTION[self.SCRAPY_CLASS_INDEX], web2csv_time_duration_update_after.OldCSVStart, web2csv_time_duration_update_after.OldCSVEnd, web2csv_time_duration_update_after.NewWebStart, web2csv_time_duration_update_after.NewWebEnd))\n new_csv_extension_time_duration = TimeDurationTuple(web2csv_time_duration_update_before.NewWebStart, web2csv_time_duration_update_after.NewWebEnd)\n return (new_csv_extension_time_duration, (web2csv_time_duration_update_before, web2csv_time_duration_update_after,),)\n# If the time range of new data contain all the time range of csv data, the system is not desiged to update two time range interval\n else:\n raise CMN.EXCEPTION.WebScrapyUnDefiedCaseException(\"The system does NOT support this type[2] of the range update; CSV data[%s:%s], new data[%s:%s]\" % (csv_old_time_duration_tuple.time_duration_start, csv_old_time_duration_tuple.time_duration_end, time_duration_start, time_duration_end))\n\n\n @classmethod\n def get_csv_time_duration_update(cls, time_duration_start, time_duration_end, csv_old_time_duration_tuple=None):\n if csv_old_time_duration_tuple is None:\n return cls.get_init_csv_time_duration_update(time_duration_start, time_duration_end)\n else:\n return cls.get_extended_csv_time_duration_update(time_duration_start, time_duration_end, csv_old_time_duration_tuple)\n\n\n def __init__(self):\n self.append_direction = self.CSV_APPEND_NONE\n self.old_csv_start = None\n self.old_csv_end = None\n self.new_web_start = None\n self.new_web_end = None\n # self.new_csv_start = None\n # self.new_csv_end = None\n self.description = None\n\n\n def __str__(self):\n if self.description is None:\n self.description = \"\"\n if self.old_csv_start is not None:\n self.description += \"OCS: %s; \" % self.old_csv_start\n if self.old_csv_end is not None:\n self.description += \"OCE: %s; \" % self.old_csv_end\n if self.new_web_start is not None:\n self.description += \"NWS: %s; \" % self.new_web_start\n if self.new_web_end is not None:\n self.description += \"NWE: %s; \" % self.new_web_end\n # if self.new_csv_start is not None:\n # self.description += \"NCS: %s; \" % self.new_csv_start\n # if self.new_csv_end is not None:\n # self.description += \"NCE: %s; \" % self.new_csv_end\n return self.description\n\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def NeedUpdate(self):\n return (True if (self.append_direction != self.CSV_APPEND_NONE) else False)\n\n @property\n def AppendDirection(self):\n return self.append_direction\n @AppendDirection.setter\n def AppendDirection(self, append_direction):\n self.append_direction = append_direction\n\n @property\n def OldCSVStart(self):\n return self.old_csv_start\n @OldCSVStart.setter\n def OldCSVStart(self, old_csv_start):\n self.old_csv_start = old_csv_start\n\n @property\n def OldCSVEnd(self):\n return self.old_csv_end\n @OldCSVEnd.setter\n def OldCSVEnd(self, old_csv_end):\n self.old_csv_end = old_csv_end\n\n @property\n def NewWebStart(self):\n return self.new_web_start\n @NewWebStart.setter\n def NewWebStart(self, new_web_start):\n self.new_web_start = new_web_start\n\n @property\n def NewWebEnd(self):\n return self.new_web_end\n @NewWebEnd.setter\n def NewWebEnd(self, new_web_end):\n self.new_web_end = new_web_end\n\n # @property\n # def NewCSVStart(self):\n # return self.new_csv_start\n # @NewCSVStart.setter\n # def NewCSVStart(self, new_csv_start):\n # self.new_csv_start = new_csv_start\n\n # @property\n # def NewCSVEnd(self):\n # return self.new_csv_end\n # @NewCSVEnd.setter\n # def NewCSVEnd(self, new_csv_end):\n # self.new_csv_end = new_csv_end\n\n\n def backup_old_csv_if_necessary(self, csv_filepath, ignore_old_csv_exist=False):\n backup_old_csv = False\n if self.append_direction == self.CSV_APPEND_BEFORE: #BASE.BASE.ScrapyBase.CSVTimeRangeUpdate.CSV_APPEND_BEFORE:\n old_csv_filepath = csv_filepath + \".old\"\n if CMN_FUNC.check_file_exist(old_csv_filepath):\n if not ignore_old_csv_exist:\n raise ValueError(\"The CSV file[%s] already exists !!!\" % old_csv_filepath)\n else:\n # g_logger.debug(\"Need add the new data in front of the old CSV data, rename the file: %s\" % (csv_filepath + \".old\"))\n CMN_FUNC.rename_file_if_exist(csv_filepath, csv_filepath + \".old\") \n backup_old_csv = True\n return backup_old_csv\n\n\n def append_old_csv_if_necessary(self, csv_filepath):\n if self.append_direction == self.CSV_APPEND_BEFORE: #BASE.BASE.ScrapyBase.CSVTimeRangeUpdate.CSV_APPEND_BEFORE:\n # g_logger.debug(\"Append the old CSV data to the file: %s\" % csv_filepath)\n CMN_FUNC.append_data_into_file(csv_filepath + \".old\", csv_filepath)\n CMN_FUNC.remove_file_if_exist(csv_filepath + \".old\") \n\n\nclass CSVFileNoScrapyRecord(object):\n\n # STATUS_RECORD_TIME_RANGE_NOT_OVERLAP = 0\n # STATUS_RECORD_CSV_FILE_ALREADY_EXIST = 1\n # STATUS_RECORD_WEB_DATA_NOT_FOUND = 2\n # RECORD_TYPE_INDEX_LIST = [\n # STATUS_RECORD_TIME_RANGE_NOT_OVERLAP,\n # STATUS_RECORD_CSV_FILE_ALREADY_EXIST,\n # STATUS_RECORD_WEB_DATA_NOT_FOUND\n # ]\n RECORD_TYPE_INDEX = 0\n RECORD_TYPE_DESCRIPTION_INDEX = 1\n RECORD_TYPE_ENTRY_LIST = [\n [\"TimeRangeNotOverlap\", \"The search time range does NOT overlap the one in the URL time range lookup table\",],\n [\"CSVFileAlreadyExist\", \"The CSV files of the time range has already existed in the local folder\",],\n [\"WebDataNotFound\", \"The web data of the URL is NOT found\",],\n ]\n RECORD_TYPE_SIZE = len(RECORD_TYPE_ENTRY_LIST)\n TIME_RANGE_NOT_OVERLAP_RECORD_INDEX = 0\n CSV_FILE_ALREADY_EXIST_RECORD_INDEX = 1\n WEB_DATA_NOT_FOUND_RECORD_INDEX = 2\n\n RECORD_TYPE_LIST = [entry[RECORD_TYPE_INDEX] for entry in RECORD_TYPE_ENTRY_LIST]\n RECORD_TYPE_DESCRIPTION_LIST = [entry[RECORD_TYPE_DESCRIPTION_INDEX] for entry in RECORD_TYPE_ENTRY_LIST]\n\n @classmethod\n def create_register_status_instance(cls):\n # import pdb; pdb.set_trace()\n csv_file_no_scrapy_record = cls()\n for index in range(cls.RECORD_TYPE_SIZE):\n csv_file_no_scrapy_record.__register_record_type(\n cls.RECORD_TYPE_LIST[index], \n cls.RECORD_TYPE_DESCRIPTION_LIST[index]\n )\n return csv_file_no_scrapy_record\n\n\n def __init__(self):\n self.record_type_dict = {}\n self.record_type_description_dict = {}\n self.web_data_not_found_time_start = None\n self.web_data_not_found_time_end = None\n\n\n def __register_record_type(self, record_type_name, record_type_description):\n # import pdb; pdb.set_trace()\n if self.record_type_dict.has_key(record_type_name):\n g_logger.debug(\"The type[%s] has already exist\" % record_type_name)\n return\n self.record_type_dict[record_type_name] = []\n self.record_type_description_dict[record_type_name] = record_type_description\n\n\n def __add_record(self, record_type_name, *args):\n if not self.record_type_dict.has_key(record_type_name):\n raise ValueError(\"Unknown Check Status Type: %s\" % record_type_name)\n self.record_type_dict[record_type_name].append(args)\n\n\n def add_time_range_not_overlap_record(self, *args):\n# Market\n# args[0]: source type index\n# Stock\n# args[0]: source type index\n# args[1]: company code number\n self.__add_record(\"TimeRangeNotOverlap\", *args)\n\n\n def add_csv_file_already_exist_record(self, *args):\n# Market\n# args[0]: source type index\n# Stock\n# args[0]: source type index\n# args[1]: company code number\n self.__add_record(\"CSVFileAlreadyExist\", *args)\n\n\n def add_web_data_not_found_record(self, *args):\n# Market\n# args[0]: time slice. None for a must to flush data into list\n# args[1]: source type index\n# Stock\n# args[0]: time slice. None for a must to flush data into list\n# args[1]: source type index\n# args[2]: company code number\n need_flush = False\n if args[0] is None:\n if self.web_data_not_found_time_start is not None:\n need_flush = True\n else:\n if self.web_data_not_found_time_start is None:\n self.web_data_not_found_time_start = self.web_data_not_found_time_end = args[0]\n else:\n if self.web_data_not_found_time_end.check_continous_time_duration(args[0]):\n self.web_data_not_found_time_end = args[0]\n else:\n need_flush = True\n# Keep track of the time range in which the web data is empty\n if need_flush:\n# Market\n# args_new[0]: time slice. None for a must to flush data into list\n# args_new[1]: source type index\n# args_new[2]: empty time start\n# args_new[3]: empty time end\n# Stock\n# args_new[0]: time slice. None for a must to flush data into list\n# args_new[1]: source type index\n# args_new[2]: company code number\n# args_new[2]: empty time start\n# args_new[3]: empty time end\n # import pdb; pdb.set_trace()\n # args_new = copy.deepcopy(args)\n args_new = [arg for arg in args]\n args_new.append(self.web_data_not_found_time_start)\n args_new.append(self.web_data_not_found_time_end)\n self.web_data_not_found_time_start = self.web_data_not_found_time_end = None\n self.__add_record(\"WebDataNotFound\", *args_new)\n","sub_path":"depreated_libs/common/common_class.py","file_name":"common_class.py","file_ext":"py","file_size_in_byte":50388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"563097407","text":"# encoding = utf-8\n\nimport ldap\nimport logging\n\n# Get an instance of a logger\nlogger = logging.getLogger('ldap_server.backend.base')\nlogger.setLevel(logging.INFO)\n\nclass InvalidCredentials(Exception):\n pass\n\nclass ServerDown(Exception):\n pass\n\nclass DatabaseCursor(object):\n def __init__(self, ldap_connection):\n self.connection = ldap_connection\n\nclass LdapDatabase(object):\n def __init__(self, settings_dict):\n self.settings_dict = settings_dict\n self.charset = \"utf-8\"\n self.connection = None\n self._cursor()\n\n def _cursor(self):\n if self.connection is None:\n try:\n logger.debug('Connecting to LDAP at %s with account %s' %(self.settings_dict['NAME'], self.settings_dict['USER']))\n \n if self.settings_dict['CACERT']:\n logger.debug('Using CACERT: %s' % self.settings_dict['CACERT'])\n ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.settings_dict['CACERT'])\n\n self.connection = ldap.initialize(self.settings_dict['NAME'])\n\n if self.settings_dict['STARTTLS']:\n logger.debug('Using STARTTLS')\n self.connection.start_tls_s()\n\n self.connection.simple_bind_s(\n self.settings_dict['USER'],\n self.settings_dict['PASSWORD'])\n\n except ldap.SERVER_DOWN:\n logger.error('LDAP server is down')\n raise ServerDown\n\n except ldap.INVALID_CREDENTIALS:\n logger.error('Invalid credentials')\n raise InvalidCredentials\n \n return DatabaseCursor(self.connection)\n\n def add_s(self, dn, modlist):\n logger.info('Adding entry \\'%s\\'' % dn)\n cursor = self._cursor()\n return cursor.connection.add_s(dn.encode(self.charset), modlist)\n\n def delete_s(self, dn):\n logger.info('Deleting entry \\'%s\\'' % dn)\n cursor = self._cursor()\n return cursor.connection.delete_s(dn.encode(self.charset))\n\n def modify_s(self, dn, modlist):\n logger.info('Modifying entry \\'%s\\'' % dn)\n if modlist:\n logger.debug('Modifying attributes: %s' % ', '.join([mod[1] for mod in modlist]))\n\n cursor = self._cursor()\n return cursor.connection.modify_s(dn.encode(self.charset), modlist)\n\n def rename_s(self, dn, newrdn):\n logger.info('Renaming entry \\'%s\\' to \\'%s\\'' % (dn, newrdn))\n cursor = self._cursor()\n return cursor.connection.rename_s(dn.encode(self.charset), newrdn.encode(self.charset))\n\n def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None):\n logger.debug('Searching entries...')\n logger.debug('Base: %s' % base)\n logger.debug('Filter: %s' % filterstr)\n \n if attrlist:\n logger.debug('Attributes: %s' % ', '.join(attrlist))\n\n cursor = self._cursor()\n results = cursor.connection.search_s(base, scope, filterstr.encode(self.charset), attrlist)\n output = []\n for dn, attrs in results:\n output.append((dn.decode(self.charset), attrs))\n return output\n\n def whoami(self):\n return self.settings_dict['USER']\n","sub_path":"ldap_server/backend/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"641607624","text":"class Solution(object):\n def searchRange(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n\n def search(left, right, target, range):\n if left > right:\n return -1\n\n mid = left + (right - left) // 2\n\n if nums[mid] > target:\n search(left, mid - 1, target, range)\n elif nums[mid] < target:\n search(mid + 1, right, target, range)\n else:\n # equal\n if mid < range[0]:\n range[0] = mid\n search(left, mid - 1, target, range)\n if mid > range[1]:\n range[1] = mid\n search(mid + 1, right, target, range)\n\n range = [len(nums), -1]\n search(0, len(nums) - 1, target, range)\n\n if range[0] == len(nums):\n return [-1, -1]\n\n return range\n\n\n","sub_path":"34 Search For a Range.py","file_name":"34 Search For a Range.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"482624831","text":"#Name : problemthree.py\r\n#Author : Min Khant Htoo(17S23) of Pioneer JC\r\n#DOC : 9/05/2017\r\n#Last updated : \r\n#Description :\r\n\r\ndef main():\r\n print(\"### ###\")\r\n infile = open(\"records.txt\",\"r\")\r\n xlist = []\r\n ylist = []\r\n print(\"X\")\r\n for line in infile:\r\n x,y = line[:-1].split(\",\")\r\n xlist.append(x)\r\n ylist.append(y)\r\n print(\"X\")\r\n input()\r\n infile.close()\r\n z = int(0)\r\n for i in range(len(xlist)):\r\n z = z + ((int(xlist[i])**2) - (int(ylist[i])**2))\r\n print(\"X\")\r\n print(\"|{0:^7}|{1:^7}|\".format(\"x\",\"y\"))\r\n for i in range(len(xlist)):\r\n print(\"|{0:^7}|{1:^7}|\".format(xlist[i],ylist[i]))\r\n print(\"z = {0}\".format(z))\r\n print(\"X\")\r\n \r\n input(\"Press Any Key to continue ...\")\r\n print(\"### Program Ending ... ###\")\r\n\r\nmain()\r\n\r\n","sub_path":"Computing J1/Programming Exercise 3/problemthree.py","file_name":"problemthree.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"175423194","text":"import numpy as np\nimport imageio\nfrom matplotlib import pyplot as plt\nimport sys\nimport os\nimport time\n\n\n'''\nK-Means Implementation and Functions\n---------------------------------------\n'''\ndef distance3d(pixel, cluster):\n\tpixel = pixel.astype(float)\n\tcluster = cluster.astype(float)\n\treturn (pixel[0] - cluster[0])**2 + (pixel[1] - cluster[1])**2 + (pixel[2] - cluster[2])**2\n\n\ndef assignclusters(pixels, clusters):\n\tassignments = np.zeros((pixels.shape[0], 1), np.int)\n\tfor i in range(0, pixels.shape[0]):\n\t\t# assign pixel to cluster\n\t\tpixel = pixels[i][:]\n\t\tassign = 0\n\t\tmin_dis = float(\"inf\")\n\t\tfor j in range(0, clusters.shape[0]):\n\t\t\tcluster = clusters[j][:]\n\t\t\tdistance = distance3d(pixel, cluster)\n\t\t\tif distance < min_dis:\n\t\t\t\tmin_dis = distance\n\t\t\t\tassign = j\n\t\tassignments[i] = assign\n\tgroups = np.linspace(0, clusters.shape[0] - 1, num=clusters.shape[0], dtype=np.int)\n\tmask = np.isin(groups, np.unique(assignments))\n\t# Check for empty clusters and randomly assign to keep algorithm running (for large # of clusters)\n\tif np.isin(False,mask):\n\t\tprint(\"Some empty clusters found, random sampling new points for cluster\")\n\t\tfor i in range(0, mask.shape[0]):\n\t\t\tif mask[i] == False:\n\t\t\t\tselections = np.random.randint(0, assignments.shape[0], 1)\n\t\t\t\tassignments[selections] = i\n\treturn assignments\n\n\ndef centeradjustment(assignments, pixels, K):\n\tpixel_locations = np.concatenate((assignments, pixels), axis=1)\n\tpixel_locations.sort(axis=0)\n\tcluster_sum = np.zeros((K,3), np.int)\n\tcluster_num = 0\n\tcluster_count = 0\n\tfor i in range(0, pixel_locations.shape[0]):\n\t\tif pixel_locations[i][0] > cluster_count:\n\t\t\tcluster_sum[cluster_count] = cluster_sum[cluster_count] / cluster_num\n\t\t\tcluster_count = cluster_count + 1\n\t\t\tcluster_num = 0\n\t\tcluster_num = cluster_num + 1\n\t\tcluster_sum[cluster_count] = cluster_sum[cluster_count] + pixel_locations[i][1:4]\n\tcluster_sum[-1] = cluster_sum[-1] / cluster_num # last group\n\tclusters = cluster_sum\n\treturn clusters\n\n\ndef mykmeans(pixels, K):\n\tcentroids_ind = np.random.randint(0, pixels.shape[0], (K, 1)) # initialize as random index location\n\tcentroids = pixels[centroids_ind.squeeze()]\n\tsteps = 100\n\tcnt = 1\n\tepsilon = 1e-1\n\tconverged = False\n\tprint(\"Clustering iterations for K-Means are beginning...\")\n\twhile not converged:\n\t\t# CLUSTER ASSIGNMENT\n\t\tclasses = assignclusters(pixels, centroids)\n\t\tcentroids_last = centroids\n\t\t# CLUSTER ADJUSTMENT\n\t\tcentroids = centeradjustment(classes, pixels, K)\n\t\tif (cnt % 10 == 0 or cnt == 1):\n\t\t\tprint(\"K-means Iteration #{}\".format(cnt))\n\t\t\tprint(\"Centroid clusters: \\n {}\".format(centroids))\n\t\tcnt = cnt + 1\n\t\tif np.linalg.norm(centroids-centroids_last, axis=0).sum() < epsilon:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Means Converged after {} iterations\".format(cnt))\n\t\telif cnt >= steps:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Means Stopped after reaching max {} iterations\".format(cnt))\n\t\telse:\n\t\t\tpass\n\treturn classes, centroids\n\n\n'''\nK-Mediod Implementation and Functions\n---------------------------------------\n'''\ndef distance_function(pixel, cluster, option):\n\tpixel = pixel.astype(float)\n\tcluster = cluster.astype(float)\n\tif option == 0: # euclidean\n\t\tdistance = (pixel[0] - cluster[0]) ** 2 + (pixel[1] - cluster[1]) ** 2 + (pixel[2] - cluster[2]) ** 2\n\telif option == 1: # L1\n\t\tdistance = abs(pixel[0] - cluster[0]) + abs(pixel[1] - cluster[1]) + abs(pixel[2] - cluster[2])\n\telse:\n\t\tdistance = 0\n\treturn distance\n\n\ndef kmed_assignclusters(pixels, clusters, K, option):\n\tassignments_r = np.zeros((pixels.shape[0], K))\n\t# option = 0\n\tfor i in range(0, pixels.shape[0]):\n\t\t# assign pixel to cluster\n\t\tpixel = pixels[i][:]\n\t\tassign = 0\n\t\tmin_dis = float(\"inf\")\n\t\tfor j in range(0, K):\n\t\t\tcluster = clusters[j][:]\n\t\t\t# distance = distance3d(pixel, cluster) #TODO: change distance function\n\t\t\tdistance = distance_function(pixel, cluster, option)\n\t\t\tif distance < min_dis:\n\t\t\t\tmin_dis = distance\n\t\t\t\tassign = j\n\t\tassignments_r[i][assign] = 1\n\n\treturn assignments_r\n\n\ndef distance_matrix(pixels):\n\tprint(\"Calculating Distance Matrix...\")\n\tP = pixels-np.mean(pixels, axis=0)\n\tPt = np.transpose(P)\n\tq=np.linalg.norm(P, axis=1)\n\tD = q + np.transpose(q) - 2*np.matmul(P,Pt)\n\tprint(\"Distance matrix calculated!\")\n\treturn D\n\n\ndef distance_matrix_manhattan(pixels):\n\tprint(\"Calculating Manhattan Distance Matrix...\")\n\tP = pixels-np.mean(pixels, axis=0)\n\tDj = np.zeros((P.shape[0], P.shape[0], 3))\n\tq = np.linalg.norm(P,axis=1)\n\tfor j in range(0,3):\n\t\tPj = P[:, j]\n\t\tPjt = np.transpose(Pj)\n\t\tq = np.square(Pj)\n\t\tqt = np.transpose(q)\n\t\tDj[:,:,j] = q + qt - 2*np.matmul(Pj, Pjt)\n\n\tD = np.sum(Dj, axis=2)\n\tprint(\"Distance Manhattan matrix calculated!\")\n\n\treturn D\n\n\ndef distance_matrix_chebychev(pixels):\n\tprint(\"Calculating Chebychev Distance Matrix...\")\n\tP = pixels-np.mean(pixels, axis=0)\n\tDj = np.zeros((P.shape[0], P.shape[0], 3))\n\tq = np.linalg.norm(P,axis=1)\n\tfor j in range(0,3):\n\t\tPj = P[:, j]\n\t\tPjt = np.transpose(Pj)\n\t\tq = np.square(Pj)\n\t\tqt = np.transpose(q)\n\t\tDj[:,:,j] = q + qt - 2*np.matmul(Pj, Pjt)\n\n\tD = np.max(Dj, axis=2)\n\tprint(\"Distance Chebychev matrix calculated!\")\n\treturn D\n\n\ndef kmed_assignclusters_matrix(pixels, centroids_ind, K, D):\n\tassignments_r = np.zeros((pixels.shape[0], K))\n\tfor i in range(0, pixels.shape[0]):\n\t\t# assign pixel to cluster\n\t\tpixel = pixels[i][:]\n\t\tassign = 0\n\t\tmin_dis = float(\"inf\")\n\t\tfor j in range(0, K):\n\t\t\tind = centroids_ind[j][:]\n\t\t\tdistance = D[i, j]\n\t\t\t# distance = # find in distance matrix\n\t\t\tif distance < min_dis:\n\t\t\t\tmin_dis = distance\n\t\t\t\tassign = j\n\t\tassignments_r[i][assign] = 1\n\n\t# groups = np.linspace(0, K - 1, num=K, dtype=np.int)\n\t# mask = np.isin(groups, np.unique(assignments_r))\n\t# # Check for empty clusters and randomly assign to keep algorithm running (for large # of clusters)\n\t# if np.isin(False, mask):\n\t# \tprint(\"Some empty clusters found, random sampling new points for cluster\")\n\t# \tfor i in range(0, mask.shape[0]):\n\t# \t\tif mask[i] == False:\n\t# \t\t\tselections = np.random.randint(0, assignments_r.shape[0], 1)\n\t# \t\t\tassignments_r[selections] = i\n\n\treturn assignments_r\n\n\ndef kmed_findcentroid_matrix(cluster_pixels, pixels, D):\n\tbest_centroid = 0\n\tbest_distancesum = float(\"inf\")\n\tdistancesum = 0.0\n\tfor i in range(0,cluster_pixels.shape[0]):\n\t\t# print(\"{} of {} data points checked for dissimilarity in this cluster\".format(i, cluster_pixels.shape[0]))\n\t\tpixel_sel = pixels[cluster_pixels[i]]\n\t\tdistancesum = 0.0\n\t\tfor j in range(0, cluster_pixels.shape[0]):\n\t\t\tif i==j:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance = D[i,j]\n\t\t\t\tdistancesum = distancesum + distance\n\t\tif distancesum < best_distancesum:\n\t\t\tbest_distancesum = distancesum\n\t\t\tbest_centroid = i\n\n\treturn best_centroid\n\n\ndef kmed_findcentroid(cluster_pixels, pixels):\n\tbest_centroid = 0\n\tbest_distancesum = float(\"inf\")\n\tdistancesum = 0.0\n\tfor i in range(0,cluster_pixels.shape[0]):\n\t\tprint(\"{} of {} data points checked for dissimilarity in this cluster\".format(i, cluster_pixels.shape[0]))\n\t\tpixel_sel = pixels[cluster_pixels[i]]\n\t\tdistancesum = 0.0\n\t\tfor j in range(0, cluster_pixels.shape[0]):\n\t\t\tif i==j:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance = distance_function(pixel_sel.squeeze(), pixels[cluster_pixels[j]].squeeze(), option=1)\n\t\t\t\tdistancesum = distancesum + distance\n\t\tif distancesum < best_distancesum:\n\t\t\tbest_distancesum = distancesum\n\t\t\tbest_centroid = i\n\n\treturn best_centroid\n\n\ndef kmed_L2centroid(cluster_pixels, pixels):\n\tbest_centroid = 0\n\tbest_distance = float(\"inf\")\n\tmean = pixels[cluster_pixels].mean(axis=0)\n\tfor i in range(0, cluster_pixels.shape[0]):\n\t\t# print(\"{} of {} data points checked for dissimilarity in this cluster\".format(i, cluster_pixels.shape[0]))\n\t\tpixel_sel = pixels[cluster_pixels[i]]\n\t\tdistance = distance_function(pixel_sel.squeeze(), mean.squeeze(), option=0)\n\t\tif distance < best_distance:\n\t\t\tbest_distance = distance\n\t\t\tbest_centroid = cluster_pixels[i].squeeze()\n\treturn best_centroid\n\n\ndef kmed_updateclusters(assign_r, pixels, K):\n\tcluster_inds = np.zeros((K,1), np.int)\n\tclusters = pixels[cluster_inds.squeeze()]\n\tfor i in range(0,K):\n\t\tpixel_ind = np.transpose(np.nonzero(assign_r[:, i]))\n\t\tquick = True\n\t\tif quick: # using knowledge of L2 norm for faster solution\n\t\t\tcluster_ind = kmed_L2centroid(pixel_ind, pixels)\n\t\telse:\n\t\t\tcluster_ind = kmed_findcentroid(pixel_ind, pixels)\n\t\tcluster_inds[i] = cluster_ind\n\n\tclusters = pixels[cluster_inds.squeeze()]\n\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\twhile ind.shape[0] < clusters.shape[0]:\n\t\tfor i in range(0, clusters.shape[0]):\n\t\t\tif i not in ind:\n\t\t\t\tclusters[i] = pixels[np.random.randint(0, pixels.shape[0])]\n\t\t\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\treturn clusters\n\n\ndef kmed_updateclusters_matrix(assign_r, pixels, K, D):\n\tcluster_inds = np.zeros((K,1), np.int)\n\tclusters = pixels[cluster_inds.squeeze()]\n\tfor i in range(0,K):\n\t\tpixel_ind = np.transpose(np.nonzero(assign_r[:, i]))\n\t\tcluster_ind = kmed_findcentroid_matrix(pixel_ind, pixels, D)\n\t\tcluster_inds[i] = cluster_ind\n\n\tclusters = pixels[cluster_inds.squeeze()]\n\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\twhile ind.shape[0] < clusters.shape[0]:\n\t\tfor i in range(0, clusters.shape[0]):\n\t\t\tif i not in ind:\n\t\t\t\tclusters[i] = pixels[np.random.randint(0, pixels.shape[0])]\n\t\tuniq, ind = np.unique(clusters, axis=0, return_index=True)\n\treturn clusters\n\n\ndef mykmedoids(pixels, K, option=0):\n\t'''\n\tK-Mediod Options:\n\t0: Quick L2 norm implementation\n\t1: Slow L2 (TODO)\n\t2: Matrix Math L2 (TODO)\n\t'''\n\tcentroids_ind = np.random.randint(0, pixels.shape[0], (K, 1)) # initialize as random index location\n\n\tcentroids = pixels[centroids_ind.squeeze()]\n\tassignments_r = np.zeros((pixels.shape[0], K))\n\n\t# if matrix option, calculate D matrix\n\tif option == 2:\n\t\tD = distance_matrix(pixels)\n\telif option == 3:\n\t\tD = distance_matrix_manhattan(pixels)\n\telif option == 4:\n\t\tD = distance_matrix_chebychev(pixels)\n\tsteps = 100\n\tcnt = 1\n\tepsilon = 1e-1\n\tconverged = False\n\tprint(\"Clustering iterations for K-Medoids are beginning...\")\n\twhile not converged:\n\t\t# CLUSTER ASSIGNMENT\n\t\tif option==2 or option == 3 or option == 4:\n\t\t\tassignments_r = kmed_assignclusters_matrix(pixels, centroids_ind, K, D)\n\t\telse:\n\t\t\tassignments_r = kmed_assignclusters(pixels, centroids, K, option)\n\t\tcentroids_last = centroids\n\t\t# CLUSTER ADJUSTMENT\n\t\tif option==2 or option == 3 or option == 4:\n\t\t\tcentroids = kmed_updateclusters_matrix(assignments_r, pixels, K, D)\n\t\telse:\n\t\t\tcentroids = kmed_updateclusters(assignments_r, pixels, K)\n\n\t\tif (cnt % 10 == 0 or cnt == 1):\n\t\t\tprint(\"K-Mediods Iteration #{}\".format(cnt))\n\t\t\tprint(\"Centroid clusters: \\n {}\".format(centroids))\n\t\tcnt = cnt + 1\n\t\tarray, locations = np.where(assignments_r == 1)\n\t\tclasses = locations.reshape((locations.shape[0], 1))\n\t\tif np.linalg.norm(centroids-centroids_last, axis=0).sum() < epsilon:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Mediods Converged after {} iterations\".format(cnt))\n\t\telif cnt >= steps:\n\t\t\tconverged = True\n\t\t\tprint(\"K-Mediods Stopped after reaching max {} iterations\".format(cnt))\n\t\telse:\n\t\t\tpass\n\treturn classes, centroids\n\n\ndef main():\n\n\tif(len(sys.argv) < 2):\n\t\tprint(\"Please supply an image file\")\n\t\treturn\n\n\timage_file_name = sys.argv[1]\n\tK = 5 if len(sys.argv) == 2 else int(sys.argv[2])\n\tprint(image_file_name, K)\n\tim = np.asarray(imageio.imread(image_file_name))\n\n\tplt.imshow(im)\n\n\tfig, axs = plt.subplots(1, 2)\n\n\tim_vector = im.copy()\n\tim_vector.resize((im.shape[0]*im.shape[1], im.shape[2])) # added\n\n\tt1 = time.time()\n\tprint(\"Starting K-medoids Clustering\")\n\tclasses, centers = mykmedoids(im_vector, K, option=0) # Options: 0: L2 quick, 2: Matrix L2, 3: Matrix L1, 4: Matrix Linf\n\tprint(classes, centers)\n\tnew_im = np.asarray(centers[classes].reshape(im.shape), im.dtype)\n\timageio.imwrite(os.path.basename(os.path.splitext(image_file_name)[0]) + '_converted_mykmedoids_' + str(K) + os.path.splitext(image_file_name)[1], new_im)\n\taxs[0].imshow(new_im)\n\taxs[0].set_title('K-medoids')\n\tt2 = time.time()\n\tkmed_time = (t2-t1)\n\n\tt1 = time.time()\n\tprint(\"Starting K-means Clustering\")\n\tclasses, centers = mykmeans(im_vector, K)\n\tprint(classes, centers)\n\tnew_im = np.asarray(centers[classes].reshape(im.shape), im.dtype)\n\timageio.imwrite(os.path.basename(os.path.splitext(image_file_name)[0]) + '_converted_mykmeans_' + str(K) + os.path.splitext(image_file_name)[1], new_im)\n\taxs[1].imshow(new_im)\n\taxs[1].set_title('K-means')\n\tt2 = time.time()\n\tkmean_time = (t2-t1)\n\n\tplt.show()\n\n\tprint(\"Total times\\n K-mediods: {} s \\n K-Means: {} s\".format(kmed_time, kmean_time))\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"K-Medoids/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":12485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"255598013","text":"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport random\n\nimport numpy as np\n\n\ndef rand_choice(prob=0.5):\n \"\"\"Returns True if a randomly chosen number is less than or equal to `prob', by default this is a 50/50 chance.\"\"\"\n return random.random() <= prob\n\n\ndef img_bounds(img):\n \"\"\"Returns the minimum and maximum indices of non-zero lines in axis 0 of `img', followed by that for axis 1.\"\"\"\n ax0 = np.any(img, axis=0)\n ax1 = np.any(img, axis=1)\n return np.concatenate((np.where(ax0)[0][[0, -1]], np.where(ax1)[0][[0, -1]]))\n\n\ndef in_bounds(x, y, margin, maxx, maxy):\n \"\"\"Returns True if (x,y) is within the rectangle (margin,margin,maxx-margin,maxy-margin).\"\"\"\n return margin <= x < (maxx - margin) and margin <= y < (maxy - margin)\n\n\ndef is_empty(img):\n \"\"\"Returns True if `img' is empty, that is its maximum value is not greater than its minimum.\"\"\"\n return not (img.max() > img.min()) # use > instead of <= so that an image full of NaNs will result in True\n\n\ndef ensure_tuple_size(tup, dim):\n \"\"\"Returns a copy of `tup' with `dim' values by either shortened or padded with zeros as necessary.\"\"\"\n tup = tuple(tup) + (0,) * dim\n return tup[:dim]\n\n\ndef zero_margins(img, margin):\n \"\"\"Returns True if the values within `margin' indices of the edges of `img' in dimensions 1 and 2 are 0.\"\"\"\n if np.any(img[:, :, :margin]) or np.any(img[:, :, -margin:]):\n return False\n\n if np.any(img[:, :margin, :]) or np.any(img[:, -margin:, :]):\n return False\n\n return True\n\n\ndef rescale_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):\n \"\"\"Rescale the values of numpy array `arr' to be from `minv' to `maxv'.\"\"\"\n if dtype is not None:\n arr = arr.astype(dtype)\n\n mina = np.min(arr)\n maxa = np.max(arr)\n\n if mina == maxa:\n return arr * minv\n\n norm = (arr - mina) / (maxa - mina) # normalize the array first\n return (norm * (maxv - minv)) + minv # rescale by minv and maxv, which is the normalized array by default\n\n\ndef rescale_instance_array(arr, minv=0.0, maxv=1.0, dtype=np.float32):\n \"\"\"Rescale each array slice along the first dimension of `arr' independently.\"\"\"\n out = np.zeros(arr.shape, dtype)\n for i in range(arr.shape[0]):\n out[i] = rescale_array(arr[i], minv, maxv, dtype)\n\n return out\n\n\ndef rescale_array_int_max(arr, dtype=np.uint16):\n \"\"\"Rescale the array `arr' to be between the minimum and maximum values of the type `dtype'.\"\"\"\n info = np.iinfo(dtype)\n return rescale_array(arr, info.min, info.max).astype(dtype)\n\n\ndef copypaste_arrays(src, dest, srccenter, destcenter, dims):\n \"\"\"\n Calculate the slices to copy a sliced area of array `src' into array `dest'. The area has dimensions `dims' (use 0\n or None to copy everything in that dimension), the source area is centered at `srccenter' index in `src' and copied\n into area centered at `destcenter' in `dest'. The dimensions of the copied area will be clipped to fit within the\n source and destination arrays so a smaller area may be copied than expected. Return value is the tuples of slice\n objects indexing the copied area in `src', and those indexing the copy area in `dest'.\n\n Example:\n src=np.random.randint(0,10,(6,6))\n dest=np.zeros_like(src)\n srcslices,destslices=copypasteArrays(src,dest,(3,2),(2,1),(3,4))\n dest[destslices]=src[srcslices]\n print(src)\n print(dest)\n\n >>> [[9 5 6 6 9 6]\n [4 3 5 6 1 2]\n [0 7 3 2 4 1]\n [3 0 0 1 5 1]\n [9 4 7 1 8 2]\n [6 6 5 8 6 7]]\n [[0 0 0 0 0 0]\n [7 3 2 4 0 0]\n [0 0 1 5 0 0]\n [4 7 1 8 0 0]\n [0 0 0 0 0 0]\n [0 0 0 0 0 0]]\n \"\"\"\n srcslices = [slice(None)] * src.ndim\n destslices = [slice(None)] * dest.ndim\n\n for i, ss, ds, sc, dc, dim in zip(range(src.ndim), src.shape, dest.shape, srccenter, destcenter, dims):\n if dim:\n # dimension before midpoint, clip to size fitting in both arrays\n d1 = np.clip(dim // 2, 0, min(sc, dc))\n # dimension after midpoint, clip to size fitting in both arrays\n d2 = np.clip(dim // 2 + 1, 0, min(ss - sc, ds - dc))\n\n srcslices[i] = slice(sc - d1, sc + d2)\n destslices[i] = slice(dc - d1, dc + d2)\n\n return tuple(srcslices), tuple(destslices)\n\n\ndef resize_center(img, *resize_dims, fill_value=0):\n \"\"\"\n Resize `img' by cropping or expanding the image from the center. The `resizeDims' values are the output dimensions\n (or None to use original dimension of `img'). If a dimension is smaller than that of `img' then the result will be\n cropped and if larger padded with zeros, in both cases this is done relative to the center of `img'. The result is\n a new image with the specified dimensions and values from `img' copied into its center.\n \"\"\"\n resize_dims = tuple(resize_dims[i] or img.shape[i] for i in range(len(resize_dims)))\n\n dest = np.full(resize_dims, fill_value, img.dtype)\n half_img_shape = np.asarray(img.shape) // 2\n half_dest_shape = np.asarray(dest.shape) // 2\n\n srcslices, destslices = copypaste_arrays(img, dest, half_img_shape, half_dest_shape, resize_dims)\n dest[destslices] = img[srcslices]\n\n return dest\n\n\ndef one_hot(labels, num_classes):\n \"\"\"\n Converts label image `labels' to a one-hot vector with `num_classes' number of channels as last dimension.\n \"\"\"\n labels = labels % num_classes\n y = np.eye(num_classes)\n onehot = y[labels.flatten()]\n\n return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype)\n","sub_path":"monai/transforms/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"317586421","text":"from adminworkstation.models import Path\n\n# add by wangshibin 20190725\ndef DBuptdate_find_data(path_id):\n # 获取详细信息\n res = Path.objects.filter(path_id=path_id)\n line_type = res[0].path_linetype\n content = res[0].path_content\n trade_type = res[0].path_tradetype\n trade_path = res[0].path_tradepath\n remark = res[0].path_remark\n state = res[0].path_approval_state\n return line_type, content, trade_type, trade_path, remark, state\n","sub_path":"DBoperation/DBuser/DBuptdate_find_data.py","file_name":"DBuptdate_find_data.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"292993283","text":"# -*- coding: utf-8 -*-\n''' Curse of Dimensionality Example \n\nPlot the average distance between points and fraction of points close to the\nedge of a unit hypercube in varying dimensional space \n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance as dm\nimport config as cfg\n\nplt.close()\n\ndef dimbox(ndim,nsamples):\n return np.random.rand(nsamples,ndim)\n \ndef calcfracdist(data,dist):\n num_points = data.shape[0]\n close_to_edge = np.logical_or(data > 1-dist,data < dist)\n within_range = np.any(close_to_edge,axis=1)\n num_in_range = np.count_nonzero(within_range)\n return num_in_range / num_points\n\ndef averagedistance(coordinates):\n nvalues = coordinates.shape[0]\n dist_between_points = dm.cdist(coordinates,coordinates)\n return np.mean(dist_between_points[np.triu_indices(nvalues,k=1)])\n \ndimensions = np.unique(np.round(np.logspace(0,3,50))).astype(int)\nndim = len(dimensions)\nnum_points = 1000\nfraction = np.zeros(ndim)\navg_distance = np.zeros(ndim)\n\ndistance = 0.01\nfor i,dim in enumerate(dimensions):\n print('Dim = {}'.format(dim))\n # Create the data\n data = dimbox(dim,num_points)\n # Calculate the fractional distance\n fraction[i] = calcfracdist(data,distance)\n # Calculate the average distance between points\n avg_distance[i] = averagedistance(data)\n\nplt.figure(figsize=(6,8))\nplt.subplot(2,1,1)\nplt.semilogx(dimensions,fraction,\n linewidth=2,\n color='#00B050')\nplt.xlabel('Number of Dimensions')\nplt.ylabel('Fraction of points distance < {} \\n from edge of hypercube'.format(distance))\nplt.grid('on')\n\nplt.subplot(2,1,2)\nplt.semilogx(dimensions,avg_distance,\n linewidth=2,\n color='#00B050')\nplt.xlabel('Number of Dimensions')\nplt.ylabel('Mean distance between points')\nplt.grid('on')\nplt.tight_layout()\nplt.savefig(cfg.dir_figures + 'lecture01e.png',dpi=300)","sub_path":"lecture01e.py","file_name":"lecture01e.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"284911871","text":"import avalanche as avl\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom avalanche.evaluation import metrics as metrics\nfrom models import MLP\nfrom experiments.utils import set_seed, create_default_args\n\n\ndef generative_replay_smnist(override_args=None):\n \"\"\"\n \"Continual Learning with Deep Generative Replay\" by Shin et. al. (2017).\n https://arxiv.org/abs/1705.08690\n \"\"\"\n args = create_default_args({'cuda': 0, 'hidden_size': 400,\n 'hidden_layers': 2, 'epochs': 10, 'dropout': 0,\n 'learning_rate': 0.001, 'train_mb_size': 16, 'seed': None}, override_args)\n set_seed(args.seed)\n device = torch.device(f\"cuda:{args.cuda}\"\n if torch.cuda.is_available() and\n args.cuda >= 0 else \"cpu\")\n\n benchmark = avl.benchmarks.SplitMNIST(5, return_task_id=False)\n model = MLP(hidden_size=args.hidden_size, hidden_layers=args.hidden_layers,\n drop_rate=args.dropout, relu_act=True)\n criterion = CrossEntropyLoss()\n\n interactive_logger = avl.logging.InteractiveLogger()\n\n evaluation_plugin = avl.training.plugins.EvaluationPlugin(\n metrics.accuracy_metrics(epoch=True, experience=True, stream=True),\n loggers=[interactive_logger])\n\n cl_strategy = avl.training.GenerativeReplay(\n model,\n torch.optim.Adam(model.parameters(), lr=args.learning_rate),\n criterion,\n train_mb_size=args.train_mb_size,\n train_epochs=args.epochs,\n eval_mb_size=128,\n replay_size=100,\n device=device,\n evaluator=evaluation_plugin,\n )\n\n res = None\n for experience in benchmark.train_stream:\n cl_strategy.train(experience)\n res = cl_strategy.eval(benchmark.test_stream)\n\n return res\n\n\nif __name__ == '__main__':\n res = generative_replay_smnist()\n print(res)\n","sub_path":"experiments/split_mnist/generative_replay.py","file_name":"generative_replay.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"220950837","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nfrom cms.sitemaps import CMSSitemap\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap as sitemap_view\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.views.static import serve as serve_static\n\nfrom attendee.views import (\n AttendeeCancelView, AttendeeDeleteView, AttendeeProfileView,\n AttendeeRegistrationView, RegisterSuccessView,\n login_or_register_attendee_view)\nfrom devday.views import exception_test_view\nfrom talk.views import (\n InfoBeamerXMLView, RedirectVideoView, SpeakerListView, SpeakerProfileView,\n TalkDetails, TalkListPreviewView, TalkListView, TalkVideoView)\n\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^sitemap\\.xml$', sitemap_view, {'sitemaps': {'cmspages': CMSSitemap}}),\n url(r'^select2/', include('django_select2.urls')),\n url(r'^attendee/register/$', AttendeeRegistrationView.as_view(), name='registration_register'),\n url(r'^attendee/cancel/(?P\\d+)$', AttendeeCancelView.as_view(), name='attendee_cancel'),\n url(r'^register/$', login_or_register_attendee_view, name='login_or_register_attendee'),\n url(r'^register/success/$', RegisterSuccessView.as_view(), name='register_success'),\n url(r'^accounts/', include('devday.registration_urls')),\n url(r'^accounts/delete/$', AttendeeDeleteView.as_view(), name='attendee_delete'),\n url(r'^accounts/profile/$', AttendeeProfileView.as_view(), name='user_profile'),\n url(r'^speakers/$', SpeakerListView.as_view(), name='speaker_list'),\n url(r'^schedule\\.xml$', InfoBeamerXMLView.as_view()),\n url(r'^(?P[^/]+)/schedule\\.xml$', InfoBeamerXMLView.as_view()),\n url(r'^videos/$', RedirectVideoView.as_view()),\n url(r'^speaker/profile/(?P\\d+)/$', SpeakerProfileView.as_view(), name='speaker_profile'),\n url(r'^upload/', include('django_file_form.urls')),\n url(r'^session/', include('talk.urls')),\n url(r'^committee/', include('talk.urls_committee')),\n url(r'^synthetic_server_error/$', exception_test_view),\n url(r'^(?P[^/]+)/talk-preview/$', TalkListPreviewView.as_view(), name='session_list_preview'),\n url(r'^(?P[^/]+)/talk/$', TalkListView.as_view(), name='session_list'),\n url(r'^(?P[^/]+)/videos/$', TalkVideoView.as_view(), name='video_list'),\n url(r'^(?P[^/]+)/talk/(?P[^/]+)/$', TalkDetails.as_view(), name='talk_details'),\n url(r'^', include('cms.urls')),\n url(r'^csvviews/', include('attendee.csv_urls')),\n]\n\n# This is only needed when using runserver.\nif settings.DEBUG: # pragma: nocover\n import debug_toolbar\n\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n url(r'^media/(?P.*)$', serve_static, # NOQA\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n ] + staticfiles_urlpatterns() + urlpatterns # NOQA\n","sub_path":"devday/devday/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"272044448","text":"from TeamPokerMainApp.Common.VariableDefinitions import *\nfrom TeamPokerMainApp.Player.PlayerProfile import PlayerProfile\nfrom TeamPokerMainApp.PokerGame.GameLogic.CardDeck import CardDeck\nfrom TeamPokerMainApp.PokerGame.GameLogic.Dealer import Dealer\nfrom TeamPokerMainApp.PokerGame.GameLogic.HandEvaluator import HandEvaluator\nimport numpy as np\n\n\nclass PokerGame:\n numberOfPlayers = 0\n maximumPlayers = 9\n\n def __init__(self):\n self._dealer = Dealer()\n self._deck = CardDeck()\n self._player0 = PlayerProfile()\n self._player1 = PlayerProfile()\n self._player2 = PlayerProfile()\n self._player3 = PlayerProfile()\n self._player4 = PlayerProfile()\n self._player5 = PlayerProfile()\n self._player6 = PlayerProfile()\n self._player7 = PlayerProfile()\n self._player8 = PlayerProfile()\n\n def addNewPlayer(self, name, money):\n if self.numberOfPlayers < 8:\n eval(f'self._player{self.numberOfPlayers}.createNewPlayer(name, money)')\n self.numberOfPlayers += 1\n else:\n print('Table is full!')\n\n def removePlayer(self, playerNumber):\n self.numberOfPlayers -= 1\n eval(f'self._player{playerNumber}.clearPlayer()')\n\n def newPokerRound(self):\n self._dealer.clearCardsOnTheTableAndPot()\n self.deck = self._deck.get_shuffled_deck()\n self._deck.print_shuffled_deck(self.deck)\n # start giving cards to players\n for card in range(NUMBER_OF_CARDS_IN_HAND):\n for player in range(self.maximumPlayers):\n if eval(f'self._player{player}.getPlayingStatus()') is STATUS_PLAYING:\n eval(f'self._player{player}.setCardsInPlayerHand(card, self.get_top_card())')\n\n def card_round_flop(self):\n self.get_top_card() # just removes top card from the deck, to burn it\n for card in range(NUMBER_OF_CARDS_ON_FLOP):\n self._dealer.setCardsOnTheTable(card, self.get_top_card())\n\n def card_round_turn(self):\n self.get_top_card() # just removes top card from the deck, to burn it\n self._dealer.setCardsOnTheTable(CARD_INDEX_TURN, self.get_top_card())\n\n def card_round_river(self):\n self.get_top_card() # just removes top card from the deck, to burn it\n self._dealer.setCardsOnTheTable(CARD_INDEX_RIVER, self.get_top_card())\n\n def get_top_card(self):\n topCard = self.deck[CARD_INDEX_TOP_CARD]\n self.deck = np.delete(self.deck, CARD_INDEX_TOP_CARD)\n return topCard\n\n def printPlayerHands(self):\n for player in range(self.numberOfPlayers):\n printingString = eval(f'self._player{player}.name')\n for item in eval(f'self._player{player}.getCardsInPlayerHand()'):\n printingString += ' '\n printingString += self._deck.get_card_name_from_card_number(item)\n\n print(printingString)\n\n def printTableCards(self):\n prnt = ''\n for card in self._dealer.getCardsOnTheTable():\n if card is not NO_CARD:\n prnt += ' '\n prnt += self._deck.get_card_name_from_card_number(card)\n print(prnt)\n\n def takePlayerCardsPlusTableCards(self, playerID):\n sevenCards = []\n for card in eval(f'self._player{playerID}.getCardsInPlayerHand()'):\n sevenCards.append(self._deck.get_card_number_from_card_id(card))\n for card in self._dealer.getCardsOnTheTable():\n sevenCards.append(self._deck.get_card_number_from_card_id(card))\n return sevenCards\n\n def evaluatePlayersHands(self):\n print(\"\\nResults:\")\n for player in range(self.numberOfPlayers):\n playerName = eval(f'self._player{player}.name')\n sevenCards = self.takePlayerCardsPlusTableCards(player)\n handEvaluator = HandEvaluator(sevenCards, playerName)\n result = handEvaluator.evaluate_hand()\n print(result)\n\n\ngame = PokerGame()\ngame.addNewPlayer('Victor', 10.0)\ngame.addNewPlayer('Cornel', 10.0)\ngame.addNewPlayer('Csaba', 10.0)\ngame.addNewPlayer('Adi', 10.0)\ngame.addNewPlayer('Andrei', 10.0)\n\ngame.newPokerRound()\nprint('----')\ngame.printPlayerHands()\nprint('----')\nprint('Flop: ')\ngame.card_round_flop()\ngame.printTableCards()\nprint('Turn: ')\ngame.card_round_turn()\ngame.printTableCards()\nprint('River: ')\ngame.card_round_river()\ngame.printTableCards()\ngame.evaluatePlayersHands()\n","sub_path":"~examples_and_tests/first_test.py","file_name":"first_test.py","file_ext":"py","file_size_in_byte":4436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"156913677","text":"from canvasapi import Canvas\nimport config\nimport csv\n\ncanvas = Canvas(config.API_URL, config.API_KEY)\n\naccountIDs = [6,7,8,9,10,11,12,13,14,15,16,17,18]\n\nfilename = \"DOE_Admin.csv\"\n\ncsv = open(filename, \"w\")\ncsv.write(\"username,role,role_id,subaccount\\n\")\n\ndef build_admin_lst(account, mode):\n\n print(\"\\n_______________________\\n\" + \"Admin's for: \" + str(account))\n root_admins = account.get_admins()\n print(\"*****Root Admins*****\\n\")\n\n for admin in root_admins: # Grabs all root level Admin Users\n user = admin.user\n role = admin.role\n print(\"Name: \" + user['login_id'] + \" -> Role: \" + role)\n\n csv.write(str(user['login_id'])+\",\"+role+\",\"+str(admin.id)+\",\"+str(account)+\"\\n\")\n\n\n if mode == \"all\":\n subaccounts = account.get_subaccounts(True) # Get all subaccounts in an account\n for subaccount in subaccounts:\n #print(\" \" + str(subaccount))\n admins = subaccount.get_admins() # Get all Admin Users\n tmp = []\n for admin in admins:\n user = admin.user\n role = admin.role\n # print(\" Name: \" + user['name'] + \" -> Role: \" + role)\n\n else:\n return None\n\n\nfor ID in accountIDs:\n current_account = canvas.get_account(ID)\n\n build_admin_lst(current_account, \"root\") # Account, Mode(\"root\" or \"all\")\n\nprint(\"Your .CSV is done...\")","sub_path":"get_admins.py","file_name":"get_admins.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"306415465","text":"import sys\nfrom pathlib import Path\nfrom utils_chess import *\nfrom datasets import PascalVOCDataset, ChessDataset\nfrom tqdm import tqdm\nfrom pprint import PrettyPrinter\nfrom argparser import parse_val_arguments\n\n# Good formatting when printing the APs for each class and mAP\npp = PrettyPrinter()\n\n# Parameters\nkeep_difficult = True # difficult ground truth objects must always be considered in mAP calculation, because these objects DO exist!\nworkers = 4\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef evaluate(checkpoint, run_colab, batch_size, set, subset):\n \"\"\"\n Evaluate.\n\n :param test_loader: DataLoader for test data\n :param model: model\n \"\"\"\n if run_colab:\n root_path = Path(\"/content/gdrive/My Drive/Chess notation/annotated\")\n else:\n root_path = \"/Users/laurenssamson/Documents/Projects/Chess_notation/chess/data/chess_data\"\n test_dataset = ChessDataset(root_path)\n if subset > 0:\n test_dataset.images = test_dataset.images[:subset]\n test_dataset.objects = test_dataset.objects[:subset]\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,\n collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)\n # Load model checkpoint that is to be evaluated\n checkpoint = torch.load(checkpoint, map_location=device)\n model = checkpoint['model']\n print(f\"Number of epoch trained: {checkpoint['epoch']}\")\n model = model.to(device)\n\n # Switch to eval mode\n model.eval()\n # Make sure it's in eval mode\n model.eval()\n\n # Lists to store detected and true boxes, labels, scores\n det_boxes = list()\n det_labels = list()\n det_scores = list()\n true_boxes = list()\n true_labels = list()\n true_difficulties = list() # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py\n\n with torch.no_grad():\n # Batches\n for i, (images, boxes, labels, difficulties) in enumerate(tqdm(test_loader, desc='Evaluating')):\n images = images.to(device) # (N, 3, 300, 300)\n\n # Forward prop.\n predicted_locs, predicted_scores = model(images)\n\n # Detect objects in SSD output\n det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(predicted_locs, predicted_scores,\n min_score=0.01, max_overlap=0.45,\n top_k=200)\n # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparision with the paper's results and other repos\n\n # Store this batch's results for mAP calculation\n boxes = [b.cpu() for b in boxes]\n labels = [l.cpu()for l in labels]\n difficulties = [d.cpu() for d in difficulties]\n\n det_boxes.extend([box.cpu() for box in det_boxes_batch])\n det_labels.extend([label.cpu() for label in det_labels_batch])\n det_scores.extend([score.cpu() for score in det_scores_batch])\n true_boxes.extend(boxes)\n true_labels.extend(labels)\n true_difficulties.extend(difficulties)\n\n # Calculate mAP\n APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties)\n\n # Print AP for each class\n pp.pprint(APs)\n\n print('\\nMean Average Precision (mAP): %.3f' % mAP)\n model.train()\n\nif __name__ == '__main__':\n args = parse_val_arguments(sys.argv[1:])\n evaluate(**args)\n\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"347146932","text":"import pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom matplotlib import pyplot as plt\nimport time\nimport os\nimport numpy as np\nimport sys\n\ntime1 = time.time()\n#Daten einlesen\nlocation = input(\"Auf welchem Rechner?\")\nroot_path = \"//\"\nif location == \"Taurus\" or location == \"taurus\":\n root_path = \"/home/s1388135/Bachelor-Thesis/\"\nsys.path.insert(0, root_path)\nimport ml\n\n\n\n#Grid erstellen, pool für jeden Hyperparameter, so kann man dynamische einstellen in welchen Dimensionen das Grid liegt\n# wichtig: Standardmodell hat index 0 in jedem pool\npools = dict()\npools[\"batch_size\"] = [256, 512, 768, 1024, 128]\npools[\"units_nr_layers\"] = [(256,5), (512,3), (64,7), (1024, 2), (128, 6)]\npools[\"learning_rate\"]= [1e-2, 1e-3, 1e-4, 5e-3]\npools[\"l2_kernel\"] = [0.0]\npools[\"l2_bias\"] = [0.0]\npools[\"loss_fn\"] = [keras.losses.MeanAbsoluteError(), keras.losses.MeanSquaredError(), keras.losses.Huber()]\npools[\"optimizer\"] = [keras.optimizers.Adam, keras.optimizers.RMSprop]\npools[\"momentum\"] = [0.1]\npools[\"dropout\"] = [False]\npools[\"dropout_rate\"] = [0]\npools[\"kernel_initializer\"] = [tf.keras.initializers.HeNormal()]\npools[\"bias_initializer\"] = [tf.keras.initializers.Zeros()]\npools[\"hidden_activation\"] = [tf.nn.leaky_relu, tf.nn.relu, tf.nn.elu, tf.nn.sigmoid, tf.nn.tanh]\npools[\"output_activation\"] = [ml.LinearActiavtion()]\npools[\"feature_normalization\"] = [\"normalization\", None]\npools[\"scaling_bool\"] = [True]\npools[\"logarithm\"] = [True]\npools[\"base10\"] = [True, False]\npools[\"label_normalization\"] = [True, False]\npools[\"min_delta\"] = [5e-6]\npools[\"min_lr\"] = [5e-8]\npools[\"dataset\"] =[ \"TrainingData4M\"]\n#Festlegen, welche Hyperparameter in der Bezeichnung stehen sollen:\nnames = {\"loss_fn\", \"units_nr_layers\", \"optimizer\", \"hidden_activation\", \"dataset\", \"batch_size\",\n \"learning_rate\", \"units_nr_layers\", \"label_normalization\", \"base10\", \"feature_normalization\", }\n\nvary_multiple_parameters = True\n\n#Variablen...\ntrain_frac = 0.95\ntraining_epochs = 100\nsize = 100\nlr_reduction=0.05\nlr_factor = 0.5\nnesterov = True\nloss_function = keras.losses.MeanAbsolutePercentageError()\nfeature_rescaling = False\n\ncustom = False\nnew_model=True\n\nlr_patience = 1\nstopping_patience = 3\nrepeat = 2\n\n\n\n#Menge mit bereits gesehen konfigurationen\nchecked_configs = ml.create_param_configs(pools=pools, size=size, vary_multiple_parameters=vary_multiple_parameters)\nresults_list = dict()\n\nfor config in checked_configs:\n #Schönere accessability\n params = dict()\n for i,param in enumerate(pools):\n params[param] = config[i]\n\n data_path = root_path + \"/Files/Hadronic/Data/\" + params[\"dataset\"] + \"/\"\n data_name = \"all\"\n project_path = root_path + \"Files/Hadronic/Models/LastRandomSearch/\"\n if not vary_multiple_parameters:\n project_path += str(config[-1]) + \"/\"\n loss_name = \"best_loss\"\n project_name = \"\"\n\n label_name = \"WQ\"\n\n if params[\"feature_normalization\"] == \"rescaling\":\n feature_rescaling = True\n elif params[\"feature_normalization\"] == \"normalization\":\n feature_normalization = True\n \n #Trainingsparameter ein wenig nach batches anpassen\n training_epochs = int(1/100 * params[\"batch_size\"]) + 90\n lr_reduction = 25/params[\"batch_size\"]\n\n #Callbacks initialisieren\n #min delta initialiseren\n reduce_lr = keras.callbacks.LearningRateScheduler(ml.class_scheduler(reduction=lr_reduction, min_lr=params[\"min_lr\"]))\n reduce_lr_on_plateau = keras.callbacks.ReduceLROnPlateau(monitor=\"loss\", factor=lr_factor, patience=lr_patience, min_delta=params[\"min_delta\"], min_lr=params[\"min_lr\"])\n early_stopping = keras.callbacks.EarlyStopping(monitor=\"loss\", min_delta=1e-1 * params[\"min_delta\"], patience=stopping_patience)\n callbacks = [reduce_lr_on_plateau, early_stopping, reduce_lr]\n\n # Daten einlsen\n # Daten einlesen:\n (training_data, train_features, train_labels, test_features, test_labels, transformer) = ml.data_handling(\n data_path=data_path + data_name, label_name=label_name, scaling_bool=params[\"scaling_bool\"], logarithm=pools[\"logarithm\"], base10=params[\"base10\"],\n label_normalization=params[\"label_normalization\"], feature_rescaling=feature_rescaling,\n train_frac=train_frac)\n\n\n #Create path to save model\n if not vary_multiple_parameters:\n names = {config[-1]}\n model_name = ml.construct_name(params, names_set=names)\n save_path = project_path + model_name\n print(\"Wir initialisieren Modell \", model_name)\n\n #Best loss einlesen\n best_losses = None\n if os.path.exists(project_path + project_name + loss_name):\n best_losses = pd.read_csv(project_path + project_name + loss_name)\n\n # Verzeichnis erstellen\n if not os.path.exists(path=save_path):\n os.makedirs(save_path)\n\n\n #zweimal initialisiern um statistische Schwankungen zu verkleinern\n #trainin_time und total loss über die initialisierungen mitteln\n training_time = 0\n total_losses = []\n models = []\n for i in range(repeat):\n #Modell initialisieren\n models.append(ml.initialize_model(nr_layers=params[\"units_nr_layers\"][1], units=params[\"units_nr_layers\"][0], loss_fn=params[\"loss_fn\"], optimizer=params[\"optimizer\"],\n hidden_activation=params[\"hidden_activation\"], output_activation=params[\"output_activation\"],\n kernel_initializer=params[\"kernel_initializer\"], bias_initializer=params[\"bias_initializer\"], l2_kernel=params[\"l2_kernel\"],\n learning_rate=params[\"learning_rate\"], momentum=params[\"momentum\"], nesterov=nesterov,\n l2_bias=params[\"l2_bias\"], dropout=params[\"dropout\"], dropout_rate=params[\"dropout_rate\"],\n new_model=new_model, custom=custom, feature_normalization=pools[\"feature_normalization\"]))\n for i,model in enumerate(models):\n # Training starten\n time4 = time.time()\n history = model.fit(x=train_features, y=train_labels, batch_size=params[\"batch_size\"], epochs=training_epochs,\n callbacks = callbacks, verbose=2, shuffle=True)\n time5 = time.time()\n training_time += time5 - time4\n\n # Losses plotten\n ml.make_losses_plot(history=history)\n plt.savefig(save_path + \"/training_losses\")\n plt.show()\n\n # Überprüfen wie gut es war\n results = model(test_features)\n loss = float(loss_function(y_pred=transformer.retransform(results), y_true=transformer.retransform(test_labels)))\n print(\"Loss von Durchgang Nummer \", i, \" : \", loss)\n total_losses.append(loss)\n\n #training_time und total loss mitteln:\n avg_total_loss = np.mean(total_losses)\n smallest_loss = np.min(total_losses)\n loss_error = np.std(total_losses)\n training_time = 1 / repeat * training_time\n print(\"Losses of the specific cycle:\", total_losses)\n print(\"average Loss over \", repeat, \"cycles:\", np.mean(total_losses))\n print(\"Das beste Modell (Modell Nr.\", np.argmin(total_losses), \") wird gespeichert\")\n # Modell und config speichern\n model = models[np.argmin(total_losses)]\n model.save(filepath=save_path, save_format=\"tf\")\n (config, index) = ml.save_config(new_model=new_model, save_path=save_path, model=model, learning_rate=params[\"learning_rate\"],\n training_epochs=training_epochs, batch_size=params[\"batch_size\"],\n avg_total_Loss=avg_total_loss, smallest_loss=smallest_loss, loss_error=loss_error, total_losses=total_losses,\n transformer=transformer, training_time=training_time,\n custom=custom, loss_fn=params[\"loss_fn\"], feature_handling= params[\"feature_normalization\"],\n min_delta = params[\"min_delta\"], nr_hidden_layers=params[\"units_nr_layers\"][1], units=params[\"units_nr_layers\"][0])\n\n\n #Überprüfen ob Fortschritt gemacht wurde\n ml.check_progress(model=models[np.argmin(total_losses)], transformer=transformer, test_features=test_features, test_labels=test_labels,\n best_losses=best_losses, project_path=project_path, project_name=project_name,\n index=index, config=config, loss_name=loss_name)\n\n #Ergebnis im dict festhalten\n results_list[model_name] = \"{:.2f}\".format(float(avg_total_loss))\n\n #Ergebnisse speichern\n results_list_pd = pd.DataFrame(\n results_list,\n index = [0]\n )\n results_list_pd = results_list_pd.transpose()\n results_list_pd.to_csv(project_path + \"results\")\n\n\n\n","sub_path":"Executables/Hadronic Process/ML/GridSearch.py","file_name":"GridSearch.py","file_ext":"py","file_size_in_byte":8711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"463306487","text":"n = int(input(\"enter n:\"))\ndig = []\n\nwhile n != 0:\n dig.append(n % 10)\n n = n // 10\n \nprime = False\n\nfor num in dig:\n i = 2\n \n while i < num and num != 1 and num != 0:\n if num % i == 0:\n prime = False\n break\n else:\n prime = True\n i += 1\n \n if prime == True:\n break\n\n \nif prime:\n print(\"At least one prime digit found\")\nelse:\n print(\"No prime digits found\")\n \n","sub_path":"week-2/3-Simple-Algorithms/prime_digit.py","file_name":"prime_digit.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"253807765","text":"import numpy as np\nfrom scipy.linalg import solve_triangular\n\ndef householder(A, kmax=None):\n \"\"\"\n Given a real mxn matrix A, find the reduction to upper triangular matrix R\n using Householder transformations.\n\n :param A: an mxn-dimensional numpy array\n :param kmax: an integer, the number of columns of A to reduce \\\n to upper triangular. If not present, will default to n.\n\n :return R: an mxn-dimensional numpy array containing the upper \\\n triangular matrix\n \"\"\"\n\n m, n = A.shape\n R = (1.0+0j)*A\n if kmax is None:\n kmax = n\n kmax = min(m,kmax)\n for k in range(kmax):\n x = 1.0 * R[k:,k]\n v = (1.0+0j) * x\n arg = np.angle(x[0])\n coeff = np.exp(1j * arg)\n v[0] += coeff * np.linalg.norm(x)\n v = v / np.linalg.norm(v)\n R[k:,k:] -= 2 * np.outer(v, np.dot(v.conj().transpose(), R[k:,k:]))\n return R\n\n\ndef householder_solve(A, b):\n \"\"\"\n Given a real mxm matrix A, use the Householder transformation to solve\n Ax_i=b_i, i=1,2,...,k.\n\n :param A: an mxm-dimensional numpy array\n :param b: an mxk-dimensional numpy array whose columns are the \\\n right-hand side vectors b_1,b_2,...,b_k.\n\n :return x: an mxk-dimensional numpy array whose columns are the \\\n right-hand side vectors x_1,x_2,...,x_k.\n \"\"\"\n m, k = b.shape\n Ahat = np.zeros((m,m+1))\n x = np.zeros((m,k))\n for i in range(k):\n Ahat[:,:m] = 1.0*A\n Ahat[:,m] = 1.0*b[:,i]\n Rhat = householder(Ahat, m)\n x[:,i] = solve_triangular(Rhat[:,:m], Rhat[:,m])\n return x\n\n\ndef householder_qr(A):\n \"\"\"\n Given a real mxn matrix A, use the Householder transformation to find\n the QR factorisation of A.\n\n :param A: an mxn-dimensional numpy array\n\n :return Q: an mxm-dimensional numpy array\n :return R: an mxn-dimensional numpy array\n \"\"\"\n m, n = A.shape\n I = np.eye(m, dtype = complex)\n Ahat = np.zeros((m, n+m), dtype = complex)\n Ahat[:, :n] = A\n Ahat[:, n:] = I\n\n Rhat = householder(Ahat)\n R = Rhat[:,:n]\n Q = Rhat[:,n:].transpose().conj()\n\n return Q, R\n\n\ndef householder_ls(A, b):\n \"\"\"\n Given a real mxn matrix A and an m dimensional vector b, find the\n least squares solution to Ax = b.\n\n :param A: an mxn-dimensional numpy array\n :param b: an m-dimensional numpy array\n\n :return x: an n-dimensional numpy array\n \"\"\"\n m, n = A.shape\n Ahat = np.zeros((m, n+1))\n Ahat[:,:n] = 1.0*A\n Ahat[:, n] = 1.0*b\n\n Rhat = householder(Ahat)\n x = solve_triangular(Rhat[:n,:n], Rhat[:n,n])\n\n return x\n","sub_path":"exercises3.py","file_name":"exercises3.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"305557689","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('index.urls')),\n path('polls/', include('polls.urls')),\n path('blogs/', include('blog.urls')),\n path('files/', include('files.urls')),\n path('photos/', include('photos.urls')),\n path('users/', include('users.urls')),\n path('watchList/', include('watchlist.urls')),\n path('messageBoard/', include('messageboard.urls')),\n path('music/', include('music.urls')),\n path('mdeditor', include('mdeditor.urls')),\n path('captcha', include('captcha.urls'))\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"350349089","text":"# External imports\nimport unittest\nimport pytest\n\nimport numpy as np\nfrom gpmap import GenotypePhenotypeMap\n\n# Module to test\nfrom ..lasso import EpistasisLasso\n\n\n@pytest.fixture\ndef gpm():\n \"\"\"Create a genotype-phenotype map\"\"\"\n wildtype = \"000\"\n genotypes = [\"000\", \"001\", \"010\", \"100\", \"011\", \"101\", \"110\", \"111\"]\n phenotypes = [0.1, 0.1, 0.5, 0.4, 0.2, 0.8, 0.5, 1.0]\n stdeviations = 0.1\n return GenotypePhenotypeMap(wildtype, genotypes, phenotypes,\n stdeviations=stdeviations)\n\n\nclass TestEpistasisLasso(object):\n\n order = 3\n\n def test_init(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n\n # Checks\n check1 = model.order\n check2 = model.model_type\n assert check1 == self.order\n assert check2 == \"local\"\n\n def test_fit(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n # Checks\n check1 = hasattr(model, \"Xbuilt\")\n check2 = hasattr(model, \"coef_\")\n check3 = hasattr(model, \"epistasis\")\n\n # Tests\n assert check1 is True\n assert check2 is True\n assert check3 is True\n assert \"fit\" in model.Xbuilt\n\n\n def test_predict(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n check1 = model.predict(X='fit')\n\n # Tests\n assert \"predict\" in model.Xbuilt\n\n def test_score(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\", alpha=0.1)\n model.add_gpm(gpm)\n model.fit()\n score = model.score()\n # Tests\n assert score >= 0\n assert score <= 1\n\n def test_hypothesis(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n # Checks\n check1 = model.hypothesis(thetas=model.coef_)\n # Tests\n True\n\n def test_lnlikelihood(self, gpm):\n model = EpistasisLasso(order=self.order, model_type=\"local\")\n model.add_gpm(gpm)\n model.fit()\n\n # Calculate lnlikelihood\n lnlike = model.lnlikelihood()\n assert lnlike.dtype == float\n","sub_path":"epistasis/models/linear/tests/test_lasso.py","file_name":"test_lasso.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"12048574","text":"from util import Queue\n\ndef earliest_ancestor(ancestors, starting_node):\n ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\n\n # Create a graph\n graph = {}\n # Keep a track of visited nodes\n visited = set()\n # Keeps a track of the path when traversing the graph\n path = []\n # add empty sets to the graph with respective indexes\n for i in ancestors:\n if i[0] not in graph:\n graph[i[0]] = set()\n if i[1] not in graph:\n graph[i[1]] = set()\n print('graph', graph[i[0]])\n # we add the edges to the nodes\n for ancestor in ancestors:\n if ancestor[0] in graph and ancestor[1] in graph:\n graph[ancestor[1]].add(ancestor[0])\n \n # Create an empty queue\n queue = Queue()\n # Add the initial vertex to the path\n path.append(starting_node)\n # Then add the path to the queue, should be initialized as a empty list\n queue.enqueue(path)\n\n # This checks if the initial node has no ancestors, if it does not it returns '-1' \n for i in graph:\n if starting_node is i and bool(graph[i]) is False:\n return -1\n\n # while we have something in our queue.. \n while queue.size() > 0:\n # Dequeue the first path\n path = queue.dequeue() \n # We grab the last vertex from the path\n last_vertex = path[-1] \n # We check if it has NOT been visited..\n if last_vertex not in visited:\n # if not, we add it to the visited dic\n visited.add(last_vertex)\n # iterate over the last vertex's neighbors..\n for neighbor in graph[last_vertex]:\n # we check if the neighbor is NOT marked as visited...\n if neighbor not in visited:\n # if it is not, we make a copy of the path\n path_copy = path.copy()\n # we append the neighbor to the copy of the path\n path_copy.append(neighbor)\n # then we add the copy of the path to the queue\n queue.enqueue(path_copy)\n # we return the last vertex, indicating the farthest node in the ancestry chain\n return last_vertex\n\nif __name__=='__main__':\n earliest_ancestor((1,2), 4)\n\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"252338195","text":"import multiprocessing\n\n# アプリケーション指定\nwsgi_app = 'app:app'\n# ワーカークラス指定\nworker_class = \"uvicorn.workers.UvicornWorker\"\n# IPポートバインド\nbind = \"0.0.0.0:5000\"\n# 実行ワーカー数\nworkers = 2\n#workers = multiprocessing.cpu_count() * 1 + 1\n# デーモン無効\ndaemon = False\n# ホットリロード\nreload=True\n# タイムアウト\ntimeout = 10\n","sub_path":"Development_Setup/FastAPI/src/gunicorn.conf.py","file_name":"gunicorn.conf.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"95679929","text":"import cv2\r\nimport mediapipe as mp\r\nimport time\r\nimport math\r\n\r\n\r\nclass handDetector():\r\n def __init__(self, static_mode=False, max_hands=2, min_detection_confidence=0.5, min_tracking_confidence=0.5):\r\n self.static_mode = static_mode\r\n self.max_hands = max_hands\r\n self.min_detection_confidence = min_detection_confidence\r\n self.min_tracking_confidence = min_tracking_confidence\r\n\r\n self.mpHands = mp.solutions.hands\r\n self.hands = self.mpHands.Hands(self.static_mode, self.max_hands,\r\n self.min_detection_confidence, self.min_tracking_confidence)\r\n self.mpDraw = mp.solutions.drawing_utils\r\n\r\n def findHands(self, img, draw=True):\r\n # hands object uses RGB images only\r\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n self.results = self.hands.process(imgRGB)\r\n # print(results.multi_hand_landmarks)\r\n\r\n if self.results.multi_hand_landmarks:\r\n for handLms in self.results.multi_hand_landmarks: # for each hand\r\n if draw:\r\n self.mpDraw.draw_landmarks(\r\n img, handLms, self.mpHands.HAND_CONNECTIONS)\r\n\r\n return img\r\n\r\n def findPosition(self, img, handNo=0, draw=True):\r\n\r\n self.lmList = []\r\n if self.results.multi_hand_landmarks:\r\n myHand = self.results.multi_hand_landmarks[handNo]\r\n for id, lm in enumerate(myHand.landmark):\r\n # print(id, lm)\r\n h, w, c = img.shape\r\n # (x, y) are ratio of the point on img\r\n cx, cy = int(lm.x*w), int(lm.y*h)\r\n #print(id, cx, cy)\r\n self.lmList.append([id, cx, cy])\r\n if draw:\r\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\r\n\r\n return self.lmList\r\n\r\n def fingersUp(self):\r\n tipIds = [4, 8, 12, 16, 20]\r\n OpenFingers = []\r\n # FOR THUMB:\r\n if self.lmList[tipIds[0]][1] > self.lmList[tipIds[0]-1][1]: # lmlist[id][x][y]\r\n OpenFingers.append(1)\r\n else:\r\n OpenFingers.append(0)\r\n\r\n # Remaining Fingers:\r\n for id in range(1, 5):\r\n if self.lmList[tipIds[id]][2] < self.lmList[tipIds[id]-2][2]: # lmlist[id][x][y]\r\n OpenFingers.append(1)\r\n else:\r\n OpenFingers.append(0)\r\n # print(OpenFingers)\r\n return OpenFingers\r\n\r\n def findDistance(self, img, p1, p2):\r\n x1, y1 = self.lmList[p1][1], self.lmList[p1][2]\r\n x2, y2 = self.lmList[p2][1], self.lmList[p2][2]\r\n cx, cy = (x1 + x2) // 2, (y1 + y2) // 2\r\n\r\n cv2.circle(img, (x1, y1), 10, (255, 0, 255), cv2.FILLED)\r\n cv2.circle(img, (x2, y2), 10, (255, 0, 255), cv2.FILLED)\r\n cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)\r\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\r\n\r\n length = math.hypot(x2-x1, y2-y1)\r\n # print(length)\r\n\r\n # if length < 40:\r\n # cv2.circle(img, (cx, cy), 10, (255, 0, 0), cv2.FILLED)\r\n\r\n return length, [x1, y1, x2, y2, cx, cy]\r\n\r\n\r\ndef main():\r\n pTime = 0\r\n cTime = 0\r\n\r\n cap = cv2.VideoCapture(0)\r\n detector = handDetector() # using default parameters\r\n while True:\r\n success, img = cap.read()\r\n img = detector.findHands(img)\r\n lmList = detector.findPosition(img)\r\n if len(lmList) != 0:\r\n print(lmList[4]) # print position of 4th landmark\r\n\r\n cTime = time.time()\r\n fps = 1/(cTime - pTime)\r\n pTime = cTime\r\n\r\n cv2.putText(img, str(int(fps)), (10, 70),\r\n cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 2)\r\n\r\n cv2.imshow(\"Image\", img)\r\n cv2.waitKey(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"HandTrackingModule.py","file_name":"HandTrackingModule.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"644008063","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\n\nfrom talk import models as m\nfrom talk import forms as f\n\n\n@login_required\ndef message(request, user_id):\n user = User.objects.get(id=user_id)\n try:\n conv = m.Conversation.objects.get(\n Q(user_1=request.user) & Q(user_2=user))\n except m.Conversation.DoesNotExist:\n try:\n conv = m.Conversation.objects.get(\n Q(user_1=user) & Q(user_2=request.user))\n except m.Conversation.DoesNotExist:\n conv = None\n messages = None\n if conv:\n messages = m.Message.objects.filter(conversation=conv)\n form = f.MessageForm(request.POST or None)\n if form.is_valid():\n if conv is None:\n conv = m.Conversation(user_1=request.user, user_2=user)\n conv.save()\n message = form.cleaned_data.get('content')\n msg = m.Message(conversation=conv, user=request.user, content=message)\n msg.save()\n return redirect('messages:message', user_id)\n args = dict(user=user, form=form, messages=messages)\n return render(request, 'talk/message_details.html', args)\n\n\n@login_required\ndef messages(request):\n args = dict()\n try:\n conv = m.Conversation.objects.filter(\n Q(user_1=request.user) | Q(user_2=request.user))\n except m.Conversation.DoesNotExist:\n conv = None\n args = dict(conv=conv)\n return render(request, 'talk/message_list.html', args)\n","sub_path":"talk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"409167004","text":"'''\n@author: Mike McMillan\n12.1 Recursive Function part 1\n'''\ndef factorial(n):\n if n == 1:\n return n\n else:\n return n*factorial(n-1)\n\ndef main():\n numList = [1, 2, 3, 4, 5]\n for num in numList:\n print(num, factorial(num))\n\nif __name__ == '__main__': main()\n","sub_path":"Python3/MikeMcMillan/recursion_factorial.py","file_name":"recursion_factorial.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"338051844","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/watchme/client/remove.py\n# Compiled at: 2020-04-10 14:08:50\n# Size of source mod 2**32: 935 bytes\n\"\"\"\n\nCopyright (C) 2019-2020 Vanessa Sochat.\n\nThis Source Code Form is subject to the terms of the\nMozilla Public License, v. 2.0. If a copy of the MPL was not distributed\nwith this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"\nfrom watchme import get_watcher\nfrom watchme.logger import bot\n\ndef main(args, extra):\n \"\"\"activate one or more watchers\n \"\"\"\n name = args.watcher[0]\n watcher = get_watcher(name, base=(args.base), create=False)\n if args.delete:\n watcher.delete()\n else:\n if extra is None:\n bot.exit('Provide tasks to remove, or --delete for entire watcher.')\n for task in extra:\n watcher.remove_task(task)","sub_path":"pycfiles/watchme-0.0.28-py3.7/remove.cpython-37.py","file_name":"remove.cpython-37.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"447737645","text":"import requests\nimport re\nfrom pyquery import PyQuery as py\nimport os\n\n# params\ncourse_url = \"http://ocw.nctu.edu.tw/course_detail-v.php?bgid=1&gid=1&nid=263\"\ntest_vedio_url = \"http://ocwvideo.nctu.edu.tw/pub/mp4/mxa001_mp4/mxa001_110914.mp4\"\npath = 'videos/'\n# set headers\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36\"\n}\n\n# get the page_source\nresponse = requests.get(course_url, headers=headers)\n\n# get hrefs\nvedio_urls = []\ndoc = py(response.text)\nitems = doc(\".table.table-bordered.table-striped\")\nfor item in items('a').items():\n if item.attr('href') and item.text() == \"MP4 下載\":\n vedio_urls.append(item.attr('href'))\n print(item.attr('href'))\n\n\n# refer to https://www.cnblogs/^http:\\/\\/www\\..+\\.mp4$/.com/linxiyue/p/8244724.html\ndef download_video(url, fname):\n with requests.get(url, headers=headers, stream=True) as r:\n content_size = int(r.headers['content-length'])\n with open(fname, 'wb')as f:\n n = 1\n for i in r.iter_content(chunk_size=1024):\n loaded = n * 1024.0 / content_size\n f.write(i)\n print(\"\\r processed {0:%}\".format(loaded), end=\"\")\n n += 1\n print('end this %s' % url)\n\n\n# start download\nif not os.path.exists(path):\n os.makedirs(path)\nfor v_url in vedio_urls:\n print(\"start download from %s\" % v_url)\n fname = v_url.split('/')[-1]\n download_video(v_url, path + fname)\n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"56415966","text":"#########################################\n#########################################\n# 1. Activate pipenv shell\n# -- pipenv shell\n# 2. Run Python Script\n# -- python *filename*\n# 3. End python script\n# -- ctr c\n# 4. Exit pipenv shell\n# -- exit\n#########################################\n\n\n#imports\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException as ex\n# import time\nfrom bs4 import BeautifulSoup\n# import pandas as pd\n# import re\n# import os\n#website urls\nbase_url = 'http://www.worldsnowboarding.org/'\nathletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=SS&gender=M#table' #mens ss page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/27/?type=SS&gender=M' #mens ss page 27\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/9/?type=SS&gender=M' #mens ss page 9\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=HP&gender=M#table' #mens hp page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=BA&gender=M#table' #mens ba page 1\n\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=SS&gender=W#table' #womens ss page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=HP&gender=W#table' #womens hp page 1\n# athletes_url = 'http://www.worldsnowboarding.org/points-lists/?type=BA&gender=W#table' #womens ba page 1\n\n# Chrome session\ndriver = webdriver.Chrome(executable_path='/Users/rcadby/Sites/shreds_scraper/chromedriver')\ndriver.get(athletes_url)\ndriver.implicitly_wait(100)\n\n# name the output file to write to local disk\nout_filename = \"./csv/snowboard-profiles.csv\"\n# header of csv file to be written\nheaders = \"lastName, firstName, position, points, sponsors, age, nationality, nationality_full, stance, height, residence, resort, website, facebook, twitter, rider_concat_id, \\n\"\n\n# opens file, and writes headers\nf = open(out_filename, \"w\")\nf.write(headers)\n\n# initiate variables\ncountry_array = []\nscore_array = []\nposition_array = []\nlastname_array = []\nfirstname_array = []\nnationality_abr = []\n\ndef assign_arrays(type):\n # get full country name from table and add to array\n try:\n country_array.append(type.find(\"span\", {\"class\": \"icon-flag-medium\"})['oldtitle'].split(',')[0].lower())\n except:\n country_array.append('')\n # get rider score from table and add to array\n try:\n score_array.append(float(type.find(\"td\", {\"class\": \"last\"}).text))\n except:\n score_array.append('')\n # get position from table and add to array\n try:\n position_str = type.findChildren()[0].span.text\n position = int(position_str[:-1])\n position_array.append(position)\n except:\n position_array.append(0)\n # get rider last name\n try:\n lastname_array.append(type.find(\"a\", {\"class\": \"ranking-table-link\"}).text.split(',')[0])\n except:\n lastname_array.append('')\n # get rider first name\n try:\n firstname_array.append(type.find(\"a\", {\"class\": \"ranking-table-link\"}).text.split(',')[1])\n except:\n firstname_array.append('')\n # get full nationality\n try:\n nationality_full_array = type.find(\"span\", {\"class\": \"icon-flag-medium\"}).text.lower()\n nationality_abr.append(nationality_full_array)\n except:\n nationality_abr.append('')\n\n\n\n\n# get initial page soup\npage_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n\n# count number of pages\npage_count = page_soup.find('div', attrs={'class': 'pagination-links'}).find_all('a')\npages = []\nfor link in page_count:\n pages.append(link)\npage_total = pages[-2].text.strip()\npage_total = int(page_total) + 20\nprint(\"page total: \" + str(page_total))\n# initiate empty variable to see if it has already read this page\nlast_position_check = None\n\nfor i in range(page_total): # for each page\n\n # count profiles per page\n profile_count = len(driver.find_elements_by_class_name('ranking'))\n print('number of riders ' + str(profile_count))\n\n \n # wait for table to appear\n WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \".block-table\"))\n )\n rank_page_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # profile a tags\n profile_links = rank_page_soup.find_all(class_=\"ranking-table-link\")\n # get urls from a tags\n rider_link_array = []\n for profile_link in profile_links:\n rider_link_input = 'http://www.worldsnowboarding.org/' + profile_link.get('href')\n # rider_link_input = profile_link.get('href')\n rider_link_array.append(rider_link_input)\n\n\n # get whole rank row\n profile_data = rank_page_soup.find_all(\"tr\", {\"class\":\"ranking\"})\n country_array.clear()\n score_array.clear()\n position_array.clear()\n lastname_array.clear()\n firstname_array.clear()\n nationality_abr.clear()\n # get array of full country names\n for row in profile_data:\n assign_arrays(row)\n\n\n if i != 0 and position_array[(profile_count - 1)] == last_position_check:\n # navigate to link\n print('FAIL: duplicate page trial')\n print('current url: ' + driver.current_url)\n page_next = driver.find_element_by_class_name('next')\n page_next.click()\n print('next page url: ' + driver.current_url)\n\n # count profiles per page\n profile_count = len(driver.find_elements_by_class_name('ranking'))\n print('number of riders ' + str(profile_count))\n\n \n # wait for table to appear\n element = WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \".block-table\"))\n )\n rank_page_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # profile a tags\n profile_links = rank_page_soup.find_all(class_=\"ranking-table-link\")\n # get urls from a tags\n rider_link_array = []\n for profile_link in profile_links:\n rider_link_input = 'http://www.worldsnowboarding.org/' + profile_link.get('href')\n # rider_link_input = profile_link.get('href')\n rider_link_array.append(rider_link_input)\n\n # get whole rank row\n profile_data = rank_page_soup.find_all(\"tr\", {\"class\":\"ranking\"})\n country_array.clear()\n score_array.clear()\n position_array.clear()\n lastname_array.clear()\n firstname_array.clear()\n nationality_abr.clear()\n # get array of full country names\n for row in profile_data:\n assign_arrays(row)\n else:\n pass\n\n\n \n print('profile links:')\n print(profile_links) # print all a tags to profiles\n print('rider links:')\n print(rider_link_array) # print all urls to profiles\n print('full countries:')\n print(country_array)\n print('scores:')\n print(score_array)\n print('positions:')\n print(position_array)\n print('last names:')\n print(lastname_array)\n print('first names:')\n print(firstname_array)\n \n loop_counter = 0\n for rider_link in rider_link_array:\n # initiate list for rider stats\n profile = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']\n # assign full country name to profile\n profile[7] = country_array[loop_counter]\n profile[0] = lastname_array[loop_counter]\n profile[1] = firstname_array[loop_counter]\n profile[2] = position_array[loop_counter]\n profile[3] = score_array[loop_counter]\n\n first_name_concat = firstname_array[loop_counter].strip().lower()\n last_name_concat = lastname_array[loop_counter].strip().lower()\n nationality_concat = nationality_abr[loop_counter].strip()\n profile[15] = str(first_name_concat + last_name_concat + nationality_concat)\n\n # click on rider profile\n driver.get(rider_link)\n # get html on rider page and parse\n profile_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # get rider name\n try:\n # rider_name = profile_soup.select('h1.rider-label')[0].text.strip()\n # profile[0] = rider_name\n \n # get rider sponsors\n rider_sponsor_soup = profile_soup.find('div', attrs={'class': 'sponsor-list'})\n if rider_sponsor_soup is None:\n profile[4] = ''\n else:\n rider_sponsor_soup_conf = rider_sponsor_soup.ul.find_all('li')\n sponsors = ''\n for litag in rider_sponsor_soup_conf:\n sponsor_item = litag.text.strip()\n sponsors += sponsor_item + ' | '\n\n profile[4] = sponsors\n\n profile_code_soup = profile_soup.find_all('ul', attrs={'class': 'plain-list'})\n for ultag in profile_code_soup:\n profile_li = ultag.find_all('li')\n\n for litag in profile_li:\n # get data\n profile_info = litag.getText()\n # clean data\n profile_info = profile_info.replace(\"'\", 'ft.')\n profile_info = profile_info.replace('\"', 'in.')\n profile_info = profile_info.replace(',', '|')\n\n # split profile info by title and value using a colon\n profile_type= profile_info.split(\":\", 1)[0].lower()\n profile_stat = profile_info.split(\":\", 1)[1].strip().replace('\\n', ' ') #replace new line value with space\n\n print(profile_type, profile_stat)\n\n # check nationality\n if profile_type == 'age':\n profile[5]=profile_stat\n elif profile_type == 'nationality':\n profile[6]=profile_stat\n elif profile_type == 'stance':\n profile[8]=profile_stat\n elif profile_type == 'height':\n profile[9]=profile_stat\n elif profile_type == 'residence':\n profile[10]=profile_stat\n elif profile_type == 'home resort':\n profile[11]=profile_stat\n elif profile_type == 'website':\n profile[12]=profile_stat\n elif profile_type == 'facebook':\n profile[13]=profile_stat\n elif profile_type == 'twitter':\n profile[14]=profile_stat\n else:\n pass\n\n profile_str = ', '.join(str(x) for x in profile)\n print('PROFILE STRING: ' + profile_str)\n f.write(profile_str)\n if loop_counter == 49:\n last_position_check = profile[2]\n\n\n # go back to initial page\n driver.execute_script(\"window.history.go(-1)\")\n\n #start new line for new rider profile\n f.write(\"\\n\")\n except:\n print('FAIL: 404 go back')\n # go back to initial page\n driver.execute_script(\"window.history.go(-1)\")\n\n table_soup = BeautifulSoup(driver.page_source, 'html.parser', from_encoding='utf8')\n # find url in table\n url = rider_link.strip('http://www.worldsnowboarding.org/')\n find_link = table_soup.select_one(\"a[href*='\" + url + \"']\")\n # find parent of url - this is the row that has all the rider info\n parent = find_link.find_parent('tr', attrs={'class': 'ranking'})\n stat_array = parent.find_all('td')\n\n profile[1] = int(stat_array[0].span.text.strip('.')) #position\n name = stat_array[3].a.text.split(',')\n first_name = name[1]\n last_name = name[0]\n profile[0] = str(first_name + last_name) #name\n profile[5] = stat_array[4].span.text #nationality\n if stat_array[5] is not None or len(stat_array[5]) > 0:\n profile[4] = stat_array[5].text #age\n profile[2] = float(stat_array[8].text) #points\n\n profile_str = ', '.join(str(x) for x in profile)\n print('PROFILE STRING (FROM TABLE): ' + profile_str)\n f.write(profile_str)\n\n #start new line for new rider profile\n f.write(\"\\n\")\n\n loop_counter += 1\n\n # # wait for table to appear\n # WebDriverWait(driver, 10).until(\n # EC.visibility_of_element_located((By.CSS_SELECTOR, \".pagination\"))\n # )\n # navigate to link\n try:\n page_next = driver.find_element_by_class_name('next')\n page_next.click()\n except:\n driver.execute_script(\"window.history.go(-1)\")\n page_next = driver.find_element_by_class_name('next')\n page_next.click()\n\nf.close() # Close the file\ndriver.quit()","sub_path":"shredstats_scraper_all.py","file_name":"shredstats_scraper_all.py","file_ext":"py","file_size_in_byte":13132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"602991054","text":"n = int(input())\na = [int(x) for x in input().split()]\n\ny = []\nfor s, t in zip(a, a[1:]):\n if abs(s - t) != 1 and s != t:\n y.append(abs(s - t))\ny.append(1)\ny = y[0]\n\nfor s, t in zip(a, a[1:]):\n if not(abs(s - t) == y or (abs(s - t) == 1 and min(s, t) % y != 0)):\n break\nelse:\n print(\"YES\")\n print(\"{} {}\".format(10**9, y))\n exit()\n\nprint(\"NO\")\n","sub_path":"Codeforces/954C.py","file_name":"954C.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"520911128","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT = int(input())\n\nfor tc in range(1, T+1):\n str1 = list(map(int,input()))\n N = len(str1)\n str2 = [0]*N #0으로 채워진 리스트를 한개 만들어줌\n\n # print(str1)\n # print(str2)\n cnt = 0\n\n for i in range(N):\n if str2[i] == str1[i]:\n continue\n elif str2[i] != str1[i]: #같지 않다면 str1의 인덱스 i번째 값으로 끝까지 채운다\n cnt += 1\n j = i\n for j in range(N):\n str2[j] = str1[i]\n\n #print(str2)\n\n\n print(\"#{} {}\".format(tc,cnt))\n\n","sub_path":"SWEA/1289_원재의메모리복구/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"510657148","text":"# create a tuple named zoo\nzoo = ('zebra', 'elephant', 'flamingo', 'camel', 'polarbear')\n\n# find animal using index\nprint(zoo.index('flamingo'))\n\n# determine if animal is in tuple\nanimal = []\n\ndef find(animal):\n for value in zoo:\n if animal in zoo:\n print(f'{animal} is currently at the Zoo.')\n else:\n print(f'{animal} is not at the Zoo.')\n\nfind('flamingo')\nfind('lion')\n\n# create variable for each animal\n(zebra, elephant, flamingo, camel, polarbear) = zoo\nprint(elephant)\n\n# convert tuple to list\nzoo_list = list(zoo)\nprint(zoo_list)\n\n# use extend to add 3 animals\nzoo_list.extend(['kitty', 'cardinal', 'dolphin'])\nprint(zoo_list)\n\n# convert list back to tuple\nprint(tuple(zoo_list))","sub_path":"tuples/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"289767598","text":"from odoo import http, tools\nfrom odoo.http import request\nfrom bs4 import BeautifulSoup\nimport werkzeug\nimport requests\nimport logging\nimport json\n\n_logger = logging.getLogger(__name__)\n\n\nclass WechatEnglishController(http.Controller):\n\n @http.route('/api/v2/wechat_english', type='json', auth=\"user\")\n def wechat_category(self, key, **kw):\n result_list = []\n user_id = request.env.user.id\n word_lexicon = request.env['english.lexicon']\n user_word = request.env['english.lexicon.user.master']\n word_list = word_lexicon.sudo().search([\n \"|\",\n (\"word\", \"ilike\", key),\n (\"chinese_mean\", \"ilike\", key)])\n for word in word_list:\n is_added = False\n user_master = user_word.sudo().search([('english_lexicon_id', '=', word.id), ('user_id', '=', user_id)], limit=1)\n if user_master:\n is_added = True\n word_voice_url = word_lexicon.sudo().get_word_voice_url(word.id)\n result = {\"id\": word.id,\n \"word\": word.word,\n \"chinese_mean\": word.chinese_mean,\n \"british_accent\": word.british_accent,\n \"source_name\": word.source_name,\n \"sequence\": word.sequence,\n \"forms\": word.forms,\n \"is_added\": is_added,\n \"voice_url\": word_voice_url,\n }\n defintion_list = []\n special_defintion_word = {\n \"order\": 0,\n \"gram\": \"\",\n \"english_mean\": word.chinese_mean,\n \"chinese_mean\": \"\",\n \"synonymous\": word.forms,\n \"sentence_list\": []\n }\n defintion_list.append(special_defintion_word)\n\n for defintion in word.lexicon_explain_ids:\n defintion_word = {\n \"order\": defintion.order,\n \"gram\": defintion.gram,\n \"english_mean\": defintion.english_mean,\n \"chinese_mean\": defintion.chinese_mean,\n \"synonymous\": defintion.synonymous,\n }\n sentence_list = []\n for example in defintion.lexicon_explain_example_ids:\n sentences = {\n \"order\": example.order,\n \"example\": example.sentence,\n }\n sentence_list.append(sentences)\n defintion_word[\"sentence_list\"] = sentence_list\n\n defintion_list.append(defintion_word)\n\n result[\"defintion_list\"] = defintion_list\n result_list.append(result)\n return result_list\n\n @http.route('/api/v2/wechat_english_level', type='json', auth=\"user\")\n def get_word_level_list(self, **kwargs):\n return request.env['english.lexicon.master.level'].sudo().get_all_level()\n\n @http.route('/api/v2/wechat_english_save', type='json', auth=\"user\")\n def get_word_level_save(self, word_id, level_id, **kwargs):\n result = {'is_success': True}\n user_id = request.env.user.id\n try:\n request.env['english.lexicon.user.master'].sudo()\\\n .save_user_word(word_id, user_id, level_id)\n except Exception:\n result['is_success'] = False\n return result\n\n @http.route('/api/v2/get_word_id', type='http', auth=\"none\", csrf=False)\n def get_word_id(self, word, **kwargs):\n result = {'id': 0}\n request.session.db = \"Odoo_Project\"\n word_list = request.env['english.lexicon'].sudo().search([\n (\"word\", \"=\", word)], limit=1)\n if word_list:\n result[\"id\"] = word_list.id\n return json.dumps(result)\n\n @http.route('/api/v2/delete_word_id', type='http', auth=\"none\", csrf=False)\n def delete_word_id(self, **kwargs):\n request.session.db = \"Odoo_Project\"\n result = request.env['english.lexicon'].sudo().delete_attachment()\n return json.dumps(result)\n\n @http.route('/api/v2/wechat_level_words', type='json', auth=\"user\")\n def get_level_words(self, level_id, page_index, page_size, **kw):\n result = {'is_success': True}\n user_id = request.env.user.id\n try:\n data = request.env['english.lexicon.user.master'].sudo()\\\n .get_my_level_words(level_id, user_id, page_index, page_size)\n result['data'] = data\n except Exception as e:\n result['is_success'] = False\n result['info'] = tools.ustr(e)\n return result\n\n @http.route('/api/v2/wechat_insert_my_words', type='json', auth=\"user\")\n def insert_my_level_words(self, **kw):\n result = {'is_success': True}\n try:\n request.env['english.lexicon.user.master'].sudo().insert_my_words()\n except Exception as e:\n result['is_success'] = False\n result['info'] = tools.ustr(e)\n return result\n","sub_path":"wechat_mini_programs/controllers/wechat_english_controller.py","file_name":"wechat_english_controller.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"380974398","text":"# -*- coding:utf-8 -*-\n# 作者 :JunFengG\n# 创建时间 :2018/10/1 0001 16:53 \n# 文件 :UseTimeSeries\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom TimeSeries import convert_data_to_timeseries\n\n\ninput_file='data_timeseries.txt'\n\n#加载数据\ndata1=convert_data_to_timeseries(input_file,2)\ndata2=convert_data_to_timeseries(input_file,3)\n\n#转换为pandas数据帧\ndataframe=pd.DataFrame({'first':data1,'second':data2})\n\n#给定年份数据\ndataframe['1952':'1955'].plot()\nplt.title('First')\n\n\n#画出不同数据\nplt.figure()\ndifference=dataframe['1952':'1955']['first']-dataframe['1952':'1955']['second']\ndifference.plot()\nplt.title('Difference')\n\n#数据过滤\ndataframe[(dataframe['first']>60)&(dataframe['second']<20)].plot()\nplt.title('Selected')\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PythonMechineLearningCookbook/Chapter08/UseTimeSeries.py","file_name":"UseTimeSeries.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"513245474","text":"\"\"\"\nYou're given a dictionary of people and the number of games they've won.\n\nUse a for loop to iterate over the dictionary and print out the users name and how many games they've won in the\nfollowing format: sara has won n games\n\nTo make it human readable, pluralise the word game to suit the number of games won.\n\"\"\"\n\ngames_won = dict(sara=0, bob=1, tim=5, julian=3, jim=1)\n\n\ndef print_game_stats(games_won=games_won):\n \"\"\"Loop through games_won's dict (key, value) pairs (dict.items)\n printing (print, not return) how many games each person has won,\n pluralize 'game' based on number.\n\n Expected output (ignore the docstring's indentation):\n\n sara has won 0 games\n bob has won 1 game\n tim has won 5 games\n julian has won 3 games\n jim has won 1 game\n\n (Note that as of Python 3.7 - which we're using atm - dict insert order is retained\n so no sorting is required for this Bite.)\n \"\"\"\n for key in games_won:\n plural = \"game\"\n if games_won[key] != 1:\n plural = \"games\"\n print(\"{0} has won {1} {2}\".format(key, games_won[key], plural))\n\nprint_game_stats(games_won)","sub_path":"python/plurals.py","file_name":"plurals.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"425045363","text":"# -*- coding:utf-8 -*-\n\nimport requests\n\nproxies = {\n \"http\": \"http://10.10.1.10:3128\",\n \"https\": \"http://10.10.1.10:1080\",\n}\n\n\ndef get(url):\n '''\n @summary: 网络请求\n '''\n return requests.get(url, proxies=proxies)\n\n\nif __name__ == '__main__':\n print(get('http://www.baidu.com'))\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"598474503","text":"import pandas as pd\n\nfrom time import time\nfrom sklearn.model_selection import cross_val_score, StratifiedKFold\n\nfrom twitter.models.word2vec_model import build_model\n# from models.baseline_model import build_model\n\nimport logging\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n\n df = pd.read_csv('data/Tweets.csv')\n X = df['text'].values\n y = df['airline_sentiment']\n y = y.map({'negative': -1, 'neutral': 0, 'positive': 1}).values\n\n model = build_model(n_dim=25)\n\n t0 = time()\n\n # using determined test_splits for smart dump/load\n cv = StratifiedKFold(n_splits=3, random_state=42)\n scores = cross_val_score(model, X, y, cv=cv)\n\n score = scores.mean()\n std = scores.std()\n print(\"Score: %.4f +- %.4f\" % (score, std))\n print(\"Total done in %0.3fs\" % (time() - t0))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"twitter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"246335194","text":"import json\nimport re\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import html_to_markdown, remove_words, \\\n session_with_proxy\n\n\nclass Todoclick(Store):\n @classmethod\n def categories(cls):\n return [\n 'AllInOne',\n 'Notebook',\n 'StorageDrive',\n 'SolidStateDrive',\n 'ExternalStorageDrive',\n 'PowerSupply',\n 'ComputerCase',\n 'Motherboard',\n 'Processor',\n 'VideoCard',\n 'Ram',\n 'Tablet',\n 'Headphones',\n 'Mouse',\n 'Keyboard',\n 'Monitor',\n 'Printer',\n 'UsbFlashDrive',\n 'StereoSystem',\n 'Wearable',\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n category_paths = [\n ['notebooks', 'Notebook'],\n ['all-in-one', 'AllInOne'],\n ['disco-duro', 'StorageDrive'],\n ['fuentes-de-poder', 'PowerSupply'],\n ['gabinetes', 'ComputerCase'],\n ['placa-madre', 'Motherboard'],\n ['procesadores', 'Processor'],\n ['tarjetas-de-video', 'VideoCard'],\n ['memoria-ram', 'Ram'],\n ['tablet', 'Tablet'],\n ['audifonos', 'Headphones'],\n ['audifonos-gamer', 'Headphones'],\n ['mouse-accesorios', 'Mouse'],\n ['mouse-gamer', 'Mouse'],\n ['teclados', 'Keyboard'],\n ['teclado-gamer', 'Keyboard'],\n ['monitores', 'Monitor'],\n ['impresoras-laser-impresoras', 'Printer'],\n ['impresoras-ink-jet-impresoras', 'Printer'],\n ['multifuncional-laser', 'Printer'],\n ['multifuncional-ink-jet', 'Printer'],\n ['pendrive', 'UsbFlashDrive'],\n ['parlantes', 'StereoSystem'],\n ['soundbar', 'StereoSystem'],\n ['smartwatch', 'Wearable']\n ]\n\n session = session_with_proxy(extra_args)\n product_urls = []\n\n for category_path, local_category in category_paths:\n if local_category != category:\n continue\n\n page = 1\n\n while True:\n if page >= 15:\n raise Exception('Page overflow')\n\n if page == 1:\n page_url = 'https://todoclick.cl/{}/'.format(category_path)\n else:\n page_url = 'https://todoclick.cl/{}/page/{}/'\\\n .format(category_path, page)\n\n soup = BeautifulSoup(session.get(page_url).text, 'html.parser')\n products = soup.findAll('li', 'product')\n\n if not products:\n break\n\n for product in products:\n product_urls.append(product.find('a')['href'])\n\n page += 1\n\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n soup = BeautifulSoup(response.text, 'html5lib')\n\n name = soup.find('h1', 'product_title').text\n sku = soup.find('div', 'ct-code-block').text.split(':')[1].strip()\n\n stock = 0\n stock_container = soup.find('p', 'stock in-stock')\n if stock_container:\n stock = int(stock_container.text.split(' ')[0])\n\n offer_price_container = soup.find('p', 'price')\n\n if offer_price_container.find('ins'):\n offer_price_container = offer_price_container.find('ins')\n\n offer_price = Decimal(offer_price_container.find('span', 'amount')\n .text.replace('$', '').replace('.', ''))\n normal_price = Decimal(soup.find('div', {'id': 'Webpay'})\n .text.split('$')[1].replace('.', ''))\n\n images = soup.findAll('img', 'wp-post-image')\n picture_urls = [i['src'] for i in images]\n\n description = html_to_markdown(\n str(soup.find('div', {'id': 'tab-description'})))\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n normal_price,\n offer_price,\n 'CLP',\n sku=sku,\n part_number=sku,\n picture_urls=picture_urls,\n description=description\n )\n\n return [p]\n","sub_path":"storescraper/stores/todoclick.py","file_name":"todoclick.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"572105773","text":"# Classic knapsack problem, memoized\r\n# Each item has a weight w_i and a value v_i. The knapsack can only take up to\r\n# weight W\r\ndef memoize(f):\r\n memo = {}\r\n\r\n # internal function\r\n def helper(*args):\r\n if args not in memo:\r\n memo[args] = f(*args)\r\n return memo[args]\r\n\r\n return helper\r\n\r\ndef knapsack(items, max_weight):\r\n # no more items/ none to choose from\r\n if len(items) == 0:\r\n return 0\r\n\r\n @memoize\r\n def knapsack_best_value(n, max_w):\r\n if n == 0:\r\n return 0\r\n\r\n value, weight = items[n-1]\r\n if weight > max_w:\r\n # can't add item, so skipping to next\r\n return knapsack_best_value(n-1,max_w)\r\n else:\r\n # choose best valued item, maximising over existing\r\n # value to new potential\r\n return max(knapsack_best_value(n-1,max_w-weight) + value,\r\n knapsack_best_value(n-1,max_w))\r\n\r\n j = max_weight\r\n result = []\r\n for i in range(len(items), 0, -1):\r\n if knapsack_best_value(i, j) != knapsack_best_value(i - 1, j):\r\n result.append(items[i - 1])\r\n j -= items[i - 1][1]\r\n result.reverse()\r\n\r\n # prints number of partial computations\r\n # print(\"Partial computations %d\" % len(bestvalue.cache))\r\n return knapsack_best_value(len(items), max_weight), result\r\n\r\nitems = [(4, 12), (2, 1), (6, 4), (1, 1), (2, 2)]\r\nprint(knapsack(items,15))\r\n","sub_path":"dp_problems/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"30694993","text":"import torch, numpy, os, shutil, math, re, torchvision, argparse, warnings\nfrom netdissect import parallelfolder, renormalize, pbar\nfrom torchvision import transforms\nfrom torch.optim import Optimizer\n\ndef parseargs():\n parser = argparse.ArgumentParser()\n def aa(*args, **kwargs):\n parser.add_argument(*args, **kwargs)\n aa('--dataset', choices=['imagenet', 'novelty'], default='novelty')\n aa('--selected_classes', type=int, default=413)\n aa('--training_iterations', type=int, default=100001)\n args = parser.parse_args()\n return args\n\ndef main():\n args = parseargs()\n experiment_dir = 'results/baseline-%d-%s-resnet' % (\n args.selected_classes, args.dataset)\n ds_dirname = dict(\n novelty='novelty/dataset_v1/known_classes/images',\n imagenet='imagenet')[args.dataset]\n training_dir = 'datasets/%s/train' % ds_dirname\n val_dir = 'datasets/%s/val' % ds_dirname\n os.makedirs(experiment_dir, exist_ok=True)\n with open(os.path.join(experiment_dir, 'args.txt'), 'w') as f:\n f.write(str(args) + '\\n')\n def printstat(s):\n with open(os.path.join(experiment_dir, 'log.txt'), 'a') as f:\n f.write(str(s) + '\\n')\n pbar.print(s)\n def filter_tuple(item):\n return item[1] < args.selected_classes\n # Imagenet has a couple bad exif images.\n warnings.filterwarnings('ignore', message='.*orrupt EXIF.*')\n # Here's our data\n train_loader = torch.utils.data.DataLoader(\n parallelfolder.ParallelImageFolders([training_dir],\n classification=True,\n filter_tuples=filter_tuple,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n renormalize.NORMALIZER['imagenet'],\n ])),\n batch_size=64, shuffle=True,\n num_workers=48, pin_memory=True)\n val_loader = torch.utils.data.DataLoader(\n parallelfolder.ParallelImageFolders([val_dir],\n classification=True,\n filter_tuples=filter_tuple,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n renormalize.NORMALIZER['imagenet'],\n ])),\n batch_size=64, shuffle=False,\n num_workers=24, pin_memory=True)\n late_model = torchvision.models.resnet50(num_classes=args.selected_classes)\n for n, p in late_model.named_parameters():\n if 'bias' in n:\n torch.nn.init.zeros_(p)\n elif len(p.shape) <= 1:\n torch.nn.init.ones_(p)\n else:\n torch.nn.init.kaiming_normal_(p, nonlinearity='relu')\n late_model.train()\n late_model.cuda()\n\n model = late_model\n\n max_lr = 5e-3\n max_iter = args.training_iterations\n criterion = torch.nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.Adam(model.parameters())\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr,\n total_steps=max_iter - 1)\n iter_num = 0\n best = dict(val_accuracy=0.0)\n # Oh, hold on. Let's actually resume training if we already have a model.\n checkpoint_filename = 'weights.pth'\n best_filename = 'best_%s' % checkpoint_filename\n best_checkpoint = os.path.join(experiment_dir, best_filename)\n try_to_resume_training = False\n if try_to_resume_training and os.path.exists(best_checkpoint):\n checkpoint = torch.load(os.path.join(experiment_dir, best_filename))\n iter_num = checkpoint['iter']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n best['val_accuracy'] = checkpoint['accuracy']\n\n def save_checkpoint(state, is_best):\n filename = os.path.join(experiment_dir, checkpoint_filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename,\n os.path.join(experiment_dir, best_filename))\n\n def validate_and_checkpoint():\n model.eval()\n val_loss, val_acc = AverageMeter(), AverageMeter()\n for input, target in pbar(val_loader):\n # Load data\n input_var, target_var = [d.cuda() for d in [input, target]]\n # Evaluate model\n with torch.no_grad():\n output = model(input_var)\n loss = criterion(output, target_var)\n _, pred = output.max(1)\n accuracy = (target_var.eq(pred)\n ).data.float().sum().item() / input.size(0)\n val_loss.update(loss.data.item(), input.size(0))\n val_acc.update(accuracy, input.size(0))\n # Check accuracy\n pbar.post(l=val_loss.avg, a=val_acc.avg)\n # Save checkpoint\n save_checkpoint({\n 'iter': iter_num,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'scheduler' : scheduler.state_dict(),\n 'accuracy': val_acc.avg,\n 'loss': val_loss.avg,\n }, val_acc.avg > best['val_accuracy'])\n best['val_accuracy'] = max(val_acc.avg, best['val_accuracy'])\n printstat('Iteration %d val accuracy %.2f' %\n (iter_num, val_acc.avg * 100.0))\n\n # Here is our training loop.\n while iter_num < max_iter:\n for filtered_input, filtered_target in pbar(train_loader):\n # Track the average training loss/accuracy for each epoch.\n train_loss, train_acc = AverageMeter(), AverageMeter()\n # Load data\n input_var, target_var = [d.cuda()\n for d in [filtered_input, filtered_target]]\n # Evaluate model\n output = model(input_var)\n loss = criterion(output, target_var)\n train_loss.update(loss.data.item(), filtered_input.size(0))\n # Perform one step of SGD\n if iter_num > 0:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # Learning rate schedule\n scheduler.step()\n # Also check training set accuracy\n _, pred = output.max(1)\n accuracy = (target_var.eq(pred)).data.float().sum().item() / (\n filtered_input.size(0))\n train_acc.update(accuracy)\n remaining = 1 - iter_num / float(max_iter)\n pbar.post(l=train_loss.avg, a=train_acc.avg,\n v=best['val_accuracy'])\n # Ocassionally check validation set accuracy and checkpoint\n if iter_num % 1000 == 0:\n validate_and_checkpoint()\n model.train()\n # Advance\n iter_num += 1\n if iter_num >= max_iter:\n break\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\nif __name__ == '__main__':\n torch.backends.cudnn.benchmark = True\n main()\n","sub_path":"train_baseline_resnet.py","file_name":"train_baseline_resnet.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"148533611","text":"# Conjuntos são colecções desordenadas de elementos únicos.\n# Os elementos não são armazenados em uma ordem específica e confiável.\n# Conjuntos não contém elementos repetidos.\n\nprint('Loja Stars Wars')\n\npedidos_dia1 = [ \n {'Cliente': 'José', 'filme': 'A New Hope'},\n {'Cliente': 'José', 'filme': 'A New Hope'},\n {'Cliente': 'Francisco', 'filme': 'The Empire Strikes Back'},\n {'Cliente': 'Luis', 'filme': ' Revenge of the Sith'},\n]\n\n\npedidos_dia2 = [\n {'Cliente': 'Marco', 'filme': 'A New Hope'},\n {'Cliente': 'Nuno', 'filme': 'A New Hope'},\n {'Cliente': 'Carlos', 'filme': 'The Empire Strikes Back'},\n {'Cliente': 'Rodrigo', 'filme': ' Revenge of the Sith'}, \n]\n\nclientes_dia1 = set()\nfor pedido in pedidos_dia1:\n clientes_dia1.add(pedido['Cliente'])\n\nprint(f'Dia 1: {clientes_dia1}')\n\nclientes_dia2 = set()\nfor pedido in pedidos_dia2:\n clientes_dia2.add(pedido['Cliente'])\n\nprint(f'Dia 2: {clientes_dia2}')\n\ntodos_clientes = clientes_dia1 | clientes_dia2\nprint(f'União: {todos_clientes}')\n\ncliente_comprou_todos_os_dias = clientes_dia1.intersection(clientes_dia2)\nprint(f'Intersecão: {cliente_comprou_todos_os_dias}')\n\nclientes_diferenca = clientes_dia1 - clientes_dia2\nprint(f'Diferença: {clientes_diferenca}')","sub_path":"python_do_zero/conjuntos.py","file_name":"conjuntos.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"315135062","text":"import autofit as af\r\nfrom src.dataset.dataset import Dataset\r\nfrom src.phase.result import Result\r\nfrom src.phase.analysis import Analysis\r\nfrom src.phase.settings import SettingsPhase\r\n\r\n\"\"\"\r\nThe phase package combines a data-set, model and `NonLinearSearch`, allowing us to fit the `Dataset` with the model. It\r\nessentially acts as the `meeting point` between the other packages in the project (dataset, fit, plot) and modules\r\nin the phase package (phase.py, analysis.py, result.py).\r\n\"\"\"\r\n\r\n\r\nclass Phase(af.AbstractPhase):\r\n\r\n \"\"\"\r\n This tells the phase that the input parameter `profiles` contains model components that are fitted for by the\r\n phase`s `NonLinearSearch`.\r\n\r\n In `analysis.py`, the `log_likelihood_function`' input parameter `instance` contains the `profiles` mapped from\r\n this model via the `NonLinearSearch` (as we saw in chapter 1).\r\n\r\n For your model-fitting problem, this will be replaced by the modules in your `model` package.\r\n \"\"\"\r\n\r\n profiles = af.PhaseProperty(\"profiles\")\r\n\r\n Result = Result # Set the result to the Result class in `result.py`\r\n\r\n def __init__(\r\n self,\r\n search: af.NonLinearSearch, # <- This specifies the default `NonLinearSearch` used by the phase.\r\n settings: SettingsPhase, # <- Settings will be covered in detail in tutorial 3.\r\n profiles: list,\r\n ):\r\n \"\"\"\r\n A phase which fits a `Gaussian` model using a `NonLinearSearch`.\r\n\r\n Parameters\r\n ----------\r\n search: class\r\n The class of a non_linear search\r\n settings : SettingsPhase\r\n The collection of settings of the phase used to augment the data that is fitted and tag the output path.\r\n profiles : [profiles.Profile]\r\n The model components (e.g. Gaussian, Exponential) fitted by this phase.\r\n \"\"\"\r\n super().__init__(search=search)\r\n\r\n self.settings = settings\r\n self.profiles = profiles\r\n\r\n def run(self, dataset: Dataset, info=None) -> Result:\r\n \"\"\"\r\n Pass a `Dataset` to the phase, running the phase and `NonLinearSearch`.\r\n\r\n Parameters\r\n ----------\r\n dataset : `Dataset`.Dataset\r\n The `Dataset` fitted by the phase, which is specified in the module `dataset/dataset.py`\r\n\r\n Returns\r\n -------\r\n result: result.Result\r\n A result object comprising information on the `NonLinearSearch` and the maximum likelihood model.\r\n Tutorial 3 will cover phase tagging, which this function handles.\"\"\"\r\n self.modify_search_paths()\r\n\r\n \"\"\"\r\n These functions create instances of the Analysis class (in `analysis.py`), runs the analysis (which performs\r\n the `NonLinearSearch` ) and returns an instance of the Result class (in `result.py`).\r\n\r\n Once you`ve looked through this module, check those modules out to see exactly what these classes do!\r\n \"\"\"\r\n analysis = self.make_analysis(dataset=dataset)\r\n\r\n \"\"\"\r\n `run_analysis` is not located in analysis.py, instead it is an inherited method from the parent class\r\n `af.AbstractPhase`. Essentially, all this function does is begin the `NonLinearSearch`, using the analysis\r\n created above.\r\n \"\"\"\r\n result = self.run_analysis(analysis=analysis, info=info)\r\n\r\n return self.make_result(result=result, analysis=analysis)\r\n\r\n def make_analysis(self, dataset: Dataset) -> Analysis:\r\n \"\"\"\r\n Returns an Analysis object, which uses the `Dataset` with functions to perform a fit.\r\n\r\n Parameters\r\n ----------\r\n dataset : `Dataset`.Dataset\r\n The `Dataset` fitted by the phase, which is specified in the module `dataset/dataset.py`\r\n\r\n Returns\r\n -------\r\n analysis : Analysis\r\n An analysis object that the `NonLinearSearch` calls to determine the fit log_likelihood for a given model\r\n instance.\r\n \"\"\"\r\n dataset = dataset.trimmed_dataset_from_settings(\r\n settings=self.settings.settings_dataset\r\n )\r\n\r\n return Analysis(dataset=dataset, settings=self.settings)\r\n\r\n def make_result(self, result: af.Result, analysis: Analysis) -> Result:\r\n return self.Result(\r\n samples=result.samples,\r\n previous_model=self.model,\r\n search=self.search,\r\n analysis=analysis,\r\n )\r\n\r\n def modify_search_paths(self):\r\n \"\"\"\r\n Modify the output paths of the phase before the non-linear search is run, so that the output path can be\r\n customized using the tags of the phase.\r\n \"\"\"\r\n self.search.paths.tag = self.settings.tag\r\n","sub_path":"notebooks/howtofit/chapter_phase_api/src/phase/phase.py","file_name":"phase.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"478712604","text":"a = (\"Процент\")\r\nb = (\"Процента\")\r\nc = (\"Процентов\")\r\nn = [i for i in range(1,101)]\r\nnumbs = {11,12,13,14}\r\nfor n in range(100):\r\n n = n + 1\r\n if n in numbs:\r\n print(n, \"процентов\")\r\n elif n % 10 == 1:\r\n print(n, \"процент\")\r\n elif n % 10 > 1 and n % 10 <5:\r\n print(n, \"процента\")\r\n else:\r\n print(n, \"процентов\")","sub_path":"Mironov_Mark_DZ_3.py","file_name":"Mironov_Mark_DZ_3.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"428763683","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport scipy.misc\nfrom keras.preprocessing import image\nimport os\nimport subprocess\nimport easygui\nfrom PIL import Image\n\nDATA_DIR = \"./data\"\nTRAIN_DIR = os.path.join(DATA_DIR, \"train\")\nTEST_DIR = os.path.join(DATA_DIR, \"test\")\n\n# IMG_SHAPE = (64, 64)\n\n\n\ndef pixel_errors(input_S, input_C, decoded_S, decoded_C):\n \"\"\"Calculates mean of Sum of Squared Errors per pixel for cover and secret images. \"\"\"\n see_Spixel = np.sqrt(np.mean(np.square(255 * (input_S - decoded_S))))\n see_Cpixel = np.sqrt(np.mean(np.square(255 * (input_C - decoded_C))))\n\n return see_Spixel, see_Cpixel\n\n# TODO debug\ndef pixel_histogram(diff_S, diff_C):\n \"\"\"Calculates histograms of errors for cover and secret image. \"\"\"\n diff_Sflat = diff_S.flatten()\n diff_Cflat = diff_C.flatten()\n\n fig = plt.figure(figsize=(15, 5))\n a = fig.add_subplot(1, 2, 1)\n\n imgplot = plt.hist(255 * diff_Cflat, 100, normed=1, alpha=0.75, facecolor='red')\n a.set_title('Distribution of error in the Cover image.')\n plt.axis([0, 250, 0, 0.2])\n\n a = fig.add_subplot(1, 2, 2)\n imgplot = plt.hist(255 * diff_Sflat, 100, normed=1, alpha=0.75, facSecolor='red')\n a.set_title('Distribution of errors in the Secret image.')\n plt.axis([0, 250, 0, 0.2])\n\n plt.show()\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\ndef show_image(img, n_rows, n_col, idx, gray=False, first_row=False, title=None):\n ax = plt.subplot(n_rows, n_col, idx)\n if gray:\n plt.imshow(rgb2gray(img), cmap = plt.get_cmap('gray'))\n else:\n plt.imshow(img)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n if first_row:\n plt.title(title)\n\n# Configs for results display\ndef result_display(input_S,input_C,decoded_S,decoded_C,\n SHOW_GRAY = False,SHOW_DIFF = True,ENHANCE = 1,n = 6):\n '''\n\n :param SHOW_GRAY: Show images in gray scale\n :param SHOW_DIFF: Show difference bettwen predictions and ground truth.\n :param ENHANCE: Diff enhance magnitude\n :param n: Number of secret and cover pairs to show.\n :return:\n '''\n # Get absolute difference between the outputs and the expected values.\n diff_S, diff_C = np.abs(decoded_S - input_S), np.abs(decoded_C - input_C)\n\n # Print pixel-wise average errors in a 256 scale.\n S_error, C_error = pixel_errors(input_S, input_C, decoded_S, decoded_C)\n\n print(\"S error per pixel [0, 255]:\", S_error)\n print(\"C error per pixel [0, 255]:\", C_error)\n # pixel_histogram(diff_S, diff_C)\n if n > 6:\n n = 6\n\n plt.figure(figsize=(14, 15))\n rand_indx = [random.randint(0, len(input_C)) for x in range(n)]\n # for i, idx in enumerate(range(0, n)):\n for i, idx in enumerate(rand_indx):\n n_col = 6 if SHOW_DIFF else 4\n\n show_image(input_C[i], n, n_col, i * n_col + 1, gray=SHOW_GRAY, first_row=i == 0, title='Cover')\n\n show_image(input_S[i], n, n_col, i * n_col + 2, gray=SHOW_GRAY, first_row=i == 0, title='Secret')\n\n show_image(decoded_C[i], n, n_col, i * n_col + 3, gray=SHOW_GRAY, first_row=i == 0, title='Encoded Cover')\n\n show_image(decoded_S[i], n, n_col, i * n_col + 4, gray=SHOW_GRAY, first_row=i == 0, title='Decoded Secret')\n\n if SHOW_DIFF:\n show_image(np.multiply(diff_C[i], ENHANCE), n, n_col, i * n_col + 5, gray=SHOW_GRAY, first_row=i == 0,\n title='Diff Cover')\n\n show_image(np.multiply(diff_S[i], ENHANCE), n, n_col, i * n_col + 6, gray=SHOW_GRAY, first_row=i == 0,\n title='Diff Secret')\n\n plt.show()\n\ndef MatrixToImage(data):\n data = data*255\n new_im = Image.fromarray(data.astype(np.uint8))\n return new_im\n\n# new display and write\ndef iamge_save(decoded_S,decoded_C,orig_size,path='./outcome',name_box = None):\n cover_path = path+'/cover/'\n secret_path = path + '/secret/'\n if not os.path.exists(path):\n os.mkdir(path)\n if not os.path.exists(cover_path):\n os.mkdir(cover_path)\n if not os.path.exists(secret_path):\n os.mkdir(secret_path)\n for i in range(decoded_C.shape[0]):\n d_C = MatrixToImage(decoded_C[i])\n d_S = MatrixToImage(decoded_S[i])\n if d_C.size != orig_size[i]:\n d_C = d_C.resize(orig_size[i], Image.ANTIALIAS)\n d_S = d_S.resize(orig_size[i], Image.ANTIALIAS)\n if name_box==None:\n d_C.save(cover_path+f'{i}.png')\n d_S.save(secret_path+f'{i}.png')\n else:\n d_C.save(cover_path + str(name_box[i])+r'.png')\n d_S.save(secret_path + str(name_box[i])+r'.png')\n print('\\nFinsh! ')\n\n\n\ndef load_dataset_small(num_images_per_class_train, num_images_test, train_set_range):\n \"\"\"Loads training and test datasets, from Tiny ImageNet Visual Recogition Challenge.\n\n Arguments:\n num_images_per_class_train: number of images per class to load into training dataset.\n num_images_test: total number of images to load into training dataset.\n \"\"\"\n X_train = []\n X_test = []\n X_test_size = []\n\n\n # Get training dataset directory. It should contain 'train' folder and 'test' folder.\n path = easygui.diropenbox(title = 'Choose dataset directory')\n # path = './exp'\n # Create training set.\n train_set = os.listdir(os.path.join(path, 'train'))\n for c in train_set:\n train_set_range = train_set_range - 1\n if train_set_range < 0:\n break\n c_dir = os.path.join(path, 'train', c, 'images')\n c_imgs = os.listdir(c_dir)\n random.shuffle(c_imgs)\n for img_name_i in c_imgs[0:num_images_per_class_train]:\n img_i = image.load_img(os.path.join(c_dir, img_name_i))\n x = image.img_to_array(img_i)\n X_train.append(x)\n random.shuffle(X_train)\n\n # Create test set.\n test_dir = os.path.join(path, 'test','images')\n test_imgs = os.listdir(test_dir)\n random.shuffle(test_imgs)\n for img_name_i in test_imgs[0:num_images_test]:\n img_i = image.load_img(os.path.join(test_dir, img_name_i))\n #resize\n img_i_reshape,img_ori_size = resize_image(img_i)\n x = image.img_to_array(img_i_reshape)\n X_test.append(x)\n X_test_size.append(img_ori_size)\n\n\n # Return train and test data as numpy arrays.\n return np.array(X_train), np.array(X_test), X_test_size\n\ndef resize_image(im):\n '''\n N*M is resized to N*N\n :param im: image cls\n :return: if idx==0 N==M\n '''\n (x,y) = im.size\n if x==y:\n return im, (x,y)\n elif x>y:\n N = y\n M = x\n idx_bigger = 1\n else:\n N = x\n M = y\n idx_bigger = 2\n out = im.resize((N,N), Image.ANTIALIAS)\n\n return out, (x,y)\n\n\n\ndef ffmpegProcess(code):\n '''\n run ffmepg code\n '''\n getmp3 = code\n returnget = subprocess.call(getmp3,shell=True)\n # print(returnget)\n\ndef extractFrameOfVideo(video_path,frame_rate=30,frame_save_path='./coverSource'):\n DivideCode = 'ffmpeg -i ' + video_path + ' -r '+str(frame_rate)+' '+frame_save_path+'%06d.png'\n ffmpegProcess(DivideCode)\n return\n\ndef generateVideo(frame_save_path='./hideSource',output_path='./test.mp4',frame_rate=5):\n generateCode = \"ffmpeg -framerate \"+str(frame_rate)+\" -i \"+frame_save_path+\"\\%d.png -vcodec libx264 -r \"\\\n +str(frame_rate)+\" -pix_fmt yuv420p \"+output_path\n ffmpegProcess(generateCode)\n\ndef readFrames(file_path):\n '''\n :return: list of framePath and num of file\n '''\n fs = os.listdir(file_path)\n fs.sort(key=lambda x: int(x[:-4]))\n file_name_list = []\n cnt=0\n for f in fs:\n file_name_list.append(os.path.join(file_path,f))\n cnt += 1\n return file_name_list,cnt\n\ndef randomSort(file_name_list,length,key,mode='encode'):\n '''\n if you want to recover the length and key must keep same\n :param file_name_list:\n :param length: number of files\n :param key: as seed\n :return: resorted list\n '''\n\n random.seed(key)\n # generate the random order\n rs = random.sample(range(length),length)\n resorted_list = []\n if mode=='encode':\n for i in range(length):\n resorted_list.append(file_name_list[rs[i]])\n print(resorted_list)\n elif mode =='decode':\n tmp = list(range(length))\n for i in range(length):\n tmp[rs[i]] = file_name_list[i]\n resorted_list = tmp\n print(resorted_list)\n else:\n print('mode wrong\\n')\n\n return resorted_list\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"509819191","text":"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport threading\nimport os\nimport traceback\nimport sys\nfrom hashlib import sha1\nfrom typing import Callable\n\nimport yaml\nfrom ray.autoscaler.commands import (\n create_or_update_cluster,\n teardown_cluster,\n get_head_node_ip,\n _bootstrap_config,\n)\n\nfrom .base import (\n CannotSpawnCluster,\n CannotDestroyCluster,\n ConnectionDetails,\n _get_ssh_proxy_command,\n)\nfrom .cluster import BaseCluster, Provider\n\n\nclass _ThreadTask:\n def __init__(self, target: Callable):\n self.target = target\n self.thread: threading.Thread = None\n self.exc: Exception = None\n self.silent = False\n\n\nclass _Immediate:\n def __init__(self, target: Callable):\n self.target = target\n\n def start(self):\n self.target()\n\n def join(self):\n pass\n\n\nclass RayCluster(BaseCluster):\n target_engine = \"Cloudray\"\n target_partition = \"Pandas\"\n\n __base_config = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"ray-autoscaler.yml\"\n )\n __instance_key = {Provider.AWS: \"InstanceType\"}\n __image_key = {Provider.AWS: \"ImageId\"}\n __credentials_env = {Provider.AWS: \"AWS_SHARED_CREDENTIALS_FILE\"}\n\n def __init__(self, *a, **kw):\n self.spawner = _ThreadTask(self.__do_spawn)\n self.destroyer = _ThreadTask(self.__do_destroy)\n\n self.ready = False\n super().__init__(*a, **kw)\n\n if self.provider.credentials_file is not None:\n try:\n config_key = self.__credentials_env[self.provider.name]\n except KeyError:\n raise ValueError(f\"Unsupported provider: {self.provider.name}\")\n os.environ[config_key] = self.provider.credentials_file\n\n self.config = self.__make_config()\n self.config_file = self.__save_config(self.config)\n\n def _spawn(self, wait=True):\n self.__run_thread(wait, self.spawner)\n\n def _destroy(self, wait=True):\n self.__run_thread(wait, self.destroyer)\n\n def __run_thread(self, wait, task: _ThreadTask):\n if not task.thread:\n task.thread = (_Immediate if wait else threading.Thread)(target=task.target)\n task.thread.start()\n\n if wait:\n task.silent = True\n task.thread.join()\n exc, task.exc = task.exc, None\n if exc:\n raise exc\n\n def __make_config(self):\n with open(self.__base_config) as inp:\n config = yaml.safe_load(inp.read())\n\n # cluster and provider details\n config[\"cluster_name\"] = self.cluster_name\n config[\"min_workers\"] = self.worker_count\n config[\"max_workers\"] = self.worker_count\n config[\"initial_workers\"] = self.worker_count\n config[\"provider\"][\"type\"] = self.provider.name\n if self.provider.region:\n config[\"provider\"][\"region\"] = self.provider.region\n if self.provider.zone:\n config[\"provider\"][\"availability_zone\"] = self.provider.zone\n\n # connection details\n config[\"auth\"][\"ssh_user\"] = \"ubuntu\"\n socks_proxy_cmd = _get_ssh_proxy_command()\n if socks_proxy_cmd:\n config[\"auth\"][\"ssh_proxy_command\"] = socks_proxy_cmd\n\n # instance types\n try:\n instance_key = self.__instance_key[self.provider.name]\n image_key = self.__image_key[self.provider.name]\n except KeyError:\n raise ValueError(f\"Unsupported provider: {self.provider.name}\")\n\n config[\"head_node\"][instance_key] = self.head_node_type\n config[\"head_node\"][image_key] = self.provider.image\n config[\"worker_nodes\"][instance_key] = self.worker_node_type\n config[\"worker_nodes\"][image_key] = self.provider.image\n\n return _bootstrap_config(config)\n\n @staticmethod\n def __save_config(config):\n cfgdir = os.path.abspath(os.path.expanduser(\"~/.modin/cloud\"))\n os.makedirs(cfgdir, mode=0o700, exist_ok=True)\n namehash = sha1(repr(config).encode(\"utf8\")).hexdigest()[:8]\n entry = os.path.join(cfgdir, f\"config-{namehash}.yml\")\n\n with open(entry, \"w\") as out:\n out.write(yaml.dump(config))\n return entry\n\n def __do_spawn(self):\n try:\n create_or_update_cluster(\n self.config_file,\n override_min_workers=None,\n override_max_workers=None,\n no_restart=False,\n restart_only=False,\n yes=True,\n override_cluster_name=None,\n )\n # need to re-load the config, as create_or_update_cluster() modifies it\n with open(self.config_file) as inp:\n self.config = yaml.safe_load(inp.read())\n self.ready = True\n except BaseException as ex:\n self.spawner.exc = CannotSpawnCluster(\n \"Cannot spawn cluster\", cause=ex, traceback=traceback.format_exc()\n )\n if not self.spawner.silent:\n sys.stderr.write(f\"Cannot spawn cluster:\\n{traceback.format_exc()}\\n\")\n\n def __do_destroy(self):\n try:\n teardown_cluster(\n self.config_file,\n yes=True,\n workers_only=False,\n override_cluster_name=None,\n keep_min_workers=0,\n )\n self.ready = False\n self.config = None\n except BaseException as ex:\n self.destroyer.exc = CannotDestroyCluster(\n \"Cannot destroy cluster\", cause=ex, traceback=traceback.format_exc()\n )\n if not self.destroyer.silent:\n sys.stderr.write(f\"Cannot destroy cluster:\\n{traceback.format_exc()}\\n\")\n\n def _get_connection_details(self) -> ConnectionDetails:\n \"\"\"\n Gets the coordinates on how to connect to cluster frontend node.\n \"\"\"\n assert self.ready, \"Cluster is not ready, cannot get connection details\"\n return ConnectionDetails(\n user_name=self.config[\"auth\"][\"ssh_user\"],\n key_file=self.config[\"auth\"][\"ssh_private_key\"],\n address=get_head_node_ip(self.config_file, override_cluster_name=None),\n )\n\n def _get_main_python(self) -> str:\n \"\"\"\n Gets the path to 'main' interpreter (the one that houses created environment for running everything)\n \"\"\"\n return \"~/miniconda/envs/modin/bin/python\"\n","sub_path":"modin/experimental/cloud/rayscale.py","file_name":"rayscale.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"556271622","text":"\"\"\" openscad-board-maker.py\n\nUsage:\n openscad-board-maker.py \n\n\"\"\"\n\nfrom solid import *\n\nimport docopt\n\nif __name__ == \"__main__\":\n args = docopt.docopt(__doc__)\n\n l = float(args[\"\"])\n w = float(args[\"\"])\n\n r = 4\n\n rect1 = cube(size=(l, w - 2*r, 1.6), center=True)\n rect2 = cube(size=(l - 2*r, w, 1.6), center=True)\n\n corner_centers = (l/2-r, w/2-r)\n\n corners = [\n translate(v=(corner_centers[0], corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n ),\n translate(v=(-corner_centers[0], corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n ),\n translate(v=(corner_centers[0], -corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n ),\n translate(v=(-corner_centers[0], -corner_centers[1], 0)) (\n cylinder(r=r, h=1.6, center=True) - hole()(cylinder(r=1.5, h=1.7, center=True))\n )\n ]\n\n a = union()(rect1, rect2, *corners)\n\n scad_render_to_file(a, file_header='$fn = 48;', include_orig_code=True)","sub_path":"openscad-board-maker.py","file_name":"openscad-board-maker.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"318554377","text":"\nfrom flask import session, jsonify, request, redirect, render_template, url_for, Markup, send_from_directory\nfrom flask_user import login_required\nfrom flask.ext.misaka import markdown\n\nimport os, re\nimport json\n\nfrom urlparse import urlparse\n\nfrom webrob.app_and_db import app, db\nfrom webrob.user.knowrob_user import read_tutorial_page\nfrom webrob.docker.docker_application import ensure_application_started\nfrom webrob.docker import docker_interface\n\nfrom utility import *\n\nMAX_HISTORY_LINES = 50\n\n@app.route('/knowrob/static/')\n@login_required\ndef download_static(filename):\n return send_from_directory(os.path.join(app.root_path, \"static\"), filename)\n\n@app.route('/knowrob/knowrob_data/')\n@login_required\ndef download_logged_image(filename):\n return send_from_directory('/home/ros/knowrob_data/', filename)\n\n@app.route('/knowrob/summary_data/')\n@login_required\ndef download_summary_image(filename):\n # TODO migrate summary_data -> users own data container and use docker_interface to retrieve summary!\n return send_from_directory('/home/ros/summary_data/', filename)\n\n@app.route('/knowrob/tutorials/')\n@app.route('/knowrob/tutorials//')\n@app.route('/knowrob/tutorials//')\n# @login_required\ndef tutorials(cat_id='getting_started', page=1):\n session['video'] = 0\n if not ensure_application_started('knowrob/hydro-knowrob-daemon'):\n return redirect(url_for('/knowrob/tutorials/'))\n \n error=\"\"\n # determine hostname/IP we are currently using\n # (needed for accessing container)\n host_url = urlparse(request.host_url).hostname\n container_name = session['user_container_name'] # 'tutorials'\n show_south_pane = False\n readonly = True\n authentication = False\n\n tut = read_tutorial_page(cat_id, page)\n content = markdown(tut.text, fenced_code=True)\n\n # automatically add event handler for highlighting DOM elements\n tmp = re.findall('(.*?)', str(content))\n for t in tmp:\n if 'hl_' in t:\n text = t.split(' hl_')[0]\n idname = t.split(' hl_')[1]\n content = re.sub('{} hl_{}'.format(text, idname), '{1}'.format(idname, text), str(content))\n elif 'hlc_' in t:\n text = t.split(' hlc_')[0]\n classname = t.split(' hlc_')[1]\n content = re.sub('{} hlc_{}'.format(text, classname), '{1}'.format(classname, text), str(content))\n\n # automatically add \"ask as query\" links after code blocks\n content = re.sub('(\\s)?', \"\", str(content))\n content = Markup(content)\n\n # check whether there is another tutorial in this category\n nxt = read_tutorial_page(cat_id, int(page)+1)\n prev = read_tutorial_page(cat_id, int(page)-1)\n\n return render_template('knowrob_tutorial.html', **locals())\n\n@app.route('/knowrob/')\n@app.route('/knowrob/hydro-knowrob-daemon')\n@app.route('/knowrob/exp/')\n@login_required\ndef knowrob(exp_path=None):\n session['video'] = 0\n if not ensure_application_started('knowrob/hydro-knowrob-daemon'):\n return redirect(url_for('user.logout'))\n \n error=\"\"\n # determine hostname/IP we are currently using\n # (needed for accessing container)\n host_url = urlparse(request.host_url).hostname\n\n container_name = session['user_container_name']\n show_south_pane = True\n # Remember experiment selection\n if exp_path is not None: session['exp'] = exp_path\n # Select a query file\n exp_query_file = None\n if 'exp' in session:\n exp = session['exp']\n if exp is not None: exp_query_file = exp + '.json'\n # TODO: Allow to select html template using a experiment configuration file\n\n return render_template('knowrob_simple.html', **locals())\n\n@app.route('/knowrob/video')\n@app.route('/knowrob/video/exp/')\n@login_required\ndef video(exp_path=None):\n session['video'] = 1\n if not ensure_application_started('knowrob/hydro-knowrob-daemon'):\n return redirect(url_for('user.logout'))\n \n error=\"\"\n # determine hostname/IP we are currently using\n # (needed for accessing container)\n host_url = urlparse(request.host_url).hostname\n container_name = session['user_container_name']\n\n # Remember experiment selection\n if exp_path is not None: session['exp'] = exp_path\n # Select a query file\n exp_query_file = None\n if 'exp' in session:\n exp = session['exp']\n if exp is not None: exp_query_file = exp + '.json'\n \n return render_template('video.html', **locals())\n\n@app.route('/knowrob/menu', methods=['POST'])\n@app.route('/knowrob/hydro-knowrob-daemon/menu', methods=['POST'])\ndef menu():\n knowrobUrl = '/knowrob/'\n \n menu_left = [\n ('Knowledge Base', knowrobUrl),\n ('Robot Memory Replay', knowrobUrl+'video'),\n ('Editor', knowrobUrl+'editor')\n ]\n \n exp_selection = __exp_file__()\n if exp_selection is None: exp_selection = \"Experiment\"\n \n exp_choices_map = {}\n for (submenu,exp) in __exp_list__():\n # Find exp url\n exp_url = knowrobUrl\n if __is_video__():\n exp_url += 'video/'\n exp_url += 'exp/'\n if len(submenu)>0:\n exp_url += submenu + '/'\n exp_url += exp\n \n menu = ''\n if len(submenu)>0:\n menu = submenu\n if not menu in exp_choices_map:\n exp_choices_map[menu] = []\n \n exp_choices_map[menu].append((exp, exp_url))\n \n exp_choices = []\n exp_map_keys = exp_choices_map.keys()\n exp_map_keys.sort()\n \n for key in exp_map_keys:\n if key == '': continue\n exp_choices_map[key].sort()\n exp_choices.append(('CHOICES', (key+' >>', exp_choices_map[key])))\n if '' in exp_map_keys:\n exp_choices_map[''].sort()\n exp_choices += exp_choices_map['']\n \n menu_right = [\n ('CHOICES', (exp_selection, exp_choices))\n ]\n \n return jsonify(menu_left=menu_left, menu_right=menu_right)\n\ndef __exp_menu_file__(f, category):\n if f.endswith(\".json\"):\n return (category, f[0:len(f)-len(\".json\")])\n else:\n return None\n \ndef __exp_list__():\n expList = []\n exp_root_path = os.path.join(app.root_path, \"static/experiments/queries\")\n \n for f0 in os.listdir(exp_root_path):\n exp_path = os.path.join(exp_root_path, f0)\n \n # Query file with submenu\n if os.path.isdir(exp_path):\n for f1 in os.listdir(exp_path):\n menu_entry = __exp_menu_file__(f1, f0)\n if menu_entry != None: expList.append(menu_entry)\n \n # Query file without submenu\n else:\n menu_entry = __exp_menu_file__(f0, '')\n if menu_entry != None: expList.append(menu_entry)\n \n return expList\n\ndef __exp_file__():\n if 'exp' in session:\n return session['exp']\n else:\n return None\n\ndef __is_video__():\n if 'video' in session:\n return session['video']\n else:\n return 0\n \n@app.route('/knowrob/exp_set', methods=['POST'])\n@login_required\ndef exp_set():\n expName = json.loads(request.data)['experimentName']\n session['exp'] = expName\n return jsonify(result=None)\n\n@app.route('/knowrob/add_history_item', methods=['POST'])\n@login_required\ndef add_history_item():\n query = json.loads(request.data)['query']\n hfile = get_history_file()\n # Remove newline characters\n query.replace(\"\\n\", \" \")\n \n # Read history\n lines = []\n if os.path.isfile(hfile):\n f = open(hfile)\n lines = f.readlines()\n f.close()\n # Append the last query\n lines.append(query+\".\\n\")\n # Remove old history items\n numLines = len(lines)\n lines = lines[max(0, numLines-MAX_HISTORY_LINES):numLines]\n \n with open(hfile, \"w\") as f:\n f.writelines(lines)\n \n return jsonify(result=None)\n\n@app.route('/knowrob/get_history_item', methods=['POST'])\n@login_required\ndef get_history_item():\n index = json.loads(request.data)['index']\n \n if index<0:\n return jsonify(item=\"\", index=-1)\n \n hfile = get_history_file()\n if os.path.isfile(hfile):\n # Read file content\n f = open(hfile)\n lines = f.readlines()\n f.close()\n \n # Clamp index\n if index<0: index=0\n if index>=len(lines): index=len(lines)-1\n if index<0: return jsonify(item=\"\", index=-1)\n \n item = lines[len(lines)-index-1]\n item = item[:len(item)-1]\n \n return jsonify(item=item, index=index)\n \n else:\n return jsonify(item=\"\", index=-1)\n\n\ndef get_history_file():\n userDir = get_user_dir()\n return os.path.join(get_user_dir(), \"query.history\")\n","sub_path":"webapps/knowrob/webrob/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"90323663","text":"\"\"\"Main script. Run your experiments from here\"\"\"\nfrom src.dataset import TextToSpeechDataset\nfrom src.Tacotron_model.taco_util import fetch_model,fetch_optimizer,fetch_dataloader\nfrom torch.utils.data.dataset import Subset\nimport src.hparam as hp\nimport torch\nimport src.Tacotron_model.util as utils\nfrom src.Tacotron_model.util import text_to_sequence, wav_to_spectrogram\nfrom src.Tacotron_model.taco_train import train_and_evaluate\n\nimport torch\nimport argparse\nimport os\n\n\nimport logging\nlogging.basicConfig(level=logging.WARNING)\nlog = logging.getLogger()\n\n\ndef parse_args():\n \"\"\"Parse command line arguments.\n Returns:\n\t\t(Namespace): arguments\n\t\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-l', '--logdir', type=str,\n\t\t\t\t\t\tdefault='log/tacotron',\n\t\t\t\t\t\thelp='parent directory of experiment logs (checkpoints/tensorboard events)')\n parser.add_argument('--log_level', type=str, default='WARNING',\n\t\t\t\t\t\thelp='log level to be used')\n parser.add_argument('-n', '--name', type=str, default='Training-naive',\n\t\t\t\t\t\thelp='name of experiment')\n parser.add_argument('-c', '--checkpoint', type=str, default=None,\n\t\t\t\t\t\trequired=False, help='path to checkpoint')\n parser.add_argument('--default_hparams', type=str,\n\t\t\t\t\t\tdefault='Tacotron_model/taco_hparams.yaml', help='path to .yaml with default hparams')\n parser.add_argument('--hparams', type=str,\n\t\t\t\t\t\trequired=False, help='comma separated name=value pairs')\n parser.add_argument('--data', type=str, default='/home/rajanie/Documents/Semester2/TTS/LJSpeech-1.1/',\n\t\t\t\t\t\thelp='csv file of texts and audio names in LLJDS Format')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n # set log level\n assert (args.log_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])\n log.setLevel(logging.getLevelName(args.log_level))\n # make sure log directory exists\n logdir = os.path.join(args.logdir, args.name)\n if not os.path.isdir(logdir):\n log.info(\"Creating directory {}\".format(logdir))\n os.makedirs(logdir)\n os.chmod(logdir, 0o775)\n hparams = hp.load_params_from_yaml(args.default_hparams)\n hparams.parse(args.hparams)\n hp.write_params_to_yaml(hparams, os.path.join(logdir, 'hparams.yaml'))\n #print(hparams)\n\n\n # set seed for reproducible experiments\n torch.manual_seed(hparams.seed)\n if torch.cuda.is_available():\n log.info(\"CUDA is available. Using GPU.\")\n torch.cuda.manual_seed(hparams.seed)\n torch.backends.cudnn.benchmark = True\n hparams.device = torch.device(\"cuda:0\")\n hparams.cuda = True\n else:\n log.info(\"CUDA is not available. Using CPU.\")\n hparams.device = torch.device(\"cpu\")\n hparams.cuda = False\n\n\n \n\n PATH = args.data\n dataset = TextToSpeechDataset(path = PATH,\n text_embeddings=text_to_sequence,\n mel_transforms=wav_to_spectrogram)\n logdir = args.logdir\n checkpoint = args.checkpoint\n fetch_dataloader(dataset,hparams)\n\n\n #train_and_evaluate(dataset, hparams, logdir)\n\n #melnet.cuda(device:0)\n # melnet #=melnet.load_state_dict(torch.load('/home/rajaniep/code/UntitledFolder/runs/melnet.pt'))\n\n\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"618777962","text":"from django.test import TestCase\n\nimport mock\nfrom model_mommy import mommy\n\nfrom battles.forms import CreateBattleForm, CreateTeamForm\nfrom pokemon.helpers import save_pokemon\nfrom pokemon.models import Pokemon\n\n\nclass TestCreateTeamForm(TestCase):\n def setUp(self):\n self.trainer = mommy.make(\"users.User\")\n self.pokemon_1 = mommy.make(\"pokemon.Pokemon\", attack=50, defense=50, hp=50)\n self.pokemon_2 = mommy.make(\"pokemon.Pokemon\", attack=50, defense=50, hp=50)\n self.pokemon_3 = mommy.make(\"pokemon.Pokemon\", attack=50, defense=50, hp=50)\n self.battle = mommy.make(\"battles.Battle\")\n\n def test_create_a_team(self):\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": self.pokemon_2.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"1\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertTrue(form.is_valid())\n\n def test_team_cant_have_identical_pokemon(self):\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": self.pokemon_1.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"1\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertFalse(form.is_valid())\n self.assertEqual(\n [\"Your team has duplicates, please use unique pokemon\"], form.non_field_errors()\n )\n\n @mock.patch(\"pokemon.helpers.get_pokemon_stats\")\n def test_pokemon_exceeds_points_limit(self, mock_get_pokemon_stats):\n mock_get_pokemon_stats.return_value = {\n \"name\": \"mock_name\",\n \"id\": 493,\n \"sprite\": \"\",\n \"attack\": 360,\n \"defense\": 360,\n \"hp\": 360,\n }\n\n save_pokemon(493)\n pokemon_493 = Pokemon.objects.get(id=493)\n\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": pokemon_493.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"1\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertFalse(form.is_valid())\n self.assertEqual(\n [\"Your team exceeds the 600 points limit, please choose another team\"],\n form.non_field_errors(),\n )\n assert mock_get_pokemon_stats.called\n\n def test_more_than_one_pokemon_cant_battle_in_the_same_round(self):\n params = {\n \"data\": {\n \"trainer\": self.trainer.id,\n \"pokemon_1\": self.pokemon_1.id,\n \"pokemon_2\": self.pokemon_2.id,\n \"pokemon_3\": self.pokemon_3.id,\n \"order_1\": \"2\",\n \"order_2\": \"2\",\n \"order_3\": \"3\",\n },\n }\n form = CreateTeamForm(**params)\n self.assertFalse(form.is_valid())\n self.assertEqual([\"Please allocate one pokemon per round\"], form.non_field_errors())\n\n\nclass TestCreateBattleForm(TestCase):\n def setUp(self):\n self.creator = mommy.make(\"users.User\")\n self.opponent = mommy.make(\"users.User\")\n\n def test_form_is_valid(self):\n params = {\n \"initial\": {\"user_creator\": self.creator},\n \"data\": {\"user_creator\": self.creator.id, \"user_opponent\": self.opponent.id},\n }\n form = CreateBattleForm(**params)\n self.assertTrue(form.is_valid())\n","sub_path":"backend/battles/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"441287515","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 4 13:34:22 2018\n\n@author: evanderdcosta\n\"\"\"\n\nimport inspect\nimport os\nimport tensorflow as tf\n\ndef class_vars(obj):\n return{k:v for k,v in inspect.getmembers(obj) \n if not k.startswith('__') and not callable(k)}\n \n\nclass BaseModel(object):\n def __init__(self, config, sess):\n self.config = config\n self.sess = sess\n self._saver = None\n \n # Keep a graph variable tracking steps\n # Can use this for annealing, etc\n with tf.variable_scope('step'):\n self.step = tf.Variable(0, trainable=False, name='step')\n self.step_input = tf.placeholder('int32', None, name='step_input')\n self.step_assign_op = self.step.assign(self.step_input)\n \n \n try:\n self._attrs = config.__dict__['__flags']\n except:\n self._attrs = class_vars(config)\n print(self._attrs)\n \n for attr in self._attrs:\n name = attr if not attr.startswith('_') else attr[1:]\n setattr(self, name, getattr(self.config, attr))\n \n \n def save_model(self, step=None):\n print(\"[*] Saving a checkpoint\")\n if(not os.path.exists(self.checkpoint_dir)):\n os.makedirs(self.checkpoint_dir)\n self.saver.save(self.sess, self.checkpoint_dir, global_step=self.step)\n \n def load_model(self):\n print(\"[*] Loading a model\")\n chkpt = tf.train.get_checkpoint_state(self.checkpoint_dir)\n if(chkpt and chkpt.model_checkpoint_path):\n chkpt_name = os.path.basename(chkpt.model_checkpoint_path)\n fname = os.path.join(self.checkpoint_dir, chkpt_name)\n self.saver.restore(self.sess, fname)\n print(\"[*] SUCCESS!\")\n return True\n else:\n print(\"Model load failed....\")\n return False\n \n @property\n def checkpoint_dir(self):\n return os.path.join('checkpoints', self.model_dir)\n \n @property\n def model_dir(self):\n model_dir = self.config.name\n for k, v in self._attrs.items():\n if not k.startswith('_') and k not in ['display']:\n model_dir += \"/%s-%s\" % (k, \",\".join([str(i) for i in v])\n if type(v) == list else v)\n return model_dir + '/'\n\n @property\n def saver(self):\n if(self._saver == None):\n self._saver = tf.train.Saver(max_to_keep=10)\n return self._saver\n","sub_path":"base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"270486410","text":"import datetime\n\ndef getDateTime(str_date):\n\t'''\n\tConverts string date to a date time object.\n\tParameters\n\t----------\n\tstr_date (String): A date string\n\tReturns\n\t-------\n\tDatetime object\n\n\t'''\n\tif len(str_date) == 0:\n\t return 'None'\n\telse:\n\t dt_format = \"%Y-%m-%d %H:%M:%S.%f\"\n\t return datetime.datetime.strptime(str_date, dt_format)","sub_path":"preprocess/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"211437404","text":"import numpy as np\nfrom SistemaFuzzy.Model import Regra\n\n\nclass Reducao:\n\n def __init__(self, regras, instancias, particoes):\n self.regrasComRuido = regras\n self.regras_para_classificacao = []\n self.instancias = instancias\n self.particoes = particoes\n self.regras = []\n self.tnormas = []\n\n def reduzir(self):\n for instancia in self.instancias:\n caracteristicas = instancia.caracteristicas\n classe = instancia.classe\n for regraAtual in self.regrasComRuido:\n antecedentes_regras = regraAtual.antecedentes\n consequente = regraAtual.consequente\n pertinencias_maximas = []\n for id_antecedente, caracteristica, particao in zip(antecedentes_regras, caracteristicas, self.particoes):\n #print(id_antecedente, caracteristicas, particao)\n pertinencia = particao.getPertinenciaIdConjunto(id_antecedente, caracteristica)\n pertinencias_maximas.append(pertinencia)\n tnorma = np.prod(pertinencias_maximas)\n self.atualizarRegras(tnorma, regraAtual)\n self.preencher_regra_nula()\n\n #print(\"1 - \", len(self.regrasComRuido))\n #print(\"2 - \", len(self.regras))\n #print(\"3 - \", len(self.regrasSemRuido))\n #for regra in self.regrasSemRuido:\n # print(regra)\n #a = 2 + \"2\"\n return self.regrasSemRuido\n\n def preencher_regra_nula(self):\n for regra in self.regrasComRuido:\n regraNula = Regra.Regra([-1] * len(regra.antecedentes), -1, 1, 1)\n self.regrasSemRuido.append(regraNula)\n for posicao, regra in enumerate(self.regrasComRuido):\n if regra in self.regras:\n self.regrasSemRuido[posicao] = regra\n\n\n\n def atualizarRegras(self, tnorma, regraAtual):\n\n if tnorma > 0:\n index, cond = self.inconsistencia(regraAtual)\n if not cond:\n self.regras.append(regraAtual)\n self.tnormas.append(tnorma)\n elif self.tnormas[index] < tnorma:\n self.regras[index] = regraAtual\n self.tnormas[index] = tnorma\n\n def inconsistencia(self, novaRegra):\n for index, r in enumerate(self.regras):\n if r.__eq__(novaRegra):\n return index, True\n return -1, False\n\n\nregra1 = Regra.Regra([1, 1, 1, 1], 1, 0.5)\nregra2 = Regra.Regra([1, 1, 1, 2], 1, 0.7)\nregra3 = Regra.Regra([1, 1, 1, 2], 2, 0.5)\nregra4 = Regra.Regra([1, 1, 1, 3], 3, 0.5)\n\nregras = [regra1,regra2,regra3,regra4]\nsemDuplicidade = []\nfor regra in regras:\n if not regra in semDuplicidade:\n semDuplicidade.append(regra)\nsemAmbiguidade = []\ncomAmbiguidade = []\nfor regra in semDuplicidade:\n encontrou = False\n for sa in semAmbiguidade:\n if regra.eq_antecedentes(sa):\n semAmbiguidade.remove(sa)\n comAmbiguidade.append(sa)\n comAmbiguidade.append(regra)\n encontrou = True\n if not encontrou:\n semAmbiguidade.append(regra)\n\n\"\"\"\nprint(\"Sem Ambiguidade\")\nfor regra in semAmbiguidade:\n print(regra.__str__())\n\nprint(\"Com Ambiguidade\")\nfor regra in comAmbiguidade:\n print(regra.__str__())\n\nregrasTratadas = semAmbiguidade\"\"\"\n\n\n\n","sub_path":"100-Testes/Reducao_RegrasV2.py","file_name":"Reducao_RegrasV2.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"377376468","text":"import math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport time\n\nLEARNING_RATE = 0.1\n\ndef main():\n if len(sys.argv) < 9:\n print(\"Please give formatted_train_out, formatted_validation_out, formatted_test_out, dict_input, \"\n \"train_out_labels, test_out_labels, metrics_out, and epochs respectively in commandline arguments.\")\n\n # start = time.time()\n formatted_train_out = sys.argv[1]\n formatted_validation_out = sys.argv[2]\n formatted_test_out = sys.argv[3]\n dict_input = sys.argv[4]\n train_out_labels = sys.argv[5]\n test_out_labels = sys.argv[6]\n metrics_out = sys.argv[7]\n epochs = int(sys.argv[8])\n\n with open(dict_input, 'r') as f:\n dict_input = f.readlines()\n word_dict = {}\n for line in dict_input:\n key, value = line.split()\n word_dict[key.strip()] = int(value)\n with open(formatted_train_out, 'r') as f:\n train_data = f.readlines()\n # with open(formatted_validation_out, 'r') as f:\n # validation_data = f.readlines()\n with open(formatted_test_out, 'r') as f:\n test_data = f.readlines()\n\n theta = np.zeros(len(word_dict)+1)\n X_train, Y_train = get_data(train_data)\n # X_validation, Y_validation = get_data(validation_data)\n X_test, Y_test = get_data(test_data)\n theta = train_logistic_regression(X_train, Y_train, theta, epochs)\n # theta, train_loss, validation_loss = train_logistic_regression_valid(X_train, Y_train, X_validation, Y_validation, theta, epochs)\n Y_train_predicted = predict(X_train, theta)\n Y_test_predicted = predict(X_test, theta)\n train_error = calculate_error(Y_train_predicted, Y_train)\n test_error = calculate_error(Y_test_predicted, Y_test)\n\n with open(train_out_labels, 'w') as f:\n for i in range(len(Y_train_predicted)):\n f.write(str(Y_train_predicted[i]))\n f.write('\\n')\n with open(test_out_labels, 'w') as f:\n for i in range(len(Y_test_predicted)):\n f.write(str(Y_test_predicted[i]))\n f.write('\\n')\n with open(metrics_out, 'w') as f:\n f.write(\"error(train): {}\\n\".format(str.format('{0:.6f}', train_error)))\n f.write(\"error(test): {}\\n\".format(str.format('{0:.6f}', test_error)))\n # end = time.time()\n # print(end-start)\n\n # print(train_loss)\n # print(validation_loss)\n # plt.plot(range(len(train_loss[:200])), train_loss[:200], label=\"Train Loss\")\n # plt.plot(range(len(validation_loss[:200])), validation_loss[:200], label=\"Validation Loss\")\n # plt.xlabel(\"Number of Epochs\")\n # plt.ylabel(\"Average Negative Log Likelihood\")\n # plt.legend()\n # plt.show()\n\ndef calculate_error(Y_predicted, Y):\n count = 0.0\n for i in range(len(Y)):\n if Y_predicted[i] != Y[i]:\n count += 1\n return count/float(len(Y))\n\ndef train_logistic_regression(X_train, Y_train, theta, epochs):\n for i in range(epochs):\n for j in range(len(X_train)):\n theta = update_theta_step_sgd(theta, X_train, Y_train, j)\n # train_loss = calculate_loss_function(theta, X_train, Y_train)\n # print(\"Train loss after epoch {}: {}\\n\".format(i, train_loss))\n return theta\n\ndef train_logistic_regression_valid(X_train, Y_train, X_validation, Y_validation, theta, epochs):\n train_loss, validation_loss = [], []\n for i in range(epochs):\n for j in range(len(X_train)):\n theta = update_theta_step_sgd(theta, X_train, Y_train, j)\n train_loss.append(calculate_loss_function(theta, X_train, Y_train))\n validation_loss.append(calculate_loss_function(theta, X_validation, Y_validation))\n return theta, train_loss, validation_loss\n\ndef predict(X, theta):\n Y = []\n for i in range(len(X)):\n theta_x = calculate_dot_product(theta, X[i])\n exponent = math.exp(theta_x)\n if (exponent/(1.0 + exponent)) > 0.5:\n Y.append(1)\n else:\n Y.append(0)\n return Y\n\ndef get_data(data_lines):\n X, Y = [], []\n for data_line in data_lines:\n line_data = data_line.split('\\t')\n y = int(line_data[0])\n Y.append(y)\n x = {0: 1}\n parameters = line_data[1:]\n for parameter in parameters:\n vals = parameter.split(':')\n x[int(vals[0])+1] = int(vals[1])\n X.append(x)\n return X, Y\n\ndef update_theta_step_sgd(theta, X, Y, i):\n theta_x = calculate_dot_product(theta, X[i])\n gradient = np.zeros(len(theta))\n for key in X[i].keys():\n term1 = -1 * (X[i])[key]\n exponent = math.exp(theta_x)\n term2 = Y[i] - (exponent/(1.0 + exponent))\n gradient[key] = term1 * term2\n theta = theta - LEARNING_RATE * gradient\n return theta\n\ndef calculate_loss_function(theta, X, Y):\n sum = 0.0\n for i in range(len(X)):\n theta_x = calculate_dot_product(theta, X[i])\n term1 = -1 * Y[i] * theta_x\n term2 = math.log(1.0 + math.exp(theta_x))\n sum += (term1 + term2)\n return sum/len(X)\n\ndef calculate_dot_product(theta, x):\n sum = 0.0\n for key in x.keys():\n sum += theta[key] * x[key]\n return sum\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"HW4/handout/pr.py","file_name":"pr.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"639068527","text":"import socks\nimport socket\nimport requests\nimport re\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\n\n# libraries to scarp the data\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Comment\nfrom urllib.request import urlopen\n\n\n# connect socks \nsocks.set_default_proxy(socks.SOCKS5, \"localhost\", 9150)\nsocket.socket = socks.socksocket\n# yake module to extract Keywords\nimport yake\nkw_extractor = yake.KeywordExtractor()\nlanguage = \"en\"\nmax_ngram_size = 2\ndeduplication_thresold = 0.9\ndeduplication_algo = 'seqm'\nwindowSize = 1\n# get the top 20 keywords\nnumOfKeywords = 20\n\n\n# for socket connection\ndef getaddrinfo(*args):\n return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]\nsocket.getaddrinfo = getaddrinfo\n\n\n# to find the visible Tags\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]','script','a']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\n# code to get text from HTML\ndef text_from_html(body):\n soup = BeautifulSoup(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts) \n return u\" \".join(t.strip() for t in visible_texts)\n\n\n# read the link & get the HTML\nhtml = urlopen('http://jncyepk6zbnosf4p.onion/onions.html').read()\n# parse the HTML\nsoup = BeautifulSoup(html, \"html.parser\")\n# Get the PRE tags\npre_tags = soup.findAll('pre',text=True)\n\n\n# below code is to extract the urls from the link\nonions = []\nfor i, pre_tag in enumerate(pre_tags): \n\ttry:\n\t\tif i!=0:\n\t\t\ttext = (pre_tag.text)\n\t\t\tonion_unfiltered = (text.split(\"\\u2003\\u2003\\u2003\\u2003\"))\n\t\t\tif onion_unfiltered[1].endswith('onion') and onion_unfiltered[3] == '200':\n\t\t\t\tonions.append(onion_unfiltered[1])\n\texcept Exception as E:\n\t\tpass\n\n\noutput = {}\ncustom_kw_extractor = yake.KeywordExtractor(lan=language, n=max_ngram_size, dedupLim=deduplication_thresold, dedupFunc=deduplication_algo, windowsSize=windowSize, top=numOfKeywords, features=None)\n\n# function to extract key words from the onion link\ndef tag(onion):\n\ttry:\n\t\t# extract the HTML\n\t\thtml = urlopen('http://'+onion).read()\n\t\t# get the text from the HTML\n\t\ttext = text_from_html(html)\n\t\t# extract keywords from the text\n\t\tkeywords = custom_kw_extractor.extract_keywords(text)\n\t\t# add keywords to the output\n\t\tfor kw in keywords:\n\t\t\tif onion not in output:\n\t\t\t\toutput[onion] = [kw[0]]\n\t\t\telse: \n\t\t\t\toutput[onion].append(kw[0])\n\texcept Exception as E:\n\t\tpass\n\n\n# making concurrency\nprocesses = []\n# assigning 5 workers\nwith ThreadPoolExecutor(max_workers=5) as executor:\n\t# creating threads for the first 5 onions\n\tfor onion in onions[:5]:\n \tprocesses.append(executor.submit(tag, onion))\n\n\nfor task in as_completed(processes):\n\ttask.result()\n\n\n# prints the url & its resp. keywords\nprint(output)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"279383060","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:owefsad\n# software: PyCharm\n# project: lingzhi-webapi\n\nfrom dongtai.endpoint import R, UserEndPoint\nfrom dongtai.models.asset import Asset\n\nfrom iast.base.agent import get_agents_with_project\nfrom iast.base.project_version import get_project_version, get_project_version_by_id\nfrom iast.serializers.sca import ScaSerializer\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass ScaList(UserEndPoint):\n def get(self, request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n auth_users = self.get_auth_users(request.user)\n auth_agents = self.get_auth_agents(auth_users)\n\n language = request.query_params.get('language')\n if language:\n auth_agents = auth_agents.filter(language=language)\n\n queryset = Asset.objects.filter(agent__in=auth_agents)\n\n order = request.query_params.get('order', None)\n order_fields = [\n 'level', 'package_name', 'vul_count', 'version', 'language', 'dt',\n 'project_name'\n ]\n order = order if order in order_fields + list(\n map(lambda x: ''.join(['-', x]), order_fields)) else None\n\n package_kw = request.query_params.get('keyword', None)\n\n project_id = request.query_params.get('project_id', None)\n if project_id and project_id != '':\n\n version_id = request.GET.get('version_id', None)\n if not version_id:\n current_project_version = get_project_version(\n project_id, auth_users)\n else:\n current_project_version = get_project_version_by_id(version_id)\n agents = self.get_auth_agents(auth_users).filter(\n bind_project_id=project_id,\n project_version_id=current_project_version.get(\"version_id\", 0)\n )\n queryset = queryset.filter(agent__in=agents)\n project_name = request.query_params.get('project_name')\n if project_name and project_name != '':\n agent_ids = get_agents_with_project(project_name, auth_users)\n if agent_ids:\n queryset = queryset.filter(agent_id__in=agent_ids)\n\n level = request.query_params.get('level')\n if level:\n queryset = queryset.filter(level=level)\n\n if package_kw and package_kw.strip() != '':\n queryset = queryset.filter(package_name__icontains=package_kw)\n\n if order:\n queryset = queryset.order_by(order)\n else:\n queryset = queryset.order_by('-dt')\n page = request.query_params.get('page', 1)\n page_size = request.query_params.get('pageSize', 20)\n page_summary, page_data = self.get_paginator(queryset, page, page_size)\n return R.success(data=ScaSerializer(page_data, many=True).data, page=page_summary)\n","sub_path":"iast/views/scas.py","file_name":"scas.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"5883558","text":"from PIL import Image\nfrom tempfile import NamedTemporaryFile\nfrom io import BytesIO\nimport requests\n\ndef apply_harper_collins_logo(url):\n response = requests.get(url)\n background = Image.open(BytesIO(response.content))\n foreground = Image.open('images/harper_collins_logo.png')\n foreground.thumbnail((background.width // 4, background.width // 4), Image.ANTIALIAS)\n y_coord = background.height - foreground.height\n background.paste(foreground, (0, y_coord), foreground.convert('RGBA'))\n\n temp_file=NamedTemporaryFile()\n background.save(temp_file, format='png')\n return temp_file\n","sub_path":"process_image.py","file_name":"process_image.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"84992107","text":"import sys\nfrom manim_imports_ext import *\n\nclass MeasureScene(AlgoScene):\n def construct(self):\n shape = self.camera.frame.get_shape()\n\n t = Text(\"1 width %.2f height %.2f delta 0.00\"%(shape[0], shape[1]), color=GREEN, font_size=20).shift(LEFT*3).scale(1)\n self.add(t)\n horizon = Line(start=LEFT*shape[0]/2, end=RIGHT*shape[0]/2, color=RED)\n verticle = Line(start=UP*shape[1]/2, end=DOWN*shape[1]/2, color=BLUE)\n\n t.next_to(horizon, direction=UP, buff=0)\n\n count = 2\n delta = 0.0\n while True:\n nt = Text(\"%d width %.2f height %.2f delta %.2f\"%(count, shape[0], shape[1], delta), \n color=GREEN, font_size=20).shift(LEFT*3).scale(1)\n nt.next_to(t, direction=UP, buff=0)\n p = nt.get_center()\n delta = p[1] - t.get_center()[1]\n t = nt\n if p[1] > shape[1]/2:\n break\n count += 1\n self.add(nt)\n\n # self.camera.frame.shift(OUT*0.2)\n self.play(ShowCreation(horizon), ShowCreation(verticle))\n self.snapshot()\n self.wait()\n","sub_path":"animations/measure_scene.py","file_name":"measure_scene.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"252887403","text":"import logging\nfrom flask import request, jsonify\nfrom codeitsuisse import app\n\nlogger = logging.getLogger(__name__)\n\n@app.route('/inventory-management', methods=['POST'])\ndef evaluateInventoryManagement():\n data = request.get_json()\n logging.info('data sent for evaluation: {}'.format(data))\n result = []\n for test in data:\n target = test['searchItemName']\n items = test['items']\n ans = solve(target, items)\n result.append({'searchItemName': target, 'searchResult': ans})\n logging.info('my result: {}'.format(result))\n return jsonify(result)\n\ndef solve(target, items):\n ans = {}\n str_ans = {}\n for item in items:\n l1 = len(target)\n l2 = len(item)\n l = max(l1, l2) + 1\n \n # init\n table = [[0 for j in range(l)] for i in range(l)]\n for i in range(1, l1 + 1):\n table[i][0] = i * 1\n for j in range(1, l2 + 1):\n table[0][j] = j * 1\n str_table = [['' for j in range(l)] for i in range(l)]\n for i in range(1, l1 + 1):\n str_table[i][0] = str_table[i - 1][0] + '-' + target[i - 1]\n for j in range(1, l2 + 1):\n str_table[0][j] = str_table[j - 1][0] + '+' + item[j - 1]\n\n # dp\n for i in range(1, l1 + 1):\n for j in range(1, l2 + 1):\n # replace\n if target[i - 1].lower() == item[j - 1].lower():\n table[i][j] = table[i - 1][j - 1]\n str_table[i][j] = str_table[i - 1][j - 1] + target[i - 1]\n else:\n table[i][j] = table[i - 1][j - 1] + 1\n str_table[i][j] = str_table[i - 1][j - 1] + item[j - 1]\n # delete\n temp_del = table[i - 1][j] + 1\n if temp_del < table[i][j]:\n table[i][j] = temp_del\n str_table[i][j] = str_table[i - 1][j] + '-' + target[i - 1]\n # insert\n temp_ins = table[i][j - 1] + 1\n if temp_ins < table[i][j]:\n table[i][j] = temp_ins\n str_table[i][j] = str_table[i][j - 1] + '+' + item[j - 1]\n \n ans[item] = table[l1][l2]\n str_ans[item] = str_table[l1][l2]\n\n sorted_ans = sorted(ans.items(), key=lambda x: (x[1], x[0]))\n\n res = []\n for i in sorted_ans:\n print(i[0], i[1])\n res.append(str_ans[i[0]])\n\n if len(res) > 10:\n res = res[:10]\n\n return res\n","sub_path":"codeitsuisse/routes/inventory_management.py","file_name":"inventory_management.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"384860578","text":"#!/usr/bin/env python\n\n# Import modules\nimport numpy as np\nimport sklearn\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\nfrom sensor_stick.srv import GetNormals\nfrom sensor_stick.features import compute_color_histograms\nfrom sensor_stick.features import compute_normal_histograms\nfrom visualization_msgs.msg import Marker\nfrom sensor_stick.marker_tools import *\nfrom sensor_stick.msg import DetectedObjectsArray\nfrom sensor_stick.msg import DetectedObject\nfrom sensor_stick.pcl_helper import *\n\nimport rospy\nimport tf\nfrom geometry_msgs.msg import Pose, Point, Quaternion\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import Int32\nfrom std_msgs.msg import String\nfrom pr2_robot.srv import *\nfrom rospy_message_converter import message_converter\nimport yaml\nimport os\n\n# Helper function to get surface normals\ndef get_normals(cloud):\n get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)\n return get_normals_prox(cloud).cluster\n\n# Helper function to create a yaml friendly dictionary from ROS messages\ndef make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):\n yaml_dict = {}\n yaml_dict[\"test_scene_num\"] = test_scene_num.data\n yaml_dict[\"arm_name\"] = arm_name.data\n yaml_dict[\"object_name\"] = object_name.data\n yaml_dict[\"pick_pose\"] = message_converter.convert_ros_message_to_dictionary(pick_pose)\n yaml_dict[\"place_pose\"] = message_converter.convert_ros_message_to_dictionary(place_pose)\n return yaml_dict\n\n# Helper function to output to yaml file\ndef send_to_yaml(yaml_filename, dict_list):\n data_dict = {\"object_list\": dict_list}\n with open(yaml_filename, 'w') as outfile:\n yaml.dump(data_dict, outfile, default_flow_style=False)\n\n# Define functions as required\ndef vox_filt( cloud, LEAF_SIZE = 0.005 ):\n vox = cloud.make_voxel_grid_filter()\n vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)\n return vox.filter()\n\ndef passthrough_filt( cloud, filter_axis = 'z', axis_min = 0.6, axis_max = 1.1 ):\n passthrough = cloud.make_passthrough_filter()\n passthrough.set_filter_field_name(filter_axis)\n passthrough.set_filter_limits(axis_min, axis_max)\n return passthrough.filter()\n\ndef outlier_filt( cloud, mean_k = 10, dev_mul = 0.01 ):\n out = cloud.make_statistical_outlier_filter()\n out.set_mean_k( mean_k )\n out.set_std_dev_mul_thresh( dev_mul)\n return out.filter()\n\ndef seg_plane( cloud, max_distance = 0.01 ):\n seg = cloud.make_segmenter()\n seg.set_model_type(pcl.SACMODEL_PLANE)\n seg.set_method_type(pcl.SAC_RANSAC)\n seg.set_distance_threshold(max_distance)\n inliers, coefficients = seg.segment()\n return inliers, coefficients\n\ndef euclidean_cluster( white_cloud, tolerance = 0.05, min = 100, max = 2500 ):\n tree = white_cloud.make_kdtree()\n # Create a cluster extraction object\n ec = white_cloud.make_EuclideanClusterExtraction()\n # Set tolerances for distance threshold\n # as well as minimum and maximum cluster size (in points)\n ec.set_ClusterTolerance( tolerance )\n ec.set_MinClusterSize( min )\n ec.set_MaxClusterSize( max )\n # Search the k-d tree for clusters\n ec.set_SearchMethod( tree )\n # Extract indices for each of the discovered clusters\n return ec.Extract()\n\n# Callback function for your Point Cloud Subscriber\ndef pcl_callback(pcl_msg):\n\n # Convert ROS msg to PCL data\n cloud = ros_to_pcl( pcl_msg )\n\n # Outliers removing\n cloud = outlier_filt( cloud )\n\n # Voxel Grid Downsampling\n cloud = vox_filt( cloud )\n\n # PassThrough Filter\n cloud = passthrough_filt( cloud )\n cloud = passthrough_filt( cloud, filter_axis = 'y', axis_min = - 0.45, axis_max = 0.45 )\n\n # RANSAC Plane Segmentation\n inliers, coefficients = seg_plane( cloud )\n\n # Extract inliers and outliers\n cloud_table = cloud.extract( inliers, negative = False )\n cloud_objects = cloud.extract( inliers, negative = True )\n\n # Euclidean Clustering\n white_cloud = XYZRGB_to_XYZ( cloud_objects )\n cluster_indices = euclidean_cluster( white_cloud )\n\n # Create Cluster-Mask Point Cloud to visualize each cluster separately\n #Assign a color corresponding to each segmented object in scene\n cluster_color = get_color_list( len( cluster_indices ) )\n\n color_cluster_point_list = []\n\n for j, indices in enumerate( cluster_indices ):\n for i, indice in enumerate(indices):\n color_cluster_point_list.append( [ white_cloud[ indice ][ 0 ],\n white_cloud[ indice ][ 1 ],\n white_cloud[ indice ][ 2 ],\n rgb_to_float( cluster_color[ j ] ) ] )\n\n #Create new cloud containing all clusters, each with unique color\n cluster_cloud = pcl.PointCloud_PointXYZRGB()\n cluster_cloud.from_list( color_cluster_point_list )\n\n ros_cluster_cloud = pcl_to_ros( cluster_cloud )\n\n # Convert PCL data to ROS messages\n ros_cloud_table = pcl_to_ros( cloud_table )\n ros_cloud_objects = pcl_to_ros( cloud_objects )\n\n # Publish ROS messages\n pcl_table_pub.publish( ros_cloud_table )\n pcl_objects_pub.publish( ros_cloud_objects )\n\n pcl_cluster_pub.publish( ros_cluster_cloud )\n\n # Exercise-3 TODOs:\n\n # Classify the clusters! (loop through each detected cluster one at a time)\n detected_objects = []\n detected_objects_labels = []\n\n for idx, pts_list in enumerate( cluster_indices ):\n # Grab the points for the cluster\n pcl_cluster = cloud_objects.extract( pts_list )\n ros_cluster = pcl_to_ros( pcl_cluster )\n\n # Compute the associated feature vector\n chists = compute_color_histograms( ros_cluster, using_hsv = True )\n # normals = get_normals( ros_cluster )\n # nhists = compute_normal_histograms( normals )\n feature = chists # np.concatenate( ( chists, nhists ) )\n\n # Make the prediction\n prediction = clf.predict( scaler.transform( feature.reshape( 1, -1 ) ) )\n label = encoder.inverse_transform( prediction )[ 0 ]\n detected_objects_labels.append( label )\n\n # Publish a label into RViz\n label_pos = list( white_cloud[ pts_list[ 0 ] ] )\n label_pos[ 2 ] += .4\n object_markers_pub.publish( make_label( label, label_pos, idx ) )\n\n # Add the detected object to the list of detected objects.\n do = DetectedObject()\n do.label = label\n do.cloud = ros_cluster\n detected_objects.append( do )\n\n rospy.loginfo( 'Detected {} objects: {}'.format( len( detected_objects_labels ), detected_objects_labels ) )\n\n # Publish the list of detected objects\n detected_objects_pub.publish( detected_objects )\n\n # output yaml\n output_file = 'output_1.yaml'\n\n if not os.path.exists( output_file ):\n object_list_param = rospy.get_param( '/object_list' )\n dropbox_param = rospy.get_param( '/dropbox' )\n\n if dropbox_param[ 0 ][ 'group' ] == 'red':\n dropbox = {\n 'red': dropbox_param[ 0 ],\n 'green': dropbox_param[ 1 ]\n }\n else:\n dropbox = {\n 'red': dropbox_param[ 1 ],\n 'green': dropbox_param[ 0 ]\n }\n\n dicts = []\n for obj in object_list_param:\n name = obj[ 'name' ]\n group = obj[ 'group' ]\n box = dropbox[ group ]\n\n for do in detected_objects:\n if do.label == name:\n ps = ros_to_pcl( do.cloud ).to_array()\n cs = np.mean( ps, axis = 0 )[ : 3 ]\n\n dict = make_yaml_dict( * pick_req( 1, box[ 'name' ], name, cs, box[ 'position' ] ) )\n dicts.append( dict )\n\n send_to_yaml( output_file, dicts )\n\ndef pick_req( test_scene_num, arm_name, object_name, pick_pose, place_pose ):\n msg_scene = Int32()\n msg_scene.data = test_scene_num\n\n msg_arm_name = String()\n msg_arm_name.data = arm_name\n\n msg_obj_name = String()\n msg_obj_name.data = object_name\n\n msg_pick_pose = Pose()\n msg_pick_pose.position = Point()\n msg_pick_pose.position.x = np.asscalar( pick_pose[ 0 ] )\n msg_pick_pose.position.y = np.asscalar( pick_pose[ 1 ] )\n msg_pick_pose.position.z = np.asscalar( pick_pose[ 2 ] )\n\n msg_place_pose = Pose()\n msg_place_pose.position = Point()\n msg_place_pose.position.x = place_pose[ 0 ]\n msg_place_pose.position.y = place_pose[ 1 ]\n msg_place_pose.position.z = place_pose[ 2 ]\n\n return msg_scene, msg_arm_name, msg_obj_name, msg_pick_pose, msg_place_pose\n\nif __name__ == '__main__':\n\n # TODO: ROS node initialization\n rospy.init_node( 'pick_place', anonymous = True )\n\n # TODO: Create Subscribers\n pcl_sub = rospy.Subscriber( \"/pr2/world/points\", PointCloud2, pcl_callback, queue_size = 1 )\n\n # TODO: Create Publishers\n pcl_table_pub = rospy.Publisher( \"/pcl_table\", PointCloud2, queue_size = 1 )\n pcl_objects_pub = rospy.Publisher( \"/pcl_objects\", PointCloud2, queue_size = 1 )\n\n pcl_cluster_pub = rospy.Publisher( \"/pcl_cluster\", PointCloud2, queue_size = 1 )\n\n object_markers_pub = rospy.Publisher( '/object_markers', Marker, queue_size = 1 )\n detected_objects_pub = rospy.Publisher( '/detected_objects', DetectedObjectsArray, queue_size = 1 )\n\n # TODO: Load Model From disk\n model = pickle.load( open( 'model.sav', 'rb' ) )\n clf = model[ 'classifier' ]\n encoder = LabelEncoder()\n encoder.classes_ = model[ 'classes' ]\n scaler = model[ 'scaler' ]\n\n # Initialize color_list\n get_color_list.color_list = []\n\n # TODO: Spin while node is not shutdown\n while not rospy.is_shutdown():\n rospy.spin()\n","sub_path":"pr2_robot/scripts/pick_place.py","file_name":"pick_place.py","file_ext":"py","file_size_in_byte":9729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"149413028","text":"\"\"\"\r\n============================\r\nAuthor:柠檬班-木森\r\nTime:2020/5/11 14:29\r\nE-mail:3247119728@qq.com\r\nCompany:湖南零檬信息技术有限公司\r\n============================\r\n\"\"\"\r\n\r\n\r\n# 第一题\r\ndef work1():\r\n li = [11, 21, 4, 55, 6, 67, 123, 54, 66, 9, 90, 56, 34, 22]\r\n li1 = filter(lambda x: x > 5, li)\r\n return [i % 2 for i in li1]\r\n\r\n\r\n# 第二题\r\ndef work2():\r\n name = yield\r\n for i in range(5):\r\n if not isinstance(name, str):\r\n name = '127.0.0.1:8000'\r\n name = yield \"http://\" + name + \"/user/login\"\r\n\r\n\r\n# gen = work2()\r\n# next(gen)\r\n# res = gen.send('www.baidu.com')\r\n# print(res)\r\n# res = gen.send('www.qq.com')\r\n# print(res)\r\n\"\"\"\r\n# 有一个正整数列表(数据是无序的,并且允许有相等的整数存在),\r\n# 编写能实现下列功能的函数,传入列表和正整数x,返回下面要求的三个数据\r\n# def func(array, x)\r\n# '''逻辑代码'''\r\n# return count, li, new_array\r\n# 1、统计并返回在列表中,比正整数x大的数有几个(相同的数只计算一次),并返回-----返回值中的的count\r\n# 2、计算列表中比正整数X小的所有偶数,并返回 -----------返回值中的li\r\n# 3、将列表中比正整数X小的偶数去掉,未去掉的数添加到新列表中,并返回-------返回值中的new_array\r\n\"\"\"\r\n\r\n\r\ndef work3_1(array, x):\r\n \"\"\"用推导式\"\"\"\r\n # 比x大的个数\r\n res1 = len({i for i in array if i > x})\r\n # 正整数X小的所有偶数:放在li1中\r\n li1 = list(filter(lambda i: i < x and i % 2 == 0, array))\r\n # 去除比X小的偶数\r\n [array.remove(i) for i in li1]\r\n new_list = array.copy()\r\n return res1, li1, new_list\r\n\r\n\r\n\"\"\"\r\n4、定义一个函数实现以下功能,第一个元素是数据标识,第二个元素的数值必须大于等于50才返回,\r\n不够50往后累加加到最后如果不够50也直接返回,因为没有可加的数据了\r\n例子1 :\r\na = [[1,3],[2,51],[3,49],[4,42],[5,42]] #入参 \r\na1 = [[2,54],[4,91],[5,42]] #返回 \r\n例子2:\r\nb = [[1,50],[2,5],[3,10],[4,42],[5,42],[6,10]] #入参\r\nb1 = [[1,50],[4,57],[6,52]] #返回\r\n\"\"\"\r\n\r\n\r\n# 第四题:\r\ndef work4(array):\r\n li = []\r\n sum = 0\r\n for i in array:\r\n sum += i[1]\r\n # 第一种元素值大于等50\r\n if sum >= 50 or len(array) == i[0]:\r\n i[1] = sum\r\n li.append(i)\r\n sum = 0\r\n return li\r\n\r\n\r\na = [[1, 3], [2, 51], [3, 49], [4, 42], [5, 42]]\r\nb = [[1, 50], [2, 5], [3, 10], [4, 42], [5, 42], [6, 10]]\r\n\r\nprint(work4(b))\r\n","sub_path":"python基础高阶编程/py_03day/task_02day.py","file_name":"task_02day.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"565147770","text":"\n# -- ------------------------------------------------------------------------------------ -- #\n# -- proyecto: IDI-II\n# -- codigo: IDI_II_Tarea4_JFME.py\n# -- repositorio: https://github.com/IFFranciscoME/IDI_II_GIT\n# -- ------------------------------------------------------------------------------------ -- #\n\nimport sympy as sp\nfrom sympy import symbols, N\nfrom sympy.plotting import plot3d, plot\n\n\n# -- ---------------------------------------- FUNCION: Gradiente Descendente (Ascendente) -- #\n# -- ------------------------------------------------------------------------------------ -- #\n# -- --\n\ndef f_grad(param_fun, param_x, param_y, param_e, param_p):\n \"\"\"\n Parameters\n ----------\n param_fun : str : funcion a utilizar\n param_x : numeric : valor inicial para x0\n param_y : numeric : valor inicial para y0\n param_e : numeric : exactitud deseada\n param_p : int : cantidad de digitos para la precision\n\n Returns\n -------\n p_x0 : numeric : componente en x del punto minimo (maximo) encontrado\n p_y0 : numeric : componente en y del punto minimo (maximo) encontrado\n\n Debugging\n ---------\n param_fun = 'x**2 - 24*y + y**2 -10*y'\n param_x = -2\n param_y = 0\n param_e = 10e-3\n param_p = 4\n \"\"\"\n\n # Establecer que es una expresion con variable simbolica\n param_fun = sp.S(param_fun)\n # diferencial de la funcion respecto a x\n f_x = param_fun.diff(x)\n # diferencial de la funcion respecto a y\n f_y = param_fun.diff(y)\n # factor de \"incremento\"\n theta = .1\n # iteraciones\n iteraciones = 0\n\n while True:\n # evaluacion de expresion de gradiente descendente\n\n temp_x = theta*N(f_x.subs(x, param_x).subs(y, param_y)).evalf()\n temp_y = theta*N(f_y.subs(x, param_x).subs(y, param_y)).evalf()\n\n # actualizar contador de iteraciones\n iteraciones += 1\n print(iteraciones)\n\n if abs(temp_x - param_x) < param_e and abs(temp_y - param_y) < param_e:\n break\n\n if iteraciones > 100:\n print(\"Algo paso que son muchas iteraciones sin llegar al resultado\")\n break\n\n param_x = temp_x\n param_y = temp_y\n\n print(\"f(x,y) = \" + str(param_fun) + \"converge\")\n print(\"el número de interaciones fueron: \", iteraciones, sep=\" \")\n print('el error es: ' + str(abs(temp_x - param_x)))\n\n\n# Declarar x, y, z como variables simbolicas\nx, y, z = symbols('x y z')\n\n# Funcion 1\nf_1 = 'x**4 - 3*x**3 + 2'\n# Establecer que es una expresion con variable simbolica\nf_n = sp.S(f_1)\n# Graficar la funcion para explorar dominio\nplot(f_1, (x, -2, +4))\n# Evaluar funcion de gradiente descendente\nf_grad(param_fun=0, param_x=0, param_y=0, param_e=0, param_p=0)\n\n# -- Notas de ejercicio 1\n# grafique de -10 a 10\n# la funcion crece indefinidamente en ambos sentidos, hacia x++ y x--\n# grafique, puse x0=-2, con theta = 0.5, resultado fue que mando a x=32\n\n# Funcion 2\nf_2 = 'x**2 - 24*y + y**2 -10*y'\n# Establecer que es una expresion con variable simbolica\nf_2 = sp.S(f_2)\n# Graficar la funcion para explorar dominio\nplot3d(f_2, (x, -4, +4), (y, -4, +4))\n# Evaluar funcion de gradiente descendente\nf_grad(param_fun=f_2, param_x=1, param_y=-3, param_e=10e-3, param_p=4)\n\n# -- Notas de ejercicio 2\n# grafique de x=-10 a x=10, y=-10 a y=10 y note que la funcion es bastante \"simple\"\n# reduje a -4 todos los valores\n# deje a x=1 y y =-3 y fue bastante rapida la convergencia\n\n# Funcion 3\nf_3 = 'sin((1/2)*x**2 - (1/4)*y**2 + 3)*cos(2*x + 1 - exp(y))'\n# Establecer que es una expresion con variable simbolica\nf_3 = sp.S(f_3)\n# Graficar la funcion para explorar dominio\nplot3d(f_3, (x, -4, +4), (y, -4, +4))\n# Evaluar funcion de gradiente descendente\nf_grad(param_fun=f_3, param_x=-1, param_y=-4, param_e=10e-3, param_p=4)\n\n# -- Notas de ejercicio 3\n# grafique de x=-4 a x=4, y=-4 a y=4 y note que la funcion es bastante \"complicada\"\n# deje valores de x=-1 a y=-4, que elegi visualmente como una \"cima\" o valor maximo\n# y la funcion convergio bastante rapido a un minimo local\n","sub_path":"Tarea_4_Gradiente_Descendente/IDI_II_Tarea4_JFME.py","file_name":"IDI_II_Tarea4_JFME.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"147453149","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nhf = 8.75*10**-20\nkB = 1.381*10**-23\neps = 1.218*10**-21\nTmin = 20\nTmax = 2000\nnsteps = 198000\n\n\nT = np.linspace(Tmin,Tmax,nsteps)\n\nj = np.arange(1,88,2)\n\n\nx = kB*T/eps\nZ_para = np.ones(len(x))\nU_para = np.zeros(nsteps)\n\ndx = x[100]-x[99]\n\nfor k in range(0,nsteps):\n Z_para[k] = np.sum((2*j+1)*np.exp(-j*(j+1)/x[k]))\n U_para[k] = np.sum(j*(j+1)*(2*j+1)*np.exp(-j*(j+1)/x[k])/Z_para[k])\n\nCv_para = np.diff(U_para)/dx\n\nj = np.arange(0,88,2)\nZ_orth = np.ones(len(x))\nU_orth = np.zeros(nsteps)\n\nfor k in range(0,nsteps):\n Z_orth[k] = np.sum((2*j+1)*np.exp(-j*(j+1)/x[k]))\n U_orth[k] = np.sum(j*(j+1)*(2*j+1)*np.exp(-j*(j+1)/x[k])/Z_orth[k])\n\nCv_orth = np.diff(U_orth)/dx\n\n\nCv_rot = 3*Cv_orth/4+Cv_para/4\n\nCv_tr = 3/2\n\nCv_vib = (hf/(kB*T))**2/(2*np.sinh(hf/(kB*T))-2)\n\n\nplt.figure()\nplt.plot(x,Z_para,'r')\nplt.plot(x,Z_orth,'b')\nplt.xlabel('kT/eps')\nplt.ylabel('Partition function')\nplt.show()\n\nplt.figure()\nplt.plot(x,U_para)\nplt.plot(x,U_orth)\nplt.xlabel('kT/eps')\nplt.ylabel('U')\nplt.show()\n\nx = np.delete(x,0)\nT = np.delete(T,0)\n\nplt.figure()\nplt.plot(T,Cv_para)\nplt.plot(T,Cv_orth)\nplt.xlabel('kT/eps')\nplt.ylabel('Cv/Nk')\nplt.show()\n\nCv_vib = np.delete(Cv_vib,0)\n\nplt.figure()\nplt.plot(T,Cv_vib+Cv_tr+Cv_rot)\nplt.ylim(0,3)\nplt.xscale('log')\nplt.xlabel('T (K)')\nplt.ylabel('Cv/Nk')\nplt.show()\n","sub_path":"opdracht.py","file_name":"opdracht.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"250626447","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom functions.handyWrappers import HandyWrappers\nfrom datetime import datetime\nimport time\nimport random\nfrom functions.explicit_wait import ExplicitWaitType\n\n\nclass TestCase1():\n def testSuite(self):\n driver = webdriver.Chrome(executable_path=\"C:\\Selenium\\chromedriver.exe\")\n URL = \"https://www.expedia.com/\"\n driver.maximize_window()\n driver.implicitly_wait(10)\n driver.get(URL)\n wait = ExplicitWaitType(driver)\n hw = HandyWrappers(driver)\n ## generic method to select dates from calendar\n\n destinationForScreenshots = \"C:\\\\Users\\S4etovodov\\Desktop\\Selenium_screenshots\\\\\"\n\n flightsBtn = hw.getElement(\"tab-flight-tab-hp\")\n flightsBtn.click()\n\n advancedOptions = hw.getElement(\"flight-advanced-options-hp-flight\")\n advancedOptions.click()\n\n time.sleep(2)\n\n nonstopCheckBox = hw.getElement(\"advanced-flight-nonstop-hp-flight\")\n nonstopCheckBox.click()\n\n time.sleep(2)\n\n preferredAirlineDrDwn = hw.getElement(\"flight-advanced-preferred-airline-hp-flight\")\n sel = Select(preferredAirlineDrDwn)\n sel.select_by_value(\"SU\")\n\n time.sleep(2)\n\n classDrpDwn = hw.getElement(\"flight-advanced-preferred-class-hp-flight\")\n sel1 = Select(classDrpDwn)\n sel1.select_by_value(\"business\")\n\n OriginField = hw.getElement(\"flight-origin-hp-flight\")\n OriginField.send_keys(\"new\")\n time.sleep(2)\n OriginItem = hw.getElement(\"// ul[ @ id = 'typeaheadDataPlain'] // li/a[contains(@data-value,'New York (NYC-All Airports)')]\", \"xpath\")\n OriginItemText = OriginItem.text\n print(OriginItemText)\n OriginItem.click()\n time.sleep(2)\n\n flyingToField = hw.getElement(\"flight-destination-hp-flight\")\n flyingToField.send_keys(\"mos\")\n time.sleep(2)\n flyingToFieldItem = hw.getElement(\"//ul[@id='typeaheadDataPlain']//li/a[contains(@data-value, 'Moscow, Russia (MOW-All Airports)')]\",\"xpath\")\n flyingToFieldItem.click()\n\n time.sleep(2)\n\n oneWayBtn = hw.getElement(\"flight-type-one-way-label-hp-flight\")\n oneWayBtn.click()\n\n time.sleep(2)\n departingField = hw.getElement(\"//input[@id='flight-departing-single-hp-flight']\", \"xpath\")\n departingField.click()\n\n\n monthsSel = {\n \"january\": '0',\n \"february\": '1',\n \"september\": '8'\n }\n\n\n depDateMonth = \"september\"\n depDateDay = \"18\"\n depDateYear = \"2018\"\n\n print(monthsSel[depDateMonth])\n\n depDXpath = \"//div[@id='flight-departing-wrapper-single-hp-flight']//button[contains(@data-year, '{0}') and contains(@data-month, '{1}') and contains(@data-day, '{2}')]\"\n depDateXpath = depDXpath.format(depDateYear, monthsSel[depDateMonth], depDateDay)\n print(depDateXpath)\n departureDate = hw.getElement(depDateXpath, \"xpath\")\n departureDate.click()\n #//div[ @ id = 'flight-departing-wrapper-single-hp-flight'] // button[contains( @ data - year, '2018') and contains( @ data - month, '8') and contains( @ data - day, '18')]\n #driver.find_element(By.XPATH, \"//div[@id='flight-departing-wrapper-single-hp-flight']//button[contains (@data-year, '2018') and contains (@data-month, '8') and contains (@data-day, '18')]\").click()\n #el = wait.waitForElement(\"//div[@id='flight-departing-wrapper-single-hp-flight']//button[contains(@data-year,'2018') and contains( @data-month, '8') and contains(@data-day, '18')]\", \"xpath\", 25).click()\n\n hw.takeScrShot(destinationForScreenshots)\n\n\n element = wait.waitForElement(\"//form[@id='gcw-flights-form-hp-flight']/div[8]/label/button\", \"xpath\", 10)\n element.click()\n time.sleep(2)\n\n hw.takeScrShot(destinationForScreenshots)\n\n driver.quit()\n\nTestExecution1 = TestCase1()\nTestExecution1.testSuite()\n\n","sub_path":"Testing3(expedia.com).py","file_name":"Testing3(expedia.com).py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"265506228","text":"# Q 11 Assignment\nimport math\n\nC = 50\nH = 30\nd = input(\"enter comma separated decimal input\")\nlist1 = d.split(',')\nlength = len(list1)\nprint(d)\nprint(list1)\nprint(length)\n\nfor i in list1:\n print(i)\n D = int(i)\n Q = math.sqrt((2 * C * D)/H)\n \n print(\"Q = \", int(Q))\n","sub_path":"pycode/original1.Question11CSVinput.py","file_name":"original1.Question11CSVinput.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"311127409","text":"# Input\n# [\"LRUCache\", \"put\", \"put\", \"get\", \"put\", \"get\", \"put\", \"get\", \"get\", \"get\"]\n# [[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]\n# Output\n# [null, null, null, 1, null, -1, null, -1, 3, 4]\n\n# Explanation\n# LRUCache lRUCache = new LRUCache(2);\n# lRUCache.put(1, 1); // cache is {1=1}\n# lRUCache.put(2, 2); // cache is {1=1, 2=2}\n# lRUCache.get(1); // return 1\n# lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3}\n# lRUCache.get(2); // returns -1 (not found)\n# lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3}\n# lRUCache.get(1); // return -1 (not found)\n# lRUCache.get(3); // return 3\n# lRUCache.get(4); // return 4\n\nclass Node:\n def __init__(self, key,val):\n self.key,self.val = key,val\n self.prev = self.next = None\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.cap = capacity\n self.cache = {}\n self.left, self.right = Node(0,0), Node(0,0)\n self.left.next, self.right.prev = self.right, self.left \n\n def get(self, key: int) -> int:\n if key in self.cache:\n self.remove(self.cache[key])\n self.insert(self.cache[key])\n return self.cache[key].val\n else:\n return -1\n \n def remove(self,node):\n prev, nex = node.prev, node.next\n prev.next, nex.prev = nex, prev\n \n def insert(self,node):\n prev,nex = self.right.prev, self.right\n prev.next = nex.prev = node\n node.next,node.prev = nex,prev \n \n def put(self, key: int, value: int) -> None:\n if key in self.cache:\n self.remove(self.cache[key])\n self.cache[key] = Node(key,value)\n self.insert(self.cache[key])\n \n if len(self.cache) > self.cap:\n lru = self.left.next\n self.remove(lru)\n del self.cache[lru.key]\n \n \n\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","sub_path":"team_work/Vincent/linkList/medium/LRU Cache/Q146.py","file_name":"Q146.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"298232270","text":"__author__ = 'Joe Linn'\n\nimport pylastica.aggregation.abstractaggregation as abstract\nimport pylastica.filter.abstractfilter as abstractfilter\n\n\nclass Filter(abstract.AbstractAggregation):\n def set_filter(self, filter):\n \"\"\"\n Set the filter for this aggregation\n @param filter: the filter to use for this aggregation\n @type filter: pylastica.filter.abstractfilter.AbstractFilter\n @return:\n @rtype: Filter\n \"\"\"\n if not isinstance(filter, abstractfilter.AbstractFilter):\n raise TypeError(\"filter must be an instance of an implementation of AbstractFilter: %r\" % filter)\n return self.set_param(\"filter\", filter.to_dict())\n\n def to_dict(self):\n \"\"\"\n\n @return:\n @rtype: dict\n \"\"\"\n return {\n \"filter\": self.get_param(\"filter\"),\n \"aggs\": self._aggs\n }","sub_path":"pylastica/aggregation/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"49308241","text":"import random\n#Decide what type of game you will be playing. For now there is just Gladiator\nwhile True:\n try:\n GameType = input(\"What type of game do you want to play? For now there is only Gladiator.\\n>\").lower().split()\n except ValueError:\n print(\"Sorry, I didn't understand that!\")\n continue\n if GameType[0] == \"gladiator\":\n from Gladiator import GladiatorMain\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"337571226","text":"#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom tensorflow.contrib import learn\n\n\ndef batch_data(layer1, layer2, batch_size=64):\n data_size = len(layer1)\n num_batches_per_epoch = int((data_size-1)/batch_size) + 1\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n \n batch_layer1_data = layer1[start_index:end_index]\n batch_layer2_data = layer2[start_index:end_index]\n \n yield zip(batch_layer1_data, batch_layer2_data)\n \n \ndef predict(layer1, layer2):\n \n # Map data into vocabulary\n vocab_path = os.path.join(os.path.curdir, \"mlmodels\", \"vocab\")\n vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\n x_layer1 = np.array(list(vocab_processor.transform(layer1)))\n x_layer2 = np.array(list(vocab_processor.transform(layer2)))\n \n # 3D arrays\n x_test = np.dstack([x_layer1, x_layer2])\n x_test = x_test.reshape((x_test.shape[0], x_test.shape[2], x_test.shape[1]))\n \n # Read in the model form the latest checkpoint. There are a lot of \n # components to load in separately. \n checkpoint_path = os.path.join(os.path.curdir, \"mlmodels\", \"checkpoints\")\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n \n # Get the placeholders from the graph by name\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n \n # If dropout was used to regularize, get that information.\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n \n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n \n all_predictions = sess.run(predictions, {input_x: x_test, dropout_keep_prob: 1.0})\n \n return all_predictions\n","sub_path":"cnn_text_classification_tf/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"359343954","text":"#!/usr/bin/python\n\nimport fnmatch\nimport os\n\ndef createNewFile(filename_prog):\n\tnewFile = open(filename_prog[0:-4] + \"py\", \"w\");\n\toriginalFile = open(filename_prog, \"r\")\n\tnewText = \"\"\n\n\tfor line in originalFile:\n\t\tif \"import\" in line:\n\t\t\tnewText += line\n\t\telif line[0:6] == \"CIKLUS\":\n\t\t\tnewText += swapLoop(line)\n\t\telif line[0:8] == \"ELAGAZAS\":\n\t\t\tnewText += swap(line, \"ELAGAZAS\")\n\t\telse:\n\t\t\tnewText += swapSequence(line)\n\n\tnewFile.write(newText[:-1])\n\ndef swap(line, swapstr, inner = False):\n\tsplittedLine = line.split(\"[\");\n\n\tif swapstr == \"CIKLUS\":\n\t\treturnLine = splittedLine[0].rstrip().replace(swapstr, \"for\") + \" :\"\n\telse:\n\t\treturnLine = splittedLine[0].rstrip().replace(swapstr, \"if\") + \" :\"\n\n\trows = swapSequence(splittedLine[2].rstrip()[0:-2]).split(\"\\n\")\n\n\tfor i in rows:\n\t\tif(inner):\n\t\t\treturnLine += \"\\n \" + i\n\t\telse:\n\t\t\treturnLine += \"\\n \" + i\n\n\treturn returnLine[:-4]\n\ndef swapLoop(line):\n\treturnLine = \"\"\n\n\tsplittedLine = line.split(\";\");\n\n\tinnerSplit = splittedLine[0].split(\"[\")\n\treturnLine = innerSplit[0].rstrip().replace(\"CIKLUS\", \"for\") + \" :\"\n\t\n\tif innerSplit[2].startswith(\"CIKLUS\"):\n\t\treturnLine += \"\\n \" + swap(innerSplit[2], \"CIKLUS\").rstrip()\n\telse:\n\t\treturnLine += \"\\n \" + innerSplit[2].rstrip()\n\n\tfor i in range(1, len(splittedLine)):\n\t\tif splittedLine[i].startswith(\"CIKLUS\"):\n\t\t\tif i == len(splittedLine) - 1:\n\t\t\t\treturnLine += \"\\n \" + swap(splittedLine[i].rstrip()[:-2], \"CIKLUS\", True).rstrip()\n\t\t\telse:\n\t\t\t\treturnLine += \"\\n \" + swap(splittedLine[i], \"CIKLUS\", True).rstrip()\n\t\telse:\n\t\t\tif i == len(splittedLine) - 1:\n\t\t\t\treturnLine += \"\\n \" + splittedLine[i].rstrip()[:-2]\n\t\t\telse:\n\t\t\t\treturnLine += \"\\n \" + splittedLine[i]\n\n\treturnLine += \"\\n\"\n\treturn returnLine\n\ndef swapSequence(line):\n\tsplittedLine = line.split(\";\")\n\treturnLine = \"\"\n\n\tfor i in splittedLine:\n\t\treturnLine += i.rstrip() + \"\\n\"\n\n\treturn returnLine\n\nfor file in os.listdir('.'):\n if fnmatch.fnmatch(file, '*.prog'):\n createNewFile(file)","sub_path":"Part_II./main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"349862933","text":"import collections\n\nn, m = map(int, input().split())\nw = map(int, input().split())\nmemo = [set() for _ in range(m)]\np = [[] for _ in range(m + 1)] \nfor i in range(i, m + 1):\n x, y = map(int, input().split())\n memo[x-1].add((i, y - 1))\n memo[y-1].add((i, x - 1))\n w[x-1] -= 1\n w[y-1] -= 1\n\nque = collections.deque()\nfor i in range(n):\n if w[i] >= 0:\n que.append(i)\n\nres = collections.deque()\nexist = [False] * (m + 1)\nwhile que and len(res) < m:\n x = que.popleft()\n for i, o in memo[x]:\n if i not in exist:\n res.append(i)\n exist.add(i)\n memo[o] += 1\n if memo[o] == 0:\n que.append(o)\nif len(res) != m:\n print('DEAD')\nelse:\n print('ALIVE')\n for i in reversed(res):\n print(i, end=' ') \n print()\n\n\n\n\n\n","sub_path":"Codeforces Round #652 (Div. 2)/.history/E_DeadLee_20200703105634.py","file_name":"E_DeadLee_20200703105634.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"304125719","text":"def possible_plan(n,r,w):\n global cnt\n global visited\n\n if n == r:\n cnt += 1\n else:\n for i in range(N):\n if visited[i] == 0 and w + kits[i] - K >= 500:\n visited[i] = 1\n possible_plan(n,r+1, w+kits[i] - K)\n visited[i] = 0\n\nN, K = map(int, input().split())\nkits = list(map(int, input().split()))\nvisited = [0 for _ in range(N)]\ncnt = 0\n\npossible_plan(N,0,500)\nprint(cnt)","sub_path":"baekjun/근손실.py","file_name":"근손실.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"418939870","text":"from sklearn.naive_bayes import GaussianNB\nfrom mining.preprocessing import *\n\nclass Classify:\n #Data should be acquired from file_path[0] in main_content begin(push button) event handle\n def __init__(self, data):\n\n #training data\n self.training = Preprocessing_Train()\n\n self.training_data = self.training.display_Training()\n self.training_class = self.training.display_Class()\n\n #Data to predict\n self.data = data\n self.new_data = Preprocessing_Data(self.data)\n self.new_data_display = self.new_data.display()\n\n #classification training and prediction\n self.classifier = GaussianNB()\n self.classifier.fit(self.training_data, self.training_class)\n self.predict = self.classifier.predict(self.new_data_display)\n #print('training_data: \\n', self.training_data.shape)\n #print('training_class: \\n', self.training_class.shape)\n #print('new_data_display \\n', self.new_data_display.shape)\n\n def raw_display(self):\n return self.predict\n\n def display_table(self):\n self.table_data = DataFrame(read_excel(self.data, header = 0))\n\n #display NOMBRE | APELLIDO | CORREO\n self.table_data_column_removed = self.table_data.drop(self.table_data.columns[[0, 1, 2, 3, 4, 5, 8, 9, 10, 12, 13, 14, 15, 16]], axis = 1)\n #self.table_data_row_removed = self.table_data_column_removed.drop(self.table_data_column_removed.index[[0]])\n #self.table_data_removed = self.table_data_row_removed\n self.table_data_removed = self.table_data_column_removed\n\n #add column name to predicted data\n self.predicted_data = DataFrame(self.predict)\n self.predicted_data.columns = [\"CLASS\"]\n\n #connected the predicted results with the data we want to display\n\n self.output_data = concat([self.table_data_removed, self.predicted_data], axis = 1)\n return array(self.output_data)\n\n\n def full_display(self):\n self.full_data = DataFrame(read_excel(self.data, header = 0))\n self.predicted_data = DataFrame(self.predict)\n self.predicted_data.columns = [\"CLASS\"]\n self.output_data = concat([self.full_data, self.predicted_data], axis=1)\n return self.output_data\n\n\n\n#cls = Classify('/Users/rtassara2006/Dropbox/QUINTO2016/TALLER DE INGENIERIA DE SOFTWARE/TEST.xlsx')\n#print('raw data: \\n', cls.raw_display())\n#print('display table \\n', cls.display_table())\n#print(cls.full_display())","sub_path":"mining/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"263745680","text":"\"\"\"\nModule which acts as a analytic Jacobian calculator\n\"\"\"\nfrom fitbenchmarking.jacobian.base_jacobian import Jacobian\nfrom fitbenchmarking.utils.exceptions import NoJacobianError\n\nfrom numpy import matmul\n\n\n# pylint: disable=useless-super-delegation\nclass Analytic(Jacobian):\n \"\"\"\n Class to apply an analytical Jacobian\n \"\"\"\n\n def __init__(self, cost_func):\n super(Analytic, self).__init__(cost_func)\n if not callable(self.problem.jacobian):\n raise NoJacobianError(\"Problem set selected does not currently \"\n \"support analytic Jacobians\")\n\n def eval(self, params, **kwargs):\n \"\"\"\n Evaluates Jacobian of problem.eval_model\n\n :param params: The parameter values to find the Jacobian at\n :type params: list\n\n :return: Approximation of the Jacobian\n :rtype: numpy array\n \"\"\"\n x = kwargs.get(\"x\", self.problem.data_x)\n e = kwargs.get(\"e\", self.problem.data_e)\n jac = self.problem.jacobian(x, params)\n if self.problem.options.cost_func_type == \"weighted_nlls\":\n # scales each column of the Jacobian by the weights\n jac = jac / e[:, None]\n elif self.problem.options.cost_func_type == \"root_nlls\":\n # calculates the Jacobian of the root NLLS cost function\n jac = jac * self.problem.eval_model(params, x=x)[:, None] / 2\n return jac\n\n def eval_cost(self, params, **kwargs):\n \"\"\"\n Evaluates derivative of the cost function\n\n :param params: The parameter values to find the Jacobian at\n :type params: list\n\n :return: Computed derivative of the cost function\n :rtype: numpy array\n \"\"\"\n rx = self.cached_func_values(self.cost_func.cache_rx,\n self.cost_func.eval_r,\n params,\n **kwargs)\n J = self.eval(params, **kwargs)\n out = 2.0 * matmul(J.T, rx)\n return out\n","sub_path":"fitbenchmarking/jacobian/analytic_jacobian.py","file_name":"analytic_jacobian.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"576911107","text":"\"\"\"\n拆分-Nim游戏\n给定 n 堆石子,两位玩家轮流操作,每次操作可以取走其中的一堆石子,然后放入两堆规模更小的石子\n(新堆规模可以为 0,且两个新堆的石子总数可以大于取走的那堆石子数),最后无法进行操作的人视为失败。\n\n问如果两人都采用最优策略,先手是否必胜。\n\n输入格式\n第一行包含整数 n。\n\n第二行包含 n 个整数,其中第 i 个整数表示第 i 堆石子的数量 ai。\n\n输出格式\n如果先手方必胜,则输出 Yes。\n\n否则,输出 No。\n\n数据范围\n1 ≤ n, ai ≤ 100\n输入样例:\n2\n2 3\n输出样例:\nYes\n================================================================\n相比于集合-Nim,这里的每一堆可以变成不大于原来那堆的任意大小的两堆\n即 a[i]可以拆分成 (b[i], b[j])\n为了避免重复规定 b[i] >= b[j],即:a[i] >= b[i] >= b[j],\n相当于一个局面拆分成了两个局面,由 SG函数理论,多个独立局面的 SG值,等于这些局面 SG值的异或和。\n因此需要存储的状态就是 sg(b[i]) ^ sg(b[j])(与集合-Nim的唯一区别)\n\"\"\"\nN, M = 100 + 10, 10000 + 10\n# F存储的是所有可能出现过的情况的 SG值, 初始化 F均为 -1,方便查看 SG(x)是否被记录过\nF = [-1] * M\n\n\ndef SG(x):\n # 因为取石子数目的集合 S是已经确定了的,\n # 所以在递归条件下,每个数的 SG值也都是确定的, 如果F[x]已经存储过了, 直接返回即可\n if F[x] != -1:\n return F[x]\n S = set() # S存储的是可供选择的集合\n\n for i in range(x):\n j = 0\n while j <= i: # 规定j不大于i,避免重复\n # 相当于一个局面拆分成了两个局面,由SG函数理论,多个独立局面的SG值,等于这些局面SG值的异或和\n S.add(SG(i) ^ SG(j))\n j += 1\n\n i = 0\n while True: # 循环完之后, 可以选出没有出现的最小自然数\n if not S.__contains__(i):\n F[x] = i # 对F[x]��值\n return i\n i += 1\n\n\nif __name__ == '__main__':\n n = int(input())\n A = list(map(int, input().split()))\n\n res = 0\n for i in range(n):\n res ^= SG(A[i]) # 计算所有堆的异或值,基本原理与Nim游戏相同\n\n print('Yes' if res >= 1 else 'No') # res != 0\n","sub_path":"2021/Algorithm/Python/Base/4_math_knowledge/894.py","file_name":"894.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"386571076","text":"from qcommunity.optimization.obj import get_obj_val, get_obj\nfrom scipy.optimize import minimize\nimport numpy as np\nimport nlopt\n\n\ndef optimize_obj(obj_val, num_parameters, params=None):\n options = {}\n try:\n init_points = params['sample_points'][0]\n except (KeyError, TypeError):\n init_points = np.random.uniform(-np.pi, np.pi, num_parameters)\n try:\n options['maxiter'] = params['n_iter'] + params['init_points']\n except (KeyError, TypeError):\n options['maxiter'] = 100\n\n def objective(x, grad):\n f = obj_val(x)\n return f\n\n nlopt.srand(params['seed'])\n opt = nlopt.opt(nlopt.LN_PRAXIS, num_parameters)\n opt.set_min_objective(objective)\n opt.set_maxeval(options['maxiter'])\n \n if params['ansatz'] == 'QAOA':\n lb = np.array([0, 0] * params['ansatz_depth'])\n ub = np.array([np.pi, 2*np.pi] * params['ansatz_depth'])\n elif params['ansatz'] == 'RYRZ':\n lb = np.array([-np.pi] * num_parameters)\n ub = np.array([np.pi] * num_parameters)\n\n #dist_to_bound = min(min(ub-init_points),min(init_points-lb))\n #opt.set_initial_step(dist_to_bound)\n opt.set_ftol_rel(params['ftol_rel']) \n opt.set_xtol_rel(params['xtol_rel'])\n\n opt.set_lower_bounds(lb)\n opt.set_upper_bounds(ub)\n x = opt.optimize(init_points)\n return x\n","sub_path":"qcommunity/optimization/praxis_nlopt.py","file_name":"praxis_nlopt.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"529538465","text":"#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport uuid\nimport logging\nimport os\nfrom locust import HttpLocust, TaskSet\nfrom locust.events import request_failure\nfrom binascii import hexlify\n\nprefixes = [\n 'goo',\n 'mic',\n 'app',\n 'fgssg',\n 'int',\n 'mo',\n 'ewer',\n 'am',\n 'ma',\n 'ip',\n 'sdf']\n\nUSER_AGENTS = [\n \"Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19 (LocustIO)\",\n \"Android 4.0.3;AppleWebKit/534.30;Build/IML74K;GT-I9220 Build/IML74K (LocustIO)\",\n \"KWC-S4000/ UP.Browser/7.2.6.1.794 (GUI) MMP/2.0 (LocustIO)\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html) (LocustIO)\",\n \"Googlebot-Image/1.0 (LocustIO)\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0 (LocustIO)\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52 (LocustIO)\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36\",\n]\n\n\nclass B3Header:\n def generateHeader(self, identifier_length):\n bit_length = identifier_length * 4\n byte_length = int(bit_length / 8)\n identifier = os.urandom(byte_length)\n return hexlify(identifier).decode('ascii')\n\n\nb3 = B3Header()\n\n\ndef on_failure(request_type, name, response_time, exception, **kwargs):\n logging.error(exception.request)\n logging.error(exception.response)\n\n\nrequest_failure += on_failure\n\n\ndef findProduct(l):\n h = {\n \"User-Agent\": random.choice(USER_AGENTS),\n \"x-client-trace-id\": str(uuid.uuid4()),\n \"x-b3-sampled\": \"1\",\n \"x-b3-flags\": \"1\",\t\n \"x-b3-traceid\": b3.generateHeader(32),\n \"x-b3-spanid\": b3.generateHeader(16)\n }\n logging.info(h)\n l.client.get(\"/api/fetchProducts?name=\" +\n random.choice(prefixes), headers=h)\n\n\nclass UserBehavior(TaskSet):\n tasks = {findProduct: 1}\n\n\nclass WebsiteUser(HttpLocust):\n\n task_set = UserBehavior\n min_wait = 1000\n max_wait = 10000\n","sub_path":"pre-grpc/src/loadgenerator/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"3516507","text":"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Static definitions, such as constants.\"\"\"\n\nimport cProfile\nimport functools\nimport logging\nimport math\nimport typing\nimport injector\nimport numpy\nfrom simulation.configuration import Configuration\n\nT = typing.TypeVar('T')\n\nDAYS = {\n 'Sunday': 0,\n 'Monday': 1,\n 'Tuesday': 2,\n 'Wednesday': 3,\n 'Thursday': 4,\n 'Friday': 5,\n 'Saturday': 6,\n}\n\n# All this functions convert to seconds.\nHOUR = lambda x: x * 3600.0\nDAY = lambda x: x * HOUR(24)\nWEEK = lambda x: x * DAY(7)\n\n# And these to bytes.\nKB = lambda x: x << 10\nMB = lambda x: x << 20\n\n\ndef config_logging(config: Configuration) -> None:\n \"\"\"Sets logging basic config\"\"\"\n logging.basicConfig(\n format='%(asctime)s %(levelname)s(%(name)s): %(message)s',\n datefmt='%d/%m/%Y %H:%M:%S',\n level=logging.DEBUG if config.get_arg('debug') else logging.INFO)\n logging.captureWarnings(True)\n\n\n# pylint: disable=invalid-name,too-few-public-methods\nclass profile:\n \"\"\"Decorator to run a function and generate a trace.\"\"\"\n\n @injector.inject\n def __init__(self, config: Configuration):\n super(profile, self).__init__()\n self.__config = config\n\n def __call__(self, func: typing.Callable[..., T]) -> T:\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Wraps the function generating a trace.\"\"\"\n if self.__config.get_arg('trace'):\n profiler = cProfile.Profile()\n profiler.enable()\n\n ret = func(*args, **kwargs)\n\n if self.__config.get_arg('trace'):\n profiler.create_stats()\n profiler.dump_stats('trace')\n\n return ret\n\n return wrapper\n\n\ndef timestamp_to_day(timestamp: int) -> typing.Tuple[int, int]:\n \"\"\"Converts from a simulation timestamp to the pair (day, hour).\"\"\"\n day = int((timestamp % WEEK(1)) // DAY(1))\n hour = int((timestamp % DAY(1)) // HOUR(1))\n return day, hour\n\n\n# pylint: disable=invalid-name,no-member\ndef weight(x: float, ip: float, fp: float) -> float:\n \"\"\"Linear increment between ip and fp function.\"\"\"\n return numpy.maximum(0.0, numpy.minimum(1.0, (ip - x) / (ip - fp)))\n\n\n# pylint: disable=invalid-name\ndef weighted_user_satisfaction(\n t: float, timeout: float, threshold: float) -> float:\n \"\"\"Calculates the weighted satisfaction with a sigmoid.\"\"\"\n return numpy.where(t < timeout, 1.0, weight(t - timeout, 60, threshold))\n\n\ndef user_satisfaction(t: float, timeout: float) -> float:\n \"\"\"Calculates plain old user satisfaction.\"\"\"\n return numpy.where(t < timeout, 1.0, 0.0)\n\n\ndef generate_servers(size: int) -> typing.List[str]:\n \"\"\"Generates a list of servers randomly generated.\"\"\"\n fill = math.ceil(math.log(size, 10))\n return ['workstation' + str(i).zfill(fill) for i in range(size)]\n","sub_path":"simulation/static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"462833344","text":"#将原始图片转换成需要的大小,并将其保存 \n#制作threcords然乎从tfrecords中读取数据,reshape成64x64并保存\n#======================================================================================== \nimport os \nimport numpy as np \nimport tensorflow as tf \nfrom PIL import ImageFile\nfrom PIL import Image\nImageFile.LOAD_TRUNCATED_IMAGES = True\n \n#原始图片的存储位置 \npath=os.path.abspath('.')\norig_picture =path + '/pic'#图片在pic文件夹下的 mao和gou 的文件夹下 \n#orig_picture = 'C:/Users/zhuan/Desktop/tf/pic' \n \n#生成图片的存储位置 \ngen_picture =path+ '/pic/test_data' #会在inputdata下生产64x64的数据\n \n#需要的识别类型 \nclasses = {'ok','liefeng','qinshituoluo','wailu','wusun'} #这是一个dict只不过他的value为零\n \n#样本总数 \n#num_samples = 74 \n\nnum_samples = 5702\n \n#制作TFRecords数据 \ndef create_record(): \n writer = tf.python_io.TFRecordWriter(\"train.tfrecords\") \n for index, name in enumerate(classes): #enumerate既可以遍历又可以索引,index是索引号,name是字典里的key值 \n class_path = orig_picture +\"/\"+ name+\"/\" \n i = 0\n for img_name in os.listdir(class_path): \n img_path = class_path + img_name \n img = Image.open(img_path) \n img = img.resize((64, 64)) \n #设置需要转换的图片大小\n i = i+1\n print(\"成功转化\",i,\"张图片\")\n# img = img.tf.image.resize_images() #设置需要转换的图片大小 \n img_raw = img.tobytes() #将图片转化为原生bytes \n# print (index,img_raw) \n example = tf.train.Example( \n features=tf.train.Features(feature={ \n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])), \n 'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])) \n })) \n\t\t\t # 写 TFRecord\n writer.write(example.SerializeToString()) \n writer.close() \n# \n#======================================================================================= \ndef read_and_decode(filename): \n # 创建文件队列,不限读取的数量 \n filename_queue = tf.train.string_input_producer([filename]) \n # create a reader from file queue \n reader = tf.TFRecordReader() \n # reader从文件队列中读入一个序列化的样本 \n _, serialized_example = reader.read(filename_queue) \n # get feature from serialized example \n # 解析符号化的样本 \n features = tf.parse_single_example( \n serialized_example, \n features={ \n 'label': tf.FixedLenFeature([], tf.int64), \n 'img_raw': tf.FixedLenFeature([], tf.string) \n }) \n label = features['label'] \n img = features['img_raw'] \n img = tf.decode_raw(img, tf.uint8) \n img = tf.reshape(img, [64, 64, 3]) \n# img = tf.cast(img, tf.float32) * (1. / 255) - 0.5 \n label = tf.cast(label, tf.int32) \n return img, label \n \n#======================================================================================= \nif __name__ == '__main__': \n create_record() \n batch = read_and_decode('train.tfrecords') \n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) \n \n with tf.Session() as sess: #开始一个会话 \n sess.run(init_op) \n coord=tf.train.Coordinator() \n threads= tf.train.start_queue_runners(coord=coord) \n# print(sess.run(batch))\n for i in range(num_samples): \n try:\n example, lab = sess.run(batch) #在会话中取出image和label \n# print(example)\n except:\n print (\"错误异常\")\n img=Image.fromarray(example, 'RGB')#这里Image是之前提到的 \n img.save(gen_picture+'/'+str(lab)+'/'+str(i)+'samples'+str(lab)+'.jpg')\n print(\"第\",i+1,\"张图片成功输出\")\n #存下图片;注意cwd后边加上‘/’ \n# print(example, lab) \n coord.request_stop() \n coord.join(threads) \n sess.close()","sub_path":"预处理.py","file_name":"预处理.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"369547838","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom screens.stud_det import *\nfrom screens.attendence import *\nfrom screens.electives import *\nfrom screens.to_login import ToLogin\nfrom screens.marks import *\nfrom screens.placement import *\nfrom PIL import Image, ImageTk\nfrom screens.placement_det import *\n#import screens.login as log\n\n\nclass Placement:\n def __init__(self):\n\n self.root = Toplevel()\n self.root.geometry(\"2000x1024\")\n\n self.root.title(\"Menu\")\n self.c = Canvas(self.root,bg = \"gray\",height=2000,width=2024)\n # image = Image.open(\"images/epic1.png\")\n # photo = ImageTk.PhotoImage(image)\n photo = PhotoImage(file = \"images/plac.png\")\n \n # Setting the background\n self.c.create_image((0,0), image=photo, anchor=\"nw\")\n\n \n # Setting the font\n self.fnt = ('latin modern typewriter',50,'bold')\n \n # Setting the text\n\n self.c.create_text((600, 150), text=\"SELECT BRANCH\", fill=\"black\", anchor=\"nw\"\n ,font=('newcenturyschlbk',50,'bold'))\n\n\n\n\n self.back = Button(self.c,text='Back',bg='red',fg='white',activebackground='black',activeforeground='white',width=10,height=2, font=(\"Times\",20,'bold'),command=lambda:back())\n self.back.place(x=1400,y=900,width=100,height=40)\n\n\n\n def back():\n self.root.destroy() \n\n\n self.c.pack()\n self.back = Button(self.c,text='Back',bg='red',fg='white',activebackground='black',activeforeground='white',width=10,height=2, font=(\"Times\",15,'bold'),command=lambda:back())\n\n\n self.b1 = Button(self.c,text='CSE',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('CSE'))\n self.b2 = Button(self.c,text='ISE',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7,font=(\"Times\",25,'bold'),command=lambda:buttonClick('ISE'))\n self.b3 = Button(self.c,text='ECE',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7,font=(\"Times\",25,'bold'),command=lambda:buttonClick('ECE'))\n self.b4 = Button(self.c,text='TC',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('TC'))\n\n self.b5 = Button(self.c,text='ME',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('ME'))\n self.b6 = Button(self.c,text='IEM',bg='yellow',fg='blue',activebackground='black',activeforeground='white',width=20,height=7, font=(\"Times\",25,'bold'),command=lambda:buttonClick('IEM'))\n\n self.b1.place(x=800,y=300,width=300,height=50)\n self.b2.place(x=800,y=380,width=300,height=50)\n self.b3.place(x=800,y=460,width=300,height=50)\n self.b4.place(x=800,y=540,width=300,height=50)\n self.b5.place(x=800,y=620,width=300,height=50)\n self.b6.place(x=800,y=700,width=300,height=50)\n \n def back():\n self.root.destroy()\n # b = log.Login()\n \n\n def buttonClick(branch):\n a = PlacementDetails(branch)\n\n self.root.mainloop()\n \na = Placement()","sub_path":"screens/placement.py","file_name":"placement.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"261643776","text":"import numpy as np\nfrom numpy import array\n\n# == Parameters == #\nβ = 1 / 1.05\nρ, mg = .7, .35\nA = np.identity(2)\nA[0, :] = ρ, mg * (1-ρ)\nC = np.zeros((2, 1))\nC[0, 0] = np.sqrt(1 - ρ**2) * mg / 10\nSg = array((1, 0)).reshape(1, 2)\nSd = array((0, 0)).reshape(1, 2)\nSb = array((0, 2.135)).reshape(1, 2)\nSs = array((0, 0)).reshape(1, 2)\n\neconomy = Economy(β=β,\n Sg=Sg,\n Sd=Sd,\n Sb=Sb,\n Ss=Ss,\n discrete=False,\n proc=(A, C))\n\nT = 50\npath = compute_paths(T, economy)\ngen_fig_1(path)\n","sub_path":"lqramsey/lqramsey_ar1.py","file_name":"lqramsey_ar1.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"380244753","text":"'''\nConstruya un filtro pasa-altos tipo Butterworth utilizando la definición en\nfrecuencia. Filtre una imagen, modificando la frecuencia de corte y compro-\nbando el efecto sobre la imagen filtrada. Verifique el efecto del filtro \nrespecto al fenómeno de Gibbs.\n'''\n#python 006-pbi.py -i ../imgs/chairs.jpg\n\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport argparse\n\n'''\n Argumentos\n'''\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--imagem\", required=True, help=\"Imagem\")\n\n\n'''\n Realiza a Transformada de Fourier e retorna a magnitude e a fase.\n''' \ndef tf_complexa(im):\n # Para armazenar resultado da transformação\n planos = [np.float32(im), np.zeros(im.shape, np.float32)]\n tf = cv.merge(planos)\n # Transformada \n tf = cv.dft(tf, cv.DFT_COMPLEX_OUTPUT)\n #calcular magnitude, planos[0] = real, planos[1] = imaginário\n planos = cv.split(tf)\n magn, fase = cv.cartToPolar(planos[0],planos[1], angleInDegrees=False)\n #escala logaritmica\n #magn = cv.log(magn + 1)\n #centralizar\n magn = np.fft.fftshift(magn, axes=None)\n return magn, fase\n\n\n'''\n Filtro Butterworth (passa baixa, para passa alta, filtro = 1 - filtro)\nfiltro = 1/1+{D(u,v)/d}^2n, D(u,v) = [(u - P/2)^2 + (v - Q/2)^2]^1/2\n'''\ndef filtro_bw(lin, col, corte, ordem):\n filtro = np.zeros((lin, col), np.float32)\n # Corte ideal entre 2% e 5% da menor dimensão da imagem\n if lin < col:\n corte *= lin\n else:\n corte *= col\n\n for x in range(lin):\n for y in range(col):\n # // = parte inteira da divisão\n d = ((x - lin//2)**2 + (y - col//2)**2)**1/2\n bw = 1.0 / (1 + (d/corte) ** ordem)\n filtro[x,y] = bw\n filtro = 1 - filtro\n return filtro\n\n\n'''\n Aplicar Filtro em domínio frequencial.\n'''\ndef aplicar_filtro(magn, filtro):\n # Filtro para o domínio da frequência\n fmg, ffs = tf_complexa(filtro)\n # Aplicar\n rmg = cv.mulSpectrums(magn, fmg, cv.DFT_ROWS)\n rmg = np.fft.ifftshift(rmg, axes=None)\n return rmg\n\n\n'''\n MAIN\n'''\ndef main():\n \n # Imagens\n args = vars(ap.parse_args())\n img = cv.imread(args[\"imagem\"], 0)\n [alt, larg] = img.shape\n corte = 0.00009\n ordem = 2\n \n # Transformada \n m, f = tf_complexa(img)\n\n # Filtro\n fb = filtro_bw(alt, larg, corte, ordem)\n mf = aplicar_filtro(m, fb) \n\n # Montando \n x, y = cv.polarToCart(mf, f, angleInDegrees=False)\n im = cv.merge([x, y])\n\n # Inversa\n inv = cv.idft(im, cv.DFT_COMPLEX_OUTPUT)\n\n # Combinar imagens para mostrar\n r = cv.magnitude(inv[:,:,0], inv[:,:,1])\n \n # Normalizar\n r = cv.normalize(r, 0, 255, cv.NORM_MINMAX)\n\n # Mostrar\n plt.subplot(1,2,1)\n plt.xticks([])\n plt.yticks([])\n plt.title(\"Original\")\n plt.imshow(img, cmap=\"gray\")\n \n plt.subplot(1,2,2)\n plt.xticks([])\n plt.yticks([])\n plt.title(\"Filtrada\")\n plt.imshow(r, cmap=\"gray\")\n\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"pratica5/009-hpbw.py","file_name":"009-hpbw.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"615311734","text":"# -*- coding: utf-8 -*-\n\nfrom sole import SoLE\nfrom Matrix import Matrix\nimport time\n\nclass Model:\n\n def __init__(self):\n # Створення порожньої системи\n self.system = None\n self.n = 0\n\n # Метод, що змінює розмірність СЛАУ і коректно зберегає старі дані\n def set_varnum(self, n):\n if self.system is None:\n A = Matrix(n, n, value=0)\n b = Matrix(n, 1, value=0)\n self.system = SoLE(A, b)\n self.n = n\n else:\n A = Matrix(n, n, value=0)\n b = Matrix(n, 1, value=0)\n N = n if n < self.n else self.n\n # Старі значення зберігаються, нові - 0\n for i in range(N):\n b.elements[i][0] = self.system.free_members.elements[i][0]\n for j in range(N):\n A.elements[i][j] = self.system.system_matrix.elements[i][j]\n self.n = n\n self.system = SoLE(A, b)\n\n # Метод, що розв'язує систему обраним методом з обраною точністю\n # Також заміряє час виконання программи\n def solve(self, method, eps):\n if method == 0:\n m = self.system.method_Jacobi\n elif method == 1:\n m =self.system.method_Gauss_Seidel\n else:\n m = self.system.method_gradient_descent\n t1 = time.time()\n res = m(eps)\n t = time.time() - t1\n return (res, t)\n\n\n\n\n","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"47065059","text":"import unittest\nimport os\nimport json\nfrom typing import List\nimport random\nimport nflapi.Client\nfrom nflapidb.RosterManagerFacade import RosterManagerFacade\nfrom nflapidb.PlayerProfileManagerFacade import PlayerProfileManagerFacade\nfrom nflapidb.EntityManager import EntityManager\nimport nflapidb.Utilities as util\n\nclass TestPlayerProfileManagerFacade(unittest.TestCase):\n\n def setUp(self):\n self.entityName = \"player_profile\"\n self.entmgr = EntityManager()\n\n def tearDown(self):\n util.runCoroutine(self.entmgr.drop(self.entityName))\n self.entmgr.dispose()\n\n def _getMockPlayerProfileManager(self, rosterData : List[dict], profileData : List[dict]):\n apiClient = MockApiClient(profileData)\n rmgr = MockRosterManagerFacade(self.entmgr, apiClient, rosterData)\n self.datamgr = PlayerProfileManagerFacade(self.entmgr, apiClient, rmgr)\n return self.datamgr\n\n def _getPlayerProfileManager(self):\n self.datamgr = PlayerProfileManagerFacade(self.entmgr)\n return self.datamgr\n\n def test_sync_initializes_collection_one_team(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), len(srcdata), \"sync returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(dbrecs, recs, \"db records differ\")\n self.assertEqual(rmgr._apiClient.getRequestedRosters(), rstdata, \"requested rosters differs\")\n\n def test_sync_initializes_collection_two_teams(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), len(srcdata), \"sync returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(dbrecs, recs, \"db records differ\")\n self.assertEqual(rmgr._apiClient.getRequestedRosters(), rstdata, \"requested rosters differs\")\n\n def test_sync_only_updates_records_with_team_change(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n xreqrec = []\n for rec in rstdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n xreqrec.append(rec)\n xrec = []\n for rec in srcdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n crec = rec.copy()\n # Since the team is changing the previous_teams attribute should be set\n crec[\"previous_teams\"] = [\"PIT\"]\n xrec.append(crec)\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(rmgr._apiClient.getRequestedRosters(), xreqrec, \"requested rosters differs\")\n self.assertEqual(len(recs), 1, \"sync returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName,\n query={\"profile_id\": 2560950},\n projection={\"_id\": False}))\n self.assertEqual(len(dbrecs), 1, \"db record counts differ\")\n self.assertEqual(dbrecs, xrec, \"db records differ\")\n\n def test_sync_updates_nothing_with_no_team_change(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), 0, \"sync returned record count differs\")\n self.assertEqual(rmgr._apiClient.getRequestedRosters(), [], \"requested rosters differs\")\n\n def test_sync_updates_all_with_all(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n rstdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n rstdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n srcdata.extend(json.load(fp))\n rmgr = self._getMockPlayerProfileManager(rosterData=rstdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n def pidmap(recs : List[dict]) -> dict:\n return dict(zip([_[\"profile_id\"] for _ in recs], recs))\n rmap = pidmap(rstdata.copy())\n pmap = pidmap(srcdata.copy())\n usrcdata = []\n for pid in rmap:\n t = random.choice([\"KC\", \"PIT\"])\n rmap[pid][\"team\"] = t\n pmap[pid][\"team\"] = t\n usrcdata.append(pmap[pid])\n rmgr = self._getMockPlayerProfileManager(rosterData=list(rmap.values()), profileData=usrcdata)\n recs = util.runCoroutine(rmgr.sync(all=True))\n self.assertEqual(len(recs), len(usrcdata), \"sync returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName, projection={\"_id\": False}))\n self.assertEqual(dbrecs, usrcdata, \"db records differ\")\n\n def test_save_appends(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n kcrdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n kcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n pitdata = json.load(fp)\n rmgr = self._getMockPlayerProfileManager(rosterData=kcrdata, profileData=kcdata.copy())\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), len(kcdata), \"sync record count differs\")\n recs.extend(util.runCoroutine(rmgr.save(pitdata.copy())))\n self.assertEqual(len(recs), len(kcdata) + len(pitdata), \"save record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(len(dbrecs), len(recs), \"db record count differs\")\n self.assertEqual(dbrecs, recs, \"db records differ\")\n\n def test_save_updates_previous_team(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_kc.json\"), \"rt\") as fp:\n srcrdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"roster_pit.json\"), \"rt\") as fp:\n srcrdata.extend(json.load(fp))\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n kcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n pitdata = json.load(fp)\n srcdata = kcdata.copy()\n srcdata.extend(pitdata.copy())\n rmgr = self._getMockPlayerProfileManager(rosterData=srcrdata, profileData=srcdata)\n recs = util.runCoroutine(rmgr.sync())\n self.assertEqual(len(recs), len(srcdata), \"sync record count differs\")\n for rec in pitdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n kcdata.append(rec)\n recs2 = util.runCoroutine(rmgr.save(kcdata.copy()))\n self.assertEqual(len(recs2), len(kcdata), \"save record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName, projection={\"_id\": False}))\n self.assertEqual(len(dbrecs), len(srcdata), \"db record count differs\")\n for rec in srcdata:\n if rec[\"profile_id\"] == 2560950:\n rec[\"team\"] = \"KC\"\n rec[\"previous_teams\"] = [\"PIT\"]\n self.assertEqual(dbrecs, srcdata, \"db records differ\")\n\n def test_delete_team(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n kcdata = json.load(fp)\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_pit.json\"), \"rt\") as fp:\n pitdata = json.load(fp)\n srcdata = kcdata.copy()\n srcdata.extend(pitdata.copy())\n rmgr = self._getPlayerProfileManager()\n recs = util.runCoroutine(rmgr.save(srcdata))\n self.assertEqual(len(recs), len(srcdata), \"save returned record count differs\")\n dcount = util.runCoroutine(rmgr.delete(teams=[\"PIT\"]))\n self.assertEqual(dcount, len(pitdata), \"delete returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(len(dbrecs), len(kcdata), \"db record count differs\")\n for rec in dbrecs:\n del rec[\"_id\"]\n self.assertEqual(dbrecs, kcdata, \"db records differ\")\n\n def test_delete_profile_id(self):\n with open(os.path.join(os.path.dirname(__file__), \"data\", \"player_profile_kc.json\"), \"rt\") as fp:\n srcdata = json.load(fp)\n rmgr = self._getPlayerProfileManager()\n recs = util.runCoroutine(rmgr.save(srcdata))\n self.assertEqual(len(recs), len(srcdata), \"save returned record count differs\")\n dcount = util.runCoroutine(rmgr.delete(profile_ids=[2562399]))\n self.assertEqual(dcount, 1, \"delete returned record count differs\")\n dbrecs = util.runCoroutine(self.entmgr.find(self.entityName))\n self.assertEqual(len(dbrecs), len(srcdata) - 1, \"db record count differs\")\n xdata = [_ for _ in srcdata if _[\"profile_id\"] != 2562399]\n self.assertEqual(dbrecs, xdata, \"db records differ\")\n\nclass MockRosterManagerFacade(RosterManagerFacade):\n def __init__(self, entityManager : EntityManager, apiClient : nflapi.Client.Client, findData : List[dict]):\n super(MockRosterManagerFacade, self).__init__(entityManager, apiClient)\n self._find_data = findData\n\n async def find(self, *args) -> List[dict]:\n return self._find_data.copy()\n\nclass MockApiClient(nflapi.Client.Client):\n def __init__(self, profileData : List[dict]):\n self._profile_data = dict(zip([_[\"profile_id\"] for _ in profileData], profileData))\n self._req_rosters = []\n\n def getPlayerProfile(self, rosters : List[str]) -> List[dict]:\n self._req_rosters = rosters\n data = []\n for pid in [_[\"profile_id\"] for _ in rosters]:\n if pid in self._profile_data:\n data.append(self._profile_data[pid])\n return data\n\n def getRequestedRosters(self) -> List[dict]:\n return self._req_rosters\n","sub_path":"tests/TestPlayerProfileManagerFacade.py","file_name":"TestPlayerProfileManagerFacade.py","file_ext":"py","file_size_in_byte":12834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"4745865","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2021 Tianmian Tech. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\nimport functools\nimport random\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom common.python.utils import log_utils\nfrom kernel.base.sparse_vector import SparseVector\nfrom kernel.components.lr.vertlr.sync import loss_sync\nfrom kernel.optimizer.activation import sigmoid\nfrom kernel.utils import base_operator\nfrom kernel.utils import consts\nfrom kernel.utils import data_util\nfrom kernel.utils.base_operator import vec_dot\nfrom kernel.utils.random_number_generator import RandomNumberGenerator\n\nLOGGER = log_utils.get_logger()\n\n\ndef __compute_partition_gradient(data, fit_intercept=True, is_sparse=False):\n \"\"\"\n Compute vert regression gradient for:\n gradient = ∑d*x, where d is fore_gradient which differ from different algorithm\n Parameters\n ----------\n data: DSource, include fore_gradient and features\n fit_intercept: bool, if model has interception or not. Default True\n\n Returns\n ----------\n numpy.ndarray\n vert regression model gradient\n \"\"\"\n # LOGGER.debug(\"enter __compute_partition_gradient\")\n feature = []\n fore_gradient = []\n\n if is_sparse:\n row_indice = []\n col_indice = []\n data_value = []\n\n row = 0\n feature_shape = None\n for key, (sparse_features, d) in data:\n fore_gradient.append(d)\n assert isinstance(sparse_features, SparseVector)\n if feature_shape is None:\n feature_shape = sparse_features.get_shape()\n for idx, v in sparse_features.get_all_data():\n col_indice.append(idx)\n row_indice.append(row)\n data_value.append(v)\n row += 1\n if feature_shape is None or feature_shape == 0:\n return 0\n sparse_matrix = sp.csr_matrix((data_value, (row_indice, col_indice)), shape=(row, feature_shape))\n fore_gradient = np.array(fore_gradient)\n\n # gradient = sparse_matrix.transpose().dot(fore_gradient).tolist()\n gradient = base_operator.dot(sparse_matrix.transpose(), fore_gradient).tolist()\n if fit_intercept:\n bias_grad = np.sum(fore_gradient)\n gradient.append(bias_grad)\n LOGGER.debug(\"In first method, gradient: {}, bias_grad: {}\".format(gradient, bias_grad))\n return np.array(gradient)\n\n else:\n for key, value in data:\n feature.append(value[0])\n fore_gradient.append(value[1])\n feature = np.array(feature)\n fore_gradient = np.array(fore_gradient)\n if feature.shape[0] <= 0:\n return 0\n\n gradient = base_operator.dot(feature.transpose(), fore_gradient)\n gradient = gradient.tolist()\n if fit_intercept:\n bias_grad = np.sum(fore_gradient)\n gradient.append(bias_grad)\n return np.array(gradient)\n\n\ndef compute_gradient(data_instances, fore_gradient, fit_intercept):\n \"\"\"\n Compute vert-regression gradient\n Parameters\n ----------\n data_instances: DSource, input data\n fore_gradient: DSource, fore_gradient\n fit_intercept: bool, if model has intercept or not\n\n Returns\n ----------\n DSource\n the vert regression model's gradient\n \"\"\"\n feat_join_grad = data_instances.join(fore_gradient,\n lambda d, g: (d.features, g))\n is_sparse = data_util.is_sparse_data(data_instances)\n f = functools.partial(__compute_partition_gradient,\n fit_intercept=fit_intercept,\n is_sparse=is_sparse)\n gradient_partition = feat_join_grad.mapPartitions(f)\n gradient_partition = gradient_partition.reduce(lambda x, y: x + y)\n\n gradient = gradient_partition / data_instances.count()\n\n return gradient\n\n\nclass VertGradientBase(object):\n def federated_compute_gradient_and_loss(self, *args):\n raise NotImplementedError(\"Should not call here\")\n\n def set_total_batch_nums(self, total_batch_nums):\n \"\"\"\t\n Use for sqn gradient.\t\n \"\"\"\n pass\n\n\nclass Promoter(VertGradientBase, loss_sync.Promoter):\n\n def __init__(self):\n self.provider_forwards = None\n self.fore_gradient = None\n self.forwards = None\n # self.aggregated_forwards = None\n\n def _register_gradient_sync(self, provider_weight_transfer, provider_forward_transfer, fore_gradient_transfer,\n provider_gradient_r_transfer, provider_en_gradient_r_transfer):\n self.provider_weight_transfer = provider_weight_transfer\n self.provider_forward_transfer = provider_forward_transfer\n self.fore_gradient_transfer = fore_gradient_transfer\n self.provider_gradient_r_transfer = provider_gradient_r_transfer\n self.provider_en_gradient_r_transfer = provider_en_gradient_r_transfer\n\n def register_gradient_procedure(self, transfer_variables):\n self._register_gradient_sync(transfer_variables.provider_weight,\n transfer_variables.provider_forward_dict,\n transfer_variables.fore_gradient,\n transfer_variables.provider_gradient_r,\n transfer_variables.provider_en_gradient_r)\n\n self._register_loss_sync(transfer_variables.provider_loss_regular)\n\n def compute_fore_gradient(self, data_instances, model_weights, offset=None):\n \"\"\"\n gradient = d.dot(x)\n Define (sigmoid(wx+b) - y) as fore_gradient\n\n \"\"\"\n # X.dot(W)+b\n half_wx = data_instances.mapValues(\n lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_)\n self.forwards = half_wx\n\n for provider_forward in self.provider_forwards:\n self.forwards = self.forwards.join(provider_forward, lambda g, h: g + h)\n\n y_hat = self.forwards.mapValues(lambda p: sigmoid(p))\n\n fore_gradient = y_hat.join(data_instances, lambda y_hat, d: y_hat - d.label)\n\n return fore_gradient, y_hat\n\n def compute_forward_hess(self, data_instances, delta_s, provider_forwards):\n \"\"\"\n To compute Hessian matrix, y, s are needed.\n g = (1/N)*∑(0.25 * wx - 0.5 * y) * x\n y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x\n define forward_hess = (1/N)*∑(0.25 * x * s)\n \"\"\"\n forwards = data_instances.mapValues(\n lambda v: (np.dot(v.features, delta_s.coef_) + delta_s.intercept_) * 0.25)\n for provider_forward in provider_forwards:\n forwards = forwards.join(provider_forward, lambda g, h: g + (h * 0.25))\n # forward_hess = forwards.mapValues(lambda x: 0.25 * x / sample_size)\n hess_vector = compute_gradient(data_instances, forwards, delta_s.fit_intercept)\n return forwards, np.array(hess_vector)\n\n def compute_and_aggregate_forwards(self, data_instances, model_weights,\n encrypted_calculator, batch_index, offset=None):\n raise NotImplementedError(\"Function should not be called here\")\n\n def federated_compute_gradient_and_loss(self, data_instances, cipher_operator, encrypted_calculator, model_weights,\n optimizer,\n loss_method, n_iter_, batch_index, offset=None):\n \"\"\"\n Linear model gradient core\n Step 1: get provider forwards which differ from different algorithm\n For Logistic Regression and Linear Regression: forwards = wx\n For Poisson Regression, forwards = exp(wx)\n\n Step 2: Compute fore_gradient: d = sigmoid(wx)-y\n\n Step 3: send encrypted fore_gradient: d = [sigmoid(wx)-y]\n\n Step 5: Compute unilateral gradient = ∑d*x,\n\n \"\"\"\n current_suffix = (n_iter_, batch_index)\n\n self.provider_forwards = self.get_provider_forward(suffix=current_suffix)\n\n self.fore_gradient, y_hat = self.compute_fore_gradient(data_instances, model_weights, offset)\n encrypted_fore_gradient = encrypted_calculator[batch_index].encrypt(self.fore_gradient)\n self.remote_fore_gradient(encrypted_fore_gradient, suffix=current_suffix)\n\n self.decrypt_provider_gradient_and_remote(cipher_operator, suffix=current_suffix)\n\n unilateral_gradient = []\n if model_weights:\n unilateral_gradient = compute_gradient(data_instances,\n self.fore_gradient,\n model_weights.fit_intercept)\n if optimizer is not None:\n unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)\n\n gradient = optimizer.apply_gradients(unilateral_gradient)\n\n loss_list = []\n loss_norm = optimizer.loss_norm(model_weights)\n if loss_norm is not None:\n provider_loss_regular = self.get_provider_loss_regular(suffix=current_suffix)\n else:\n provider_loss_regular = []\n\n # if len(self.provider_forwards) > 1:\n # LOGGER.info(\"More than one provider exist, loss is not available\")\n # else:\n y = data_instances.mapValues(lambda instance: instance.label)\n loss = loss_method.compute_loss(y, y_hat)\n\n if loss_norm is not None:\n loss += loss_norm\n for provider_loss_norm in provider_loss_regular:\n loss += provider_loss_norm\n loss_list.append(loss)\n LOGGER.debug(\"In compute_loss, loss list are: {}\".format(loss_list))\n\n return gradient, loss_list\n\n def get_provider_forward(self, suffix=tuple()):\n provider_forward = self.provider_forward_transfer.get(idx=-1, suffix=suffix)\n return provider_forward\n\n def get_provider_weight(self):\n provider_weight = self.provider_weight_transfer.get(idx=-1)\n return provider_weight\n\n def remote_fore_gradient(self, fore_gradient, suffix=tuple()):\n self.fore_gradient_transfer.remote(obj=fore_gradient, role=consts.PROVIDER, idx=-1, suffix=suffix)\n\n def decrypt_provider_gradient_and_remote(self, cipher_operator, suffix=tuple()):\n en_provider_gradient_rs = self.provider_en_gradient_r_transfer.get(idx=-1, suffix=suffix)\n # provider_grad_r = en_provider_gradient_r[0].decrypt(cipher_operator)\n for idx, en_provider_gradient_r in enumerate(en_provider_gradient_rs):\n provider_grad_r = np.array(cipher_operator.decrypt_list(en_provider_gradient_r))\n self.provider_gradient_r_transfer.remote(provider_grad_r,\n role=consts.PROVIDER,\n idx=idx,\n suffix=suffix)\n\n\nclass Provider(VertGradientBase, loss_sync.Provider):\n\n def __init__(self):\n self.forwards = None\n self.fore_gradient = None\n\n def _register_gradient_sync(self, provider_weight_transfer, provider_forward_transfer, fore_gradient_transfer,\n provider_gradient_r_transfer, provider_en_gradient_r_transfer):\n self.provider_weight_transfer = provider_weight_transfer\n self.provider_forward_transfer = provider_forward_transfer\n self.fore_gradient_transfer = fore_gradient_transfer\n self.provider_gradient_r_transfer = provider_gradient_r_transfer\n self.provider_en_gradient_r_transfer = provider_en_gradient_r_transfer\n\n def register_gradient_procedure(self, transfer_variables):\n self._register_gradient_sync(transfer_variables.provider_weight,\n transfer_variables.provider_forward_dict,\n transfer_variables.fore_gradient,\n transfer_variables.provider_gradient_r,\n transfer_variables.provider_en_gradient_r)\n\n self._register_loss_sync(transfer_variables.provider_loss_regular)\n\n def federated_compute_gradient_and_loss(self, data_instances, cipher_operator, encrypted_calculator,\n model_weights, optimizer, n_iter_, batch_index):\n \"\"\"\n Linear model gradient core\n Step 1: compute forwards and send to promoter : forwards = wx + b\n Step 2:get fore_gradient from promoter: d = [sigmoid(wx)-y]\n Step 3: compute gradient and add random r : gradient = (1/n)*∑(d.dot(x))\n\n \"\"\"\n current_suffix = (n_iter_, batch_index)\n self.forwards = self.compute_forwards(data_instances, model_weights)\n self.remote_provider_forward(self.forwards, suffix=current_suffix)\n fore_gradient = self.get_fore_gradient(suffix=current_suffix)\n\n unilateral_gradient = compute_gradient(data_instances,\n fore_gradient,\n model_weights.fit_intercept)\n if optimizer is not None:\n unilateral_gradient = optimizer.add_regular_to_grad(unilateral_gradient, model_weights)\n\n r = RandomNumberGenerator(-1, 1).generate_random_number(unilateral_gradient.shape)\n # r = PaillierTensor(ori_data=r)\n # encrypted_r = r.encrypt(encrypted_calculator[batch_index])\n # en_gradient_r = encrypted_r.__add__(PaillierTensor(unilateral_gradient))\n\n encrypted_r = cipher_operator.recursive_encrypt(r)\n en_gradient_r = encrypted_r + unilateral_gradient\n\n gradient_r = self.sync_gradient_r(en_gradient_r, suffix=current_suffix)\n # gradient = gradient_r - r\n gradient = np.subtract(gradient_r, r)\n gradient = optimizer.apply_gradients(gradient)\n\n loss_regular = optimizer.loss_norm(model_weights)\n norm_r = random.uniform(-loss_regular * 0.1, loss_regular * 0.1)\n loss_regular = loss_regular + norm_r\n # if loss_regular is not None:\n # loss_regular = cipher_operator.encrypt(loss_regular)\n self.remote_loss_regular(loss_regular, suffix=current_suffix)\n\n return gradient, fore_gradient\n\n def compute_sqn_forwards(self, data_instances, delta_s, cipher_operator):\n \"\"\"\n To compute Hessian matrix, y, s are needed.\n g = (1/N)*∑(0.25 * wx - 0.5 * y) * x\n y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x\n define forward_hess = ∑(0.25 * x * s)\n \"\"\"\n sqn_forwards = data_instances.mapValues(\n lambda v: cipher_operator.encrypt(np.dot(v.features, delta_s.coef_) + delta_s.intercept_))\n # forward_sum = sqn_forwards.reduce(reduce_add)\n return sqn_forwards\n\n def compute_forward_hess(self, data_instances, delta_s, forward_hess):\n \"\"\"\n To compute Hessian matrix, y, s are needed.\n g = (1/N)*∑(0.25 * wx - 0.5 * y) * x\n y = ∇2^F(w_t)s_t = g' * s = (1/N)*∑(0.25 * x * s) * x\n define forward_hess = (0.25 * x * s)\n \"\"\"\n hess_vector = compute_gradient(data_instances,\n forward_hess,\n delta_s.fit_intercept)\n return np.array(hess_vector)\n\n def compute_forwards(self, data_instances, model_weights):\n \"\"\"\n forwards = wx\n \"\"\"\n # w = model_weights.coef_.reshape(model_weights.coef_.size)\n wx = data_instances.mapValues(lambda v: vec_dot(v.features, model_weights.coef_) + model_weights.intercept_,\n need_send=True)\n return wx\n\n def remote_provider_forward(self, provider_forward, suffix=tuple()):\n self.provider_forward_transfer.remote(obj=provider_forward, role=consts.PROMOTER, idx=0, suffix=suffix)\n\n def remote_provider_weight(self, provider_weight):\n self.provider_weight_transfer.remote(obj=provider_weight, role=consts.PROMOTER, idx=0)\n\n def get_fore_gradient(self, suffix=tuple()):\n provider_forward = self.fore_gradient_transfer.get(idx=0, suffix=suffix)\n return provider_forward\n\n def sync_gradient_r(self, e_gradient_r, suffix=tuple()):\n self.provider_en_gradient_r_transfer.remote(obj=e_gradient_r, role=consts.PROMOTER, idx=-1, suffix=suffix)\n gradient_r = self.provider_gradient_r_transfer.get(idx=0, suffix=suffix)\n return gradient_r\n","sub_path":"kernel/components/lr/vertlr/sync/vert_lr_gradient_and_loss.py","file_name":"vert_lr_gradient_and_loss.py","file_ext":"py","file_size_in_byte":17626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"93423869","text":"\"\"\"\n\nPython Interchangeable Virtual Instrument Library\n\nCopyright (c) 2012-2016 Alex Forencich\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\"\"\"\n\nfrom .agilent9000 import *\n\nclass agilentMSO9104A(agilent9000):\n \"Agilent Infiniium MSO9104A IVI oscilloscope driver\"\n\n def __init__(self, *args, **kwargs):\n self.__dict__.setdefault('_instrument_id', 'MSO9104A')\n\n super(agilentMSO9104A, self).__init__(*args, **kwargs)\n\n self._analog_channel_count = 4\n self._digital_channel_count = 16\n self._channel_count = self._analog_channel_count + self._digital_channel_count\n self._bandwidth = 1e9\n\n self._init_channels()\n self._add_method('measurement.fetch_waveform_digital', self._measurement_fetch_waveform_digital, ivi.Doc(\"\"\"description goes here\"\"\", cls, grp, '4.3.13'))\n self._add_property('acquisition.analog_sample_rate',\n self._get_acquisition_analog_sample_rate,\n self._set_acquisition_analog_sample_rate,\n None,\n ivi.Doc(\"\"\"\n Returns or sets the effective sample rate of the acquired analog waveform using the\n current configuration. The units are samples per second.\n \"\"\", cls, grp, '4.2.10'))\n\n def _get_acquisition_analog_sample_rate(self):\n if not self._driver_operation_simulate and not self._get_cache_valid():\n self._acquisition__analog_sample_rate = self._ask(\":acquire:srate:analog?\")\n self._set_cache_valid()\n return self._acquisition__analog_sample_rate\n\n def _set_acquisition_analog_sample_rate(self, value):\n value = float(value)\n self._acquisition_analog_sample_rate = value\n\n def _measurement_fetch_waveform_digital(self, index):\n raw_data = []\n\n if self._driver_operation_simulate:\n return list()\n\n self._write(\":waveform:byteorder msbfirst\")\n self._write(\":waveform:format ascii\")\n self._write(\":waveform:source %s\" % index)\n\n # Read preamble\n pre = self._ask(\":waveform:preamble?\").split(',')\n\n xinc = float(pre[4])\n xorg = float(pre[5])\n xref = int(float(pre[6]))\n\n# if format != 0:\n# raise UnexpectedResponseException()\n\n # Read waveform data\n raw_data.append(self._ask(':WAVeform:DATA?'))\n\n # convert string of hex values to list of hex strings\n data_list = raw_data[0].split(\",\")\n\n # convert to times\n data = [((((k-xref)*xinc) + xorg), e) for k,e in enumerate(data_list)]\n\n return data\n\n\n","sub_path":"ivi/agilent/agilentMSO9104A.py","file_name":"agilentMSO9104A.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"587848379","text":"#!/usr/bin/env python\n\nimport random\nfrom functools import reduce\n\n\"\"\"\nСоздать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами.\nПрограмма должна подсчитывать сумму чисел в файле и выводить ее на экран.\n\"\"\"\n\nTEXT_FILE = \"task05.txt\"\n\n\ndef init_file(file_name):\n print(f\"Creating file '{file_name}' with numbers...\")\n print()\n lines_count = random.randint(1, 10)\n with open(file_name, \"w\") as t_file:\n for i in range(lines_count):\n numbers = [str(random.randint(1, 100)) for x in range(random.randint(1, 10))]\n print(\" \".join(numbers), file=t_file)\n\n\ndef calculate_sum_in_file(file_name):\n try:\n with open(file_name, \"r\") as t_file:\n total_sum = 0\n for i, line in enumerate(t_file, 1):\n sum_in_line = sum([int(x) for x in line.strip().split()])\n total_sum += sum_in_line\n print(f\"Line No {i}: '{line.strip()}'; sum '{sum_in_line}'\")\n print()\n print(f\"Total sum of numbers in file '{file_name}': {total_sum}\")\n\n except FileNotFoundError:\n print(f\"File '{TEXT_FILE}' not found\")\n exit(1)\n\n\ndef main():\n init_file(TEXT_FILE)\n calculate_sum_in_file(TEXT_FILE)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lesson-5/task05.py","file_name":"task05.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"16010042","text":"import io\nimport re\n\n# full range(1975, 2001)\nfor i in range(1975, 2001):\n toSort = []\n # open a file to put some year's data in\n weeks = io.open(\"./data/fideTSV/weeks{}.txt\".format(i), \"w\", encoding=\"utf-8\")\n # write the header so d3 can read it in\n weeks.write(\"Name\" + \"\\t\" + \"Rating\" + \"\\t\" + \"Year\" + \"\\t\" + \"Fed\" + \"\\t\" + \"Rank\" + \"\\n\")\n # open each file\n lines = io.open(\"./data/ratings_lists/{}-01.TXT\".format(i), \"r\").readlines()\n # ignore the header\n lines = lines[4:-1]\n # loop through each player\n for player in lines:\n # capture the name\n name = player[re.search(\"[^\\s\\d]+(.\\S+)+\", player).start():re.search(\"[^\\s\\d]+(.\\S+)+\", player).end()]\n if \",\" in name:\n # reorder first/last name and delete the comma if necessary\n first = name[re.search(\", *\", name).end():]\n last = name[0:name.find(\",\")]\n name = first + \" \" + last\n # some names have more than one comma :(\n while \",\" in name:\n name = name.replace(\",\", \"\")\n # capture the rating\n rating = player[re.search(\"[^\\d]\\d{4}[^\\d]\", player).start() + 1:re.search(\"[^\\d]\\d{4}[^\\d]\", player).start() + 5]\n # get the year\n year = i\n # capture the country code\n fed = player[re.search(\"[A-Z]{3}\", player).start():re.search(\"[A-Z]{3}\", player).end()]\n # put all that data into a list element\n toSort.append([name, int(rating), year, fed])\n # after all the players have been added to the list, sort them according to the rating\n allSorted = sorted(toSort, key=lambda x: x[1], reverse=True)\n # once they are sorted, add a rank column\n for k in range(0, len(allSorted)):\n allSorted[k].append(k+1)\n # print(allSorted)\n # print(\"\\n\")\n for k in range(0, len(allSorted)):\n for j in range(0, len(allSorted[k])):\n # write everything to the file\n weeks.write(str(allSorted[k][j]) + \"\\t\")\n weeks.write(\"\\n\")\n","sub_path":"scripts/combine_weeks.py","file_name":"combine_weeks.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"282092513","text":"import socket\n\n# 1.创建Tcp套接字\nscokfd = socket.socket()\n\n# 绑定地址\nscokfd.bind(('127.0.0.1',9999))\n\n# 设置套接字监听\nscokfd.listen(5)\n\n# 等待客户端连接\nprint('等待客户端连接...')\nconnfd,addr = scokfd.accept()\nprint('客户端地址',addr)\nwhile True:\n # 消息收发\n data = connfd.recv(1024)\n # 客户端退出,服务端recv立即返回空字符串\n if not data:\n break\n print('客户端:',data.decode())\n\n a = input('服务端:')\n n = connfd.send(a.encode())\n\n\nconnfd.close()\nscokfd.close()\n\n","sub_path":"03-pythonNet/2019-1-8/tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"307654815","text":"import sys\n\ndef main():\n\n\targv = sys.argv\n\targc = len(argv)\n\t\n\trankCounts = getRankCounts()\n\ttotalHands = getTotalHands(rankCounts)\n\n\trankStrings = getRankStrings()\n\tresults = getResults(rankCounts, totalHands)\n\t\n\tprintResults(rankStrings, results)\n\t\ndef getResults(rankCounts, totalHands):\n\t\n\tresults = []\n\t\n\tfor i in range(0, len(rankCounts)):\n\t\tresults.append(rankCounts[i]/totalHands)\n\t\n\treturn results\n\t\ndef printResults(rankStrings, results):\n\t\n\tfor i in range(0, len(results)):\n\t\tsys.stdout.write(\"The probability of \")\n\t\tsys.stdout.write(rankStrings[i])\n\t\tsys.stdout.write(\" is \")\n\t\tsys.stdout.write(\"{:.4f}\".format(results[i]*100))\n\t\tsys.stdout.write(\"%\\n\")\n\t\ndef getTotalHands(rankCounts):\n\tsum = 0\n\tfor i in range(0, len(rankCounts)):\n\t\tsum += rankCounts[i]\n\treturn sum\n\t\ndef getRankStrings():\n\n\trankStrings = []\n\t\n\trankStrings.append(\"nothing\")\n\trankStrings.append(\"one pair\")\n\trankStrings.append(\"two pairs\")\n\trankStrings.append(\"three of a kind\")\n\trankStrings.append(\"a straight\")\n\trankStrings.append(\"a flush\")\n\trankStrings.append(\"a full house\")\n\trankStrings.append(\"four of a kind\")\n\trankStrings.append(\"a straight flush\")\n\trankStrings.append(\"a royal flush\")\n\t\n\treturn rankStrings\n\ndef getRankCounts():\n\t\n\trankCounts = []\n\t\n\tfor i in range(0, 10):\n\t\trankCounts.append(0)\n\t\n\tfor line in sys.stdin:\n\t\trank = getRank(line)\n\t\trankCounts[rank] += 1\n\t\t\n\treturn rankCounts\n\ndef getRank(line):\n\tsplitLine = line.split(\",\")\n\trankString = splitLine[len(splitLine)-1]\n\trank = int(rankString)\n\treturn rank\n\t\nif __name__ == \"__main__\":\n\tmain()","sub_path":"31/poker_31.py","file_name":"poker_31.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"72056592","text":"from micropython import const\n\n# Device\nQOS = const(1)\nMAIN_DELAY = const(1000)\nSTATS_DELAY = const(60000)\nRESTORE_DELAY = const(250)\nWDT_DELAY = const(100)\nDEVICE_STATE = b\"$state\"\n\n# Device states\nSTATE_INIT = b\"init\"\nSTATE_READY = b\"ready\"\nSTATE_RECOVER = b\"recover\"\n\n# Property datatypes\nP_STRING = b\"string\"\n\n# Node\nPUBLISH_DELAY = const(20)\n\n# General\nSLASH = b\"/\"\nUNDERSCORE = b\"_\"\n\nON = b\"on\"\nOFF = b\"off\"\nTRUE = b\"true\"\nFALSE = b\"false\"\nLOCKED = b\"locked\"\nUNLOCKED = b\"unlocked\"\n","sub_path":"homie/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"640984378","text":"# Network Architecture for Bigan: Generator, Discriminator, and Encoder\n\nimport torch\nimport torch.nn as nn \nimport numpy as np \nimport torch.nn.functional as F \nfrom torch.nn import Parameter as P \nimport pdb \n\n\nclass Generator(nn.Module):\n def __init__(self, z_dim):\n super(Generator, self).__init__()\n\n self.Activation = nn.ReLU(inplace=True)\n\n self.dense_net = nn.Linear(z_dim, 256*4*4)\n\n self.normalization = nn.BatchNorm2d()\n\n self.deconv1 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, bias=True) #4x4 --> 7x7\n\n self.deconv2 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, stride=2, bias=True) #7x7 --> 15x15\n\n self.deconv3 = nn.ConvTranspose2d(in_channels=64, out_channels=3, kernel_size=4, stride=2, bias=True) #15x15 --> 32x32\n\n\n def forward(self, z):\n\n x = self.dense_net(z)\n x = self.Activation(x)\n \n x = x.view(x.size(0), 256, 4, 4)\n\n x = self.deconv1(x)\n x = self.Activation(x)\n x = self.normalization(128)\n\n x = self.deconv2(x)\n x = self.Activation(x)\n x = self.normalization(64)\n\n x = self.deconv3(x)\n x = torch.tanh(x)\n\n return x\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n self.Activation = nn.LeakyReLU(inplace=True, negative_slope=0.2)\n\n self.normalization = nn.BatchNorm2d()\n\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=4, stride=2, bias=True)\n\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2, padding=1, bias=True)\n\n self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=True)\n\n self.dense_net = nn.Linear(128*4*4, 1)\n\n \n def forward(self, x):\n\n d = self.conv1(x)\n d = self.Activation(d)\n d = self.normalization(d)\n\n d = self.conv2(d)\n d = self.Activation(d)\n d = self.normalization(d)\n\n d = self.conv3(d)\n d = self.Activation(d)\n\n d = d.view(d.size(0), 128*4*4)\n d = self.dense_net(d)\n\n\n return d\n\n\nclass Encoder(nn.Module):\n def __init__(self, z_dim):\n super(Enocder, self).__init__()\n\n self.Activation = nn.LeakyReLU(inplace=True, negative_slope=0.2)\n\n self.normalization = nn.BatchNorm2d()\n\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=4, stride=2, bias=True) #32x32 --> 14x14\n \n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=True) #14x14 --> 7x7\n\n self.conv3 = nn.conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=True) #7x7 --> 4x4\n\n self.dense_net = nn.Linear(256*4*4, z_dim)\n\n def forward(self, x):\n\n z = self.conv1(x)\n z = self.Activation(z)\n z = self.normalization(64)\n\n z = self.conv2(z)\n z = self.Activation(z)\n z = self.normalization(128)\n\n z = self.conv3(z)\n z = self.Activation(z)\n\n z = z.view(z.size(0), 256*4*4)\n z = self.dense_net(z)\n\n z = torch.tanh(z)\n\n return z\n\n\n\n","sub_path":"bigan/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"613469973","text":"import logging.config\nimport sys\n\n# I used this dictionary test, you would put:\n# logging.config.fileConfig('logging.conf')\n# The \"\" entry in loggers is the root logger, tutorials always \n# use \"root\" but I can't get that to work\nlogging.config.dictConfig({\n\t\"version\": 1,\n\t\"formatters\": {\n\t\t\"default\": {\n\t\t\t\"format\": \"%(asctime)s %(levelname)s %(name)s %(message)s\"\n\t\t},\n\t},\n\t\"handlers\": {\n\t\t\"console\": {\n\t\t\t\"level\": 'DEBUG',\n\t\t\t\"class\": \"logging.StreamHandler\",\n\t\t\t\"stream\": \"ext://sys.stdout\"\n\t\t}\n\t},\n\t\"loggers\": {\n\t\t__name__: {\n\t\t\t\"level\": \"DEBUG\",\n\t\t\t'formatter': 'default',\n\t\t\t\"handlers\": [\"console\"]\n\t\t}\n\t}\n})\n\ndef logger():\n\t# Get the name from the caller of this function\n\treturn logging.getLogger(sys._getframe(1).f_globals['__name__'])\n\t\nlog= logger()\nlog.info('main')\n\n#from include.ttest import test as t1\n\n#t1()\n\nfrom include.gui.ttest import test as t2\n\nt2()","sub_path":"_misc/tlog.py","file_name":"tlog.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"113448416","text":"import pandas as pd\nimport numpy as np\nfrom time import time\nimport itertools\n\nitem = pd.DataFrame(data=[[1, 1], [6, 2], [18, 5], [22, 6], [28, 7]], columns=['Value', 'Weight'])\nactions = list(range(len(item))) # actions 每一個物品是一個action\n\n\ndef check_state(Q, knapsack, actions):\n \"\"\"\n 檢查輸入的背包狀態是否在Q table中, 若無則新增\n 將該column中可執行的action設定為0, 其餘為Nan\n\n :param Q: Q table\n :param knapsack: 背包\n :param actions: 所有可執行的action\n :return: Q table\n \"\"\"\n\n if str(knapsack) not in Q.index: # knapsack表示状态, 例如现在包里有[1,2]\n # append new state to q table\n q_table_new = pd.Series([np.NAN] * len(actions), index=Q.columns, name=str(knapsack))\n # 下面是将能使用的状态设置为0, 不能使用的设置为NaN (这个很重要)\n for i in list(set(actions).difference(set(knapsack))):\n q_table_new[i] = 0\n return Q.append(q_table_new)\n else:\n return Q\n\n\ndef envReward(action, knapsack):\n \"\"\"\n 執行action, 返回reward, 下一步的狀態及done\n done表示是否完成(超過背包限制或所有物品已放完)\n\n :param action: 要執行的action\n :param knapsack: 目前的背包狀態\n :return: r, knapsack_, done\n \"\"\"\n\n limit_w = 11\n knapsack_ = knapsack + [action]\n knapsack_.sort()\n knapsack_w = np.sum([item['Weight'][i] for i in knapsack_]) # 計算目前背包內的物品重量總和\n if knapsack_w > limit_w:\n r = -10\n completed = True\n else:\n if len(knapsack_) == len(item):\n r = 100\n completed = True\n return r, knapsack_, completed\n r = item['Value'][action]\n completed = False\n return r, knapsack_, completed\n\n\ndef mu_policy(Q, epsilon, nA, observation, actions):\n \"\"\"\n epsilon-greedy的策略, 返回每一個動作執行的機率\n\n :param Q: Q table\n :param epsilon: epsilon\n :param nA: 所有動作的數量\n :param observation: 目前背包的狀態\n :param actions: 所有可執行的action\n :return: 每一個動作執行的概率, 一維陣列\n \"\"\"\n # 尚未執行的action\n actions_list = list(set(actions).difference(set(observation)))\n # 輸入的背包狀態中, 所有不同action獲得的累計獎勵\n action_values = Q.loc[str(observation), :]\n # 使用action_values中最大的值\n greedy_action = action_values.idxmax()\n # 設定所有動作執行概率為0\n probabilities = np.zeros(nA)\n # 設定可執行動作執行概率為(1 / len(actions_list)) * epsilon\n for i in actions_list:\n probabilities[i] = (1 / len(actions_list)) * epsilon\n # greedy_action執行概率設定為(1 / len(actions_list)) * epsilon + (1 - epsilon)\n probabilities[greedy_action] = probabilities[greedy_action] + (1 - epsilon)\n return probabilities\n\n\ndef pi_policy(Q, observation):\n \"\"\"\n greedy策略, 每次選擇能獲得最大獎勵的動作\n\n :param Q: Q table\n :param observation: 目前背包的狀態\n :return: 一維陣列, 每個動作出現的概率, 最大獎勵的動作為1\n \"\"\"\n action_values = Q.loc[str(observation), :]\n best_action = action_values.idxmax()\n return np.eye(len(action_values))[best_action]\n\n\ndef qLearning(actions, num_episodes, discount_factor=1.0, alpha=0.7, epsilon=0.2):\n \"\"\"\n Q Learning訓練\n\n :param actions: 所有可執行的action\n :param num_episodes: 訓練的迭代次數\n :param discount_factor: 衰減係數\n :param alpha: learning rate\n :param epsilon: epsilon值, 用於epsilon-greedy選擇當前最大獎勵action\n \"\"\"\n # 環境中所有物品數量\n nA = len(actions)\n\n # 初始化Q table\n Q = pd.DataFrame(columns=actions)\n\n for i_episode in range(1, num_episodes + 1):\n # 開始一輪迭代\n # 開始時背包是空的\n knapsack = []\n\n # 新增Q table column\n Q = check_state(Q, knapsack, actions)\n\n # 從實際執行的policy中選擇action\n action = np.random.choice(nA, p=mu_policy(Q, epsilon, nA, knapsack, actions))\n for t in itertools.count():\n # 執行action, 返回reward, 下一步的狀態及是否完成(超過背包限制或所有物品已放完)\n reward, next_knapsack, done = envReward(action, knapsack)\n if done:\n Q.loc[str(knapsack), action] = reward\n break\n if t > 10:\n break\n\n # 更新Q table 下一步狀態的column\n Q = check_state(Q, next_knapsack, actions)\n # 更新Q table的value\n Q.loc[str(knapsack), action] = Q.loc[str(knapsack), action] + alpha * (\n reward + discount_factor * Q.loc[str(next_knapsack), :].max() - Q.loc[str(knapsack), action])\n\n knapsack = next_knapsack\n # 選擇下一個action\n next_action = np.random.choice(nA, p=mu_policy(Q, epsilon, nA, next_knapsack, actions))\n action = next_action\n\n if i_episode % 50 == 0:\n print(\"\\rEpisode {}/{}. | \".format(i_episode, num_episodes), end=\"\")\n\n return Q\n\n\nif __name__ == '__main__':\n # 訓練\n train_start_time = time()\n Q = qLearning(actions, num_episodes=1000, discount_factor=0.9, alpha=0.3, epsilon=0.1)\n train_finish_time = time()\n print(train_finish_time - train_start_time)\n print(Q)\n\n # 查看最终结果\n actionsList = []\n knapsack = []\n nA = len(actions)\n # 從實際執行的policy中選擇action\n action = np.random.choice(nA, p=pi_policy(Q, knapsack))\n t1 = time()\n for t in itertools.count():\n actionsList.append(action)\n # 執行action, 返回reward, 下一步的狀態及是否完成(超過背包限制或所有物品已放完)\n reward, next_knapsack, done = envReward(action, knapsack)\n if done:\n actionsList.pop()\n count = len(next_knapsack)\n if count >= 5:\n knapsack = next_knapsack\n break\n break\n else:\n # 選擇下一步動作\n next_action = np.random.choice(nA, p=pi_policy(Q, next_knapsack))\n action = next_action\n knapsack = next_knapsack\n t2 = time()\n print(t2 - t1)\n print(knapsack)\n\n","sub_path":"placement_rl.py","file_name":"placement_rl.py","file_ext":"py","file_size_in_byte":6377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"529526556","text":"from django.contrib import admin\n\nfrom .models import Jurisdiction, State, SurveyEmail\nfrom mailman.mailer import MailSurvey\n\n\nclass JurisdictionAdmin(admin.ModelAdmin):\n\n list_display = (\n 'name',\n 'state',\n 'website',\n 'telephone',\n 'email',\n 'city',\n )\n\n list_filter = ('state', 'city',)\n\n search_fields = ('name', 'state__name', 'telephone',)\n\n ordering = ['name']\n\n def changelist_view(self, request, extra_context=None):\n extra_context = {\n 'export_url': '/jurisdictions/emails',\n 'export_caption': 'Download Jurisdiction Emails',\n 'show_export_button': True\n }\n\n return super(JurisdictionAdmin, self).changelist_view(request, extra_context)\n\n\nclass StateAdmin(admin.ModelAdmin):\n\n list_display = (\n 'name',\n 'alpha',\n 'pollworker_website',\n )\n\n search_fields = ('name', 'alpha',)\n\n ordering = ['name']\n\ndef send_email(modeladmin, request, queryset):\n count_success = 0\n count_resend = 0\n tot_reqs = 0\n for email_req in queryset:\n tot_reqs +=1\n # Only send e-mail once\n if email_req.send_email == False:\n obj_list = email_req.jurisdiction.all()\n jurisdiction_list = []\n for jurisdiction in obj_list:\n jurisdiction_list.append([jurisdiction.name, jurisdiction.pk])\n jurisdiction_list.sort(key=lambda x: x[0])\n\n if ',' in email_req.recipients:\n recipient_list = email_req.recipients.split(',')\n elif '\\r\\n' in email_req.recipients:\n recipient_list = email_req.recipients.split('\\r\\n')\n elif '\\n' in email_req.recipients:\n recipient_list = email_req.recipients.split('\\n')\n elif ';' in email_req.recipients:\n recipient_list = email_req.recipients.split(';')\n else: #assume only one e-mail\n recipient_list = [email_req.recipients]\n \n recipient_list = [item.strip(' ') for item in recipient_list]\n \n # send email\n mail = MailSurvey(jurisdiction_list, recipient_list, email_req.email_text)\n status = mail.send()\n if status == 'OK':\n queryset.update(send_email=True)\n count_success+=1\n else:\n count_resend +=1\n \n message=\"\"\n if count_success > 0:\n message += \"{} out of {} e-mails were successfully sent.\".format(count_success, tot_reqs)\n if count_resend > 0:\n message += '{} out of {} e-mails have already been sent to their recipient. No action has been taken. To force a re-send, set \"Sent E-mail?\" to False'\n modeladmin.message_user(request, message)\n\nsend_email.short_description = \"Send e-mail\"\n\ndef mark_unsent(modeladmin, request, queryset):\n queryset.update(send_email=False)\nmark_unsent.short_description = \"Mark e-mail as not sent\"\n\nclass SurveyEmailAdmin(admin.ModelAdmin):\n list_display = (\n 'name', 'send_email', 'recipients'\n )\n actions = [send_email, mark_unsent]\n def get_readonly_fields(self, request, obj=None):\n return ['send_email']\n\nadmin.site.register(State, StateAdmin)\nadmin.site.register(SurveyEmail, SurveyEmailAdmin)\nadmin.site.register(Jurisdiction, JurisdictionAdmin)\n","sub_path":"apps/jurisdiction/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"470579039","text":"import bpy\r\nimport os\r\nimport json\r\nimport bgl\r\nimport gpu\r\nfrom gpu_extras.batch import batch_for_shader\r\nfrom bpy_extras import view3d_utils\r\nfrom mathutils import (\r\n\t\t\t\tVector,\r\n\t\t\t\tMatrix,\r\n\t\t\t\tQuaternion,\r\n\t\t\t\tEuler\r\n\t\t\t\t)\r\nfrom textwrap import wrap\r\nfrom math import (\r\n\tdegrees,\r\n\tradians,\r\n\tsin,\r\n\tcos,\r\n\tsqrt,\r\n\tatan2,\r\n\tacos,\r\n\tpi,\r\n\t)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef raycast_light(self, event, context, range, ray_max=1000.0):\r\n\t\"\"\"Compute the location and rotation of the light from the angle or normal of the targeted face off the object\"\"\"\r\n\tlength_squared = 0\r\n\tscene = context.scene\r\n\tlight = context.active_object\r\n\trv3d = context.region_data\r\n\tregion = context.region\r\n\tcoord = (event.mouse_region_x, event.mouse_region_y)\r\n\r\n#---Get the ray from the viewport and mouse\r\n\t# Direction vector from the viewport to 2d coord\r\n\tview_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, (coord))\r\n\t# 3d view origin vector from the region\r\n\tray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, (coord))\r\n\t# Define a default direction vector\r\n\tray_target = ray_origin + view_vector\r\n\r\n\tdepsgraph = context.evaluated_depsgraph_get()\r\n\t\r\n#---Select the targeted object\r\n\tdef visible_objects_and_duplis():\r\n\t\tif light.Lumiere.target :\r\n\t\t\tobj_trgt = light.Lumiere.target\r\n\t\t\tyield (obj_trgt, obj_trgt.matrix_world.copy())\r\n\t\telse:\r\n\t\t\tfor dup in depsgraph.object_instances:\r\n\t\t\t\tif dup.object.type == 'MESH':\r\n\t\t\t\t\tif dup.object.name not in context.scene.collection.children['Lumiere'].all_objects or \\\r\n\t\t\t\t\t(dup.object.name in context.scene.collection.children['Lumiere'].all_objects and \\\r\n\t\t\t\t\t(dup.object.Lumiere.color_type == 'Reflector' and dup.object.data.name != light.data.name)):\r\n\r\n\t\t\t\t\t\tif dup.is_instance:\r\n\t\t\t\t\t\t\tyield (dup.instance_object, dup.instance_object.matrix_world.copy())\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tyield (dup.object.original, dup.object.original.matrix_world.copy())\r\n\r\n\r\n#---Cast the ray to the targeted object\r\n\tdef obj_ray_cast(obj_trgt, matrix):\r\n\t#---Get the ray direction from the view angle to the targeted object\r\n\t\tmatrix_inv = matrix.inverted()\r\n\t\tray_origin_obj = matrix_inv @ ray_origin\r\n\t\tray_target_obj = matrix_inv @ ray_target\r\n\t\tray_direction_obj = ray_target_obj - ray_origin_obj\r\n\r\n\t#---Cast the ray\r\n\t\tsuccess, hit, normal, face_index = obj_trgt.ray_cast(ray_origin_obj, ray_direction_obj)\r\n\r\n\t\tif success:\r\n\t\t\treturn success, hit, normal\r\n\t\telse:\r\n\t\t\treturn None, None, None\r\n\r\n#---Find the closest object\r\n\t# best_length_squared = ray_max * ray_max\r\n\tbest_length_squared = -1.0\r\n\tbest_obj = None\r\n\r\n#---Find the position of the light using the reflect angle and the object targeted normal\r\n\tfor obj_trgt, matrix_trgt in visible_objects_and_duplis():\r\n\t\tsuccess, hit, normal = obj_ray_cast(obj_trgt, matrix_trgt)\r\n\r\n\t\tif success is not None :\r\n\t\t\t# Get the normal of the face from the targeted object\r\n\t\t\tnormal = matrix_trgt.to_3x3().inverted().transposed() @ normal\r\n\t\t\tnormal.normalize()\r\n\r\n\t\t#---Define the direction based on the normal of the targeted object, the view angle or the bounding box\r\n\t\t\tif light.Lumiere.reflect_angle == \"0\":\r\n\t\t\t\tself.reflect_angle = \"Accurate\"\r\n\t\t\t\treflect_dir = (view_vector).reflect(normal)\r\n\t\t\telif light.Lumiere.reflect_angle == \"1\":\r\n\t\t\t\tself.reflect_angle = \"Normal\"\r\n\t\t\t\tif obj_trgt.name in context.scene.collection.children['Lumiere'].all_objects:\r\n\t\t\t\t\treflect_dir = -normal\r\n\t\t\t\telse:\r\n\t\t\t\t\treflect_dir = normal\r\n\r\n\t\t\telif light.Lumiere.reflect_angle == \"2\":\r\n\t\t\t\tself.reflect_angle = \"Estimated\"\r\n\t\t\t\tif light.Lumiere.auto_bbox_center:\r\n\t\t\t\t\tlocal_bbox_center = 0.125 * sum((Vector(b) for b in obj_trgt.bound_box), Vector())\r\n\t\t\t\t\tglobal_bbox_center = obj_trgt.matrix_world @ local_bbox_center\r\n\t\t\t\telse:\r\n\t\t\t\t\tglobal_bbox_center = Vector(light.Lumiere.bbox_center)\r\n\t\t\t\treflect_dir = (matrix_trgt @ hit) - global_bbox_center\r\n\t\t\t\treflect_dir.normalize()\r\n\r\n\t\t#---Define light location : Hit + Direction + Range\r\n\t\t\tlight_loc = (matrix_trgt @ hit) + (reflect_dir * range)\r\n\r\n\t\t\tlength_squared = ((matrix_trgt @ hit) - ray_origin).length_squared\r\n\r\n\t\t\tif best_obj is None or length_squared < best_length_squared:\r\n\t\t\t\tbest_obj = obj_trgt\r\n\t\t\t\tbest_length_squared = length_squared\r\n\t\t\t\t_matrix_trgt = matrix_trgt\r\n\t\t\t\t_hit = hit\r\n\t\t\t\t_light_loc = light_loc\r\n\t\t\t\t_direction = reflect_dir\r\n\r\n\t\t\t\tif light.Lumiere.reflect_angle == \"2\":\r\n\t\t\t\t\tlight.Lumiere.bbox_center = global_bbox_center\r\n\t\t\t#---Parent the light to the target object\r\n\t\t\t\tlight.parent = obj_trgt\r\n\t\t\t\tlight.matrix_parent_inverse = matrix_trgt.inverted()\r\n\r\n#---Define location, rotation and scale\r\n\tif length_squared > 0 :\r\n\t\tif self.shift :\r\n\t\t\ttrack = light.location - Vector(_matrix_trgt @ _hit)\r\n\t\t\trotaxis = (track.to_track_quat('Z','Y')).to_euler()\r\n\t\telse :\r\n\t\t\trotaxis = (_direction.to_track_quat('Z','Y')).to_euler()\r\n\t\t\tlight.location = Vector((_light_loc[0], _light_loc[1], _light_loc[2]))\r\n\r\n\t\tlight.Lumiere.hit = (_matrix_trgt @ _hit)\r\n\r\n#---Update rotation and pitch for spherical coordinate\r\n\t\tx,y,z = light.location - Vector((light.Lumiere.hit))\r\n\t\tr = sqrt(x**2 + y**2 + z**2)\r\n\t\ttheta = atan2(y, x)\r\n\t\tif degrees(theta) < 0:\r\n\t\t\ttheta = radians(degrees(theta) + 360)\r\n\t\tlight.Lumiere.rotation = degrees(theta)\r\n\t\tphi = acos( z / r )\r\n\t\tlight.Lumiere.pitch = degrees(phi)\r\n\r\n\t\tlight.Lumiere.direction = _direction\r\n\t\tlight.rotation_euler = rotaxis\r\n\r\n# -------------------------------------------------------------------- #\r\ndef create_2d_circle(step, radius, rotation = 0, center_x=0, center_y=0):\r\n\t\"\"\" Create the vertices of a 2d circle at (0,0) \"\"\"\r\n\t#https://stackoverflow.com/questions/8487893/generate-all-the-points-on-the-circumference-of-a-circle\r\n\tindices = []\r\n\r\n\tverts = [(center_x, center_y)] + [(\r\n\t\t\tcos(2*pi / step*x + rotation)*radius + center_x,\r\n\t\t\tsin(2*pi / step*x + rotation)*radius + center_y\r\n\t\t\t) for x in range(0, step+1)]\r\n\r\n\tfor idx in range(len(verts) - 1):\r\n\t\ti1 = idx+1\r\n\t\ti2 = idx+2 if idx+2 <= step else 1\r\n\t\tindices.append((0,i1,i2))\r\n\r\n\treturn(verts, indices)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef draw_circle(center_circle, radius_circle, steps):\r\n\t\"\"\" Return the coordinates + indices of a circle using a triangle fan \"\"\"\r\n\tindices = []\r\n\tcenter_x, center_y = center_circle\r\n\tradiusx = radius_circle[0] - center_circle[0]\r\n\tradiusy = radius_circle[1] - center_circle[1]\r\n\tradius = sqrt(radiusx**2 + radiusy**2)\r\n\trotation = radians(radius_circle[1] - center_circle[1]) / 2\r\n\t# steps = int(360 / steps)\r\n\r\n\t# Get the vertices of a 2d circle\r\n\tverts, indices = create_2d_circle(steps, radius, rotation, center_x, center_y)\r\n\r\n\treturn(verts, indices)\r\n\r\n\r\n# -------------------------------------------------------------------- #\r\ndef draw_shader(self, color, alpha, type, coords, size=1, indices=None):\r\n\t\"\"\" Create a batch for a draw type \"\"\"\r\n\tbgl.glEnable(bgl.GL_BLEND)\r\n\tbgl.glEnable(bgl.GL_LINE_SMOOTH)\r\n\tbgl.glPointSize(size)\r\n\tbgl.glLineWidth(size)\r\n\ttry:\r\n\t\tif len(coords[0])>2:\r\n\t\t\tshader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')\r\n\t\telse:\r\n\t\t\tshader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')\r\n\t\tbatch = batch_for_shader(shader, type, {\"pos\": coords}, indices=indices)\r\n\t\tshader.bind()\r\n\t\tshader.uniform_float(\"color\", (color[0], color[1], color[2], alpha))\r\n\t\tbatch.draw(shader)\r\n\t\tbgl.glLineWidth(1)\r\n\t\tbgl.glPointSize(1)\r\n\t\tbgl.glDisable(bgl.GL_LINE_SMOOTH)\r\n\t\tbgl.glDisable(bgl.GL_BLEND)\r\n\texcept:\r\n\t\texc_type, exc_value, exc_traceback = sys.exc_info()\r\n\t\tself.report({'ERROR'}, str(exc_value))\r\n\r\n# -------------------------------------------------------------------- #\r\ndef export_props_light(self, context):\r\n\t\"\"\"Export the current light data in JSON format\"\"\"\r\n\tlumiere_dict = {}\r\n\tlight = context.active_object\r\n\r\n\tlumiere_dict[light.name] = {}\r\n\tlumiere_dict[light.name]['Lumiere'] = light['Lumiere'].to_dict()\r\n\tlumiere_dict[light.name]['Lumiere']['light_type'] = light.Lumiere.light_type\r\n\tlumiere_dict[light.name]['rotation'] = tuple(light.matrix_world.to_euler())\r\n\tlumiere_dict[light.name]['scale'] = tuple(light.scale)\r\n\tlumiere_dict[light.name]['location'] = tuple(light.location)\r\n\tlumiere_dict[light.name]['Lumiere']['definition'] = list(wrap(light['Lumiere']['definition'], 50)) if \"definition\" in light['Lumiere'] else \" \"\r\n\r\n\t# lumiere_dict[light.name]['group'] = {}\r\n\t# for group in bpy.data.objects[light.name].users_group :\r\n\t# \t# lumiere_dict[light.name]['group'] = {group.name : list(wrap(group[\"Lumiere\"][\"definition\"], 50))} if \"definition\" in group[\"Lumiere\"] else {group.name : \" \"}\r\n\t# \tlumiere_dict[light.name]['group'].update({group.name : list(wrap(group['Lumiere']['definition'], 50))} if \"definition\" in group['Lumiere'] else {group.name : \" \"})\r\n\r\n\tmat = get_mat_name()\r\n\tif light.type == \"LAMP\":\r\n\t\tlamp = get_lamp(context, light.data.name)\r\n\t\tlumiere_dict[light.name]['smooth'] = light.data.node_tree.nodes[\"Light Falloff\"].inputs[1].default_value\r\n\telse:\r\n\t\tlumiere_dict[light.name]['smooth'] = mat.node_tree.nodes['Light Falloff'].inputs[1].default_value\r\n\r\n\t#---Gradient\r\n\t\tif light.Lumiere.color_type in (\"Linear\", \"Spherical\"):\r\n\t\t\t# lumiere_dict[light.name]['repeat'] = mat.node_tree.nodes['Math'].inputs[1].default_value\r\n\t\t\tcolramp = mat.node_tree.nodes['ColorRamp'].color_ramp\r\n\t\t\tlumiere_dict[light.name]['gradient'] = {}\r\n\t\t\tlumiere_dict[light.name]['interpolation'] = colramp.interpolation\r\n\t\t\tfor i in range(len(colramp.elements)):\r\n\t\t\t\tlumiere_dict[light.name]['gradient'].update({colramp.elements[i].position: colramp.elements[i].color[:]})\r\n\r\n\treturn(lumiere_dict)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef get_mat_name():\r\n\t\"\"\"Return the name of the material of the light\"\"\"\r\n\tlight = bpy.context.object\r\n\tif bpy.context.object.type == 'MESH':\r\n\t\tmat = light.active_material\r\n\telse:\r\n\t\tmat = bpy.data.lights[light.data.name].name\r\n\r\n\treturn(mat)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef get_lumiere_dict():\r\n\t\"\"\"Return the file of the exported lights in a dict format\"\"\"\r\n\r\n\tcurrent_file_dir = os.path.dirname(__file__)\r\n\tfile_name = os.path.join(current_file_dir, \"lumiere_dictionary.json\")\r\n\r\n\t#---Try to open the Lumiere export dictionary\r\n\ttry:\r\n\t\twith open(file_name, 'r', encoding='utf-8') as file:\r\n\t\t\tmy_dict = json.loads(file.read())\r\n\t\t\tfile.close()\r\n\texcept :\r\n\t\t# print(\"\\n[Lumiere ERROR]\\n\")\r\n\t\t# import traceback\r\n\t\t# traceback.print_exc()\r\n\t\tmy_dict = {}\r\n\treturn(my_dict)\r\n\r\n# -------------------------------------------------------------------- #\r\ndef update_lumiere_dict(my_dict):\r\n\t\"\"\"Update the file of the exported lights\"\"\"\r\n\tcurrent_file_dir = os.path.dirname(__file__)\r\n\r\n\twith open(current_file_dir + \"\\\\\" + \"lumiere_dictionary.json\", \"w\", encoding='utf-8') as file:\r\n\t\tjson.dump(my_dict, file, sort_keys=True, indent=4, ensure_ascii=False)\r\n\tfile.close()\r\n","sub_path":"lumiere_utils.py","file_name":"lumiere_utils.py","file_ext":"py","file_size_in_byte":10856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"66"}
+{"seq_id":"591791107","text":"from sklearn.utils import resample\nimport pandas as pd\nimport os\nimport numpy as np\nimport sys\n# We want to work in ../data/
')\nfindRating = re.compile(r'