diff --git "a/3984.jsonl" "b/3984.jsonl"
new file mode 100644--- /dev/null
+++ "b/3984.jsonl"
@@ -0,0 +1,714 @@
+{"seq_id":"295786655","text":"import streamlit as st\nimport pandas as pd\nimport math\nimport CarbonSafeFunctions as csf\n# -------------------------------------------------------------------------------------------------------------------- #\n# -------------------------------------------------------------------------------------------------------------------- #\n\nclass default_values: # Value, Min, Max\n\tdf = pd.DataFrame([[100000.0, 50000.0, 150000.0],\n\t\t\t\t\t\t[10000.0, 5000.0, 15000.0],\n\t\t\t\t\t\t[180000.0, 120000.0, 240000.0],\n\t\t\t\t\t\t[20.0, 5.0, 60.0],\n\t\t\t\t\t\t[25.0, 8.0, 40.0],\n\t\t\t\t\t\t[10.0, 3.0, 20.0],\n\t\t\t\t\t\t[20.0, 5.0, 50.0],\n\t\t\t\t\t\t[5000.0, 2000.0, 10000.0],\n\t\t\t\t\t\t[1.0, 0.1, 5.0],\n\t\t\t\t\t\t[30.0, 10.0, 50.0],\n\t\t\t\t\t\t[2.0, 0.5, 5.0],\n\t\t\t\t\t\t[1.3, 1.0, 3.0]],\n\t\t\t\t\t\tcolumns = ['Values', 'Min', 'Max'],\n\t\t\t\t\t\tindex = ['3D Seismic ($/sqr-mi)', '2D Seismic ($/mile)', 'Permitting Costs ($/sqr-mile)', \"Adt'l Costs (%Total, processing etc)\", 'Test Well Spacing (sqr-mi/well)',\n\t\t\t\t\t\t\t\t'Plug & Abandoment Cost ($/well-ft)', 'P&A Contingency (%Total-P&A)', 'Basic Site Maintenance ($/sqr-mile)',\n\t\t\t\t\t\t\t\t'Inject Rate per Well (MtCO2/yr)', 'Backup Well Factor (%Total-Wells)', 'Est. Plume Size per Primary Inj-Well (sqr-mi)', 'MVA Area Cushion Factor (mult)'])\nvalues = default_values()\nraw_values = values.df.copy()\nindexes = values.df.index\nvalue_column = values.df.columns[0]\n# -------------------------------------------------------------------------------------------------------------------- #\n# -------------------------------------------------------------------------------------------------------------------- #\n\ndef set_values(reset_message):\n\tglobal values\n\tglobal raw_values\n\tglobal value_column\n\n\tinside = 0\n\n\tif st.checkbox('Storage Site Parameters:'):\n\t\tinside = 1\n\n\t\tfor name in values.df.index:\n\t\t\tvalues.df.loc[name, value_column] = st.slider(name, values.df.loc[name, 'Min'], values.df.loc[name, 'Max'], values.df.loc[name, value_column], values.df.loc[name, value_column] / 100)\n\t\t\tst.write('')\n\t\tif st.button(\"*Reset to default\"):\n\t\t\tvalues.df = raw_values\n\t\t\tst.success(reset_message)\n\t\tcsf.main_body_divider()\n\treturn inside\n# -------------------------------------------------------------------------------------------------------------------- #\n# -------------------------------------------------------------------------------------------------------------------- #\n\nclass StorageSite:\n\tdef __init__(self, reset_message, CaptureFacilities):\n\t\tself.inside = set_values(reset_message)\n\t\tglobal values\n\t\tglobal indexes\n\t\tglobal value_column\n\n\t\tself.data = values.df[value_column]\n\t\tself.seismic_3D = values.df.loc[indexes[0], value_column]\n\t\tself.seismic_2D = values.df.loc[indexes[1], value_column]\n\t\tself.permitting = values.df.loc[indexes[2], value_column]\n\t\tself.extra_costs = values.df.loc[indexes[3], value_column] / 100\n\t\tself.well_spacing = values.df.loc[indexes[4], value_column]\n\t\tself.plug_cost = values.df.loc[indexes[5], value_column]\n\t\tself.plug_cont = values.df.loc[indexes[6], value_column] / 100\n\t\tself.maintenance = values.df.loc[indexes[7], value_column]\n\t\tself.inject_rate = values.df.loc[indexes[8], value_column]\n\t\tself.backup = values.df.loc[indexes[9], value_column] / 100\n\t\tself.plume_size = values.df.loc[indexes[10], value_column]\n\t\tself.MVA_cushion = values.df.loc[indexes[11], value_column]\n\n\t\tself.inject_well_count = int(math.ceil(CaptureFacilities.CO2_per_year / self.inject_rate / (1 - self.backup)))\n\t\tself.tot_plume_size = self.inject_well_count * self.plume_size\n\t\tself.site_area = self.tot_plume_size * 4 # 24\n\t\tself.test_well_count = int(math.ceil(self.site_area / self.well_spacing))\n\t\tself.MVA_area = self.tot_plume_size * self.MVA_cushion\n\t\tself.seal_dual_count = int(math.ceil(self.MVA_area / 4))\n\t\tself.vadose_ground_count = 3 * (self.inject_well_count - 1)\n\t\tself.test_wells_MVA_periodic = max(1 , math.ceil((self.seal_dual_count - 1) * (self.MVA_area / self.site_area)))\n\n\n\n\n\t\t# st.write(self.site_area)\n\n\n\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# -------------------------------------------------------------------------------------------------------------------- #\n# -------------------------------------------------------------------------------------------------------------------- #\n# -------------------------------------------------------------------------------------------------------------------- #\n\n\t\t","sub_path":"StorageSiteParameters.py","file_name":"StorageSiteParameters.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"255929905","text":"from turtle import *\n\nsetup(320, 320)\n\ndef draw_spiral(loops):\n for i in range(4 * loops):\n forward(10 * i)\n left(90)\n\ndraw_spiral(5)\n\ngetscreen().getcanvas().postscript(file = \"spiral4.eps\")\n\ninput()\n","sub_path":"2019/03_Control_Structures/spiral2.py","file_name":"spiral2.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"645873787","text":"N, K = map(int, input().split())\nresult = []\nlst = [i + 1 for i in range(N)]\nK -= 1\nT = K\nwhile len(lst) > 0:\n if T >= len(lst):\n T = T % len(lst)\n result.append(lst.pop(T))\n T += K\n\nprint(f'<{\", \".join(map(str, result))}>')\n","sub_path":"BOJ/1158/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"385319632","text":"# hangul separate\n\nchosung=['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ', ' ']\njungsung=['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ', ' ']\njongsung=['', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ', ' ','\\n']\n\n\nclass separate_txt:\n\t\n\tdef __init__(self,text):\n\t\tself.text=text\n\t\t\n\tdef sep(self):\n\t\tself.cho=[]\n\t\tself.jung=[]\n\t\tself.jong=[]\n\t\tfor onechar in self.text:\n\t\t\tc=ord(onechar)\n\t\t\tif c==0x0020:\n\t\t\t\tself.cho.append(19)\n\t\t\t\tself.jung.append(21)\n\t\t\t\tself.jong.append(28)\n\t\t\telif c==0x000A:\n\t\t\t\tself.cho.append(19)\n\t\t\t\tself.jung.append(21)\n\t\t\t\tself.jong.append(29)\n\t\t\telse:\n\t\t\t\tself.cho.append( int( ((c-0xAC00)/(28*21)) %19 ) )\n\t\t\t\tself.jung.append( int( ((c-0xAC00)/28) %21 ) )\n\t\t\t\tself.jong.append( int( (c-0xAC00)%28 ) )\n\t\t\t\n\tdef print_txt(self):\n\t\tprint('='*25,\"초성 나열\",'='*26)\n\t\tfor j in self.cho:\tprint(chosung[j], end=\" \")\n\t\tprint('\\n',end='')\n\t\tprint('='*60,'\\n\\n')\n\t\ti=0\n\t\tfor k in self.text:\n\t\t\t#print(\"==%s번째 글자==\" %i, end=\"\\n\") #테스트용\n\t\t\tprint(chosung[self.cho[i]], end=\" \")\n\t\t\tprint(jungsung[self.jung[i]], end=\" \")\n\t\t\tprint(jongsung[self.jong[i]], end=\"/\")\n\t\t\ti=i+1\n\t\tprint('\\n',end='')\n\t\tprint('='*60,'\\n\\n')\n\n'''\nc=separate_txt('가나다라마바사 분리')\nc.sep()\nc.print_txt()\n'''\n\n\n\n\n","sub_path":"hangul_qwerty_error_count/separate/hangul_separate_m.py","file_name":"hangul_separate_m.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"623961888","text":"if __name__ == '__main__':\n\tresult = []\n\tgradeList = []\n\tfor _ in range(int(input())):\n\t\tnewList = []\n\t\tname = input()\n\t\tscore = float(input())\n\t\tgradeList.append(score)\n\t\tnewList.append(name)\n\t\tnewList.append(score)\n\n\t\tresult.append(newList)\n\n\tgradeList.sort()\n\tfor i in range(0,len(gradeList)-1):\n\t\tif gradeList[i] != gradeList[i+1]:\n\t\t\tsecondLow = gradeList[i+1]\n\t\t\tbreak\n\n\tresult.sort()\n\tfor student in result:\n\t\tif student[1] == secondLow:\n\t\t\tprint (student[0])\n","sub_path":"Hackerrank/Hacker8-NestedList.py","file_name":"Hacker8-NestedList.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"555090110","text":"def binary(l,item):\n start = 0\n end = len(l)-1\n mid = (start+end)//2\n pos = -1\n while start<=end:\n if (l[mid] == item):\n pos = mid\n break\n elif (l[mid] > item):\n end = mid-1\n else:\n start = mid+1\n return pos\n \na = input(\"Enter the values with comma:\").split(\",\") #accepting the elements of list seperated by comma\nitem = int(input(\"Enter the item to be searched:\"))\nl = [int(i) for i in a]\nl.sort()\np = binary(l,item)\nif (p == -1):\n print(\"The element doesnt exist\")\nelse:\n print(\"The element is in\",p+1,\"position\")\n","sub_path":"coding_assignment_module1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"616321531","text":"class Solution(object):\n def threeSumClosest(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n # follow same patter of 3Sum\n nums.sort()\n self.mingap = float('inf')\n self.res = []\n for k in xrange(len(nums)):\n if k > 0 and nums[k] == nums[k-1]:\n continue # dedup\n path = [nums[k]]\n self.find2Close(nums[k+1:], target-nums[k], path) # dedup\n if self.mingap == 0: # exit in-advance\n break\n return sum(self.res[-1])\n \n def find2Close(self,nums, target, path):\n i = 0\n j = len(nums)-1\n while i < j:\n if abs(target-nums[i]-nums[j]) < self.mingap:\n self.mingap = abs(target-nums[i]-nums[j])\n self.res.append(path+[nums[i]]+[nums[j]])\n if nums[i] + nums[j] == target: # exit in-adv\n return\n else:\n if nums[i] + nums[j] < target:\n i += 1\n else:\n j -= 1\n return","sub_path":"3sum_close/two_pointer_with_dedup.py","file_name":"two_pointer_with_dedup.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"652842868","text":"\"\"\"Module for training word embeddings.\"\"\"\n\n\nfrom utils import get_logger\nlogger = get_logger()\n\nimport glob\nimport gzip\nimport os\nimport shutil\nimport time\n\nfrom gensim.models import Word2Vec,FastText\nfrom gensim.models.word2vec import LineSentence\n\n\ndef get_out_filepaths(in_filepath, out_dir, model_name, size, n_tokens):\n \"\"\"Get output filepaths for word embeddings based on model parameters.\n \n Args:\n in_filepath (str): Filepath of input sentence lines file.\n out_dir (str): Directory to save word embeddings into.\n model_name (str): Name of the word embeddings model.\n size (int): Word embeddings vector dimension.\n n_tokens (int): Number of tokens that the model was trained on.\n \n Returns:\n tuple: Two-element tuple with output path for binary and text files.\n \"\"\"\n in_filename = os.path.splitext(os.path.basename(in_filepath))[0]\n if model_name:\n in_filename = (f'{model_name}.fi.{in_filename}'\n f'.{n_tokens / 1e6:.0f}M.{size}d')\n out_filepath = os.path.abspath(os.path.join(out_dir, in_filename))\n return f'{out_filepath}.bin',f'{out_filepath}.vec'\n\n\ndef gzip_file(filepath, remove_original=True):\n \"\"\"Gzip compress given file.\n \n Args:\n filepath (str): Path to file to be compressed.\n remove_original (bool, optional): Whether to remove the original file\n or not. Defaults to True.\n \"\"\"\n with open(filepath, 'rb') as f_in:\n with gzip.open(f'{filepath}.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n if remove_original:\n os.remove(filepath)\n\n\ndef save_word_vectors(sentlines_path, out_dir, model,\n save_vec=False, compress=True):\n \"\"\"Save word vectors of a gensim model into a directory.\n \n Args:\n sentlines_path (str): Filepath of input sentence lines file.\n out_dir (str): Directory to save word embeddings into.\n model (gensim.models.*): Trained gensim model.\n save_vec (bool, optional): Whether to save vectors in text format.\n Defaults to False.\n compress (bool, optional): Whether to Gzip the output or not. Defaults\n to True.\n \"\"\"\n model_name = model.__class__.__name__.lower()\n n_tokens = model.corpus_total_words\n size = model.vector_size\n (out_binary_filepath,\n out_text_filepath) = get_out_filepaths(sentlines_path, out_dir,\n model_name, size, n_tokens)\n model.wv.save_word2vec_format(out_binary_filepath, binary=True)\n if save_vec:\n model.wv.save_word2vec_format(out_text_filepath, binary=False)\n\n if compress:\n gzip_file(out_binary_filepath)\n if save_vec:\n gzip_file(out_text_filepath)\n\n\ndef create_word2vec_embeddings(sentlines_path, out_dir, size=300):\n \"\"\"Train Word2Vec word embeddings.\n \n Args:\n sentlines_path (str): Filepath of input sentence lines file.\n out_dir (str): Directory to save word embeddings into.\n size (int, optional): Word embeddings vector dimension. Defaults to 100.\n \"\"\"\n sentences = LineSentence(sentlines_path)\n w2v = Word2Vec(\n window=5,\n size=size,\n min_count=5,\n max_vocab_size=None,\n workers=4\n )\n w2v.build_vocab(sentences, progress_per=1e6)\n w2v.train(\n sentences,\n total_examples=w2v.corpus_count,\n epochs=w2v.epochs,\n queue_factor=2\n )\n save_word_vectors(sentlines_path, out_dir, w2v)\n \n\ndef create_fasttext_embeddings(sentlines_path, out_dir, size=300):\n \"\"\"Train FastText word embeddings.\n \n Args:\n sentlines_path (str): Filepath of input sentence lines file.\n out_dir (str): Directory to save word embeddings into.\n size (int, optional): Word embeddings vector dimension. Defaults to 100.\n \"\"\"\n sentences = LineSentence(sentlines_path)\n ft = FastText(\n window=5,\n size=size,\n min_count=5,\n max_vocab_size=None,\n workers=4\n )\n ft.build_vocab(sentences, progress_per=1e6)\n ft.train(\n sentences,\n total_examples=ft.corpus_count,\n epochs=ft.epochs,\n queue_factor=2\n )\n save_word_vectors(sentlines_path, out_dir, ft)\n\n\ndef create_all_embeddings(sentlines_dir='./data/processed',\n out_dir='./data/embeddings'):\n \"\"\"Train all word embeddings based on sentence line files in a directory.\n \n Args:\n sentlines_dir (str, optional): Directory that contains sentence lines\n files to train models on. Defaults to './data/processed'.\n out_dir (str, optional): Directory to save word embeddings into.\n Defaults to './data/embeddings'.\n \"\"\"\n start_time = time.perf_counter()\n \n # Filepaths\n sentline_filepaths = glob.glob(os.path.join(sentlines_dir, '*.sl'))\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n logger.warning(f'Created directory in \"{out_dir}\"')\n\n # Train word embeddings\n for filepath in sentline_filepaths:\n logger.info(f'Creating embeddings for sentlines {filepath}...')\n\n # 300d\n create_word2vec_embeddings(filepath, out_dir, size=300)\n create_fasttext_embeddings(filepath, out_dir, size=300)\n \n logger.info(f'All done in {time.perf_counter() - start_time:.0f} seconds!')\n\n\n#if __name__ == '__main__':\n# create_all_embeddings()\n","sub_path":"embeddings/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"294805353","text":"import random, traceback, sys, os\nsys.path.append(\"..\")\nfrom util import Counter,manhattanDistance\nfrom airplane_models.airplane import baseAirplane\nfrom map_models.newMap import Map\nfrom math import sin,cos,sqrt,pi,acos\n\n# def tran(a):\n# # print('a:',a)\n# if a == str(Directions.NORTH):\n# a = [0,1,0] #y+1\n# if a == str(Directions.SOUTH):\n# a = [0,-1,0] #y-1\n# if a == str(Directions.EAST):\n# a = [1,0,0] #x-1\n# if a == str(Directions.WEST):\n# a = [-1,0,0] #x+1\n# if a == str(Directions.UP):\n# a = [0,0,1] #z+1\n# if a == str(Directions.DOWN):\n# a = [0,0,-1] #z-1\n# if a == str(Directions.STOP):\n# a = [0,0,0]\n# # print('a:',a)\n# return a\n\n\n\n\n# class Map:\n# def __init__(self):\n# # original: (30.563688,103.940061),(30.519631,103.936683)\n# # vertical: (30.563688,103.951040),(30.519631,103.936683) 0.003139\n# # vertical + 20km: (30.560549,103.951040),(30.516492,103.936683)\n# # (30.559156,103.954450)\n# # runway1 = (30.563688,103.936922)\n# # runway2 = (30.519631,103.933544)\n# runway1 = (30.560549,103.951040)\n# runway2 = (30.516492,103.936683)\n# way1 = (29.51,103.836184)\n# way2 = (29.51,104.051539)\n# self.Left= 102.28\n# self.Right= 106.22\n# self.Down = 29.51 \n# self.Up= 31.13\n# self.R = 6371000\n# self.x,self.y = self.LongLaToXY([self.Right,self.Up])\n# self.z = int((6000-1000)//1000)\n# # self.z = 2\n# self._map = [[[(4-i) for i in range(self.z)] for _ in range(self.y)] for _ in range(self.x)]\n# for i in range(self.x):\n# for j in range(self.y):\n# self._map[i][j][0] = -100\n# # print(len(self._map),len(self._map[0]),len(self._map[0][0]))\n# positionR1 = self.LongLaToXY(runway1)\n# positionR2 = self.LongLaToXY(runway2)\n# self._map[positionR1[0]][positionR1[1]][0] = 200\n# self._map[positionR2[0]][positionR2[1]][0] = 200\n# positionW1 = self.LongLaToXY(way1)\n# positionW2 = self.LongLaToXY(way2)\n# print(positionW1,positionW2)\n# self.End = [positionR1,positionR2]\n# for i in range(positionR1[0]+1):\n# self._map[i][positionR1[1]][1] = 100+2*(i-positionR1[0])\n# for i in range(positionR2[0]+1):\n# self._map[i][positionR2[1]][1] = 100+2*(i-positionR2[0])\n# for i in range(self.x):\n# self._map[i][positionW1[1]][1] = 10-0.1*i\n# self._map[i][positionW2[1]][1] = 10-0.1*i\n# # print(self._map)\n# # print('runway:',self.End)\n# # print('dis:',self.LongLaToXY(way1),self.LongLaToXY(way2))\n\n# self.plane = Counter()\n# self.planeLocation = Counter()\n# self.values = Counter()\n# self.tempValue = Counter()\n\n# def LongLaToXY(self,position):\n# C = sin(self.Left)*sin(self.Left)*cos(position[0]-self.Down)+cos(self.Left)*cos(self.Left)\n# positionX = self.R*pi/180*acos(C)\n# C = sin(self.Left)*sin(position[1])*cos(self.Down-self.Down)+cos(self.Left)*cos(position[1])\n# positionY = self.R*pi/180*acos(C)\n\n# return [int(positionX//2750+1),int(positionY//2750+1)]\n\n# def AltToZ(self,alt):\n# # print(alt)\n# return int((alt-1000)//1000+1)\n# def getStates(self):\n# states = []\n# for i in range(self.x):\n# for j in range(self.y):\n# for k in range(self.z):\n# states.append([i,j,k])\n# return states\n\n# def getPossibleActions(self,state):\n# if state == self.End:\n# return []\n# x,y,z = state\n# actions = []\n# if x > 0 : \n# actions.append(Directions.WEST)\n# if x < self.x:\n# actions.append(Directions.EAST)\n# if y > 0 :\n# actions.append(Directions.SOUTH)\n# if y < self.y:\n# actions.append(Directions.NORTH)\n# if z > 0 :\n# actions.append(Directions.DOWN)\n# if z < self.z:\n# actions.append(Directions.UP)\n# actions.append(Directions.STOP)\n# return actions\n\n# # class State():\n# # def __init__(self,x=0,y=0,z=0):\n# # self.x = x\n# # self.y = y\n# # self.z = z\n# class Directions:\n# # NORTH = 'North' #y+1\n# # SOUTH = 'South' #y-1\n# # EAST = 'East' #x-1\n# # WEST = 'West' #x+1\n# # UP = 'Up' #z+1\n# # DOWN = 'Down' #z-1\n# NORTH = [0,1,0] #y+1\n# SOUTH = [0,-1,0] #y-1\n# EAST = [1,0,0] #x-1\n# WEST = [-1,0,0] #x+1\n# UP = [0,0,1] #z+1\n# DOWN = [0,0,-1] #z-1\n# STOP = [0,0,0]\n\n\nclass QLearningAgent:\n \"\"\"\n Q-Learning Agent\n\n Functions you should fill in:\n - computeValueFromQValues\n - computeActionFromQValues\n - getQValue\n - getAction\n - update\n\n Instance variables you have access to\n - self.epsilon (exploration prob)\n - self.alpha (learning rate)\n - self.discount (discount rate)\n\n Functions you should use\n - self.getPossibleActions(state)\n which returns legal actions for a state\n \"\"\"\n def __init__(self,plane,Map = None,actionFn = None, alpha=0.5, gamma=0.95,epsilon = 0.1):\n \"You can initialize Q-values here...\"\n # if actionFn == None:\n # actionFn = lambda state: state.getPossibleActions()\n self.running = 0\n self.actionFn = actionFn\n self.plane = plane\n self.Map = Map\n # self.episodesSoFar = 0\n # self.accumTrainRewards = 0.0\n # self.accumTestRewards = 0.0\n # self.numTraining = int(numTraining)\n self.epsilon = float(epsilon)\n self.alpha = float(alpha)\n self.discount = float(gamma)\n self.values = Counter()\n self.oldValues = Counter()\n # print(plane.position,plane.altitude)\n self.X ,self.Y = self.Map.XYInDistToCoordinate(self.Map.longLaToXYInDist(plane.position))\n self.Z = int((plane.altitude-1000)//self.Map._heightResolution)\n self.kmX,self.kmY = self.Map.longLaToXYInDist(plane.position)\n self.kmZ = plane.altitude\n\n # def getPossibleActions(self,state):\n # if state == self.Map.End:\n # return []\n # x,y,z = state\n # actions = []\n # if x > 0 : \n # actions.append(Directions.WEST)\n # if x < (self.Map.x-1):\n # actions.append(Directions.EAST)\n # if y > 0 :\n # actions.append(Directions.SOUTH)\n # if y < (self.Map.y-1):\n # actions.append(Directions.NORTH)\n # if z > 0 :\n # actions.append(Directions.DOWN)\n # if z < (self.Map.z-1):\n # actions.append(Directions.UP)\n # return actions\n\n def getQValue(self, state, action):\n \"\"\"\n Returns Q(state,action)\n Should return 0.0 if we have never seen a state\n or the Q node value otherwise\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # print(self.values.keys())\n return self.oldValues[(str(state),str(action))]\n #state contains: (position_x, position_y, altitude, heading, speed]\n #action:(heading,speed, altitude)\n util.raiseNotDefined()\n\n def getReward(self,nextState):\n # if state == self.grid.terminalState:\n # return 0.0\n # print('next:',nextState)\n # ((x, y, z), (h, s))\n a = nextState\n # print('nextState:',a)\n # print('a:',a[0][0],a[0][1])\n position = self.Map.XYInDistToCoordinate([a[0][0],a[0][1]])\n z = int((a[0][2]-1000)//self.Map._heightResolution)\n # print(position,z)\n rewardS = (150-a[1][1])/4\n # print(x,y,z)\n cell = self.Map._map[position[0]][position[1]][z]\n rewardT = 0\n rewardD = -min(manhattanDistance(list(self.Map.runwayLocationCoordinate1)+[0],list(position)+[z]),manhattanDistance(list(self.Map.runwayLocationCoordinate2)+[0],list(position)+[z]))\n if (position == self.Map.runwayLocationCoordinate1) or(position == self.Map.runwayLocationCoordinate2) and (z == 0):\n if (abs(a[1][0]) < 15) and (a[1][1] < 130):\n rewardT = self.Map._map[position[0]][position[1]][0]\n # print('reward:',x,y,z,self.Map._map[x][y][z])\n return (rewardS +cell+rewardT+rewardD/(5+z/5))\n # return self.livingReward\n\n # def getNextState(self,startState,action):\n # s = startState\n # a = action\n # if type(startState) == type('a'):\n # s = list(map(int,startState[1:-1].split(', ')))\n # if type(action) == type('a'):\n # a = list(map(int,action[1:-1].split(', ')))\n\n\n # nextState = [s[0]+a[0],s[1]+a[1],s[2]+a[2]]\n # # print(nextState)\n # reward = self.getReward(nextState)\n # return nextState\n\n def computeValueFromQValues(self, state):\n \"\"\"\n Returns max_action Q(state,action)\n where the max is over legal actions. Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return a value of 0.0.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # print(state)\n actions = self.Map.getPossibleActions(state)\n # actions = []\n if len(actions) != 0:\n vals = []\n for action in actions:\n nextState = self.Map.getNextState(state,action)\n a = nextState\n #print('nextState:',a)\n #print('a:',a[0][0],a[0][1])\n position = self.Map.XYInDistToCoordinate([a[0][0],a[0][1]])\n z = int((a[0][2]-1000)//self.Map._heightResolution)\n # print('computeValueFromQValues:',a,position,z)\n vals.append(self.getQValue(state,action)+self.Map._map[position[0]][position[1]][z])\n return max(vals)\n return 0.0\n util.raiseNotDefined()\n\n def computeActionFromQValues(self, state):\n \"\"\"\n Compute the best action to take in a state. Note that if there\n are no legal actions, which is the case at the terminal state,\n you should return None.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n MainDict = self.values.keys()\n Action = []\n Max = []\n nextStates = []\n actions = self.Map.getPossibleActions(state)\n # print('actions:',actions)\n for action in actions:\n nextState = self.Map.getNextState(state,action)\n reward = self.getReward(nextState)\n self.update(state,action,nextState,reward)\n a = nextState\n position = self.Map.XYInDistToCoordinate([a[0][0],a[0][1]])\n z = int((a[0][2]-1000)//self.Map._heightResolution)\n # print('nextState:',position,z,nextState)\n # print(self.Map.getPossibleActions(nextState))\n if self.Map.getPossibleActions(nextState) != []:\n Action.append(action)\n nextStates.append([nextState,self.Map._map[position[0]][position[1]][z]])\n Max.append(self.getQValue(state,action)+self.Map._map[position[0]][position[1]][z])\n if self.running == 1:\n print('action:',Action)\n print('Max:',Max)\n self.running = 0\n\n # for i in MainDict:\n # # print(i[0],str(state))\n # if i[0] == str(state):\n # # print(i)\n # # print('aaaa:',i[0],i[1])\n # nextState = self.Map.getNextState(eval(i[0]),eval(i[1]))\n # a = nextState\n # position = self.Map.XYInDistToCoordinate([a[0][0],a[0][1]])\n # z = int((a[0][2]-1000)//self.Map._heightResolution)\n # # print('nextState:',position,z,nextState)\n # # print(self.Map.getPossibleActions(nextState))\n # if self.Map.getPossibleActions(nextState) != []:\n # Action.append(b)\n # nextStates.append([nextState,self.Map._map[position[0]][position[1]][z]])\n # Max.append(self.getQValue(i[0],i[1])+self.Map._map[position[0]][position[1]][z])\n\n\n # return random.choice(Action)\n # print(nextStates,)\n # print(Max)\n if Max == []:\n actions = self.Map.getPossibleActions(state)\n if actions == []:\n return None\n return random.choice(actions)\n # if random.random() < self.epsilon:\n # return random.choice(Action)\n # else:\n return Action[Max.index(max(Max))]\n util.raiseNotDefined()\n\n # return action\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n The parent class calls this to observe a\n state = action => nextState and reward transition.\n You should do your Q-Value update here\n\n NOTE: You should never call this function,\n it will be called on your behalf\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # print(nextState)\n newSample = reward+ self.discount*self.computeValueFromQValues(nextState)\n self.values[(str(state),str(action))] = (1-self.alpha)*self.oldValues[(str(state),str(action))] + (self.alpha*newSample)\n return newSample\n util.raiseNotDefined()\n\n # def getPolicy(self, state):\n # return self.computeActionFromQValues(state)\n\n # def getValue(self, state):\n # return self.computeValueFromQValues(state)\n\ndef qlearningTest():\n print(\"Testing class qlearning\")\n test = baseAirplane(flightType=\"A330\", flight=\"Ca1999\",\n registration=\"b-6878\", depature=\"PVG\", destination=\"ctu\")\n agent = QLearningAgent(test)\n space = Map()\n # print(space.x,space.y,space.z)\n agent.Map = space\n # print('End:',space._map[1][1][1])\n # print(agent.Map.getStates())\n states = filter(lambda state : len(space.getPossibleActions(state)) > 0,space.getStates())\n states.sort()\n # exit()\n randObj = random.sample(states, 1)\n print(randObj)\n # print(len(space._map))\n lastExperience = None\n space.plane[agent.plane.flight] = agent\n # End = [1,1,1]\n # count = 0\n for _ in range(1000):\n #add all planes position to the map\n # for key in space.plane.keys():\n # s = space.plane[key].state\n # space._map[s[0]][s[1]][s[2]] = -100\n startState = random.choice(states)\n # print(startState)\n action = random.choice(agent.getPossibleActions(startState))\n endState = agent.getNextState(startState,action)\n reward = agent.getReward(endState)\n lastExperience = (startState, action, endState, reward)\n agent.update(*lastExperience)\n # print(space._map)\n # print(agent.values)\n # print(space._map[12][29][3])\n # for i in agent.values.keys():\n # if i[0] == str([1,1,0]):\n # print(i,agent.values[i])\n # for _ in range(10): \n # # for each plane find next step \n # for key in space.plane.keys():\n # currentAgent = space.plane[key]\n # s = currentAgent.state\n # #remove self from the map\n # if s != space.End:\n # space._map[s[0]][s[1]][s[2]] = 0\n # else:\n # space._map[s[0]][s[1]][s[2]] = 100\n # space.plane[key]\n # a = tran(currentAgent.computeActionFromQValues(s))\n # # print(s+a)\n # nextState = [s[0]+a[0],s[1]+a[1],s[2]+a[2]]\n # currentAgent.update(s,a,nextState,1)\n # currentAgent.state = nextState\n # print(\"current:\",currentAgent.state)\n\n # print(agent.plane.flight)\n # print(test.altitude)\n\n\nif __name__ == \"__main__\":\n # Map = approachControlArea(29.51,31.13,106.22,102.28,5500,5500,2)\n \n\n # C = sin(MLatA)*sin(MLatB)*cos(MLonA-MLonB) + cos(MLatA)*cos(MLatB)\n # DistanceY = R*math.acos(C)*Pi/180\n # print('DistanceY:',DistanceY)\n\n # [[(30.563688,103.940061),(30.593320,103.953838)],[(30.519631,103.936683),(30.549429,103.950609)]]\n # (30.563688,103.940061),(30.519631,103.936683)\n # (30.563688,103.936922),(30.519631,103.933544)\n # Point = [(30.563688,103.936922),(30.519631,103.933544)]\n # x1 = abs(Point[0][0] - Map._leftMost) * cos(Point[0][0]) * 6371000\n # print('Map:',Map._length,Map.height,Map.width)\n qlearningTest()\n","sub_path":"3/QlearningAgent/qlearningAgent.py","file_name":"qlearningAgent.py","file_ext":"py","file_size_in_byte":16493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"3664314","text":"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport nibabel as nib\nfrom nilearn.image import resample_img\nimport nilearn\nfrom nilearn import image\nfrom nilearn import plotting\n# from nibabel import show_slices\n\nfrom keras.utils import np_utils\n\nimport matplotlib.pyplot as plt\n\n\nAFFINE_CONST = 4\n\nimport shutil\n\ndef load_dataset_file(image_folder):\n folderlist = os.listdir(image_folder)\n # Sort List\n folderlist = sorted(folderlist)\n\n # Count number of files in folder\n nbrOfImages = 0\n for i in xrange(0, len(folderlist)):\n nbrOfImages += len(os.listdir(image_folder + \"/\" + folderlist[i]))\n print(\"Nbr of images: \", nbrOfImages)\n\n x_set = []\n y_set = []\n\n for i in xrange(0, len(folderlist)):\n # List of strings with names of images in subfolder\n subject_list = os.listdir(image_folder + \"/\" + folderlist[i])\n subject_list = sorted(subject_list)\n for j in xrange(0, len(subject_list)):\n image_name = subject_list[j] + '.nii.gz'\n x_set.append(image_folder + '/' + folderlist[i] + '/' + subject_list[j] + '/' + image_name)\n y_set.append(i)\n\n return x_set, y_set\n\n\ndef save_image(img_fold, img_files):\n for i in xrange(0, len(img_files)):\n # get file name\n img_name = img_files[i].split(\"/\")[-1]\n img_file_new = img_fold + str(i) + '_' + img_name\n shutil.copy2(img_files[i], img_file_new)\n\n\ndef cal_statistics(img_files):\n MAX_VALUE = 0\n MIN_VALUE = 100000\n MEAN_VALUE = 0\n\n # load the data\n for i in xrange(0, len(img_files)):\n # Load image from subfolder\n epi_img = nib.load(img_files[i])\n epi_img_data = epi_img.get_data()\n\n tmp = np.max(epi_img_data)\n if tmp > MAX_VALUE:\n MAX_VALUE = tmp\n\n tmp_min = np.min(epi_img_data)\n if MIN_VALUE > tmp_min:\n MIN_VALUE = tmp_min\n\n tmp = np.mean(epi_img_data)\n MEAN_VALUE += tmp\n\n # print(epi_img_data.shape)\n\n MEAN_VALUE = MEAN_VALUE / len(img_files)\n return MAX_VALUE, MIN_VALUE, MEAN_VALUE\n\n\ndef show_slices(slices):\n \"\"\" Function to display row of image slices \"\"\"\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n\n\ndef show_center(img_data):\n if isinstance(img_data, str):\n img = nib.load(img_data)\n img_data = img.get_data()\n\n # show the center slices for MRI image\n n_i, n_j, n_k = img_data.shape\n center_i = (n_i - 1) / 2\n center_j = (n_j - 1) / 2\n center_k = (n_k - 1) / 2\n\n slice_0 = img_data[center_i, :, :]\n slice_1 = img_data[:, center_j, :]\n slice_2 = img_data[:, :, center_k]\n\n show_slices([slice_0, slice_1, slice_2])\n plt.suptitle(\"Center slices for ADNI image\")\n\n\ndef test(img_files, image_size, depth):\n nbrOfImages = len(img_files)\n for i in xrange(0, len(img_files)):\n # Load image from subfolder\n epi_img = nib.load(img_files[i])\n\n epi_img_data = epi_img.get_data()\n epi_img_data = epi_img_data[:, :, :, 0]\n\n # affine\n print(epi_img.affine)\n epi_vox_center = (np.array(epi_img_data.shape) - 1) / 2.\n print(epi_img.affine.dot(list(epi_vox_center) + [1]))\n\n\ndef cal_cube_size(img_file, portion_i, portion_j, portion_k):\n epi_img = nib.load(img_file)\n\n epi_img_data = epi_img.get_data()\n # epi_img_data = epi_img_data[:, :, :, 0]\n num_i, num_j, num_k = epi_img_data.shape\n\n size_i = num_i / portion_i\n size_j = num_j / portion_j\n size_k = num_k / portion_k\n\n return size_i, size_j, size_k\n\n\ndef preprocess(nb_classes, img_files, target, portion_i, portion_j, portion_k, MIN_VALUE, MAX_VALUE, MEAN_VALUE,\n is_scale=True, is_mean=True, is_shuffle=True):\n nbrOfImages = len(img_files)\n\n portion_num = portion_i * portion_j * portion_k\n\n size_i, size_j, size_k = cal_cube_size(img_files[0], portion_i, portion_j, portion_k)\n\n X_set = np.zeros(shape=(nbrOfImages * portion_num, 1, size_i, size_j, size_k), dtype=np.float32)\n y_set = np.zeros(shape=(nbrOfImages * portion_num))\n\n for i in xrange(0, len(img_files)):\n # Load image from subfolder\n epi_img = nib.load(img_files[i])\n\n epi_img_data = epi_img.get_data()\n # epi_img_data = epi_img_data[:, :, :, 0]\n\n tmp = np.max(epi_img_data)\n if tmp > MAX_VALUE:\n MAX_VALUE = tmp\n\n tmp_min = np.min(epi_img_data)\n if MIN_VALUE > tmp_min:\n MIN_VALUE = tmp_min\n\n # split the data from the slice\n index_temp = 0\n for index_i in xrange(0, portion_i):\n for index_j in xrange(0, portion_j):\n for index_k in xrange(0, portion_k):\n # Store image in array\n data_index = i * portion_num + index_temp\n data = epi_img_data[(index_i * size_i):((index_i+1) * size_i),\n (index_j * size_j):((index_j + 1) * size_j),\n (index_k * size_k):((index_k + 1) * size_k)]\n if is_scale:\n data = (data - data.min()) / data.max()\n X_set[data_index, 0, :, :, :] = data\n y_set[data_index] = target[i]\n # show_center(data)\n index_temp += 1\n # if is_mean:\n # X_set = X_set - MEAN_VALUE\n # if is_scale:\n # X_set = (X_set - X_set.min()) / X_set.max()\n # X_set = (X_set - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)\n\n Y_set = np_utils.to_categorical(y_set, nb_classes)\n\n if is_shuffle:\n # Shuffle images and labels\n indices = np.arange(len(y_set))\n np.random.shuffle(indices)\n excerpt = indices[0:len(y_set)]\n else:\n excerpt = np.arange(len(y_set))\n\n return X_set[excerpt], Y_set[excerpt]","sub_path":"code/python/ADNI1_mni/get_data_ad.py","file_name":"get_data_ad.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"96375295","text":"'''\nDetermine whether a doubly linked list is a palindrome. What if it’s singly linked?\n\nFor example, 1 -> 4 -> 3 -> 4 -> 1 returns True while 1 -> 4 returns False.\n'''\n\n\ndef checkPal(inputList):\n\tif inputList == inputList[::-1]:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\nsampleOne = [1, 4, 3, 4, 1]\nsampleTwo = [1, 4]\n\nprint(checkPal(sampleOne))\nprint(checkPal(sampleTwo))","sub_path":"day104_palindromicLists/day104.py","file_name":"day104.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"135420252","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport matplotlib.pyplot as plt\n'''\n Runs the Logistic Regression Classifier\n'''\n\n\nclass Logistic_Regression:\n def __init__(self, file_name, X_train, y_train, X_test, y_test):\n self.f = open(file_name, 'w')\n self.Logistic_Classifier_3_positive(X_train, y_train, X_test, y_test)\n self.Logistic_Classifier_3_negative(X_train, y_train, X_test, y_test)\n\n def Logistic_Classifier_3_positive(self, X_train, y_train, X_test, y_test):\n '''\n Make binary classification of positive or negative\n Considering star rating 3 as positive\n Logistic Regression Classifier\n '''\n clf = LogisticRegression(\n random_state=0,\n solver='lbfgs',\n multi_class='multinomial',\n max_iter=100)\n # Change the ratings to\n new_y_train1 = self.process_Y1(y_train)\n new_y_test1 = self.process_Y1(y_test)\n clf.fit(X_train, new_y_train1)\n y_pred = clf.predict(X_test)\n self.write_to_file(\"Star Rating 3 considered as positive\")\n self.write_to_file(\"\\n\")\n self.write_to_file('Logistic Regression Accuracy: %.2f' %\n accuracy_score(new_y_test1, y_pred))\n self.write_to_file(\"\\n\")\n self.write_to_file('Classification Report:')\n self.write_to_file(classification_report(new_y_test1, y_pred))\n\n def process_Y1(self, y_):\n '''\n Considering 3 as positive\n '''\n newY = []\n for i in range(len(y_)):\n if float(y_[i]) < 3.0: newY.append(-1)\n else: newY.append(1)\n return newY\n\n def Logistic_Classifier_3_negative(self, X_train, y_train, X_test, y_test):\n '''\n Make binary classification of positive or negative\n Considering star rating 3 as positive\n Run the Logistic Regression and Print Accuracy\n '''\n clf = LogisticRegression(\n random_state=0,\n solver='lbfgs',\n multi_class='multinomial',\n max_iter=100)\n new_y_train2 = self.process_Y2(y_train)\n new_y_test2 = self.process_Y2(y_test)\n clf.fit(X_train, new_y_train2)\n y_pred = clf.predict(X_test)\n self.write_to_file(\"\\n\")\n self.write_to_file(\"Star Rating 3 considered as negative\")\n self.write_to_file(\"\\n\")\n self.write_to_file('Logistic Regression Accuracy: %.2f' %\n accuracy_score(new_y_test2, y_pred))\n self.write_to_file(\"\\n\")\n self.write_to_file('Classification Report:')\n self.write_to_file(classification_report(new_y_test2, y_pred))\n\n def process_Y2(self, y_):\n '''\n Considering 3 as negative\n '''\n newY = []\n for i in range(len(y_)):\n if float(y_[i]) <= 3.0: newY.append(-1)\n else: newY.append(1)\n return newY\n\n def write_to_file(self, text):\n self.f.writelines(text)\n\n\ndef run_various_classifiers(X_train, y_train, X_test, y_test):\n solvers = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']\n indx = 1\n for sol in solvers:\n clf = LogisticRegression(\n random_state=0,\n solver=sol,\n multi_class='multinomial',\n max_iter=100)\n # Change the ratings to\n new_y_train1 = process_Y2(y_train)\n new_y_test1 = process_Y2(y_test)\n clf.fit(X_train, new_y_train1)\n y_pred = clf.predict(X_test)\n plt.plot(indx, accuracy_score(new_y_test1, y_pred))\n indx += 1\n plt.title(\"Logistic Regression Solvers and their accuracies\")\n plt.xlabel(\"Solvers\")\n plt.ylabel(\"Accuracy\")\n plt.xticks(solvers)\n plt.show()\n\n\ndef process_Y2(y_):\n '''\n Considering 3 as negative\n '''\n newY = []\n for i in range(len(y_)):\n if float(y_[i]) <= 3.0: newY.append(-1)\n else: newY.append(1)\n return newY\n","sub_path":"src/classifiers/classifier_logisticregression.py","file_name":"classifier_logisticregression.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"304167759","text":"###################################\n## Driftwood 2D Game Dev. Suite ##\n## pathmanager.py ##\n## Copyright 2014 PariahSoft LLC ##\n###################################\n\n## **********\n## Permission is hereby granted, free of charge, to any person obtaining a copy\n## of this software and associated documentation files (the \"Software\"), to\n## deal in the Software without restriction, including without limitation the\n## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n## sell copies of the Software, and to permit persons to whom the Software is\n## furnished to do so, subject to the following conditions:\n##\n## The above copyright notice and this permission notice shall be included in\n## all copies or substantial portions of the Software.\n##\n## THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n## IN THE SOFTWARE.\n## **********\n\nimport os\nimport zipfile\n\n\nclass PathManager:\n \"\"\"The Path Manager\n\n Simple path abstraction class which maintains a list of data pathnames and a simple virtual filesystem for files\n therein. The class supports directories and zip archives as valid pathnames.\n\n The last item on the path has the highest priority; if a file exists in multiple pathnames, the last occurence is\n the only one recorded in the virtual filesystem.\n\n Attributes:\n driftwood: Base class instance.\n \"\"\"\n\n def __init__(self, driftwood):\n \"\"\"PathManager class initializer.\n\n Args:\n driftwood: Base class instance.\n \"\"\"\n self.driftwood = driftwood\n\n self.__vfs = {}\n\n self.__root = self.driftwood.config[\"path\"][\"root\"] # Path root.\n self.__path = [self.driftwood.config[\"path\"][\"self\"]] # Start with base module.\n\n if self.driftwood.config[\"path\"][\"path\"]:\n # Start with the configured path.\n self.append(self.driftwood.config[\"path\"][\"path\"])\n\n else:\n self.rebuild()\n\n def __contains__(self, item):\n if self.find(item):\n return True\n return False\n\n def __getitem__(self, item):\n if self.__contains__(item):\n return self.find(item)\n\n def examine(self, pathname):\n \"\"\"Examine a directory or zip archive pathname and return the list of filenames therein.\n\n Args:\n pathname: Pathname to examine.\n\n Returns:\n Tuple of files inside the pathname.\n \"\"\"\n filelist = []\n\n try:\n # This is a directory.\n if os.path.isdir(pathname):\n for root, dirs, files in os.walk(pathname):\n for name in files:\n filelist.append(name)\n\n # This is hopefully a zip archive.\n else:\n with zipfile.ZipFile(pathname, 'r') as zf:\n for name in zf.namelist():\n filelist.append(name)\n\n except:\n self.driftwood.log.msg(\"ERROR\", \"Path\", \"could not examine pathname\", pathname)\n\n return tuple(filelist)\n\n def rebuild(self):\n \"\"\"Rebuild the vfs.\n\n Rebuild the virtual filesystem from the path list, and make sure the base module is at the top.\n \"\"\"\n basepath = self.driftwood.config[\"path\"][\"self\"]\n\n # If the base module is missing, put it back at the top.\n if self.__path[0] != basepath:\n if basepath in self.__path:\n self.__path.remove(basepath)\n self.__path.insert(0, basepath)\n\n # Scan all pathnames for the files they contain and rebuild the vfs.\n for pathname in self.__path:\n filelist = self.examine(pathname)\n for name in filelist:\n self.__vfs[name] = pathname\n\n self.driftwood.log.info(\"Path\", \"rebuilt\")\n\n def prepend(self, pathnames):\n \"\"\"Prepend pathnames to the path list.\n\n Prepend additional pathnames to the path list, preserving their order. If any of the pathnames already exist,\n their priority is adjusted for their new position.\n\n Args:\n pathnames: List of pathnames to prepend.\n \"\"\"\n if not pathnames:\n return\n pathnames = list(pathnames)\n\n for i in range(len(pathnames)):\n # Jail the pathname to root.\n pathnames[i] = os.path.join(self.__root, pathnames[i])\n\n # Remove duplicates so they can be added back in the new order.\n if pathnames[i] in self.__path:\n self.__path.remove(pathnames[i])\n\n # Prepend.\n pathnames.extend(self.__path)\n self.__path = pathnames\n\n self.driftwood.log.info(\"Path\", \"prepended\", \", \".join(pathnames))\n\n self.rebuild()\n\n def append(self, pathnames):\n \"\"\"Append pathnames to the path list.\n\n Append additional pathnames to the path list, preserving their order. If any of the pathnames already exist,\n their priority is adjusted for their new position.\n\n Args:\n pathnames: List of pathnames to append.\n \"\"\"\n if not pathnames:\n return\n pathnames = list(pathnames)\n\n for i in range(len(pathnames)):\n # Jail the pathname to root.\n pathnames[i] = os.path.join(self.__root, pathnames[i])\n\n # Remove duplicates so they can be added back in the new order.\n if pathnames[i] in self.__path:\n self.__path.remove(pathnames[i])\n\n # Append.\n self.__path.extend(pathnames)\n\n self.driftwood.log.info(\"Path\", \"appended\", \", \".join(pathnames))\n\n self.rebuild()\n\n def remove(self, pathnames):\n \"\"\"Remove pathnames from the path list if present.\n\n Args:\n pathnames: List of pathnames to remove.\n \"\"\"\n if not pathnames:\n return\n pathnames = list(pathnames)\n\n for pn in pathnames:\n # Search in root where pathnames are jailed.\n pn = os.path.join(self.__root, pn)\n\n # Remove.\n if pn in self.__path:\n self.__path.remove(pn)\n\n self.driftwood.log.info(\"Path\", \"removed\", \", \".join(pathnames))\n\n self.rebuild()\n\n def find(self, filename, pathname=None):\n \"\"\"Find a filename's pathname.\n\n Return the pathname which owns the filename, if present. If pathname is set, check that specific pathname for\n existence of the file instead of checking the path list.\n\n Args:\n filename: The filename whose pathname to find.\n pathname: (optional) Check only this pathname.\n\n Returns:\n The pathname which owns the filename, if any.\n \"\"\"\n if pathname:\n if filename in self.examine(pathname):\n return pathname\n elif filename in self.__vfs:\n return self.__vfs[filename]\n","sub_path":"src/pathmanager.py","file_name":"pathmanager.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"512146081","text":"from django.conf.urls import url\nfrom . import views \n\n\nurlpatterns = [\n\turl(r'^$', views.index),\n\turl(r'^add$', views.add),\n\turl(r'^submit$', views.submit),\n\turl(r'^display/(?P\\d+)$', views.display),\n\turl(r'^addfriend', views.addfriend),\n\turl(r'^addcondolence', views.addcondolence),\n\turl(r'^addstory', views.addstory),\n\turl(r'^addimage', views.addimage),\n\turl(r'^edit/(?P\\d+)$', views.edit),\n\turl(r'^deletestory$', views.deletestory),\n\turl(r'^deleteimage$', views.deleteimage),\n\turl(r'^deletefriend$', views.deletefriend),\n\turl(r'^deletecondolence$', views.deletecondolence),\n\turl(r'^deletecondolence$', views.deletecondolence),\n\turl(r'^editpage$', views.editpage),\n\turl(r'^slideshow$', views.displayslideshow),\n]\n","sub_path":"apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"395125626","text":"import subprocess\nimport os\nimport json\n\n#---------------------------------Global resources-----------------------------#\nhost_ip='130.238.29.42'\nhost_port='5000'\nproblems_available=['problem1a','problem1b','problem1c','problem1b_II','all_problems']\n\n#Variable arguments, used to build argument line\nfirst_s = 'first_s='\nsecond_s = 'second_s='\nthird_s = 'third_s='\nK = 'k='\nT = 't='\nr = 'r='\nsig = 'sig='\nb_mul = 'b_mul='\nfirst_u = 'first_u='\nsecond_u = 'second_u='\nthird_u = 'third_u='\n\n#Json data\ndata_sets = ['S_1','S_2','S_3','K','T','r','sig','b_mul','U_1','U_2','U_3']\nproblem_sets = ['problem1a','problem1b','problem1c','problem1b_II']\nf = open('parameters.json')\ndata = json.load(f)\n\n#---------------------------------Script body----------------------------------#\n#List and choose problem(s)\nfor i in range(len(problems_available)):\n\tprint(str(i)+' '+problems_available[i])\n\nproblem = problems_available[int(input('choose problem '))]\n\n#Modify parameters. NOTE: Does not affect stored data\nif input('use stored parameters (yes/no)? ').lower() == 'no':\n\tprint('Input values for each parameter, leave blank to use stored value')\n\n\tif (problem == 'all_problems'):\n\t\tfor prob in problem_sets:\n\t\t\tprint(prob)\n\t\t\tfor elem in data_sets:\n\t\t\t\ttmp = input(elem +':'+str(data[prob][elem]) + '\\t new value: ')\n\t\t\t\tif (tmp != ''):\n\t\t\t\t\tdata[prob][elem] = int(tmp)\n\n\telse:\n\t\tfor elem in data_sets:\n\t\t\ttmp = input(elem +':'+str(data[problem][elem]) + '\\t new value: ')\n\t\t\tif (tmp != ''):\n\t\t\t\tdata[problem][elem] = int(tmp)\n\nelse:\n\tprint('Using stored parameters')\n\n#Extract data to argument fields\nif (problem == 'all_problems'):\n\tfor elem in problem_sets:\n\t first_s += str(data[elem]['S_1']) + ','\n\t second_s += str(data[elem]['S_2']) + ','\n\t third_s += str(data[elem]['S_3']) + ','\n \tK += str(data[elem]['K']) + ','\n \tT += str(data[elem]['T']) + ','\n \tr += str(data[elem]['r']) + ','\n \tsig += str(data[elem]['sig']) + ','\n \t\tb_mul += str(data[elem]['b_mul']) + ','\n \tfirst_u += str(data[elem]['U_1']) + ','\n \tsecond_u += str(data[elem]['U_2']) + ','\n \tthird_u += str(data[elem]['U_3']) + ','\n\nelse:\n\tfirst_s += str(data[problem]['S_1'])\n\tsecond_s += str(data[problem]['S_2'])\n\tthird_s += str(data[problem]['S_3'])\n\tK += str(data[problem]['K'])\n\tT += str(data[problem]['T'])\n\tr += str(data[problem]['r'])\n\tsig += str(data[problem]['sig'])\n\tb_mul += str(data[problem]['b_mul'])\n\tfirst_u += str(data[problem]['U_1'])\n\tsecond_u += str(data[problem]['U_2'])\n\tthird_u += str(data[problem]['U_3'])\n\n \n#Build argument string to be used by curl\nargs = '?' + first_s+'&' + second_s+'&' + third_s+'&' + K+'&' + T+'&' + r+'&' + sig+'&' + b_mul+'&' + first_u+'&' + second_u+'&' + third_u.rstrip(',')+'\"'\n#Build complete curl command\ntarget = 'curl -i \"http://' + host_ip + ':' + host_port + '/' + problem + args.replace(',&' , '&') + ' > ' + problem + '.txt'\n\nprint(target)\nsubprocess.call(target, shell=True)\n\n","sub_path":"userscript/userscript.py","file_name":"userscript.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"213332225","text":"# -*- coding: utf8 -*-\n\n\"\"\"\nEines per respondre les peticions de SISAP-Rev.\n\"\"\"\n\nfrom sidiap.config import RAMIFY_REDIS_INST, FREEZE_DB, TOOLS_DB, INPUT_DATA, \\\n RAMIFY_POB, RAMIFY_METRICS, RAMIFY_REDIS_CHAR\nimport sisaptools as u\n\n\nclass RevPatient(object):\n \"\"\"Retorna tota la informació d'un pacient.\"\"\"\n\n def __init__(self, key):\n \"\"\"Inicialització i execució seqüencial.\"\"\"\n self.key = key\n self.get_patient()\n self.get_param()\n self.get_cataleg()\n self.get_titles()\n self.get_data()\n self.get_result()\n\n def get_patient(self):\n \"\"\"ID del pacient.\"\"\"\n try:\n self.id = int(self.key)\n except ValueError:\n ids = RAMIFY_REDIS_INST['H'].smembers(self.key)\n self.id = int(ids.pop()) if len(ids) == 1 else None\n\n def get_param(self):\n \"\"\"Paràmetres a retornar a l'aplicació.\"\"\"\n sql = \"select date_format(data_ext, '%Y-%m-%d') from dextraccio\"\n self.param = [\n 'ID: {}'.format(self.id),\n 'Font: SIDIAP-RAM',\n 'Data: {}'.format(u.Database(*FREEZE_DB).get_one(sql)[0])\n ]\n\n def get_cataleg(self):\n \"\"\"Capturar catàleg.\"\"\"\n sql = 'select cataleg, codi, descripcio from ds_codis'\n self.cataleg = {(str(cat), cod): des for (cat, cod, des)\n in u.Database(*TOOLS_DB).get_all(sql)}\n\n def get_titles(self):\n \"\"\"Títols de columnes.\"\"\"\n self.titles = {}\n for domini, dades in INPUT_DATA['dominis'].items():\n if domini == RAMIFY_POB:\n self.titles[domini] = [field.strip().split(' ')[0]\n for field\n in dades['sql_d'][2:]]\n else:\n fields = ['cod', 'des', 'dat']\n if 'val' in dades['fields']:\n fields.append('val')\n if 'val_text' in dades:\n fields.append('val_txt')\n if 'attrs' in dades['fields']:\n for attr in dades['fields']['attrs']:\n fields.append(attr[0])\n fields.append('mark')\n self.titles[domini] = fields\n\n def get_data(self):\n \"\"\"Capturar dades de Redis.\"\"\"\n self.data = {}\n sep = RAMIFY_REDIS_CHAR['SEP']\n end = RAMIFY_REDIS_CHAR['END']\n if self.id:\n cluster = self.id % RAMIFY_METRICS['REDIS_INSTANCES']\n data = RAMIFY_REDIS_INST[cluster].hgetall(self.id)\n for domini, dades in data.items():\n if domini == RAMIFY_POB:\n self.data[domini] = [reg.split(sep)[2:]\n for reg in dades.split(end)[:-1]]\n else:\n cat = INPUT_DATA['dominis'][domini]['cataleg']\n self.data[domini] = [\n [reg.split(sep)[1]] +\n [self.cataleg[cat, reg.split(sep)[1]]] +\n reg.split(sep)[2:]\n for reg in dades.split(end)[:-1]]\n\n def get_result(self):\n \"\"\"Resultat en pestanyes.\"\"\"\n data = []\n for pestanya in INPUT_DATA['pestanyes']:\n data.append([pestanya['title'],\n [[domini,\n INPUT_DATA['dominis'][domini]['domini'],\n None,\n None,\n self.titles[domini],\n self.data[domini] if domini in self.data else []]\n for domini in pestanya['info']]])\n self.result = [{'param': self.param, 'data': data},\n \"fi_de_la_transmissio\"]\n","sub_path":"sidiap/service/rev.py","file_name":"rev.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"554266599","text":"#!/usr/bin/python -tt\r\n# Copyright 2010 Google Inc.\r\n# Licensed under the Apache License, Version 2.0\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n\"\"\"Wordcount exercise\r\nGoogle's Python class\r\nThe main() below is already defined and complete. It calls print_words()\r\nand print_top() functions which you write.\r\n1. For the --count flag, implement a print_words(filename) function that counts\r\nhow often each word appears in the text and prints:\r\nword1 count1\r\nword2 count2\r\n...\r\nPrint the above list in order sorted by word (python will sort punctuation to\r\ncome before letters -- that's fine). Store all the words as lowercase,\r\nso 'The' and 'the' count as the same word.\r\n2. For the --topcount flag, implement a print_top(filename) which is similar\r\nto print_words() but which prints just the top 20 most common words sorted\r\nso the most common word is first, then the next most common, and so on.\r\nUse str.split() (no arguments) to split on all whitespace.\r\nWorkflow: don't build the whole program at once. Get it to an intermediate\r\nmilestone and print your data structure and sys.exit(0).\r\nWhen that's working, try for the next milestone.\r\nOptional: define a helper function to avoid code duplication inside\r\nprint_words() and print_top().\r\n\"\"\"\r\ndef helper(fname):\r\n text=open(fname,'r')\r\n a=text.read().lower()\r\n for i in range(len(a)):\r\n if(a[i].isalnum()==False):\r\n if(a[i]!=' ' and a[i]!='\\n'):\r\n a=list(a)\r\n a[i]='~'\r\n a=''.join(a)\r\n a=list(a) \r\n while(a.count('~')!=0):\r\n a.remove('~')\r\n a=''.join(a)\r\n a=a.split()\r\n d={}\r\n for i in a:\r\n if i not in d:\r\n d[i]=1\r\n else:\r\n d[i]=d[i]+1\r\n return d\r\n\r\n\r\n\r\ndef print_words(filename):\r\n txt=helper(filename)\r\n for key in sorted(txt):\r\n print(key,' ',txt[key])\r\n\r\n\r\ndef print_top(filename):\r\n txt=helper(filename)\r\n key=sorted(txt,reverse=True,key=txt.__getitem__)\r\n for i in range(20):\r\n print(key[i],' ',txt[key[i]])\r\n \r\n \r\nimport sys\r\n\r\n# +++your code here+++\r\n# Define print_words(filename) and print_top(filename) functions.\r\n# You could write a helper utility function that reads a file\r\n# and builds and returns a word/count dict for it.\r\n# Then print_words() and print_top() can just call the utility function.\r\n\r\n###\r\n\r\n# This basic command line argument parsing code is provided and\r\n# calls the print_words() and print_top() functions which you must define.\r\ndef main():\r\n if len(sys.argv) != 3:\r\n print('usage: ./wordcount.py {--count | --topcount} file')\r\n sys.exit(1)\r\n\r\n option = sys.argv[1]\r\n filename = sys.argv[2]\r\n if option == '--count':\r\n print_words(filename)\r\n elif option == '--topcount':\r\n print_top(filename)\r\n else:\r\n print('unknown option: ' + option)\r\n sys.exit(1)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"week3/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"118284382","text":"\"\"\"\n@author: jamielu\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nos.chdir(\"/Users/yourfilepathdirectory\")\n\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n\n\n#combine all csv files in the list\ncombined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])\n#export to csv\ncombined_csv.to_csv( \"combined_file.csv\", index=False, encoding='utf-8-sig')\n\n# Load the Pandas libraries with alias 'pd' \nimport pandas as pd \n\n# Read data from file 'combined_file.csv' \n# (in the same directory that your python process is based)\n\ndata = pd.read_csv(\"combined_file.csv\") \n\n# Preview the first 5 lines of the loaded data \ndata.head()\n\n#transform svy21 to wgs84 coordinates format\nfrom pyproj import Proj, transform\n\ninProj = Proj(init='epsg:3414')\noutProj = Proj(init='epsg:4326')\n\n# convert coordinates subset dataframe to array (arrays are fastest)\nyarray = data['y'].values\nxarray = data['x'].values\n\n# syntax pyproj.transform <- x2, y2, z2 = transform(p1, p2, x1, y1, z1)\n\nlon,lat = transform(inProj,outProj,xarray, yarray)\n\nprint(lon,lat)\n\n\n#convert lat (y) lon(x) array to df and add it back to original df\ndata['lat'] = pd.DataFrame(lat)\ndata['lon'] = pd.DataFrame(lon)\n\n#convert data df to csv file, removing the index number\ndata.to_csv('latlon_data.csv',index=False)","sub_path":"convert_svy21_to_wgs84_github.py","file_name":"convert_svy21_to_wgs84_github.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"61743084","text":"\"\"\"\n @author Victor I. Afolabi\n A.I. engineer/researcher & Software engineer\n javafolabi@gmail.com\n \n Created on 05 January, 2018 @ 3:09 PM.\n \n Copyright © 2018. Victor. All rights reserved.\n\"\"\"\n\nimport numpy as np\nfrom keras.layers import Conv2D, Dense, Flatten\nfrom keras.layers import InputLayer\nfrom keras.layers import Reshape, MaxPooling2D\nfrom keras.models import Sequential\n\nfrom models.neural_network.base import NeuralNetwork\n\n\nclass CNN(NeuralNetwork):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._model = Sequential()\n\n # keyword arguments\n self.kernel_size = kwargs.get('kernel_size', 5)\n self.filters = kwargs.get('filters', [16, 32])\n self.dense = kwargs.get('dense', [512, 1024])\n\n # Input parameters\n self._img_size = self._dataset.size\n self._img_channel = self._dataset.channel\n self._img_shape = [self._img_size, self._img_size, self._img_channel]\n self._img_size_flat = self._img_size * self._img_size * self._img_channel\n # self._img_size_flat = np.prod(self._img_shape)\n\n def fit(self, X, y, epochs=5):\n # Input layer\n self._model.add(InputLayer(input_shape=[self._img_size_flat]))\n self._model.add(Reshape(target_shape=self._img_shape))\n # Convolutional Layers\n for i, f in enumerate(self.filters):\n self._model.add(Conv2D(filters=f, kernel_size=self.kernel_size,\n padding='same', activation='relu', name=f'layer_conv{i}'))\n self._model.add(MaxPooling2D(strides=2, padding='same'))\n # flatten layer\n self._model.add(Flatten())\n # Fully Connected Layers\n for i, fc in enumerate(self.dense):\n self._model.add(Dense(units=fc, activation='relu', name=f'layer_fc{i}'))\n # Output layer\n self._model.add(Dense(units=self._dataset.num_classes, activation='softmax'))\n\n # Compilation\n self._model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n # Training\n self._model.fit(x=X, y=y, epochs=epochs, batch_size=self._batch_size)\n return self._model\n\n def predict(self, X):\n y_pred = self._model.predict(x=X)\n cls_pred = np.argmax(y_pred, axis=1)\n return y_pred, cls_pred\n\n def score(self, X, y):\n result = self._model.evaluate(x=X, y=y)\n # {'loss': 0.550, 'acc': 0.9821}\n return dict(zip(self._model.metrics_names, result))\n\n def save(self, filename):\n pass\n","sub_path":"models/neural_network/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"191684163","text":"from .tool.func import *\n\ndef give_history_hidden_2(conn, name):\n curs = conn.cursor()\n\n num = number_check(flask.request.args.get('num', '1'))\n\n if admin_check(6, 'history_hidden (' + name + '#' + num + ')') == 1:\n curs.execute(db_change(\"select title from history where title = ? and id = ? and hide = 'O'\"), [name, num])\n if curs.fetchall():\n curs.execute(db_change(\"update history set hide = '' where title = ? and id = ?\"), [name, num])\n else:\n curs.execute(db_change(\"update history set hide = 'O' where title = ? and id = ?\"), [name, num])\n\n conn.commit()\n\n return redirect('/history/' + url_pas(name))","sub_path":"route/give_history_hidden.py","file_name":"give_history_hidden.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"564732412","text":"import argparse\n\nfrom mmcv import Config\n\nfrom mmdet.models import build_detector\n\nimport torch\n\nimport matplotlib.pyplot as plt\nimport mmcv\nimport warnings\nfrom mmcv.ops import RoIAlign, RoIPool\nfrom mmcv.parallel import collate, scatter\nfrom mmcv.runner import load_checkpoint\n\nfrom mmdet.core import get_classes\nfrom mmdet.datasets.pipelines import Compose\nfrom mmdet.models import build_detector\nfrom mmdet.apis import init_detector, inference_detector\n\n\nclass LoadImage(object):\n \"\"\"A simple pipeline to load image.\"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to load images into results.\n\n Args:\n results (dict): A result dict contains the file name\n of the image to be read.\n\n Returns:\n dict: ``results`` will be returned containing loaded image.\n \"\"\"\n if isinstance(results['img'], str):\n results['filename'] = results['img']\n results['ori_filename'] = results['img']\n else:\n results['filename'] = None\n results['ori_filename'] = None\n img = mmcv.imread(results['img'])\n results['img'] = img\n results['img_fields'] = ['img']\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n return results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train a detector')\n parser.add_argument('config', help='train config file path')\n parser.add_argument(\n '--checkpoint', help='the checkpoint file to trace')\n parser.add_argument(\n '--tracedbone', help='the name of tracedpoint')\n parser.add_argument(\n '--tracedshared', help='the name of tracedpoint')\n parser.add_argument(\n '--tracedbbox', help='the name of tracedpoint')\n parser.add_argument(\n '--shape',\n type=int,\n nargs='+',\n default=[1333, 800],\n help='input image size')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n\n if len(args.shape) == 1:\n input_shape = (1, 3, args.shape[0], args.shape[0])\n elif len(args.shape) == 2:\n input_shape = (1, 3, ) + tuple(args.shape)\n else:\n raise ValueError('invalid input shape')\n\n cfg = Config.fromfile(args.config)\n model = build_detector(\n cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()\n\n if hasattr(model, 'forward_trace'):\n model.forward = model.forward_trace\n else:\n raise NotImplementedError\n\n checkpoint = torch.load(args.checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n model.eval()\n print(\"=========================tracedbone===============================\")\n img = torch.rand(input_shape).cuda()\n traced_bone = torch.jit.trace(model, img)\n traced_bone.save(args.tracedbone)\n\n bbox_feats = torch.rand(1000, 256, 7, 7).cuda()\n if model.with_shared_head:\n print(\"=========================shared_head===============================\")\n traced_shared = torch.jit.trace(model.roi_head.shared_head, bbox_feats)\n traced_shared.save(args.tracedshared)\n\n print(\"==========================bbox_head================================\")\n traced_bbox = torch.jit.trace(model.roi_head.bbox_head, bbox_feats)\n traced_bbox.save(args.tracedbbox)\n\n # print(\"=====================inference_detector===========================\")\n # from mmdet.apis import inference_detector, init_detector\n # model = init_detector(\n # args.config, args.checkpoint, device=torch.device('cuda', 0))\n # result = inference_detector(model, \"/run/media/eric/DATA/industrial/mmdetection/demo/demo.jpg\")\n # print(result[0].shape)\n # print(result[0])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/get_trace_fasterrcnn.py","file_name":"get_trace_fasterrcnn.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"598651548","text":"# SPDX-FileCopyrightText: Copyright (c) 2021 Artёm IG \n# SPDX-FileCopyrightText: Copyright (c) 2015 Jean-Ralph Aviles\n# SPDX-License-Identifier: BSD-3-Clause\n\nfrom typing import Iterable\n\nfrom commie.x01_common import Comment, Span\n\n\ndef extract_comments(code: str) -> Iterable[Comment]:\n\t\"\"\"Extracts a list of comments from the given shell script.\n\tComments are represented with the Comment class found in the common module.\n\tShell script comments only come in one form, single-line. Single line\n\tcomments start with an unquoted or unescaped '#' and continue on until the\n\tend of the line. A quoted '#' is one that is located within a pair of\n\tmatching single or double quote marks. An escaped '#' is one that is\n\timmediately preceeded by a backslash '\\'\n\tArgs:\n\t code: String containing code to extract comments from.\n\tReturns:\n\t Python list of common.Comment in the order that they appear in the code.\n\t\"\"\"\n\n\tDEFAULT = 0\n\tIN_COMMENT = 1\n\tIN_STRING = 2\n\tESCAPING_CHAR_INSIDE_STRING = 3\n\tESCAPING_CHAR_OUTSIDE_OF_STRING = 4\n\n\tstate = DEFAULT\n\tstring_char = ''\n\tcurrent_comment_text = ''\n\tline_counter = 1\n\n\tcomment_start_pos = None\n\tposition = -1\n\n\tfor position, char in enumerate(code):\n\t\tif state == DEFAULT:\n\t\t\t# Waiting for comment start character, beginning of string,\n\t\t\t# or escape character.\n\t\t\tif char == '#':\n\t\t\t\tstate = IN_COMMENT\n\t\t\t\tcomment_start_pos = position\n\t\t\telif char in ('\"', \"'\"):\n\t\t\t\tstring_char = char\n\t\t\t\tstate = IN_STRING\n\t\t\telif char == '\\\\':\n\t\t\t\tstate = ESCAPING_CHAR_OUTSIDE_OF_STRING\n\t\telif state == IN_COMMENT:\n\t\t\tif char == '\\n':\n\t\t\t\tyield Comment(code, code_span=Span(comment_start_pos, position),\n\t\t\t\t\t\t\t text_span=Span(comment_start_pos + 1, position), multiline=False)\n\t\t\t\tcurrent_comment_text = ''\n\t\t\t\tstate = DEFAULT\n\t\t\telse:\n\t\t\t\tcurrent_comment_text += char\n\t\telif state == IN_STRING:\n\t\t\tif char == string_char:\n\t\t\t\tstate = DEFAULT\n\t\t\telif char == '\\\\':\n\t\t\t\tstate = ESCAPING_CHAR_INSIDE_STRING\n\t\telif state == ESCAPING_CHAR_INSIDE_STRING:\n\t\t\tstate = IN_STRING\n\t\telif state == ESCAPING_CHAR_OUTSIDE_OF_STRING:\n\t\t\t# Escaping current char, outside of string.\n\t\t\tstate = DEFAULT\n\t\tif char == '\\n':\n\t\t\tline_counter += 1\n\n\t# end of file\n\n\tif state == IN_COMMENT:\n\t\tyield Comment(code, code_span=Span(comment_start_pos, position + 1),\n\t\t\t\t\t text_span=Span(comment_start_pos + 1, position + 1), multiline=False)\n","sub_path":"commie/parsers/shell_parser_state.py","file_name":"shell_parser_state.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"356314119","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 02 16:06:08 2018\r\n\r\n@author: zz\r\n\"\"\"\r\nimport numpy as np\r\nf = open('example_data_1.txt', 'r')\r\ndata=[]\r\nfor line in f:\r\n individual=[]\r\n line_content=line.strip()\r\n for snp in line_content.split(' '):\r\n if len(snp)!=0:\r\n individual.append(int(snp))\r\n \r\n data.append(individual)\r\nmyarray=np.array(data)\r\ntrue_data=np.transpose(myarray)\r\n#print true_data.shape\r\n#print true_data.shape\r\n\r\n\r\n#n=int(:\r\n #process_window(true_data[]) #seperate windows \r\n\r\ndef count_hetero(array_data): #input as np.array (n,1)\r\n count=0\r\n for i in range(array_data.shape[0]):\r\n if array_data[i]==1:\r\n count=count+1\r\n return count \r\n\r\ndef generate_permute(l): # input as [1,1,1]\r\n if l==[1]:\r\n return [[0],[1]]\r\n else:\r\n smallerlistlarger=[]\r\n for smallerlist in generate_permute(l[1:]):\r\n smallerlistlarger.append([0]+smallerlist)\r\n smallerlistlarger.append([1]+smallerlist)\r\n return smallerlistlarger\r\n\r\ndef create_unique_pair(l): #input a list of haplo\r\n mypairs=[]\r\n #mypairsnew=[]\r\n #set_list=[]\r\n for i in range(len(l)):\r\n \r\n thepair=[]\r\n for j in range(len(l[i])):\r\n n=1-l[i][j]\r\n thepair.append(n)\r\n if [l[i],thepair] not in mypairs and [thepair,l[i]] not in mypairs:\r\n mypairs.append([l[i],thepair])\r\n \r\n \r\n \r\n \r\n \r\n return mypairs \r\n\r\n \r\ndef create_pairs_haplos(data_array): #np.array shape (n,1)\r\n current_hetero_index=0\r\n #calculate which hetero position we are currently visiting.\r\n \r\n generated_haplo_pair_list=[]\r\n for i in range(2**(count_hetero(data_array)-1)):\r\n generated_haplo_pair_list.append([[],[]])\r\n #generated_haplo_pair_list=[]\r\n \r\n l=[]\r\n len_hetero=count_hetero(data_array)\r\n for i in range(len_hetero):\r\n l.append(1)\r\n single_haplo_list=generate_permute(l)\r\n hetero_pairs=create_unique_pair(single_haplo_list)\r\n for i in range(data_array.shape[0]):\r\n if data_array[i]==1:\r\n for p in range(len(hetero_pairs)):\r\n generated_haplo_pair_list[p][0].append(hetero_pairs[p][0][current_hetero_index])\r\n generated_haplo_pair_list[p][1].append(hetero_pairs[p][1][current_hetero_index])\r\n current_hetero_index=current_hetero_index+1\r\n elif data_array[i]==0:\r\n generated_haplo_pair_list[p][0].append(0)\r\n generated_haplo_pair_list[p][1].append(0)\r\n else:\r\n generated_haplo_pair_list[p][0].append(1)\r\n generated_haplo_pair_list[p][1].append(1)\r\n \r\n return generated_haplo_pair_list \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\ndef process_window(data):\r\n haplo_output=np.array(data.shape[0]*2,data.shape[1])\r\n mydict={}\r\n compatible_haplo_pairs=[]\r\n for i in range(data.shape[0]):\r\n compatible_haplo_pairs.append(0)\r\n \r\n for i in range(data.shape[0]): #for 1 person\r\n compatible_haplo_pairs[i]=create_pairs_haplos(data[i,:])\r\n all_compatible_haplos=[]\r\n for i in range(data.shape[0]):\r\n for pair in compatible_haplo_pairs[i]:\r\n if pair[0] not in all_compatible_haplos:\r\n all_compatible_haplos.append(pair[0])\r\n if pair[1] not in all_compatible_haplos:\r\n all_compatible_haplos.append(pair[1]) \r\n mydict={}\r\n for haplo in all_compatible_haplos:\r\n mydict[haplo]=1.0/len(all_compatible_haplos) #initialization\r\n mydict2_list=[] #a list of prob dictionary for haplo pairs in each genotype\r\n for i in range(data.shape[0]):\r\n mydict2={}\r\n for j in range(len(compatible_haplo_pairs[i])): #initialization\r\n mydict2[compatible_haplo_pairs[i][j]]=1.0/len(compatible_haplo_pairs[i])\r\n mydict2_list.append(mydict2)\r\n not_converge=True\r\n change_prob=[]\r\n for i in range(len(all_compatible_haplos)):\r\n change_prob.append[0]\r\n for i in range(1000):\r\n \r\n for j in range(len(all_compatible_haplos)):\r\n if change_prob[j]>0.01:\r\n \r\n break\r\n if j==len(all_compatible_haplos)-1:\r\n not_converge=False\r\n if not_converge==False:\r\n break\r\n if not_converge:\r\n new_prob_dict={}\r\n for c in range(len(all_compatible_haplos)):\r\n prob_sum=0\r\n for x in range(data.shape[0]):\r\n find_in_this_geno=False\r\n for o in mydict2_list[x]:\r\n if all_compatible_haplos[c] in o:\r\n if count_hetero(data[x,:])==0:\r\n prob_sum=prob_sum+2.0*mydict2_list[x][o]\r\n else:\r\n prob_sum=prob_sum+mydict2_list[x][o]\r\n find_in_this_geno=True\r\n break \r\n prob_sum=prob_sum/(2.0*data.shape[0])\r\n change_prob[c]=abs(prob_sum-mydict[c])\r\n mydict[c]=prob_sum\r\n for f in range(len(data.shape[0])):\r\n total_p=0\r\n for p in mydict2_list[f]:\r\n total_p=total_p+mydict[p[0]]*mydict[p[1]]\r\n for p in mydict2_list[f]:\r\n mydict2_list[f][p]=mydict[p[0]]*mydict[p[1]]*1.0/total_p\r\n \r\n for t in range(data.shape[0]):\r\n for u in compatible_haplo_pairs[t]:\r\n if mydict2_list[t][u]==max(mydict2_list[t][f] for f in compatible_haplo_pairs[t]):\r\n haplo_output[2*t,:]= u[0]\r\n haplo_output[2*t+1,:]=u[1]\r\n break\r\n \r\n return haplo_output \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"cs124try.py","file_name":"cs124try.py","file_ext":"py","file_size_in_byte":6296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"308153138","text":"\"\"\"\nCOPY-PASTE FROM AWS GOES LIKE THIS:\n\nFollow these steps to configure the webhook in Slack:\n\n 1. Navigate to https://.slack.com/services/new\n\n 2. Search for and select \"Incoming WebHooks\".\n\n 3. Choose the default channel where messages will be sent and click \"Add Incoming WebHooks Integration\".\n\n 4. Copy the webhook URL from the setup instructions and use it in the next section.\n\n\nFollow these steps to encrypt your Slack hook URL for use in this function:\n\n 1. Create a KMS key - http://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html.\n\n 2. Encrypt the event collector token using the AWS CLI.\n $ aws kms encrypt --key-id alias/ --plaintext \"\"\n\n Note: You must exclude the protocol from the URL (e.g. \"hooks.slack.com/services/abc123\").\n\n 3. Copy the base-64 encoded, encrypted key (CiphertextBlob) to the ENCRYPTED_HOOK_URL variable.\n\n 4. Give your function's role permission for the kms:Decrypt action.\n Example:\n\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"Stmt1443036478000\",\n \"Effect\": \"Allow\",\n \"Action\": [\n \"kms:Decrypt\"\n ],\n \"Resource\": [\n \"\"\n ]\n }\n ]\n}\n\"\"\"\n\nfrom __future__ import print_function\nfrom base64 import b64decode\nfrom urlparse import parse_qs\nimport boto3\nimport logging\nfrom urllib2 import Request, urlopen, URLError, HTTPError\nimport json\n\n\nENCRYPTED_EXPECTED_TOKEN = \"\" # Enter the base-64 encoded, encrypted Slack command token (CiphertextBlob)\nENCRYPTED_HOOK_URL = '' # Enter the base-64 encoded, encrypted key (CiphertextBlob)\nSLACK_CHANNEL = '' # Enter the Slack channel to send a message to\n\nHOOK_URL = \"https://\" + boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_HOOK_URL))['Plaintext']\n\n\nclass LambdaDataOpsBoto(object):\n\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n self.kms = boto3.client('kms')\n self.expected_token = self.kms.decrypt(\n CiphertextBlob = b64decode(ENCRYPTED_EXPECTED_TOKEN))['Plaintext']\n\n def validate_token(self, token):\n if token != self.expected_token:\n self.logger.error(\"Request token (%s) \"\n \"does not match exptected\", token)\n raise Exception(\"Invalid request token\")\n\n def _slack_req_extractor(self, params):\n user = params['user_name'][0]\n command = params['command'][0]\n channel = params['channel_name'][0]\n command_text = params['text'][0]\n log = \"%s invoked %s in %s with the following text: %s\" % \\\n (user, command, channel, command_text)\n self.logger.info(log)\n return user, command, channel, command_text\n\n def lambda_test(self, event, context):\n req_body = event['body']\n params = parse_qs(req_body)\n token = params['token'][0]\n self.validate_token(token)\n (user, command, channel, command_text) = \\\n self._slack_req_extractor(params=params)\n return \"%s invoked %s in %s with the following text: %s\" % (user, command, channel, command_text)\n\n def lambda_cloud_watch(self, event, context):\n self.logger.info(\"Event: \" + str(event))\n message = json.loads(event['Records'][0]['Sns']['Message'])\n self.logger.info(\"Message: \" + str(message))\n\n alarm_name = message['AlarmName']\n #old_state = message['OldStateValue']\n new_state = message['NewStateValue']\n reason = message['NewStateReason']\n\n slack_message = {\n 'channel': SLACK_CHANNEL,\n 'text': \"%s state is now %s: %s\" % (alarm_name, new_state, reason)\n }\n\n req = Request(HOOK_URL, json.dumps(slack_message))\n try:\n response = urlopen(req)\n response.read()\n self.logger.info(\"Message posted to %s\", slack_message['channel'])\n except HTTPError as e:\n self.logger.error(\"Request failed: %d %s\", e.code, e.reason)\n except URLError as e:\n self.logger.error(\"Server connection failed: %s\", e.reason)\n\n def lambda_emr(self, event, context):\n req_body = event['body']\n params = parse_qs(req_body)\n token = params['token'][0]\n self.validate_token(token)\n (user, command, channel, command_text) = \\\n self._slack_req_extractor(params=params)\n client = boto3.client('emr', region_name='us-west-2')\n client.run_job_flow(\n Name='YourApp',\n ReleaseLabel='emr-4.1.0',\n Instances={\n 'MasterInstanceType': 'm3.xlarge',\n 'SlaveInstanceType': 'm3.xlarge',\n 'InstanceCount': 21,\n 'Ec2KeyName': 'ops',\n 'KeepJobFlowAliveWhenNoSteps': False,\n 'TerminationProtected': False,\n 'Ec2SubnetId': 'subnet-XXXX'\n },\n Steps=[\n {\n 'Name': 'YourStep',\n 'ActionOnFailure': 'TERMINATE_CLUSTER',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': [\n 'spark-submit',\n '--driver-memory','10G',\n '--executor-memory','4G',\n '--executor-cores','4',\n '--num-executors','20',\n '/home/hadoop/process_data.py'\n ]\n }\n },\n ],\n BootstrapActions=[\n {\n 'Name': 'cluster_setup',\n 'ScriptBootstrapAction': {\n 'Path': 's3://your-bucket/subfolder/setup.sh',\n 'Args': []\n }\n }\n ],\n Applications=[\n {\n 'Name': 'Spark'\n },\n ],\n Configurations=[\n {\n \"Classification\": \"spark-env\",\n \"Properties\": {\n\n },\n \"Configurations\": [\n {\n \"Classification\": \"export\",\n \"Properties\": {\n \"PYSPARK_PYTHON\": \"/usr/bin/python2.7\",\n \"PYSPARK_DRIVER_PYTHON\": \"/usr/bin/python2.7\"\n },\n \"Configurations\": [\n\n ]\n }\n ]\n },\n {\n \"Classification\": \"spark-defaults\",\n \"Properties\": {\n \"spark.akka.frameSize\": \"2047\"\n }\n }\n ],\n VisibleToAllUsers=True,\n JobFlowRole='EMR_EC2_DefaultRole',\n ServiceRole='EMR_DefaultRole'\n )","sub_path":"src/main/python/aws/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"445585733","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom .models import Question, Language, Option\n# Create your views here.\n\n\nclass HomePageView(TemplateView):\n template_name = \"index.html\"\n\nfirstQuestion = Question.objects.get(is_first=True)\nnextQuestion = firstQuestion\n\ndef survey(req):\n global nextQuestion\n\n if req.method == 'POST':\n option_id = req.POST['answer']\n option = Option.objects.get(id=int(option_id))\n if option.language:\n return render(req, \"survey.html\", {\n 'language': option.language,\n })\n else:\n nextQuestion = option.next_question\n return render(req, \"survey.html\", {\n 'question': nextQuestion,\n 'options': nextQuestion.options.all(),\n })\n \n\n return render(req, \"survey.html\", {\n 'question': firstQuestion,\n 'options': firstQuestion.options.all(),\n })","sub_path":"Main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"440004750","text":"# -*- coding: utf-8 -*-\nfrom invoke import task\n\nimport multiprocessing\nimport logging\nimport socket\nimport sys\nimport os\n\nimport sys\n\nlog = logging.getLogger(__name__)\nout_hdlr = logging.StreamHandler(sys.stdout)\nout_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\nout_hdlr.setLevel(logging.INFO)\nlog.addHandler(out_hdlr)\nlog.setLevel(logging.INFO)\n\nglobal_vars = {\n \"HOSTNAME\": socket.getfqdn(),\n \"MODE\": \"development\",\n \"COVER_PROFILE_FILE\": \"/tmp/c.out\",\n \"CURDIR\": os.path.dirname(os.path.abspath(__file__)),\n \"VENDOR_DIR\": \".vendor\",\n}\n\nglobal_vars.update({\n \"GOPATH\": os.path.join(global_vars[\"CURDIR\"], global_vars[\"VENDOR_DIR\"]),\n \"DEPENDENCIES\": os.path.join(global_vars[\"CURDIR\"], \"dependencies.txt\"),\n})\n\nGOCOMMAND = \"\"\"env GOPATH=%(GOPATH)s \\\n hostname=%(HOSTNAME)s \\\n mode=%(MODE)s \"\"\" % global_vars\n\nglobal_vars.update({\n \"GOCOMMAND\": GOCOMMAND,\n})\n\nmodules = [\n \"project\",\n \"project/models\",\n \"project/http_handlers\"\n]\n\n@task\ndef remove_deps(ctx):\n ctx.run(\"rm -rf %(GOPATH)s\" % global_vars, encoding=\"utf-8\")\n\n@task\ndef copy_src(ctx):\n ctx.run(\"mkdir -p %(GOPATH)s\" % global_vars, encoding=\"utf-8\")\n ctx.run(\"rm -rf %(GOPATH)s/src/project\" % global_vars, encoding=\"utf-8\")\n ctx.run(\"rm -rf %(GOPATH)s/pkg/\" % global_vars, encoding=\"utf-8\")\n ctx.run(\"cp -r %(CURDIR)s/src %(GOPATH)s\" % global_vars, encoding=\"utf-8\")\n\n@task(pre=[remove_deps, copy_src])\ndef get(ctx, install=True):\n if install:\n with open(global_vars[\"DEPENDENCIES\"], 'r') as f:\n for line in f:\n local_command = \"env GOPATH=%(GOPATH)s\" % global_vars\n ctx.run(\"%s go get -v %s\" % (local_command, line), encoding=\"utf-8\")\n\n\n@task(pre=[copy_src])\ndef start_fast(ctx, race=False):\n local_command = \"%(GOCOMMAND)s go run %(GOPATH)s/src/project/main.go\" % global_vars\n if race:\n local_command += \" -race \"\n ctx.run(local_command, encoding=\"utf-8\")\n\n\n@task(pre=[get, copy_src])\ndef start(ctx):\n ctx.run(\"%(GOCOMMAND)s go run %(GOPATH)s/src/project/main.go\" %\n global_vars, encoding=\"utf-8\")\n\n\n@task(pre=[copy_src])\ndef test_fast(ctx, module=\"\", race=False, cover=False, report=False, count=1, cpu=0):\n if cpu == 0:\n cpu = multiprocessing.cpu_count()\n\n if module in modules:\n local_command = \"time %(GOCOMMAND)s go test -v \" % global_vars\n if race:\n local_command += \" -race \"\n if cover:\n local_command += \" -coverprofile %s\" % global_vars[\n \"COVER_PROFILE_FILE\"]\n local_command += \" -count=%d -cpu=%d --parallel %d\" % (count, cpu, cpu)\n\n ctx.run(\"%s %s\" % (local_command, module), encoding=\"utf-8\")\n\n if report:\n ctx.run(\"%s go tool cover -html=%s\" %\n (global_vars[\"GOCOMMAND\"], global_vars[\"COVER_PROFILE_FILE\"]), encoding=\"utf-8\")\n else:\n log.error(\"module %s is not registered\" % module)\n\n@task(pre=[copy_src])\ndef bench_fast(ctx, module=\"\", race=False, cpu=0):\n if cpu == 0:\n cpu = multiprocessing.cpu_count()\n\n if module in modules:\n local_command = \"time %(GOCOMMAND)s go test -bench=. \" % global_vars\n local_command += \" -cpu=%d --parallel %d\" % (cpu, cpu)\n\n ctx.run(\"%s %s\" % (local_command, module), encoding=\"utf-8\")\n else:\n log.error(\"module %s is not registered\" % module)\n\n\n@task(pre=[copy_src])\ndef vet(ctx):\n for module in modules:\n local_command = \"%(GOCOMMAND)s go vet \" % global_vars\n log.info(\"Checking %s\" % module)\n ctx.run(\"%s %s\" % (local_command, module), encoding=\"utf-8\")\n\n\n@task(pre=[copy_src])\ndef build(ctx, tag=None):\n ctx.run(\"env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GOPATH=%(GOPATH)s go build \\\n -ldflags '-s' -a -installsuffix cgo -o ./bin/project %(GOPATH)s/src/project/main.go\" % global_vars\n )\n if tag:\n ctx.run(\"docker build -t firewut/testtask:%s .\" % tag)\n ","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"334142646","text":"class ClinicalTrial:\n\t\"\"\"Object Clinical Trial\"\"\"\n\n\tdef __init__(self, ID, sponsor, published, state, url, ongoing, title, condition, intervention, locations, last_changed, min_age, max_age, genders, health):\n\t\tself.id = ID \n\t\tself.sponsor = sponsor\n\t\tself.published = published\n\t\tself.state = state\n\t\tself.url = url\n\t\tself.ongoing = ongoing\n\t\tself.title = title\n\t\tself.condition = condition\n\t\tself.intervention = intervention\n\t\tself.locations = locations\n\t\tself.last_changed = last_changed\n\t\tself.min_age = min_age\n\t\tself.max_age = max_age\n\t\tself.genders = genders\n\t\tself.health = health","sub_path":"hophack-2016-clinicaltrials/clinicalsearch/ClinicalTrial.py","file_name":"ClinicalTrial.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"347833224","text":"\"\"\"Settings for Quacc\"\"\"\nfrom __future__ import annotations\n\nimport os\nfrom typing import List, Optional\n\nfrom pydantic import BaseSettings, Field, root_validator\n\nfrom quacc.presets import vasp as vasp_defaults\n\n_DEFAULT_CONFIG_FILE_PATH = \"~/.quacc.yaml\"\n\n__all__ = [\"QuaccSettings\"]\n\n\nclass QuaccSettings(BaseSettings):\n \"\"\"\n Settings for quacc.\n\n The default way to modify these is to make a ~/.quacc.yaml file. Alternatively,\n the environment variable QUACC_CONFIG_FILE can be set to point to a yaml file with\n quacc settings.\n\n The variables can also be modified individually though environment variables by\n using the \"QUACC\" prefix. e.g. QUACC_SCRATCH_DIR=/path/to/scratch.\n \"\"\"\n\n # ---------------------------\n # General Settings\n # ---------------------------\n\n CONFIG_FILE: str = Field(\n _DEFAULT_CONFIG_FILE_PATH, description=\"File to load alternative defaults from.\"\n )\n SCRATCH_DIR: str = Field(\n os.path.expandvars(\"$SCRATCH\")\n if \"SCRATCH\" in os.environ\n else \"/tmp\"\n if os.path.exists(\"/tmp\")\n else \".\",\n description=\"Scratch directory for calculations.\",\n )\n GZIP_FILES: bool = Field(\n True, description=\"Whether generated files should be gzip'd.\"\n )\n\n # ---------------------------\n # VASP Settings\n # ---------------------------\n\n # VASP Settings: Main\n VASP_PARALLEL_CMD: str = Field(\n os.path.expandvars(\"$VASP_PARALLEL_CMD\"),\n description=\"Parallel command to run VASP with Custodian (e.g. srun -N 2 --ntasks-per-node 24)\",\n )\n VASP_CMD: str = Field(\n \"vasp_std\", description=\"Command to run the standard version of VASP.\"\n )\n VASP_GAMMA_CMD: str = Field(\n \"vasp_gam\", description=\"Command to run the gamma-point only version of VASP.\"\n )\n\n # VASP Settings: General\n VASP_INCAR_COPILOT: bool = Field(\n True, description=\"Whether co-pilot mode should be used for VASP INCAR handling\"\n )\n VASP_BADER: bool = Field(\n True,\n description=\"Whether to run a Bader analysis when summarizing VASP results. Requires bader to be in PATH.\",\n )\n VASP_PRESET_MAG_DEFAULT: float = Field(\n 1.0,\n description=\"Default initial magmom to use for a given element if a preset with magmoms is provided but an element is missing from the list\",\n )\n VASP_MAG_CUTOFF: float = Field(\n 0.05,\n description=\"If the absolute value of all magnetic moments are below this value, they will be set to 0 such that a spin-unpolarized calculation will be performed\",\n )\n VASP_COPY_MAGMOMS: bool = Field(\n True,\n description=\"If True, any pre-existing atoms.get_magnetic_moments() will be set in atoms.set_initial_magnetic_moments().\",\n )\n VASP_VERBOSE: bool = Field(\n True,\n description=\"If True, warnings will be raised when INCAR parameters are changed.\",\n )\n VASP_PRESET_DIR: str = Field(\n os.path.dirname(vasp_defaults.__file__),\n description=\"Path to the VASP preset directory\",\n )\n\n # VASP Settings: Custodian\n VASP_CUSTODIAN: bool = Field(\n True, description=\"Whether Custodian should be used to run VASP\"\n )\n VASP_CUSTODIAN_VTST: bool = Field(\n False,\n description=\"If VTST-related input swaps should be used when running Custodian. Requires VASP to be compiled with VTST\",\n )\n VASP_CUSTODIAN_MAX_ERRORS: int = Field(\n 5, description=\"Maximum errors for Custodian\"\n )\n VASP_CUSTODIAN_HANDLERS: List[str] = Field(\n [\n \"VaspErrorHandler\",\n \"MeshSymmetryErrorHandler\",\n \"UnconvergedErrorHandler\",\n \"NonConvergingErrorHandler\",\n \"PotimErrorHandler\",\n \"PositiveEnergyErrorHandler\",\n \"FrozenJobErrorHandler\",\n \"StdErrHandler\",\n \"LargeSigmaHandler\",\n \"IncorrectSmearingHandler\",\n ],\n description=\"Handlers for Custodian\",\n )\n VASP_CUSTODIAN_VALIDATORS: List[str] = Field(\n [\"VasprunXMLValidator\", \"VaspFilesValidator\"],\n description=\"Validators for Custodian\",\n )\n VASP_CUSTODIAN_WALL_TIME: Optional[int] = Field(\n None,\n description=\"After this many seconds, Custodian will stop running and ensure that VASP writes a STOPCAR\",\n )\n\n class Config:\n \"\"\"Pydantic config settings.\"\"\"\n\n env_prefix = \"quacc_\"\n\n @root_validator(pre=True)\n def load_default_settings(cls, values: dict) -> dict:\n \"\"\"\n Load settings from file or environment variables.\n Loads settings from a root file if available and uses that as defaults in\n place of built in defaults.\n This allows setting of the config file path through environment variables.\n \"\"\"\n from pathlib import Path\n\n from monty.serialization import loadfn\n\n config_file_path = values.get(\"CONFIG_FILE\", _DEFAULT_CONFIG_FILE_PATH)\n\n new_values = {}\n if Path(config_file_path).expanduser().exists():\n new_values |= loadfn(Path(config_file_path).expanduser())\n\n new_values.update(values)\n return new_values\n","sub_path":"quacc/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"644925703","text":"import threading\nfrom spider.download_blog_csdn import DownloadCSDNPage\nfrom spider.download_course_w3school import DownloadW3School\nfrom spider.download_novel_xiaoshuodaquan import DownloadNoveXiaoShuoDaQuan\nfrom spider.download_novel_biquwu import DownloadNovelBiQuWu\nfrom spider.download_novel_xinbiquge import DownloadNovelXinBiQuGe\nfrom spider.download_novel_booktext import DownloadNovelBooktext\nfrom spider.download_novel_aszw6 import DownloadNovelASZW6\nfrom spider.download_novel_duquanben import DownloadNovelDuQuanBen\nfrom spider.download_raiders_youmin import DownloadRaidersYouMin\nfrom spider.download_raiders_youxia import DownloadRaidersYouXia\nfrom spider.download_raiders_baidujingyan import DownLoadRaidersJingYan\nfrom ui_information import websiteMessageDict\nfrom spider.spider import *\nimport time\n\n\nclass DownloadControler(threading.Thread):\n\n #网址,类型,网站\n def __init__(self,url,urlType,urlWebsite):\n super().__init__()\n self.status=STATUS_WAIT\n self.url=url\n self.urlType=urlType\n self.urlWebsite=urlWebsite\n self.downloader=self.create_downloader()\n #名称,显示用\n self.name=\"\"\n #自身状态\n self.statusChanged=False\n #页面总计\n self.pageCount=0\n #已下载页面\n self.pageDownloaded=0\n #失败数量\n self.failured=0\n #创建下载器\n def create_downloader(self):\n if self.urlType in websiteMessageDict.keys():\n #博客\n if self.urlWebsite in websiteMessageDict[\"博客\"].keys():\n if self.urlWebsite ==\"csdn\":\n return DownloadCSDNPage(self.url,self)\n #攻略\n elif self.urlWebsite in websiteMessageDict[\"攻略\"].keys():\n if self.urlWebsite == \"游民星空\":\n return DownloadRaidersYouMin(self.url,self)\n elif self.urlWebsite == \"游侠网\":\n return DownloadRaidersYouXia(self.url,self)\n elif self.urlWebsite == \"百度经验\":\n return DownLoadRaidersJingYan(self.url,self)\n #教程\n elif self.urlWebsite in websiteMessageDict[\"教程\"].keys():\n if self.urlWebsite == \"w3school\":\n return DownloadW3School(self.url,self)\n #小说\n elif self.urlWebsite in websiteMessageDict[\"小说\"].keys():\n if self.urlWebsite == \"书书网(www.xiaoshuodaquan.com)\":\n return DownloadNoveXiaoShuoDaQuan(self.url,self)\n elif self.urlWebsite == \"笔趣阁(www.biquwu.cc)\":\n return DownloadNovelBiQuWu(self.url,self)\n elif self.urlWebsite == \"新笔趣阁(www.xsbiquge.com)\":\n return DownloadNovelXinBiQuGe(self.url,self)\n elif self.urlWebsite == \"顶点小说网(www.booktxt.net)\":\n return DownloadNovelBooktext(self.url,self)\n elif self.urlWebsite == \"全本小说网(www.duquanben.com)\":\n return DownloadNovelDuQuanBen(self.url,self)\n elif self.urlWebsite == \"爱上中文网(www.aszw6.com)\":\n return DownloadNovelASZW6(self.url,self)\n\n return None\n\n #下载页面\n def run(self):\n # 创建页面\n self.page = self.create_page()\n # 获取内容\n self.crawling_information()\n # 保存信息\n self.save_information()\n\n #创建类型页面\n def create_page(self):\n return self.downloader.create_page()\n # return Page()\n\n #爬取对应信息\n def crawling_information(self):\n self.set_status(STATUS_REQUEST)\n self.downloader.crawling_information()\n\n #存储信息\n def save_information(self):\n while self.downloader.saveSignal == False:\n time.sleep(5)\n self.set_status(STATUS_SAVE)\n self.downloader.save_information()\n self.set_status(STATUS_SECCESS)\n # print(self.page.id)\n\n #获取状态\n def get_status(self):\n pass\n\n #获取信息\n def get_information(self):\n self.statusChanged = False\n\n #设置名称\n def set_Name(self,name):\n self.statusChanged = True\n self.name = name\n #设置状态\n def set_status(self,status):\n self.statusChanged = True\n self.status=status\n\n #设置页面数量\n def set_max_pageNum(self,num):\n self.statusChanged = True\n self.pageCount=num\n #设置已下载数量\n def set_downloaded_pageNum(self,num):\n self.statusChanged = True\n self.pageDownloaded=num\n\n #设置失败数量\n def set_failured_pageNum(self,num):\n self.statusChanged = True\n self.failured=num\n\n\n\n","sub_path":"spider/download_controler.py","file_name":"download_controler.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"226196825","text":"import sys\n\nnumber = str(sys.argv[1])\nnumbers = number.split(\"\\n\")\n\ntotal = 0\n\nfor i in numbers:\n left = int(i) % 3\n value = int(i) - left\n final = value/3 - 2\n total += final\n\nprint(total)\n","sub_path":"fuel.py","file_name":"fuel.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"42494966","text":"\"\"\"Perform a remote R analysis.\nNote that this script requires the requests module.\nInstall it with pip if necessary.\n\"\"\"\n\nimport bson.objectid\nimport json\nimport pymongo\nimport requests\nimport tangelo\nimport vtk\nimport vtk_arbor_utils\nfrom time import sleep\n\nimport sys\n\n@tangelo.restful\ndef get(*pargs, **query_args):\n # standard arguments. As we parse info out of query_args, we\n # also delete it from the dictionary. This way we are eventually\n # left with only our analysis parameters.\n baseURL = query_args[\"baseURL\"]\n del query_args[\"baseURL\"]\n projectName = query_args[\"projectName\"]\n del query_args[\"projectName\"]\n analysis = query_args[\"analysis\"]\n del query_args[\"analysis\"]\n\n # parse & load inputs\n inputs = []\n inputTypes = [\"tables\", \"trees\"]\n for inputType in inputTypes:\n key = \"ARBOR_ANALYSIS_INPUT_%s\" % inputType.upper()\n if key in query_args:\n input_names = query_args[key].split(\"&\")\n for input_name in input_names:\n if inputType == \"tables\":\n inputTable = LoadInputTable(query_args[input_name], baseURL, projectName)\n inputTable[\"name\"] = input_name\n inputs.append(inputTable)\n elif inputType == \"trees\":\n inputTree = LoadInputTree(query_args[input_name], baseURL, projectName)\n inputTree[\"name\"] = input_name\n inputs.append(inputTree)\n del query_args[input_name]\n del query_args[key]\n\n # parse outputs\n outputs = []\n outputTypes = [\"tables\", \"trees\"]\n # outputMap is used later on to map outputs from the name in the R script\n # to the name that the user requested.\n outputMap = {}\n for outputType in outputTypes:\n key = \"ARBOR_ANALYSIS_OUTPUT_%s\" % outputType.upper()\n if key in query_args:\n output_names = query_args[key].split(\"&\")\n for output_name in output_names:\n if outputType == \"tables\":\n outputs.append({\"name\": output_name, \"type\": \"Table\"})\n outputMap[output_name] = query_args[output_name]\n elif outputType == \"trees\":\n outputs.append({\"name\": output_name, \"type\": \"Tree\"})\n outputMap[output_name] = query_args[output_name]\n del query_args[output_name]\n del query_args[key]\n\n # get the script for this analysis and its type\n r = requests.get(\n \"%s/arborapi/projmgr/analysis/%s/script\" % (baseURL, analysis))\n script = r.text\n r = requests.get(\n \"%s/arborapi/projmgr/analysis/%s\" % (baseURL, analysis))\n response = r.json()\n analysis_type = response[0][\"analysis\"][\"type\"]\n\n # at this point, everything remaining in the query_args dict\n # is a parameter. Replace each parameter key with its value in\n # our script.\n for key, value in query_args.iteritems():\n script = script.replace(key, value)\n\n # set up the JSON object to send to the remote processing server.\n analysisJson = {}\n analysisJson[\"name\"] = analysis\n analysisJson[\"inputs\"] = inputs\n analysisJson[\"outputs\"] = outputs\n analysisJson[\"script\"] = script\n\n # send the request to the analysis server & get the task ID\n if analysis_type == \"vtkpython\":\n post_URL = \"http://arbor.kitware.com/service/tasks/celery/visomics/vtk/python\"\n else:\n post_URL = \"http://arbor.kitware.com/service/tasks/celery/visomics/vtk/r\"\n\n r = requests.post(\n post_URL,\n json.dumps(analysisJson),\n auth=requests.auth.HTTPDigestAuth('bob', 'tree'))\n jResponse = r.json()\n taskID = jResponse[\"id\"]\n\n # check the status of our job\n jobDone = False\n while(not jobDone):\n r = requests.get(\n \"http://arbor.kitware.com/service/tasks/celery/%s/status\" % taskID,\n auth=requests.auth.HTTPDigestAuth('bob', 'tree'))\n jResponse = r.json()\n status = jResponse[\"status\"]\n if status == \"PENDING\":\n sleep(1)\n else:\n jobDone = True\n\n if status == \"SUCCESS\":\n # get the results of the analysis\n r = requests.get(\n \"http://arbor.kitware.com/service/tasks/celery/%s/result\" % taskID,\n auth=requests.auth.HTTPDigestAuth('bob', 'tree'))\n analysisOutputs = r.json()[\"result\"][\"output\"]\n\n # store each output in the Tree Store\n for analysisOutput in analysisOutputs:\n if analysisOutput[\"type\"] == \"Table\":\n fileType = \"csv\"\n # get the result table & convert it to CSV\n tableSerialized = analysisOutput[\"data\"]\n table = vtk_arbor_utils.DeserializeVTKTable(tableSerialized)\n data = vtk_arbor_utils.VTKTableToCSV(table)\n\n elif analysisOutput[\"type\"] == \"Tree\" or analysisOutput[\"type\"] == \"vtkTree\":\n fileType = \"phyloxml\"\n # get the tree (serialized VTK string) and convert it to PhyloXML\n treeSerialized = analysisOutput[\"data\"]\n tree = vtk_arbor_utils.DeserializeVTKTree(treeSerialized)\n data = vtk_arbor_utils.VTKTreeToPhyloXML(tree)\n\n # push this output into the database\n analysisOutputName = outputMap[analysisOutput[\"name\"]]\n putURL = \"%s/arborapi/projmgr/project/%s\" % (baseURL, projectName)\n putURL += \"?filename=%s&filetype=%s&datasetname=%s&data=%s\" % (analysisOutputName, fileType, analysisOutputName, data)\n r = requests.put(putURL)\n\n return json.dumps({\"status\": \"Success\"})\n\n return json.dumps({\"status\": \"Failure\"})\n\n# Download tree from ArborAPI & load it into JSON in the format our\n#R engine expects: serialized VTK string\ndef LoadInputTree(treeName, baseURL, projectName):\n inputTree = {}\n inputTree[\"type\"] = \"Tree\"\n\n r = requests.get(\n \"%s/arborapi/projmgr/project/%s/PhyloTree/%s/phyloxml\" % (baseURL, projectName, treeName))\n phyloxml = r.text\n tree = vtk_arbor_utils.PhyloXMLToVTKTree(phyloxml)\n treeSerialized = vtk_arbor_utils.SerializeVTKTree(tree)\n inputTree[\"data\"] = treeSerialized\n inputTree[\"format\"] = \"vtk\"\n return inputTree\n\n# Download table from ArborAPI & load it into JSON in the format our\n#R engine expects: serialized VTK string\ndef LoadInputTable(tableName, baseURL, projectName):\n inputTable = {}\n inputTable[\"type\"] = \"Table\"\n r = requests.get(\n \"%s/arborapi/projmgr/project/%s/CharacterMatrix/%s/csv\" % (baseURL, projectName, tableName))\n csv = r.text\n table = vtk_arbor_utils.CSVToVTKTable(csv)\n tableSerialized = vtk_arbor_utils.SerializeVTKTable(table)\n inputTable[\"data\"] = tableSerialized\n inputTable[\"format\"] = \"vtk\"\n return inputTable\n","sub_path":"analysis-app/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"154170674","text":"import httptools\nimport typing\nfrom .message import HttpMessage\nfrom ...common import Url\n\n__all__ = [\n \"HttpRequest\"\n]\n\n\nclass HttpRequest(HttpMessage):\n def __init__(self, headers: typing.Dict[bytes, typing.Union[bytes, typing.Iterable[bytes]]]=None):\n HttpMessage.__init__(self)\n if headers is not None:\n for key, val in headers.items():\n self.headers[key] = val\n self.url = None # type: Url\n self.method = b''\n self.session = None\n\n def on_url(self, raw_url: bytes):\n if raw_url != b'':\n url = httptools.parse_url(raw_url)\n self.url = Url(raw_url, url.schema, url.host, url.port, url.path, url.query, url.fragment, url.userinfo)\n if self.cookies:\n for cookie in self.cookies.values():\n cookie.path = url.path\n\n def to_bytes(self) -> bytes:\n parts = [b'%b %b HTTP/%b' % (self.method, self.url.get(), self.version)]\n if self.headers:\n parts.append(self.headers.to_bytes())\n if self.cookies:\n parts.append(self.cookies.to_bytes())\n parts.append(b'')\n parts.append(self.body)\n return b'\\r\\n'.join(parts)\n\n def __repr__(self):\n return \"\".format(self.version, self.method, self.url, self.headers)\n","sub_path":"stormhttp/proto/http/primitives/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"224363454","text":"#!/usr/bin/python\n\nimport gvgen\n\n# Creates the new graph instance\ngraph = gvgen.GvGen(\"Legend\")\n\n# Creates two items labeled \"Foo\" and \"Bar\"\na = graph.newItem(\"foo\")\nb = graph.newItem(\"bar\")\n\n# Links from \"foo\" to \"bar\"\ngraph.newLink(a,b)\n\ngraph.styleAppend(\"foostyle\",\"color\",\"red\")\ngraph.styleAppend(\"foostyle\",\"shape\",\"rectangle\")\ngraph.styleApply(\"foostyle\", a)\n\ngraph.styleAppend(\"barstyle\",\"color\",\"blue\")\ngraph.styleAppend(\"barstyle\",\"style\",\"filled\")\ngraph.styleApply(\"barstyle\", b)\n\ngraph.legendAppend(\"foostyle\", \"Foo item\",1)\ngraph.legendAppend(\"barstyle\", \"This is the bar item\",1)\n\n# Outputs the graphviz code\ngraph.dot()\n\n","sub_path":"examples/legendin.py","file_name":"legendin.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"216989243","text":"from pylab import *\nfrom keras.preprocessing.image import *\nfrom PIL import Image\nimport time\nimport os\n\nnb_classes = 20 + 1 #because of the tuple\n\ndef calculate_iou(nb_classes, res_dir, label_dir, image_list, label_list):\n conf_m = zeros((nb_classes, nb_classes), dtype=float)\n\n for img_num1, img_num2 in zip(image_list, label_list):\n img_num1 = img_num1.strip('\\n')\n img_num2 = img_num2.strip('\\n')\n pred = img_to_array(Image.open('%s/%s.png' % (res_dir, img_num1))).astype(int)\n label = img_to_array(Image.open('%s/%s.png' % (label_dir, img_num2))).astype(int)\n flat_pred = np.ravel(pred)\n flat_label = np.ravel(label)\n #print flat_pred[887600:888600]\n for p, l in zip(flat_pred, flat_label):\n if l == 255:\n continue\n if (l < nb_classes) and (p < nb_classes):\n conf_m[l, p] += 1\n else:\n print('Invalid entry encountered, skipping! Label: ', l, ' Img_num: ', img_num1,\n ' Prediction: ', p, ' Img_num: ', img_num2)\n\n\n I = np.diag(conf_m)\n U = np.sum(conf_m, axis=0) + np.sum(conf_m, axis=1) - I\n IOU = I/U\n #pixel = np.sum(np.diag(conf_m))/np.sum(conf_m)\n meanIOU = np.mean(IOU)\n\n return conf_m, IOU, meanIOU\n\nlabel_list = open('/home/robotics/PycharmProjects/Mydata/test.txt').readlines()\nlabel_dir = os.path.expanduser('/home/robotics/PycharmProjects/Mydata/test-label')\nimage_list = open('/home/robotics/PycharmProjects/predict_results/FCN_0704_1/pre.txt').readlines()\nres_dir = os.path.expanduser('/home/robotics/PycharmProjects/predict_results/FCN_0704_1')\n\nstart_time = time.time()\nconf_m, IOU, meanIOU = calculate_iou(nb_classes, res_dir, label_dir, image_list, label_list)\nprint(\"IoU: \")\nprint(IOU)\nprint(\"meanIoU: %f\" % meanIOU)\n#print('pixel acc: %f' % (np.sum(np.diag(conf_m))/np.sum(conf_m)))\n#print(pixel)\nduration = time.time() - start_time\nprint('{}s used to calculate IOU.\\n'.format(duration))","sub_path":"eva.py","file_name":"eva.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"573554180","text":"import mkate_mse\nimport mse_load\nimport mkate_bayesian\nimport bayesian_load\nimport torch\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport pandas as pd\nimport numpy as np\n\n# Gets graphs for all depth/side combinations\ndef bplots_from_tar(tar, type):\n plt.close()\n plt.rcParams.update({\"figure.max_open_warning\": 0})\n # Initialize NN and optimizer\n my_nn = mkate_bayesian.Net() if type == \"b\" else mkate_mse.Net()\n optimizer = optim.SGD(my_nn.parameters(), lr=0.01)\n\n # Import my_nn.tar\n checkpoint = torch.load(tar)\n my_nn.load_state_dict(checkpoint[\"model_state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n loss = checkpoint[\"loss\"]\n\n my_nn.eval()\n\n data = pd.read_excel(\"mkate_data.xlsx\", 0, header=[0, 1])\n\n X = torch.Tensor(\n [\n [bool(int(x)) for x in y.replace(\"'\", \"\")]\n for y in data.iloc[0:8192, 0].values\n ]\n )\n Y = torch.Tensor(data.iloc[0:8192, [6, 7]].values)\n\n # Parse graphed data\n dist, real_r, real_b = (\n [torch.sum(l).item() for l in X],\n [l.tolist()[0] for l in Y],\n [l.tolist()[1] for l in Y],\n )\n\n estim_data = my_nn(X)\n\n estim_r, estim_b = [l.tolist()[0] for l in estim_data], [\n l.tolist()[1] for l in estim_data\n ]\n\n real_t, estim_t = [r + b for r, b in zip(real_r, real_b)], [\n r + b for r, b in zip(estim_r, estim_b)\n ]\n\n diff_r, diff_b, diff_t = (\n [r - e for r, e in zip(real_r, estim_r)],\n [r - e for r, e in zip(real_b, estim_b)],\n [r - e for r, e in zip(real_t, estim_t)],\n )\n a1 = []\n a2 = []\n a3 = []\n a4 = []\n a5 = []\n a6 = []\n a7 = []\n a8 = []\n a9 = []\n a10 = []\n a11 = []\n a12 = []\n a13 = []\n\n vals = [a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13]\n for n, v in zip(dist, diff_t):\n vals[int(n - 1)].append(v)\n\n fig, plots = plt.subplots()\n plt.boxplot(vals)\n # Size = fig.get_size_inches()\n # fig.set_size_inches(Size[0] * 2, Size[1] * 2, forward=True)\n\n return plt\n\n\ndef error_plot(depth, side, type):\n vals = [[], []]\n plt.close()\n\n for d in range(1, 14):\n # Save graphs from tars\n\n my_nn = mkate_bayesian.Net() if type == \"b\" else mkate_mse.Net()\n optimizer = optim.SGD(my_nn.parameters(), lr=0.01)\n\n # Import my_nn.tar\n checkpoint = (\n torch.load(\"Tars/Bayesian_\" + str(d) + side + \".tar\")\n if type == \"b\"\n else torch.load(\"Tars/MSE_\" + str(d) + side + \".tar\")\n )\n my_nn.load_state_dict(checkpoint[\"model_state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n loss = checkpoint[\"loss\"]\n\n my_nn.eval()\n\n data = pd.read_excel(\"mkate_data.xlsx\", 0, header=[0, 1])\n X = torch.Tensor(\n [\n [bool(int(x)) for x in y.replace(\"'\", \"\")]\n for y in data.iloc[0:8192, 0].values\n ]\n )\n Y = torch.Tensor(data.iloc[0:8192, [6, 7]].values)\n dist, real_r, real_b = (\n [torch.sum(l).item() for l in X],\n [l.tolist()[0] for l in Y],\n [l.tolist()[1] for l in Y],\n )\n\n estim_data = my_nn(X)\n\n estim_r, estim_b = [l.tolist()[0] for l in estim_data], [\n l.tolist()[1] for l in estim_data\n ]\n\n trimmed_real = []\n trimmed_estim = []\n\n arrayzip = (\n zip(dist, real_r, estim_r) if side == \"r\" else zip(dist, real_b, estim_b)\n )\n\n for dis, r, e in arrayzip:\n if dis == depth:\n trimmed_real.append(r)\n trimmed_estim.append(e)\n\n vals[0].append(d)\n vals[1].append(\n F.mse_loss(torch.Tensor(trimmed_estim), torch.Tensor(trimmed_real)).item()\n )\n\n plt.scatter(vals[0],vals[1])\n plt.table(cellText=[[round(x,3) for x in vals[1]]], loc = 'top')\n return plt\n\ndef error_plot_both(depth, side):\n vals_b = [[], []]\n vals_m = [[], []]\n plt.close()\n\n for d in range(1, 14):\n # Save graphs from tars\n\n my_nn_b = mkate_bayesian.Net() \n my_nn_m = mkate_mse.Net()\n\n optimizer_b = optim.SGD(my_nn_b.parameters(), lr=0.01)\n optimizer_m = optim.SGD(my_nn_m.parameters(), lr=0.01)\n\n # Import my_nn.tar\n checkpoint_b = (\n torch.load(\"Tars/Bayesian_\" + str(d) + side + \".tar\")\n )\n checkpoint_m = (\n torch.load(\"Tars/MSE_\" + str(d) + side + \".tar\")\n )\n my_nn_b.load_state_dict(checkpoint_b[\"model_state_dict\"])\n optimizer_b.load_state_dict(checkpoint_b[\"optimizer_state_dict\"])\n loss_b = checkpoint_b[\"loss\"]\n\n my_nn_b.eval()\n\n\n my_nn_m.load_state_dict(checkpoint_m[\"model_state_dict\"])\n optimizer_m.load_state_dict(checkpoint_m[\"optimizer_state_dict\"])\n loss_m = checkpoint_m[\"loss\"]\n\n my_nn_m.eval()\n\n data = pd.read_excel(\"mkate_data.xlsx\", 0, header=[0, 1])\n X = torch.Tensor(\n [\n [bool(int(x)) for x in y.replace(\"'\", \"\")]\n for y in data.iloc[0:8192, 0].values\n ]\n )\n Y = torch.Tensor(data.iloc[0:8192, [6, 7]].values)\n dist, real_r, real_b = (\n [torch.sum(l).item() for l in X],\n [l.tolist()[0] for l in Y],\n [l.tolist()[1] for l in Y],\n )\n\n estim_data = my_nn_b(X)\n\n estim_r, estim_b = [l.tolist()[0] for l in estim_data], [\n l.tolist()[1] for l in estim_data\n ]\n\n trimmed_real = []\n trimmed_estim = []\n\n arrayzip = (\n zip(dist, real_r, estim_r) if side == \"r\" else zip(dist, real_b, estim_b)\n )\n\n for dis, r, e in arrayzip:\n if dis == depth:\n trimmed_real.append(r)\n trimmed_estim.append(e)\n\n vals_b[0].append(d)\n vals_b[1].append(\n F.mse_loss(torch.Tensor(trimmed_estim), torch.Tensor(trimmed_real)).item()\n )\n\n estim_data = my_nn_m(X)\n\n estim_r, estim_b = [l.tolist()[0] for l in estim_data], [\n l.tolist()[1] for l in estim_data\n ]\n\n trimmed_real = []\n trimmed_estim = []\n\n arrayzip = (\n zip(dist, real_r, estim_r) if side == \"r\" else zip(dist, real_b, estim_b)\n )\n\n for dis, r, e in arrayzip:\n if dis == depth:\n trimmed_real.append(r)\n trimmed_estim.append(e)\n\n vals_m[0].append(d)\n vals_m[1].append(\n F.mse_loss(torch.Tensor(trimmed_estim), torch.Tensor(trimmed_real)).item()\n )\n\n p1 = plt.scatter(vals_b[0],vals_b[1], label=\"Bayesian\")\n p2 = plt.scatter(vals_m[0],vals_m[1], label=\"MSE Loss\")\n plt.legend(handles=[p1,p2])\n return plt\ndef main():\n for depth in range(1, 14):\n for side in [\"b\", \"r\"]:\n # Save graphs from tars\n bplots_from_tar(\"Tars/MSE_\" + str(depth) + side + \".tar\", \"m\").savefig(\n \"Analysis/MSE_box_\" + str(depth) + side\n )\n\n bplots_from_tar(\"Tars/Bayesian_\" + str(depth) + side + \".tar\", \"b\").savefig(\n \"Analysis/Bayesian_box_\" + str(depth) + side\n )\n \n error_plot(depth, side, \"m\").savefig(\n \"Analysis/MSE_MSE_\" + str(depth) + side\n )\n error_plot(depth, side, \"b\").savefig(\n \"Analysis/Bayesian_MSE_\" + str(depth) + side\n )\n \n error_plot_both(depth, side).savefig(\n \"Analysis/Combined_MSE_\" + str(depth) + side\n ) \n\n\nif __name__ == \"__main__\":\n main()","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":7764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"230708659","text":"import json\r\nimport random\r\nimport time\r\nfrom collections import deque\r\nfrom threading import Event, Thread\r\n\r\nimport string_constants as constants\r\nfrom email_consumer import EmailListener\r\nfrom invoice_consumer import InvoiceListener\r\nfrom messageq import MessageQ\r\nfrom order_acknowledgement import OrderAcknowledgement\r\nfrom sms_consumer import SMSListener\r\n\r\nrequest, sms, invoice, email = 0, 1, 2, 3\r\n\r\ndef push_order_requests(request_q, run_event):\r\n\t\"\"\"Send \"Order placed\" requests to the message queue that our service is listening to.\"\"\"\r\n\tfor i in range(100):\r\n\t\trequest_q.enqueue(json.dumps({constants.ORDER_ID: ('meesho' + str(i))}))\r\n\t\tprint('Placed order {}.'.format(i))\r\n\t\ttime.sleep(random.randint(0, 1) / 100)\r\n\t\tif not run_event.is_set():\r\n\t\t\tbreak\r\n\r\ndef run(message_queues, consumers, interrupt_stop):\r\n\t\"\"\"This is the main runner function. It triggers all the different consumers on different threads.\"\"\"\r\n\tthreads = []\r\n\tfor i in range(4):\r\n\t\tcleanup_thread = Thread(\r\n\t\ttarget = message_queues[i].cleanup_stragglers,\r\n\t\targs = (interrupt_stop,))\r\n\t\tthreads.append(cleanup_thread)\r\n\r\n\t\tfor consumer in consumers[i]:\r\n\t\t\tlistener_thread = Thread(\r\n\t\t\ttarget = consumer.listen,\r\n\t\t\targs = (interrupt_stop,))\r\n\t\t\tthreads.append(listener_thread)\r\n\r\n\tproducer_thread = Thread(\r\n\t\ttarget = push_order_requests,\r\n\t\targs = (message_queues[request], interrupt_stop,))\r\n\tthreads.append(producer_thread)\r\n\r\n\tstart_time = time.time()\r\n\ttry:\r\n\t\tfor t in threads:\r\n\t\t\tt.start()\r\n\t\twhile True:\r\n\t\t\ttime.sleep(1)\r\n\texcept KeyboardInterrupt:\r\n\t\tinterrupt_stop.clear()\r\n\t\tprint('Closing all threads safely.')\r\n\tfinally:\r\n\t\tfor t in threads:\r\n\t\t\tt.join()\r\n\tend_time = time.time()\r\n\tprint('Simulation complete in {} minutes.'.format((end_time - start_time) / 60))\r\n\r\ndef main():\r\n\t\"\"\"Initialize and trigger all message queues and their listeners.\"\"\"\r\n\tmessage_queues = []\r\n\tfor i in range(4):\r\n\t\tmessage_queues.append(MessageQ(deque([])))\r\n\tconsumers = [[] for i in range(4)]\r\n\r\n\t# These numbers simulate the number of various consumers to the queue.\r\n\t# Tweak these to see the performance difference\r\n\tnum_request_consumers = 10\r\n\tnum_sms_consumers = 10\r\n\tnum_invoice_consumers = 25\r\n\tnum_email_consumers = 10\r\n\r\n\tfor i in range(num_request_consumers):\r\n\t\torder_acknowledgement_service = OrderAcknowledgement(\r\n\t\t\tmessage_queues[request],\r\n\t\t\tmessage_queues[sms],\r\n\t\t\tmessage_queues[invoice])\r\n\t\tconsumers[request].append(order_acknowledgement_service)\r\n\t\r\n\tfor i in range(num_sms_consumers):\r\n\t\tsms_consumer = SMSListener(message_queues[sms])\r\n\t\tconsumers[sms].append(sms_consumer)\r\n\t\r\n\tfor i in range(num_invoice_consumers):\r\n\t\tinvoice_consumer = InvoiceListener(\r\n\t\t\tmessage_queues[invoice],\r\n\t\t\tmessage_queues[email])\r\n\t\tconsumers[invoice].append(invoice_consumer)\r\n\t\r\n\tfor i in range(num_email_consumers):\r\n\t\temail_consumer = EmailListener(message_queues[email])\r\n\t\tconsumers[email].append(email_consumer)\r\n\r\n\tstop_event = Event()\r\n\tstop_event.set()\r\n \r\n\trun(message_queues, consumers, stop_event)\r\n\r\nmain()","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"82460084","text":"import torch\nimport numpy as np\nimport random\nfrom torch import nn,optim,autograd\n\nh_dim = 400\nbatch_size = 512\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(2, h_dim),\n nn.ReLU(True),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(True),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(True),\n nn.Linear(h_dim, 2)\n )\n def forward(self, z):\n output = self.net(z)\n return output\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(2, h_dim),\n nn.ReLU(True),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(True),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(True),\n nn.Linear(h_dim, 1),\n nn.Sigmoid(),\n )\n def forward(self, x):\n output = self.net(x)\n return output.view(-1)\n\ndef data_generator():\n scale = 22.\n centers = [\n (1,0),\n (-1,0),\n (0,1),\n (0,-1),\n (1. / np.sqrt(2), 1./np.sqrt(2)),\n (1. / np.sqrt(2), -1./np.sqrt(2)),\n (-1. / np.sqrt(2), 1./np.sqrt(2)),\n (-1. / np.sqrt(2), -1./np.sqrt(2))\n ]\n centers = [(scale * x, scale * y) for x, y in centers]\n\n while True:\n dataset = []\n for i in range(batch_size):\n point = np.random.randn(2) * 0.02\n center = random.choice(centers)\n # N(0, 1) + center x1/x2\n point[0] += center[0]\n point[1] += center[1]\n dataset.append(point)\n dataset = np.array(dataset).astype(np.float32)\n dataset /= 1.414\n yield dataset\n\ndef gradient_penalty(D, xr, xf):\n t = torch.rand(batch_size, 1).cuda()\n t = t.expand_as(xr)\n mid = t * xr + (1-t) * xf\n mid.requires_grad_()\n pred = D(mid)\n grads = autograd.grad(outputs=pred, inputs=mid,\n grad_outputs=torch.ones_like(pred),\n create_graph=True,retain_graph=True,only_inputs=True)[0]\n gp = torch.pow(grads.norm(2, dim=1)-1,2).mean()\n return gp\n\nif __name__ == '__main__':\n torch.manual_seed(23)\n np.random.seed(23)\n data_iter = data_generator()\n x = next(data_iter)\n # print(x.shape)\n G = Generator().cuda()\n D = Discriminator().cuda()\n # print(G)\n # print(D)\n optimizer_G = optim.Adam(G.parameters(),lr = 5e-4, betas=(0.5, 0.9))\n optimizer_D = optim.Adam(D.parameters(),lr = 5e-4, betas=(0.5, 0.9))\n\n for epoch in range(50000):\n # 1. train Discrimator\n for _ in range(5):\n xr = next(data_iter)\n xr = torch.from_numpy(xr).cuda()\n predr = D(xr)\n lossr = predr.mean()\n # 1.2. train on fake data\n z = torch.randn(batch_size,2).cuda()\n xf = G(z)\n predf = D(xf)\n lossf = predf.mean()\n\n # 1.3 gradient penalty\n gp = gradient_penalty(D, xr, xf)\n\n loss_D = lossr + lossf + gp\n optimizer_D.zero_grad()\n loss_D.backward()\n optimizer_D.step()\n\n # 2. train Generator\n z = torch.randn(batch_size, 2).cuda()\n xf = G(z)\n predf = D(xf)\n loss_G = -predf.mean()\n optimizer_G.zero_grad()\n loss_G.backward()\n optimizer_G.step()\n\n if epoch % 100 == 0:\n print(loss_G.item(),loss_D.item())\n\n\n","sub_path":"第十四章:对抗生成网络/wgan.py","file_name":"wgan.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"194975171","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.utils import to_categorical\n\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Reshape\nfrom tensorflow.keras.layers import concatenate, add, Activation\nfrom tensorflow.keras.models import Model\n\ndef get_mnist_dataset(file_name=\"mnist.npz\"):\n mnist = np.load(file_name, allow_pickle=True)\n X_train = mnist['x_train']\n X_test = mnist['x_test']\n y_train = mnist['y_train']\n y_test = mnist['y_test']\n # image reshape to (28, 28, 1)\n image_size = X_train.shape[1]\n X_train = np.reshape(X_train, [-1, image_size, image_size, 1])\n X_test = np.reshape(X_test, [-1, image_size, image_size, 1])\n # pixel rescaling\n X_train = X_train.astype('float')/255\n X_test = X_test.astype('float')/255\n return X_train, X_test, y_train, y_test\n\ndef integer_to_one_hot(integer, num_classes):\n one_hot_vector = to_categorical(integer, num_classes=num_classes)\n return one_hot_vector\n\n# === Network parameters ======\nbatch_size = 64 # 64 or 128 for CPU\nepochs = 10\nelement_shape = (28, 28, 1) # the shape of data point\nnum_classes = 10 # the number of labels\n# -----------------------------\nn_channels = element_shape[-1] # 1 for BW, 3 for RGB\nelement_dim = np.prod(element_shape) # 28 * 28 * 1 = 784\nconv_layers = 2\n# -----------------------------\nconv_filters = 32 \nkernel_size = 3\nstrides = 1\npool_size = 2\n# -----------------------------\n\n# === Load Dataset & Preprocess\nX_train, X_test, y_train_int, y_test_int = get_mnist_dataset(file_name=\"mnist.npz\")\ny_train = integer_to_one_hot(y_train_int, num_classes=num_classes)\ny_test = integer_to_one_hot(y_test_int, num_classes=num_classes)\n# --- small subset for testing\ntrain_limit = 2000\ntest_limit = 1000\nX_train = X_train[:train_limit]\ny_train = y_train[:train_limit]\nX_test = X_test[:test_limit]\ny_test = y_test[:test_limit]\n\n\nx_in = Input(shape=element_shape)\n\n# === Naive Inception Module\ndef resnet_module(module_in, filters, kernel_size, strides=2):\n conv_out = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=strides, padding='same',\n activation='relu')(module_in)\n conv_out = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=1, padding='same',\n activation=None)(conv_out)\n merge_in = Conv2D(filters=filters, kernel_size=1,\n strides=strides, padding='same',\n activation=None)(module_in)\n module_out = add([conv_out, merge_in])\n module_out = Activation('relu')(module_out)\n return module_out\n\nx = x_in\ny = resnet_module(x, filters=32, kernel_size=3, strides=1)\ny = resnet_module(y, filters=32, kernel_size=3, strides=2)\ny = resnet_module(y, filters=64, kernel_size=3, strides=1)\ny = resnet_module(y, filters=64, kernel_size=3, strides=2)\ny = Flatten()(y)\ny = Dense(10, activation='softmax')(y)\n\nResNet = Model(x_in, y)\nResNet.summary()\n\n# === Train\n\nResNet.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nResNet.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,\n validation_split=0.2, verbose=2)\n\n# === Performance Report\nloss, acc = ResNet.evaluate(X_test, y_test, verbose=0)\nprint(\"-------------------------------------------\")\nprint(\"Test Loss:\", np.round(loss, 4),\n \"Test Accuracy:\", np.round(acc, 4))\nprint(\"-------------------------------------------\")\n","sub_path":"CNN_ResNet.py","file_name":"CNN_ResNet.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"471934957","text":"import time\nimport os\nimport json\n\ndef test_pong_sim(pong_sim):\n \"\"\"\n Tests that pongs are received by our mock server.\n This test is contained in it's own file because it\n captures stdout output and checks that a received\n message was printed\n \"\"\"\n pong_sim._impl._sim_connection._PING_PONG_INTERVAL = 1.0\n counter = 0\n while pong_sim.run():\n time.sleep(.1)\n if counter == 20:\n break\n counter += 1\n\n if os.path.exists('pong.json'):\n with open('pong.json', 'r') as infile:\n pong = json.load(infile)\n assert pong['PONG'] == 1\n os.remove('pong.json')\n else:\n assert False\n","sub_path":"bonsai-ai/tests/test_ping_pong.py","file_name":"test_ping_pong.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"417610144","text":"\"\"\"Contains the Metrics class, which computes, stores, prints and saves performance metrics\nfor a Keras model.\n\"\"\"\nimport json\nimport logging\nfrom operator import itemgetter\nimport os\nfrom statistics import mean\n\nimport numpy as np\nfrom prettytable import PrettyTable\nfrom keras.callbacks import Callback\n\nfrom . import constants\nfrom .utils import model_utils\nfrom .utils.generic_utils import make_dir\nfrom .preprocessor import Preprocessor\n\nclass Metrics(Callback):\n \"\"\"A class for handling performance metrics, inherits from Callback.\n\n Args:\n training_data (dict): a dictionary with keys 'X_', 'y_' which point\n to the data and labels for all partitions 'train', and optionally 'valid', 'test'\n respectively\n idx_to_tag (dict): maps each unique integer index to its label, or tag\n output_dir (str): base directory to save all output to\n criteria (str): criteria which determines which predictions are true-positives.\n One of: 'left' for left-boundary matching, 'right' for right-boundary matching and\n 'exact' for exact-boundary matching (default)\n fold (int): current fold, if using k-fold cross validation, defaults to 0.\n \"\"\"\n # define this at the class level because some methods are static\n log = logging.getLogger(__name__)\n\n def __init__(self, training_data, idx_to_tag, output_dir, criteria='exact', fold=None):\n\n self.training_data = training_data\n\n # inversed mapping from idx: tag\n self.idx_to_tag = idx_to_tag\n\n self.output_dir = output_dir\n\n # matching criteria to use when evaluating true_positives\n self.criteria = criteria\n\n # epoch counter for model tied to this object\n self.current_epoch = 0\n # current k-fold counter for model tied to this object\n self.current_fold = fold\n\n # Model performance metrics accumulators\n self.performance_metrics_per_epoch = {p: [] for p in constants.PARTITIONS}\n\n def on_train_begin(self, logs={}):\n \"\"\"Series of steps to perform when training begins.\"\"\"\n pass\n\n def on_epoch_end(self, epoch, logs={}):\n \"\"\"Series of steps to perform when epoch ends.\"\"\"\n # get train/valid/test scores, accumulate them, and print them\n\n # Train\n train_scores = self._eval(self.training_data['X_train'], self.training_data['y_train'])\n self.print_performance_scores(train_scores, title='train')\n self.performance_metrics_per_epoch['train'].append(train_scores)\n\n # Valid\n valid_scores = self._eval(self.training_data['X_valid'], self.training_data['y_valid'])\n self.print_performance_scores(valid_scores, title='valid')\n self.performance_metrics_per_epoch['valid'].append(valid_scores)\n\n # Test (optional)\n if self.training_data['X_test'] is not None:\n test_scores = self._eval(self.training_data['X_test'], self.training_data['y_test'])\n self.print_performance_scores(test_scores, title='test')\n self.performance_metrics_per_epoch['test'].append(test_scores)\n\n # write the performance metrics for the current epoch to disk\n self._write_metrics_to_disk()\n\n self.current_epoch += 1 # update the current epoch counter\n\n def _eval(self, X, y):\n \"\"\"Performs all evaluation steps for given X (input) and y (labels).\n\n For a given input (X) and labels (y) performs all the steps in the evaluation pipeline,\n namely: performs prediction on X, chunks the annotations by type, and computes performance\n scores by type.\n\n Args:\n X: input matrix, of shape (num examples X sequence length)\n y: lables, of shape (num examples X sequence length X num classes)\n\n Returns:\n a dictionary of label, score key, value pairs where label is a class tag and scores is\n a 4-tuple containing precision, recall, f1 and support\n \"\"\"\n # get predictions and gold labels\n y_true, y_pred = self._get_y_true_and_pred(X, y)\n # convert idx sequence to tag sequence\n y_true_tag = [self.idx_to_tag[idx] for idx in y_true]\n y_pred_tag = [self.idx_to_tag[idx] for idx in y_pred]\n # chunk the entities\n y_true_chunks = Preprocessor.chunk_entities(y_true_tag)\n y_pred_chunks = Preprocessor.chunk_entities(y_pred_tag)\n\n # get performance scores per label\n performance_scores = \\\n self.get_precision_recall_f1_support(y_true_chunks, y_pred_chunks,\n criteria=self.criteria)\n\n # TEMP: Check CoNLLEval script\n # self.conll_eval(y_true_tag, y_pred_tag)\n\n return performance_scores\n\n # TEMP: Check CoNLLEval script\n def conll_eval(self, y_true, y_pred):\n \"\"\"Calls conlleval script on ConLL-formated file containing predictions and gold labels.\n\n Args:\n y_true: list of gold label tags for a sequence, e.g. ['O', 'O', 'B-DISO', 'I-DISO']\n y_pred: list of predicted tags for a sequence\n \"\"\"\n conll_file = os.path.join(self.output_dir, 'conll.tsv')\n with open(conll_file, 'w') as f:\n for gold, pred in zip(y_true, y_pred):\n if gold != constants.PAD and pred != constants.PAD:\n f.write('{}\\t{}\\t{}\\n'.format('DUMMY', gold, pred))\n os.system('python2 ./conlleval.py {}'.format(conll_file))\n\n def _get_y_true_and_pred(self, X, y):\n \"\"\" Get y_true and y_pred for given input data (X) and labels (y)\n\n Performs prediction for the current model (self.model), and returns a 2-tuple contain 1D\n array-like objects containing the true (gold) labels and the predicted labels, where labels\n are integers corresponding to the sequence tags as per self.tag_type_to_idx.\n\n Args:\n X: input matrix, of shape (num examples X sequence length)\n y: lables, of shape (num examples X sequence length X num classes)\n\n Returns:\n y_true: 1D array like object containing the gold label sequence\n y_pred: 1D array like object containing the predicted sequences\n \"\"\"\n # gold labels\n y_true = y.argmax(axis=-1) # get class label\n y_true = np.asarray(y_true).ravel() # flatten to 1D array\n # predicted labels\n y_pred = self.model.predict(X, batch_size=constants.PRED_BATCH_SIZE)\n y_pred = np.asarray(y_pred.argmax(axis=-1)).ravel()\n\n # sanity check\n if not y_true.shape == y_pred.shape:\n err_msg = \"'y_true' and 'y_pred' have different shapes\"\n Metrics.log.error('AssertionError: %s', err_msg)\n raise AssertionError(err_msg)\n\n return y_true, y_pred\n\n @staticmethod\n def get_precision_recall_f1_support(y_true, y_pred, criteria='exact'):\n \"\"\"Returns precision, recall, f1 and support.\n\n For given gold (y_true) and predicited (y_pred) labels, returns the precision, recall, f1\n and support per label and the average across labels. Expected y_true and y_pred to be a\n sequence of entity chunks.\n\n Args:\n y_true: list of (chunk_type, chunk_start, chunk_end)\n y_pred: list of (chunk_type, chunk_start, chunk_end)\n criteria (str): criteria to use for evaluation, 'exact' matches\n boundaries directly, 'left' requires only a left boundary match\n and 'right requires only a right boundary match'.\n Returns:\n a dictionary of label, score key, value pairs where label is a class tag and scores is\n a 4-tuple containing precision, recall, f1 and support\n\n Raises:\n ValueError, if 'criteria' is not one of 'exact', 'left', or 'right'\n \"\"\"\n performance_scores = {} # dict accumulator of per label of scores\n # micro performance accumulators\n FN_total = 0\n FP_total = 0\n TP_total = 0\n\n labels = list(set([chunk[0] for chunk in y_true])) # unique labels\n\n # get performance scores per label\n for lab in labels:\n # get chunks for current lab\n y_pred_lab = [], []\n # either retain or discard left or right boundaries depending on\n # matching criteria\n if criteria not in ['exact', 'left', 'right']:\n err_msg = (\"Expected criteria to be one of 'exact', 'left', or 'right'. \"\n \"Got: {}\").format(criteria)\n Metrics.log.error(\"ValueError %s\", err_msg)\n raise ValueError(err_msg)\n if criteria == 'exact':\n y_true_lab = [chunk for chunk in y_true if chunk[0] == lab]\n y_pred_lab = [chunk for chunk in y_pred if chunk[0] == lab]\n elif criteria == 'left':\n y_true_lab = [chunk[:2] for chunk in y_true if chunk[0] == lab]\n y_pred_lab = [chunk[:2] for chunk in y_pred if chunk[0] == lab]\n elif criteria == 'right':\n y_true_lab = [chunk[::2] for chunk in y_true if chunk[0] == lab]\n y_pred_lab = [chunk[::2] for chunk in y_pred if chunk[0] == lab]\n\n # per label performance accumulators\n FN = 0\n FP = 0\n TP = 0\n\n # FN\n for gold in y_true_lab:\n if gold not in y_pred_lab:\n FN += 1\n\n for pred in y_pred_lab:\n # FP\n if pred not in y_true_lab:\n FP += 1\n # TP\n elif pred in y_true_lab:\n TP += 1\n\n # get performance metrics\n performance_scores[lab] = \\\n model_utils.precision_recall_f1_support(TP, FP, FN)\n\n # accumulate FNs, FPs, TPs\n FN_total += FN\n FP_total += FP\n TP_total += TP\n\n # get macro and micro peformance metrics averages\n macro_p = mean([v[0] for v in performance_scores.values()])\n macro_r = mean([v[1] for v in performance_scores.values()])\n macro_f1 = mean([v[2] for v in performance_scores.values()])\n total_support = TP_total + FN_total\n\n performance_scores['MACRO_AVG'] = \\\n (macro_p, macro_r, macro_f1, total_support)\n performance_scores['MICRO_AVG'] = \\\n model_utils.precision_recall_f1_support(TP_total, FP_total, \\\n FN_total)\n\n return performance_scores\n\n @staticmethod\n def print_performance_scores(performance_scores, title=None):\n \"\"\"Prints an ASCII table of performance scores.\n\n Args:\n performance_scores: a dictionary of label, score pairs where label\n is a class tag and scores is a 4-tuple\n containing precision, recall, f1 and support\n title (str): the title of the table (uppercased).\n\n Preconditions:\n assumes the values of performance_scores are 4-tuples, where the\n first three items are float representaions of a percentage and the\n last item is an count integer.\n \"\"\"\n # create table, give it a title a column names\n table = PrettyTable()\n if title is not None:\n table.title = title.upper()\n table.field_names = ['Label', 'Precision', 'Recall', 'F1', 'Support']\n\n # column alignment\n table.align['Label'] = 'l'\n table.align['Precision'] = 'r'\n table.align['Recall'] = 'r'\n table.align['F1'] = 'r'\n table.align['Support'] = 'r'\n\n # create and add the rows\n for label, scores in performance_scores.items():\n row = [label]\n # convert scores to formatted percentage strings\n support = scores[-1]\n performance_metrics = ['{:.2%}'.format(x) for x in scores[:-1]]\n row_scores = performance_metrics + [support]\n\n row.extend(row_scores)\n table.add_row(row)\n\n print(table)\n\n def _write_metrics_to_disk(self):\n \"\"\"Write performance metrics to disk as json-formatted .txt file.\n\n At the end of each epoch, writes a json-formatted .txt file to disk\n (name epoch_.txt). File contains performance scores\n per label as well as the best-achieved macro and micro averages thus\n far for the current epoch.\n \"\"\"\n # create evaluation output directory\n eval_dirname = self.output_dir\n if self.current_fold is not None:\n fold = 'fold_{}'.format(self.current_fold + 1)\n eval_dirname = os.path.join(self.output_dir, fold)\n make_dir(eval_dirname)\n\n # create filepath to evaluation file\n eval_filename = 'epoch_{0:03d}.txt'.format(self.current_epoch + 1)\n eval_filepath = os.path.join(eval_dirname, eval_filename)\n\n # per partition performance metrics accumulator\n performance_metrics = {p: {} for p in constants.PARTITIONS}\n\n for partition in self.performance_metrics_per_epoch:\n # test partition may be empty\n if self.performance_metrics_per_epoch[partition]:\n # get best epoch based on macro / micro averages\n macro_avg_per_epoch = [x['MACRO_AVG'] for x in\n self.performance_metrics_per_epoch[partition]]\n micro_avg_per_epoch = [x['MICRO_AVG'] for x in\n self.performance_metrics_per_epoch[partition]]\n\n best_macro_avg = max(macro_avg_per_epoch, key=itemgetter(2))\n best_micro_avg = max(micro_avg_per_epoch, key=itemgetter(2))\n\n best_micro_epoch = micro_avg_per_epoch.index(best_micro_avg)\n best_macro_epoch = macro_avg_per_epoch.index(best_macro_avg)\n\n performance_metrics[partition]['scores'] = \\\n self.performance_metrics_per_epoch[partition][self.current_epoch]\n performance_metrics[partition]['best_epoch_macro_avg'] = \\\n {'epoch': best_macro_epoch, 'scores': best_macro_avg}\n performance_metrics[partition]['best_epoch_micro_avg'] = \\\n {'epoch': best_micro_epoch, 'scores': best_micro_avg}\n\n # write performance metrics for current epoch to file\n with open(eval_filepath, 'a') as eval_file:\n eval_file.write(json.dumps(performance_metrics, indent=4))\n","sub_path":"saber/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":14565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"474474743","text":"from collections import defaultdict\n\nN, Q = [int(x) for x in input().split()]\n\ntillgangliga_a_b = defaultdict(list)\n\nqueries = []\ncount = 0\nfor i in range(Q):\n query = [int(x) for x in input().split()]\n if query[0] == 1:\n count += 1\n queries.append(query)\n\n\ndef make_tillgangligt_g(a, b):\n tillgangliga_a_b[str(a) + '-' + str(b)].append([a, b])\n\n\ndef ta_bort_g(a, b):\n tillgangliga_a_b[str(a) + '-' + str(b)].pop()\n\n\ndef check_if_possible_g(x):\n for i in tillgangliga_a_b.values():\n if i != []:\n if x % i[0][1] == i[0][0]:\n print('Ja')\n break\n else:\n print('Nej')\n\n\ntillgangliga_las = defaultdict(dict)\n\n\ndef make_tillgangligt(a, b):\n for i in range(N // b + 1):\n i *= b\n if tillgangliga_las[i + a].get(str(a) + '-' + str(b)):\n tillgangliga_las[i + a][str(a) + '-' + str(b)] += 1\n else:\n tillgangliga_las[i + a][str(a) + '-' + str(b)] = 0\n\ndef ta_bort(a, b):\n for i in range(N // b + 1):\n i *= b\n tillgangliga_las[i + a][str(a) + '-' + str(b)] -= 1\n\n\ndef check_if_possible(x):\n if tillgangliga_las[x]:\n print('Ja')\n else:\n print('Nej')\n\n\nif count <= 20:\n for query in queries:\n typ = query[0]\n\n if typ == 1:\n check_if_possible_g(query[1])\n elif typ == 2:\n make_tillgangligt_g(query[1], query[2])\n else:\n ta_bort_g(query[1], query[2])\n\n\nelse:\n for query in queries:\n typ = query[0]\n\n if typ == 1:\n check_if_possible(query[1])\n elif typ == 2:\n make_tillgangligt(query[1], query[2])\n else:\n ta_bort(query[1], query[2])\n\n","sub_path":"ProgrammeringsOlympiadenLägertävling2020/Låssmeden/låssmed2.py","file_name":"låssmed2.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"321292972","text":"class node:\r\n def __init__(self,data=None):\r\n self.data = data\r\n self.next = None\r\n\r\nclass linked_list:\r\n def __init__(self):\r\n \tself.head = node()\r\n \r\n def append(self, data):\r\n \"To append data to linked list\"\r\n new_node = node(data)\r\n cur = self.head\r\n while(cur.next!=None):\r\n cur = cur.next\r\n cur.next = new_node\r\n \r\n def length(self):\r\n \t\"To return length of linked list\"\r\n \tcnt = 0\r\n \tcur = self.head\r\n \twhile cur.next!=None:\r\n cnt+=1\r\n cur = cur.next\r\n \treturn cnt\r\n \r\n def display(self):\r\n \t\"To display contents of the linked list in order of index\"\r\n \tcur = self.head\r\n \twhile cur.next!=None:\r\n cur = cur.next\r\n print(cur.data)\r\n\r\n def extract(self,index):\r\n \t\"To return value of particular index of a linked list\"\r\n \tif(index>=self.length()):\r\n return 'Stack Overflow'\r\n \ti = 0\r\n \tcur = self.head\r\n \twhile cur.next!=None:\r\n \t\tcur = cur.next\r\n \t\tif(i==index):\r\n \t return cur.data\r\n \t\ti+=1\r\n \r\n def erase(self,index):\r\n \"To delete value at particular index of linked list\"\r\n if(index>=self.length()):\r\n \treturn 'Stack Overflow'\r\n i = 0\r\n cur = self.head\r\n while True:\r\n \tlast_node = cur\r\n \tcur = cur.next\r\n \tif(i==index):\r\n \t last_node.next = cur.next\r\n \t return i\r\n \ti+=1\r\n# Test the code \r\nobj = linked_list()\r\nobj.append(20)\r\nobj.append(10)\r\nobj.append(30)\r\nobj.append(40)\r\nobj.append(24)\r\nobj.display()\r\nprint(obj.length())\r\nprint(obj.extract(4))\r\nobj.erase(2)\r\nobj.display()\r\n\r\n\r\n\r\n\r\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"553742724","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass Embedding(nn.Module):\r\n\tdef __init__(self, n_user, n_item, embedding_size=32):\r\n\t\tsuper(Embedding, self).__init__()\r\n\t\tself.user_embedding = nn.Parameter(torch.Tensor(n_user, embedding_size))\r\n\t\tself.item_embedding = nn.Parameter(torch.Tensor(n_item, embedding_size))\r\n\r\n\t\tself.reset_parameters()\r\n\r\n\tdef reset_parameters(self):\r\n\t\tnn.init.xavier_uniform_(self.user_embedding)\r\n\t\tnn.init.xavier_uniform_(self.item_embedding)\r\n\r\n\tdef regularization_loss(self):\r\n\t\tloss_reg = 0.\r\n\t\tloss_reg += torch.sum( torch.sqrt( torch.sum(self.user_embedding ** 2, 1) ) )\r\n\t\tloss_reg += torch.sum( torch.sqrt( torch.sum(self.item_embedding ** 2, 1) ) )\r\n\r\n\t\treturn loss_reg\r\n\r\nclass NNMFModel(nn.Module):\r\n\tdef __init__(self, n_user, n_item, embedding_size=32, hidden_size=64):\r\n\t\tsuper(NNMFModel, self).__init__()\r\n\t\tself.embedding_model = Embedding(n_user, n_item, embedding_size)\r\n\t\tself.user_embedding = self.embedding_model.user_embedding\r\n\t\tself.item_embedding = self.embedding_model.item_embedding\r\n\t\t\r\n\t\tself.l1 = nn.Linear(embedding_size*3, hidden_size)\r\n\t\tself.l2 = nn.Linear(hidden_size, hidden_size)\r\n\t\tself.l3 = nn.Linear(hidden_size, 1)\r\n\r\n\t\tself.user_bias = nn.Parameter(torch.Tensor(n_user, 1))\r\n\t\tself.item_bias = nn.Parameter(torch.Tensor(n_item, 1))\t\t\r\n\r\n\t\tself.reset_parameters()\r\n\t\r\n\tdef reset_parameters(self):\r\n\t\tnn.init.zeros_(self.user_bias)\r\n\t\tnn.init.zeros_(self.item_bias)\r\n\r\n\tdef regularization_loss(self):\r\n\t\treturn self.embedding_model.regularization_loss()\r\n\r\n\tdef forward(self, x):\r\n\t\tuser_id = x[:, 0]\r\n\t\titem_id = x[:, 1]\r\n \r\n\t\tuser_emb = self.user_embedding[user_id]\r\n\t\titem_emb = self.item_embedding[item_id]\r\n\r\n\t\tinteraction = torch.mul(user_emb, item_emb)\r\n\t\tratings = torch.sum(interaction, dim = 1)\r\n\r\n\t\tx = torch.cat([user_emb, item_emb, interaction], dim=1)\r\n\t\tx1 = torch.tanh(self.l1(x))\r\n\t\tx2 = torch.tanh(self.l2(x1))\r\n\t\tx3 = self.l3(x2).reshape(-1)\r\n\r\n\t\tuser_b = self.user_bias[user_id].reshape(-1)\r\n\t\titem_b = self.item_bias[item_id].reshape(-1)\r\n\r\n\t\toutput = (ratings + x3) / 2. + user_b + item_b\r\n\r\n\t\treturn output\r\n\r\n\tdef load_model(self, path):\r\n\t\tmodel_dict = torch.load(path)\r\n\t\tself.load_state_dict(model_dict)\r\n\r\n\tdef load_embedding(self, path):\r\n\t\tpretrained_dict = torch.load(path)\r\n\t\tmodel_dict = self.embedding_model.state_dict()\r\n\t\tpretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n\t\tmodel_dict.update(pretrained_dict)\r\n\t\tself.embedding_model.load_state_dict(model_dict)\r\n\r\nclass RelationGAT(nn.Module):\r\n\tdef __init__(self, in_size, out_size):\r\n\t\tsuper(RelationGAT, self).__init__()\r\n\t\tself.wq = nn.Linear(in_size, out_size, bias = False)\r\n\t\tself.wk = nn.Linear(in_size, out_size, bias = False)\r\n\t\tself.wv = nn.Linear(in_size, out_size, bias = False)\r\n\r\n\t\tself.reset_parameters()\r\n\r\n\tdef reset_parameters(self):\r\n\t\tfor m in self.modules():\r\n\t\t\tif isinstance(m, nn.Linear):\r\n\t\t\t\tnn.init.xavier_normal_(m.weight.data)\r\n\r\n\tdef forward(self, x, neighbor):\r\n\t\tx = self.wq(x).unsqueeze(1)\r\n\t\tneighbor = self.wk(neighbor)\r\n\t\t#gat_input = torch.cat([x.repeat(1, neighbor.size(1), 1), neighbor], dim=2)\r\n\t\tgat_input = torch.sum(\r\n\t\t\ttorch.mul(x.repeat(1, neighbor.size(1), 1), neighbor), dim=2\r\n\t\t)\r\n\t\tattn = F.softmax(gat_input, dim=1)\r\n\t\tneighbor = neighbor.transpose(1, 2).contiguous()\r\n\t\tgat_output = self.wv(\r\n\t\t\ttorch.matmul(neighbor, attn.unsqueeze(2)).squeeze(2)\r\n\t\t)\r\n\t\treturn gat_output\r\n\r\nclass IRMC_NN_Model(nn.Module):\r\n\tdef __init__(self, n_user, n_item, supp_users, device, \r\n\t\t\tembedding_size = 32, \r\n\t\t\tout_size = None, \r\n\t\t\thidden_size = 64, \r\n\t\t\thead_num = 4, \r\n\t\t\tsample_num = 2000):\r\n\t\tsuper(IRMC_NN_Model, self).__init__()\r\n\t\tself.device = device\r\n\t\tself.supp_users = supp_users\r\n\t\tself.supp_user_num = supp_users.size(0)\r\n\t\tself.head_num = head_num\r\n\t\tself.sample_num = sample_num\r\n\t\tself.GAT_unit = nn.ModuleList()\r\n\t\tif out_size is None:\r\n\t\t\tout_size = embedding_size\r\n\t\tfor i in range(head_num):\r\n\t\t\tself.GAT_unit.append(RelationGAT(embedding_size, out_size))\r\n\t\tself.w_out = nn.Linear(out_size * head_num, out_size, bias = False)\r\n\r\n\t\tself.user_embedding = nn.Parameter(torch.Tensor(n_user, embedding_size), requires_grad = False)\r\n\t\tself.item_embedding = nn.Parameter(torch.Tensor(n_item, embedding_size), requires_grad = False)\r\n\t\t\r\n\t\tself.l1 = nn.Linear(embedding_size*3, hidden_size)\r\n\t\tself.l2 = nn.Linear(hidden_size, hidden_size)\r\n\t\tself.l3 = nn.Linear(hidden_size, 1)\r\n\r\n\t\tself.user_bias = nn.Parameter(torch.Tensor(n_user, 1), requires_grad = True)\r\n\t\tself.item_bias = nn.Parameter(torch.Tensor(n_item, 1), requires_grad = False)\r\n\r\n\t\tself.reset_parameters()\r\n\r\n\tdef reset_parameters(self):\r\n\t\tnn.init.zeros_(self.user_bias)\r\n\t\tnn.init.zeros_(self.item_bias)\r\n\t\tfor m in self.modules():\r\n\t\t\tif isinstance(m, nn.Linear):\r\n\t\t\t\tnn.init.xavier_normal_(m.weight.data)\r\n\t\t\t\tif m.bias is not None:\r\n\t\t\t\t\tm.bias.data.fill_(0.1)\r\n\r\n\tdef forward(self, x, history, history_len, mode='INTER'):\r\n\t\tuser_id = x[:, 0]\r\n\t\titem_id = x[:, 1]\r\n\r\n\t\tmask = torch.arange(history.size(1))[None, :].to(self.device)\r\n\t\tmask = mask < history_len[:, None]\r\n\t\thistory_emb = self.item_embedding[history]\r\n\t\thistory_emb[~mask] = torch.zeros(self.item_embedding.size(1)).to(self.device)\r\n\t\tuser_init_emb = torch.sum( self.item_embedding[history], dim=1)\r\n\t\tuser_init_emb /= history_len[:, None].float()\r\n\t\t\r\n\t\tfor i in range(self.head_num):\r\n\t\t\tsample_index = torch.randint(0, self.supp_user_num, (x.size(0), self.sample_num)).to(self.device)\r\n\t\t\tsample_users = self.supp_users[sample_index]\r\n\t\t\tsample_user_emb = self.user_embedding[sample_users]\r\n\t\t\tgat_output_i = self.GAT_unit[i](user_init_emb, sample_user_emb)\r\n\t\t\tif i == 0:\r\n\t\t\t\tgat_output = gat_output_i\r\n\t\t\telse:\r\n\t\t\t\tgat_output = torch.cat([gat_output, gat_output_i], dim=1)\r\n\t\tuser_emb = self.w_out(gat_output)\r\n \r\n\t\titem_emb = self.item_embedding[item_id]\r\n\r\n\t\tinteraction = torch.mul(user_emb, item_emb)\r\n\t\tratings = torch.sum(interaction, dim = 1)\r\n\r\n\t\tx = torch.cat([user_emb, item_emb, interaction], dim=1)\r\n\t\tx1 = torch.tanh(self.l1(x))\r\n\t\tx2 = torch.tanh(self.l2(x1))\r\n\t\tx3 = self.l3(x2).reshape(-1)\r\n\r\n\t\tuser_b = self.user_bias[user_id].reshape(-1)\r\n\t\titem_b = self.item_bias[item_id].reshape(-1)\r\n\r\n\t\toutput = (ratings + x3) / 2. + user_b + item_b\r\n\r\n\t\tif mode == 'EXTRA':\r\n\t\t\tuser_emb_trd = self.user_embedding[user_id]\r\n\t\t\treturn output, user_emb, user_emb_trd\r\n\t\telse:\r\n\t\t\treturn output\r\n\t\r\n\tdef embedding_lookup(self, x):\r\n\t\treturn self.user_embedding[x]\r\n\r\n\tdef load_model(self, path):\r\n\t\tmodel_dict = torch.load(path)\r\n\t\tself.load_state_dict(model_dict)\r\n\r\n\tdef load_embedding(self, path):\r\n\t\tpretrained_dict = torch.load(path)\r\n\t\tmodel_dict = self.embedding_model.state_dict()\r\n\t\tpretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n\t\tmodel_dict.update(pretrained_dict)\r\n\t\tself.embedding_model.load_state_dict(model_dict)\r\n\r\n\tdef load_embedding_nn(self, path):\r\n\t\tpretrained_dict = torch.load(path)\r\n\t\tmodel_dict = self.state_dict()\r\n\t\tpretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n\t\tmodel_dict.update(pretrained_dict)\r\n\t\tself.load_state_dict(model_dict, strict=False)\r\n\r\n","sub_path":"code/amazon/IDCF-NN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"173824130","text":"from django.conf.urls import patterns, url\n\nfrom ul import views\n\nurlpatterns = patterns('',\n url(r'^exam$', views.exam, name='exam'),\n url(r'^done$', views.done, name='done'),\n url(r'^save$', views.save, name='save'),\n url(r'^update/(?P[0-9]+)$', views.update, name='update'),\n\n url(r'^exam/tf$', views.exam_tf, name='exam_tf'),\n url(r'^exam/mc$', views.exam_mc, name='exam_mc'),\n url(r'^exam/essay$', views.exam_essay, name='exam_essay'),\n url(r'^db/populate$', views.db_populate, name='db_populate'),\n url(r'^db/show$', views.db_show, name='db_show'),\n url(r'^sync$', views.db_sync, name='db_sync'),\n url(r'^grade$', views.db_grade, name='db_grade'),\n\n url(r'^$', views.index, name='index'),\n)\n","sub_path":"ul/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"235513462","text":"import sys\nout_file = open(sys.argv[2], 'w')\nwith open(sys.argv[1]) as in_file:\n num_of_cases = int(in_file.readline().rstrip())\n for i in range(num_of_cases):\n number = int(in_file.readline().rstrip())\n if number == 0:\n out_file.write('Case #' + str(i+1) + ': INSOMNIA\\n')\n continue\n the_set = set()\n for j in str(number):\n the_set.add(j)\n multiplier = 1\n while len(the_set) < 10:\n multiplier += 1\n new_number = number * multiplier\n for j in str(new_number):\n the_set.add(j)\n out_file.write('Case #' + str(i+1) + ': ' + str(number * multiplier) + '\\n')\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_mjsull_test.py","file_name":"16_0_1_mjsull_test.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"431373552","text":"from datetime import datetime\nimport boto3\n\n\nregion_name = 'us-east-1'\n\nec2 = boto3.resource('ec2', region_name=region_name)\n\n\ndef tag_successful_backup(volume):\n volume.create_tags(\n Tags=[\n {\n 'Key': 'Works For',\n \"Value\": \"Juice Analytics\"\n }\n ])\n\n\ndef backup_ebs_volume(VolumeId, Description):\n snapshot = ec2.create_snapshot(\n VolumeId=VolumeId,\n Description=Description\n )\n\n\ndef main_backup():\n # Iterate for all Instances within the Region\n for instance in ec2.instances.all():\n # Iterate for all Block Devices`\n for block_device in instance.block_device_mappings:\n # Skip if device is not EBS\n if block_device.get('Ebs') is None:\n continue\n volume_id = block_device.get('Ebs').get('VolumeId')\n # Assume all other devices are EBS\n # Iterate through tags and look for backups\n for tag in instance.tags:\n if tag['Key'] == \"Backup\" and tag['Value'] == \"Yes\":\n current_date = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')\n snapshot_description = instance.instance_id + \"_\" + current_date\n # Create EBS Snapshot\n backup_ebs_volume(volume_id, snapshot_description)\n # snapshot = ec2.create_snapshot(\n # VolumeId=volume_id,\n # Description=snapshot_description\n # )\n\n\nmain_backup()\n","sub_path":"ebs-snapshots.py","file_name":"ebs-snapshots.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"335299202","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAudio Captioning Datasets package.\n\"\"\"\n\nfrom .datasets.audiocaps import AudioCaps\nfrom .datasets.clotho import Clotho\nfrom .datasets.macs import MACS\n\n\n__author__ = \"Etienne Labbé (Labbeti)\"\n__author_email__ = \"labbeti.pub@gmail.com\"\n__license__ = \"MIT\"\n__maintainer__ = \"Etienne Labbé (Labbeti)\"\n__name__ = \"aac-datasets\"\n__status__ = \"Development\"\n__version__ = \"0.3.3\"\n","sub_path":"src/aac_datasets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"634297833","text":"import json\nfrom ibm_watson import ToneAnalyzerV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n\nauthenticator = IAMAuthenticator('')\ntone_analyzer = ToneAnalyzerV3(\n version='2017-09-21',\n authenticator=authenticator\n)\n\ntone_analyzer.set_service_url('https://api.eu-gb.tone-analyzer.watson.cloud.ibm.com/instances/4acf30d2-7c9f-488c-96aa-26322c52b47d')\n\ntext = 'One of the very famous Japanese comedian died of the new Coronavirus last night. Many Japanese including me felt shocked and really sad to hear the news of his death. I hope some stupid people will change their behaviors against the Goverment requests.'\n\ntone_analysis = tone_analyzer.tone(\n {'text': text},\n content_type='application/json',\n sentences = False\n).get_result()\nprint(json.dumps(tone_analysis, indent=2))\n","sub_path":"app/src/features/emotions-extractor-ibm-cloud.py","file_name":"emotions-extractor-ibm-cloud.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"543603084","text":"__author__ = 'mayank'\n\nimport pickle\nimport tensorflow as tf\nimport numpy as np\n\n\npickle_file = 'notMNIST.pickle'\n\n# Load dataset already stored\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\n\n# Reformat dataset\nimage_size = 28\nnum_hidden = 1024\nnum_labels = 10\n\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\n\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\n\n# Build the graph\n\nbatch_size = 128\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n # Input data. For the training data, we use a placeholder that will be fed\n # at run time with a training minibatch.\n tf_train_dataset = tf.placeholder(tf.float32,\n shape=(batch_size, image_size * image_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # before non-linearity\n weight_1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden]))\n biases_1 = tf.Variable(tf.zeros([num_hidden]))\n\n # after non-linearity\n weight_2 = tf.Variable(tf.truncated_normal([num_hidden,num_labels]))\n biases_2 = tf.Variable(tf.zeros([num_labels]))\n\n # Training computation.\n act_1 = tf.matmul(tf_train_dataset, weight_1) + biases_1\n rect_out = tf.nn.relu(act_1)\n act_2 = tf.matmul(rect_out,weight_2) + biases_2\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(act_2, tf_train_labels))\n\n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n\n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(act_2)\n valid_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weight_1) + biases_1),weight_2)+biases_2)\n test_prediction = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weight_1) + biases_1),weight_2)+biases_2)\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])\n\n# Run the model\nnum_steps = 1200\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print(\"Initialized\")\n for step in range(num_steps):\n # Pick an offset within the training data, which has been randomized.\n # Note: we could use better randomization across epochs.\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n # Generate a minibatch.\n batch_data = train_dataset[offset:(offset + batch_size), :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n # Prepare a dictionary telling the session where to feed the minibatch.\n # The key of the dictionary is the placeholder node of the graph to be fed,\n # and the value is the numpy array to feed to it.\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 500 == 0):\n print(\"Minibatch loss at step %d: %f\" % (step, l))\n print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n\n'''\nLearning rate = 0.05, N = 12000\nTest accuracy: 91.0%\n\n'''","sub_path":"first_net.py","file_name":"first_net.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"578666","text":"#!/usr/bin/env python\n\"\"\"\nUsage:\n script.py \n\"\"\"\n\nfrom docopt import docopt\nfrom datetime import datetime\nimport time\nimport asyncio\nfrom kafka import KafkaProducer\n\nMESSAGE_START_BYTE = b'\\x0B'\nMESSAGE_END_BYTE = b'\\x1C'+b'\\x0D'\nMESSAGE_SEGMENT_END_BYTE = b'\\x0D'\n\nclass TCPServerProducer:\n MAX_CONNECTION = 50\n LOG_FORMAT =\"{} UTC_TS\\t\"\\\n \"{}\"\n \n def __init__(self,kafka_host,kafka_port,\n tcp_host,tcp_port,\n topic,log_topic):\n self.kafka_host = kafka_host\n self.kafka_port = kafka_port\n self.tcp_host = tcp_host\n self.tcp_port = tcp_port\n self.topic = topic\n self.log_topic = log_topic\n self.producer = KafkaProducer(bootstrap_servers=[\"{}:{}\".format(kafka_host,kafka_port)])\n self.connections = {}\n \n def log(self,msg):\n self.producer.send( self.log_topic,\n self.LOG_FORMAT.format( datetime.now().timestamp(),\n msg\n ) \\\n .encode()\n )\n\n def run(self):\n self.log(\"running\")\n asyncio.run(self._async_run())\n \n async def _async_run(self):\n tcpServer = await asyncio.start_server(self.connection_handler,self.tcp_host,self.tcp_port)\n await tcpServer.serve_forever()\n\n async def connection_handler(self,reader,writer):\n addr = str(writer.get_extra_info(\"peername\"))\n # A new connection, but we can accept no more\n if addr not in self.connections and \\\n len(self.connections)>=self.MAX_CONNECTION:\n self.refuse_client(addr,writer)\n return\n # Add connection\n self.add_client(addr,writer)\n # Read data from connection\n remaining_data = b\"\"\n try:\n while True:\n data = await reader.read(8192) # 1024*8 bytes\n if not data:\n break\n data = remaining_data + data\n extraction = self.extract_hl7_messages(data)\n messages = extraction[\"messages\"]\n remaining_data = extraction[\"remaining\"]\n for msg in messages:\n self.producer.send(self.topic,msg)\n except BrokenPipeError:\n \"\"\"\n Catches connecton reset by peer when we are sending the batched data,\n which is also when we cannot check for reader. The broken connection\n on the writer side will ultimately lead to BrokenPipeError on the\n reader side. Hence\n \"\"\"\n pass\n finally:\n self.remove_client(addr)\n \n def extract_hl7_messages(self,byte_stream):\n messages = []\n remaining = byte_stream\n while True:\n try:\n start_idx = remaining.index(MESSAGE_START_BYTE)\n end_idx = remaining.index(MESSAGE_END_BYTE,start_idx+1)+1\n msg = remaining[start_idx:end_idx+1]\n messages.append(msg)\n remaining = remaining[end_idx+1:]\n except ValueError:\n break\n return {\"messages\":messages,\"remaining\":remaining}\n\n def refuse_client(self,addr,writer):\n self.log(\"{} refused\".format(addr))\n writer.close()\n \n def add_client(self,addr,writer):\n if addr not in self.connections:\n self.log(\"{} accepted\".format(addr))\n self.connections[addr] = writer\n else:\n self.remove_client(addr)\n self.add_client(addr)\n \n def remove_client(self,addr):\n if addr in self.connections:\n self.log(\"{} closed\".format(addr))\n writer = self.connections.pop(addr)\n try:\n writer.close()\n except ConnectionResetError:\n pass\n\n def cleanup(self):\n self.log(\"shutdown\")\n self.producer.flush()\n self.producer.close()\n\ndef main():\n options = docopt(__doc__)\n kafka_host = options[\"\"]\n kafka_port = options[\"\"]\n tcp_host = options[\"\"]\n tcp_port = options[\"\"]\n topic = options[\"\"]\n log_topic = options[\"\"]\n tcp_server_producer = TCPServerProducer(kafka_host,kafka_port,\n tcp_host,tcp_port,\n topic,log_topic)\n try:\n tcp_server_producer.run()\n except KeyboardInterrupt:\n pass\n finally:\n tcp_server_producer.cleanup()\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/kafka-tcp-server-producer.py","file_name":"kafka-tcp-server-producer.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"40554426","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom watermarkmypic.models import Document\nfrom watermarkmypic.forms import DocumentForm\nfrom watermarkmypic.watermarkUtils import watermark\nimport watermarkmypic.interfaces\nfrom PIL import Image\nfrom oss.oss_api import *\n\ndef list(request):\n # Handle file upload\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n newdoc = Document(docfile = request.FILES['docfile'])\n newdoc.save()\n im = Image.open(newdoc.docfile)\n mark = Image.open('/home/lets-celebrate-pride/letscelebratepride/watermarkmypic/overlay/overlay.jpg')\n file_name = str(newdoc.id) + '.jpg'\n file_path = '/home/lets-celebrate-pride/letscelebratepride/media/media/results/' + file_name\n watermarked_image = watermark(im, mark, 'scale', 0.4).save(file_path)\n\n #save to oss\n oss_api_id = watermarkmypic.interfaces.getAliyunAccessKeyId()\n oss_api_secret = watermarkmypic.interfaces.getAliyunAccessKeySecret()\n oss = OssAPI(\"oss-cn-qingdao.aliyuncs.com\", oss_api_id, oss_api_secret)\n res = oss.put_object_from_file(\"letscelebratepride\", file_name, file_path)\n oss_object_link = 'http://letscelebratepride.oss-cn-qingdao.aliyuncs.com/' + file_name\n\n # Redirect to the document list after POST\n #return HttpResponseRedirect(reverse('watermarkmypic.views.list'))\n return render_to_response(\n 'watermarkmypic/show.html',\n {'doc_path': oss_object_link, 'order': newdoc.id},\n context_instance=RequestContext(request)\n )\n else:\n form = DocumentForm() # A empty, unbound form\n\n # Load documents for the list page\n documents = Document.objects.all()\n\n # Render list page with the documents and the form\n return render_to_response(\n 'watermarkmypic/list.html',\n {'documents': documents, 'form': form},\n context_instance=RequestContext(request)\n )","sub_path":"letscelebratepride/watermarkmypic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"183534814","text":"import json\nfrom requests_futures.sessions import FuturesSession\nfrom logger import Logger\n\n\nSCAN_RANGE = range(1, 20)\n\nlog = Logger(debug=True)\n\n\ndef listings(base_url, needles):\n ''' takes the needles as a || seperated list of needles and\n returns a map of neeldes to a list of dictionaries for matches '''\n needles = [kw.strip() for kw in needles.split('||')]\n\n # Prepare the URL for requests\n url = base_url + '/tv/getProgInfo?major={}'\n session = FuturesSession(max_workers=30)\n\n # initialize our matches\n matches = {}\n for needle in needles:\n matches[needle] = []\n\n # Check each channel concurrently\n responses = {}\n for i in SCAN_RANGE:\n responses[i] = session.get(url.format(i))\n\n # Wait on all responses\n for i in SCAN_RANGE:\n responses[i] = responses[i].result()\n log.debug(\"channel {} has responded\".format(i))\n\n # Filter out non-200 responses\n responses_200 = []\n for i in SCAN_RANGE:\n if responses[i].status_code == 200:\n responses_200.append(responses[i].text)\n\n # Make nice JSON of listings\n listings = []\n for response in responses_200:\n tmp = json.loads(response)\n tmp = {\n 'title': tmp['title'],\n 'major': tmp['major'],\n 'callsign': tmp['callsign'],\n 'duration': tmp['duration'],\n 'startTime': tmp['startTime'],\n 'isRecording': tmp['isRecording']\n }\n listings.append(tmp)\n\n # Map listings to matching needles\n for listing in listings:\n for needle in needles:\n if needle.lower() in listing['title'].lower():\n log.info(\"Match for {} with {}\".format(needle,\n listing['title']))\n matches[needle].append(listing)\n\n return matches\n","sub_path":"python/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"231593549","text":"#! /usr/bin/python\n\n# This script plots the vcftools relatedness2 values\n\n\nfrom sys import argv\nimport os\nimport numpy as np\nimport pandas as pd\nimport allel as al\nimport zarr\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nsns.set_style('whitegrid')\n\n\n\n\nif __name__ == \"__main__\":\n\n zarrPath = argv[1]\n\n zarrname = zarrPath.strip('.zarr/')\n # create folders\n\n vcffP = os.path.join(zarrname, 'figs/vcftools/')\n\n folderList = [vcffP]\n for folder in folderList:\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\n\n variants = zarr.open_group(zarrPath, mode='r')\n fpath = argv[2]\n\n rel2 = pd.read_csv(fpath, sep= '\\t')\n\n ids = pd.read_table('samples109.txt', sep='\\t', index_col=False)\n ids['id_nest'] = ids['id'] + '_' + ids['nest']\n ids = ids.sort_values(by='nest')\n\n nInds = ids.groupby(by= ['nest', 'pop']).count()['sample']\n #np.all(list(variants['samples']) == ids['id_nest'].values)\n\n samples = list(variants['samples'])\n subsIndex = [samples.index(s) for s in ids['id_nest']]\n ids['subsIndex'] = subsIndex\n ids.sort_values(by=['subsIndex'], inplace= True)\n\n # Manichaikul relatedness (see Manichaikul2010.pdf table 1)\n ## based on the KING inference. You can interpret the relatedness_phi as the probability to find identical alleles when randomly sampling one allele from each heterozygous individual. So for one individual AB, and the parent AC, there is p=0.25 to choose A from both individuals. That probability is 0.5 when AB is compared to AB.\n ## -->> an estimated kinship coefficient range >0.354, [0.177, 0.354], [0.0884, 0.177] and [0.0442, 0.0884] corresponds to duplicate/MZ twin, 1st-degree, 2nd-degree, and 3rd-degree relationships respectively\n\n\n m = np.zeros(shape=(109,109))\n m = pd.DataFrame(m, index= ids['id_nest'], columns= ids['id_nest'])\n for i, ind1 in enumerate(m.index):\n for j, ind2 in enumerate(m.columns):\n mani = rel2['RELATEDNESS_PHI'][(rel2['INDV1'] == ind1) & (rel2['INDV2'] == ind2)]\n m.iloc[i,j] = np.float(mani)\n\n # truncate values < 0 to 0\n m[m < 0] = 0\n rel01 = rel2['RELATEDNESS_PHI'].astype('float')\n rel01[rel01 < 0] = 0\n\n fig, ax = plt.subplots(figsize=(12,6))\n sns.distplot(rel2['RELATEDNESS_PHI'])\n ax.set_xlabel('Manichaikul relatedness')\n fig.savefig(os.path.join(vcffP, 'rel2.distplot.untruncated.png'), bbox_inches='tight')\n\n\n fig, ax = plt.subplots(figsize=(12,6))\n sns.distplot(rel01) #rel2['RELATEDNESS_PHI'])\n ax.set_xlabel('Manichaikul relatedness')\n #plt.show()\n fig.savefig(os.path.join(vcffP, 'rel2.distplot.png'), bbox_inches='tight')\n\n fig, ax = plt.subplots(figsize=(16,16))\n sns.heatmap(m, cmap='viridis', annot=False)\n ax.set_title(\"Relatedness matrix (Manichaikul - KING)\")\n fig.tight_layout()\n fig.savefig(os.path.join(vcffP, 'rel2.heat.png'), bbox_inches='tight')\n\n\n\n","sub_path":"script/relatePlot.py","file_name":"relatePlot.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"620094899","text":"#!/usr/bin/python\n# coding:utf-8\n\n\"\"\"\n@author: yyhaker\n@contact: 572176750@qq.com\n@file: 20.有效的括号.py\n@time: 2019/8/5 09:43\n\"\"\"\n\"\"\"\nleetcode20: 有效的括号\n\"\"\"\nclass Solution:\n def isValid(self, s: str) -> bool:\n # 思路:使用栈,依次将字符串中的字符入栈,当碰到右括号时,\n # 判断栈顶字符是否匹配,若匹配,弹出栈;若不匹配,则直接返回False;\n inflect = {\")\": \"(\", \"]\":\"[\", \"}\":\"{\"}\n stack = []\n for ch in s:\n if ch in \"([{\":\n stack.append(ch)\n if ch in \")]}\":\n if len(stack) == 0 or stack.pop() != inflect[ch]:\n return False\n return not stack","sub_path":"algorithms/常见面试编程题(剑指offer&leetcode)/字符串/20.有效的括号.py","file_name":"20.有效的括号.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"18848208","text":"import pickle\nfrom geopy.geocoders import Nominatim\nimport math\n\nprint(\"Unpickling firemap\")\nfiremap = pickle.load(open(\"firemap.p\",\"rb\"))\n\nprint(\"Looking up address lat/lon\")\ngeolocator = Nominatim()\nlocation = geolocator.geocode(\"Eldorado Springs Dr, Eldorado Springs, CO 80025\") #\"5743 Teller St Arvada CO 80002\")\n\ndef lldist(p,l):\n dlat = p[1] - l.latitude\n dlon = p[0] - l.longitude\n dsq = dlat*dlat + dlon*dlon\n return math.sqrt(dsq)\n\nprint(\"Searching for result\")\nSF = 50\nlat = location.latitude\nlon = location.longitude\nlat_int = int(lat*SF)\nlon_int = int(lon*SF)\nmindist = float(\"inf\")\nval = -1\nfor pt in firemap[lon_int][lat_int]:\n dist = lldist(pt,location)\n if dist < mindist:\n mindist = dist\n val = pt[2]\nprint(val)\n","sub_path":"Scripts/test_firemap.py","file_name":"test_firemap.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"63683540","text":"'''\n#meu algoritimo usando laço\nnumero = int(input('Digite o número que você quer saber o fatorial: '))\nfat = 1\nfor f in range(1, numero+1):\n fat *= f\nprint(fat)\n'''\n#usando while\n'''\nnum = int(input('Digite o numero que deseja saber o fatorial: '))\nc = 1\nfat = 1\nwhile c <= num:\n fat *= c\n c += 1\nprint(fat)\n'''\n#usando biblioteca\nfrom math import factorial\nnum = int(input('Digite um numero: '))\nfac = factorial(num)\nprint(fac)","sub_path":"PycharmProjects/PythonexExercícios/cod/ex060.py","file_name":"ex060.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"580575145","text":"import numpy as np\nimport pandas as pd\nimport os\nfrom scipy.stats import chi2_contingency\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_score\nfrom sklearn import metrics\nimport pickle\n\ninsurance_data = pd.read_csv('./insurance_claims.csv')\n\n\n\n# dropping unneccessary columns from the dataset\ninsurance_data.drop(['collision_type', 'policy_number', 'policy_bind_date', 'insured_zip', 'incident_date', 'incident_location', 'auto_model'], axis = 1, inplace = True)\n\nreplace_values = {'?' : 'UNKNOWN'} \ninsurance_data = insurance_data.replace({\"property_damage\": replace_values, 'police_report_available': replace_values})\n\n\n\n# Removing negative values from the columns\nreplace_values = {-1000000:0}\ninsurance_data = insurance_data.replace({\"umbrella_limit\": replace_values})\ninsurance_data['capital-loss'] = insurance_data['capital-loss'].abs()\n\n# Here compare only numerical features\ncorr_mat = insurance_data.corr().abs()\nhigh_corr_var=np.where(corr_mat>0.8)\n\n# removing the highly correlated numerical column \ninsurance_data.drop(['age', 'injury_claim', 'property_claim', 'vehicle_claim'], axis=1, inplace=True)\n\n# Here we only compare catagorical features\n\nalpha = 0.05\nfor cols_1 in insurance_data.columns:\n if insurance_data[cols_1].dtype == 'object':\n for cols_2 in insurance_data.columns:\n if insurance_data[cols_2].dtype == 'object':\n table = pd.crosstab(insurance_data[cols_1], insurance_data[cols_2], margins = False) \n stat, p, dof, expected = chi2_contingency(table)\n if p <= alpha:\n #'Variables are associated (reject H0)'\n if cols_1 != cols_2:\n \tcontinue\n # note down the columns\n else:\n continue\n #'Variables are not associated(fail to reject H0)'\n\n# removing columns which gave p-value less than alpha\ninsurance_data.drop(['incident_state', 'police_report_available', 'incident_severity', 'authorities_contacted', 'property_damage'], axis = 1, inplace = True)\n\n# insurance_data.to_csv('./cleaned_data.csv')\n\n\nfor cols in insurance_data.columns:\n if insurance_data[cols].dtypes == 'object':\n if cols != 'fraud_reported':\n one_hot = pd.get_dummies(insurance_data[cols])\n insurance_data.drop(cols,axis = 1, inplace = True)\n insurance_data = pd.concat([insurance_data, one_hot], axis=1)\n\ninsurance_data = insurance_data.append(insurance_data.loc[insurance_data['fraud_reported'] == 'Y'])\n\ny = insurance_data['fraud_reported']\nX = insurance_data.drop('fraud_reported', axis = 1)\n\ny = pd.get_dummies(y, drop_first=True)\n\n\n\nkfold = StratifiedKFold(n_splits=5)\nkfold.get_n_splits(X, y)\n\nfor train_index, test_index in kfold.split(X, y):\n X_train = X.iloc[train_index,:]\n X_test = X.iloc[test_index,:]\n y_train = y.iloc[train_index]\n y_test = y.iloc[test_index]\n model = RandomForestClassifier(n_estimators=100)\n model.fit(X_train, y_train)\n y_pred = model.predict_proba(X_test)[:,1]\n \n matrix = confusion_matrix(np.where(y_pred >= 0.6, 1, 0), y_test.iloc[:])\n fpr = matrix[0,1]/(matrix[0,1]+matrix[1,1])\n tpr = matrix[0,0]/(matrix[0,0]+matrix[1,0])\n\n\n\nfilename = './rfc_model.sav'\npickle.dump(model, open(filename, 'wb'))\n\n\nloaded_model = pickle.load(open(filename, 'rb'))\nprint(loaded_model.predict(X.iloc[100:101,:]))","sub_path":"insurance_model.py","file_name":"insurance_model.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"449445227","text":"from peslearn.ml import NeuralNetwork\nfrom peslearn import InputProcessor\nimport numpy as np\nimport torch\nfrom compute_energy import pes\nimport pyforce\n\nnp.set_printoptions(threshold=5000, linewidth=200, precision=5, suppress=True)\ntorch.set_printoptions(threshold=5000, linewidth=200, precision=12)\n\n# Load NN model\nnn = NeuralNetwork('model_data/PES.dat', InputProcessor(''), molecule_type='A2B')\nparams = {'layers': (64, 64), 'morse_transform': {'morse': True, 'morse_alpha': 1.2000000000000002}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'std', 'lr': 0.8}\nX, y, Xscaler, yscaler = nn.preprocess(params, nn.raw_X, nn.raw_y)\nmodel = torch.load('model_data/model.pt')\n\n# Construct computation graph for sending raw coordinates through the NN model\n# so that derivatives d^nE/d(coord)^n can be found.\ndef transform(interatomics):\n \"\"\" Takes Torch Tensor (requires_grad=True) of interatomic distances, manually transforms geometry to track gradients, computes energy\n Hard-coded based on hyperparameters above. Returns: energy in units the NN model was trained on\"\"\"\n inp2 = -interatomics / 1.2\n inp3 = torch.exp(inp2)\n inp4 = torch.stack((inp3[0], inp3[1] + inp3[2], torch.sum(torch.pow(inp3[1:],2))), dim=0) # Careful! Degree reduce?\n inp5 = (inp4 * torch.tensor(Xscaler.scale_, dtype=torch.float64)) + torch.tensor(Xscaler.min_, dtype=torch.float64)\n out1 = model(inp5)\n energy = (out1 * torch.tensor(yscaler.scale_, dtype=torch.float64)) + torch.tensor(yscaler.mean_, dtype=torch.float64)\n return energy\n\n# Compute force constants with interatomic distances\n# Define equilbrium geometry with interatomic distances, cartesians, and define internal coordinate objects\nm = np.array([1.007825032230, 1.007825032230, 15.994914619570])\ncartesians = torch.tensor([[ 0.0000000000,0.0000000000,0.9496765298],\n [ 0.0000000000,0.8834024755,-0.3485478124],\n [ 0.0000000000,0.0000000000,0.0000000000]], dtype=torch.float64, requires_grad=True)\ninteratomics = pyforce.transforms.get_interatomics(3)\n\n# Construct B tensors (interatomic distances, idm), remove from torch computation graphs and convert to numpy arrays\nB1_idm, B2_idm, B3_idm = pyforce.compute_btensors(interatomics, cartesians, order=3)\nB1_idm, B2_idm, B3_idm = B1_idm.detach().numpy(), B2_idm.detach().numpy(), B3_idm.detach().numpy()\n\n# Compute derivatives of PES at equilbrium geometry\n#eq_geom = [1.570282260121,0.949676529800,0.949676529800]\neq_geom = [1.570282260121,0.949676529800,0.969676529800]\ntmp = []\nfor i in eq_geom:\n tmp.append(torch.tensor(i, dtype=torch.float64, requires_grad=True))\ngeom = torch.stack(tmp)\nE = transform(geom)\n\ninteratomic_hess, interatomic_cubic = pyforce.differentiate_nn(E, geom, order=3)\n\nhess, cubic = pyforce.transforms.new_differentiate_nn(E, tmp, order=3)\n\nprint(torch.allclose(hess,interatomic_hess))\nprint(torch.allclose(cubic,interatomic_cubic))\n\n#\n","sub_path":"Examples/water1/play_around.py","file_name":"play_around.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"565344467","text":"# -*- coding: utf-8 -*-\nimport socket\nimport time\nfrom concurrent import futures\n\n\ndef blocking_way():\n \"\"\"同步阻塞\"\"\"\n sock = socket.socket()\n # blocking\n sock.connect((\"example.com\", 80))\n request = \"GET / HTTP/1.0\\r\\nHost: example.com\\r\\n\\r\\n\"\n sock.send(request.encode(\"ascii\"))\n response = b\"\"\n chunk = sock.recv(4096)\n while chunk:\n response += chunk\n # blocking 从socket中读取4K字节数据\n chunk = sock.recv(4096)\n return response\n\n\ndef process_way():\n \"\"\"多进程方式\"\"\"\n workers = 10\n with futures.ProcessPoolExecutor(workers) as executor:\n futs = {executor.submit(blocking_way) for _ in range(10)}\n return len([fut.result() for fut in futs])\n\n\nt1 = time.time()\nprocess_way()\nt2 = time.time()\nprint(t2 - t1)\n","sub_path":"面试问题/5-协程/example04.py","file_name":"example04.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"369471565","text":"import tensorflow as tf\nimport os,sys\n\n\nclass CreateSession():\n def __init__(self, config): \n self.config = config\n\n if self.config['accelerator'] == 'npu':\n self.estimator_config = tf.ConfigProto(\n inter_op_parallelism_threads=10,\n intra_op_parallelism_threads=10,\n allow_soft_placement=True)\n elif self.config['accelerator'] == 'gpu':\n self.estimator_config = tf.ConfigProto(allow_soft_placement=False)\n\n import horovod.tensorflow as hvd\n self.estimator_config.gpu_options.visible_device_list = str(hvd.local_rank())\n self.estimator_config.intra_op_parallelism_threads = 1\n self.estimator_config.inter_op_parallelism_threads = 5\n\n # enable XLA\n print(\"XLA is activated.\")\n self.estimator_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n\n else:\n raise ValueError(\"Invalid device: %s\" % self.config['accelerator'])\n\n self.estimator_config.gpu_options.allow_growth = True\n\n self.set_env()\n\n def set_env(self):\n gpu_thread_count = 2\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n os.environ['TF_GPU_THREAD_COUNT'] = str(gpu_thread_count)\n os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n\n","sub_path":"built-in/TensorFlow/Official/cv/image_classification/DenseNet121_for_TensorFlow/99-origin/densenet/create_session.py","file_name":"create_session.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"432492209","text":"#coding='utf-8'\n'''\n爬取“一些”知乎用户\n多线程\nauthor:you2mu\ntime:2016.8.13\n'''\n\n\nimport time\nimport re\nfrom queue import Queue\nfrom threading import Thread\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nurl_queue = Queue()\nfor i in range(0,2000,10): #多线程 url放入队列\n url = 'https://www.zhihu.com/node/TopStory2FeedList?'+'params={\"offset\":%d,\"start\":\"%d\"}&method=next'%(i,i-1)\n url_queue.put(url)\nusers_queue = Queue()\n\n\nclass Zhihu:\n\n def __init__(self) :\n self.session = requests.Session()\n self.xsrf = None\n self.links = Queue() #save users links\n self.users = set()\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Host\": \"www.zhihu.com\",\n \"Upgrade-Insecure-Requests\": \"1\"}\n self.session.headers.update(headers)\n\n def log(self,str) :\n #str = str.encode()\n now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n log = now_time + '\\t\\t' + str\n with open('log.txt','a',encoding='utf-8') as f:\n f.writelines(log + '\\n')\n print(log)\n\n #common function\n def get_data(self,path) :\n try:\n html = self.session.get(path).text\n except:\n self.log('connect error when get the data')\n exit(1)\n soup = BeautifulSoup(html,'html.parser')\n return soup\n\n def get_xsrf(self) :\n path = 'https://www.zhihu.com'\n soup = self.get_data(path)\n tag = soup.find(\"input\",{\"name\":\"_xsrf\"})\n self.xsrf = tag.attrs['value']\n\n def login(self) :\n data = {\n '_xsrf': self.xsrf,\n 'password':'yourpassword', #parements\n 'remember_me':'true',\n 'phone_num':'yourphone'}\n try:\n lgiresp = self.session.post('https://www.zhihu.com/login/phone_num',data=data)\n except:\n self.log('can\\'t connect server when you log in')\n exit(1)\n lgiresp = lgiresp.json()\n\n if lgiresp['msg'] =='登录成功':\n self.log('logged in successful')\n return self.session\n else:\n self.log(lgiresp['msg'])\n self.log('you have not logged in successful')\n exit(1)\n\n def gt(self) :\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36\",\n \"X-Xsrftoken\":self.xsrf,\n \"Host\":\"www.zhihu.com\",\n \"Origin\":\"https://www.zhihu.com/\",\n \"Connection\":\"keep-alive\",\n # \"Cookie\":self.session\n } #设置为全局变量\n time.sleep(4)\n while not url_queue.empty():\n url = url_queue.get()\n html = self.session.post(url,headers = headers)\n try:\n data = html.json() #返回的为列表\n except:\n self.log('analysize data error')\n msg = data['msg'] #lock\n if msg==None:\n \tself.log('scraping over')\n \texit(1)\n for j in range(0,10): #知乎的xhr更新为10条\n data = msg[j]\n soup = BeautifulSoup(data,'html.parser')\n author_temp = soup.findAll('a',{'class':'author-link'}) #返回的是列表\n # print(author_temp)\n for m in author_temp:\n \t# author = author_temp[0]\n author = 'https://www.zhihu.com'+ m.attrs['href']\n self.users.add(author)\n self.log(author)\n\nif __name__ == '__main__' :\n zhihu = Zhihu()\n zhihu.get_xsrf() #step one get xsrf\n zhihu.login() #step two login\n threads = []\n for i in range(5):\n t = Thread(target=zhihu.gt,args=())\n t.start()\n threads.append(t)\n for thread in threads :\n thread.join()\n","sub_path":"zhihuuser.py","file_name":"zhihuuser.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"557370481","text":"from reportlab.pdfgen import canvas\n\ndef getTotal(list):\n total=0\n for data in list:\n total=total+data[4]\n return total\n\ndef getTotalTax(list):\n totalTax=0\n for data in list:\n totalTax=totalTax+data[5]\n return totalTax\n\ndef getTotalDis(list):\n totalDis=0\n for data in list:\n totalDis=totalDis+data[3]\n return totalDis\n\ndef rightalingn(pdf,string,left,right,ycoordinate):\n length=len(string)\n totalLength=(right-left)/7\n spaces=int(totalLength-length)\n pdf.drawString(right,ycoordinate,\" \"*spaces)\n left=left+(7*spaces)\n pdf.drawString(left,ycoordinate,string)\n\ndef header(header,pdf):\n pdf.setTitle(header.date+\"Invoice\")\n logo=\"pages/logo.png\"\n pdf.drawInlineImage(logo,450,750)\n\n pdf.line(30,815,350,815)\n pdf.setFont(\"Courier-Bold\",20)\n pdf.drawString(30,800,\"Pizza Go\")\n pdf.setFont(\"Courier-Bold\",11)\n pdf.drawString(30,785,\"Thakur College Of Engineering and Technology,\")\n pdf.drawString(30,770,\"Kandivali East,\")\n pdf.drawString(30,755,\"400101. Phone: 8888888888\")\n\n pdf.line(30,753,350,753)\n\n pdf.drawString(30,735,\"Invoice Number: \"+ str(int(header.InvoiceNumber)))\n pdf.drawString(30,720,\"Customer Name: \"+ str(header.CustomerName))\n pdf.drawString(30,705,\"Email id: \"+ str(header.Email))\n pdf.drawString(30,690,\"Date: \"+ str(header.date))\n \n\ndef middle(pdf):\n pdf.line(30,677,550,677)\n \n pdf.drawString(30,668,\"Sr.No.\")\n pdf.drawString(75,668,\"Product Name\")\n pdf.drawString(200,668,\"Quantity\")\n pdf.drawString(260,668,\"Rate\")\n pdf.drawString(320,668,\"Discount\")\n pdf.drawString(400,668,\"Total\")\n pdf.drawString(490,668,\"Tax\")\n \n pdf.line(30,662,550,662)\n pdf.line(73,677,73,150)\n pdf.line(198,677,198,150)\n pdf.line(258,677,258,150)\n pdf.line(318,677,318,150)\n pdf.line(398,677,398,150)\n pdf.line(488,677,488,150)\n pdf.line(30,150,550,150)\n\ndef additem(product,pdf,ycoordinate):\n while(len(product.name)>18):\n pdf.drawString(75,ycoordinate,product.name[:18]+\"-\")\n product.name=product.name[18:]\n ycoordinate=ycoordinate-15\n \n pdf.drawString(75,ycoordinate,product.name)\n pdf.drawString(200,ycoordinate,str(product.quantity))\n rightalingn(pdf,\"%.2f\" %product.rate,260,316,ycoordinate)\n print(product.rate)\n rightalingn(pdf,\"%.2f\" %product.discount,320,398,ycoordinate)\n rightalingn(pdf,\"%.2f\" %product.total,400,488,ycoordinate)\n rightalingn(pdf,\"%.2f\" %product.tax,490,552,ycoordinate)\n return (ycoordinate-15)\n\ndef footer(pdf,list):\n pdf.drawString(30,135,\"Total Discount:\")\n rightalingn(pdf,\"-\"+\"%.2f\" %getTotalDis(list)+\" INR\",393,488,135)\n pdf.drawString(30,120,\"Gross Total(Discount Included):\")\n rightalingn(pdf,\"%.2f\" %getTotal(list)+\" INR\",400,488,120)\n pdf.drawString(30,105,\"Tax:\")\n rightalingn(pdf,\"+\"+\"%.2f\" %getTotalTax(list)+\" INR\",393,488,105)\n pdf.line(30,100,550,100)\n pdf.drawString(30,90,\"Grand Total: \")\n rightalingn(pdf,\"%.2f\" %(getTotal(list)+getTotalTax(list))+\" INR\",400,488,90)\n pdf.drawString(400,50,\"Authorized Signatory\")\n pdf.setFont(\"Courier-Bold\",7)\n\n","sub_path":"pages/pdfgen.py","file_name":"pdfgen.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"309462369","text":"\"\"\"This tool is used for converting JabRef's journal name abbreviation files,\n,\ninto JSON.\n\nUsage example:\n```\npython3 update.py\n```\n\"\"\"\nimport argparse\nimport json\n\nimport requests\n\n\ndef _main():\n _parse_cmd_arguments()\n\n urls = [\n \"https://raw.githubusercontent.com/JabRef/jabref/master/src/main/resources/journals/IEEEJournalListText.txt\",\n \"https://raw.githubusercontent.com/JabRef/jabref/master/src/main/resources/journals/journalList.txt\",\n ]\n\n out = {}\n for url in urls:\n r = requests.get(url)\n\n assert r.status_code == 200\n\n # read input file into dictionary\n for line in r.text.split(\"\\n\"):\n sline = line.strip()\n if sline is None:\n break\n if len(sline) == 0 or sline[0] == \"#\":\n continue\n k, v = sline.split(\"=\")\n out[k.strip()] = v.strip()\n\n with open(\"journals.json\", \"w\") as f:\n json.dump(out, f, indent=2)\n return\n\n\ndef _parse_cmd_arguments():\n parser = argparse.ArgumentParser(description=\"Update journals.json.\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n _main()\n","sub_path":"betterbib/data/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"133889195","text":"import time\nfrom FWObject import FWObject\nfrom Node import Node\nfrom Tokenizer import Tokenizer\nfrom Utils import Utils\nfrom Vocabulary import Vocabulary\nfrom WordTag import WordTag\nfrom os.path import dirname, join\n\nutils = Utils()\n\nclass RDRSegmenter:\n def __init__(self):\n self._root = None\n try:\n fname = join(dirname(__file__), \"SCRDR_Model.RDR\")\n self.constructTreeFromRulesFile(fname)\n except IOError as e:\n raise e\n @property\n def root(self):\n return self._root\n @root.setter\n def root(self,value:Node):\n self._root = value\n def constructTreeFromRulesFile(self, rulesFilePath:str):\n\n self.root = Node(FWObject(False), \"NN\", None, None, None, 0)\n\n currentNode = self.root\n currentDepth = 0\n with open(rulesFilePath,'r',encoding='utf8') as rulesFile:\n for indexFileRule,line in enumerate(rulesFile):\n depth = 0\n for i in range(0,6):\n if line[i] == '\\t':\n depth += 1\n else:\n break\n if indexFileRule==0:\n continue\n line = line.strip()\n if len(line) == 0:\n continue\n\n if \"cc:\" in line:\n continue\n # print(line.split(\" : \")[0].strip())\n condition = utils.getCondition(line.split(\" : \")[0].strip())\n conclusion = utils.getConcreteValue(line.split(\" : \")[1].strip())\n\n node = Node(condition, conclusion, None, None, None, depth)\n\n if depth > currentDepth:\n currentNode.setExceptNode(node)\n else:\n if depth == currentDepth:\n currentNode.setIfnotNode(node)\n else:\n while currentNode.depth != depth:\n currentNode = currentNode.fatherNode\n currentNode.setIfnotNode(node)\n node.setFatherNode(currentNode)\n\n currentNode = node\n currentDepth = depth\n\n def findFiredNode(self,object:FWObject)->Node:\n currentN = self._root\n firedN = None\n while True:\n if currentN.satisfy(object):\n firedN = currentN\n if currentN.exceptNode == None :\n break\n else :\n currentN = currentN.exceptNode\n else:\n if currentN.ifnotNode == None:\n break\n else :\n currentN = currentN.ifnotNode\n return firedN\n def allIsLetter(self,strs:str)->bool:\n\n for char in strs:\n if char.isalpha() ==False:\n return False\n return True\n def allIsUpper(self,strs:str)->bool:\n\n for char in strs:\n if char.isupper() ==False:\n return False\n return True\n def getInitialSegmentation(self,sentence:str)->list:\n wordtags = []\n vocab = Vocabulary()\n for regex in utils.NORMALIZER_KEYS:\n if regex in sentence:\n sentence = sentence.replace(regex, utils.NORMALIZER[regex])\n tokens = sentence.split()\n lowerTokens = sentence.lower().split()\n senLength = len(tokens)\n i = 0\n while i < senLength :\n token = tokens[i]\n if self.allIsLetter(token) :\n if token[0].islower() and (i + 1) < senLength:\n if tokens[i + 1][0].isupper():\n wordtags.append(WordTag(token, \"B\"))\n i+=1\n continue\n isSingleSyllabel = True\n for j in range(min(i + 4, senLength), i + 1,-1):\n word = \" \".join(lowerTokens[i: j])\n if word in vocab.VN_DICT or word in vocab.VN_LOCATIONS or word in vocab.COUNTRY_L_NAME:\n wordtags.append(WordTag(token, \"B\"))\n for k in range(i+1,j):\n wordtags.append(WordTag(tokens[k], \"I\"))\n\n i = j - 1\n isSingleSyllabel = False\n break\n\n if isSingleSyllabel :\n lowercasedToken = lowerTokens[i]\n\n if lowercasedToken in vocab.VN_FIRST_SENT_WORDS \\\n or token[0].islower() \\\n or self.allIsUpper(token) \\\n or lowercasedToken in vocab.COUNTRY_S_NAME \\\n or lowercasedToken in vocab.WORLD_COMPANY : \\\n\n wordtags.append(WordTag(token, \"B\"))\n i+=1\n continue\n ilower = i + 1\n for ilower in range(i + 1 ,min(i + 4, senLength)):\n ntoken = tokens[ilower]\n if ntoken.islower() \\\n or not self.allIsLetter(ntoken) \\\n or ntoken==\"LBKT\" or ntoken==\"RBKT\" :\n break\n\n if ilower > i + 1:\n isNotMiddleName = True\n if lowercasedToken in vocab.VN_MIDDLE_NAMES and i >= 1:\n prevT = tokens[i-1]\n if prevT[0].isupper():\n if prevT.lower() in vocab.VN_FAMILY_NAMES:\n wordtags.append(WordTag(token, \"I\"))\n isNotMiddleName = False\n if isNotMiddleName:\n wordtags.append(WordTag(token, \"B\"))\n for k in range(i+1,ilower):\n wordtags.append( WordTag(tokens[k], \"I\"))\n\n i = ilower - 1\n else:\n wordtags.append(WordTag(token, \"B\"))\n else:\n wordtags.append(WordTag(token, \"B\"))\n i+=1\n return wordtags\n\n def segmentTokenizedString(self,strs :str)->str:\n sb = \"\"\n line = ''.join(strs).strip()\n if len(line) == 0:\n return \"\\n\"\n\n wordtags = self.getInitialSegmentation(line)\n size = len(wordtags)\n for i in range(0,size) :\n object = utils.getObject(wordtags, size, i)\n firedNode = self.findFiredNode(object)\n if firedNode.depth > 0:\n if firedNode.conclusion==\"B\":\n sb=sb+\" \" + wordtags[i].form\n else:\n sb=sb+\"_\" + wordtags[i].form\n else:\n if wordtags[i].tag == \"B\":\n sb=sb+\" \" + wordtags[i].form\n else:\n sb=sb+\"_\" + wordtags[i].form\n return sb.strip()\n\n # def segmentRawString(self,strs:str)->str:\n # return self.segmentTokenizedString(\" \".join(Tokenizer.tokenize(strs)))\n def segmentRawSentences(self,tokenizer:Tokenizer,strs:str):\n sentence = tokenizer.joinSentences(tokenizer.tokenize(strs))\n return self.segmentTokenizedString(sentence)\n\n\nif __name__ == \"__main__\":\n rdrsegment = RDRSegmenter()\n tokenizer = Tokenizer()\n t=time.time()\n output = rdrsegment.segmentRawSentences(tokenizer,\"hôm nay tôi đau bụng cảm sốt nhức đầu ho khan tại Hà Nội có triệu chứng bị Covid 19\")\n print(output,time.time()-t)\n","sub_path":"app/src/main/python/RDRSegmenter.py","file_name":"RDRSegmenter.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"65748168","text":"import torch\nfrom torch import nn\nfrom core import models\nfrom typing import Union\nfrom core.layers.routing_vector import RoutingVector\nfrom core.layers.routing_matrix import RoutingMatrix\nfrom core.layers.routing_matrix import Length as LengthMatrix\nfrom core.layers.layers_efficient import Length as LengthVector\nfrom core.layers.layers_efficient import PrimaryCaps, FCCaps\n\n\nclass ModelVector(nn.Module):\n def __init__(self, in_shape, num_classes=10, backbone=models.resnet50_dwt_tiny_half):\n super(ModelVector, self).__init__()\n self.backbone = backbone(backbone=True)\n shape = self.backbone.compute_shape(in_shape)\n self.primary_caps = PrimaryCaps(in_channel=shape[0], out_channel=shape[0], kernel_size=shape[-1],\n num_capsule=shape[0] // 8, capsule_length=8)\n shape = self.primary_caps.compute_shape(shape)\n self.routing = RoutingVector((shape[1], shape[0]), ['Tiny_FPN'])\n self.digit_caps = FCCaps(shape[0], shape[1], num_classes, 16)\n self.length = LengthVector()\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.primary_caps(x)\n x = torch.transpose(x, 1, 2)\n x = self.routing(x)\n x = torch.transpose(x, 1, 2)\n digit = self.digit_caps(x)\n classes = self.length(digit)\n return classes\n\n\nclass ModelMatrix(nn.Module):\n def __init__(self, in_shape, num_classes=10, routing_name_list: Union[list, tuple] = None,\n backbone=models.resnet50_dwt_tiny_half):\n super(ModelMatrix, self).__init__()\n\n self.backbone = backbone(backbone=True)\n shape = self.backbone.compute_shape(in_shape)\n self.primary_caps = PrimaryCaps(in_channel=shape[0], out_channel=shape[0], kernel_size=shape[-1],\n num_capsule=shape[0] // 8, capsule_length=8)\n shape = self.primary_caps.compute_shape(shape)\n self.routing = RoutingMatrix(shape[0] // 2, num_classes, routing_name_list)\n self.length = LengthMatrix()\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.primary_caps(x)\n x = torch.transpose(x, 1, 2)\n x = torch.reshape(x, (x.shape[0], -1, 4, 4))\n x = self.routing(x)\n classes = self.length(x)\n return classes\n\n\ndef capsule_efficient_cifar(num_classes=10, args=None, **kwargs):\n in_shape = (3, 32, 32) if args.in_shape is None else args.in_shape\n return ModelVector(in_shape, num_classes)\n\n\ndef hr_caps_r_fpn(num_classes=10, args=None, **kwargs):\n in_shape = (3, 32, 32) if args.in_shape is None else args.in_shape\n routing_name_list = ['Tiny_FPN'] if args.routing_name_list is None else args.routing_name_list\n backbone = models.__dict__[args.backbone]\n return ModelMatrix(in_shape, num_classes, routing_name_list, backbone)\n\n\n# if __name__ == '__main__':\n# inp = torch.ones((1, 3, 32, 32))\n#\n# # out = RoutingBlockMatrix(32, 'FPN')(inp)\n# # out = RoutingBlockMatrix(32, 'Tiny_FPN')(inp)\n# # out = RoutingMatrix(32, 10, ['Tiny_FPN'])(inp)\n# out = ModelMatrix((3, 32, 32))(inp)\n# print(out.shape)\n# print(out)\n","sub_path":"core/models/hr_caps_dwt.py","file_name":"hr_caps_dwt.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"425221551","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.contrib import admin\nfrom dajaxice.core import dajaxice_autodiscover\n\n\nadmin.autodiscover()\ndajaxice_autodiscover()\n\nhandler403 = 'core.views.error_403'\nhandler404 = 'core.views.error_404'\nhandler500 = 'core.views.error_500'\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'views.home', name='home'),\n # url(r'^Minerva/', include('foo.urls')),\n # Import URLs\n url(r'', include('bulletin.urls')),\n url(r'', include('account.urls')),\n url(r'', include('homeroom.urls')),\n url(r'', include('course.urls')),\n url(r'', include('portfolio.urls')),\n url(r'', include('backstage.urls')),\n url(r'', include('data.urls')),\n # Admin URLs\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n # AJAX URLs\n url(r'^%s/' % settings.DAJAXICE_MEDIA_PREFIX, include('dajaxice.urls'))\n)\n\n# Static File URLs\nurlpatterns += staticfiles_urlpatterns()\n","sub_path":"schoolax/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"287141392","text":"\nimport logging\n\nconsole_template = '%(levelname)s: %(message)s,\"service\":\"api\",\"timestamp\":\"%(asctime)s\"'\nlog_template = '{\"message\":\"%(message)s\",\"level\":\"%(levelname)s\",\"service\":\"api\",\"timestamp\":\"%(asctime)s\"}'\n\nlogname = 'log/combined.log'\n\nerror_handler = logging.FileHandler(\"log/error.log\")\nerror_handler.setLevel(logging.ERROR)\nerror_handler.setFormatter(logging.Formatter(log_template))\n\nbase_handler = logging.FileHandler(\"log/combined.log\")\nbase_handler.setLevel(logging.INFO)\nbase_handler.setFormatter(logging.Formatter(log_template))\n\nstream_handler = logging.StreamHandler()\nstream_handler.setLevel(logging.INFO)\nstream_handler.setFormatter(logging.Formatter(console_template))\n\nlogging.basicConfig(\n format='{\"message\":\"%(message)s\",\"level\":\"%(levelname)s\",\"service\":\"api\",\"timestamp\":\"%(asctime)s\"}',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[\n error_handler,\n base_handler,\n stream_handler\n ]\n)\n\ndef get_logger(name):\n logger = logging.getLogger(name)\n return logger\n\n","sub_path":"api/helpers/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"458277687","text":"# from databroker import DataBroker as db, get_table\n\nfrom bluesky.plans import abs_set, trigger, trigger_and_read, read, sleep, subs_decorator\n\n\"\"\"\nExample:\ndata = simple_ascan(cam_fs2, \"stats1_total\", hdcm.p, -2, 2, 100)\n# or\ndata = simple_ascan(keithley, \"\", hdcm.p, -2, 2, 100)\nprint data.values\n\"\"\"\ndef simple_ascan(camera, stats, motor, start, end, steps):\n gs.DETS = [camera]\n gs.MASTER_DET = camera\n \n stats_name = \"_\".join((camera.name,stats)) if stats else camera.name\n gs.PLOT_Y = stats_name\n\n uid = RE(ascan(motor, start, end, steps))[0]\n table = get_table(db[uid])\n try:\n return table[[motor.name, stats_name]]\n except:\n return table[[motor.name+\"_readback\", stats_name]]\n\ndef wire_scan(detector, motor, start, stop, steps, sleep_time=1):\n gonio.py.move(start)\n time.sleep(sleep_time)\n\n def dwell(detectors, motor, step):\n yield from checkpoint()\n yield from abs_set(motor, step, wait=True)\n yield from sleep(sleep_time)\n \n # Do I need to bundle these?\n # How to get back the detector reading and return the difference to the previous one?\n # How to emit that difference?\n #yield from trigger(detector)\n #yield from wait(detector)\n #yield from read(detector)\n #yield from read(motor)\n \n return (yield from trigger_and_read(list(detectors)+[motor]))\n \n\n table = LiveTable([detector, motor])\n plot = LivePlot(detector.name, motor.name)\n\n @subs_decorator([table, plot])\n def inner():\n yield from abs_set(motor, start, wait=True)\n yield from sleep(sleep_time)\n yield from scan([detector], motor, start, stop, steps, per_step=dwell)\n \n uid = RE(inner)\n data = get_table(db[uid])\n y = data[detector.name]\n dy = np.diff(y)\n x = data[motor.name]\n\n plt.plot(x[1:], dy)\n return get_table(db[uid])\n \n","sub_path":"startup/99-macros.py","file_name":"99-macros.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"31824434","text":"# -*- coding:utf-8 -*- \n'''\n滑动平均模型\nhttps://blog.csdn.net/u012436149/article/details/56484572\n类似与PID算法,在参数更新的时候,定义一个影子变量,可以得到一个稳定输出的影子变量\nshadow_variable = decay x shadow_variable + (1-decay) x variable\nshadow_variable为影子变量\nvariable为待更新变量\ndecay决定模型更新速度,越大越稳定,一般取值0.99,接近1\n若设置了num_updates,则\ndecay = min(decay, (1+num_updates / 10+num_updates))\n\n在saver.py中展示了加载模型是怎样加载指数平均\n'''\nimport tensorflow as tf\n#变量\nv1 = tf.Variable(0, dtype=tf.float32)\nstep = tf.Variable(0, trainable=False)\n#定义一个指数加权平均\nema = tf.train.ExponentialMovingAverage(0.99, step)\n#按照公式计算一次,更新影子变量\nmaintain_average_op = ema.apply([v1])\n\nwith tf.Session() as sess:\n init_op = tf.initialize_all_variables()\n sess.run(init_op)\n print(sess.run([v1, ema.average(v1)]))\n sess.run(tf.assign(v1, 5))\n sess.run(maintain_average_op)\n #ema.average(v1)获取滑动平均之后变量的取值\n print([v1, ema.average(v1)])\n sess.run(tf.assign(step, 10000))\n sess.run(tf.assign(v1, 10))\n sess.run(maintain_average_op)\n print(sess.run([v1, ema.average(v1)]))\n sess.run(maintain_average_op)\n print(sess.run([v1, ema.average(v1)]))","sub_path":"TensorFlow/TensorFlow_Framework_Insights/ExponentialMovingAverage.py","file_name":"ExponentialMovingAverage.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"356640752","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework import viewsets, status\nfrom .models import ProjectPermission\nfrom .serializers import ProjectPermissionSerializer\n\nfrom rest_framework import permissions\n\nfrom django.shortcuts import render\n\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\n\nfrom django.http import JsonResponse, HttpResponse\n\nfrom accounts.models import UserProfile\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\n# https://stackoverflow.com/questions/30871033/django-rest-framework-remove-csrf\nclass IsCreationOrIsAuthenticated(permissions.BasePermission):\n def has_permission(self, request, view):\n \"\"\"\n if request.user.is_authenticated:\n print(\"user.is_authenticated\", request.user.is_authenticated)\n else:\n print(\"user.is_authenticated\", request.user.is_authenticated)\n\n print(\"--------------: user\", request.user)\n #post_id = self.request.query_params.get('post_id', None)\n #userProfile = UserProfile.objects.get(user=user, user_type=user_type)\n print(\"userProfile\", userProfile)\n \"\"\"\n print(\"--------------: user\")\n return True\n\nclass CsrfExemptSessionAuthentication(SessionAuthentication):\n def enforce_csrf(self, request):\n return\n\nclass ProjectPermissionViewSet(viewsets.ModelViewSet):\n queryset = ProjectPermission.objects.all()\n serializer_class = ProjectPermissionSerializer\n permission_classes = (IsCreationOrIsAuthenticated, )\n #authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)\n\n def get_queryset(self):\n # Refer to: https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-the-url\n qset = ProjectPermission.objects.all()\n post_id = self.request.query_params.get('post_id', None)\n #print(\"get permission(\", post_id, \")\")\n if post_id is None or not post_id.isdigit():\n return qset\n qset = qset.filter(post_id=post_id)\n stage_id = self.request.query_params.get('stage', None)\n #print(\"get permission(\", post_id, \") stage(\", stage_id, \")\")\n if stage_id is not None and stage_id.isdigit():\n qset = qset.filter(stage=stage_id)\n elif stage_id is not None and isinstance(stage_id, list):\n qset = qset.filter(stage__in=stage_id)\n user_id = self.request.query_params.get('user', None)\n if user_id is not None and user_id.isdigit():\n qset = qset.filter(user_id=user_id)\n\n return qset\n def create(self, request):\n #print(\"come here\", request.data)\n stageList = request.data.get('stage', None)\n postId = request.data.get('post', None)\n userId = request.data.get('user', None)\n if stageList == None or postId == None or userId == None:\n #print(\"None\", stageList, postId, userId)\n return HttpResponse(\"Fail\")\n # stage is a array, list it\n for stage in stageList:\n permissionInfo = {\n \"post\": postId,\n \"stage\": stage,\n \"user\": userId,\n }\n serializer = ProjectPermissionSerializer(data=permissionInfo)\n if serializer.is_valid(raise_exception=True):\n saved = serializer.save()\n print(\"saved success: \", saved)\n else:\n print(\"Fail to save the permission Info\")\n\n return HttpResponse(\"Success\")\n","sub_path":"permissions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"386933720","text":"#!/usr/bin/env python3\n\"\"\"\nIn this listing, you are directly manipulating the HTTP protocol: asking it to connect to a specific machine, to\nissue a GET request with a path that you have constructed by hand, and finally to read the reply directly from the\nHTTP connection. Instead of being able conveniently to provide your query parameters as separate keys and values\nin a dictionary,\n\"\"\"\nimport http.client\nimport json\nfrom urllib.parse import quote_plus\nbase = '/maps/api/geocode/json'\ndef geocode(address):\n path = '{}?address={}&sensor=false'.format(base, quote_plus(address))\n connection = http.client.HTTPConnection('maps.google.com')\n connection.request('GET', path)\n rawreply = connection.getresponse().read()\n reply = json.loads(rawreply.decode('utf-8'))\n print(reply['results'][0]['geometry']['location'])\nif __name__ == '__main__':\n geocode('207 N. Defiance St, Archbold, OH')\n","sub_path":"geo_env/search3.py","file_name":"search3.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"153216254","text":"# inspired by : https://www.ritchievink.com/blog/2017/04/23/understanding-the-fourier-transform-by-example/\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfreq_list = [2.05, 2.1, 2.15,2.2]\nfreq_list = np.arange(2.05, 5.05, 0.5)\nt = np.linspace(0, 10, 500) # Number of sample points\n\nsignals = []\n\nfor freq in freq_list:\n s = np.cos(2 * np.pi * freq * t) # + 0.5 * np.sin(90 * 2 * np.pi * t)\n signals.append(s)\n # plt.plot(t, s, label='freq={} Hz'.format(freq))\n\ny_mean1 = sum(signals)/len(freq_list)\nplt.plot(t, y_mean1, c='k', linewidth=5, label='average wave, {} freqs'.format(len(freq_list)))\n\n\nplt.ylabel(\"Amplitude\")\nplt.xlabel(\"Time [s]\")\nplt.grid()\nplt.legend()\nplt.tight_layout()\nplt.savefig('decoherence_2fres.png')\nplt.show()\n","sub_path":"run_simulation/run_PyHEADTAIL_no_htcondor/playground/services/decoherence.py","file_name":"decoherence.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"220658450","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom ToolMappingClass import ToolMapping\nfrom CSVHandlerClass import *\nimport tempfile\n\n\nclass FidoLoader:\n\n fidoheader = \"info.status,info.time,info.puid,info.formatname,info.signaturename,info.filesize,info.filename,info.mimetype,info.matchtype\"\n basedb = \"\"\n\n def __init__(self, basedb):\n self.basedb = basedb\n\n # need a temporary fild with fido headers\n def createtmpfile(self, fidoexport):\n tmpfile = tempfile.NamedTemporaryFile()\n with open(fidoexport, \"rb\") as csvfile:\n for i, row in enumerate(csvfile):\n # add fido header\n if i == 0:\n tmpfile.write(self.fidoheader + \"\\n\")\n tmpfile.write(row)\n else:\n tmpfile.write(row)\n tmpfile.seek(0)\n return tmpfile\n\n def fidoDBSetup(self, fidoexport, cursor):\n sys.stdout.write(\n \"Placeholder Code: Currently not handling FIDO exports.\" + \"\\n\"\n )\n if fidoexport != False:\n tmpfile = self.createtmpfile(fidoexport)\n fidocsvhandler = genericCSVHandler()\n fidolist = fidocsvhandler.csvaslist(tmpfile.name)\n tmpfile.close()\n","sub_path":"sqlitefid/libs/FidoLoaderClass.py","file_name":"FidoLoaderClass.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"276590527","text":"from GameData.cc_file_loader import *\nfrom GameData.cc_logger import *\nfrom GameData.cc_sprite_manager import *\nfrom GameData.cc_texture import *\nfrom GameData.cc_sprite import *\nfrom GameData.cc_resource_paths import *\nfrom pygame import *\n\n\nclass ccSpritesFileLoader(ccFileLoader):\n\n def __init__(self):\n pass\n\n def process_file(self, filename):\n try:\n self.load_file(ccResourcePaths.get_sprites() + filename)\n\n except:\n ccLogger.error(str(filename) + ' file could not be loaded.')\n raise RuntimeError('File could not be loaded.')\n self.__configure()\n self.__process_sprites()\n\n def __configure(self):\n self.file_name = self.current_dict['Config']['filename']\n self.cc_texture = ccTexture()\n self.cc_texture.load_image(self.file_name)\n ccSpriteManager.add_texture(self.file_name, self.cc_texture)\n\n def __process_sprites(self):\n self.set_first_section()\n while self.next_section():\n if 'num_of_sprites' in self.current_section:\n self.__create_multiple_sprites()\n else:\n self.__create_one_sprite()\n\n def __create_one_sprite(self):\n self.name = list(self.current_dict)[self.current_section_id]\n section = self.current_section\n hitbox = self.create_hitbox(section)\n rect = pygame.Rect(section['offset_x'], section['offset_y'], section['width'], section['height'])\n ccSpriteManager.add_sprite(self.name, ccSprite(ccSpriteManager.get_texture(self.file_name), rect, hitbox))\n\n def __create_multiple_sprites(self):\n offset_x = int(self.get_field(\"offset_x\"))\n offset_y = int(self.get_field(\"offset_y\"))\n num = 0\n\n for i in range(self.current_section['num_of_sprites']):\n if offset_x >= self.cc_texture.get_width():\n offset_y += int(self.get_field(\"height\"))\n offset_x = 0\n self.name = list(self.current_dict)[self.current_section_id] + \"%03d\" % num\n section = self.current_section\n hitbox = self.create_hitbox(section)\n rect = pygame.Rect(offset_x, offset_y, section['width'], section['height'])\n ccSpriteManager.add_sprite(self.name, ccSprite(ccSpriteManager.get_texture(self.file_name), rect, hitbox))\n offset_x += section['width']\n num += 1\n\n def create_hitbox(self,section):\n if 'hitbox_width' in section:\n hitbox_offset_x = self.get_field('hitbox_offset_x')\n hitbox_offset_y = self.get_field('hitbox_offset_y')\n if hitbox_offset_x is None:\n hitbox_offset_x = 0\n if hitbox_offset_y is None:\n hitbox_offset_y = 0\n return pygame.Rect(hitbox_offset_x, hitbox_offset_y,\n section['hitbox_width'], section['hitbox_height']) \n return None\n \n ","sub_path":"alpha_version/GameData/cc_sprites_file_loader.py","file_name":"cc_sprites_file_loader.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"478795248","text":"import sys\nfrom distutils.version import StrictVersion\n\nOK_flag = True\n\n###### PYTHON VERSION #######\n\nif sys.version_info.major != 2:\n sys.stderr.write('ERROR: Please use Python 2 only\\n')\n OK_flag = False\n\nif sys.version_info.minor < 7:\n sys.stderr.write('ERROR: Please use Python 2.7 or higher, but not Python 3\\n')\n OK_flag = False\n\ntry:\n import numpy\n if StrictVersion(numpy.__version__) < StrictVersion('1.6.2'):\n raise ImportError\n \n###### NUMPY VERSION #######\n\nexcept ImportError:\n sys.stderr.write('ERROR: Please install numpy version 1.6.2 or higher\\n')\n OK_flag = False\n\n###### OCT2PY VERSION #######\n\ntry:\n import oct2py\n if StrictVersion(oct2py.__version__) < StrictVersion('2.4.0'):\n raise ImportError\n \nexcept ImportError:\n sys.stderr.write('WARNING: Please install oct2py version 2.4.0 or higher\\n')\n OK_flag = False\n \n###### OCTAVE VERSION #######\n\noctave_ver = oct2py.octave.eval('OCTAVE_VERSION()', verbose=False)\nif StrictVersion(octave_ver) < StrictVersion('3.6.4'):\n sys.stderr.write('WARNING: Please install Octave version 3.6.4 or higher\\n')\n OK_flag = False\n\n\nif OK_flag:\n sys.stderr.write('Success, all required packages are installed and up-to-date\\n')\nelse:\n sys.stderr.write('Failure, some of the required packages are missing or have an older version\\n')\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"158838831","text":"# coding: utf-8\n#\n# Copyright (c) Alexandr Emelin. BSD license.\n# All rights reserved.\n\nimport re\nfrom wtforms import TextField, IntegerField, BooleanField, validators, SelectField\nfrom centrifuge.utils import Form\n\n\n# regex pattern to match project and namespace names\nNAME_RE = re.compile('^[^_]+[A-z0-9@\\-_\\.]{2,}$')\n\n# how many times we are trying to authorize subscription by default\nDEFAULT_MAX_AUTH_ATTEMPTS = 5\n\n# milliseconds, increment for back-off\nDEFAULT_BACK_OFF_INTERVAL = 100\n\n# milliseconds, max timeout between auth attempts\nDEFAULT_BACK_OFF_MAX_TIMEOUT = 5000\n\n# how many messages keep in channel history by default\nDEFAULT_HISTORY_SIZE = 20\n\n\nclass ProjectForm(Form):\n\n BOOLEAN_FIELDS = []\n\n def __init__(self, *args, **kwargs):\n super(ProjectForm, self).__init__(*args, **kwargs)\n namespace_choices = kwargs.get('namespace_choices')\n if namespace_choices:\n self.default_namespace.choices = namespace_choices\n else:\n del self.default_namespace\n\n name = TextField(\n label='project name',\n validators=[\n validators.Regexp(regex=NAME_RE, message=\"invalid name\")\n ],\n description=\"project name, must contain ascii symbols only\"\n )\n\n display_name = TextField(\n label='display name',\n validators=[\n validators.Length(min=3, max=50),\n validators.Optional()\n ],\n description=\"human readable project name, will be used in web interface\"\n )\n\n auth_address = TextField(\n label='auth url address',\n validators=[\n validators.URL(require_tld=False),\n validators.Optional()\n ],\n description=\"your application's url address to authorize subscriptions on private channels\"\n )\n\n max_auth_attempts = IntegerField(\n label='maximum auth attempts',\n validators=[\n validators.NumberRange(min=1, max=100)\n ],\n default=DEFAULT_MAX_AUTH_ATTEMPTS,\n description=\"maximum amount of POST requests from Centrifuge to your application \"\n \"during client's authorization\"\n )\n\n back_off_interval = IntegerField(\n label='back-off interval in milliseconds',\n validators=[\n validators.NumberRange(min=50, max=10000)\n ],\n default=DEFAULT_BACK_OFF_INTERVAL,\n description=\"please, keep it default until you know what you do\"\n )\n\n back_off_max_timeout = IntegerField(\n label='back-off max timeout in milliseconds',\n validators=[\n validators.NumberRange(min=50, max=120000)\n ],\n default=DEFAULT_BACK_OFF_MAX_TIMEOUT,\n description=\"please, keep it default until you know what you do\"\n )\n\n default_namespace = SelectField(\n label='default namespace',\n validators=[],\n default='',\n description=\"namespace which will be used by default when no namespace \"\n \"provided in request params\"\n )\n\n\nclass NamespaceForm(Form):\n\n BOOLEAN_FIELDS = [\n 'is_watching', 'is_private', 'publish',\n 'presence', 'history', 'join_leave'\n ]\n\n name = TextField(\n label='namespace name',\n validators=[\n validators.Regexp(regex=NAME_RE, message=\"invalid name\")\n ],\n description=\"unique namespace name, ascii symbols only\"\n )\n\n is_watching = BooleanField(\n label='is watching',\n validators=[],\n default=False,\n description=\"publish all namespace messages into admin channel \"\n \"(messages will be visible in web interface)\"\n )\n\n is_private = BooleanField(\n label='is private',\n validators=[],\n default=False,\n description=\"authorize every subscription on channel in this namespace using \"\n \"POST request to auth address\"\n )\n\n publish = BooleanField(\n label='publish',\n validators=[],\n default=False,\n description=\"allow clients to publish messages in channels\"\n )\n\n presence = BooleanField(\n label='presence',\n validators=[],\n default=True,\n description=\"check if you want to use presence info for channels in \"\n \"this namespace (state must be configured)\"\n )\n\n history = BooleanField(\n label='history',\n validators=[],\n default=True,\n description=\"check if you want to get history info for channels in \"\n \"this namespace (state must be configured)\"\n )\n\n history_size = IntegerField(\n label=\"history size\",\n validators=[\n validators.NumberRange(min=1)\n ],\n default=DEFAULT_HISTORY_SIZE,\n description=\"maximum amount of messages in history for channels in this namespace\"\n )\n\n join_leave = BooleanField(\n label=\"join/leave messages\",\n validators=[],\n default=True,\n description=\"send join(leave) messages when client subscribes on channel \"\n \"(unsubscribes from channel)\"\n )\n\n auth_address = TextField(\n label='auth url address',\n validators=[\n validators.URL(require_tld=False),\n validators.Optional()\n ],\n description=\"url address to authorize clients specific for namespace \"\n \"(leave it blank to use auth address from project)\"\n )\n","sub_path":"src/centrifuge/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"457010312","text":"\nimport csv\nimport os\nfrom functools import reduce\nfrom file_processing import CSV_Proc\n\n\ndef main():\n\n \n rootdir = '/root/data'\n #rootdir = '/Users/jmiracles80/data/ppp/data'\n fnames = []\n\n file_out = './count.csv'\n row_num = 0\n wrd_cnt = {}\n file_num = 0\n\n\n cp = CSV_Proc(wrd_cnt,row_num)\n\n try:\n #walk through every sub directory to get only the csv files\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n if file.endswith('.csv'):\n fnames.append(str(os.path.join(subdir,file)))\n file_num +=1\n\n except Exception as e:\n print(e)\n\n wdCntDict = {}\n total = 0\n iter_csv = iter(cp.process_file(fnames))\n next(iter_csv) # Skipping the column names\n #iterate through each row\n for row in iter_csv:\n total += 1\n #iterate through each column in rwo\n for x in row:\n if x in wdCntDict.keys():\n wdCntDict[x] += 1\n else:\n wdCntDict[x] = 1\n \n # write dict out to file\n cp.write_dict(wdCntDict,file_out)\n\n print(\"# Questions\")\n\n print(\"## what's the average number of fields across all the `.csv` files?\")\n print(cp.col_count(fnames))\n\n print(\"## create a csv file that shows the word count of every value of every dataset (dataset being a `.csv` file)\")\n print(file_out)\n\n print(\"## what's the total number or rows for the all the `.csv` files?\")\n print(total)\n\n \n\nif __name__ == \"__main__\":\n\n main()\n\n ","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"472410353","text":"class Spell:\n \"\"\" A castable spell \"\"\"\n all_spells = []\n\n def __init__(self, json_data):\n self.name = json_data[\"name\"]\n self.damage = json_data[\"damage\"]\n\n def deal_damage(self, message_log, *enemies):\n \"\"\" Deal spell's damage to enemies \"\"\"\n for enemy in enemies:\n message_log.add_message(enemy.name + \" takes \" + str(self.damage)\n + \" damage\")\n enemy.take_damage(self.damage)\n\n @classmethod\n def add_spell(cls, spell):\n \"\"\" Add a spell to the list of spells \"\"\"\n cls.all_spells.append(spell)\n\n @classmethod\n def get_spell(cls, spell_name):\n \"\"\" Get a spell by name \"\"\"\n for spell in cls.get_all_spells():\n if spell_name == spell.name:\n return spell\n else:\n raise SpellNotFoundException('Spell with name \"' + spell_name +\n '\" not found')\n\n @classmethod\n def get_all_spells(cls):\n \"\"\" Get a list of all spells \"\"\"\n return cls.all_spells\n\n\nclass SpellNotFoundException(Exception):\n pass\n","sub_path":"spell.py","file_name":"spell.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"114024646","text":"import person_db\nimport pymysql\nimport valider_søk_og_slett\n\n\n\"\"\"person_db.opprett_person(\"Bjørnstjerne Børspn\", \"Norge\")\nperson_db.endre_navn(\"Bjørnstjerne Bjørnson\", \"Norge\")\nprint(person_db.finn_personer(\"Norge\"))\nperson_db.slett_person(\"Bjørnstjerne Bjørnson\")\"\"\"\n#Erstatt brukernavn, passord og database med din egen. putt test_db_sql.txt inn i SQL for å teste dette programmet\n#person_db.py er ikke nødvendig for programmet, men ka være nyttig til testing av det. (men det er ikke helt oppdatert til databasens nye format.\n#Erstatt pers_id med regnr hvis det skal testes med løsningsforslaget\n\n\n#Erstatt delete_person med delete_gjenstand\ndef delete_person(pers_id):\n valider_søk_og_slett.valider_pers_id(pers_id)\n db = pymysql.connect(\"mysql.stud.iie.ntnu.no\", \"simenmyh\",\"bcyFp79h\",\"simenmyh\")\n db.autocommit(True)\n cursor = db.cursor()\n sql1 = \"DELETE FROM adresse WHERE pers_id=%s;\"\n cursor.execute(sql1, pers_id)\n sql2 = \"DELETE FROM yrke WHERE pers_id=%s;\"\n cursor.execute(sql2, pers_id)\n sql3 = \"DELETE FROM person WHERE pers_id=%s;\"\n cursor.execute(sql3, pers_id)\n # Grunnen til at sql 1, 2 og 3 er delt opp er at hver \"%s\" refererer til per_id, men pers_id er bare ett argument, og kan ikke refereres til av alle tre. Jeg er usikker på hvordan jeg kan gjøre dette bedre.\n # Siden adresse tabellern og yrke tabellen har fremmednøkler som refererer til pers_id i person tabellen, er det viktig at de to slettes først, ellers vil ikke sql tillate at de blir slettet for å opprettholde integriteten i databasen.\n valider_søk_og_slett.valider_slettet_person(pers_id)\n db.close()\ndelete_person(\"2\")\n\n# Jeg tror programmet gjør det det er ment å gjøre, med unntak av tilbakemelding hvis noe går feil. Det kan helt sikker ryddes opp endel.\n","sub_path":"slettefunksjon med feedback/slett_gjenstand.py","file_name":"slett_gjenstand.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"132156210","text":"# 生成指定位数随机字符串的函数\n\n\"\"\"\n1. Create a base character list which contains all the characters or digits used to create the random string. The python string module digits, ascii_letters property contains number and lower and upper characters.\n2. Use random.choice method to select one character from the base character list randomly.\n3. Use the random selected character to build a string.\n\"\"\"\n\nimport string\nimport random\nimport uuid\n\n\n# Common function to generate a string_size length random string based on the first parameter.\ndef generate_random_string(base_string_character, string_size=10):\n ret_str = ''\n\n # Select random character for string_size times.\n for i in range(string_size):\n # Random select one character from the base character string.\n\n character = random.choice(base_string_character)\n\n # Append the selected character to the return string.\n ret_str += character\n\n return ret_str\n\n\n# Generate random digit content string only.\ndef generate_random_digit(str_len=10):\n ret = generate_random_string(string.digits, str_len)\n print(\"Random digit : \" + ret)\n\n return ret\n\n\n# Generate random alphabet content string.\ndef generate_random_alphabet(str_len=10):\n ret = generate_random_string(string.ascii_letters, str_len)\n print(\"Random alphabet : \" + ret)\n\n return ret\n\n\n# Generate random alphabet and digits content string.\ndef generate_random_alphabet_digit(str_len=10):\n ret = generate_random_string(string.digits + string.ascii_letters, str_len)\n print(\"Random alphabet and digit: \" + ret)\n\n return ret\n\n\n# Generate random string with specified digit count and alphabet character count.\ndef generate_random_alphabet_digit_with_special_number(digit_number=5, alphabet_number=5):\n # Generate specified length digit string.\n digit_str = generate_random_digit(digit_number)\n\n # Generate specified length alphabet string.\n alphabet_str = generate_random_alphabet(alphabet_number)\n\n # Add above two random string.\n tmp_str = digit_str + alphabet_str\n\n # Convert above string to list.\n tmp_str_list = list(tmp_str)\n\n tmp_str_len = len(tmp_str)\n\n # Scatter characters order in the string list and return a new ordered string list.\n ret = random.sample(tmp_str_list, tmp_str_len)\n\n # Convert string list back to a string.\n ret = str(ret)\n ret = ret.strip('[').strip(']')\n ret = ret.replace(\",\", \"\").replace(\"'\", \"\").replace(\" \", \"\")\n\n print(\"Random special length alphabet digit : \" + ret)\n\n return ret\n\n\n# Use python uuid module to generate a uuid.\ndef generate_random_uuid():\n # Create uuid.\n ret = uuid.uuid1()\n\n # Convert uuid to string.\n ret = str(ret)\n print(\"uuid : \" + ret)\n\n return ret\n\n\nimport secrets\n\n# generates binary sequences of random bytes\n# if you prefer a raw binary string, without any encodings, then use token_bytes()\nprint(secrets.token_bytes())\n# b'U\\xdd\\xd0\\xc14\\xe0\\xda\\xf6\\xb9km\\x81Z\\x11tX\\x16a\\x01C[MYU\\x95\\xb3\\xd8M\\x1e\\x9c\\x8c['\nprint(secrets.token_bytes(20))\n# b'\\x1b\\x97\\x8d\\xf8oM\\x07\\x11i\\x98>\\x95\\x9c\\x0e\\x14\\xfc\\xefK\\xd8\\xa9'\n\n# Invoking the token_bytes() function without any arguments returns a token with a default length that is determined to be sufficiently safe and secure.\n# You can also pass the desired length as an argument, as you can see in the second example above.\n\n\n\n\nprint(secrets.choice(['apple', 'banana', 'pear']))\n# This function can be combined with a list comprehension to generate random strings that only use a specific set of characters. For example, if you want to generate a random string of 20 characters that only uses the letters abcd you can do so as follows:\nrandomString=''.join([secrets.choice('abcd') for i in range(20)])\nprint(randomString)\n\n\n\n# token_urlsafe(), which returns the random string encoded in base64 format\nprint(f'secrets.token_urlsafe(): {secrets.token_urlsafe()}')\nprint(f'secrets.token_urlsafe(): {secrets.token_urlsafe(20)}')\n\n\n\n# The randbelow() function generates a random integer number between 0 and the number given as an argument (not including this number):\nprint(f'print(secrets.randbelow(10)): {secrets.randbelow(10)}')\n# the randbits() function returns an random integer number that has the specified number of bits:\nprint(f'secrets.randbits(2): {secrets.randbits(8)}')\n\n\nif __name__ == '__main__':\n generate_random_digit()\n\n generate_random_alphabet()\n\n generate_random_alphabet_digit()\n\n generate_random_alphabet_digit_with_special_number(3, 6)\n\n generate_random_uuid()\n\n\nprint('======================================================================================================================')\n\n\nimport secrets\n\n# generates binary sequences of random bytes\n# if you prefer a raw binary string, without any encodings, then use token_bytes()\nprint(secrets.token_bytes())\n# b'U\\xdd\\xd0\\xc14\\xe0\\xda\\xf6\\xb9km\\x81Z\\x11tX\\x16a\\x01C[MYU\\x95\\xb3\\xd8M\\x1e\\x9c\\x8c['\nprint(secrets.token_bytes(20))\n# b'\\x1b\\x97\\x8d\\xf8oM\\x07\\x11i\\x98>\\x95\\x9c\\x0e\\x14\\xfc\\xefK\\xd8\\xa9'\n\n# Invoking the token_bytes() function without any arguments returns a token with a default length that is determined to be sufficiently safe and secure.\n# You can also pass the desired length as an argument, as you can see in the second example above.\n\n\n\n\nprint(secrets.choice(['apple', 'banana', 'pear']))\n# This function can be combined with a list comprehension to generate random strings that only use a specific set of characters. For example, if you want to generate a random string of 20 characters that only uses the letters abcd you can do so as follows:\nrandomString=''.join([secrets.choice('abcd') for i in range(20)])\nprint(randomString)\n\n\n\n# token_urlsafe(), which returns the random string encoded in base64 format\nprint(f'secrets.token_urlsafe(): {secrets.token_urlsafe()}')\nprint(f'secrets.token_urlsafe(): {secrets.token_urlsafe(20)}')\n\n\n\n# The randbelow() function generates a random integer number between 0 and the number given as an argument (not including this number):\nprint(f'print(secrets.randbelow(10)): {secrets.randbelow(10)}')\n# the randbits() function returns an random integer number that has the specified number of bits:\nprint(f'secrets.randbits(2): {secrets.randbits(8)}')\n\n","sub_path":"src/TestFrameworkDev/TestAutoTools/randomData/randomStrings.py","file_name":"randomStrings.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"465777434","text":"#Insert a document into the specific collection.\nfrom pymongo import MongoClient\nimport datetime\nimport pprint\nmongodb_client = MongoClient(\"\", 27017)\ndb = mongodb_client.sample_db #Change the name sample_db to your database name\nlog = {\"user\": \"Andy\",\n\t\t\"behavior\": \"insert a document\",\t\t\n\t\t\"datetime\": datetime.datetime.utcnow()}\nprint(\"Inserting document:\")\npprint.pprint(log)\t\t\nlogs = db.sample_col #Change the name sample_col to your collection name\ninserted_log = logs.insert_one(log) #Insert the document into specific collection\nprint(\"Document has been inserted successfully. Document id: {}\".format(inserted_log.inserted_id))","sub_path":"Python to MongoDB/insert_document.py","file_name":"insert_document.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"274465974","text":"#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport unittest\nimport six\n\nfrom prompt_toolkit.inputstream import InputStream\nfrom prompt_toolkit.line import Line, Document, ReturnInput\n\n\nclass _CLILogger(object):\n \"\"\" Dummy CLI class that records all the called methods. \"\"\"\n def __init__(self):\n self.log = []\n\n def __call__(self, name, *a):\n self.log.append((name,) + a)\n\n\nclass InputProtocolTest(unittest.TestCase):\n def setUp(self):\n self.cli = _CLILogger()\n self.stream = InputStream(self.cli)\n\n def test_simple_feed_text(self):\n self.stream.feed('test')\n self.assertEqual(self.cli.log, [\n ('insert_char', 't'),\n ('insert_char', 'e'),\n ('insert_char', 's'),\n ('insert_char', 't')\n ])\n\n def test_some_control_sequences(self):\n self.stream.feed('t\\x01e\\x02s\\x03t\\x04\\x05\\x06')\n self.assertEqual(self.cli.log, [\n ('insert_char', 't'),\n ('ctrl_a', ),\n ('insert_char', 'e'),\n ('ctrl_b', ),\n ('insert_char', 's'),\n ('ctrl_c', ),\n ('insert_char', 't'),\n ('ctrl_d', ),\n ('ctrl_e', ),\n ('ctrl_f', ),\n ])\n\n def test_enter(self):\n self.stream.feed('A\\rB\\nC\\t')\n self.assertEqual(self.cli.log, [\n ('insert_char', 'A'),\n ('ctrl_m', ),\n ('insert_char', 'B'),\n ('ctrl_j', ),\n ('insert_char', 'C'),\n ('ctrl_i', ),\n ])\n\n def test_backspace(self):\n self.stream.feed('A\\x7f')\n self.assertEqual(self.cli.log, [\n ('insert_char', 'A'),\n ('backspace', ),\n ])\n\n def test_cursor_movement(self):\n self.stream.feed('\\x1b[AA\\x1b[BB\\x1b[CC\\x1b[DD')\n self.assertEqual(self.cli.log, [\n ('arrow_up',),\n ('insert_char', 'A'),\n ('arrow_down',),\n ('insert_char', 'B'),\n ('arrow_right',),\n ('insert_char', 'C'),\n ('arrow_left',),\n ('insert_char', 'D'),\n ])\n\n def test_home_end(self):\n self.stream.feed('\\x1b[H\\x1b[F')\n self.stream.feed('\\x1b[1~\\x1b[4~') # tmux\n self.stream.feed('\\x1b[7~\\x1b[8~') # xrvt\n self.assertEqual(self.cli.log, [\n ('home',), ('end',),\n ('home',), ('end',),\n ('home',), ('end',),\n ])\n\n def test_page_up_down(self):\n self.stream.feed('\\x1b[5~\\x1b[6~')\n self.assertEqual(self.cli.log, [\n ('page_up',),\n ('page_down',),\n ])\n\n def test_f_keys(self):\n # F1 - F4\n self.stream.feed('\\x1bOP')\n self.stream.feed('\\x1bOQ')\n self.stream.feed('\\x1bOR')\n self.stream.feed('\\x1bOS')\n\n # F5 - F10\n self.stream.feed('\\x1b[15~')\n self.stream.feed('\\x1b[17~')\n self.stream.feed('\\x1b[18~')\n self.stream.feed('\\x1b[19~')\n self.stream.feed('\\x1b[20~')\n self.stream.feed('\\x1b[21~')\n\n self.assertEqual(self.cli.log, [\n ('F1',), ('F2',), ('F3',), ('F4',),\n ('F5',), ('F6',), ('F7',), ('F8',), ('F9',), ('F10',),\n ])\n\n\nclass LineTest(unittest.TestCase):\n def setUp(self):\n self.cli = Line()\n\n def test_setup(self):\n self.assertEqual(self.cli.text, '')\n self.assertEqual(self.cli.cursor_position, 0)\n\n def test_insert_text(self):\n self.cli.insert_text('some_text')\n self.assertEqual(self.cli.text, 'some_text')\n self.assertEqual(self.cli.cursor_position, len('some_text'))\n\n def test_cursor_movement(self):\n self.cli.insert_text('some_text')\n self.cli.cursor_left()\n self.cli.cursor_left()\n self.cli.cursor_left()\n self.cli.cursor_right()\n self.cli.insert_text('A')\n\n self.assertEqual(self.cli.text, 'some_teAxt')\n self.assertEqual(self.cli.cursor_position, len('some_teA'))\n\n def test_home_end(self):\n self.cli.insert_text('some_text')\n self.cli.home()\n self.cli.insert_text('A')\n self.cli.end()\n self.cli.insert_text('B')\n self.assertEqual(self.cli.text, 'Asome_textB')\n self.assertEqual(self.cli.cursor_position, len('Asome_textB'))\n\n def test_backspace(self):\n self.cli.insert_text('some_text')\n self.cli.cursor_left()\n self.cli.cursor_left()\n self.cli.delete_character_before_cursor()\n\n self.assertEqual(self.cli.text, 'some_txt')\n self.assertEqual(self.cli.cursor_position, len('some_t'))\n\n def test_cursor_word_back(self):\n self.cli.insert_text('hello world word3')\n self.cli.cursor_word_back()\n\n self.assertEqual(self.cli.text, 'hello world word3')\n self.assertEqual(self.cli.cursor_position, len('hello world '))\n\n def test_cursor_to_start_of_line(self):\n self.cli.insert_text('hello world\\n line2\\nline3')\n self.assertEqual(self.cli.cursor_position, len('hello world\\n line2\\nline3'))\n self.cli.cursor_position = len('hello world\\n li') # Somewhere on the second line.\n\n self.cli.cursor_to_start_of_line()\n self.assertEqual(self.cli.cursor_position, len('hello world\\n'))\n\n self.cli.cursor_to_start_of_line(after_whitespace=True)\n self.assertEqual(self.cli.cursor_position, len('hello world\\n '))\n\n def test_cursor_to_end_of_line(self):\n self.cli.insert_text('hello world\\n line2\\nline3')\n self.cli.cursor_position = 0\n\n self.cli.cursor_to_end_of_line()\n self.assertEqual(self.cli.cursor_position, len('hello world'))\n\n def test_cursor_word_forward(self):\n self.cli.insert_text('hello world word3')\n self.cli.home()\n self.cli.cursor_word_forward()\n\n self.assertEqual(self.cli.text, 'hello world word3')\n self.assertEqual(self.cli.cursor_position, len('hello '))\n\n def test_cursor_to_end_of_word(self):\n self.cli.insert_text('hello world')\n self.cli.home()\n\n self.cli.cursor_to_end_of_word()\n self.assertEqual(self.cli.cursor_position, len('hello') - 1)\n\n self.cli.cursor_to_end_of_word()\n self.assertEqual(self.cli.cursor_position, len('hello world') - 1)\n\n def test_delete_word(self):\n self.cli.insert_text('hello world word3')\n self.cli.home()\n self.cli.cursor_word_forward()\n self.cli.delete_word()\n\n self.assertEqual(self.cli.text, 'hello word3')\n self.assertEqual(self.cli.cursor_position, len('hello '))\n\n def test_delete_until_end(self):\n self.cli.insert_text('this is a sentence.')\n self.cli.home()\n self.cli.cursor_word_forward()\n self.cli.delete_until_end()\n\n self.assertEqual(self.cli.text, 'this ')\n self.assertEqual(self.cli.cursor_position, len('this '))\n\n def test_delete_until_end_of_line(self):\n self.cli.insert_text('line1\\nline2\\nline3')\n self.cli.cursor_position = len('line1\\nli')\n\n deleted_text = self.cli.delete_until_end_of_line()\n\n self.assertEqual(self.cli.text, 'line1\\nli\\nline3')\n self.assertEqual(deleted_text, 'ne2')\n\n # If we only have one line.\n self.cli.reset()\n self.cli.insert_text('line1')\n self.cli.cursor_position = 2\n\n deleted_text = self.cli.delete_until_end_of_line()\n\n self.assertEqual(self.cli.text, 'li')\n self.assertEqual(deleted_text, 'ne1')\n\n def test_cursor_up(self):\n # Cursor up to a line thats longer.\n self.cli.insert_text('long line1\\nline2')\n self.cli.cursor_up()\n\n self.assertEqual(self.cli.document.cursor_position, 5)\n\n # Going up when already at the top.\n self.cli.cursor_up()\n self.assertEqual(self.cli.document.cursor_position, 5)\n\n # Going up to a line that's shorter.\n self.cli.reset()\n self.cli.insert_text('line1\\nlong line2')\n\n self.cli.cursor_up()\n self.assertEqual(self.cli.document.cursor_position, 5)\n\n def test_cursor_down(self):\n self.cli.insert_text('line1\\nline2')\n self.cli.cursor_position = 3\n\n # Normally going down\n self.cli.cursor_down()\n self.assertEqual(self.cli.document.cursor_position, len('line1\\nlin'))\n\n # Going down to a line that's storter.\n self.cli.reset()\n self.cli.insert_text('long line1\\na\\nb')\n self.cli.cursor_position = 3\n\n self.cli.cursor_down()\n self.assertEqual(self.cli.document.cursor_position, len('long line1\\na'))\n\n def test_auto_up_and_down(self):\n self.cli.insert_text('line1\\nline2')\n with self.assertRaises(ReturnInput):\n self.cli.return_input()\n self.cli.insert_text('long line3\\nlong line4')\n\n # Test current\n self.assertEqual(self.cli.text, 'long line3\\nlong line4')\n self.assertEqual(self.cli.cursor_position, len('long line3\\nlong line4'))\n\n # Go up.\n self.cli.auto_up()\n self.assertEqual(self.cli.text, 'long line3\\nlong line4')\n self.assertEqual(self.cli.cursor_position, len('long line3'))\n\n # Go up again (goes to first item.)\n self.cli.auto_up()\n self.assertEqual(self.cli.text, 'line1\\nline2')\n self.assertEqual(self.cli.cursor_position, len('line1\\nline2'))\n\n # Go up again (goes to first line of first item.)\n self.cli.auto_up()\n self.assertEqual(self.cli.text, 'line1\\nline2')\n self.assertEqual(self.cli.cursor_position, len('line1'))\n\n # Go up again (while we're at the first item in history.)\n # (Nothing changes.)\n self.cli.auto_up()\n self.assertEqual(self.cli.text, 'line1\\nline2')\n self.assertEqual(self.cli.cursor_position, len('line1'))\n\n # Go down (to second line of first item.)\n self.cli.auto_down()\n self.assertEqual(self.cli.text, 'line1\\nline2')\n self.assertEqual(self.cli.cursor_position, len('line1\\nline2'))\n\n # Go down again (to first line of second item.)\n # (Going down goes to the first character of a line.)\n self.cli.auto_down()\n self.assertEqual(self.cli.text, 'long line3\\nlong line4')\n self.assertEqual(self.cli.cursor_position, len(''))\n\n # Go down again (to second line of second item.)\n self.cli.auto_down()\n self.assertEqual(self.cli.text, 'long line3\\nlong line4')\n self.assertEqual(self.cli.cursor_position, len('long line3\\n'))\n\n # Go down again after the last line. (nothing should happen.)\n self.cli.auto_down()\n self.assertEqual(self.cli.text, 'long line3\\nlong line4')\n self.assertEqual(self.cli.cursor_position, len('long line3\\n'))\n\n def test_delete_current_line(self):\n self.cli.insert_text('line1\\nline2\\nline3')\n self.cli.cursor_up()\n\n deleted_text = self.cli.delete_current_line()\n\n self.assertEqual(self.cli.text, 'line1\\nline3')\n self.assertEqual(deleted_text, 'line2')\n self.assertEqual(self.cli.cursor_position, len('line1\\n'))\n\n def test_join_next_line(self):\n self.cli.insert_text('line1\\nline2\\nline3')\n self.cli.cursor_up()\n self.cli.join_next_line()\n\n self.assertEqual(self.cli.text, 'line1\\nline2line3')\n\n # Test when there is no '\\n' in the text\n self.cli.reset()\n self.cli.insert_text('line1')\n self.cli.cursor_position = 0\n self.cli.join_next_line()\n\n self.assertEqual(self.cli.text, 'line1')\n\n def test_go_to_matching_bracket(self):\n self.cli.insert_text('A ( B [ C ) >')\n self.cli.home()\n self.cli.cursor_right()\n self.cli.cursor_right()\n\n self.assertEqual(self.cli.cursor_position, 2)\n self.cli.go_to_matching_bracket()\n self.assertEqual(self.cli.cursor_position, 10)\n self.cli.go_to_matching_bracket()\n self.assertEqual(self.cli.cursor_position, 2)\n\n def test_newline(self):\n self.cli.insert_text('hello world')\n self.cli.newline()\n\n self.assertEqual(self.cli.text, 'hello world\\n')\n\n def test_swap_characters_before_cursor(self):\n self.cli.insert_text('hello world')\n self.cli.cursor_left()\n self.cli.cursor_left()\n self.cli.swap_characters_before_cursor()\n\n self.assertEqual(self.cli.text, 'hello wrold')\n\n\nclass DocumentTest(unittest.TestCase):\n def setUp(self):\n self.document = Document(\n 'line 1\\n' +\n 'line 2\\n' +\n 'line 3\\n' +\n 'line 4\\n',\n len(\n 'line 1\\n' +\n 'lin')\n )\n\n def test_current_char(self):\n self.assertEqual(self.document.current_char, 'e')\n\n def test_text_before_cursor(self):\n self.assertEqual(self.document.text_before_cursor, 'line 1\\nlin')\n\n def test_text_after_cursor(self):\n self.assertEqual(self.document.text_after_cursor,\n 'e 2\\n' +\n 'line 3\\n' +\n 'line 4\\n')\n\n def test_lines(self):\n self.assertEqual(self.document.lines, [\n 'line 1',\n 'line 2',\n 'line 3',\n 'line 4', '' ])\n\n def test_line_count(self):\n self.assertEqual(self.document.line_count, 5)\n\n def test_current_line_before_cursor(self):\n self.assertEqual(self.document.current_line_before_cursor, 'lin')\n\n def test_current_line_after_cursor(self):\n self.assertEqual(self.document.current_line_after_cursor, 'e 2')\n\n def test_current_line(self):\n self.assertEqual(self.document.current_line, 'line 2')\n\n def test_cursor_position(self):\n self.assertEqual(self.document.cursor_position_row, 1)\n self.assertEqual(self.document.cursor_position_col, 3)\n\n d = Document('', 0)\n self.assertEqual(d.cursor_position_row, 0)\n self.assertEqual(d.cursor_position_col, 0)\n\n def test_translate_index_to_position(self):\n pos = self.document.translate_index_to_position(\n len('line 1\\nline 2\\nlin'))\n\n self.assertEqual(pos[0], 3)\n self.assertEqual(pos[1], 3)\n\n def test_cursor_at_end(self):\n doc = Document('hello', 3)\n self.assertEqual(doc.cursor_at_the_end, False)\n\n doc2 = Document('hello', 5)\n self.assertEqual(doc2.cursor_at_the_end, True)\n\n\nfrom prompt_toolkit.code import Code\nfrom prompt_toolkit.prompt import Prompt\n\nimport pygments\n\nclass PromptTest(unittest.TestCase):\n def setUp(self):\n self.line = Line()\n self.line.insert_text('some text')\n\n self.code = Code(self.line.document)\n self.prompt = Prompt(self.line, self.code)\n\n def _test_token_text_list(self, data):\n # Test whether data is list of (Token, text) tuples.\n for token, text in data:\n self.assertIsInstance(token, pygments.token._TokenType)\n self.assertIsInstance(text, six.text_type)\n\n def test_get_prompt(self):\n result = list(self.prompt.get_prompt())\n self._test_token_text_list(result)\n\n def test_second_line_prefix(self):\n result = list(self.prompt.get_second_line_prefix())\n self._test_token_text_list(result)\n\n def test_get_help_tokens(self):\n result = list(self.prompt.get_second_line_prefix())\n self._test_token_text_list(result)\n\n\n#--\n\n\nfrom prompt_toolkit.contrib.shell.lexer import ParametersLexer, TextToken\nfrom pygments.token import Token\n\nclass ParameterLexerTest(unittest.TestCase):\n def setUp(self):\n self.lexer = ParametersLexer(stripnl=False, stripall=False, ensurenl=False)\n\n def test_simple(self):\n t = list(self.lexer.get_tokens('aaa bbb ccc'))\n self.assertEqual(t, [\n (Token.Text, 'aaa'),\n (Token.WhiteSpace, ' '),\n (Token.Text, 'bbb'),\n (Token.WhiteSpace, ' '),\n (Token.Text, 'ccc') ])\n\n def test_complex(self):\n t = list(self.lexer.get_tokens('''a'a 'a \" b \"bb ccc\\\\'''))\n # The tokenizer separates text and whitespace, but keeps all the characters.\n self.assertEqual(t, [\n (Token.Text, \"a'a 'a\"),\n (Token.WhiteSpace, ' '),\n (Token.Text, '\" b \"bb'),\n (Token.WhiteSpace, ' '),\n (Token.Text, 'ccc\\\\') ])\n\n\nclass TextTokenTest(unittest.TestCase):\n def test_simple(self):\n t = TextToken('hello')\n t.unescaped_text = 'hello'\n\n def test_double_quotes(self):\n t = TextToken('h\"e\"llo\" wor\"ld')\n self.assertEqual(t.unescaped_text, 'hello world')\n self.assertEqual(t.inside_double_quotes, False)\n self.assertEqual(t.inside_single_quotes, False)\n self.assertEqual(t.trailing_backslash, False)\n\n def test_single_quotes(self):\n t = TextToken(\"h'e'llo' wo'rld\")\n self.assertEqual(t.unescaped_text, 'hello world')\n self.assertEqual(t.inside_double_quotes, False)\n self.assertEqual(t.inside_single_quotes, False)\n self.assertEqual(t.trailing_backslash, False)\n\n def test_backslashes(self):\n t = TextToken(\"hello\\ wo\\\\rld\")\n self.assertEqual(t.unescaped_text, 'hello world')\n self.assertEqual(t.inside_double_quotes, False)\n self.assertEqual(t.inside_single_quotes, False)\n self.assertEqual(t.trailing_backslash, False)\n\n def test_open_double_quote(self):\n t = TextToken('he\"llo world')\n self.assertEqual(t.unescaped_text, 'hello world')\n self.assertEqual(t.inside_double_quotes, True)\n self.assertEqual(t.inside_single_quotes, False)\n self.assertEqual(t.trailing_backslash, False)\n\n def test_open_single_quote(self):\n t = TextToken(\"he'llo world\")\n self.assertEqual(t.unescaped_text, 'hello world')\n self.assertEqual(t.inside_double_quotes, False)\n self.assertEqual(t.inside_single_quotes, True)\n self.assertEqual(t.trailing_backslash, False)\n\n def test_trailing_backslash(self):\n t = TextToken(\"hello\\\\ world\\\\\")\n self.assertEqual(t.unescaped_text, 'hello world')\n self.assertEqual(t.inside_double_quotes, False)\n self.assertEqual(t.inside_single_quotes, False)\n self.assertEqual(t.trailing_backslash, True)\n\n#---\n\nfrom prompt_toolkit.contrib.shell.rules import TokenStream\n\nclass TokenStreamTest(unittest.TestCase):\n def test_tokenstream(self):\n s = TokenStream([ 'aaa', 'bbb', 'ccc', ])\n\n # Test top\n self.assertEqual(s.first_token, 'aaa')\n self.assertEqual(s.has_more_tokens, True)\n\n # Pop\n self.assertEqual(s.pop(), 'aaa')\n self.assertEqual(s.first_token, 'bbb')\n self.assertEqual(s.has_more_tokens, True)\n\n # Test restore point\n with s.restore_point:\n self.assertEqual(s.pop(), 'bbb')\n self.assertEqual(s.first_token, 'ccc')\n self.assertEqual(s.pop(), 'ccc')\n\n self.assertEqual(s.has_more_tokens, False)\n self.assertEqual(s.first_token, None)\n\n # State should have been restored after the with block.\n self.assertEqual(s.first_token, 'bbb')\n self.assertEqual(s.has_more_tokens, True)\n\n#--\n\nfrom prompt_toolkit.contrib.shell.rules import Literal\nfrom prompt_toolkit.contrib.shell.nodes import LiteralNode\n\nclass LiteralTest(unittest.TestCase):\n def setUp(self):\n self.literal = Literal('my-variable', dest='key')\n\n def test_literal_match(self):\n stream = TokenStream([ 'my-variable' ])\n result = list(self.literal.parse(stream))\n\n self.assertEqual(len(result), 1)\n self.assertIsInstance(result[0], LiteralNode)\n self.assertEqual(result[0].rule, self.literal)\n self.assertEqual(result[0]._text, 'my-variable')\n self.assertEqual(result[0].get_variables(), { 'key': 'my-variable' })\n\n def test_literal_nomatch_suffix(self):\n stream = TokenStream([ 'my-variable', 'suffix' ])\n result = list(self.literal.parse(stream))\n\n self.assertEqual(len(result), 0)\n\n def test_literal_nomatch_invalid(self):\n stream = TokenStream([ 'invalid' ])\n result = list(self.literal.parse(stream))\n\n self.assertEqual(len(result), 0)\n\n\n#class VariableTest(unittest.TestCase):\n# def setUp(self):\n# self.variable = Variable(placeholder='my-variable', dest='destination')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":21077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"84332809","text":"\"\"\"Provide a mock plugin.\"\"\"\nimport asyncio\nimport logging\nimport sys\n\nimport janus\nimport numpy as np\n\nfrom imjoy.utils import dotdict\nfrom imjoy.workers.python_worker import PluginConnection\nfrom imjoy.workers.python3_client import JOB_HANDLERS_PY3\n\nlogging.basicConfig(stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\nlogger.setLevel(logging.INFO)\n\nNAME_SPACE = \"/\"\n\n\nclass ImJoyAPI:\n \"\"\" Represent a set of mock ImJoy API \"\"\"\n\n def log(self, message):\n logger.info(\"log: %s\", message)\n\n def error(self, message):\n logger.info(\"error: %s\", message)\n\n def alert(self, message):\n logger.info(\"alert: %s\", message)\n\n def showStatus(self, message):\n logger.info(\"showStatus: %s\", message)\n\n def showMessage(self, message):\n logger.info(\"showMessage: %s\", message)\n\n\nclass TestPlugin:\n \"\"\" Represent a mock proxy plugin \"\"\"\n\n def __init__(self, loop, sio, pid, secret):\n self.conn = None\n self.loop = loop\n self.sio = sio\n self.pid = pid\n self.secret = secret\n self._plugin_message_handler = []\n self.api = None\n self.imjoy_api = ImJoyAPI()\n self.janus_queue = janus.Queue(loop=self.loop)\n self.queue = self.janus_queue.sync_q\n\n @sio.on(\"message_from_plugin_\" + secret)\n async def on_message(msg): # pylint:disable=unused-variable\n logger.info(\"Message from plugin: %s\", msg)\n self.message_handler(msg)\n\n def get_api(self):\n \"\"\"return the plugin api functions.\"\"\"\n return self.conn.local[\"api\"]\n\n async def message_worker(self, async_q, abort=None):\n \"\"\"Implement a message worker.\"\"\"\n while True:\n try:\n if abort is not None and abort.is_set():\n break\n\n job = await async_q.get()\n async_q.task_done()\n if job is None:\n continue\n\n if \"setInterface\" == job[\"type\"]:\n api = self.conn.set_remote(job[\"api\"])\n self.conn.local[\"np\"] = np\n self.conn.emit({\"type\": \"interfaceSetAsRemote\"})\n if not self.conn.init:\n self.conn.set_interface(self.imjoy_api)\n self.conn.init = True\n async_q.task_done()\n else:\n handler = JOB_HANDLERS_PY3.get(job[\"type\"])\n if handler is None:\n continue\n try:\n await handler(self.conn, job, logger)\n except Exception: # pylint: disable=broad-except\n logger.error(\n \"Error occured in the loop %s\", traceback.format_exc()\n )\n finally:\n sys.stdout.flush()\n except Exception as e:\n print(e)\n\n def terminate(self, msg):\n \"\"\"mark the plugin as terminated.\"\"\"\n logger.info(\"Plugin disconnected: %s\", msg)\n self.terminated = True\n\n def setup(self, conn):\n\n conn.emit = self.emit\n\n async def init(self):\n \"\"\"initialize the plugin.\"\"\"\n opt = dotdict(id=self.pid, secret=self.secret)\n self.conn = PluginConnection(self, opt)\n self.conn.setup()\n self.terminated = False\n initialized = self.loop.create_future()\n self.on_plugin_message(\"initialized\", initialized)\n self.on_plugin_message(\"disconnected\", self.terminate)\n await initialized\n\n workers = [\n self.message_worker(self.janus_queue.async_q, self.conn.abort)\n for i in range(2)\n ]\n asyncio.ensure_future(asyncio.gather(*workers))\n\n async def _emit(self, channel, data):\n \"\"\"Emit a message.\"\"\"\n fut = self.loop.create_future()\n\n def callback(ret=None):\n fut.set_result(ret)\n\n await self.sio.emit(channel, data, namespace=NAME_SPACE, callback=callback)\n return await fut\n\n async def emit_plugin_message(self, data):\n \"\"\"Emit plugin message.\"\"\"\n await self._emit(\n \"message_to_plugin_\" + self.secret, {\"type\": \"message\", \"data\": data}\n )\n\n def emit(self, data):\n \"\"\"Emit plugin message.\"\"\"\n asyncio.ensure_future(\n self.emit_plugin_message({\"type\": \"message\", \"data\": data})\n )\n\n def on_plugin_message(self, message_type, callback_or_future):\n \"\"\"Add a new plugin message.\"\"\"\n self._plugin_message_handler.append(\n {\"type\": message_type, \"callback_or_future\": callback_or_future}\n )\n\n async def execute(self, code):\n \"\"\"Execute plugin code.\"\"\"\n future = self.loop.create_future()\n\n def resolve(ret):\n future.set_result(ret)\n\n def reject(_):\n future.set_exception(Exception(\"executeFailure\"))\n\n self.on_plugin_message(\"executeSuccess\", resolve)\n self.on_plugin_message(\"executeFailure\", reject)\n await self.emit_plugin_message({\"type\": \"execute\", \"code\": code})\n result = await future\n assert result == {\"type\": \"executeSuccess\"}\n await self.emit_plugin_message({\"type\": \"getInterface\"})\n\n def message_handler(self, msg):\n \"\"\"Handle plugin message.\"\"\"\n msg_type = msg[\"type\"]\n handlers = self._plugin_message_handler\n for h in handlers:\n # extract message\n if msg_type == \"message\":\n job = msg[\"data\"]\n self.queue.put(job)\n logger.debug(\"Added task to the queue\")\n\n elif msg_type == h[\"type\"]:\n callback_or_future = h[\"callback_or_future\"]\n if isinstance(callback_or_future, asyncio.Future):\n callback_or_future.set_result(msg)\n else:\n callback_or_future(msg)\n","sub_path":"tests/mock_plugin.py","file_name":"mock_plugin.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"528302172","text":"import copy\nimport json\nimport logging\nimport os\nimport urllib2\nimport html2text\nimport sys\n\nfrom smart_open import smart_open\n\nlogger = logging.getLogger(__name__)\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nclass JSONWriter:\n def __init__(self, name):\n self._file = smart_open(name, 'w')\n\n def page(self, page, content):\n if page is not None and page != \"\":\n if content[\"topic\"] == \"Top/World/Deutsch/Computer/Programmieren/Werkzeuge/Versionskontrolle\":\n newcontent = copy.copy(content)\n newcontent[\"url\"] = page\n self._file.write(json.dumps(newcontent) + \"\\n\")\n else:\n logger.info(\"Skipping page %s, wrong topic\", page)\n else:\n logger.info(\"Skipping page %s, page attribute is missing\", page)\n\n def finish(self):\n self._file.close()\n\n\nclass TaxonomieWriter:\n def __init__(self, name):\n self._file = smart_open(name, 'w')\n\n def page(self, page, content):\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.unicode_snob = True\n h.escape_snob = True\n\n if page is not None and page != \"\":\n topic = content['topic']\n if self.checkTopic(topic):\n directory = \"./\" + topic\n if not os.path.exists(directory):\n os.makedirs(directory)\n try:\n file_path = directory + \"/\" + content[\"d:Title\"] + \".txt\"\n if not os.path.exists(file_path):\n response = urllib2.urlopen(page, timeout=10)\n html_content = response.read()\n html_content = unicode(html_content, errors='ignore')\n text = h.handle(html_content)\n f = open(file_path, 'w')\n f.write(text)\n f.close()\n logging.info(\"Downloaded: %s\", page)\n except Exception as e:\n logger.warn(\"Skipping page %s, Error: %s\", page, e)\n else:\n logger.info(\"Skipping topic %s\", topic)\n else:\n logger.info(\"Skipping page %s, page attribute is missing\", page)\n\n def checkTopic(self, topic):\n topics = [\"Top/Computers\", \"Top/Science\", \"Top/World/Deutsch/Computer\", \"Top/World/Deutsch/Wissenschaft\"]\n match = False\n for t in topics:\n if t in topic:\n match = True\n break\n\n return match\n\n def finish(self):\n self._file.close()\n","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"458252761","text":"# pylint:skip-file\nimport sys\nimport mxnet as mx\nimport numpy as np\nfrom collections import namedtuple\nimport time\nimport math\n\nfrom my_layer import conv_relu, maxpool\nfrom my_net import pm as Param\nimport mxnet.symbol as S\n\ndef gen():\n for i in range(1000):\n yield i\n\ngtr = gen()\n\ndef cnn_forward(data, P, D):\n\n def C(idx, indata, bn):\n p= Param['c%d'%idx]\n i= 4*(idx-1)\n \n c= S.Convolution(name='c'+str(idx),\n data=indata,\n kernel=p['fsize'],\n num_filter=p['fnum'],\n pad=p['pad'],\n stride=p['stride'],\n weight=P[i], \n bias=P[i+1]\n )\n if bn:\n c = mx.sym.BatchNorm(\n name='b'+str(next(gtr)), data=c, \n gamma=P[i+2], beta=P[i+3],\n )\n r = mx.sym.Activation(name='r'+str(idx), data=c, act_type='relu')\n return r\n \n conv1 = C(1, data, True)\n conv2 = C(2, conv1, True)\n pool1 = maxpool(conv2)\n\n conv3 = C(3, pool1, True)\n pool2 = maxpool(conv3)\n\n conv4 = C(4, pool2, True)\n pool3 = maxpool(conv4)\n\n conv5 = C(5, pool3, True)\n conv6 = C(6, conv5, True)\n up1 = S.Deconvolution(\n data=conv6, kernel=(4,4), stride=(2,2), pad=(1,1),\n num_filter=64, no_bias=True,\n weight=D[0]\n )\n\n conv7 = C(7, up1, True)\n up2 = S.Deconvolution(\n data=conv7, kernel=(4,4), stride=(2,2), pad=(1,1),\n num_filter=64, no_bias=True,\n weight=D[1] \n )\n\n conv8 = C(8, up2, True)\n up3 = S.Deconvolution(\n data=conv8, kernel=(4,4), stride=(2,2), pad=(1,1),\n num_filter=32, no_bias=True,\n weight=D[2] \n )\n\n conv9 = C(9, up3, True)\n conv10 = C(10, conv9, True)\n\n return conv10\n\n\nLSTMState = namedtuple(\"LSTMState\", [\"c\", \"h\"])\nLSTMParam = namedtuple(\"LSTMParam\", [\"i2h_weight\", \"i2h_bias\",\n \"h2h_weight\", \"h2h_bias\"])\nLSTMModel = namedtuple(\"LSTMModel\", [\"rnn_exec\", \"symbol\",\n \"init_states\", \"last_states\",\n \"seq_data\", \"seq_labels\", \"seq_outputs\",\n \"param_blocks\"])\n\ndef lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0.):\n \"\"\"LSTM Cell symbol\"\"\"\n if dropout > 0.:\n indata = mx.sym.Dropout(data=indata, p=dropout)\n i2h = mx.sym.FullyConnected(data=indata,\n weight=param.i2h_weight,\n bias=param.i2h_bias,\n num_hidden=num_hidden * 4,\n name=\"t%d_l%d_i2h\" % (seqidx, layeridx))\n h2h = mx.sym.FullyConnected(data=prev_state.h,\n weight=param.h2h_weight,\n bias=param.h2h_bias,\n num_hidden=num_hidden * 4,\n name=\"t%d_l%d_h2h\" % (seqidx, layeridx))\n gates = i2h + h2h\n slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,\n name=\"t%d_l%d_slice\" % (seqidx, layeridx))\n in_gate = mx.sym.Activation(slice_gates[0], act_type=\"sigmoid\")\n in_transform = mx.sym.Activation(slice_gates[1], act_type=\"tanh\")\n forget_gate = mx.sym.Activation(slice_gates[2], act_type=\"sigmoid\")\n out_gate = mx.sym.Activation(slice_gates[3], act_type=\"sigmoid\")\n next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)\n next_h = out_gate * mx.sym.Activation(next_c, act_type=\"tanh\")\n return LSTMState(c=next_c, h=next_h)\n\n\n# we define a new unrolling function here because the original\n# one in lstm.py concats all the labels at the last layer together,\n# making the mini-batch size of the label different from the data.\n# I think the existing data-parallelization code need some modification\n# to allow this situation to work properly\ndef lstm_unroll(num_lstm_layer, seq_len, num_hidden, dropout=0., shapes=None):\n\n # embed_weight = mx.sym.Variable(\"embed_weight\")\n cls_weight = mx.sym.Variable(\"cls_weight\")\n cls_bias = mx.sym.Variable(\"cls_bias\")\n param_cells = []\n last_states = []\n pred_all = []\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable(\"l%d_i2h_weight\" % i),\n i2h_bias=mx.sym.Variable(\"l%d_i2h_bias\" % i),\n h2h_weight=mx.sym.Variable(\"l%d_h2h_weight\" % i),\n h2h_bias=mx.sym.Variable(\"l%d_h2h_bias\" % i)))\n state = LSTMState(c=mx.sym.Variable(\"l%d_init_c\" % i),\n h=mx.sym.Variable(\"l%d_init_h\" % i))\n last_states.append(state)\n assert(len(last_states) == num_lstm_layer)\n\n # embeding layer\n data = mx.sym.Variable('data')\n label = mx.sym.Variable('softmax_label')\n timeseq = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)\n labelseq = mx.sym.SliceChannel(data=label, num_outputs=seq_len, squeeze_axis=1)\n \n # CNN param\n layer_num = 10\n P = []\n for i in range(layer_num):\n P.append(S.Variable('c%d_weight'%i))\n P.append(S.Variable('c%d_bias'%i))\n P.append(S.Variable('bn%d_gamma'%i))\n P.append(S.Variable('bn%d_beta'%i))\n up_num = 3\n D = []\n for i in range(up_num):\n D.append( S.Variable('deconv%d_weight'%i))\n # D.append( S.Variable('deconv%d_bias'%i) )\n\n for seqidx in range(seq_len):\n hidden = timeseq[seqidx]\n # embed in CNN\n hidden = cnn_forward(hidden, P, D)\n hidden = mx.sym.Reshape(data=hidden, target_shape=(0,1*256*256))\n \n #print seqidx\n #for _ in hidden.infer_shape(**shapes):\n # print _\n\n # stack LSTM\n for i in range(num_lstm_layer):\n if i == 0:\n dp_ratio = 0.\n else:\n dp_ratio = dropout\n next_state = lstm(num_hidden, indata=hidden,\n prev_state=last_states[i],\n param=param_cells[i],\n seqidx=seqidx, layeridx=i, dropout=dp_ratio)\n hidden = next_state.h\n last_states[i] = next_state\n # decoder\n if dropout > 0.:\n hidden = mx.sym.Dropout(data=hidden, p=dropout)\n #hidden_all.append(hidden)\n \n pred = mx.sym.FullyConnected(data=hidden, weight=cls_weight, bias=cls_bias, name='pred%d'%seqidx, \n num_hidden=1*256*256)\n pred = mx.sym.Reshape(data=pred, target_shape=(0,1,256,256))\n pred = mx.sym.LogisticRegressionOutput(data=pred, label=labelseq[seqidx], name='logis%d'%seqidx)\n pred_all.append(pred)\n\n return mx.sym.Group(pred_all)","sub_path":"synthesis/c_lstm.py","file_name":"c_lstm.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"342322701","text":"import scrapy\nimport json\n\nclass KnittingSpider(scrapy.Spider):\n name = \"knittingLowDomain\"\n\n def start_requests(self):\n with open('url.json', 'r') as file:\n file.readline()\n data = []\n for line in file.readlines():\n data.append(line.split(',')[0])\n for url in data[:-4]: # one ']' and 3 not wanted links\n if url is not None: \n yield scrapy.Request(url=json.loads(url)['url'], callback=self.parse)\n\n def parse(self, response):\n for product in response.css('div.product'):\n yield {\n 'url': response.urljoin(product.css('a').attrib['href']),\n 'category': response.url.split('.cz')[1]\n }\n","sub_path":"knitting/knitting/spiders/lowDomainSpider.py","file_name":"lowDomainSpider.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"513964715","text":"from logging import Handler, LogRecord\n\nimport telebot\n\n\nclass TelegramBotHandler(Handler):\n def __init__(self, token: str, chat_id: str):\n super().__init__()\n self.token = token\n self.chat_id = chat_id\n\n def emit(self, record: LogRecord):\n bot = telebot.TeleBot(self.token)\n bot.send_message(\n self.chat_id,\n self.format(record)\n )\n","sub_path":"config/logging_handlers.py","file_name":"logging_handlers.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"105898646","text":"class ciag:\r\n def __init__(self,a1,r,n):\r\n self.a1=int(a1)\r\n self.r=int(r)\r\n self.n=int(n)\r\n \r\n def wyswietl_dane(self):\r\n print('a1 = '+str(self.a1))\r\n print('roznica = '+str(self.r))\r\n print('dlugosc = '+str(self.n))\r\n\r\n def policz_sume(self):\r\n ostatni= self.a1 + (self.n - 1)*self.r\r\n suma= (self.a1 + ostatni)/2*self.n\r\n print('suma = '+str(suma))\r\n\r\n\r\nobiekt=ciag(0,3,5)\r\nobiekt.wyswietl_dane()\r\nobiekt.policz_sume()\r\n","sub_path":"wd_cw04/zad5.py","file_name":"zad5.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"379850877","text":"\"\"\"\nDescription: 运行测试用例模块\nVersion: 2.0\nAutor: byh\nDate: 2020-11-26 19:16:49\nLastEditors: byh\nLastEditTime: 2020-12-30 12:42:10\n\"\"\"\n# import os\n# import sys\n#\n# base_path = os.path.dirname(os.path.dirname(__file__))\n# sys.path.append(base_path)\n\nimport os\nfrom middleware.logger_handler import logger\nimport time\nimport unittest\nfrom HTMLTestRunner import HTMLTestRunner\nfrom config.config_path import Configuration\n\ntestloader = unittest.TestLoader()\nsuite = testloader.discover(Configuration.testcase_path)\n\nif not os.path.exists(Configuration.report_folder_path):\n os.mkdir(Configuration.report_folder_path)\n\nnow = time.strftime(\"%y-%m-%d %H_%M_%S\")\nreport_name = r\"\\testreport {}.html\".format(now)\nreport_path = Configuration.report_folder_path + report_name\nprint(report_path)\n\nwith open(report_path, \"wb\") as f:\n runner = HTMLTestRunner(stream=f, verbosity=2, title=\"自动化测试报告\", description=\"前程贷项目\", tester=\"byh\")\n try:\n runner.run(suite)\n except Exception as e:\n\n logger.exception(\"生成测试报告失败:{}\".format(e))\n raise e\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"45377425","text":"#-----------------------------------------------------------------------------\n# Copyright (C) 2013 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\nimport os\n\nimport logging\nimport markdown \n\nfrom cgi import escape\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom tornado import web, httpserver, ioloop, log\nfrom tornado.httpclient import AsyncHTTPClient\n\nimport tornado.options\nfrom tornado.options import define, options\n\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom IPython.config import Config\nfrom IPython.nbconvert.exporters import HTMLExporter\n\nfrom .handlers import handlers, LocalFileHandler\nfrom .cache import DummyAsyncCache, AsyncMultipartMemcache, MockCache, pylibmc\ntry:\n from .client import LoggingCurlAsyncHTTPClient as HTTPClientClass\nexcept ImportError:\n from .client import LoggingSimpleAsyncHTTPClient as HTTPClientClass\nfrom .github import AsyncGitHubClient\nfrom .log import log_request\nfrom .utils import git_info, ipython_info\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\naccess_log = log.access_log\napp_log = log.app_log\n\nhere = os.path.dirname(__file__)\npjoin = os.path.join\n\ndef nrhead():\n try:\n import newrelic.agent\n except ImportError:\n return ''\n return newrelic.agent.get_browser_timing_header()\n\ndef nrfoot():\n try:\n import newrelic.agent\n except ImportError:\n return ''\n return newrelic.agent.get_browser_timing_footer()\n\ndef main():\n # command-line options\n define(\"debug\", default=False, help=\"run in debug mode\", type=bool)\n define(\"no_cache\", default=False, help=\"Do not cache results\", type=bool)\n define(\"localfiles\", default=\"\", help=\"Allow to serve local files under /localfile/* this can be a security risk\", type=str)\n define(\"port\", default=80, help=\"run on the given port\", type=int)\n define(\"cache_expiry_min\", default=10*60, help=\"minimum cache expiry (seconds)\", type=int)\n define(\"cache_expiry_max\", default=2*60*60, help=\"maximum cache expiry (seconds)\", type=int)\n define(\"mc_threads\", default=1, help=\"number of threads to use for Async Memcache\", type=int)\n define(\"threads\", default=1, help=\"number of threads to use for background IO\", type=int)\n tornado.options.parse_command_line()\n \n # NBConvert config\n config = Config()\n config.HTMLExporter.template_file = 'basic'\n config.NbconvertApp.fileext = 'html'\n config.CSSHTMLHeaderTransformer.enabled = False\n # don't strip the files prefix - we use it for redirects\n # config.Exporter.filters = {'strip_files_prefix': lambda s: s}\n \n exporter = HTMLExporter(config=config, log=log.app_log)\n \n # DEBUG env implies both autoreload and log-level\n if os.environ.get(\"DEBUG\"):\n options.debug = True\n logging.getLogger().setLevel(logging.DEBUG)\n \n # setup memcache\n mc_pool = ThreadPoolExecutor(options.mc_threads)\n pool = ThreadPoolExecutor(options.threads)\n memcache_urls = os.environ.get('MEMCACHIER_SERVERS',\n os.environ.get('MEMCACHE_SERVERS')\n )\n if options.no_cache :\n log.app_log.info(\"Not using cache\")\n cache = MockCache()\n elif pylibmc and memcache_urls:\n kwargs = dict(pool=mc_pool)\n username = os.environ.get('MEMCACHIER_USERNAME', '')\n password = os.environ.get('MEMCACHIER_PASSWORD', '')\n if username and password:\n kwargs['binary'] = True\n kwargs['username'] = username\n kwargs['password'] = password\n log.app_log.info(\"Using SASL memcache\")\n else:\n log.app_log.info(\"Using plain memecache\")\n \n cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)\n else:\n log.app_log.info(\"Using in-memory cache\")\n cache = DummyAsyncCache()\n \n # setup tornado handlers and settings\n \n template_path = pjoin(here, 'templates')\n static_path = pjoin(here, 'static')\n env = Environment(loader=FileSystemLoader(template_path))\n env.filters['markdown'] = markdown.markdown\n try:\n git_data = git_info(here)\n except Exception as e:\n app_log.error(\"Failed to get git info: %s\", e)\n git_data = {}\n else:\n git_data['msg'] = escape(git_data['msg'])\n\n\n if options.no_cache:\n # force jinja to recompile template every time\n env.globals.update(cache_size=0)\n env.globals.update(nrhead=nrhead, nrfoot=nrfoot, git_data=git_data,\n ipython_info=ipython_info()\n )\n AsyncHTTPClient.configure(HTTPClientClass)\n client = AsyncHTTPClient()\n github_client = AsyncGitHubClient(client)\n github_client.authenticate()\n \n settings = dict(\n log_function=log_request,\n jinja2_env=env,\n static_path=static_path,\n client=client,\n github_client=github_client,\n exporter=exporter,\n cache=cache,\n cache_expiry_min=options.cache_expiry_min,\n cache_expiry_max=options.cache_expiry_max,\n pool=pool,\n gzip=True,\n render_timeout=20,\n localfile_path=os.path.abspath(options.localfiles),\n )\n \n # create and start the app\n if options.localfiles:\n log.app_log.warning(\"Serving local notebooks in %s, this can be a security risk\", options.localfiles)\n # use absolute or relative paths:\n handlers.insert(0, (r'/localfile/(.*)', LocalFileHandler))\n\n app = web.Application(handlers, debug=options.debug, **settings)\n http_server = httpserver.HTTPServer(app, xheaders=True)\n log.app_log.info(\"Listening on port %i\", options.port)\n http_server.listen(options.port)\n ioloop.IOLoop.instance().start()\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"nbviewer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"425333509","text":"import logging\n\nimport json\nimport re\nimport requests \n\nfrom xml_utils import fieldval, XmlReader\nfrom utils import read_all_pages, get_auth_key\n \nimport_logger = logging.getLogger('import_log')\nlogger = logging.getLogger('console')\n\n\n\nclass EmpClient(object):\n\n def __init__(self, url_base, key_secret):\n self.url_base = url_base\n self.key_secret = key_secret\n self.list_url = '{}/app/api/v1/employers/'.format(self.url_base)\n self.quick_list_url = '{}/app/api/v1/employers/quick-list'.format(self.url_base)\n\n def get_headers(self):\n return {\n 'Authentication': 'CL {}'.format(self.key_secret),\n 'Content-type': 'application/json'\n }\n\n def get_existing_ids(self):\n \"\"\" returns list of existing old ids in the CL databse,\n so we can skip them during the import \n \"\"\" \n url = '{}?page_size={}'.format(self.quick_list_url, 250)\n headers = self.get_headers()\n ids = []\n for page in read_all_pages(url, headers):\n for item in page:\n old_id = item.get('old_id')\n if old_id:\n ids.append(int(old_id))\n return ids \n\n def save(self, data, id=None):\n headers = self.get_headers()\n\n import_logger.debug('Save with headers [{}]'.format(headers))\n\n if not id:\n url = self.list_url\n import_logger.debug(url)\n res = requests.post(url, data=json.dumps(data), headers=headers)\n else:\n raise NotImplementedError(\"update is not implemented\")\n\n if res.status_code not in [200, 201]: \n import_logger.info('failed for record: %s, status_code=%s' % (data['name'], res.status_code))\n import_logger.error('input was:', data)\n import_logger.error('%s', data)\n import_logger.error('response was:')\n import_logger.error(res.content) \n return False \n \n #res.raise_for_status()\n #print 'saved: {}'.format(data['name'])\n return True\n\n\n\n\n\nclass Parser(object):\n def __init__(self, node, reader):\n self.node = node \n company_id = self.id = int(fieldval(node, 'id'))\n self.users = reader.get_users_for_id(company_id)\n\n def record_identity(self):\n \"\"\" get identity for the failed record \"\"\"\n for k in ['name', 'full_name', 'id']:\n val = fieldval(self.node, k)\n if val:\n return '{}={}'.format(k, val)\n\n def _fix_url(self, url):\n if url and not re.match(r'http(s)?:', url):\n url = 'http://{}'.format(url) \n return url\n\n def get_data(self):\n n = self.node \n company_name = fieldval(n, 'name') \n\n if not company_name: \n return\n\n users = [] \n for full_name,email in self.users: \n name_parts = full_name.split(' ')\n first_name = name_parts[0]\n last_name = ' '.join(name_parts[1:]) \n u = {\n 'first_name': first_name, \n 'last_name': last_name, \n 'email': email\n }\n users.append(u)\n\n assert users, \"must have at least one user: %s\" % self.id\n\n return {\n 'name': company_name, \n 'old_id': self.id, \n 'url': self._fix_url(fieldval(n, 'url')), \n 'users': users,\n 'logo_url': fieldval(n, 'logo_url')\n }\n\n\n\nfrom collections import defaultdict\n\nclass GroupUsersReader(object):\n \"\"\" quick modification on standard reader that reads though the data \n and groups users \n it introduces new method, that can be used for the data access \n \"\"\"\n def __init__(self, reader):\n self.reader = reader \n users = self.users = defaultdict(list)\n\n nodes = self.nodes = [] \n for node in reader.read():\n id = fieldval(node, 'id') \n user = (fieldval(node, 'full_name'), fieldval(node, 'email'))\n\n if not id or not user[0] or not user[1]:\n continue\n \n if not id in users:\n nodes.append(node)\n\n # print id, user \n users[id].append(user)\n\n def get_users_for_id(self, id):\n #print 'get_users_for_id: %s, has_key: %s' % (id, self.users.has_key(id))\n #print self.users.keys()\n return self.users[str(id)]\n\n def read(self):\n for node in self.nodes:\n yield node \n\n\n\n\ndef cleanup_data(reader):\n # users are listed under the same name of the company/id\n # must be treated as users for the same company \n return GroupUsersReader(reader)\n\n\n\ndef run(config, limit=None):\n # read all existing, don't send a request if data is already there \n url = config.get('careerleaf', 'url')\n key_secret = key_secret = get_auth_key(config) \n file_name = config.get('employers', 'file')\n\n client = EmpClient(url, key_secret)\n \n reader = XmlReader(file_name)\n reader = cleanup_data(reader)\n\n \n total = 0\n success_count = 0\n skipped = 0\n existing = client.get_existing_ids()\n processed_ids = []\n for node in reader.read():\n parser = Parser(node, reader)\n id = parser.id \n if id in existing:\n import_logger.debug('skipping : %s' % id)\n skipped +=1\n continue \n\n assert not id in processed_ids \n processed_ids.append(id) # ensuring that we do not process more than once \n\n data = parser.get_data()\n\n if data:\n is_successful = client.save(data)\n if is_successful:\n success_count+=1\n import_logger.info('successful for: {}'.format(data['name']))\n else:\n import_logger.error('failed, data problem for: {}'.format(parser.record_identity()))\n else:\n import_logger.error('failed, could not parse data for: {}'.format(parser.record_identity()))\n\n total +=1\n\n if limit and total > limit: \n import_logger.info('reached the limit: {}, stopping'.format(limit) )\n break\n if total % 10 == 0:\n import_logger.info('processing record %s' % total) \n\n import_logger.info('parsed {} records, {} are successfull, {} are failed, {} skipped'.format(total, success_count, (total - success_count), skipped))\n\n\n\n# TODO: delete example ","sub_path":"employers/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":6491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"455710402","text":"# coding : utf-8\n\nimport datetime\n\nimport server\n\n\nclass TestServer:\n @classmethod\n def setup_class(cls):\n cls.app = server.app.test_client()\n\n cls.competitions = server.competitions\n cls.clubs = server.clubs\n\n cls.cost_per_place = server.COST_PER_PLACE\n\n # cls.ref_clubs = [\n # {\"name\": \"Simply Lift\", \"email\": \"john@simplylift.co\", \"points\": \"13\"},\n # {\"name\": \"Iron Temple\", \"email\": \"admin@irontemple.com\", \"points\": \"4\"},\n # {\"name\": \"She Lifts\", \"email\": \"kate@shelifts.co.uk\", \"points\": \"12\"},\n # ]\n # cls.ref_competitions = [\n # {\n # \"name\": \"Spring Festival\",\n # \"date\": \"2020-03-27 10:00:00\",\n # \"numberOfPlaces\": \"25\",\n # },\n # {\n # \"name\": \"Fall Classic\",\n # \"date\": \"2020-10-22 13:30:00\",\n # \"numberOfPlaces\": \"13\",\n # },\n # ]\n\n def setup_method(self, method):\n print(\"RESET\")\n server.competitions = server.loadCompetitions()\n server.clubs = server.loadClubs()\n server.booking = {}\n\n # --- HELPERS --- #\n\n def login(self, email):\n return self.app.post(\"/showSummary\", data=dict(email=email))\n\n def logout(self):\n return self.app.get(\"/logout\", follow_redirects=True)\n\n def add_fake_club(self, points=0, name=\"fake_club\", email=\"fake@email.com\"):\n \"\"\" Create a fake club for test purpose \"\"\"\n\n server.clubs.append(\n {\n \"name\": f\"{name}\",\n \"email\": f\"{email}\",\n \"points\": f\"{points}\",\n }\n )\n self.clubs = server.clubs\n\n return len(server.clubs) - 1\n\n def add_fake_competition(self, places, name=\"fake_compet\", day_offset=0):\n \"\"\" Create a fake competition for test purpose \"\"\"\n\n date = datetime.datetime.now() + datetime.timedelta(days=day_offset)\n date = date.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n server.competitions.append(\n {\n \"name\": f\"{name}\",\n \"date\": f\"{date}\",\n \"numberOfPlaces\": f\"{places}\",\n }\n )\n self.competitions = server.competitions\n\n return len(server.competitions) - 1\n\n # --- TESTS LOGIN / LOGOUT --- #\n\n def test_happy_login_logout(self):\n \"\"\" Check if right email login + login is correctly handled \"\"\"\n\n rv = self.login(\"john@simplylift.co\")\n assert rv.status_code in [200]\n assert b\"Logout\" in rv.data\n\n rv = self.logout()\n assert rv.status_code in [200]\n assert b\"Please enter your secretary email to continue\" in rv.data\n\n def test_sad_login_wrong_email(self):\n \"\"\" Check if wrong email is correctly handled \"\"\"\n\n rv = self.login(\"wrong@email.com\")\n assert rv.status_code in [404]\n assert b\"The provided email is invalid\" in rv.data\n\n def test_sad_login_empty_email(self):\n \"\"\" Check if an empty email field is correctly handled \"\"\"\n\n rv = self.login(\"\")\n assert rv.status_code in [404]\n assert b\"The provided email is invalid\" in rv.data\n\n # --- TESTS BOOKING --- #\n\n def test_happy_booking(self):\n \"\"\" Display the booking page for an existing competition with an existing club \"\"\"\n\n now = datetime.datetime.now()\n\n for club in self.clubs:\n for competition in self.competitions:\n rv = self.app.get(f\"/book/{competition['name']}/{club['name']}\")\n\n print(rv.data, rv.status_code, \"\\n\")\n\n if server.formatDate(competition[\"date\"]) <= now:\n continue\n\n assert rv.status_code in [200]\n assert (\n str.encode(f\"Places available: {competition['numberOfPlaces']}\")\n in rv.data\n )\n\n def test_sad_booking_wrong_compet(self):\n \"\"\" Display the booking page for an existing club with a non existing competition \"\"\"\n\n for competition in self.competitions:\n rv = self.app.get(f\"/book/wrong_compet_name/{self.clubs[0]['name']}\")\n\n assert rv.status_code in [404]\n assert b\"The provided competition is invalid\" in rv.data\n\n def test_sad_booking_wrong_club(self):\n \"\"\" Display the booking page for an existing competition with a non existing club \"\"\"\n\n for competition in self.competitions:\n rv = self.app.get(f\"/book/{competition['name']}/wrong_club_name\")\n\n assert rv.status_code in [404]\n assert b\"The provided club is invalid\" in rv.data\n\n # --- TESTS PURCHASE PLACES --- #\n\n def test_happy_purchasePlaces_once(self):\n \"\"\" Book less places than club points or competitions available places \"\"\"\n\n points = int(self.clubs[0][\"points\"])\n booked = 0\n num_places = 1\n for competition in self.competitions:\n places = int(competition[\"numberOfPlaces\"])\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": num_places,\n \"club\": self.clubs[0][\"name\"],\n \"competition\": competition[\"name\"],\n },\n )\n\n booked += num_places\n\n print(rv.data, rv.status_code)\n\n cost = points - (self.cost_per_place * booked)\n\n assert rv.status_code in [200]\n assert str.encode(f\"Number of Places: {places-num_places}\") in rv.data\n assert str.encode(f\"Points available: {cost}\") in rv.data\n\n def test_sad_purchasePlaces_negative(self):\n \"\"\" Book a negative number of places \"\"\"\n\n num_places = -1\n for competition in self.competitions:\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": num_places,\n \"club\": self.clubs[0][\"name\"],\n \"competition\": competition[\"name\"],\n },\n )\n\n assert rv.status_code in [400]\n assert b\"Something went wrong-please try again\" in rv.data\n\n def test_sad_purchasePlaces_zero(self):\n \"\"\" Book a negative number of places \"\"\"\n\n num_places = 0\n for competition in self.competitions:\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": num_places,\n \"club\": self.clubs[0][\"name\"],\n \"competition\": competition[\"name\"],\n },\n )\n\n assert rv.status_code in [400]\n assert b\"Something went wrong-please try again\" in rv.data\n\n def test_sad_purchasePlaces_12_places_max__all_in_one(self):\n \"\"\" Book more than 12 places > forbidden \"\"\"\n\n club_index = self.add_fake_club(points=100)\n\n print(\"INIT:\", self.competitions, self.clubs)\n\n points = int(self.clubs[club_index][\"points\"])\n slots = int(self.competitions[0][\"numberOfPlaces\"])\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 13,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n print(rv.data, rv.status_code)\n\n assert rv.status_code in [400]\n assert str.encode(f\"Number of Places: {slots}\") in rv.data\n assert str.encode(f\"Points available: {points}\") in rv.data\n assert b\"You can't book more than 12 places per competition\" in rv.data\n\n def test_sad_purchasePlaces_12_places_max__step_by_step(self):\n \"\"\" Book more than 12 places > forbidden \"\"\"\n\n club_index = self.add_fake_club(points=100)\n\n print(\"INIT:\", self.competitions, self.clubs)\n\n points = int(self.clubs[club_index][\"points\"])\n slots = int(self.competitions[0][\"numberOfPlaces\"])\n booked = 0\n\n num_actions = 12 + 1\n\n for i in range(1, num_actions + 1):\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n booked += 1\n print(i, \"\\n\", rv.data, rv.status_code, \"\\n\", server.booking)\n\n if i < num_actions - 1:\n cost = points - (self.cost_per_place * booked)\n assert rv.status_code in [200]\n assert str.encode(f\"Number of Places: {slots-booked}\") in rv.data\n assert str.encode(f\"Points available: {cost}\") in rv.data\n\n assert rv.status_code in [400]\n assert b\"You can't book more than 12 places per competition\" in rv.data\n\n def test_happy_purchasePlaces_all_club_points(self):\n \"\"\" Use all points of a club \"\"\"\n\n slots = 10\n points = 10\n booked = 0\n\n compet_index = self.add_fake_competition(\n places=slots, name=\"test compet\", day_offset=20\n )\n club_index = self.add_fake_club(points=points)\n\n while True:\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[compet_index][\"name\"],\n },\n )\n\n print(rv.data, rv.status_code, \"\\r\")\n\n if points < self.cost_per_place:\n break\n\n assert rv.status_code in [200]\n\n booked += 1\n assert str.encode(f\"Number of Places: {slots-booked}\") in rv.data\n\n points -= self.cost_per_place\n assert str.encode(f\"Points available: {points}\") in rv.data\n\n assert rv.status_code in [400]\n assert b\"You don't have enough points available\" in rv.data\n\n def test_happy_purchasePlaces_all_compet_places(self):\n \"\"\" Book all places of a competition \"\"\"\n\n slots = 10\n points = 1000\n booked = 0\n\n compet_index = self.add_fake_competition(\n places=slots, name=\"test compet\", day_offset=20\n )\n club_index = self.add_fake_club(points=points)\n\n while True:\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": self.competitions[compet_index][\"name\"],\n },\n )\n\n print(rv.data, rv.status_code, \"\\r\")\n\n if booked + 1 > slots:\n break\n\n assert rv.status_code in [200]\n\n booked += 1\n assert str.encode(f\"Number of Places: {slots-booked}\") in rv.data\n\n points -= self.cost_per_place\n assert str.encode(f\"Points available: {points}\") in rv.data\n\n assert rv.status_code in [400]\n assert b\"You can't book more places than available\" in rv.data\n\n def test_sad_purchasePlaces_more_than_compet(self):\n \"\"\" Book more places than available in the competition \"\"\"\n\n slots = 5\n cName = \"test compet\"\n\n _ = self.add_fake_competition(places=slots, name=cName, day_offset=20)\n club_index = self.add_fake_club(points=100)\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": slots + 1,\n \"club\": self.clubs[club_index][\"name\"],\n \"competition\": cName,\n },\n )\n\n print(rv.data, rv.status_code)\n\n assert rv.status_code in [400]\n assert b\"You can't book more places than available\" in rv.data\n\n def test_sad_purchasePlaces_more_than_club(self):\n \"\"\" Book more places than the number of points available in the club \"\"\"\n\n for club in self.clubs:\n for competition in self.competitions:\n num_booked = int(club[\"points\"]) + 1\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": num_booked,\n \"club\": club[\"name\"],\n \"competition\": competition[\"name\"],\n },\n )\n\n assert rv.status_code in [400]\n assert b\"You don't have enough points available\" in rv.data\n\n def test_sad_purchasePlaces_wrong_compet(self):\n \"\"\" Book places with an existing club and a non existing competition \"\"\"\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": self.clubs[0][\"name\"],\n \"competition\": \"fake_competition_name\",\n },\n )\n\n assert rv.status_code in [404]\n assert b\"The provided competition is invalid\" in rv.data\n\n def test_sad_purchasePlaces_wrong_club(self):\n \"\"\" Book places with an existing competition and a non existing club \"\"\"\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": \"fake_club_name\",\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n assert rv.status_code in [404]\n assert b\"The provided club is invalid\" in rv.data\n\n # --- TESTS PAST COMPETITIONS --- #\n\n def test_happy_showSummary_with_future_events(self):\n \"\"\" Show summary with both past & incoming events \"\"\"\n\n # incoming_date = datetime.datetime.now() + datetime.timedelta(days=20, hours=3)\n # test_incoming_date = incoming_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n # test_name = \"test compet\"\n # test_num = 5\n\n # server.competitions.append(\n # {\n # \"name\": test_name,\n # \"date\": test_incoming_date,\n # \"numberOfPlaces\": test_num,\n # }\n # )\n # self.competitions = server.competitions\n\n # NOTE replaced the above code with 2 new incoming events in the json file\n\n rv = self.login(\"john@simplylift.co\")\n print(rv.data, rv.status_code)\n\n assert rv.status_code in [200]\n assert b\"Book Places\" in rv.data\n assert rv.data.count(b\"Finished\") == 2\n assert rv.data.count(b\"Book Places\") == 2\n\n def test_sad_booking_past_compet(self):\n \"\"\" Must redirect to summary if someone directly write the booking url of a past competition \"\"\"\n\n rv = self.app.get(\n f\"/book/{self.competitions[0]['name']}/{self.clubs[0]['name']}\"\n )\n assert rv.status_code in [400]\n assert b\"The booking page for a past competition is closed\" in rv.data\n assert b\"Welcome\" in rv.data\n\n # --- TESTS DISPLAY CLUBS' POINTS BOARD --- #\n\n def test_happy_display_points_board_index(self):\n \"\"\" Check if the points board is displayed on the index page \"\"\"\n\n rv = self.app.get(\"/\")\n\n assert rv.status_code in [200]\n assert b\"Registration Portal\" in rv.data\n\n assert b\"Points Board\" in rv.data\n for club in self.clubs:\n assert str.encode(club[\"name\"]) in rv.data\n assert str.encode(f\"Current Points: {club['points']}\") in rv.data\n\n def test_happy_display_points_board_welcome(self):\n \"\"\" Check if the points board is displayed on the main (welcome) page \"\"\"\n\n rv = self.login(\"john@simplylift.co\")\n\n assert rv.status_code in [200]\n assert b\"Logout\" in rv.data\n\n assert b\"Points Board\" in rv.data\n for club in self.clubs:\n assert str.encode(club[\"name\"]) in rv.data\n assert str.encode(f\"Current Points: {club['points']}\") in rv.data\n","sub_path":"test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":15880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"634008776","text":"import os, sys\nimport codecs\nimport pandas as pd\n\nsys.path.append(os.path.expanduser(\"../ddi.py/\"))\n\nfrom ddi.onrails.repos import merge_instruments, dor1, copy, extract_variables, convert_r2ddi\n\ndef recode_datasets(in_enc=\"windows-1252\", out_enc=\"utf-8\"):\n dpath = os.path.join(\"ddionrails\", \"datasets\")\n for filename in os.listdir(dpath):\n filepath = os.path.join(dpath, filename)\n content = codecs.open(filepath, encoding=in_enc, errors=\"strict\").read()\n with codecs.open(filepath, \"w+\", encoding=out_enc) as f:\n f.write(content)\n\ndef main():\n # copy.study()\n # copy.f(\"instruments.csv\")\n # copy.f(\"questions_variables.csv\")\n # copy.f(\"questions_images.csv\")\n # copy.f(\"attachments.csv\")\n convert_r2ddi.Parser(\"gip\").write_json()\n recode_datasets()\n extract_variables.XmlParser(\"r2ddi/v1/en\", \"gip\").run()\n # copy.f(\"datasets.csv\")\n # merge_instruments.main()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lib_py/fill_ddionrails.py","file_name":"fill_ddionrails.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"396298997","text":"'''\r\n#Developers: Minh Phung and Christopher Blake Matis\r\n#Course: CSC 138\r\n#Socket Programming Assignment 2: Web Server\r\n\r\n#Project Objective/Functions\r\nReceives request, parse files and sends file\r\nvia tcp connection including an http response\r\nor an error if the file is not found.\r\n'''\r\n\r\nfrom socket import *\r\nserverPort = 8000\r\n\r\n#Sock_STREAM for tcp socket\r\nserverSocket = socket(AF_INET, SOCK_STREAM)\r\nserverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\r\nserverSocket.bind((\"\", serverPort))\r\nserverSocket.listen(1)\r\n\r\n#Message the client will receive from the server.\r\nsentMessage = ''\r\n\r\nprint ('The Web server is up on port: ', serverPort)\r\n\r\nwhile True:\r\n print (\"Ready to receive HTTP request via web browser (Chrome or Firefox)\")\r\n print(\"Accessible files that can be requested include (localhost:'portnumber'/hello.html)\")\r\n print(\"as well as (localhost:'portnumber'/ReadMe.html)\")\r\n connectionSocket, addr = serverSocket.accept()\r\n try:\r\n request = connectionSocket.recv(1024)\r\n #This will print the original form of request\r\n print (\"Origin form of request: ->> \", request)\r\n #request is in bytes form b'GET /fileName etc..\r\n #using split() to get the fileName from request\r\n fileName = request.split()[1]\r\n #open file name, if file not exit throw in error\r\n fileHandle = open(fileName[1:])\r\n #if file exit, read from file\r\n retrieveData = fileHandle.read()\r\n #this will print the contents of the file\r\n print (retrieveData)\r\n #http header\r\n http_response = \"\\nHTTP/1.1 200 OK\\n\\n\"\r\n #send http response\r\n sentMessage = http_response.encode()\r\n connectionSocket.sendall(sentMessage)\r\n #send file content to browser\r\n sentMessage = retrieveData.encode()\r\n connectionSocket.sendall(sentMessage)\r\n #close file\r\n fileHandle.close()\r\n #close TCP connection\r\n connectionSocket.close()\r\n except IOError:\r\n pass\r\n #http header\r\n http_response2 = \"\\nHTTP/1.1 404 Not Found\\n\\n\"\r\n #file not found exit\r\n print ('404 Not Found')\r\n #send HTTP response\r\n connectionSocket.send(http_response2.encode())\r\n \r\n #send the 404 not found error to the client as well\r\n connectionSocket.send('404 Not Found')\r\n #close connection\r\n connectionSocket.close()\r\n print (\"Webserver closed after one request\")\r\n break\r\n#this web server is intended to process only 1 request\r\n\r\n","sub_path":"Webserver.py","file_name":"Webserver.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"552923081","text":"\"\"\"\nIndex metadata for Faiss indices.\n\"\"\"\n\nimport re\nfrom enum import Enum\nfrom math import ceil, log2\n\nfrom autofaiss.utils.cast import cast_bytes_to_memory_string\nfrom autofaiss.external.descriptions import (\n INDEX_DESCRIPTION_BLOCKS,\n IndexBlock,\n TUNABLE_PARAMETERS_DESCRIPTION_BLOCKS,\n TunableParam,\n)\n\n\nclass IndexType(Enum):\n FLAT = 0\n HNSW = 1\n OPQ_IVF_PQ = 2\n OPQ_IVF_HNSW_PQ = 3\n PAD_IVF_HNSW_PQ = 4\n NOT_SUPPORTED = 5\n\n\nclass IndexMetadata:\n \"\"\"\n Class to compute index metadata given the index_key, the number of vectors and their dimension.\n\n Note: We don't create classes for each index type in order to keep the code simple.\n \"\"\"\n\n def __init__(self, index_key: str, nb_vectors: int, dim_vector: int):\n\n self.index_key = index_key\n self.nb_vectors = nb_vectors\n self.dim_vector = dim_vector\n self.fast_description = \"\"\n self.description_blocs = []\n self.tunable_params = []\n self.params = {}\n\n params = [int(x) for x in re.findall(r\"\\d+\", index_key)]\n\n if any(re.findall(r\"OPQ\\d+_\\d+,IVF\\d+,PQ\\d+\", index_key)):\n self.index_type = IndexType.OPQ_IVF_PQ\n self.fast_description = \"An inverted file index (IVF) with quantization and OPQ preprocessing.\"\n self.description_blocs = [IndexBlock.IVF, IndexBlock.PQ, IndexBlock.OPQ]\n self.tunable_params = [TunableParam.NPROBE, TunableParam.HT]\n\n self.params[\"pq\"] = params[3]\n self.params[\"nbits\"] = params[4] if len(params) == 5 else 8 # default value\n self.params[\"ncentroids\"] = params[2]\n self.params[\"out_d\"] = params[1]\n self.params[\"M_OPQ\"] = params[0]\n\n elif any(re.findall(r\"OPQ\\d+_\\d+,IVF\\d+_HNSW\\d+,PQ\\d+\", index_key)):\n self.index_type = IndexType.OPQ_IVF_HNSW_PQ\n self.fast_description = \"An inverted file index (IVF) with quantization, OPQ preprocessing, and HNSW index.\"\n self.description_blocs = [IndexBlock.IVF_HNSW, IndexBlock.HNSW, IndexBlock.PQ, IndexBlock.OPQ]\n self.tunable_params = [TunableParam.NPROBE, TunableParam.EFSEARCH, TunableParam.HT]\n\n self.params[\"M_HNSW\"] = params[3]\n self.params[\"pq\"] = params[4]\n self.params[\"nbits\"] = params[5] if len(params) == 6 else 8 # default value\n self.params[\"ncentroids\"] = params[2]\n self.params[\"out_d\"] = params[1]\n self.params[\"M_OPQ\"] = params[0]\n\n elif any(re.findall(r\"Pad\\d+,IVF\\d+_HNSW\\d+,PQ\\d+\", index_key)):\n self.index_type = IndexType.PAD_IVF_HNSW_PQ\n self.fast_description = (\n \"An inverted file index (IVF) with quantization, a padding on input vectors, and HNSW index.\"\n )\n self.description_blocs = [IndexBlock.IVF_HNSW, IndexBlock.HNSW, IndexBlock.PQ, IndexBlock.PAD]\n self.tunable_params = [TunableParam.NPROBE, TunableParam.EFSEARCH, TunableParam.HT]\n\n self.params[\"out_d\"] = params[0]\n self.params[\"M_HNSW\"] = params[2]\n self.params[\"pq\"] = params[3]\n self.params[\"nbits\"] = params[4] if len(params) == 5 else 8 # default value\n self.params[\"ncentroids\"] = params[1]\n\n elif any(re.findall(r\"HNSW\\d+\", index_key)):\n self.index_type = IndexType.HNSW\n self.fast_description = \"An HNSW index.\"\n self.description_blocs = [IndexBlock.HNSW]\n self.tunable_params = [TunableParam.EFSEARCH]\n\n self.params[\"M_HNSW\"] = params[0]\n\n elif index_key == \"Flat\":\n self.index_type = IndexType.FLAT\n self.fast_description = \"A simple flat index.\"\n self.description_blocs = [IndexBlock.FLAT]\n self.tunable_params = []\n\n else:\n self.index_type = IndexType.NOT_SUPPORTED\n self.fast_description = \"No description for this index, feel free to contribute :)\"\n self.description_blocs = []\n self.tunable_params = []\n\n def get_index_type(self) -> IndexType:\n \"\"\"\n return the index type.\n \"\"\"\n return self.index_type\n\n def estimated_index_size_in_bytes(self) -> int:\n \"\"\"\n Compute the estimated size of the index in bytes.\n \"\"\"\n\n if self.index_type == IndexType.FLAT:\n return self.nb_vectors * self.dim_vector * 4\n\n if self.index_type == IndexType.HNSW:\n # M bidirectional links per vector in the HNSW graph\n hnsw_graph_in_bytes = self.nb_vectors * self.params[\"M_HNSW\"] * 2 * 4\n vectors_size_in_bytes = self.nb_vectors * self.dim_vector * 4\n return vectors_size_in_bytes + hnsw_graph_in_bytes\n\n if self.index_type in [IndexType.OPQ_IVF_PQ, IndexType.OPQ_IVF_HNSW_PQ, IndexType.PAD_IVF_HNSW_PQ]:\n # We neglict the size of the OPQ table for the moment.\n code_size = ceil(self.params[\"pq\"] * self.params[\"nbits\"] / 8)\n cluster_size_byte = 1 + int((log2(self.params[\"ncentroids\"]) - 1) // 8)\n vector_size_byte = code_size + cluster_size_byte\n\n vectors_size_in_bytes = self.nb_vectors * vector_size_byte\n centroid_size_in_bytes = self.params[\"ncentroids\"] * self.dim_vector * 4\n\n total_size_in_byte = vectors_size_in_bytes + centroid_size_in_bytes\n\n if self.index_type in [IndexType.OPQ_IVF_HNSW_PQ, IndexType.PAD_IVF_HNSW_PQ]:\n total_size_in_byte += self.params[\"ncentroids\"] * self.params[\"M_HNSW\"] * 2 * 4\n\n if self.index_type in [IndexType.OPQ_IVF_PQ, IndexType.OPQ_IVF_HNSW_PQ]:\n total_size_in_byte += self.params[\"M_OPQ\"] * self.params[\"out_d\"] * 4\n\n return total_size_in_byte\n\n return -1\n\n def get_index_description(self, tunable_parameters_infos=False) -> str:\n \"\"\"\n Gives a generic description of the index.\n \"\"\"\n\n description = self.fast_description\n\n if self.index_type == IndexType.NOT_SUPPORTED:\n return description\n\n description += \"\\n\"\n index_size_string = cast_bytes_to_memory_string(self.estimated_index_size_in_bytes())\n description += f\"The size of the index should be around {index_size_string}.\\n\\n\"\n description += \"\\n\".join(INDEX_DESCRIPTION_BLOCKS[desc] for desc in self.description_blocs) + \"\\n\\n\"\n\n if tunable_parameters_infos:\n if not self.tunable_params:\n description += \"No parameters can be tuned to find a query speed VS recall tradeoff\\n\\n\"\n else:\n description += \"List of parameters that can be tuned to find a query speed VS recall tradeoff:\\n\"\n description += (\n \"\\n\".join(TUNABLE_PARAMETERS_DESCRIPTION_BLOCKS[desc] for desc in self.tunable_params) + \"\\n\\n\"\n )\n\n description += \"\"\"\nFor all indices except the flat index, the query speed can be adjusted.\nThe lower the speed limit the lower the recall. With a looser constraint\non the query time, the recall can be higher, but it is limited by the index\nstructure (if there is quantization for instance).\n\"\"\"\n return description\n","sub_path":"autofaiss/external/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"113476690","text":"# ATTEMPT ONE\n# \n# Time Complexity: O(n)\n#\n# To simplify the work done, lets first analyze the requirements for a palindrome.\n# The nature of palindrome consists of even pairs of characters with the exeption of\n# one odd number character (will always be the center). Folllowing this pattern, \n# we can assume that if there are more than two occurences of odd number characters,\n# then the given arrangement cannot form a palindrome.\n\ndef pp(s):\n store = [0] * 26\n s = list(s.replace(\" \", \"\"))\n l = 0\n for c in s:\n store[ord(c)-97] += 1\n l += 1 if store[ord(c)-97] % 2 != 0 else -1\n return l <= 1","sub_path":"Cracking the Coding Interview/Chapter 1 | Arrays and Strings/1.4.py","file_name":"1.4.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"638124535","text":"import time\nimport math\n\nfor i in range(0,10):\n string = 'tr3v0r sux'\n #print(len(string))\n print(string)\n time.sleep(0.05)\nprint('\\n')\nstop = 'n'\nrans = 0\n\n\"\"\"\nis this a comment for\nmultiple\nlines\n\"\"\"\n\nwhile stop == 'n':\n\twhile rans == 0:\n\t\tans = str(input('Does tr3v0r suc? (y/n)\\n'))\n\t\ttime.sleep(0.5)\n\t\t\n\t\tif ans == 'y':\n\t\t\trans = 1\n\t\t\tprint(string)\n\t\t\tprint('\\n')\n\t\telif ans == 'n':\n\t\t\trans = 1\n\t\t\tprint(string)\n\t\t\tprint('\\n')\n\t\telse:\n\t\t\trans = 0\n\t\t\tprint('\\nPlox input a real answer\\n')\n\t\t\t\n\t\ttime.sleep(1)\n\t\n\tstop = str(input('Stop? (y/n)\\n'))\n\tprint('\\n')\n","sub_path":"ganm.py","file_name":"ganm.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"649223030","text":"import os\nimport time\nimport numpy as np\n\nimport TGA\n\nstart = time.time()\n\npath_input = './input'\npath_output = './output_TGA'\n\n#---------------- Automatic mechanism reader-----------------------------------------------------------\n#--------------- (to be removed once kinetics and evaporation are optimized)---------------------------\n#------------------------------------------------------------------------------------------------------\nwith open(os.path.join(path_input,'nitramine-liquid-phase-mechanism-input.txt'),'r') as File:\n lines = File.readlines()\n \n flag1 = 0\n species_list = []\n reactions_list = []\n \n for line in lines:\n if line.startswith('REACTIONS'):\n flag1 = 1\n\n if (flag1 == 1 and (line.strip() not in ['!','REACTIONS','END'])) and not line.startswith('!'):\n reactions_list.append(line)\n reactants,products = line.split(' ')[0].split('=')\n species = reactants.split('+') + products.split('+')\n species_list.extend(species)\n\nseen = set()\nseen_add = seen.add\nspecies_list = [x for x in species_list if not(x in seen or seen_add(x))]\n\nwith open(os.path.join(path_input,'nitramine-liquid-phase-mechanism.txt'),'w') as File2:\n File2.writelines('ELEMENTS\\nC\\nH\\nN\\nO\\nAr\\nEND\\n')\n File2.writelines('SPECIES\\n')\n for species in species_list:\n File2.writelines(species + '\\n')\n File2.writelines('N2\\n')\n File2.writelines('END\\n')\n File2.writelines('THERMO\\n')\n\nwith open(os.path.join(path_input,'thermo-data.txt'),'r') as File:\n lines = File.readlines()\n \n \n for species in species_list: \n for i in range(len(lines)):\n if lines[i].split()[0] == species and lines[i].split()[0] != 'N2':\n with open(os.path.join(path_input,'nitramine-liquid-phase-mechanism.txt'),'a') as File2:\n File2.writelines(lines[i:i+4])\n \n if lines[i].split()[0] == 'N2':\n N2_lines = lines[i:i+4]\n \n with open(os.path.join(path_input,'nitramine-liquid-phase-mechanism.txt'),'a') as File2:\n File2.writelines(N2_lines)\n \nwith open(os.path.join(path_input,'nitramine-liquid-phase-mechanism.txt'),'a') as File2:\n File2.writelines('END\\n')\n File2.writelines('REACTIONS\\n')\n File2.writelines(reactions_list)\n File2.writelines('END')\n \n#------------------------------------------------------------------------------\n\n#---------------- Input parameters---------------------------------------------\nparameters = {\n \"Reactant\":'HMX',\n \"liquid-phase-mechanism\":'nitramine-liquid-phase-mechanism.txt',\n \"species_radius\":'log-file-data-minima.txt',\n \"evaporation_parameters\":'evaporation-parameters.txt',\n \n \"evaporation_global\":1,\n \"global_AnE\":[6.38e18,0,51.3e3],\n \n \"Tinit\":275.00, \n \"Tend\":310.00,\n \"Heating_rate\":15.0,\n \"dt\":0.001,\n \"Mc0\":1.492,\n \n \"Gas-cell-Pressure(Pa)\":101325.0,\n \"Gas-cell-Volume(m3)\":8.7e-6,\n \"Gas-cell-Temperature(K)\":473.15,\n \"Purge-gas-flow-rate(g/s)\":70.0*(1.13*0.001/60.0),\n \n \"T_onset\": 283.0,\n \"T1sin\":275.0,\n \"T2sin\":290.0,\n \"dTmax\":0.0,\n \n \"path_input\":path_input,\n \"path_output\":path_output,\n \"loglevel\":1\n}\n#------------------------------------------------------------------------------\n \nTGA.main(parameters)\n\nend = time.time()\r\n\nimport plots_TGA\nimport plots_TGA_liquid \n\r\nprint('Wall time = %f'%(end-start))\n\n","sub_path":"run_TGA.py","file_name":"run_TGA.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"232611562","text":"class Human:\n firstname = 'Junki'\n lastname = 'Hori'\n age = 29\n bloodtype = 'O'\n\nhuman = Human()\nprint(human.firstname)\nprint(human.lastname)\n\nhuman2 = Human()\nhuman2.firstname = 'Ryoma'\nhuman2.lastname = 'Kurosawa'\nprint(human2.firstname)\nprint(human2.lastname)\n","sub_path":"python/schoo4-1.py","file_name":"schoo4-1.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"292014356","text":"# -*- coding: utf-8 -*-\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom datetime import date\n\n\nclass cash_remittance_line(osv.osv):\n '''\nA line of cash.\n '''\n _name = 'cash.remittance.line'\n \n def _get_amount(self, cr, uid, ids, field, args, context=None):\n res = {}\n for line_record in self.browse(cr, uid, ids, context=context):\n res[line_record.id] = line_record.value * line_record.quantity\n return res\n \n _columns = {\n 'cash_remittance_id': fields.many2one('cash.remittance', 'Cash remittance'),\n 'name': fields.char('Name', size=32, readonly=True),\n 'value': fields.float('Value', readonly=True),\n 'quantity': fields.integer('Quantity'),\n 'amount': fields.function(_get_amount, string='Amount', type='float', method=True, store=True, readonly=True),\n }\n\n\nclass cash_remittance(osv.osv):\n '''\nContain cash remittances.\n '''\n _name = 'cash.remittance'\n \n STATE_SELECTION = [\n ('open', 'Opened'),\n ('done', 'Done'),\n ('cancel', 'Cancelled'),\n ]\n \n def _get_amount(self, cr, uid, ids, field, args, context=None):\n res = {}\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = sum([line.amount for line in record.lines])\n return res\n \n def _get_default_model_ids(self, cr, uid, context=None):\n line_obj = self.pool.get('cash.remittance.line')\n values = [\n {'name': '0.01 €', 'value': 0.01},\n {'name': '0.02 €', 'value': 0.02},\n {'name': '0.05 €', 'value': 0.05},\n {'name': '0.10 €', 'value': 0.1},\n {'name': '0.20 €', 'value': 0.2},\n {'name': '0.50 €', 'value': 0.5},\n {'name': '1 €', 'value': 1},\n {'name': '2 €', 'value': 2},\n {'name': '5 €', 'value': 5},\n {'name': '10 €', 'value': 10},\n {'name': '20 €', 'value': 20},\n {'name': '50 €', 'value': 50},\n {'name': '100 €', 'value': 100},\n {'name': '200 €', 'value': 200},\n {'name': '500 €', 'value': 500},\n ]\n ids = []\n for data in values:\n line_id = line_obj.create(cr, uid, data, context=context)\n ids.append(line_id)\n return ids\n \n _columns = {\n 'date': fields.date('Date', required=True),\n 'memo': fields.text('Memo'),\n 'lines': fields.one2many('cash.remittance.line', 'cash_remittance_id', 'Details'),\n 'amount': fields.function(_get_amount, string='Total amount', type='float', method=True, store=True, readonly=True),\n 'state': fields.selection(STATE_SELECTION, 'Status', translate=True),\n }\n \n _defaults = {\n 'date': date.today().strftime('%Y-%m-%d'),\n 'lines': _get_default_model_ids,\n 'state': 'open',\n }\n \n def create(self, cr, uid, vals, context=None):\n cash_remittance_id = super(cash_remittance, self).create(cr, uid, vals, context=context)\n \n if vals['lines']:\n ids = [line_id for op, line_id, data in vals['lines']]\n line_obj = self.pool.get('cash.remittance.line')\n line_obj.write(cr, uid, ids, {'cash_remittance_id': cash_remittance_id}, context=context)\n \n return cash_remittance_id\n \n def write(self, cr, uid, ids, vals, context=None):\n for ID in ids:\n state = self.browse(cr, uid, ID, context=context).state\n if state == 'done' and not 'state' in vals:\n raise osv.except_osv(_('Permission denied'), _(\"You can't edit a cash remittance in state 'Done' ! Cancel it and create a new one.\"))\n return super(cash_remittance, self).write(cr, uid, ids, vals, context=context)\n \n def unlink(self, cr, uid, ids, context=None):\n for ID in ids:\n state = self.browse(cr, uid, ID, context=context).state\n if state != 'cancel':\n raise osv.except_osv(_('Permission denied'), _(\"You must cancel a cash remittance before deleting it !\"))\n return super(cash_remittance, self).unlink(cr, uid, ids, context=context)\n \n def validate(self, cr, uid, ids, context=None):\n return self.write(cr, uid, ids, {'state': 'done'})\n \n def cancel(self, cr, uid, ids, context=None):\n return self.write(cr, uid, ids, {'state': 'cancel'})\n\n","sub_path":"cash/cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"442911877","text":"import numpy as np\nimport pyGM as gm\n\n# names of 500 movies to be rated:\nwith open('top-names.txt') as f: names = f.read().split('\\n')\n \n# ratings = int(2000 x 500) ratings of 500 movies by 2000 people; -1 = not rated\nratings = np.loadtxt('top-ratings-missing.txt')\nnUsers,nMovies = ratings.shape\n\nX = (ratings >= 7).astype(int); # did each user like the movie? (binary)\n# (use any threshold you like, but \"7+\" might be \"worth recommending\"?)\n\n# Let's split into training & test:\nnp.random.seed(0)\npi = np.random.permutation(nUsers)\niTr,iTe = pi[:int(nUsers*.7)], pi[int(nUsers*.7):]\nXtr,Xte = X[iTr,:],X[iTe,:]","sub_path":"recommender-project.py","file_name":"recommender-project.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"195866135","text":"import pygame\n#initializes pygame\npygame.init()\nplayer_img = pygame.image.load('C:/Users/Suzan Balami/Desktop/jet.png')\nplayerX = 370\nplayerY = 40\n\ngame_screen = pygame.display.set_mode((800,600))\ndef player(x,y) :\n\tgame_screen.blit(player_img,(x,y))\nrunning = True\nwhile running:\n\tfor event in pygame.event.get():\n\t\tgame_screen.fill((255,0,0))\n\t\tgame_screen.blit(player_img,(playerX,playerY))\n\t\t\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning = False\n\t\tplayer(playerX,playerY)\n\t\tplayerX=playerX +0.1\n\t\t\n\t\tif playerX == 736:\n\t\t\tplayer(736,480)\n\t\tpygame.display.update();","sub_path":"suzan/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"14157422","text":"from typing import Optional, Any, Mapping\n\nfrom .error_code import SlaveErrorCode\nfrom ..base import ResponsibleException\n\n\nclass ConnectionRefuse(ResponsibleException):\n\n def __init__(self, data: Optional[Mapping[str, Any]] = None):\n ResponsibleException.__init__(\n self,\n SlaveErrorCode.SLAVE_CONNECTION_REFUSED,\n message='Connection refused!',\n data=data or {},\n status_code=403,\n )\n\n\nclass DisconnectionRefuse(ResponsibleException):\n\n def __init__(self, data: Optional[Mapping[str, Any]] = None):\n ResponsibleException.__init__(\n self,\n SlaveErrorCode.SLAVE_DISCONNECTION_REFUSED,\n message='Disconnection refused!',\n data=data or {},\n status_code=403,\n )\n\n\nclass TaskRefuse(ResponsibleException):\n\n def __init__(self, data: Optional[Mapping[str, Any]] = None):\n ResponsibleException.__init__(\n self,\n SlaveErrorCode.TASK_REFUSED,\n message='Task refused!',\n data=data or {},\n status_code=403,\n )\n\n\nclass TaskFail(Exception):\n\n def __init__(self, result: Optional[Mapping[str, Any]], message: Optional[str] = None):\n if message:\n Exception.__init__(self, 'Task process failed - {message}.'.format(message=message))\n else:\n Exception.__init__(self, 'Task process failed.')\n self.__result = result or {}\n\n @property\n def result(self) -> Mapping[str, Any]:\n return self.__result\n","sub_path":"config/samples/di-mock/interaction/slave/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"9671726","text":"import sys\nfrom computer import Computer\n\ndef main():\n filename = \"input.txt\"\n steps = False\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n f = open(filename)\n s = f.readline()\n memory = list(map(int, s.split(\",\")))\n\n c = Computer(memory)\n c.run()\n\nif __name__== \"__main__\":\n main()\n","sub_path":"day5/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"460482585","text":"#!/usr/bin/env python3\n\nimport random\n\nclass plansza:\n\t\n\tdef __init__(self, lWierszy=10, lKolumn=10, znakiGracza=['x'], znakiKomputera=['o']):\n\t\tself.w = lWierszy\n\t\tself.k = lKolumn\n\t\tself.zG = znakiGracza\n\t\tself.zK = znakiKomputera\n\t\tself.pln = [[\" \"] * lKolumn for i in range(lWierszy)]\n\n\tdef wypisz(self):\n\t\tprint(' +' + \"\".join((['-'] * (2*self.k+1))) + '+')\n\t\tfor i in range(self.k):\n\t\t\tprint(str((self.k-i)%10) + \"| \" + \" \".join( i for i in self.pln[i]) + ' |')\n\t\tprint(' +' + \"\".join((['-'] * (2*self.k+1))) + '+')\n\t\tprint(' ' + \" \".join( chr(i) for i in range(97,97+self.w) ) + ' ')\n\n\tdef wypelnijLosowo(self):\n\t\tznaki = self.zG + self.zK\n\t\tfor i in range(self.w):\n\t\t\tfor j in range(self.k):\n\t\t\t\tself.pln[i][j] = znaki[random.randint(0,len(znaki)-1)]\n\n\tdef poprawPlansze(self):\n\t\t\"\"\"dzialanie grawitacji na plansze\"\"\"\n\t\t\n\t\t#grawitacja w pionie:\n\t\tfor j in range(self.k):\n\t\t\tlicznik = 0\n\t\t\tfor i in range(self.w):\n\t\t\t\tif self.pln[self.w-i-1][j] == ' ':\n\t\t\t\t\tlicznik += 1\n\t\t\t\telif licznik > 0:\n\t\t\t\t\tself.pln[self.w-i-1+licznik][j] = self.pln[self.w-i-1][j]\n\t\t\t\t\tself.pln[self.w-i-1][j] = ' '\n\n\t\t#grawitacja lewostronna w poziomie:\n\t\tlicznik = 0\n\t\tfor j in range(self.k):\n\t\t\tif self.pln[self.w-1][j] == ' ':\n\t\t\t\tlicznik += 1\n\t\t\telif licznik > 0:\n\t\t\t\tfor i in range(self.w):\n\t\t\t\t\tself.pln[self.w-i-1][j-licznik] = self.pln[self.w-i-1][j]\n\t\t\t\t\tself.pln[self.w-i-1][j] = ' '\n\n\tdef kasujBlok(self, doUsuniecia):\n\t\t\"\"\"kasuje blok oznaczony w doUsuniecia\"\"\"\n\t\tfor i in range(self.w):\n\t\t\tfor j in range(self.k):\n\t\t\t\tif doUsuniecia[i][j]:\n\t\t\t\t\tself.pln[i][j] = ' '\n\n\t\tself.poprawPlansze()\n","sub_path":"plansza.py","file_name":"plansza.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"294622502","text":"# Sys utils\nimport logging\nimport signal\nimport sys\nimport RPi.GPIO as GPIO\nfrom smbus import SMBus\n\n# Sensors\nfrom scripts.Clock import Clock\nfrom scripts.ShuttersController import ShuttersController\nfrom scripts.PresemceDetector import PresenceDetector\nfrom scripts.LampController import LampController\nfrom scripts.TemperatureSensor import TemperatureSensor\nfrom scripts.LuminositySensor import LuminositySensor\n# import shutters\n\n\ndef i2c_scan():\n _I2C_ADDR_RANGE = 127\n try :\n bus = SMBus(1)\n except:\n return []\n devices = []\n for addr in range(0, _I2C_ADDR_RANGE) :\n try :\n bus.write_quick(addr, 0x00)\n devices.append(addr)\n except:\n pass\n return devices\n\n\nclock = None\ntemperature = None\nluminosity = None\nshutter = None\nlamp = None\npresence = None\nkeep_alive = True\n\nIP = \"127.0.0.1\"\nPORT = 1883\n\ndef ctrlc_handler(signum, frame):\n global clock, temperature, luminosity, shutter, lamp, presence, keep_alive\n if clock is not None:\n clock.stop()\n clock = None\n if temperature is not None:\n temperature.stop()\n temperature = None\n if luminosity is not None:\n luminosity.stop()\n luminosity = None\n if shutter is not None:\n shutter.stop()\n shutter = None\n if lamp is not None:\n lamp.stop()\n lamp = None\n if presence is not None:\n presence.stop()\n presence = None\n keep_alive = False\n GPIO.cleanup()\n\ndef main() :\n global clock, temperature, luminosity, shutter, lamp, presence, keep_alive\n # SETTING LOGGER\n logging.basicConfig(format=\"[%(asctime)s][%(module)s:%(funcName)s:%(lineno)d][%(levelname)s] %(message)s\",\n stream=sys.stdout)\n logging.getLogger().setLevel(logging.DEBUG)\n # SETTING INTERRUPTER\n signal.signal(signal.SIGINT, ctrlc_handler)\n\n devices = i2c_scan()\n if len(devices) < 2:\n logging.getLogger().error(\"Not all devices are found : expected 2, got \" + str(devices) + \" : \" + str(devices))\n return -1\n\n clock = Clock(ip=IP, port=PORT)\n temperature = TemperatureSensor(mqtt_ip=IP, mqtt_port=PORT)\n luminosity = LuminositySensor(mqtt_ip=IP, mqtt_port=PORT)\n shutter = ShuttersController(led_pin=19, mqtt_ip=IP, mqtt_port=PORT)\n lamp = LampController(lamp_pin=16, mqtt_ip=IP, mqtt_port=PORT)\n presence = PresenceDetector(int_pin=20, mqtt_ip=IP, mqtt_port=PORT)\n\n while keep_alive:\n None\n\n print(\"The end\")\n\nif __name__ == '__main__':\n main()","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"18904680","text":"'''\nAssignment-3\n'''\n\ndef is_valid_word(word, hand, word_list):\n \"\"\"\n Returns True if word is in the word_list and is entirely\n composed of letters in the hand. Otherwise, returns False.\n\n Does not mutate hand or word_list.\n word: string\n hand: dictionary (string -> int)\n word_list: list of lowercase strings\n \"\"\"\n # TO DO ... <-- Remove this comment when you code this function\n for i in word:\n if i not in hand.keys():\n return False\n return word in word_list\ndef main():\n \"\"\"main\"\"\"\n word = input()\n n_input = int(input())\n adict = {}\n for i in range(n_input):\n data = input()\n list1 = data.split()\n adict[list1[0]] = int(list1[1])\n i += 1\n list2 = input().split()\n print(is_valid_word(word, adict, list2))\nif __name__ == \"__main__\":\n main()\n","sub_path":"m11/p3/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"622337969","text":"# -*- coding:utf-8 -*- \n'''\nCreated on 2016年4月14日\n\n@author: wgq\n'''\nimport json\nimport db_wrapper\nimport util\n\nclass TermCategory2:\n DEFAULT_CATEGORY_ID = 2\n \n def __init__(self, project):\n self.project = project\n \n def getDefaultConfig(self):\n db = db_wrapper.getDBByProjectName(self.project)\n res = db.query(\"select * from TermCategory where ID=%d\"%(self.DEFAULT_CATEGORY_ID))\n configuration = {}\n for item in res:\n configuration['Channel_ID'] = item['Channel_ID']\n configuration['PreDownload_Channel_ID'] = item['PreDownload_Channel_ID']\n configuration['MainServer'] = item['MainServer']\n configuration['UpgradeURL'] = item['UpgradeURL']\n configuration['LogURL'] = item['LogURL']\n configuration['HeartBeat_Period'] = item['HeartBeat_Period']\n configuration['Volume'] = item['Volume']\n configuration['WorkSeqments'] = item['WorkSeqments']\n configuration['DownloadSeqments'] = item['DownloadSeqments']\n configuration['RestartTimer'] = item['RestartTimer']\n configuration['ProgramSync'] = item['ProgramSync']\n configuration['CityIDs'] = item['CityIDs']\n return configuration\n \n def addTerm2DefaultCategory(self, termID):\n db = db_wrapper.getDBByProjectName(self.project)\n db.insert(\"TermCategoryTree\", ID=termID, Type=1, ParentID=self.DEFAULT_CATEGORY_ID)\n \n ","sub_path":"backend_stb/controllers/TermCategory2.py","file_name":"TermCategory2.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"507677594","text":"#! /home/akmatali/project_akmatali/parse_popular_films_kinopoisk/venv/bin python3\n\n#Импортируем необходимые нам библиотеки и модули\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\n#-----------------------------------------------\n#Константа содержащий необходимый url сайта\nURL = \"https://www.kinopoisk.ru/popular/day/2017-06-27/page/\"\n\n#Создаем новый класс с именем Parse\nclass Parse:\n\t#Инициализируем переменные html and list_films\n\tdef __init__(self,url):\n\t\tself.html = urlopen(URL)\n\t\t#Пустой список\n\t\tself.list_films = []\n\n\t#Парсим html страницу\n\tdef parseHtml(self,url):\n\t\thtml = urlopen(url)\n\t\t#Создаем объект BeautifulSoup и записываем в переменный soup\n\t\tsoup = BeautifulSoup(html,'html.parser')\n\t\t\n\t\t#Получим загаловок страницы и записываем в переменный title\n\t\tself.title = soup.h1.string.strip()\n\n\t\t#Найдем главный div с классом stat и записываем в переменную main_div\n\t\tmain_div = soup.find('div',class_='stat')\n\t\t\n\t\t#Найдем все div внутри главного main_div\n\t\tdiv_in_main = main_div.find_all('div')\n\t\t\n\t\t#Берем каждый div и в цикле найдем внутри div теги a\n\t\tfor div in div_in_main:\n\t\t\t#Найдем все теги a и записываем в переменную a_in_div\n\t\t\ta_in_div = div.find_all('a')\n\t\t\tif a_in_div:\n\t\t\t\t#Если теги а есть берем теги с индексом [1] и добавляем.\n\t\t\t\tself.list_films.append(div.find_all('a')[1].string)\n\t\t\telse:\n\t\t\t\t#Если div пустой, внутри него ничего нет тогда вернемся к началу цикла\n\t\t\t\tcontinue\n\n\t#Получим все страницы pagination и парсим все страницы\n\tdef get_html_page(self,url):\n\t\t#Создаем объект soupa'а\n\t\tsoup = BeautifulSoup(self.html,'html.parser')\n\t\t#Найдем div pagination навигатора \n\t\tdiv_ul = soup.find('div',class_='navigator')\n\t\t#Найдем все теги a\n\t\ta_href = div_ul.find_all('a')\n\t\t#Берем последний тег\n\t\thref = a_href[-1].get('href')\n\t\t#И создаем шаблон для поиска с помоши регулярные выражения и найдем количество страниц\n\t\tlast_page = int(re.match('.*\\/(\\d+)\\/$', href).group(1))\n\t\t\n\t\t#В цикле переберем все ссылки и парсим каждый ссылку\n\t\tfor i in range(1,last_page+1): \n\t\t\t#Увеличиваем ссылку QueryString +1\n\t\t\tgo_url = url + str(i)\n\t\t\t#Передаем подготовленную ссылку в метод parseHtml() для парсинга\n\t\t\tself.parseHtml(go_url)\n\n\n\t#Сохраняем все найденные данные в файл\n\tdef save_get_information(self):\n\t\tprint(21)\n\nparser = Parse(URL)\nparser.get_html_page(URL)\nprint('\\n'.join(parser.list_films))","sub_path":"scraping/option_oop_kinopoisk.py","file_name":"option_oop_kinopoisk.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"99589073","text":"import sqlite3\nfrom bs4 import BeautifulSoup\n\n\ndef get_title(file_path,get_title_string):\n with open(file_path, 'rt',encoding='utf-8') as f:\n soup=BeautifulSoup(f.read(),'html.parser')\n soup_content=soup.find(id=get_title_string).get_text()\n return(soup_content)\n\nclass GetAnswer:\n # 单选题&判断题\n def get_one_answer(get_title,get_db,get_table):\n conn=sqlite3.connect(get_db)\n c=conn.cursor()\n sql_statement='SELECT * FROM testBank'+str(get_table)+' WHERE title=\\\"'+str(get_title)+'\\\"'\n c.execute(sql_statement)\n try:\n data=c.fetchone()[1]\n except TypeError:\n print(\"没有这个题目的答案!\")\n data=[]\n conn.close()\n return(data)\n # 多选题\n def get_multi_answer(get_title,get_db):\n data=['','','','']\n conn=sqlite3.connect(get_db)\n c=conn.cursor()\n sql_statement='SELECT * FROM testBank2 WHERE title=\\\"'+str(get_title)+'\\\"'\n c.execute(sql_statement)\n title_and_answer=c.fetchone()\n try:\n for i in range(4):\n data[i]=title_and_answer[i+1]\n except TypeError:\n print(\"没有这个多选题的答案!\")\n conn.close()\n return(data)\n\n\nif __name__ == '__main__':\n title=get_title('./index.html','Mydatalist__ctl0_Mydatalist3__ctl0_tm')\n print(title)\n answer=GetAnswer.get_one_answer(title,'./psychology_tiku.db',3)\n print(answer)\n","sub_path":"database_extraction.py","file_name":"database_extraction.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"176231046","text":"#coding=utf-8\n__author__ = 'wm'\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import auth\n#from django.core import exceptions as django_error\nfrom models import *\n\nfrom io import BytesIO\n\nimport json\n\n\ndef request_code_img(task_uuid, project_uuid, template_uuid='', count=1):\n if CodeSet.objects.filter(project_uuid=project_uuid).exists() and not CodeGen.objects.filter(\n task_uuid=task_uuid).exists():\n\n try:\n nCount = int(count)\n except ValueError:\n nCount = 1\n\n if nCount > 1:\n nCount = 1\n\n code_set_obj = CodeSet.objects.filter(project_uuid=project_uuid)[0]\n\n code_gen_obj = CodeGen(remark=u'来自接口请求', count=nCount, task_uuid=task_uuid, codeset=code_set_obj)\n code_gen_obj.save()\n\n code_obj = Code.objects.filter(codeset=code_set_obj, gen=None)[0]\n\n if code_obj:\n code_obj.gen = code_gen_obj\n code_obj.save()\n\n template_obj = code_set_obj.template\n\n elem_dict = {\n 'R': data_uri_to_image(eval(template_obj.red)),\n 'G': data_uri_to_image(eval(template_obj.green)),\n 'B': data_uri_to_image(eval(template_obj.blue)),\n 'D': data_uri_to_image(eval(template_obj.black))\n }\n\n if len(eval(template_obj.background)):\n bg_image = data_uri_to_image(eval(template_obj.background))[0]\n else:\n bg_image = 'W'\n\n objImageCode = ImageCode()\n\n objImageCode.setBaseStyle(template_obj.width, template_obj.border, template_obj.frame, template_obj.gutter)\n objImageCode.setCodeType(template_obj.klass)\n objImageCode.setColors(code_obj.blocks)\n objImageCode.setElementImage(elem_dict)\n objImageCode.setBackground(bg_image)\n\n image = objImageCode.generateCode(0, 0)\n\n temp = BytesIO()\n\n image.save(temp, 'PNG')\n\n return '200', code_obj.hash_code0, temp.getvalue(), 'png'\n else:\n return '400', '', '', ''\n else:\n return '400', '', '', ''\n\n\ndef get_code_img(request):\n if request.method == 'POST':\n format_ = request.POST.get('format', '')\n callback = request.POST.get('callback', '')\n\n # project 为code_set_uuid\n uuid = request.POST.get('project', '')\n #code_set_uuid = request.POST.get('code_set' '')\n code_hash = request.POST.get('code', '')\n else:\n format_ = request.GET.get('format', '')\n callback = request.GET.get('callback', '')\n\n uuid = request.GET.get('project', '')\n code_hash = request.GET.get('code', '')\n\n if uuid:\n\n if CodeSet.objects.filter(uuid=uuid).exists():\n\n code_set_obj = CodeSet.objects.get(uuid=uuid)\n\n code_gen_obj = CodeGen(remark=u'来自接口请求', count=1, codeset=code_set_obj)\n code_gen_obj.save()\n\n try:\n code_obj = Code.objects.filter(codeset=code_set_obj, gen=None)[0]\n except IndexError:\n code_obj = None\n\n if code_obj:\n code_obj.gen = code_gen_obj\n code_obj.save()\n\n # template_obj = code_set_obj.template\n #\n # elem_dict = {\n # 'R': data_uri_to_image(eval(template_obj.red)),\n # 'G': data_uri_to_image(eval(template_obj.green)),\n # 'B': data_uri_to_image(eval(template_obj.blue)),\n # 'D': data_uri_to_image(eval(template_obj.black))\n # }\n #\n # if len(eval(template_obj.background)):\n # bg_image = data_uri_to_image(eval(template_obj.background))[0]\n # else:\n # bg_image = 'W'\n #\n # objImageCode = ImageCode()\n #\n # objImageCode.setBaseStyle(template_obj.width, template_obj.border, template_obj.frame,\n # template_obj.gutter)\n # objImageCode.setCodeType(template_obj.klass)\n # objImageCode.setColors(code_obj.blocks)\n # objImageCode.setElementImage(elem_dict)\n # objImageCode.setBackground(bg_image)\n #\n # image = objImageCode.generateCode(0, 0)\n #\n # temp = BytesIO()\n #\n # image.save(temp, 'PNG')\n\n if format_ == '':\n #return HttpResponse(temp.getvalue(), mimetype='image/png')\n return HttpResponseRedirect('?code=%s' % code_obj.hash_code0)\n else:\n return HttpResponseRedirect('?code=%s&format=%s' % (code_obj.hash_code0, format_))\n # image = {\n # 'type': 'PNG',\n # 'data': temp.getvalue().encode('base64'),\n # 'hash': code_obj.hash_code0,\n # 'text': code_obj.text\n # }\n #\n # return HttpResponse(get_cgi_content(image, format_, callback),\n # mimetype='application/json;charset=utf-8')\n else:\n if format_ == '':\n return HttpResponse('')\n else:\n image = {\n 'type': '',\n 'data': '',\n 'hash': '',\n 'text': ''\n }\n\n return HttpResponse(get_cgi_content(image, format_, callback),\n mimetype='application/json;charset=utf-8')\n else:\n if format_ == '':\n return HttpResponse('')\n else:\n image = {\n 'type': '',\n 'data': '',\n 'hash': '',\n 'text': ''\n }\n\n return HttpResponse(get_cgi_content(image, format_, callback), mimetype='application/json;charset=utf-8')\n\n elif code_hash:\n\n if Code.objects.filter(hash_code0=code_hash).exists():\n\n code_obj = Code.objects.select_related().get(hash_code0=code_hash)\n template_obj = code_obj.codeset.template\n\n image = draw_code(code_obj, template_obj)\n\n temp = BytesIO()\n\n image.save(temp, 'PNG')\n\n if format_ == '':\n return HttpResponse(temp.getvalue(), mimetype='image/png')\n else:\n image = {\n 'type': 'PNG',\n 'data': temp.getvalue().encode('base64'),\n 'hash': code_obj.hash_code0,\n 'text': code_obj.text,\n }\n\n return HttpResponse(get_cgi_content(image, format_, callback),\n mimetype='application/json;charset=utf-8')\n\n else:\n if format_ == '':\n return HttpResponse('')\n else:\n image = {\n 'type': '',\n 'data': '',\n 'hash': '',\n 'text': ''\n }\n\n return HttpResponse(get_cgi_content(image, format_, callback),\n mimetype='application/json;charset=utf-8')\n else:\n return HttpResponse('')\n\n\ndef sync_interface(request):\n if request.method == 'POST':\n format_ = request.POST.get('format', 'json')\n callback = request.POST.get('callback', '')\n interface_dict_list_str = request.POST.get('interface_dict_list', '')\n else:\n format_ = request.GET.get('format', 'json')\n callback = request.GET.get('callback', '')\n interface_dict_list_str = request.GET.get('interface_dict_list', '')\n\n if interface_dict_list_str:\n try:\n interface_dict_list = json.loads(interface_dict_list_str, encoding='utf-8')\n except ValueError:\n re_dict = {\n 'status': u'error',\n 'code': 0,\n 'message': u'无效数据'\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback), mimetype='application/json;charset=utf-8')\n\n for interface_dict in interface_dict_list:\n try:\n name = interface_dict.keys().pop()\n value = interface_dict.values().pop()\n\n if InterfaceConfig.objects.filter(name=name).exists():\n interface_config_obj = InterfaceConfig.objects.get(name=name)\n interface_config_obj.value = value\n interface_config_obj.save()\n else:\n InterfaceConfig(name=name, value=value).save()\n\n except AttributeError:\n pass\n except IndexError:\n pass\n\n re_dict = {\n 'status': u'success',\n 'code': 1,\n 'message': u'接口已同步'\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback), mimetype='application/json;charset=utf-8')\n\n else:\n re_dict = {\n 'status': u'error',\n 'code': 2,\n 'message': u'无效参数'\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback), mimetype='application/json;charset=utf-8')\n\n\ndef user_active(request):\n format_ = request.GET.get('format', 'json')\n callback = request.GET.get('callback', '')\n\n id_ = request.GET.get('id', None)\n\n if not id_:\n re_dict = {\n 'status': u'error',\n 'code': 0,\n 'message': u'无效参数',\n }\n\n else:\n\n User = auth.get_user_model()\n\n if User.objects.filter(id=id_).exists():\n\n user_obj = User.objects.get(id=id_)\n\n if user_obj.is_active:\n user_obj.is_active = False\n else:\n user_obj.is_active = True\n\n user_obj.save()\n\n re_dict = {\n 'status': u'success',\n 'code': 1,\n 'message': u'用户状态已改变',\n }\n\n else:\n re_dict = {\n 'status': u'error',\n 'code': 2,\n 'message': u'用户不存在',\n }\n\n if format_ == 'json':\n pass\n else:\n re_dict = []\n\n if callback:\n return HttpResponse(u'%s(%s)' % (callback, json.dumps(re_dict, ensure_ascii=False, indent=4)),\n mimetype='application/json;charset=utf-8')\n else:\n return HttpResponse(json.dumps(re_dict, ensure_ascii=False, indent=4),\n mimetype='application/json;charset=utf-8')\n\n\ndef send_code_zip(request):\n\n if request.method == 'POST':\n format_ = request.POST.get('format', 'json')\n callback = request.POST.get('callback', '')\n\n uuid = request.POST.get('uuid', '')\n else:\n format_ = request.GET.get('format', 'json')\n callback = request.GET.get('callback', '')\n\n uuid = request.GET.get('uuid', '')\n\n if uuid:\n from tasks import zip_code\n zip_code.delay(uuid)\n\n re_dict = {\n 'status': u'success',\n 'code': 1,\n 'message': u'打包请求已发送'\n }\n else:\n re_dict = {\n 'status': u'error',\n 'code': 0,\n 'message': u'无效参数'\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback), mimetype='application/json;charset=utf-8')\n\n\ndef get_code_zip(request):\n\n if request.method == 'POST':\n format_ = request.POST.get('format', 'json')\n callback = request.POST.get('callback', '')\n\n str_request = request.POST.get('code_set', '')\n else:\n format_ = request.GET.get('format', 'json')\n callback = request.GET.get('callback', '')\n\n str_request = request.GET.get('code_set', '')\n\n if uuid:\n from tasks import zip_code\n zip_code.delay(uuid)\n\n re_dict = {\n 'status': u'success',\n 'code': 1,\n 'message': u'打包请求已发送'\n }\n else:\n re_dict = {\n 'status': u'error',\n 'code': 0,\n 'message': u'无效参数'\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback), mimetype='application/json;charset=utf-8')\n\n\ndef get_template_preview(request):\n if request.method == 'POST':\n format_ = request.POST.get('format', 'image')\n callback = request.POST.get('callback', '')\n template_uuid = request.POST.get('uuid', '')\n else:\n format_ = request.GET.get('format', 'image')\n callback = request.GET.get('callback', '')\n template_uuid = request.GET.get('uuid', '')\n\n if Template.objects.filter(uuid=template_uuid).exists():\n\n template_obj = Template.objects.get(uuid=template_uuid)\n\n code_set_list = template_obj.codeset_set.all()\n\n for code_set in code_set_list:\n code_list = code_set.code_set.all()\n\n if code_list:\n code_obj = code_list[0]\n img = draw_code(code_obj, template_obj)\n\n if img:\n io_temp = BytesIO()\n\n img.save(io_temp, 'PNG')\n\n if format_ == 'image':\n return HttpResponse(io_temp.getvalue(), mimetype='image/png')\n else:\n\n re_dict = {\n 'data': base64_to_data_uri(io_temp.getvalue())[0]\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback),\n mimetype='application/json;charset=utf-8')\n\n f_img = open(os.path.join(settings.STATIC_ROOT, 'img/no_preview.png'), 'r')\n\n if format_ == 'image':\n return HttpResponse(f_img.read(), mimetype='image/png')\n else:\n re_dict = {\n 'data': ''\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback),\n mimetype='application/json;charset=utf-8')\n\n else:\n if format_ == 'image':\n return HttpResponse('')\n else:\n re_dict = {\n 'data': ''\n }\n\n return HttpResponse(get_cgi_content(re_dict, format_, callback),\n mimetype='application/json;charset=utf-8')","sub_path":"gen/views_cgi.py","file_name":"views_cgi.py","file_ext":"py","file_size_in_byte":14694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"306515825","text":"fname = None\nwhile True:\n try:\n if fname is None:\n fname = input(\"Enter file name: \")\n else:\n fname = input(\"File does not exist, try again or type 'quit' to exit: \")\n if fname == \"quit\":\n break\n fhandler = open(fname)\n break\n except:\n continue\n\nif fname == \"quit\":\n exit()\n\nspam_conf_total = 0\nspam_conf_ave = 0\nspam_count = 0\nfor line in fhandler:\n if line.startswith(\"X-DSPAM-Confidence\"):\n spam_conf = line[line.find(\":\")+1:].lstrip()\n spam_conf_total = spam_conf_total + float(spam_conf)\n spam_count = spam_count + 1\n\n\n\n\nprint(\"Average spam confidence:\",spam_conf_total/spam_count)\n","sub_path":"grep_exercise.py","file_name":"grep_exercise.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"553443219","text":"import os\nimport sys\nsys.path.append(os.path.abspath('../../'))\n\nfrom test_fixtures import ddpg_config_dict_ma, RoboSumoenv, RoboSumoTask, RoboSumoWRSenv, RoboSumoWRSTask\n\nfrom rl_algorithms.agents import build_DDPG_Agent\nfrom rl_algorithms import AgentHook\nfrom rl_algorithms.networks import PreprocessFunctionToTorch\nfrom RoboSumo_test import learns_against_fixed_opponent_RoboSumo_parallel, record_against_fixed_opponent_RoboSumo\n\nimport numpy as np \n\nclass RoboDohyoZeroAgent:\n def __init__(self, nbr_actor):\n self.nbr_actor = nbr_actor\n self.name = \"ZeroAgent\"\n def take_action(self, state):\n #return np.concatenate( [np.zeros((1,8), dtype=\"float32\") for _ in range(self.nbr_actor)], axis=0)\n return np.concatenate( [np.asarray([[-0.5, 0.5, 0.25, -0.75, -0.75, -0.5, -0.5, -0.5]]) for _ in range(self.nbr_actor)], axis=0)\n def handle_experience(self, s, a, r, succ_s, done):\n pass \n\ndef robodohyo_zero_agent(nbr_actor):\n return RoboDohyoZeroAgent(nbr_actor)\n\ndef ddpg_config_dict():\n config = dict()\n config['discount'] = 0.99\n config['tau'] = 1e-3\n config['use_cuda'] = True\n config['nbrTrainIteration'] = 1 \n config['action_scaler'] = 1.0 \n config['use_HER'] = False\n config['HER_k'] = 2\n config['HER_strategy'] = 'future'\n config['HER_use_singlegoal'] = False \n config['use_PER'] = True \n config['PER_alpha'] = 0.7 \n config['replay_capacity'] = 25e3\n config['min_capacity'] = 5e3 \n config['batch_size'] = 32#128\n config['learning_rate'] = 3.0e-4\n config['nbr_actor'] = 1#32\n return config\n\ndef RoboDohyoenv():\n import roboschool\n import gym\n #return gym.make('RoboschoolSumo-v0')\n return gym.make('RoboschoolSumoWithRewardShaping-v0')\n\n\ndef RoboDohyoTask(RoboSumoenv):\n from environments.gym_parser import parse_gym_environment\n return parse_gym_environment(RoboSumoenv)\n\ndef test_learns_to_beat_zero_in_RoboSumo(RoboSumoWRSTask, ddpg_config_dict_ma):\n '''\n Test used to make sure that agent is 'learning' by learning a best response\n against an agent that only plays rock in rock paper scissors.\n i.e from random, learns to play only (or mostly) paper\n '''\n load_agent = False\n \n if load_agent:\n agent = AgentHook.load(load_path='/tmp/test_DDPG_agent_RoboschoolSumoWithRewardShaping-v0.agent')\n else:\n agent = build_DDPG_Agent(RoboSumoWRSTask, ddpg_config_dict_ma, 'DDPG_agent')\n agent.training = True\n assert agent.training\n \n opponent = robodohyo_zero_agent(ddpg_config_dict_ma['nbr_actor'])\n \n envname = 'RoboschoolSumoWithRewardShaping-v0'\n learns_against_fixed_opponent_RoboSumo_parallel(agent, fixed_opponent=opponent,\n total_episodes=100, training_percentage=0.9,\n reward_threshold_percentage=0.25, envname=envname, nbr_parallel_env=ddpg_config_dict_ma['nbr_actor'], save=True)\n\ndef record_RoboDohyo_ZeroAgent(RoboDohyoTask, config_dict):\n load_agent = False\n \n if load_agent:\n agent = AgentHook.load(load_path='/tmp/test_ddpg_RoboschoolSumoWithRewardShaping-v0.agent')\n else:\n agent = build_DDPG_Agent(RoboDohyoTask, config_dict, 'DDPG_agent')\n agent.training = True\n assert agent.training\n \n opponent = robodohyo_zero_agent(config_dict['nbr_actor'])\n \n envname = 'RoboschoolSumoWithRewardShaping-v0'\n record_against_fixed_opponent_RoboSumo(agent, fixed_opponent=opponent, envname=envname)\n\n\nif __name__ == \"__main__\":\n #test_learns_to_beat_rock_in_RoboSumo(RoboSumoTask(RoboSumoenv()), ddpg_config_dict_ma())\n record_RoboDohyo_ZeroAgent(RoboDohyoTask(RoboDohyoenv()), ddpg_config_dict())","sub_path":"tests/environments/robodohyo_ddpg_zero_agent_test.py","file_name":"robodohyo_ddpg_zero_agent_test.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"521591122","text":"from itertools import chain, product\nfrom collections import Counter\nimport csv\n\nimport tarantool\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\nfrom settings import TARANTOOL_CONNCTION, CHUNK_LENGTH, BAG_OF_WORDS_INDEXES\n\nBAG_OF_WORDS_TYPE2ORDER = {\n 'style': 18,\n 'characteristic': 19,\n 'gastronomy': 20,\n 'style_pairs': 21,\n 'characteristics_pairs': 22,\n 'gastronomy_pairs': 23,\n 'style_pairs_row': 24,\n 'characteristics_pairs_row': 25,\n 'gastronomy_pairs_row': 26 \n}\n\n# 1: name: str\n# 2: image_url str\n# 3: color: red/white/pink str\n# 4: switness: dry/sweet/semi-sweet str\n# 5: grape str\n# 6: country str\n# 7: region str\n# 8: alcohol str -> num\n# 9: serving temperature str\n# 10: decantation str\n# 11: vintage num\n# 12: ageing str\n\n# [need to be treated as bag of words]\n# 13: style\n# 14: charateristics\n# 15: gastronomy\n\n#[postprocessed results]\n# 16: downloaded photo name\n# 17: temperature min\n# 18: temperature max\n# 19: bag of words_style\n# 20: bag of words_characteristics\n# 21: bag of words_gastronomy\n\ndef _flatten_bag_of_words(tuples, extract_func):\n return chain.from_iterable(extract_func(t) for t in tuples)\n \ndef _count_accuracies(word_list):\n return dict(Counter(word_list))\n \ndef find_words_fequences__total(tuples, keys2order):\n # считаем каждое вхождение, в т.ч. и в одном описании\n res = {}\n for key, order in keys2order.items():\n print('finding total accurency 4 {}'.format(key))\n res[key] = _count_accuracies(\n _flatten_bag_of_words(\n tuples,\n lambda x: x[order]\n )\n )\n #print(res[key])\n return res\n \ndef find_words_frequences__by_wine(tuples, keys2order):\n #одно вхождение == упоминание в описании вина\n res = {}\n for key, order in keys2order.items():\n print('finding accurency by wine {}'.format(key))\n res[key] = _count_accuracies(\n _flatten_bag_of_words(\n tuples,\n lambda x: list(set(x[order]))\n )\n )\n #print(res[key])\n return res\n \ndef _find_tf_idf(x):\n #import gc; gc.collect() #to prevent MemoryError\n res = TfidfTransformer().fit_transform(x).max(axis=0).todense().tolist()\n return res[0]\n \ndef find_word_tf_idf_metric(x, features):\n res = {}\n for key, counters in x.items():\n print('estimating tf-idf {}'.format(key))\n counters = _find_tf_idf(counters)\n #print(counters)\n #print(features[key])\n res[key] = dict(\n zip(\n features[key], counters \n )\n )\n return res\n \ndef _words2counts_dict(word_lits):\n return dict(Counter(word_lits))\n \ndef _tuples2dict_features(tuples, extract_features_func):\n res = [[], [], [], [], [], [], [], [], [] ]\n for t in tuples:\n #print(extract_features_func(t))\n for i, word_list in enumerate(extract_features_func(t)):\n res[i].append(_words2counts_dict(word_list))\n return res\n\ndef split_tuples2features(tuples, extract_features_func):\n y = [[t[0]] for t in tuples]\n x = []\n features = []\n dict_features = _tuples2dict_features(tuples, extract_features_func)\n for feature_set in dict_features:\n vec = DictVectorizer()\n x_ = vec.fit_transform(feature_set)#.toarray()\n x.append(\n x_\n )\n f_ = vec.get_feature_names()\n features.append(\n f_\n )\n #print(f_)\n return y, x, features \n\ndef _find_min_max(keys2order):\n orders = keys2order.values()\n return [min(orders), max(orders)]\n \ndef estimate_word_stat(tuples, keys2order):\n wc_total = find_words_fequences__total(tuples, keys2order)\n wc_by_wine = find_words_frequences__by_wine(tuples, keys2order)\n\n min_order, max_order = _find_min_max(keys2order)\n x_ = []\n f_ = []\n y, x_, f_ = split_tuples2features(tuples, lambda x: x[min_order: max_order + 1])\n \n x = {}\n features = {}\n MIN_ORDER = 18\n \n for key, order in BAG_OF_WORDS_TYPE2ORDER.items():\n x[key] = x_[order - MIN_ORDER]\n features[key] = f_[order - MIN_ORDER]\n \n tf_idf = find_word_tf_idf_metric(x, features)\n \n return {\n 'wc_total': wc_total,\n 'word_by_wine': wc_by_wine,\n 'tf_idf': tf_idf\n } \n\ndef _generate_all_word_pairs(words):\n return [' '.join(sorted(p)) for p in product(words, words) if p[0] != p[1]]\n\ndef _generate_all_word_pairs_in_row(words):\n return [' '.join(sorted([words[i], words[i + 1]])) for i in range(len(words) - 1)]\n \ndef add_word_pairs(tuples):\n print('adding pairs')\n for t in tuples:\n for order in BAG_OF_WORDS_INDEXES:\n t.append(_generate_all_word_pairs(t[order])) \n #print(tuples)\n \ndef add_word_pairs_in_row(tuples):\n print('adding pairs in rows')\n for t in tuples:\n for order in BAG_OF_WORDS_INDEXES:\n t.append(_generate_all_word_pairs_in_row(t[order])) \n #print(tuples) \n\ndef _save_data2csv(f, features):\n with open(f, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n for feature in features:\n writer.writerow(feature)\n\ndef _features2csv(feature_dict):\n return sorted(feature_dict.items(), key=lambda x: x[1])\n \ndef save2csv(res):\n for stat_key, stat in res.items():\n for feature_key, features in stat.items():\n features = _features2csv(features)\n file_name = 'csv/{}__{}.csv'.format(stat_key, feature_key)\n _save_data2csv(file_name, features)\n \ndef count_word_stat():\n tnt = tarantool.connect(**TARANTOOL_CONNCTION)\n offset = 0\n tuples = tnt.call('wine.find_by_chunk', [offset, CHUNK_LENGTH, False ]).data\n \n result_tuples = []\n while len(tuples) > 0 and tuples[0]:\n result_tuples.extend(tuples)\n offset += CHUNK_LENGTH \n tuples = tnt.call('wine.find_by_chunk', [offset, CHUNK_LENGTH, False ]).data\n \n add_word_pairs(result_tuples)\n add_word_pairs_in_row(result_tuples) \n \n res2csv = estimate_word_stat(result_tuples, BAG_OF_WORDS_TYPE2ORDER)\n #print(res2csv)\n save2csv(res2csv)\n \n \nif __name__ == '__main__':\n count_word_stat()\n\n","sub_path":"bin/count_word_stat.py","file_name":"count_word_stat.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"229013831","text":"\"\"\"\nCreated on 29.03.2014\n\n@author: Carbon\n\"\"\"\nfrom ..util import DESIGNER_ID, get_arm_from_context\nfrom .AnmData import AnmBone, AnmData, AnmTransformation, MODE_FILE, \\\n MODE_INTERNAL\nfrom os import path\nimport struct\n\ndef export_to_file(filepath, context, **options):\n \"\"\"\n Exports the active object in the context given to the file given\n \"\"\"\n if path.isfile(filepath):\n file = open(filepath, mode='wb')\n writer = AnmWriter()\n writer.from_scene(context)\n try:\n if options[\"dumpData\"]:\n # this is debug\n # pylint: disable=protected-access\n stream = None\n try:\n stream = options[\"ostream\"]\n except KeyError:\n pass\n print(writer._data, file=stream)\n except KeyError:\n pass # do nothing\n writer.write_to_file(file)\n file.flush()\n file.close()\n return {'FINISHED'}\n else:\n return {'CANCELLED'}\n\nclass AnmWriter(object):\n \"\"\"\n Convenience class to write data\n \"\"\"\n def __init__(self):\n self._data = AnmData(MODE_INTERNAL)\n self._data.version = 3\n\n def from_scene(self, context):\n \"\"\"\n Initializes the writer (reads from the context given)\n Will work on the currently selected object\n \"\"\"\n # setup\n armature = get_arm_from_context(context)\n data = self._data\n scene = context.scene\n data.fps = scene.render.fps / scene.render.fps_base\n start = scene.frame_start\n end = scene.frame_end\n # setup data\n data.num_frames = (end - start) + 1\n data.bones = [AnmBone(bone.parent is None, bone.name) for bone in armature.pose.bones]\n # append poses of current motion\n for frame in range(start, end+1):\n scene.frame_current = frame\n for bone, bone_internal in zip(data.bones, armature.pose.bones):\n bone.poses.append(AnmTransformation(bone_internal.rotation_quaternion, bone_internal.location))\n # all bones done\n # all frames done\n\n def write_to_file(self, fostream):\n \"\"\"\n Writes the previously initialized data to the stream given\n \"\"\"\n data = self._data\n data.switch_mode(MODE_FILE)\n fostream.write(\"r3d2anmd\".encode())\n header_data = struct.pack(\"<4If\", data.version, DESIGNER_ID, len(data.bones), data.num_frames, data.fps)\n fostream.write(header_data)\n for bone in data.bones:\n buffer = bytearray(AnmBone.kHeaderSize + len(bone.poses) * AnmTransformation.kSizeInFile)\n name_encoded = bone.name.encode(\"latin-1\")[:AnmBone.kNameLen]\n buffer[:len(name_encoded)] = name_encoded\n struct.pack_into(\" 30:\n self.logging.error('Deferred cleanup action are still pending after 3s')\n break\n count += 1\n\n self.async_pomp_task = []\n self.deferred_pomp_task = []\n self.futures = []\n\n def _register_future(self, f):\n self.futures.append(f)\n\n def _unregister_future(self, f, ignore_error=False):\n try:\n self.futures.remove(f)\n except ValueError:\n if not ignore_error:\n raise\n","sub_path":"src/olympe/_private/pomp_loop_thread.py","file_name":"pomp_loop_thread.py","file_ext":"py","file_size_in_byte":16486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"247357036","text":"width = 25\nheight = 6\n\ndata = []\nwith open(\"day8.input\") as file:\n data = [int(pixel) for pixel in file.read()]\n\nlayers = []\nlayer_info = {}\npixel_pos = 0\nwhile pixel_pos < len(data):\n layer = []\n for row in range(0, height):\n layer.append(data[pixel_pos:pixel_pos+width])\n pixel_pos += width\n layer_info[len(layers)] = {0: sum(x.count(0) for x in layer),\n 1: sum(x.count(1) for x in layer),\n 2: sum(x.count(2) for x in layer)}\n layers.append(layer)\n\nbest_layer = None\nfor layer in layer_info:\n if not best_layer:\n best_layer = layer\n elif layer_info[layer][0] < layer_info[best_layer][0]:\n best_layer = layer\n\nprint(best_layer)\nprint(layer_info[best_layer][1]*layer_info[best_layer][2])\n\nvisible = []\nfor layer in reversed(layers):\n if not visible:\n visible = layer\n else:\n for row in range(0, height):\n for column in range(0, width):\n if layer[row][column] != 2:\n visible[row][column] = layer[row][column]\n\nfor row in visible:\n for pixel in row:\n print('*' if pixel else ' ', end = '')\n print()\n \n\n \n\n","sub_path":"8/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"283661146","text":"from itertools import cycle\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.cache import cache\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.views.generic.base import View\n\nfrom .google_analytics import get_access_token\nfrom .models import Service\nfrom .models import GoogleAnalyticsSite\nfrom .models import GoogleAnalyticsSiteGoal\n\nimport requests\nfrom requests.exceptions import ConnectionError\n\n\nspotligth_cycle = cycle('AB')\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home.html'\n\n\nclass SpotligthView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n case = next(spotligth_cycle)\n if case == 'A':\n obj = Service.objects.all().order_by('?').first()\n if not obj:\n raise Http404('Create a Service first')\n\n return render(request, 'service_detail.html', {\n 'obj': obj,\n })\n\n elif case == 'B':\n obj = GoogleAnalyticsSite.objects.all().order_by('?').first()\n if not obj:\n raise Http404('Create a GoogleAnalyticsSite first')\n\n return render(request, 'googleanalyticssite_detail.html', {\n 'ACCESS_TOKEN': get_access_token(),\n 'obj': obj,\n })\n\n\nclass TickerView(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n response_list = []\n\n # Zendesk\n zendesk_data = cache.get('zendesk_data')\n if not zendesk_data:\n try:\n req = requests.get(\n settings.ZENDESK_URL,\n auth=(settings.ZENDESK_EMAIL, settings.ZENDESK_API),\n )\n if req.ok:\n zendesk_data = {\n 'title': 'Tickets',\n 'label': 'Zendesk',\n 'value': req.json()['view_count']['value'],\n }\n cache.set('zendesk_data', zendesk_data, 120)\n except ConnectionError:\n zendesk_data = None\n\n if zendesk_data:\n response_list.append(zendesk_data)\n\n # Sentry\n sentry_data = cache.get('sentry_data')\n if not sentry_data:\n try:\n req = requests.get(\n settings.SENTRY_URL,\n auth=(settings.SENTRY_KEY, ''),\n )\n if req.ok:\n sentry_data = {\n 'title': 'Events',\n 'label': 'Sentry',\n 'value': sum([x[1] for x in req.json()]),\n }\n cache.set('sentry_data', sentry_data, 60)\n except ConnectionError:\n sentry_data = None\n\n if sentry_data:\n response_list.append(sentry_data)\n\n # Mmonit\n mmonit_list = cache.get('mmonit_list')\n if not mmonit_list:\n try:\n s = requests.Session()\n s.get(settings.MMONIT_URL + 'index.csp')\n s.post(\n settings.MMONIT_URL + 'z_security_check',\n params={\n 'z_username': settings.MMONIT_USER,\n 'z_password': settings.MMONIT_PASS,\n }\n )\n req = s.post(\n settings.MMONIT_URL + 'reports/uptime/list',\n params={'range': '6'},\n )\n if req.ok:\n mmonit_list = []\n for item in req.json()['items']:\n mmonit_list.append({\n 'title': item['name'],\n 'label': 'Uptime',\n 'value': '{0}%'.format(item['uptime']),\n })\n cache.set('mmonit_list', mmonit_list, 90)\n except ConnectionError:\n mmonit_list = None\n\n if mmonit_list:\n response_list += mmonit_list\n\n return render(request, 'ticker_detail.html', {\n 'response_list': response_list,\n })\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"49343012","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 20 13:34:13 2019\r\n\r\n@author: tvign\r\n\"\"\"\r\n\r\nimport socket # Import socket module\r\nimport time\r\nimport os\r\ns = socket.socket() # Create a socket object\r\nhost = socket.gethostname() # Get local machine name\r\nport=8080\r\nt1 = time.time()\r\ns.connect((host,port))\r\nprint(\"Decoding of Encoded Data\")\r\nprint(\"Connected....\")\r\nt2 = time.time()\r\nfile=open(\"trans.png\",'wb')\r\nfile_data=s.recv(16777216)\r\nfile.write(file_data)\r\nt3 = time.time()\r\nfile.close()\r\nprint (file_data)\r\nprint ('Total:', t3 - t1)\r\nprint ('Throughput:', round((1024.0 * 0.001) / (t3 - t1), 3),'K/sec.')\r\nprint(\"File received\")\r\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"159315979","text":"# Copyright (c) 2013-2014 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import helpers as log_helpers\nfrom oslo_log import log as logging\n\nfrom neutron.db.models import securitygroup\nfrom neutron.extensions import multiprovidernet as mpnet\nfrom neutron_lib.api.definitions import provider_net as providernet\nfrom neutron_lib import constants as p_const\nfrom neutron_lib.plugins import directory\nfrom neutron_lib.plugins.ml2 import api\n\nfrom networking_odl._i18n import _\nfrom networking_odl.common import callback\nfrom networking_odl.common import config as odl_conf\nfrom networking_odl.common import constants as odl_const\nfrom networking_odl.journal import cleanup\nfrom networking_odl.journal import full_sync\nfrom networking_odl.journal import journal\nfrom networking_odl.journal import maintenance\nfrom networking_odl.journal import recovery\nfrom networking_odl.ml2 import port_binding\nfrom networking_odl.qos import qos_driver_v2 as qos_driver\nfrom networking_odl.trunk import trunk_driver_v2 as trunk_driver\n\nLOG = logging.getLogger(__name__)\n\n\nclass OpenDaylightMechanismDriver(api.MechanismDriver):\n \"\"\"OpenDaylight Python Driver for Neutron.\n\n This code is the backend implementation for the OpenDaylight ML2\n MechanismDriver for OpenStack Neutron.\n \"\"\"\n\n def initialize(self):\n LOG.debug(\"Initializing OpenDaylight ML2 driver\")\n cfg.CONF.register_opts(odl_conf.odl_opts, \"ml2_odl\")\n self.sg_handler = callback.OdlSecurityGroupsHandler(\n self.sync_from_callback_precommit,\n self.sync_from_callback_postcommit)\n self.journal = journal.OpendaylightJournalThread()\n self.port_binding_controller = port_binding.PortBindingManager.create()\n self.trunk_driver = trunk_driver.OpenDaylightTrunkDriverV2.create()\n if odl_const.ODL_QOS in cfg.CONF.ml2.extension_drivers:\n qos_driver.OpenDaylightQosDriver.create()\n self._start_maintenance_thread()\n\n def _start_maintenance_thread(self):\n # start the maintenance thread and register all the maintenance\n # operations :\n # (1) JournalCleanup - Delete completed rows from journal\n # (2) CleanupProcessing - Mark orphaned processing rows to pending\n # (3) Full sync - Re-sync when detecting an ODL \"cold reboot\"\n cleanup_obj = cleanup.JournalCleanup()\n self._maintenance_thread = maintenance.MaintenanceThread()\n self._maintenance_thread.register_operation(\n cleanup_obj.delete_completed_rows)\n self._maintenance_thread.register_operation(\n cleanup_obj.cleanup_processing_rows)\n self._maintenance_thread.register_operation(full_sync.full_sync)\n self._maintenance_thread.register_operation(recovery.journal_recovery)\n self._maintenance_thread.start()\n\n @staticmethod\n def _record_in_journal(context, object_type, operation, data=None):\n if data is None:\n data = context.current\n journal.record(context._plugin_context, object_type,\n context.current['id'], operation, data,\n ml2_context=context)\n\n @log_helpers.log_method_call\n def create_network_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE)\n\n @log_helpers.log_method_call\n def create_subnet_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)\n\n @log_helpers.log_method_call\n def create_port_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_PORT, odl_const.ODL_CREATE)\n\n @log_helpers.log_method_call\n def update_network_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE)\n\n @log_helpers.log_method_call\n def update_subnet_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE)\n\n @log_helpers.log_method_call\n def update_port_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_PORT, odl_const.ODL_UPDATE)\n\n @log_helpers.log_method_call\n def delete_network_precommit(self, context):\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[])\n\n @log_helpers.log_method_call\n def delete_subnet_precommit(self, context):\n # Use the journal row's data field to store parent object\n # uuids. This information is required for validation checking\n # when deleting parent objects.\n new_context = [context.current['network_id']]\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE,\n data=new_context)\n\n @log_helpers.log_method_call\n def delete_port_precommit(self, context):\n # Use the journal row's data field to store parent object\n # uuids. This information is required for validation checking\n # when deleting parent objects.\n new_context = [context.current['network_id']]\n for subnet in context.current['fixed_ips']:\n new_context.append(subnet['subnet_id'])\n OpenDaylightMechanismDriver._record_in_journal(\n context, odl_const.ODL_PORT, odl_const.ODL_DELETE,\n data=new_context)\n\n def _make_security_group_dict(self, sg):\n return {\n 'id': sg['id'],\n 'name': sg['name'],\n 'tenant_id': sg['tenant_id'],\n 'description': sg['description']\n }\n\n def _make_security_group_rule_dict(self, rule, sg_id=None):\n if sg_id is None:\n sg_id = rule['security_group_id']\n return {\n 'id': rule['id'],\n 'tenant_id': rule['tenant_id'],\n 'security_group_id': sg_id,\n 'ethertype': rule['ethertype'],\n 'direction': rule['direction'],\n 'protocol': rule['protocol'],\n 'port_range_min': rule['port_range_min'],\n 'port_range_max': rule['port_range_max'],\n 'remote_ip_prefix': rule['remote_ip_prefix'],\n 'remote_group_id': rule['remote_group_id']\n }\n\n def _sync_security_group_create_precommit(\n self, context, operation, object_type, res_id, resource_dict):\n # TODO(yamahata): remove this work around once\n # https://review.openstack.org/#/c/281693/\n # is merged.\n # For now, SG rules aren't passed down with\n # precommit event. We resort to get it by query.\n new_objects = context.session.new\n sgs = [sg for sg in new_objects\n if isinstance(sg, securitygroup.SecurityGroup)]\n if res_id is not None:\n sgs = [sg for sg in sgs if sg.id == res_id]\n for sg in sgs:\n sg_id = sg['id']\n res = self._make_security_group_dict(sg)\n journal.record(context, object_type, sg_id, operation, res)\n # NOTE(yamahata): when security group is created, default rules\n # are also created.\n # NOTE(yamahata): at this point, rule.security_group_id isn't\n # populated. but it has rule.security_group\n rules = [rule for rule in new_objects\n if (isinstance(rule, securitygroup.SecurityGroupRule) and\n rule.security_group == sg)]\n for rule in rules:\n res_rule = self._make_security_group_rule_dict(rule, sg_id)\n journal.record(context, odl_const.ODL_SG_RULE,\n rule['id'], odl_const.ODL_CREATE, res_rule)\n\n @log_helpers.log_method_call\n def sync_from_callback_precommit(self, context, operation, res_type,\n res_id, resource_dict, **kwargs):\n object_type = res_type.singular\n if resource_dict is not None:\n resource_dict = resource_dict[object_type]\n\n if (operation == odl_const.ODL_CREATE and\n object_type == odl_const.ODL_SG):\n self._sync_security_group_create_precommit(\n context, operation, object_type, res_id, resource_dict)\n return\n\n # NOTE(yamahata): in security group/security gorup rule case,\n # orm object is passed. not resource dict. So we have to convert it\n # into resource_dict\n if not isinstance(resource_dict, dict) and resource_dict is not None:\n if object_type == odl_const.ODL_SG:\n resource_dict = self._make_security_group_dict(resource_dict)\n elif object_type == odl_const.ODL_SG_RULE:\n resource_dict = self._make_security_group_rule_dict(\n resource_dict)\n # NOTE(yamahata): bug work around\n # callback for update of security grouop doesn't pass complete\n # info. So we have to build it. Once the bug is fixed, remove\n # this bug work around.\n # https://launchpad.net/bugs/1546910\n # https://review.openstack.org/#/c/281693/\n elif (object_type == odl_const.ODL_SG and\n operation == odl_const.ODL_UPDATE):\n # NOTE(yamahata): precommit_update is called before updating\n # values. so context.session.{new, dirty} doesn't include sg\n # in question. a dictionary with new values needs to be build.\n core_plugin = directory.get_plugin()\n sg = core_plugin._get_security_group(context, res_id)\n tmp_dict = self._make_security_group_dict(sg)\n tmp_dict.update(resource_dict)\n resource_dict = tmp_dict\n\n object_uuid = (resource_dict.get('id')\n if operation == 'create' else res_id)\n if object_uuid is None:\n # NOTE(yamahata): bug work around bug/1546910\n # TODO(yamahata): once the following patch is merged\n # remove this bug work around\n # https://review.openstack.org/#/c/281693/\n assert object_type == odl_const.ODL_SG_RULE\n # NOTE(yamahata): bulk creation case\n # context.session.new accumulates all newly created orm object.\n # there is no easy way to pick up the lastly added orm object.\n rules = [rule for rule in context.session.new\n if (isinstance(rule, securitygroup.SecurityGroupRule))]\n if len(rules) == 1:\n object_uuid = rules[0].id\n resource_dict['id'] = object_uuid\n else:\n LOG.error(\"bulk creation of sgrule isn't supported\")\n raise NotImplementedError(\n _(\"unsupporetd bulk creation of security group rule\"))\n journal.record(context, object_type, object_uuid,\n operation, resource_dict)\n # NOTE(yamahata): DB auto deletion\n # Security Group Rule under this Security Group needs to\n # be deleted. At NeutronDB layer rules are auto deleted with\n # cascade='all,delete'.\n if (object_type == odl_const.ODL_SG and\n operation == odl_const.ODL_DELETE):\n for rule in kwargs['security_group'].rules:\n journal.record(context, odl_const.ODL_SG_RULE,\n rule.id, odl_const.ODL_DELETE, [object_uuid])\n\n def sync_from_callback_postcommit(self, context, operation, res_type,\n res_id, resource_dict, **kwargs):\n self._postcommit(context)\n\n def _postcommit(self, context):\n self.journal.set_sync_event()\n\n create_network_postcommit = _postcommit\n create_subnet_postcommit = _postcommit\n create_port_postcommit = _postcommit\n update_network_postcommit = _postcommit\n update_subnet_postcommit = _postcommit\n update_port_postcommit = _postcommit\n delete_network_postcommit = _postcommit\n delete_subnet_postcommit = _postcommit\n delete_port_postcommit = _postcommit\n\n @log_helpers.log_method_call\n def bind_port(self, port_context):\n \"\"\"Set binding for a valid segments\n\n \"\"\"\n return self.port_binding_controller.bind_port(port_context)\n\n def check_vlan_transparency(self, context):\n \"\"\"Check VLAN transparency\n\n \"\"\"\n # TODO(yamahata): This should be odl service provider dependent\n # introduce ODL yang model for ODL to report which network types\n # are vlan-transparent.\n # VLAN and FLAT cases, we don't know if the underlying network\n # supports QinQ or VLAN.\n # For now, netvirt supports only vxlan tunneling.\n VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN]\n network = context.current\n # see TypeManager._extend_network_dict_provider()\n # single providernet\n if providernet.NETWORK_TYPE in network:\n return (network[providernet.NETWORK_TYPE] in\n VLAN_TRANSPARENT_NETWORK_TYPES)\n # multi providernet\n segments = network.get(mpnet.SEGMENTS)\n if segments is None:\n return True\n return all(segment[providernet.NETWORK_TYPE]\n in VLAN_TRANSPARENT_NETWORK_TYPES\n for segment in segments)\n","sub_path":"networking_odl/ml2/mech_driver_v2.py","file_name":"mech_driver_v2.py","file_ext":"py","file_size_in_byte":14047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"470445499","text":"#! /usr/bin/env python3\n#NoGuiLinux\n#this script is to replace the optDepTree.py script as this relies on an\n#iterable based system rather than the massive reliance on recursion\n#that is seen in optDepTree.py\n\nimport pyalpm,argparse\n#loop through a list of pkgs, if the current pkg has deps add to list of pkgs, until for loop reaches end of list\n\n#getDeps() will get the dependency list of the current package and return the list() if there is one, otherwise return None\n\nclass container:\n master=None\n class modes:\n OPT_DEPS=1\n REQ_DEPS=2\n BOTH_DEPS=3\n\n class application:\n master=None\n pkgs=['mplayer']\n\n def setHandle(self):\n h=pyalpm.Handle('/','/var/lib/pacman')\n return h\n\n def setRemote(self,handle):\n dbs=['extra','community','core']\n db={}\n for d in dbs:\n db[d]=handle.register_syncdb(d,pyalpm.SIG_DATABASE_OPTIONAL)\n sync=handle.get_syncdbs()\n return db\n\n def setLocal(self,handle):\n return handle.get_localdb()\n\n def getDeps(self,pkg,db,mode=2):\n deps=None\n for repo in db.keys():\n pk=db[repo].get_pkg(pkg)\n if pk != None:\n if mode == self.master.modes.REQ_DEPS:\n deps=pk.depends\n elif mode == self.master.modes.OPT_DEPS:\n deps=[i.split(':')[0] for i in pk.optdepends]\n elif mode == self.master.modes.BOTH_DEPS:\n deps=pk.depends\n deps.extend([i.split(':')[0] for i in pk.optdepends])\n break\n return deps\n\n def resolve(self,pkgs=[],mode=2,statusInstalled=False):\n opk=len(pkgs)\n oPk=pkgs\n if pkgs != []:\n handle=self.setHandle()\n lDb=self.setLocal(handle)\n rDb=self.setRemote(handle)\n \n for pkg in pkgs:\n deps=self.getDeps(pkg,rDb,mode=mode)\n if deps:\n for pkg_sub in deps:\n #ensure we are not adding duplicate pkg names to the list\n if pkg_sub not in pkgs: \n if statusInstalled == False:\n installed=lDb.get_pkg(pkg_sub)\n if installed == None:\n pkgs.append(pkg_sub)\n elif statusInstalled == True:\n pkgs.append(pkg_sub)\n del(deps)\n if len(pkgs) > opk:\n return pkgs[opk:]\n else:\n return []\n else:\n return 'return empty pkgs list'\n\n class cmdline:\n master=None\n opts={\n 'pkg':['-p','--package','package, or comma delimited list of packages to get dependencies for','yes'],\n 'mode':['-m','--mode','one of [req=2||opt=1||both=3] for dependency types i.e. -m 2','yes'],\n 'installed':['-i','--include-installed','include installed packages in list','store_true']\n }\n options=None\n\n def description(self):\n desc='''\n utilize the alpm library to gather depency \n information on a pkg, and only display the \n dependencies not installed. This script was\n create as a result of pacmans dependency/\n optional depency listing including trailing\n newline characters muddying my dependency\n list results, resulting in needing to do\n more processing than necessary.\n '''.replace('\\t','')\n return desc\n\n def args(self):\n parser=argparse.ArgumentParser(description=self.description())\n for opt in self.opts.keys():\n if opt == 'installed':\n parser.add_argument(self.opts[opt][0],self.opts[opt][1],help=self.opts[opt][2],action=self.opts[opt][3])\n else:\n parser.add_argument(self.opts[opt][0],self.opts[opt][1],help=self.opts[opt][2],required=self.opts[opt][3])\n \n self.options=parser.parse_args()\n def parseModes(self):\n if self.options.mode != None:\n try:\n mode=int(self.options.mode)\n except:\n exit('that is not a valid mode!')\n if 0 < mode < 4:\n return mode\n else:\n exit('mode must be one of the range 1-3')\n \n\n class display:\n master=None\n def display(self,dataSet):\n if type(dataSet) == type(list()):\n for element in dataSet:\n print(element)\n else:\n print(element)\n class void:\n master=None\n\n def run(self,workArea):\n workArea.cmdline.args()\n mode=workArea.cmdline.parseModes()\n installed=workArea.cmdline.options.include_installed\n workArea.application.pkgs=workArea.cmdline.options.package.split(',')\n deps=workArea.application.resolve(workArea.application.pkgs,mode=mode,statusInstalled=installed)\n workArea.display.display(deps)\n\n def assemble(self):\n workArea=self.void()\n workArea.master=self\n \n workArea.modes=self.modes()\n\n workArea.cmdline=self.cmdline()\n workArea.cmdline.master=workArea\n\n workArea.application=self.application()\n workArea.application.master=workArea\n \n workArea.display=self.display()\n workArea.display.master=workArea\n\n self.run(workArea)\n\napp=container()\napp.assemble()\n","sub_path":"pacmanQuery2Xml/depTree.py","file_name":"depTree.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"246316712","text":"from sklearn.preprocessing import LabelBinarizer\nfrom tqdm import tqdm\nimport pandas as pd\nfrom Applications.TextClassification.utils import hangul_preprocessing, words_transform\nimport pickle\nfrom hparams import create_hparams\nhparams = create_hparams(None)\nimport os\n#\n# a = '''* 심장관상동맥조영CT검사결과 유소견입니다.\n# [finding]\n# 1. Coronary variation or anomaly: None.\n# 2. CACS (Agatston score): 424.19\n# 3. Atherosclerotic CAD:\n# (Pls. note that the degree of stenosis suggested in this report is not quantitative but observational and empirical.)\n# \n# --------------------------------------------------------------------------------------------------\n# Segment: Stenosis / Plaque type, shape, size /\n# --------------------------------------------------------------------------------------------------\n# LM,p,m-LAD: mild / calcified, mixed, nodular, tubular /\n# p,m-LCX: minimal to mild / calcified, mixed, nodular/\n# p,m-RCA: minimal to mild / calcified, nodular/\n# ---------------------------------------------------------------------------------------------------\n# \n# Minimal: <25%, Mild: 25-49%, Moderate: 50-69%, Severe: >70-99%, Occluded.\n#\n# 4. Extra-coronary CV findings:\n# - Not remarkable.\n# 5. Covered lung and upper abdomen:\n# - No evidence of abnormal findings in both lungs and upper abdomen covered in this study performed by using the scanning and reconstruction protocol for CCTA\n# [conclusion]'''\n\ndef process(doctor_opinions_df,tokenizer_path=None, label_path=None, balance=False):\n\n\tdoctor_opinions_df.drop_duplicates(keep=\"first\", inplace=True)\n\tclasses = list(sorted(set(doctor_opinions_df[\"DSES_NM\"])))\n\n\ttotal_ = []\n\tless_data = []\n\tfor cls in classes:\n\t\ttemp_df = doctor_opinions_df.loc[doctor_opinions_df[\"DSES_NM\"]==cls]\n\t\tprint(f'{cls}----{len(temp_df)}')\n\t\tif len(temp_df)>=100:\n\t\t\ttotal_.append(temp_df.sample(frac=1))\n\t\telse:\n\t\t\tless_data.append(temp_df.sample(frac=1))\n\n\tif balance:\n\t\tmax_count = min([len(i) for i in total_])\n\t\ttotal_df = pd.concat([df.iloc[:max_count] for df in total_])\n\telse:\n\t\ttotal_df = pd.concat([df for df in total_])\n\n\t\t# less_data_df[['소견','DSES_CD','DSES_NM']].to_csv(\"processed/out_of_distribution_classes.csv\", encoding=encoding)\n\t# total_df[['소견','DSES_CD','DSES_NM']].to_csv(\"processed/in_of_distribution_classes.csv\", encoding=encoding)\n\n\ttraining_classes = list(sorted(set(total_df[\"DSES_NM\"])))\n\tdoctor_opinions_df_new = total_df\n\topinions = list(doctor_opinions_df_new[\"소견\"])\n\tlabels = list(doctor_opinions_df_new[\"DSES_NM\"])\n\tcleaned_opinions = []\n\tcleaned_labels = []\n\tfor opinion, label in tqdm(zip(opinions, labels)):\n\t\tcleaned_opinion = hangul_preprocessing(doctor_opinions=str(opinion).replace('\\n',' ').strip().lower())\n\t\tcleaned_label = hangul_preprocessing(doctor_opinions=str(label).strip().lower())\n\t\tcleaned_opinions.append(cleaned_opinion)\n\t\tcleaned_labels.append(cleaned_label)\n\topinions_sequences, labels_sequences, tokenizer = words_transform(opinionslist=cleaned_opinions,\n\t labelslist=cleaned_labels,\n\t input_len=hparams.input_sequence_length,\n\t output_len=hparams.output_sequence_length,\n\t tokenizer_path=tokenizer_path)\n\n\n\n\n\tif not os.path.isfile(label_path):\n\t\tlabels_text = tokenizer.sequences_to_texts(labels_sequences)\n\t\tunique_labels_text, unique_labels_sequences = [],[]\n\t\tfor text, seq in zip(labels_text,labels_sequences):\n\t\t\tif text not in unique_labels_text:\n\t\t\t\tunique_labels_text.append(text)\n\t\t\t\tunique_labels_sequences.append(seq.tolist())\n\t\t### save labels_text and sequence for futher using\n\t\tlabels_dict = dict(zip(unique_labels_text, unique_labels_sequences))\n\t\tf = open(label_path,'w')\n\t\tf.write(str(labels_dict))\n\t\tf.close()\n\tprint('Summary:')\n\tprint(f'input column name: {\"소견\"}')\n\tprint(f'label column name: {\"DSES_NM\"}')\n\tprint(f'number of labels: {len(training_classes)}')\n\tprint(f'datarow: {len(total_df)}')\n\n\t# p22rint(f'info ----- Saved label-sequence mapping file to: {labels_sequences_path}'1)\n\t# 1dataloader = data_generator(opinions_sequences, labels_sequences, batch_size=hpar\n\t# ams.batch_size)\n\t### when train model using mse loss function (output is label's sequence)\n\tood_opinions_sequences, ood_labels_sequences = None, None\n\tif len(less_data) > 0:\n\t\tless_data_df = pd.concat([df_ for df_ in less_data])\n\t\topinions = list(less_data_df[\"소견\"])\n\t\tlabels = list(less_data_df[\"DSES_NM\"])\n\t\tcleaned_opinions = []\n\t\tcleaned_labels = []\n\t\tfor opinion, label in tqdm(zip(opinions, labels)):\n\t\t\tcleaned_opinion = hangul_preprocessing(doctor_opinions=str(opinion).replace('\\n', ' ').strip().lower())\n\t\t\tcleaned_label = hangul_preprocessing(doctor_opinions=str(label).strip().lower())\n\t\t\tcleaned_opinions.append(cleaned_opinion)\n\t\t\tcleaned_labels.append(cleaned_label)\n\t\tood_opinions_sequences, ood_labels_sequences, _ = words_transform(opinionslist=cleaned_opinions,\n\t\t labelslist=cleaned_labels,\n\t\t input_len=hparams.input_sequence_length,\n\t\t output_len=hparams.output_sequence_length,\n\t\t tokenizer_path=tokenizer_path)\n\treturn opinions_sequences, labels_sequences, ood_opinions_sequences, ood_labels_sequences, tokenizer\n\n\n\t### when train model using cross-entropy loss function (output is softmax propability)\n\t# return opinions_sequences, labels_onehot_sequences, tokenizer.word_counts, training_classes\n","sub_path":"api/source_odin/Applications/TextClassification/preprocessing/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"604237804","text":"#%%-*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 8 15:34:25 2018\n\n@author: kuifenhu\n\"\"\"\n\n# this GUI is developed from the origin below\n# it use tkinter group function \n\n#http://interactivepython.org/runestone/static/CS152f17/GUIandEventDrivenProgramming/05_widget_grouping.html\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nfrom objs.GuiObjFcn import GuiObjFcn\nfrom functools import partial\nimport matplotlib.pyplot as plt\nimport numpy as np\n# call pop up file functions\nimport easygui\nimport dill\n# data frame\nimport pandas as pd\n#%%\n# self written Obj: symbol process\nfrom objs.SymbObj import SymbObj\n# self written Obj: data download using iex and parser\nfrom objs.DownloaderObj import DownloaderObj\n# self written Obj: plotting tools for OHLC plotting etc\nfrom objs.GuiObj import GuiObj\n# self written obj: to process Techincal indicator\nfrom objs.TaObj import TaObj\n#%%\n\nclass entry_holder(object):\n pass\n\n \nclass ControllerObj():\n def __init__(self):\n # from tk Grouping Examples\n #Create the consel window\n self.window = tk.Tk()\n self.window.title(\"Main Control Consel\")\n self.lines=[]\n \n \n #instaniate symbol obj\n self.s=SymbObj()\n #set defualt symb ticker\n# s.symb=['A','KEYS','COST','FTR','LTC','NHI','WELL','HD','SNH','CHCT','GEO','AAPL','AVGO','GOOG']\n# s.symb=['BBY','NVDA','TSN','AAP','GE']\n self.s.symb=['AAPL']\n \n #insitaniate data obj\n self.d=DownloaderObj(self.s)\n # config data obj\n # select the range of time series strting from today back ndays\n self.d.ndays=100\n #select the frequency by default 1 day\n self.d.period=1\n # execute download action\n self.d.Download_ts()\n \n \n # control panel. attached to GUI main frame\n #instaniate object TA given the data obj d. \n self.ta=TaObj(self.d)\n #config ta obj: time period\n self.ta.tp=14\n # config ta obj: input data\n self.ta.inputdata=self.d.ts_result[0]\n \n #instaniate object GUI, the main trace plotting window, give the data obj d. \n self.g=GuiObj(self.d)\n \n #populate the rest of widgets on control consel panel\n self.create_widgets()\n\n#%% Define the action associated with each button, from cmd of TA\n # if cmd output format is Pattern\n def showPattern(self,str1):\n plt.sca(self.g.ax)\n try:\n plt.plot(self.ta.t[self.ta.integer==100],self.ta.close[self.ta.integer==100],color='red', marker='*',markersize=15,linestyle=\"None\",label=str1+'pos')\n plt.plot(self.ta.t[self.ta.integer==-100],self.ta.close[self.ta.integer==-100],color='blue', marker='*',markersize=15,linestyle=\"None\",label=str1+'neg')\n self.g.fig_ohlc.canvas.draw()\n self.g.fig_ohlc.canvas.flush_events() \n except:\n pass\n # if cmd output format is Indicator\n def showIndicator(self,str1):\n plt.sca(self.g.ax1)\n plt.cla()\n N=len(self.ta.output)\n if N>=5:\n l1,=plt.plot(self.ta.t,self.ta.output,label=str1)\n self.lines.append(l1) \n else: \n for i in np.arange(N):\n l1,=plt.plot(self.ta.t,self.ta.output[i],label=str1)\n self.lines.append(l1)\n # exec('self.'+str1+'.append(l1)')\n self.g.fig_ohlc.canvas.draw()\n self.g.fig_ohlc.canvas.flush_events() \n \n #if cmd output format is Overlay\n def showOverlay(self,str1) :\n plt.sca(self.g.ax)\n N=len(self.ta.output)\n if N>=5:\n l1,=plt.plot(self.ta.t,self.ta.output,label=str1)\n self.lines.append(l1) \n else: \n for i in np.arange(N):\n l1,=plt.plot(self.ta.t,self.ta.output[i],label=str1)\n self.lines.append(l1)\n# exec('self.'+str1+'.append(l1)')\n self.g.fig_ohlc.canvas.draw()\n self.g.fig_ohlc.canvas.flush_events() \n \n#%% Define the action associated with each button -- from cmd of action\n def showCustom(self,str1):\n if str1 is 'Cursor':\n if self.g.cursor.visible==True:\n self.g.cursor.visible=False\n else:\n self.g.cursor.visible=True\n elif str1 is 'LoadHolding':\n # open exel file from fidelity. parse the symbol and cost basis.\n path= easygui.fileopenbox()\n df=pd.DataFrame.from_csv(path)\n d = dict(zip(df['Symbol'][df['Cost Basis Per Share']!='n/a'],df['Cost Basis Per Share'][df['Cost Basis Per Share']!='n/a']))\n d1 = {k: float(re.sub('[$,]','',v)) for k, v in d.items()}\n d2 = {key:val for key, val in d1.items() if val != 0}\n self.d.symb=list(d2.keys())\n self.d.holdings=d2\n self.d.Download_ts()\n self.treeview.update(self.d.symb)\n \n elif str1 is 'AddTicker':\n tmp=self.entrys.addsymb.get().strip().upper().split(',')\n self.d.symb=self.d.symb+tmp\n self.d.Download_ts()\n self.treeview.update(self.d.symb)\n \n elif str1 is 'LoadTicker':\n \n path=self.s.loadfromfile()\n self.entrys.loadsymb.insert(0,path)\n self.d = DownloaderObj(self.s)\n self.treeview.update(self.d.symb)\n \n elif str1 is 'Load_ts':\n if self.entrys.pd.get().strip() is '':\n self.d.period=1\n else:\n self.d.period=float(self.entrys.pd.get().strip())\n \n if self.entrys.ndays.get().strip() is '':\n self.d.ndays=500\n else:\n self.d.ndays=float(self.entrys.ndays.get().strip())\n self.d.Download_ts()\n self.g=GuiObj(self.d)\n \n elif str1 is 'Load_key':\n self.d.Download_keynum()\n elif str1 is 'Load_pulse':\n self.d.Download_pulse()\n elif str1 is 'Load_finviz':\n self.d.Download_finviz()\n elif str1 is 'TA':\n pass\n \n elif str1 is 'OHLC':\n self.g.chart_ohlc()\n elif str1 is 'KeysChart':\n self.g.chart_keys()\n \n \n elif str1 is 'Save':\n path= easygui.fileopenbox()\n with open(path, 'wb') as f:\n dill.dump(self.d, f)\n dill.dump(self.ta, f)\n elif str1 is 'Load':\n path= easygui.fileopenbox()\n with open(path, 'rb') as f:\n self.d = dill.load(f)\n self.ta = dill.load(f)\n \n \n def remove(self):\n for i in np.arange(len(self.lines)):\n self.lines.pop(0).remove()\n \n def remove1(self,ax,para):\n plt.sca(ax)\n tmp=[]\n str1='tmp=self.'+para\n for i in np.arange(len(tmp)):\n tmp.pop(0).remove()\n#%%\n def GuiObjFcn(self,str1):\n self.ta.calculate(str1)\n if str1 in list(self.Grp_custom.keys()):\n self.showCustom(str1)\n elif str1 in list(self.Grp_overlay.keys()) :\n self.showOverlay(str1)\n elif str1 in list(self.Grp_momentum.keys()) :\n self.showIndicator(str1)\n elif str1 in list(self.Grp_volum.keys()) :\n self.showIndicator(str1)\n elif str1 in list(self.Grp_cycle_ind.keys()) :\n self.showIndicator(str1)\n elif str1 in list(self.Grp_price_trans.keys()) :\n self.showIndicator(str1)\n elif str1 in list(self.Grp_volatility.keys()) :\n self.showOverlay(str1)\n elif str1 in list(self.Grp_pattern.keys()) :\n self.showPattern(str1) \n elif str1 in list(self.Grp_stat.keys()) :\n self.showIndicator(str1)\n else:\n pass\n#%% \n def create_buttons(self, parent,bts):\n k=1\n m=1\n for i in bts:\n if k>=15:\n k=1\n m=m+1\n #str1=\"self.bt_\"+i+\" = ttk.Button(parent, text='\"+i+\"',command=lambda: partial(GuiObjFcn,'\"+i+\"'))\"\n str1=\"self.bt_\"+i+\" = ttk.Button(parent, text='\"+i+\"',command= partial(self.GuiObjFcn,'\"+i+\"'))\"\n #print(str1)\n exec(str1) \n str1=\"self.bt_\"+i+\".grid(row=\"+str(k)+\", column=\"+str(m)+\")\"\n # print(str)\n exec(str1)\n k=k+1\n \n#%% \n def create_entry1(self,parent,row,column):\n# \n# setattr(self.entrys,name+'Val',StringVar())\n## exec(str1)\n### label=Label(parent, text=caption)\n# str1=\"self.entrys.\"+name+\"=Entry(parent, text='14',textvariable=self.entrys.\"+name+\"Val)\"\n# exec(str1)\n# str1=\"self.entrys.\"+name+\".grid(row=row,column=column)\"\n# exec(str1)\n v=StringVar()\n entry=Entry(parent, text='14',textvariable=v)\n entry.grid(row=row,column=column)\n return entry\n#%% \n def create_label1(self,parent,caption,row,column):\n \n label=Label(parent, text=caption)\n label.grid(row=row,column=column)\n return label\n \n#%% \n def create_entrys(self, parent):\n \n# self.entry_content=[]\n# self.v0 = StringVar()\n# self.v1 = StringVar()\n# self.v2= StringVar()\n# self.v3 = StringVar()\n# \n# caption='TimePeriod 1:'\n# label=Label(parent, text=caption)\n# label.grid(row=1,column=1)\n# self.e1 = Entry(parent, text='14',textvariable=self.v0)\n# self.e1.grid(row=2,column=1)\n# self.entry_content.append((self.v0.get()))\n# \n# caption='TimePeriod 2:'\n# label=Label(parent, text=caption)\n# label.grid(row=3,column=1)\n# self.e2 = Entry(parent, text='14',textvariable=self.v1)\n# self.e2.grid(row=4,column=1)\n# self.entry_content.append((self.v1.get()))\n# \n# caption='TimePeriod 3:'\n# label=Label(parent, text=caption)\n# label.grid(row=5,column=1)\n# self.e3 = Entry(parent, text='14',textvariable=self.v2)\n# self.e3.grid(row=6,column=1)\n# self.entry_content.append((self.v2.get()))\n# \n# caption='Ticker'\n# label=Label(parent, text=caption)\n# label.grid(row=7,column=1)\n# self.e4 = Entry(parent, text='14',textvariable=self.v3)\n# self.e4.grid(row=8,column=1)\n# self.entry_content.append((self.v3.get()))\n \n pass\n \n#%% \n def create_widgets(self):\n self.entrys=entry_holder()\n self.labels=entry_holder()\n \n \n # Create some room around all the internal frames\n self.window['padx'] = 5\n self.window['pady'] = 5\n\n# # - - - - - - - - - - - - - - - - - - - - -\n # Frame\n frame_label = ttk.Label(self.window, text=\"Parameter Input\")\n frame_label.grid(row=1, column=1, sticky=tk.W+ tk.N, pady=3)\n\n frame1 = ttk.Frame(self.window, relief=tk.RIDGE)\n frame1.grid(row=2, column=1, sticky=tk.E + tk.W + tk.N+tk.S, padx=30, pady=4)\n #Instanitate treeview obj on the left.\n self.treeview=AddTreeView(frame1,self.g)\n self.treeview.tree.pack(expand=True, fill='both')\n \n \n frame2 = ttk.Frame(self.window, relief=tk.RIDGE)\n frame2.grid(row=3, column=1, sticky=tk.E + tk.W + tk.N+tk.S, padx=30, pady=4)\n \n notebook_label = ttk.Label(self.window, text=\"Notebook\")\n notebook_label.grid(row=1, column=2, sticky=tk.W, pady=3)\n\n notebookHolder = ttk.Notebook(self.window)\n notebookHolder.grid(row=2, column=2, sticky=tk.E + tk.W + tk.N + tk.S, padx=30, pady=4)\n\n # add tabs to notebook\n tab0 = tk.Frame(notebookHolder)\n tab1 = tk.Frame(notebookHolder)\n tab2 = tk.Frame(notebookHolder)\n tab3 = tk.Frame(notebookHolder)\n tab4 = tk.Frame(notebookHolder)\n tab5 = tk.Frame(notebookHolder)\n tab6 = tk.Frame(notebookHolder)\n tab7 = tk.Frame(notebookHolder)\n tab8 = tk.Frame(notebookHolder)\n \n notebookHolder.add(tab0, text=\"Custome\", compound=tk.TOP)\n notebookHolder.add(tab1, text=\"Overlap \", compound=tk.TOP)\n notebookHolder.add(tab2, text=\"Momentum\", compound=tk.TOP)\n notebookHolder.add(tab3, text=\"Volumn\", compound=tk.TOP)\n notebookHolder.add(tab4, text=\"Cycle indicator\", compound=tk.TOP)\n notebookHolder.add(tab5, text=\"Voltality\", compound=tk.TOP)\n notebookHolder.add(tab6, text=\"Price Transformation\", compound=tk.TOP)\n notebookHolder.add(tab7, text=\"Pattern Recognition\", compound=tk.TOP)\n notebookHolder.add(tab8, text=\"Statistics\", compound=tk.TOP)\n \n #define the buttons cmd\n self.Grp_custom={'Cursor':'Turn on off Cursor',\n 'LoadHolding':'Load holding from Fidelity',\n 'AddTicker':'Add new ticker',\n 'LoadTicker':'Load ticker from file',\n 'Load_ts':'Load TS',\n 'Load_key':'Load keys', \n 'Load_pulse':'Load pulse',\n 'Load_finviz':'Load finviz',\n 'TA':'Calculate TA',\n 'OHLC':'OHLC chart',\n 'KeysChart':'Chart for Keys',\n 'Save':'Save Class',\n 'Load':'Load Class',\n 'OHLC':'OHLC chart',\n 'reserved':'test'}\n self.Grp_overlay={'BBANDS' :'Bollinger Bands ',\n 'DEMA':'Double Exponential Moving Average ',\n 'EMA':'Exponential Moving Average ',\n 'HT_TRENDLINE':'Hilbert Transform - Instantaneous Trendline ',\n 'KAMA':'Kaufman Adaptive Moving Average ',\n 'MA':'Moving average ',\n 'MAMA':'MESA Adaptive Moving Average ',\n 'MAVP':'Moving average with variable period ',\n 'MIDPOINT':'MidPoint over period ',\n 'MIDPRICE':'Midpoint Price over period ',\n 'SAR':'Parabolic SAR ',\n 'SAREXT':'Parabolic SAR - Extended ',\n 'SMA':'Simple Moving Average ',\n 'T3':'Triple Exponential Moving Average (T3) ',\n 'TEMA':'Triple Exponential Moving Average ',\n 'TRIMA':'Triangular Moving Average ',\n 'WMA':'Weighted Moving Average '}\n self.Grp_momentum={'ADX': 'Average Directional Movement Index',\n 'ADXR':'AverageDirectionalMovementIndexRating',\n 'APO':'AbsolutePriceOscillator',\n 'AROON':'Aroon',\n 'AROONOSC':'AroonOscillator',\n 'BOP':'BalanceOfPower',\n 'CCI':'CommodityChannelIndex',\n 'CMO':'ChandeMomentumOscillator',\n 'DX':'DirectionalMovementIndex',\n 'MACD':'MovingAverageConvergence/Divergence',\n 'MACDEXT':'MACDwithcontrollableMAtype',\n 'MACDFIX':'MovingAverageConvergence/DivergenceFix12/26',\n 'MFI':'MoneyFlowIndex',\n 'MINUS_DI':'MinusDirectionalIndicator',\n 'MINUS_DM':'MinusDirectionalMovement',\n 'MOM':'Momentum',\n 'PLUS_DI':'PlusDirectionalIndicator',\n 'PLUS_DM':'PlusDirectionalMovement',\n 'PPO':'PercentagePriceOscillator',\n 'ROC':'Rateofchange:((price/prevPrice)-1)*100',\n 'ROCP':'RateofchangePercentage:(price-prevPrice)/prevPrice',\n 'ROCR':'Rateofchangeratio:(price/prevPrice)',\n 'ROCR100':'Rateofchangeratio100scale:(price/prevPrice)*100',\n 'RSI':'RelativeStrengthIndex',\n 'STOCH':'Stochastic',\n 'STOCHF':'StochasticFast',\n 'STOCHRSI':'StochasticRelativeStrengthIndex',\n 'TRIX':'1-dayRate-Of-Change(ROC)ofaTripleSmoothEMA',\n 'ULTOSC':'UltimateOscillator',\n 'WILLR': 'Williams%R'}\n self.Grp_volum={'AD':' Chaikin A/D Line',\n 'ADOSC' :'Chaikin A/D Oscillator',\n 'OBV': 'On Balance Volume' }\n self.Grp_cycle_ind={'HT_DCPERIOD' :'Hilbert Transform - Dominant Cycle Period ',\n 'HT_DCPHASE' :'Hilbert Transform - Dominant Cycle Phase ',\n 'HT_PHASOR' :'Hilbert Transform - Phasor Components ',\n 'HT_SINE' :'Hilbert Transform - SineWave ',\n 'HT_TRENDMODE' :'Hilbert Transform - Trend vs Cycle Mode ' }\n self.Grp_price_trans={'AVGPRICE':'Average Price ',\n 'MEDPRICE':'Median Price ',\n 'TYPPRICE':'Typical Price ',\n 'WCLPRICE':'Weighted Close Price '\n }\n self.Grp_volatility={'ATR': 'Average True Range',\n 'NATR': ' Normalized Average True Range',\n 'TRANGE': 'True Range'}\n self.Grp_pattern={ 'CDL2CROWS' :'Two Crows ',\n 'CDL3BLACKCROWS':'Three Black Crows ',\n 'CDL3INSIDE':'Three Inside Up/Down ',\n 'CDL3LINESTRIKE':'Three-Line Strike ',\n 'CDL3OUTSIDE':'Three Outside Up/Down ',\n 'CDL3STARSINSOUTH':'Three Stars In The South ',\n 'CDL3WHITESOLDIERS':'Three Advancing White Soldiers ',\n 'CDLABANDONEDBABY':'Abandoned Baby ',\n 'CDLADVANCEBLOCK':'Advance Block ',\n 'CDLBELTHOLD':'Belt-hold ',\n 'CDLBREAKAWAY':'Breakaway ',\n 'CDLCLOSINGMARUBOZU':'Closing Marubozu ',\n 'CDLCONCEALBABYSWALL':'Concealing Baby Swallow ',\n 'CDLCOUNTERATTACK':'Counterattack ',\n 'CDLDARKCLOUDCOVER':'Dark Cloud Cover ',\n 'CDLDOJI':'Doji ',\n 'CDLDOJISTAR':'Doji Star ',\n 'CDLDRAGONFLYDOJI':'Dragonfly Doji ',\n 'CDLENGULFING':'Engulfing Pattern ',\n 'CDLEVENINGDOJISTAR':'Evening Doji Star ',\n 'CDLEVENINGSTAR':'Evening Star ',\n 'CDLGAPSIDESIDEWHITE':'Up/Down-gap side-by-side white lines ',\n 'CDLGRAVESTONEDOJI':'Gravestone Doji ',\n 'CDLHAMMER':'Hammer ',\n 'CDLHANGINGMAN':'Hanging Man ',\n 'CDLHARAMI':'Harami Pattern ',\n 'CDLHARAMICROSS':'Harami Cross Pattern ',\n 'CDLHIGHWAVE':'High-Wave Candle ',\n 'CDLHIKKAKE':'Hikkake Pattern ',\n 'CDLHIKKAKEMOD':'Modified Hikkake Pattern ',\n 'CDLHOMINGPIGEON':'Homing Pigeon ',\n 'CDLIDENTICAL3CROWS':'Identical Three Crows ',\n 'CDLINNECK':'In-Neck Pattern ',\n 'CDLINVERTEDHAMMER':'Inverted Hammer ',\n 'CDLKICKING':'Kicking ',\n 'CDLKICKINGBYLENGTH':'Kicking - bull/bear determined by the longer marubozu ',\n 'CDLLADDERBOTTOM':'Ladder Bottom ',\n 'CDLLONGLEGGEDDOJI':'Long Legged Doji ',\n 'CDLLONGLINE':'Long Line Candle ',\n 'CDLMARUBOZU':'Marubozu ',\n 'CDLMATCHINGLOW':'Matching Low ',\n 'CDLMATHOLD':'Mat Hold ',\n 'CDLMORNINGDOJISTAR':'Morning Doji Star ',\n 'CDLMORNINGSTAR':'Morning Star ',\n 'CDLONNECK':'On-Neck Pattern ',\n 'CDLPIERCING':'Piercing Pattern ',\n 'CDLRICKSHAWMAN':'Rickshaw Man ',\n 'CDLRISEFALL3METHODS':'Rising/Falling Three Methods ',\n 'CDLSEPARATINGLINES':'Separating Lines ',\n 'CDLSHOOTINGSTAR':'Shooting Star ',\n 'CDLSHORTLINE':'Short Line Candle ',\n 'CDLSPINNINGTOP':'Spinning Top ',\n 'CDLSTALLEDPATTERN':'Stalled Pattern ',\n 'CDLSTICKSANDWICH':'Stick Sandwich ',\n 'CDLTAKURI':'Takuri (Dragonfly Doji with very long lower shadow) ',\n 'CDLTASUKIGAP':'Tasuki Gap ',\n 'CDLTHRUSTING':'Thrusting Pattern ',\n 'CDLTRISTAR':'Tristar Pattern ',\n 'CDLUNIQUE3RIVER':'Unique 3 River ',\n 'CDLUPSIDEGAP2CROWS':'Upside Gap Two Crows ',\n 'CDLXSIDEGAP3METHODS':'Upside/Downside Gap Three Methods '}\n self.Grp_stat={'BETA' :'Beta ',\n 'CORREL' :'Pearsons Correlation Coefficient (r) ',\n 'LINEARREG' :'Linear Regression ',\n 'LINEARREG_ANGLE' :'Linear Regression Angle ',\n 'LINEARREG_INTERCEPT' :'Linear Regression Intercept ',\n 'LINEARREG_SLOPE' :'Linear Regression Slope ',\n 'STDDEV' :'Standard Deviation ',\n 'TSF' :'Time Series Forecast ',\n 'VAR' :'Variance '}\n self.create_buttons(tab0,list(self.Grp_custom.keys()))\n \n self.create_label1(tab0,'Symbol:',3,2)\n self.entrys.addsymb=self.create_entry1(tab0,3,3)\n \n self.create_label1(tab0,'From File:',4,2)\n self.entrys.loadsymb=self.create_entry1(tab0,4,3)\n \n self.create_label1(tab0,'Period',5,2)\n self.entrys.pd=self.create_entry1(tab0,5,3)\n self.create_label1(tab0,'Ndays',5,4)\n self.entrys.ndays=self.create_entry1(tab0,5,5)\n \n self.create_buttons(tab1,list(self.Grp_overlay.keys()))\n self.create_buttons(tab2,list(self.Grp_momentum.keys())) \n self.create_buttons(tab3,list(self.Grp_volum.keys())) \n self.create_buttons(tab4,list(self.Grp_cycle_ind.keys())) \n self.create_buttons(tab5,list(self.Grp_price_trans.keys())) \n self.create_buttons(tab6,list(self.Grp_volatility.keys())) \n self.create_buttons(tab7,list(self.Grp_pattern.keys())) \n self.create_buttons(tab8,list(self.Grp_stat.keys())) \n \n \n # - - - - - - - - - - - - - - - - - - - - -\n # Quit button in the lower right corner\n quit_button = ttk.Button(self.window, text=\"Quit\", command=self.window.destroy)\n quit_button.grid(row=1, column=3)\n#%% \nclass AddTreeView():\n def __init__(self, parent,g):\n self.tree=ttk.Treeview(parent)\n self.g=g\n for symb in g.d.symb:\n self.tree.insert(\"\" , \"end\", text=symb)\n self.tree.bind(\"\", self.OnDoubleClick)\n def OnDoubleClick(self,event):\n item = self.tree.selection()[0]\n print(\"you clicked on\", self.tree.item(item,\"text\"))\n self.g.plot_tk(self.tree.item(item,\"text\"))\n def update(self,symbs):\n self.tree.delete(*self.tree.get_children())\n for symb in symbs:\n self.tree.insert(\"\" , \"end\", text=symb)\n self.tree.bind(\"\", self.OnDoubleClick)","sub_path":"objs/ControllerObj.py","file_name":"ControllerObj.py","file_ext":"py","file_size_in_byte":25718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"57633022","text":"import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nsearch_root = sys.argv[1]\nfiles = os.listdir(search_root)\nfiles = [file for file in files if file.endswith('.tfrecord')]\noptions = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)\n\nfor file in tqdm(files):\n input_path = os.path.join(search_root, file)\n output_path = file\n writer = tf.python_io.TFRecordWriter(output_path, options=options)\n for record in tf.python_io.tf_record_iterator(input_path):\n example = tf.train.Example()\n example.ParseFromString(record)\n\n audio = example.features.feature['audio'].float_list.value\n pitch = example.features.feature['pitch'].int64_list.value[0]\n\n freq = 2 ** ((pitch - 69) / 12.0) * 440\n\n for i in range(4):\n start = (i + 1) * 1024\n end = start + 1024\n segment = audio[start:end]\n if np.linalg.norm(segment) <= 1e-6:\n continue\n example = tf.train.Example(features=tf.train.Features(feature={\n \"audio\": tf.train.Feature(float_list=tf.train.FloatList(value=segment)),\n \"pitch\": tf.train.Feature(float_list=tf.train.FloatList(value=[freq]))\n }))\n writer.write(example.SerializeToString())\n writer.close()\n","sub_path":"scripts/convert_nsynth.py","file_name":"convert_nsynth.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"276174225","text":"from rest_framework import generics\nfrom rest_framework.permissions import IsAdminUser\nfrom .serializers import ClassroomSerializer, ClassroomListSerializer\n\nfrom .models import Classroom\n\n# Create your views here.\n\n\nclass ClassroomListAPIView(generics.ListAPIView):\n queryset = Classroom.objects.all()\n serializer_class = ClassroomListSerializer\n\n\nclass ClassroomListCreateAPIView(generics.ListCreateAPIView):\n queryset = Classroom.objects.all()\n serializer_class = ClassroomSerializer\n\n def get_queryset(self):\n owner = self.request.user\n return Classroom.objects.filter(instructor=owner)\n\n\nclass ClassroomEditAPIView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Classroom.objects.all()\n serializer_class = ClassroomSerializer\n\n\nclass ClassroomAdminAPIView(generics.RetrieveDestroyAPIView):\n queryset = Classroom.objects.all()\n serializer_class = ClassroomSerializer\n","sub_path":"classrooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"203524664","text":"import math\r\n\r\nclass Snippets:\r\n def __init__(self, bm25_ranking, hashmap, queries):\r\n self.ranking = bm25_ranking\r\n self.h = hashmap\r\n self.queries = queries\r\n self.index = self.get_index()\r\n self.idf = self.idfv()\r\n\r\n def get_list_of_senences(self, filepath_with_name):\r\n f=open(filepath_with_name,'r')\r\n k=f.read()\r\n k=k.split()\r\n topk=12\r\n sentences=[]\r\n count=0\r\n while count len(k):\r\n for i in range(count,len(k)):\r\n sentence=sentence+\" \"+k[i]\r\n else:\r\n for i in range(count,count+topk):\r\n sentence=sentence+\" \"+k[i]\r\n sentences.append(sentence)\r\n count=count+topk\r\n return sentences[:]\r\n \r\n def createIndex(self, dic,filename):\r\n path=\"tokenized_corpus/\"\r\n d=dic.copy()\r\n docId = filename\r\n f= open(path+filename,'r')\r\n k = f.read()\r\n k = k.split()\r\n term_count = len(set(k))\r\n for c in range(0, len(k)- 1):\r\n item = k[c]\r\n if item in d:\r\n if docId in d[item]:\r\n d[item][docId] = d[item][docId] + 1\r\n else:\r\n d[item][docId] = 1\r\n else:\r\n d[item] = {docId: 1}\r\n return d.copy()\r\n\r\n def get_index(self):\r\n dic={}\r\n for name in self.h.values():\r\n dic=self.createIndex(dic.copy(),name)\r\n return dic.copy()\r\n\r\n def get_sentences_for_all_topkfiles(self, filenames):\r\n s=[]\r\n for filename in filenames:\r\n filename=\"C:/Users/saura/Downloads/tokenized_corpus/tokenized_corpus/\"+filename\r\n s.extend(self.get_list_of_senences(filename))\r\n return s\r\n\r\n def idfv(self ):\r\n idf_uni={}\r\n N=len(self.index)\r\n for k,v in self.index.items():\r\n idf_uni[k]=math.log10(N*1.0/len(self.index[k]))\r\n return idf_uni.copy()\r\n\r\n def queryVec(self,query):\r\n qv={}\r\n q=query.split()\r\n N=len(q)\r\n for k in q:\r\n if k in self.idf:\r\n qv[k]=(float(1.0*self.occurenceofword(k,query))/N)*self.idf[k]\r\n else:\r\n qv[k]=(1.0*self.occurenceofword(k,query)/N)*1\r\n return qv.copy()\r\n\r\n def occurenceofword(self,word,query):\r\n q=query.split()\r\n count=0\r\n for k in q:\r\n if word==k:\r\n count=count+1\r\n return count*1.0\r\n\r\n def Cosine(self, sentences,uni,query):\r\n cv={}\r\n for sentence in sentences:\r\n dv=self.queryVec(sentence)\r\n qv=self.queryVec(query)\r\n sumdv=self.sqrsum(dv)\r\n sumqv=self.sqrsum(qv)\r\n c=0\r\n for k1, v1 in uni.items():\r\n if k1 in dv and k1 in qv:\r\n c=c+(dv[k1]*qv[k1]*1.0)\r\n c=c/math.sqrt(sumdv*sumqv)\r\n cv[sentence]=c\r\n return cv.copy()\r\n\r\n def sqrsum(self,dv):\r\n c=0\r\n for k,v in dv.items():\r\n c=c+v*v\r\n return c\r\n\r\n def get_snippet(self,cv):\r\n l=sorted(cv, key=cv.get,reverse=True)\r\n return l[:10]\r\n\r\n def get_snippets_all(self):\r\n sn={}\r\n cacmTopk=[]\r\n topk=5\r\n for j in range (0, len(self.ranking)):\r\n snippet={}\r\n for i in range(0,topk):\r\n cacmTopk.append(self.ranking[j+1][i][0]) #top 10 for query 1\r\n filenames=[]\r\n for docid in cacmTopk:\r\n filenames.append(self.h[str(docid)])\r\n for filename in filenames:\r\n filepath_with_name=\"tokenized_corpus/\"+filename\r\n sentences=self.get_list_of_senences(filepath_with_name)\r\n\r\n cv = self.Cosine(sentences,self.index, self.queries[j])\r\n snippet[filename]=cv\r\n new_dic={}\r\n for k,v in snippet.items():\r\n new_dic=self.makeDic(new_dic,v)\r\n\r\n l=sorted(new_dic,key=new_dic.get,reverse=True)\r\n l=l[:5]\r\n sn[j+1]=l\r\n\r\n x = self.highlight2(sn)\r\n return x\r\n\r\n def highlight2(self, sn):\r\n count=0\r\n sn2={}\r\n for k,v in sn.items():\r\n ls=[]\r\n for sentence in v:\r\n s=\"\"\r\n k=sentence.split()\r\n for word in k:\r\n if word in self.queries[count].split():\r\n word=word.upper()\r\n sentence=sentence+\" \"+word\r\n ls.append(sentence)\r\n sn2[count]=ls\r\n count=count+1\r\n return sn2\r\n\r\n def makeDic(self,updateDic,d):\r\n for k, v in d.items():\r\n if k not in updateDic.keys():\r\n updateDic[k]=v\r\n return updateDic\r\n\r\n def get_key(self,ele,snippet):\r\n for k,v in snippet.items():\r\n if ele in v.values():\r\n return k\r\n \r\n \r\n \r\n\r\n\r\n\r\n \r\n ","sub_path":"snippet_genertaion.py","file_name":"snippet_genertaion.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"216971198","text":"# coding=utf-8\n\"\"\"\n文件读写: 一个函数(open),三个方法(read、write、close)\nopen(): 默认r只读,w只写(有内容就覆盖),a追加,r+可读可写;操作图片、视频等二进制文件: rb,wb,ab\nread(size): 不写size就一次读取所有行,返回str,执行完指针会移动到文件末尾\nreadline(): 每次读取一行,返回str,执行完指针会移到下一行,包括 \"\\n\" 字符\nreadlines(): 一次读取所有行,返回list,每行都是一个元素\n f.readlines()[1:]可以通过切片指定读取哪些行\n注意:read()和readlines()会把文件所有内容读取到内存,数据量大的话慎用!\ntell(): 获取当前文件位置\nseek(offset, from): 调整当前文件位置\n offset: 偏移量(注意:utf-8格式中文占3个字节,gbk格式中文占2个字节)\n from: 方向 0表示文件开头 1表示当前位置 2表示文件结尾(python3目前只能写0!)\n\n文件操作: 导入os模块\nos.rename(path1, path2): 重命名\nos.remove(): 刪除文件\nos.mkdir(): 创建文件夹\nos.getcwd(): 获取当前目录\nos.listdir(): 遍历指定目录下所有文件(夹),返回list列表\nos.rmdir(): 删除文件夹\nos.path.isfile(): 判断是否是文件\nos.path.isdir(): 判断是否是文件夹\nos.path.getsize(filename): 获取文件大小,求文件夹大小的话需要递归遍历所有文件\n\"\"\"\n\nimport os\nimport json\n\ndef test01():\n \"\"\"\n 文件读写\n \"\"\"\n\n # 1、打开文件\n file1 = open(\"E://aaa.txt\", encoding='utf-8')\n file2 = open(\"E://bbb.txt\", \"w\")\n # 2、读写文件\n while True:\n text = file1.readlines()\n print(type(text))\n if not text:\n break\n for t in text:\n file2.write(t)\n # 3、关闭文件\n file1.close()\n file2.close()\n\n\ndef digui(path, suffix):\n \"\"\"\n 递归文件夹做相关操作\n \"\"\"\n\n files = os.listdir(path)\n for file in files:\n if os.path.isfile(path + file):\n if file.endswith(suffix):\n file_new = file.replace(\"龙天论坛\", \"\")\n os.rename(path + file, path + file_new)\n else:\n digui(path + file + \"/\", suffix)\n\n\ndef test03():\n \"\"\"\n 由于字符串的replace()方法是生成新的结果,原字符串不变,所以要生成新文件\n \"\"\"\n\n with open(\"C://Users/chenq/Desktop/shop.sql\", \"r\", encoding=\"utf-8\") as f1:\n lines = json.loads(json.dumps(f1.readlines()).replace(\"`\", \"\"))\n\n with open(\"C://Users/chenq/Desktop/shop.sql\", \"w\", encoding=\"utf-8\") as f2:\n f2.write(lines[0])\n for line in lines[1:]:\n if (\"PRIMARY\" and \" KEY \") not in line:\n if \"(\" in line:\n index = line.find(\"(\")\n f2.write(line.replace(line[index:-2], \"\").replace(\"varchar\", \"string\"))\n elif \" date\" in line:\n index = line.find(\" date\")\n f2.write(line.replace(line[index + 5:-2], \"\").replace(\" date\", \" string\"))\n elif \" timestamp \" in line:\n index = line.find(\" timestamp\")\n f2.write(line.replace(line[index + 10:-2], \"\").replace(\" timestamp\", \" string\"))\n elif \"ENGINE\" in line:\n f2.write(line[:1] + \"\\n\")\n else:\n pass\n f2.write(\"comment ''\\nROW FORMAT DELIMITED\\nFIELDS TERMINATED BY '\\\\001'\\nLINES TERMINATED BY '\\\\n'\\nSTORED AS TEXTFILE;\")\n\n\nif __name__ == \"__main__\":\n # test01()\n # digui(\"D://学习资料/python数据分析与机器学习实战/python数据分析与机器学习实战/\", \".flv\")\n test03()\n","sub_path":"object/10_文件.py","file_name":"10_文件.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"537191612","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport os\nimport jinja2\nimport lib.models\nimport logging\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nimport time\nimport private\nimport lib.markdown2\n\nclass Handler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\nclass BlogFront(Handler):\n\n def allposts(self):\n key = 'posts'\n posts = memcache.get(key)\n logging.error(posts)\n if posts is None:\n posts = db.GqlQuery(\"SELECT * FROM Blogentry WHERE published = 'True' ORDER BY created DESC\")\n posts = list(posts)\n memcache.set(key, posts)\n memcache.set('posts_cachetime', time.time())\n logging.error('CACHED')\n return posts\n\n def get(self):\n blogentries = self.allposts()\n #for blogentry in blogentries:\n # blogentry.text[:400]\n self.render(\"blogfront.html\",blogentries=blogentries)\n\nclass PermalinkPage(Handler):\n\n def get(self, blog_id):\n\n entry = memcache.get(str(blog_id))\n if entry is None:\n entry = lib.models.Blogentry.get_by_id(int(blog_id))\n memcache.set(str(blog_id),entry)\n memcache.set('%s_ct' % blog_id, time.time())\n logging.error(int(blog_id))\n self.render('postpage.html',blogentry=entry, cachetime=(time.time() - memcache.get('%s_ct' % blog_id)))\n\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=True)\njinja_env.filters['markdown'] = lib.markdown2.markdown\napp = webapp2.WSGIApplication([('/', BlogFront),\n ('/(\\d+)', PermalinkPage),\n ('/private/?', private.PrivatePage),\n ('/private/newpost/?', private.NewPostPage),\n ('/private/?', private.PrivatePage),\n ('/private/edit', private.EditPostPage)\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"199629474","text":" #-*- coding:utf-8 -*-\n\nimport sys\n\nfrom django import template\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.templatetags.static import static\n\nfrom classytags.core import Tag, Options\nfrom classytags.helpers import InclusionTag\nfrom importlib import import_module\n\n\ndef get_extension(setting, default, context, *args, **kwargs):\n extension = getattr(settings, setting, default)\n if isinstance(extension, dict):\n curr_url = context.get('request').path\n for key in extension:\n admin_site_mod, admin_site_inst = key.rsplit('.', 1)\n admin_site_mod = import_module(admin_site_mod)\n admin_site = getattr(admin_site_mod, admin_site_inst)\n admin_url = reverse('%s:index' % admin_site.name)\n if curr_url.startswith(admin_url):\n mod, inst = extension[key].rsplit('.', 1)\n mod = import_module(mod)\n return getattr(mod, inst)\n else:\n mod, inst = extension.rsplit('.', 1)\n mod = import_module(mod)\n return getattr(mod, inst)\n raise ValueError('Extension matching \"%s\" not found' % dashboard_cls)\n\n\ndef get_navbar(context):\n return get_extension('GRAPPELLI_EXTENSIONS_NAVBAR',\n 'grappelli_extensions.navbar.Navbar',\n context)\n\n\ndef get_sidebar(context):\n return get_extension('GRAPPELLI_EXTENSIONS_SIDEBAR',\n 'grappelli_extensions.navbar.Navbar',\n context)\n\n\ndef get_theme():\n return getattr(settings, 'GRAPPELLI_THEME', None)\n\n\ndef has_perms(request, params):\n if 'perm' in params:\n perms = [params['perm']]\n else:\n perms = params.get('perms', [])\n if perms:\n perms = [request.user.has_perm(p) for p in perms]\n return any(perms)\n return True\n\n\ndef get_children(Navbar, request):\n children = []\n for node in Navbar.nodes:\n if node.__class__.__name__.endswith(\"Node\"):\n title, params = node.as_tuple()\n else:\n title, params = node\n\n if not has_perms(request, params):\n continue\n\n nodes = params.get(\"nodes\", [])\n url = params.get('url')\n root = {'title': title, 'children': [], 'url': url}\n for node in nodes:\n if node.__class__.__name__.endswith(\"Node\"):\n title, params = node.as_tuple()\n else:\n title, params = node\n\n url = params.get('url')\n node = {'title': title, 'url': url}\n if has_perms(request, params):\n root['children'].append(node)\n\n if root['children'] or root['url']:\n children.append(root)\n return children\n\n\nclass GrappelliNavbar(InclusionTag):\n name = 'grappelli_navbar'\n template = 'grappelli/navbar.html'\n\n def get_context(self, context):\n navbar = get_navbar(context)\n return {'children': get_children(navbar, context['request'])}\n\n\nclass GrappelliSidebar(InclusionTag):\n name = 'grappelli_sidebar'\n template = 'grappelli/sidebar.html'\n\n def get_context(self, context):\n sidebar = get_sidebar(context)\n return {\n 'sidebar_children': get_children(sidebar, context['request']),\n 'request': context['request']\n }\n\n\nclass GrappelliHasSidebar(Tag):\n name = 'grappelli_has_sidebar'\n options = Options(\n blocks=[('endsidebar', 'nodelist')],\n )\n\n def render_tag(self, context, nodelist):\n output = ''\n sidebar = get_sidebar()\n if len(sidebar.nodes):\n output = nodelist.render(context)\n return output\n\n\nclass GrappelliTheme(Tag):\n name = 'grappelli_theme'\n\n def render_tag(self, context):\n theme = get_theme()\n if not theme:\n return ''\n\n theme_static_url = static('css/%s.css' % (theme, ))\n output = ' ' % (theme_static_url, )\n return output\n\n\nregister = template.Library()\nregister.tag(GrappelliNavbar)\nregister.tag(GrappelliSidebar)\nregister.tag(GrappelliHasSidebar)\nregister.tag(GrappelliTheme)\n","sub_path":"grappelli_extensions/templatetags/grappelli_navbar.py","file_name":"grappelli_navbar.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"95933501","text":"from django.contrib import admin\nfrom .models import Order, Sticker\nfrom django.contrib.auth.models import Group\nfrom django.utils.safestring import mark_safe\n# Register your models here.\n\n\nadmin.site.site_header = \"Tawabe3 Admin Dashboard\"\nadmin.site.site_title = \"tawabe3 admin\"\nadmin.site.index_title = \"Welcome to Tawabe3\"\nadmin.site.unregister(Group)\n\n@admin.register(Order)\nclass OrderAdmin(admin.ModelAdmin):\n search_fields = ['full_name', 'mobile_number']\n list_filter = ['ordered_on', 'delivery_on' ,'order_status', 'delivery_status']\n\n fieldsets = [\n ('Contact Details', {'fields': ['full_name', \n 'mobile_number']}),\n\n ('Delivery Details', {'fields': ['building_number', \n 'street_name',\n 'area',\n 'city',\n 'delivery_on',\n 'delivery_status']}),\n\n ('Order Details', {'fields': ['name_field', \n 'class_field',\n 'school_field',\n 'sticker_name',\n 'sticker_id',\n 'stickerImage',\n 'quantity',\n 'order_status',\n 'ordered_on']}),\n\n ('Invoice Details', {'fields': ['order_fees', \n 'delivery_fees',\n 'total_fees']}),\n ]\n list_display = (\n 'pk',\n 'full_name',\n 'ordered_on',\n 'total_fees',\n 'city',\n 'completed',\n 'delivered')\n \n \n\n\n@admin.register(Sticker)\nclass StickerAdmin(admin.ModelAdmin):\n readonly_fields = [\"id\", \"sticker_display\"]\n list_display = (\n \"id\",\n \"name\",\n \"category\"\n )\n def sticker_display(self, obj):\n return mark_safe(' '.format(\n url = obj.sticker_image.url,\n width=obj.sticker_image.width*0.3,\n height=obj.sticker_image.height*0.3))\n","sub_path":"myapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"235119732","text":"#!/usr/bin/python3\n\n'''\nThe listening port must be the value of the environment variable HBNB_API_PORT\nThe IP bound to must be the value of the environment variable HBNB_API_HOST\nGET / must return \"Holberton School\"\nGET /c must return \"C is fun!\"\nYou must use the option strict_slashes=False in your route definition\nYour code should not be executed when imported\n(by using if __name__ == \"__main__\":)\n'''\n\nfrom flask import Flask\nimport os\n\napp = Flask(__name__)\nHBNB_API_PORT = os.environ.get('HBNB_API_PORT')\nHBNB_API_HOST = os.environ.get('HBNB_API_HOST')\n\n\n@app.route('/', methods=['GET'], strict_slashes=False)\ndef run_flask():\n '''\n start flask application\n '''\n return \"Holberton School\"\n\n\n@app.route('/c', methods=['GET'], strict_slashes=False)\ndef retunC():\n '''\n c route\n '''\n return \"C is fun!\"\n\n\nif __name__ == \"__main__\":\n app.run(host=HBNB_API_HOST, port=HBNB_API_PORT)\n","sub_path":"0x02-restful_api_users/2-warmup_flask.py","file_name":"2-warmup_flask.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"20630398","text":"class Library:\n def __init__(self):\n self.bookname=\"\"\n self.author=\"\"\ndef getdata(self):\n self.bookname = input(\"Enter Name of the Book: \")\n self.author = input(\"Enter Author of the Book: \")\ndef display(self):\n print(\"Name of the Book: \",self.bookname)\n print(\"Author of the Book: \",self.author)\n print(\"\\n\")\nbook=[] #empty list\nch = 'y'\nwhile(ch=='y'):\n print(\"1. Add New Book \\n 2.Display Books\")\n resp = int(input(\"Enter your choice : \"))\nif(resp==1):\n L=Library()\n L.getdata()\n book.append(L)\nelif(resp==2):\n for x in book:\n x.display()\nelse:\n print(\"Invalid input....\")\nch = input(\"Do you want continue....\")","sub_path":"page177_2.py","file_name":"page177_2.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"1619493","text":"from src.base import BaseGraph\nfrom src.utils.logger import get_logger\n\nlogger = get_logger(__name__)\n\n__all__ = [\n \"SelectorBase\",\n]\n\n\nclass SelectorBase(BaseGraph):\n def __init__(self, name, parent, *args, **kwargs):\n super(SelectorBase, self).__init__(name=name, parent=parent, meta=kwargs.get(\"meta\", None))\n","sub_path":"src/selectors/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"47047759","text":"import numpy as np\nimport os\nimport random\nimport h5py\nimport cv2\nfrom six.moves import cPickle as pickle\nfrom tensorflow.python.platform import gfile\nimport glob\nimport TensorflowUtils as utils\nimport scipy.io\n\ndef read_dataset(data_dir):\n allsplit_dir = os.path.join(data_dir, 'SUNRGBDtoolbox/traintestSUNRGBD/allsplit.mat')\n images_dir = os.path.join(data_dir, 'SUNRGBD')\n SUNRGBDMeta_dir = os.path.join(data_dir, 'SUNRGBDtoolbox/Metadata/SUNRGBDMeta.mat')\n SUNRGBD2Dseg_dir = os.path.join(data_dir, 'SUNRGBDtoolbox/Metadata/SUNRGBD2Dseg.mat')\n SUNRGBD2Dseg = h5py.File(SUNRGBD2Dseg_dir, mode='r', libver='latest')\n\n SUNRGBDMeta = scipy.io.loadmat(SUNRGBDMeta_dir, squeeze_me=True,\n struct_as_record=False)['SUNRGBDMeta']\n split = scipy.io.loadmat(allsplit_dir, squeeze_me=True, struct_as_record=False)\n trainval = split['alltrain']\n test = split['alltest']\n\n return create_image_lists(data_dir, SUNRGBDMeta, SUNRGBD2Dseg, trainval, test)\n\ndef create_image_lists(data_dir, SUNRGBDMeta, SUNRGBD2Dseg, split_train, split_test):\n image_list_train = []\n image_list_test = []\n\n seglabel = SUNRGBD2Dseg['SUNRGBD2Dseg']['seglabel']\n\n for i, meta in enumerate(SUNRGBDMeta):\n meta_dir = '/'.join(meta.rgbpath.split('/')[:-2])\n real_dir = meta_dir.replace('/n/fs/sun3d/data', data_dir)\n rgb_path = os.path.join(real_dir, 'image/' + meta.rgbname)\n\n label_path = os.path.join(real_dir, 'label/label.npy')\n label_img_path = os.path.join(real_dir, 'label/label.png')\n\n if not os.path.exists(label_img_path):\n os.makedirs(os.path.join(real_dir, 'label'), exist_ok=True)\n label = np.array(SUNRGBD2Dseg[seglabel.value[i][0]].value.transpose(1, 0))\n #np.save(label_path, label)\n cv2.imwrite(label_img_path, label)\n\n record = {'image': rgb_path, 'annotation': label_img_path, 'filename': str(meta.rgbname)}\n\n if meta_dir in split_train:\n image_list_train.append(record)\n else:\n image_list_test.append(record)\n\n random.shuffle(image_list_train)\n random.shuffle(image_list_test)\n no_of_images_train = len(image_list_train)\n no_of_images_test = len(image_list_test)\n print ('No. of %s train files: %d' % (type, no_of_images_train))\n print('No. of %s test files: %d' % (type, no_of_images_test))\n return image_list_train, image_list_test","sub_path":"read_SUNRGBDData.py","file_name":"read_SUNRGBDData.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"230341166","text":"from __future__ import print_function\nimport argparse\n\nimport cv2\nimport torch\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom mcnn.crowd_count import CrowdCounter\nfrom mcnn import network\n\nis_cuda = False\ntorch.backends.cudnn.enabled = is_cuda\ntorch.backends.cudnn.benchmark = False\n\n\ndef image_to_array(img):\n img = img.astype(np.float32, copy=False)\n ht, wd = img.shape[:2]\n img = img.reshape((1, 1, ht, wd))\n return img\n\n\ndef run(net, im_data):\n den = net(im_data)\n den = den.data.cpu().numpy()\n et_count = np.sum(den)\n return den, et_count\n\n\ndef show(frame, den, count):\n den = den[0][0]\n den = 255 * den / np.max(den)\n den = den.astype(np.uint8, copy=False)\n den = cv2.equalizeHist(den)\n den = cv2.applyColorMap(den, cv2.COLORMAP_JET)\n for i, a in enumerate(den):\n for j, b in enumerate(a):\n if np.array_equal(b, [128, 0, 0]):\n den[i, j] = [255, 255, 255]\n\n overlay = cv2.resize(den, frame.shape[:2][::-1])\n alpha = .4\n print(overlay.shape, frame.shape)\n cv2.addWeighted(overlay, alpha, frame, 1-alpha, 0, frame)\n\n txt = 'cnt: %d' % count\n cv2.putText(frame, txt, (50, 50),\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)\n\n cv2.imshow('frame', frame)\n cv2.imshow('density', den)\n return cv2.waitKey(1) in (ord('q'), 27) # q or ESC\n\n\ndef parse_args():\n p = argparse.ArgumentParser()\n p.add_argument('video')\n p.add_argument('--model-path', '-M',\n default='./final_models/mcnn_shtechB_110.h5')\n p.add_argument('--estimate-rate', '-r', default=10)\n p.add_argument('--resize-fx', '-x', default=.5)\n return p.parse_args()\n\n\ndef main():\n args = parse_args()\n\n net = CrowdCounter(is_cuda)\n network.load_net(args.model_path, net)\n if is_cuda:\n net.cuda()\n print('eval:', net.eval())\n\n video = cv2.VideoCapture(args.video)\n nframe = 0\n hist = []\n while 1:\n ok, frame = video.read()\n frame = cv2.resize(frame, (0, 0), fx=args.resize_fx, fy=args.resize_fx)\n nframe += 1\n if nframe % args.estimate_rate != 0:\n continue\n print('nframe', nframe)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n imgarr = image_to_array(gray)\n\n den, cnt = run(net, imgarr)\n \"\"\"\n if len(hist) > 5:\n hist.pop(0)\n hist.append(den)\n histden = sum(hist) / len(hist)\n \"\"\"\n if show(frame, den, cnt):\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"videodemo.py","file_name":"videodemo.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"196230101","text":"\"\"\" Test script for using pygame to read dualshock 4 (ds4) with ds4drv running as \na daemon.\n\nDS4 Controller axis maps:\nAxis0: Left stick l-r (-1 left, 1 right)\nAxis1: Left stick u-d (-1 up, ` 1 down)\nAxis2: Left Trigger (-1 unpressed, 1 completely pressed)\nAxis3: Right stick l-r (-1 left, 1 right)\nAxis4: Right stick u-d (-1 up, 1 down)\nAxis5: Right trigger (-1 unpressed, 1 completely pressed)\n\"\"\"\nimport pygame\nimport wiringpi\nfrom time import sleep\n\n#Defines for button numbers\nBUTTON_SQUARE = 3\nBUTTON_CROSS = 0\nBUTTON_CIRCLE = 1\nBUTTON_TRIANGLE = 2\nBUTTON_L1 = 4\nBUTTON_R1 = 5\nBUTTON_L2 = 6\nBUTTON_R2 = 7\nBUTTON_SHARE = 8\nBUTTON_OPTIONS = 9\nBUTTON_LEFT_STICK = 11\nBUTTON_RIGHT_STICK = 12\nBUTTON_PS = 10\nBUTTON_BUTTON_PAD = 13\n\n#create array to track button presses\nbutton = {}\n\n#initialize DS4 controller\nscreen = pygame.display.set_mode([100,100]) # make a 10x10 window\npygame.joystick.init() #find the joysticks\ncontroller = pygame.joystick.Joystick(0)\ncontroller.init()\n\nfor i in range(controller.get_numbuttons()):\n button[i] = False\n\nif(controller.get_name()=='Wireless Controller'):\n print(\"DS4 connected\")\nelse:\n print(\"Not a DS4\")\n\nMotor1PWM \t= 1 \t\t\t# gpio pin 12 = wiringpi no 1 (BCM 18)\nMotor1AIN1 \t= 4 \t\t\t# gpio pin 16 = wiringpi no. 4 (BCM 23)\nMotor1AIN2 \t= 5 \t\t\t# gpio pin 18 = wiringpi no. 5 (BCM 24)\nMotorStandby \t= 6 \t\t\t# gpio pin 22 = wiringpi no. 6 (BCM 25)\nMotor2PWM \t= 23 \t\t\t# gpio pin 33 = wiringpi no. 23 (BCM 13)\nMotor2BIN1 \t= 21 \t\t\t# gpio pin 29 = wiringpi no. 21 (BCM 5)\nMotor2BIN2 \t= 22 \t\t\t# gpio pin 31 = wiringpi no. 22 (BCM 6)\n \n# Initialize PWM output\nwiringpi.wiringPiSetup()\nwiringpi.pinMode(Motor1PWM, 2) \t# PWM mode\nwiringpi.pinMode(Motor1AIN1, \t 1) \t# Digital out mode\nwiringpi.pinMode(Motor1AIN2, \t 1) \t# Digital out mode\nwiringpi.pinMode(MotorStandby, \t 1) \t# Ditial out mode\n \nwiringpi.pinMode(Motor2PWM, \t 2)\t# PWM mode\nwiringpi.pinMode(Motor2BIN1, 1) \t# Digital out mode\nwiringpi.pinMode(Motor2BIN2, 1) \t# Digital out mode\n \nwiringpi.pwmWrite(Motor1PWM, 0)\t# OFF\nwiringpi.pwmWrite(Motor2PWM, 0) # OFF\nwiringpi.digitalWrite(Motor1AIN1, 1) \t#forward mode\nwiringpi.digitalWrite(Motor1AIN2, 0) \t#forward mode\nwiringpi.digitalWrite(Motor2BIN1, 1)\nwiringpi.digitalWrite(Motor2BIN2, 0)\nwiringpi.digitalWrite(MotorStandby, 1) \t#enabled\n \n# Set Motor Speed\ndef motorspeed(speed1, speed2):\n wiringpi.pwmWrite(Motor1PWM, speed1) #motorspeed from -480 to 480\n wiringpi.pwmWrite(Motor2PWM, speed2) \n\ndef Scale_Speed(speed):\n speed + 1 # speed -(-1) lowest range of measurement\n range = 1 + 1 # 1-(-1) range of measurement\n speed = speed / range\n d_range = 480 + 480 # 480 -(-480) range of target\n speed = speed * d_range\n speed = speed - 480 # initialize to lowest point\n return speed\n\n#print(\"Analog Inputs:\\n\")\nprint(\"Button Inputs:\\n\")\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.JOYBUTTONDOWN:\n button[event.button] = True\n if event.type == pygame.JOYBUTTONUP:\n button[event.button] = False\n\n l_horz = controller.get_axis(0)\n l_vert = controller.get_axis(1)\n l_trig = controller.get_axis(2)\n r_horz = controller.get_axis(3)\n r_vert = controller.get_axis(4)\n r_trig = controller.get_axis(5)\n\n print(\" \", end='\\r')\n# print(\"LV: \", round(l_vert,3), \"LH: \", round(l_horz,3), \"LT: \", round(l_trig,3), \"RV: \", round(r_vert,3), \"RH: \", round(r_horz,3), \"RT: \", round(r_trig,3), end='\\r')\n\n print(\"A: \", button[BUTTON_CROSS], \"B: \", button[BUTTON_CIRCLE], \"X: \", button[BUTTON_SQUARE], \"Y: \", button[BUTTON_TRIANGLE], \"L1: \", button[BUTTON_L1], \"L2: \", button[BUTTON_L2], \"R1: \", button[BUTTON_R1], \"R2: \", button[BUTTON_R2], \"LS: \", button[BUTTON_LEFT_STICK], \"RS: \", button[BUTTON_RIGHT_STICK], \"Shr: \", button[BUTTON_SHARE], \"Opt: \", button[BUTTON_OPTIONS], \"PS: \", button[BUTTON_PS], \" \", end='\\r')\n\n speed = Scale_Speed(l_vert)\n\n speed1 = (speed)\n speed2 = speed1\n motorspeed(int(speed1),int(speed2))\n sleep(0.07) #limit the frequency to 50Hz\n\n\n\"\"\"\nDS4 Controller axis maps:\nAxis0: Left stick l-r (-1 left, 1 right)\nAxis1: Left stick u-d (-1 up, ` 1 down)\nAxis2: Left Trigger (-1 unpressed, 1 completely pressed)\nAxis3: Right stick l-r (-1 left, 1 right)\nAxis4: Right stick u-d (-1 up, 1 down)\nAxis5: Right trigger (-1 unpressed, 1 completely pressed)\n\"\"\"\n","sub_path":"bluetooth_control.py","file_name":"bluetooth_control.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"100102534","text":"import numpy as np\nimport pandas as pd\nimport time\n\nimport local_environment as local\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\ndf = pd.read_csv('../data/clean/train_clean.csv')\ndf_targets = pd.read_csv('../data/clean/train_labels.csv')\ndf_purchasers = pd.read_csv('../data/clean/train_purchasers.csv', index_col='index')\n\nkey = 'PREV_PRODS_RF_ORIGINAL'\nprint(key)\n\ns = time.time()\n\nf = open('final.txt', 'a')\nf.write('\\n' + key)\nf.write('\\nInicio ' + time.strftime(\"%Y/%m/%d %H:%M\") + '\\n')\nf.close()\n\ndates = df['fecha_dato'].unique()\ndate_test = dates[-6:]\n\nresults = pd.DataFrame(columns=['date_test', 'score', 'amount_data', 'time'])\n\nfor i in range(1, len(date_test)):\n \n start = time.time()\n \n f = open('final.txt', 'a')\n f.write(str(date_test[i-1]) + ' - ' + str(date_test[i]))\n f.write('\\n') \n f.close()\n \n \n date_range = dates[:-(len(date_test)-i)]\n \n #Procesamiento de datos de entrenamiento\n \n df_aux = df.loc[df['fecha_dato'].isin(date_range[:-1]), ['ncodpers']].join(df_targets.loc[df.index]) \n df_aux = df_aux.groupby(['ncodpers']).sum()\n df_aux.reset_index(inplace=True)\n \n x_train = df.loc[df['fecha_dato'] == date_test[i-1]]\n y_train = df_targets.loc[x_train.index].reset_index(drop=True)\n x_train = x_train.merge(df_aux, on='ncodpers', how='left')\n x_train.replace(np.nan, 0, inplace=True)\n \n x = x_train.drop(['fecha_dato', 'fecha_alta'], axis=1).as_matrix()\n y = y_train.as_matrix()\n \n model = local.model(x, y, RandomForestClassifier(n_jobs=4))\n \n #Procesamiento de datos de test\n \n df_aux = df.loc[df['fecha_dato'].isin(date_range), ['ncodpers']].join(df_targets)\n df_aux = df_aux.groupby(['ncodpers']).sum()\n df_aux.reset_index(inplace=True)\n \n df_test = df.loc[df['fecha_dato'] == date_test[i]]\n y_test = df_targets.loc[df_test.index]\n \n x_test = df_test.merge(df_aux, on='ncodpers', how='left')\n x_test.replace(np.nan, 0, inplace=True)\n x_test = x_test.drop(['fecha_dato', 'fecha_alta'], axis=1).as_matrix()\n \n probs, preds = local.calculatePredsProbs(x_test, model)\n \n #Validación del modelo\n \n x_prev = df.loc[df['fecha_dato'] == date_test[i-1]]\n y_prev = df_targets.loc[x_prev.index]\n \n predicted, actual = local.processPredictions(probs, preds, x_prev, df_test, y_prev, y_test)\n \n score = local.mapk(actual, predicted, 7)\n \n end = time.time()\n \n results.loc[i] = [date_test[i], score, x_train.shape[0], end-start]\n\nresults.to_csv('results/'+key+'.csv', index=False)\ne = time.time()\nf = open('final.txt', 'a')\nf.write('\\nFinal ' + time.strftime(\"%Y/%m/%d %H:%M\") + '\\n')\nfinal_time = (e - s)/60\nf.write(str(final_time)+'\\n')\nf.close()\n \n\n","sub_path":"scripts/PREV_PRODS_PURCHASERS.py","file_name":"PREV_PRODS_PURCHASERS.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"497563195","text":"\"\"\" Defines a set of helper functions that make constructing hardware easier.\n\nThe set of functions includes\nas_wires: converts consts to wires if needed (and does nothing to wires)\nand_all_bits, or_all_bits, xor_all_bits: apply function across all bits\nparity: same as xor_all_bits\nmux: generate a multiplexer\nconcat: concatenate multiple wirevectors into one long vector\nget_block: get the block of the arguments, throw error if they are different\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\nimport keyword\nimport re\nimport six\n\nfrom .core import working_block\nfrom .pyrtlexceptions import PyrtlError, PyrtlInternalError\nfrom .wire import WireVector, Input, Output, Const, Register\n\n_rtl_assert_number = 1\n_probe_number = 1\n\n# -----------------------------------------------------------------\n# ___ __ ___ __ __\n# |__| |__ | |__) |__ |__) /__`\n# | | |___ |___ | |___ | \\ .__/\n#\n\n\ndef input_list(names, bitwidth=1):\n \"\"\" Allocate and return a list of Inputs. \"\"\"\n return wirevector_list(names, bitwidth, wvtype=Input)\n\n\ndef output_list(names, bitwidth=1):\n \"\"\" Allocate and return a list of Outputs. \"\"\"\n return wirevector_list(names, bitwidth, wvtype=Output)\n\n\ndef register_list(names, bitwidth=1):\n \"\"\" Allocate and return a list of Registers. \"\"\"\n return wirevector_list(names, bitwidth, wvtype=Register)\n\n\ndef wirevector_list(names, bitwidth=1, wvtype=WireVector):\n \"\"\" Allocate and return a list of WireVectors. \"\"\"\n if '/' in names and bitwidth != 1:\n raise PyrtlError('only one of optional \"/\" or bitwidth parameter allowed')\n names = names.replace(',', ' ')\n\n wirelist = []\n for fullname in names.split():\n try:\n name, bw = fullname.split('/')\n except:\n name, bw = fullname, bitwidth\n wirelist.append(wvtype(bitwidth=bw, name=name))\n return wirelist\n\n\ndef as_wires(val, bitwidth=None, truncating=True, block=None):\n \"\"\" Return wires from val which may be wires, integers, strings, or bools.\n\n :param val: a wirevector-like object or something that can be converted into\n a Const\n :param bitwidth: The bitwidth the resulting wire should be\n :param bool truncating: determines whether bits will be dropped to acheive\n the desired bitwidth if it is too long (if true, the most-significant-bits\n will be dropped)\n :param Block block: block to use for wire\n\n This function is mainly used to coerce values into WireVectors (for\n example, operations such as \"x+1\" where \"1\" needs to be converted to\n a Const WireVector.)\n \"\"\"\n from .memory import _MemIndexed\n block = working_block(block)\n\n if isinstance(val, (int, six.string_types)):\n # note that this case captures bool as well (as bools are instances of ints)\n return Const(val, bitwidth=bitwidth, block=block)\n elif isinstance(val, _MemIndexed):\n # covert to a memory read when the value is actually used\n return as_wires(val.mem._readaccess(val.index), bitwidth, truncating, block)\n elif not isinstance(val, WireVector):\n raise PyrtlError('error, expecting a wirevector, int, or verilog-style '\n 'const string got %s instead' % repr(val))\n elif bitwidth == '0':\n raise PyrtlError('error, bitwidth must be >= 1')\n elif val.bitwidth is None:\n raise PyrtlError('error, attempting to use wirevector with no defined bitwidth')\n elif bitwidth and bitwidth > val.bitwidth:\n return val.zero_extended(bitwidth)\n elif bitwidth and truncating and bitwidth < val.bitwidth:\n return val[:bitwidth] # truncate the upper bits\n else:\n return val\n\n\ndef match_bitwidth(*args):\n # TODO: allow for custom bit extension functions\n \"\"\" Matches the bitwidth of all of the input arguments\n\n :type args: WireVector\n :return tuple of args in order with extended bits\n \"\"\"\n max_len = max(len(wv) for wv in args)\n return (wv.zero_extended(max_len) for wv in args)\n\n\ndef probe(w, name=None):\n \"\"\" Print useful information about a WireVector when in debug mode.\n\n :type w: WireVector\n :type name: None or string\n :return: original WireVector w\n\n Probe can be inserted into a existing design easily as it returns the original wire unmodified.\n For example \"y <<= x[0:3] + 4\" could be turned into \"y <<= probe(x)[0:3] + 4\" to give visibility\n into both the origin of x (including the line that WireVector was originally created) and the\n run-time values of x (which will be named and thus show up by default in a trace. Likewise\n \"y <<= probe(x[0:3]) + 4\", \"y <<= probe(x[0:3] + 4)\", and \"probe(y) <<= x[0:3] + 4\" are all\n valid uses of probe. Note: probe does actually add wire to the working block of w (which can\n confuse various post-processing transforms such as output to verilog)\n \"\"\"\n global _probe_number\n if not isinstance(w, WireVector):\n raise PyrtlError('Only WireVectors can be probed')\n\n print('(Probe-%d)' % _probe_number, end=' ')\n print(get_stack(w))\n\n if name:\n pname = 'Probe%d_%s__%s)' % (_probe_number, name, w.name)\n else:\n pname = '(Probe%d__%s)' % (_probe_number, w.name)\n\n p = Output(name=pname)\n p <<= w # late assigns len from w automatically\n _probe_number += 1\n return w\n\n\ndef get_stacks(*wires):\n call_stack = getattr(wires[0], 'init_call_stack', None)\n if not call_stack:\n return ' No call info found for wires: use set_debug_mode() ' \\\n 'to provide more information\\n'\n else:\n return '\\n'.join(str(wire) + \":\\n\" + get_stack(wire) for wire in wires)\n\n\ndef get_stack(wire):\n if not isinstance(wire, WireVector):\n raise PyrtlError('Only WireVectors can be traced')\n\n call_stack = getattr(wire, 'init_call_stack', None)\n if call_stack:\n frames = ' '.join(frame for frame in call_stack[:-1])\n return \"Wire Traceback, most recent call last \\n\" + frames + \"\\n\"\n else:\n return ' No call info found for wire: use set_debug_mode()'\\\n ' to provide more information'\n\n\ndef rtl_assert(w, exp, block=None):\n \"\"\" Add hardware assertions to be checked on the RTL design.\n\n :param w: should be a WireVector\n :param Exception exp: Exception to throw when assertion fails\n :param Block block: block to which the assertion should be added (default to working block)\n :return: the Output wire for the assertion (can be ignored in most cases)\n\n If at any time during execution the wire w is not `true` (i.e. asserted low)\n then simulation will raise exp.\n \"\"\"\n\n global _rtl_assert_number\n\n block = working_block(block)\n\n if not isinstance(w, WireVector):\n raise PyrtlError('Only WireVectors can be asserted with rtl_assert')\n if len(w) != 1:\n raise PyrtlError('rtl_assert checks only a WireVector of bitwidth 1')\n if not isinstance(exp, Exception):\n raise PyrtlError('the second argument to rtl_assert must be an instance of Exception')\n if isinstance(exp, KeyError):\n raise PyrtlError('the second argument to rtl_assert cannot be a KeyError')\n if w not in block.wirevector_set:\n raise PyrtlError('assertion wire not part of the block to which it is being added')\n if w not in block.wirevector_set:\n raise PyrtlError('assertion not a known wirevector in the target block')\n\n if w in block.rtl_assert_dict:\n raise PyrtlInternalError('assertion conflicts with existing registered assertion')\n\n assertion_name = 'assertion%d' % _rtl_assert_number\n assert_wire = Output(bitwidth=1, name=assertion_name, block=block)\n assert_wire <<= w\n _rtl_assert_number += 1\n block.rtl_assert_dict[assert_wire] = exp\n return assert_wire\n\n\ndef check_rtl_assertions(sim):\n \"\"\" Checks the values in sim to see if any registers assertions fail.\n\n :param sim: Simulation in which to check the assertions\n :return: None\n \"\"\"\n\n for (w, exp) in sim.block.rtl_assert_dict.items():\n try:\n value = sim.inspect(w)\n if not value:\n raise exp\n except KeyError:\n pass\n\n\ndef _check_for_loop(block=None):\n block = working_block(block)\n logic_left = block.logic.copy()\n wires_left = block.wirevector_subset(exclude=(Input, Const, Output, Register))\n prev_logic_left = len(logic_left) + 1\n while prev_logic_left > len(logic_left):\n prev_logic_left = len(logic_left)\n nets_to_remove = set() # bc it's not safe to mutate a set inside its own iterator\n for net in logic_left:\n if not any(n_wire in wires_left for n_wire in net.args):\n nets_to_remove.add(net)\n wires_left.difference_update(net.dests)\n logic_left -= nets_to_remove\n\n if 0 == len(logic_left):\n return None\n return wires_left, logic_left\n\n\ndef find_loop(block=None):\n block = working_block(block)\n block.sanity_check() # make sure that the block is sane first\n\n result = _check_for_loop(block)\n if not result:\n return\n wires_left, logic_left = result\n import random\n\n class _FilteringState(object):\n def __init__(self, dst_w):\n self.dst_w = dst_w\n self.arg_num = -1\n\n def dead_end():\n # clean up after a wire is found to not be part of the loop\n wires_left.discard(cur_item.dst_w)\n current_wires.discard(cur_item.dst_w)\n del checking_stack[-1]\n\n # now making a map to quickly look up nets\n dest_nets = {dest_w: net_ for net_ in logic_left for dest_w in net_.dests}\n initial_w = random.sample(wires_left, 1)[0]\n\n current_wires = set()\n checking_stack = [_FilteringState(initial_w)]\n\n # we don't use a recursive method as Python has a limited stack (default: 999 frames)\n while len(checking_stack):\n cur_item = checking_stack[-1]\n if cur_item.arg_num == -1:\n # first time testing this item\n if cur_item.dst_w not in wires_left:\n dead_end()\n continue\n current_wires.add(cur_item.dst_w)\n cur_item.net = dest_nets[cur_item.dst_w]\n if cur_item.net.op == 'r':\n dead_end()\n continue\n cur_item.arg_num += 1 # go to the next item\n if cur_item.arg_num == len(cur_item.net.args):\n dead_end()\n continue\n next_wire = cur_item.net.args[cur_item.arg_num]\n if next_wire not in current_wires:\n current_wires.add(next_wire)\n checking_stack.append(_FilteringState(next_wire))\n else: # We have found the loop!!!!!\n loop_info = []\n for f_state in reversed(checking_stack):\n loop_info.append(f_state)\n if f_state.dst_w is next_wire:\n break\n else:\n raise PyrtlError(\"Shouldn't get here! Couldn't figure out the loop\")\n return loop_info\n raise PyrtlError(\"Error in detecting loop\")\n\n\ndef find_and_print_loop(block=None):\n loop_data = find_loop(block)\n print_loop(loop_data)\n return loop_data\n\n\ndef print_loop(loop_data):\n if not loop_data:\n print(\"No Loop Found\")\n else:\n print(\"Loop found:\")\n print('\\n'.join(\"{}\".format(fs.net) for fs in loop_data))\n # print '\\n'.join(\"{} (dest wire: {})\".format(fs.net, fs.dst_w) for fs in loop_info)\n print(\"\")\n\n\ndef _currently_in_ipython():\n \"\"\" Return true if running under ipython, otherwise return False. \"\"\"\n try:\n __IPYTHON__ # pylint: disable=undefined-variable\n return True\n except NameError:\n return False\n\npy_regex = '^[^\\d\\W]\\w*\\Z'\n\n\nclass NameIndexer(object):\n \"\"\" Provides internal names that are based on a prefix and an index\"\"\"\n def __init__(self, internal_prefix='_sani_temp'):\n self.internal_prefix = internal_prefix\n self.internal_index = 0\n\n def make_valid_string(self):\n \"\"\" Inputting a value for the first time \"\"\"\n internal_name = self.internal_prefix + str(self.internal_index)\n self.internal_index += 1\n return internal_name\n\n\nclass NameSanitizer(NameIndexer):\n \"\"\"\n Sanitizes the names so that names can be used in places that don't allow\n for arbitrary names while not mangling valid names\n\n Put the values you want to validate into make_valid_string the first time\n you want to sanitize a particular string (or before the first time), and\n retrieve from the NameSanitizer through indexing directly thereafter\n eg: sani[\"__&sfhs\"] for retrieval after the first time\n\n \"\"\"\n def __init__(self, identifier_regex_str, internal_prefix='_sani_temp', map_valid_vals=True):\n self.identifier = re.compile(identifier_regex_str)\n self.val_map = {}\n self.map_valid = map_valid_vals\n super(NameSanitizer, self).__init__(internal_prefix)\n\n def __getitem__(self, item):\n \"\"\" Get a value from the sanitizer\"\"\"\n if not self.map_valid and self.is_valid_str(item):\n return item\n return self.val_map[item]\n\n def is_valid_str(self, string):\n return re.match(self.identifier, string) and self._extra_checks(string)\n\n def _extra_checks(self, string):\n return True\n\n def make_valid_string(self, string=''):\n \"\"\" Inputting a value for the first time \"\"\"\n if not self.is_valid_str(string):\n if string in self.val_map:\n raise IndexError(\"Value {} has already been given to the sanitizer\".format(string))\n internal_name = super(NameSanitizer, self).make_valid_string()\n self.val_map[string] = internal_name\n return internal_name\n else:\n if self.map_valid:\n self.val_map[string] = string\n return string\n\n\nclass PythonSanitizer(NameSanitizer):\n def __init__(self, internal_prefix='_sani_temp', map_valid_vals=True):\n super(PythonSanitizer, self).__init__(py_regex, internal_prefix, map_valid_vals)\n\n def _extra_checks(self, str):\n return not keyword.iskeyword(str)\n","sub_path":"pyrtl/helperfuncs.py","file_name":"helperfuncs.py","file_ext":"py","file_size_in_byte":14146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"492263792","text":"def solution(nums):\n max_list = [0] * len(nums)\n min_list = [0] * len(nums)\n max_list[0] = nums[0]\n min_list[0] = nums[0]\n for i in range(1,len(nums)):\n max_list[i] = max(max_list[i-1]*nums[i],min_list[i-1]*nums[i],nums[i])\n min_list[i] = min(min_list[i-1]*nums[i],nums[i],max_list[i-1]*nums[i])\n return max(max_list)\n\nif __name__ == \"__main__\":\n print(solution([2,3,-2,4])) # 6\n print(solution([-2,0,-1])) # 0 \n print(solution([-7,-8,-1,-2,-9,-6])) # 108\n print(solution([2,-5,-2,-4,3])) # 24","sub_path":"maxiumuProduct.py","file_name":"maxiumuProduct.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"232865917","text":"\"\"\"\nudp.py\nCopyright (C) 2018 by Valentin Gutierrez \n\nUDP Monitor class implementation for PyBal\n\"\"\"\n\n# Python imports\nimport logging\n\n# Twisted imports\nfrom twisted.internet import protocol\nfrom twisted.python import runtime\n\n# Pybal imports\nfrom pybal import monitor\n\n\nclass UDPMonitoringProtocol(monitor.LoopingCheckMonitoringProtocol, protocol.DatagramProtocol):\n \"\"\"\n Monitor that sends a Len=0 UDP packet to the server.\n As long as it doesn't get an ICMP destination unreachable it will\n keep the state set to up.\n \"\"\"\n\n __name__ = 'UDP'\n\n # After ICMP_TIMEOUT seconds it will consider the monitor up again\n ICMP_TIMEOUT = 20\n\n def __init__(self, coordinator, server, configuration):\n \"\"\"Constructor\"\"\"\n\n super(UDPMonitoringProtocol, self).__init__(coordinator, server, configuration)\n\n self.port = None\n self.last_down_timestamp = 0\n self.icmp_timeout = self._getConfigInt('icmp-timeout', self.ICMP_TIMEOUT)\n\n def __report_prefix(self):\n return '{}:{}:'.format(self.server.ip, self.server.port)\n\n def startProtocol(self):\n self.transport.connect(self.server.ip, self.server.port)\n\n def run(self):\n \"\"\"Start the monitoring\"\"\"\n\n super(UDPMonitoringProtocol, self).run()\n\n self.port = self.reactor.listenUDP(0, self)\n\n def stop(self):\n \"\"\"Stop the monitoring\"\"\"\n\n super(UDPMonitoringProtocol, self).stop()\n\n if self.port:\n self.port.loseConnection()\n\n def check(self):\n \"Periodically called method that does a single check\"\n\n if not self.active:\n return\n\n self.transport.write(\"\")\n self.is_up()\n\n def is_up(self):\n \"\"\"\n Mark the monitor as up iff no ICMP errors were received\n self.icmp_timeout seconds after the last error\n \"\"\"\n if not self.active:\n return\n\n if (runtime.seconds() - self.last_down_timestamp) > self.icmp_timeout:\n self._resultUp()\n self.report(\"{} marked as UP\".format(self.__report_prefix()))\n\n def connectionRefused(self):\n \"\"\"Called if an ICMP destination unreachable is received\"\"\"\n\n self.last_down_timestamp = runtime.seconds()\n self._resultDown()\n self.report(\"{} ICMP destination unreachable received\".format(self.__report_prefix()),\n level=logging.WARN)\n","sub_path":"pybal/monitors/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"28514952","text":"#!/usr/bin/env python3\n# vim:sw=4:ts=4:et:\n\"\"\"Branches and prepares a MediaWiki tarball release.\"\"\"\n\nimport argparse\nimport sys\n\nfrom mwrelease.branch import branch, gerrit_client\n\n\ndef get_wmf_branch(version):\n \"\"\"Returns the latest mediawiki/core wmf/ branch for a given version.\"\"\"\n\n response = gerrit_client().get('/projects/mediawiki%2Fcore/branches/')\n branches = [r['ref'][len('refs/heads/'):] for r in response\n if r['ref'].startswith('refs/heads/wmf/%s-wmf.' % version)]\n branches = sorted(branches, key=lambda b: b.split('.').pop())\n\n if len(branches) > 0:\n return branches.pop()\n else:\n return None\n\n\ndef get_rel_branch(version):\n (maj, min, _) = version.split('.')\n return 'REL%s_%s' % (maj, min)\n\n\ndef parse_args():\n \"\"\"Parse command line arguments and return options.\"\"\"\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument('version',\n help='MediaWiki major/minor/patch version (e.g. 1.34.0)')\n\n args = parser.parse_args()\n\n if len(args.version.split('.')) != 3:\n print('version must include all major.minor.patch numbers')\n sys.exit(1)\n\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n rel_branch = get_rel_branch(args.version)\n wmf_branch = get_wmf_branch(args.version)\n\n if wmf_branch is None:\n print('failed to find a wmf/ branch for version %s' % args.version)\n sys.exit(2)\n\n # Branch WMF-deployed extensions/skins and vendor from latest wmf/* branch\n branch(branch=rel_branch,\n bundle='wmf',\n branch_point=wmf_branch)\n\n # Branch remaining extensions/skins from master\n branch(branch=rel_branch,\n bundle='*',\n branch_point='master')\n\n # Branch core from latest wmf/* branch and prepare core using the given\n # version and 'base' bundle.\n branch(branch=rel_branch,\n branch_point=wmf_branch,\n core=True,\n core_bundle='base',\n core_version=args.version)\n","sub_path":"make-release/branch-version.py","file_name":"branch-version.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"559183262","text":"from numpy import delete\n\n\nclass Solution(object):\n def findMinStep(self, board, hand):\n \"\"\"\n :type board: str\n :type hand: str\n :rtype: int\n \"\"\"\n have = []\n for i in range(0,26):\n have.append(0)\n for b in hand:\n have[ord(b) - ord('A')] += 1\n res = self.play(board, have)\n return res if res <= 5 else -1\n\n def play(self, board, have):\n if len(board) == 0:\n return 0\n i = 0\n j = 0\n res = 6\n while j < len(board):\n j += 1\n if j < len(board) and board[i] == board[j]:\n continue\n need = 3 - j + i\n index = ord(board[i]) - ord('A')\n if need <= have[index]:\n have[index] -= need\n res = min(need + self.play(self.clean(board[:i] + board[j:]), have), res)\n have[index] += need\n i = j\n return res\n\n def clean(self, board):\n i = 0\n j = 0\n while j < len(board):\n j += 1\n if j - i >= 3 and (j >= len(board) or board[i] != board[j]):\n return self.clean(board[:i] + board[j:])\n if j < len(board) and board[i] == board[j]:\n continue\n i = j\n return board\n\n#n = 100\nimport random\n\ns = Solution()\nboard = 'RBYYBBRRB'\nhand = 'YRBGB'\nprint(s.findMinStep(board, hand))\n#for i in range(0, n):\n# lb = random.randint(1, 20)\n# lh = random.randint(0, 5)\n# board = ''.join(random.choice(['R', 'Y', 'B', 'G', 'W', 'RR', 'YY', 'BB', 'GG', 'WW', 'RR', 'YY', 'BB', 'GG', 'WW', 'RR', 'YY', 'BB', 'GG', 'WW']) for _ in range(lb))\n# hand = ''.join(random.choice(['R', 'Y', 'B', 'G', 'W']) for _ in range(lh))\n# print(board)\n# print(hand)\n# print(s.findMinStep(board, hand))","sub_path":"488_zumaGame.py","file_name":"488_zumaGame.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"289726796","text":"import pandas as pd\nfrom collections import Counter\nimport numpy as np\nimport random\n\n############################# Breast Cancer data scratch KNN ############################################\n\n# definig the classifier for K nearest neighbor\ndef K_nearest(data ,predict ,k):\n if len(data) >= 3:\n print('Data is less than required to compare ! Idiot !')\n dist = []\n # eculidean distance\n for groups in data:\n for fet in data[groups]:\n euc_dist = np.sqrt(np.sum(np.array(fet)-np.array(predict))**2)\n # appending euc_distance into dist variable\n dist.append([euc_dist,groups])\n # sorted will sort the data in dist in ascending order based on the features euc distance and take the class alone [the crucial part in the conclusion]\n votes = [i[1] for i in sorted(dist)[:k]]\n\n #print(votes)\n # Counter will select the top most answer in votes\n vote_res = Counter(votes).most_common(1)[0][0]\n\n #print(vote_res)\n return vote_res\n\n\n\n# a = [3,3,3,3,4,1,7,6,2,9]\n#\n# print(Counter(a).most_common(1)[0][0])\n\n\n\n\n# read the data\ndata = pd.read_csv('breast-cancer-wisconsin.data.txt')\n# removing id from the data\ndata = data.drop(['id'],1)\ndata.replace('?',-9999,inplace=True)\n# passing data as float and changing them to list\ndf = data.astype(float).values.tolist()\n# shuffling the data\nrandom.shuffle(df)\n# amount of data for test and training\ntest_size = 0.2\n# empty dictonary and populate it later\ntrain_set = {2:[], 4:[]}\ntest_set = {2:[], 4:[]}\n# slicing the data\n # the data except the last 20 % of the data\ntrain_data = df[:-int(test_size*len(df))]\n # the last 20 % of the data as test data\ntest_data = df[-int(test_size*len(df)):]\n\n\n\n# populating the empty dictonary\nfor i in train_data:\n train_set[i[-1]].append(i[:-1])\n# appending all the data except the last element in the data\n\n\nfor i in test_data:\n test_set[i[-1]].append(i[:-1])\n # appending the last data to test data ...because we dont want to test the same data that we train .\n\n\ncorrect = 0\nwrong = 0\ntotal = 0\n\n\n# passing the train data into the classifier\nfor group in test_set:\n #print(group)\n # passuing data in test_data one by one as training data to K_nearest neighbors\n for features_2 in test_set[group]:\n #print(features_2)\n #print(train_set)\n # prints out the nearest data point\n vote = K_nearest(train_set,features_2,5)\n\n if group == vote:\n correct += 1\n else:\n wrong += 1\n total += 1\nprint('Acc : ', correct/total)\nprint('err :',wrong/total)\n\n\n'''\n# what do we do here ?\nGenerally, we find the K number of nearby points to the data we want to predict and take the majority of the data and consider that our data belongs to the majority of the data.\n# How do we do ?\nEuclidean Distance - Its the sum of the square root of the difference between the features of data with classes squared and the data which has only the features for which we want to predict the class(2 or 4) squared and \n Now we list this sum with its class and sort it in ascending order so that we can take the top K \n\nNOw is the crucial part ,WE don't want those features euclidean Distance after sorting them and taking the top K values ,Now all we want is the class of \nthose top euclidean Distance ,We take those features by using i[1] for i in dist[:K]-this is taking the top K number of values in dist- which has the euclidean distance and its corresponding class ,\nThus i[1] will take the second part of the array that is for [25,2] it will take 2,which is the class and thats what we need \nRemember this is for K values that is K nearby neighbors so now we'll have an array of classes among which we should take the majority of the classes this is where Counter().mostcommon() comes in ,it'll take the classes that has \nrepeated and put them in an tuple within an array with the number of times it has repeated ,like this [(2,3),(4,2)]-this means 2 has repeated 3 times and 4 has repeated 2 times ,\nso inorder to print the class we need the first of the of the array [0] and first of the tuple [0] so we type [0][0]\n\n\nNow the accuracy part I really am confused with this is part ,I think what we do is add 1 point whenever the class we passed is one among the group which is 2 or 4 and divide this count by the total number of class \npresent giving the percentage of accuracy ,similarly we can find the error .\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n\n\n\n# ################################## Example_ to know what is KNN ###################################################\n# d_points = { 'k':[[1,2],[2,3],[3,1]] ,'r':[[6,5],[7,7],[8,6]]}\n#\n# features = [5,10]\n#\n#\n# # [[plt.scatter(ii[0],ii[1],s = 100 ,color='r')for ii in d_points[i]] for i in d_points]\n# # plt.scatter(features[0],features[1])\n# # plt.show()\n#\n#\n# def K_nearest(data ,predict ,k=3):\n# if len(data) >= 3:\n# print('Data is less than required to compare ! Idiot !')\n# dist = []\n# for groups in data:\n# for fet in data[groups]:\n# euc_dist = np.sqrt(np.sum(np.array(fet)-np.array(predict))**2)\n# dist.append([euc_dist,groups])\n#\n# votes = [i[1] for i in sorted(dist) [:k]]\n# print(Counter(votes).most_common(1))\n# vote_res = Counter(votes).most_common(1)[0][0]\n# print(vote_res)\n# K_nearest(d_points,features)\n#\n","sub_path":"Knearest_math.py","file_name":"Knearest_math.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"376475199","text":"# coding=utf-8\n# 螺旋矩阵\n\n\nclass Solution(object):\n \"\"\"\n 1、对于这个列表矩阵,先输出第一行并将其pop除去,然后将矩阵逆时针旋转90度,\n 继续输出第一行并将其pop出去,递归的执行上述操作直至矩阵为空。\n \"\"\"\n def spiralOrder(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[int]\n \"\"\"\n ret = []\n\n while matrix:\n ret.extend(matrix.pop(0))\n matrix = list(map(list, zip(*matrix)))[::-1]\n if not matrix or not matrix[0]:\n break\n return ret\n\n\nif __name__ == \"__main__\":\n matrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n ]\n So = Solution()\n print(So.spiralOrder(matrix))\n\n\n","sub_path":"Python/_0054_Spiral_Matrix/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"654092250","text":"# for files\nimport time\n# for data\nimport re\nimport numpy as np\nimport geopandas as gpd\n\n\n\n# --------------------------------------------------------------------------------------------------- OBJECT TO DATETIME\n\n\ndef str_to_datetime(date_string):\n \"\"\"\n input: '2015-07-02 14:04:06' \n output: '2015-07-02T14:04:06' \n \"\"\"\n try:\n date_stt = date_string.split(\" \")[0]\n time_str = date_string.split(\" \")[1]\n result = np.datetime64(date_stt) + np.timedelta64(time_str.split(\":\")[0], 'h') + np.timedelta64(\n time_str.split(\":\")[1], 'm') + np.timedelta64(time_str.split(\":\")[2], 's')\n return result\n\n\n except:\n print(f\"!! \\t String value does not match: {date_string}\")\n return np.nan\n\n\ndef datetime_transformations(datetime_cols, df, drop_cols):\n \"\"\"\n input:\n output:\n \"\"\"\n str_to_datetime_vect = np.vectorize(str_to_datetime)\n print(f\"---------------------- Transform OBJECT to DATETIME\")\n\n for Dtime_col in datetime_cols:\n\n tic = time.perf_counter()\n new_col_name = Dtime_col.split(\"_\")[1] + '_' + Dtime_col.split(\"_\")[2]\n\n df.loc[:,new_col_name] = str_to_datetime_vect(df.loc[:,Dtime_col])\n if drop_cols: df.drop(columns=[Dtime_col], axis=1, inplace=True)\n\n toc = time.perf_counter();\n mins = (toc - tic) // 60;\n secs = np.around((toc - tic) % 60, 3)\n\n print(f\"{Dtime_col} transformed in {mins}'{secs}''\")\n\n\n# --------------------------------------------------------------------------------------------------- OBJECT TO GEOMETRY\n\n\ndef coord_to_geomObject(df, drop_bool=False):\n \"\"\"\n input: df[lat], df[long] --> 40 3\n output: df[point] --> Point(40,3)\n \"\"\"\n print(f\"---------------------- Transform OBJECT to GEOM\")\n\n # find pairs\n latitude = sorted([col for col in df.columns if re.findall(r'latitud', col)])\n longitud = sorted([col for col in df.columns if re.findall(r'longitude', col)])\n print(f\"Latitude cols found:\\t {latitude}\");\n print(f\"Longitude cols found:\\t {longitud}\");\n print()\n\n # error if\n assert len(latitude) == len(longitud)\n # there has to be the same lats as longs for constr points\n\n for lng, lat in zip(longitud, latitude):\n\n if lng.split('longitude') == lat.split('latitude'):\n new_col_name = [n.strip() + 'geometry' for n in lng.split('longitude') if len(n) != 0][0]\n\n df.loc[:,new_col_name] = gpd.points_from_xy(df.loc[:,lng], df.loc[:,lat])\n print(f\"Adding new col:\\t {new_col_name}\\tDone\")\n\n print(f\"\\nDropping cols {latitude + longitud}\\t\\t{str(drop_bool).upper()}\")\n if drop_bool: df.drop(columns=latitude + longitud, axis=1, inplace=True)\n\n\n# --------------------------------------------------------------------------------------------------- OUTLIERS\n\ndef clean_outliers(df, columns, iqr_range):\n \"\"\"\n Returns df without outliers in defined columns defined range\n \"\"\"\n print(f\"\\n---------------------- Droping OUTLIers in {columns}\")\n iqr_range_sorted = sorted(iqr_range)\n\n Q_down = df[columns].quantile(iqr_range_sorted[0])\n Q_up = df[columns].quantile(iqr_range_sorted[1])\n IQR = Q_up - Q_down\n\n filtr = ((df[columns] < (Q_down - 1.5 * IQR)) | (df[columns] > (Q_up + 1.5 * IQR)))\n print(f\"Done\")\n\n return df[~filtr.any(axis=1)]\n\n# --------------------------------------------------------------------------------------------------- PRICES TO POSITIVE NUMBS\n\ndef abs_var_col(df, cols):\n \"\"\"\n some numerics cols for prices are negative (all row neg). This is a corrective def\n \"\"\"\n print(f\"\\n---------------------- Transform NEG PRICES to POSITIVE PRICES\")\n tic = time.perf_counter()\n\n for col in cols:\n try: df.loc[:,col] = df.loc[:,col].abs()\n except: pass\n\n toc = time.perf_counter()\n mins = (toc - tic) // 60; secs = np.around((toc - tic) % 60, 3)\n\n print(f\" Objects transformed in {mins}'{secs}''\")\n return df\n\n# --------------------------------------------------------------------------------------------------- GETTING RID OF CASH ERRORS\n\ndef total_amount_inconsistency(df, cols_to_sum, total_col):\n \"\"\"\n drop all cols that are inconsistent in the total amount payed for the trip\n \"\"\"\n print(f\"\\n---------------------- Dropping CASH ERRORS\")\n tic = time.perf_counter()\n\n # create col for and drop if aux_col == False\n aux_col = 'amount_equality'\n\n df[aux_col] = df.apply(lambda x: np.around(np.sum(x[cols_to_sum]), 3) == x[total_col], axis=1)\n df.drop(df[df[aux_col] == False].index, inplace=True)\n\n # del aux_col\n df.drop(columns=[aux_col], axis=1, inplace=True)\n toc = time.perf_counter()\n mins = (toc - tic) // 60;\n secs = np.around((toc - tic) % 60, 3)\n\n print(f\" Objects transformed in {mins}'{secs}''\")\n # no return\n\n","sub_path":"nyc_taxi_etl/m_data_transformation.py","file_name":"m_data_transformation.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"587920790","text":"import Create_Dataset\nfrom ULA import *\nimport matplotlib.pyplot as plt\n\niris = Create_Dataset.create_dataset('/Users/yywxenia/PycharmProjects/MLProj3/iris.data.txt',0,4,4)\n###(2) Training and Testing data\n\nX = iris['data']\ny = iris['target']\n\nkmean1 = []\nem1 = []\nkmean2 = []\nem2 = []\nkmean3 = []\nem3 = []\nkmean4 = []\n\ntrainK=[]\ntestK=[]\ntrainE=[]\ntestE=[]\n\ni_number=[6,5,4,3,2]\nfor i in i_number:\n a = cluster_process('kmeans', i, X, y)\n b= cluster_process('em', i, X, y)\n kmean1.append(a[0])\n em1.append(b[0])\n kmean2.append(a[1])\n em2.append(b[1])\n kmean3.append(a[2])\n em3.append(b[2])\n kmean4.append(a[3])\n\n trainK.append(a[4])\n testK.append(a[5])\n trainE.append(b[3])\n testE.append(b[4])\n\n\n\n\n## Ploting the performances:\ni_axis=[6,5,4,3,2]\n\nplt.plot(i_axis, kmean1,\"r.-\",i_axis, em1,'b.-',linewidth=1.5)\nplt.legend(['K-means', 'EM'], loc='upper right')\nplt.ylabel('Clustering performance')\nplt.xlabel('Cluster number')\nplt.title(\"V measure: Performance of K-means and EM\")\nplt.show()\n\nplt.plot(i_axis, kmean2,\"r.-\",i_axis, em2,'b.-',linewidth=1.5)\nplt.legend(['K-means', 'EM'], loc='upper right')\nplt.ylabel('Clustering performance')\nplt.xlabel('Cluster number')\nplt.title(\"Adjusted mutual_info: Performance of K-means and EM\")\nplt.show()\n\nplt.plot(i_axis, kmean3,\"r.-\",i_axis, em3,'b.-',linewidth=1.5)\nplt.legend(['K-means', 'EM'], loc='upper right')\nplt.ylabel('Clustering performance')\nplt.xlabel('Cluster number')\nplt.title(\"Adjusted rand index: Performance of K-means and EM\")\nplt.show()\n\nplt.plot(i_axis, kmean4,\"r.-\",linewidth=1.5)\nplt.legend(['K-means'], loc='upper right')\nplt.ylabel('Clustering performance')\nplt.xlabel('Cluster number')\nplt.title(\"Silhouette coefficient: Performance of K-means\")\nplt.show()\n\n\n\n#### for train and test time:\n## kmeans\nplt.subplot(211)\nplt.plot(i_axis, trainK, \"c\", label='Train time',linewidth=1.5)\nplt.legend()\nplt.ylabel('Time')\nplt.xlabel('Cluster number')\nplt.title(\"Train and Test Time of K-means\")\n\nplt.subplot(212)\nplt.plot(i_axis, testK, 'm', label='Test time',linewidth=1.5)\nplt.ylabel('Time')\nplt.xlabel('Cluster number')\nplt.legend()\nplt.show()\n\n## EM\nplt.subplot(211)\nplt.plot(i_axis, trainE, \"c\", label='Train time',linewidth=1.5)\nplt.legend()\nplt.ylabel('Time')\nplt.xlabel('Cluster number')\nplt.title(\"Train and Test Time of EM\")\n\nplt.subplot(212)\nplt.plot(i_axis, testE, 'm', label='Test time',linewidth=1.5)\nplt.ylabel('Time')\nplt.xlabel('Cluster number')\nplt.legend()\nplt.show()","sub_path":"ML_Algorithms/Unsupervised/ml3_code/cluster_iris.py","file_name":"cluster_iris.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"80618524","text":"\nfrom datetime import datetime, timedelta, date\nfrom workalendar.america import Mexico\n\ndef add_business_days(from_date, add_days):\n holydaysmex = Mexico()\n business_days_to_add = add_days\n current_date = from_date\n while business_days_to_add > 0:\n current_date += timedelta(days=1)\n weekday = current_date.weekday()\n if weekday >= 5 or not holydaysmex.is_working_day(current_date): # sunday = 6 and Holydays\n continue\n business_days_to_add -= 1\n return current_date\n","sub_path":"denker/denker_sale_report/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"517120856","text":"\"\"\"\nTitle: Linked list reverse nodes.\n\nProblem:\n Reverse nodes in linked list.\n\nExecution: python reverse_nodes.py\n\"\"\"\nfrom typing import List\nimport unittest\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def print_list(self):\n cur_node = self.head\n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next\n\n def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node\n\n def reverse_iterative(self):\n\n prev = None\n cur = self.head\n while cur:\n nxt = cur.next\n cur.next = prev\n prev = cur\n cur = nxt\n self.head = prev\n\n def reverse_recursive(self):\n\n def _reverse_recursive(cur, prev):\n if not cur:\n return prev\n nxt = cur.next\n cur.next = prev\n prev = cur\n cur = nxt\n return _reverse_recursive(cur, prev)\n\n self.head = _reverse_recursive(cur=self.head, prev=None)\n\n\nclass TestReverseNodes(unittest.TestCase):\n \"\"\"Unit test for reverse_nodes.\"\"\"\n\n def test_recursive(self):\n llist = LinkedList()\n llist.append(\"A\")\n llist.append(\"B\")\n llist.append(\"C\")\n llist.append(\"D\")\n\n llist.reverse_recursive()\n cur_node = llist.head\n nodes: List[str] = []\n while cur_node:\n nodes.append(cur_node.data)\n cur_node = cur_node.next\n self.assertEqual(nodes, [\"D\", \"C\", \"B\", \"A\"])\n\n def test_iterative(self):\n llist = LinkedList()\n llist.append(\"A\")\n llist.append(\"B\")\n llist.append(\"C\")\n llist.append(\"D\")\n\n llist.reverse_iterative()\n cur_node = llist.head\n nodes: List[str] = []\n while cur_node:\n nodes.append(cur_node.data)\n cur_node = cur_node.next\n self.assertEqual(nodes, [\"D\", \"C\", \"B\", \"A\"])\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"quickstart_guides/linked_lists/python/reverse_nodes.py","file_name":"reverse_nodes.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"57456479","text":"\"\"\"\nSerializers for the easyPCE API.\n\"\"\"\nfrom rest_framework import serializers\n\nimport models\n\n\nclass TermSerializer(serializers.ModelSerializer):\n\n courses = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n\n class Meta:\n model = models.Term\n fields = (\n 'suffix',\n 'name',\n 'code',\n 'courses',\n 'start_date',\n 'end_date',\n 'season',\n 'year',\n )\n\n\nclass SubjectSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Subject\n fields = '__all__'\n # fields = (\n # 'code',\n # 'name',\n # 'courses',\n # )\n\n\nclass SubjectCodeField(serializers.RelatedField):\n \"\"\"Represents a subject code, ex. COS, EGR, etc.\"\"\"\n\n def to_representation(self, value):\n return value.code\n\n\nclass CourseNumberSerializer(serializers.ModelSerializer):\n\n course = serializers.PrimaryKeyRelatedField(read_only=True)\n subject = SubjectCodeField(read_only=True)\n\n class Meta:\n model = models.CourseNumber\n fields = (\n 'course',\n 'subject',\n 'number',\n )\n\n\nclass InstructorSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Instructor\n fields = (\n 'first_name',\n 'last_name',\n )\n\n\nclass MeetingSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Meeting\n fields = (\n 'start_time',\n 'end_time',\n 'days',\n 'location'\n )\n\n\nclass SectionSerializer(serializers.ModelSerializer):\n\n meetings = MeetingSerializer(many=True, read_only=True)\n\n class Meta:\n model = models.Section\n fields = (\n 'class_id',\n 'name',\n 'type',\n 'status',\n 'enrollment',\n 'capacity',\n 'meetings',\n )\n\n\nclass EvaluationSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Evaluation\n fields = (\n 'question_text',\n 'response_avg',\n )\n\n\nclass AdviceSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Advice\n fields = (\n 'text',\n )\n\n\nclass CourseSerializer(serializers.ModelSerializer):\n\n sections = SectionSerializer(many=True, read_only=True)\n evaluations = EvaluationSerializer(many=True, read_only=True)\n advice = AdviceSerializer(many=True, read_only=True)\n\n class Meta:\n model = models.Course\n fields = (\n 'course_id',\n 'title',\n 'term',\n 'primary_number',\n 'pdf',\n 'pdf_only',\n 'audit',\n 'dist_req',\n 'description',\n 'additional_info',\n 'instructors',\n 'last_updated',\n 'sections',\n 'evaluations',\n 'advice',\n )\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.User\n fields = (\n 'netid',\n )\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"249191692","text":"from django.urls import path\n\nfrom rest_framework_simplejwt.views import TokenRefreshView\n\nfrom .views import MyObtainTokenPairView, RegisterView, UserLastLoginAndLastRequestView\n\n\napp_name = 'api_accounts'\n\nurlpatterns = [\n path('login/', MyObtainTokenPairView.as_view(), name='token_obtain_pair'),\n path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('register/', RegisterView.as_view(), name='auth_register'),\n path('last_login_and_last_request/',\n UserLastLoginAndLastRequestView.as_view(),\n name='last_login_and_last_request'),\n]\n","sub_path":"accounts/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"163386181","text":"import sys\n\nlines = []\nfor line in sys.stdin:\n\tlines.append(line.rstrip('\\n'))\n\nnbr = 0\nx = 0\n\nfor i in lines:\n if (i[x] == '#'):\n nbr += 1\n if (len(i) <= x + 3):\n x = x + 3 - len(i)\n else:\n x += 3\n\nprint (nbr)","sub_path":"AdventOfCode2020/3-TobogganTrajectory/script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"136184098","text":"candle_data = CandleData()\ntest = CandleData().getCandleMinute(market_code, unit, None, 1)\n\n\nprint(candle_data.checkMinute(2))\ncandle_data.unit_list\ntest = candle_data.getCandleMinute('KRW-BTC', 1, 2)\n\n\n\nmarket_code = 'KRW-BTC'\nunit = 1\ncount = 400\nurl = 'https://api.upbit.com/v1/candles/minutes/%s?market=%s&count=%s&page=%s' % (unit, market_code, count, 1)\nurl2 = 'https://api.upbit.com/v1/candles/minutes/%s?market=%s&count=%s&page=%s' % (unit, market_code, count, 2)\nlen(getjData(url) + getjData(url2))\ntestset1 = getjData(url)\ntestset2 = getjData(url2)\n\ntestset1[0] == testset2[0]\n\n\na = Market()\na.name('KRW-BTC')\nfilter(lambda x: x['market'] == market_code, jData)\n\n\nlist(map(lambda x: x['korean_name'], filter(lambda x: x['market'] == market_code, jData)))[0]\n\njData\n\n\nlists = list(map(lambda x: x['market'], jData))\nfiltered = filter(lambda x: x[0:3] == market_currency, lists)\nreturn list(filtered)\n\n[x['market'] for x in jData if x['market'][0:3] == market_currency]\n\n\n\ninit_time = '2018-07-07T10:16:00'\ndata = CandleData()\ndata = data.getCandleMinute(market_code, unit, None)\ntime = data[-1][\"candle_date_time_utc\"] + '.000Z'\nwhile time > init_time:\n data += CandleData().getCandleMinute(market_code, unit, time)\n time = data[-1][\"candle_date_time_utc\"] + '.000Z'\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"317926470","text":"\"\"\"\nTakes the csv file (for given N) containg the repeats for each noise level,\ncomputes, average, std, standard error and exports to new file.\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef main(type, model, N, folder, name_of_file):\n\n # read in the data as a df from the correct location\n df = pd.read_csv(\"./averages_{}_{}/N_{}.csv\".format(type, model, N))\n\n # get the number of averages\n num_averages = len(df.columns) - 1\n\n\n # get the mean, std, standard error\n df[\"mean ({} averages)\".format(num_averages)] = df.drop(type, 1).mean(axis = 1)\n df[\"std\"] = df.drop([\"mean ({} averages)\".format(num_averages), type], 1).std(axis = 1)\n df[\"std_error\"] = df.drop([type, \"mean ({} averages)\".format(num_averages), \"std\"], 1).std(axis = 1) / np.sqrt(num_averages)\n\n # get another df with the correct information and write it to a results file\n df2 = df[[type, \"mean ({} averages)\".format(num_averages), \"std\", \"std_error\"]]\n df2.to_csv(\"../results/{}/{}.csv\".format(folder, name_of_file), index = False)\n\n return 0\n\n\nmain(\"density\", \"SVM\", 100, \"SVM\", \"density_averages_N100\")\n","sub_path":"week by week code/Week_13/results_with_errors.py","file_name":"results_with_errors.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"208096453","text":"''' All function for telegram api '''\r\nfrom telethon.sync import TelegramClient\r\nfrom telethon.tl.types import PeerUser\r\nfrom telethon.tl.functions.messages import GetHistoryRequest\r\n''' Utility function date, json format'''\r\nfrom datetime import datetime\r\nimport json\r\n\r\nwith open(\"setting.json\", \"r\", encoding = \"utf-8\") as read_file:\r\n json_data = json.loads(read_file.read())\r\n\r\napi_id = json_data['user']['api_id']\r\napi_hash = json_data['user']['api_hash']\r\nusername = json_data['user']['username']\r\n\r\nclient = TelegramClient(username, api_id, api_hash)\r\nclient.start()\r\n\r\nasync def write_log(data_file, data) -> None:\r\n data_file.write(data)\r\n\r\nasync def console_log(data) -> None:\r\n print(data)\r\n\r\nasync def between(message, _from, to) -> bool:\r\n date = str(message.date).split()[0].split('-')\r\n date[0] = date[0][:2]\r\n\r\n return (\r\n datetime.strptime(to, '%y-%m-%d') <= \\\r\n datetime.strptime('-'.join(date), '%y-%m-%d') <= \\\r\n datetime.strptime(_from, '%y-%m-%d')\r\n )\r\n\r\nasync def delete() -> None:\r\n dialogs = await client.get_dialogs()\r\n for dialog in dialogs:\r\n print(f'[{dialogs.index(dialog)}] {dialog.name}')\r\n \r\n select = int(input(f'Select dialog: '))\r\n print('Select date range, from - to. For example: from 20-09-16 to 20-09-17')\r\n _from = input('From: ').replace(' ', ''); to = input('To: ').replace(' ', '')\r\n\r\n messages = await client.get_messages(dialogs[select], limit = None)\r\n\r\n messages = [\r\n message for message in messages \r\n if await between(message, to, _from)\r\n ]\r\n\r\n lenth_of_message = len(messages)\r\n messages_cout = 1\r\n date_now = str(datetime.now()).split()[0]\r\n file_name = f'deleted-messages-{date_now}-{dialogs[select].id}.txt'\r\n\r\n with open(file_name, 'a', encoding = 'utf') as logfile:\r\n for message in messages:\r\n try:\r\n message_text = message.message.replace('\\n', ' ') \\\r\n if not message.media else ''\r\n except:\r\n message_text = ''\r\n \r\n author = await client.get_entity(PeerUser(message.from_id))\r\n author = author.username if author.username \\\r\n else author.first_name if author.first_name \\\r\n else author.id\r\n\r\n await client.delete_messages(dialogs[select], message.id)\r\n await write_log(logfile, f'{str(message.date)} [{author}] - {message_text}\\n')\r\n\r\n message_date = str(message.date).split()[0]\r\n process = int(100 * messages_cout / lenth_of_message)\r\n logged_message = f'Deleted id {message.id} start: {_from}, ' +\\\r\n f'current: {message_date}, end: {to}. Total: {messages_cout} ({process}%)'\r\n \r\n await console_log(logged_message)\r\n\r\n messages_cout += 1\r\n\r\nif __name__ == \"__main__\":\r\n with client:\r\n client.loop.run_until_complete(delete())\r\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"183966731","text":"'''\n\nThis file is part of the zone_plate_testing repository and an exntension of the recipie to simulate one tilted zone plate fonund in the notebook simlate_zp_with_tilt.ipynb to a workflow which does the aforementioned task multiple times. \n\n'''\n\nimport numpy as np\nimport os,pickle\nimport matplotlib.pyplot as plt\nfrom multislice import prop,prop_utils\nfrom os.path import dirname as up\n\n\n'''\n\nmake_zp_from_rings : make a zone plate from the rings which were created earlier.\n\nInputs : n - number of rings, grid_size \n\nOutputs : a numpy array containing the zone plate\n\n'''\ndef make_zp_from_rings(n,grid_size):\n zp = np.zeros((grid_size,grid_size))\n for i in range(n):\n if i%2 == 1 :\n locs_ = np.load('ring_locs_'+str(i)+'.npy')\n locs_ = tuple((locs_[0],locs_[1]))\n vals_ = np.load('ring_vals_'+str(i)+'.npy')\n zp[locs_] = vals_\n return zp\n\n\n'''\n\ntilt : get the focal spot for a given zone plate and tilt angle of the input wave and save it.\n\nInputs : i-tilt angle in degrees,zp - zone plate pattern, thickness (of the zone plate), parameters.\n\n'''\n\ndef tilt(i,zp,thickness,parameters):\n zp_thickness = thickness \n beta = parameters['beta']\n delta = parameters['delta'] \n zp_coords = parameters['zp_coords'] \n step_xy = parameters['step_xy']\n energy = parameters['energy(in eV)']\n wavel = parameters['wavelength in m']\n f = parameters['focal_length']\n L = step_xy*np.shape(zp)[0] \n n = np.shape(zp)[0] \n\n print('claclulating for tilt angle : ',i) \n theta = (i)*(np.pi/180)\n slope = np.tan(theta)\n x = np.linspace(zp_coords[0],zp_coords[1],n)\n X,Y = np.meshgrid(x,x)\n z1 = 2*np.pi*(1/wavel)*slope*X\n wave_in = np.multiply(np.ones((n,n),dtype='complex64'),np.exp(1j*(z1)))\n \n number_of_steps_zp = (prop_utils.number_of_steps(step_xy,wavel,zp_thickness)+1)*2\n wave_focus,L2 = prop_utils.optic_illumination(wave_in,zp,delta,beta,zp_thickness,step_xy,wavel,number_of_steps_zp,0,f)\n focal_spot,x_,y_,max_val = prop_utils.get_focal_spot(wave_focus,grid_size)\n \n np.save('foc_spot_Q_0.33_'+str(round(angle,3))+'_degree.npy',focal_spot)\n np.save('foc_loc_Q_0.33_'+str(round(angle,3))+'_degree.npy',np.array([x_,y_]))\n \n return\n\n\n'''\nLoad the zone plate from memeory, set parameters.\n'''\npwd = os.getcwd()\nos.chdir(up(up(os.getcwd()))+str('/zp_database/soft_xray_zp/'))\nparameters = pickle.load(open('parameters.pickle','rb'))\ngrid_size = parameters['grid_size']\nnum_zones = 700\nzp = make_zp_from_rings(num_zones,grid_size)\nos.chdir(pwd)\n\nthickness = 0.03807e-6 #Q=0.333\ninputs = np.arange(51)*0.1\n\n\nprint(num_zones,' zones, ',thickness*1e6,' microns thick')\nmax_loc = []\n\n'''\nLoop to run the simluation over the variable number of tilt angles.\n'''\nfor angle in inputs:\n tilt(angle,zp,thickness,parameters)\n","sub_path":"optic_aligned/Q_0.33_0.5keV/simulate_var_width_thickness.py","file_name":"simulate_var_width_thickness.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"150866820","text":"\"\"\"\nLinear Discriminant Analysis (LDA) Example.\n\nStrictly speaking, LDA is a classifier, but it is often used for dimensionality \nreduction. Since it's a supervised approach, it requires the label set to \noptimize the reduction step. LDA outputs linear combinations of the input \nfeatures, trying to model the difference between the classes that best \ndiscriminate them (since LDA uses label information). \n\nCompared to PCA, the output dataset that is obtained with the help of LDA \ncontains neat distinction between classes. However, it cannot be used in \nregression problems.\n\"\"\"\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nimport matplotlib.pyplot as plt\n\niris = load_iris()\n\nlda_2c = LinearDiscriminantAnalysis(n_components=2)\nX_lda = lda_2c.fit_transform(iris.data, iris.target)\nprint(\"Shape of reduced matrix:\", X_lda.shape)\n\n\nplt.scatter(X_lda[:,0], X_lda[:,1], c=iris.target, alpha=0.8,\n edgecolors='none')\nplt.show()\n","sub_path":"machine_learning/dimensionality_reduction/linear_discriminant_analysis.py","file_name":"linear_discriminant_analysis.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"128267642","text":"import argparse\n\nimport read\nfrom sentence import add_pad_unk, adjust_sentence, filter_unused_word\nfrom relation import adjust_relation\nimport write\n\n\ndef main(clean_folder: str, pretrained_embedding_location: str,\n max_length: int, pad_token: str, unk_token: str, ready_folder: str):\n\n # read all files\n (train_sentences, train_relations,\n valid_sentences, valid_relations,\n test_sentences, test_relations,\n embeddings) = read.run(clean_folder,\n pretrained_embedding_location)\n\n # xd: sentence\n add_pad_unk(embeddings, pad_token, unk_token)\n\n train_sentences_, train_case_seqs = adjust_sentence(train_sentences,\n max_length, pad_token,\n embeddings, unk_token)\n valid_sentences_, valid_case_seqs = adjust_sentence(valid_sentences,\n max_length, pad_token,\n embeddings, unk_token)\n test_sentences_, test_case_seqs = adjust_sentence(test_sentences,\n max_length, pad_token,\n embeddings, unk_token)\n\n # filter unused words\n sentences = train_sentences_ + valid_sentences_ + test_sentences_\n word_lookup, word_embedding = filter_unused_word(embeddings, sentences)\n\n # yd: relation\n train_relations_ = adjust_relation(train_relations, max_length)\n valid_relations_ = adjust_relation(valid_relations, max_length)\n test_relations_ = adjust_relation(test_relations, max_length)\n\n # write sentence and relation\n write.run(train_sentences_, valid_sentences_, test_sentences_, word_lookup, word_embedding,\n train_case_seqs, valid_case_seqs, test_case_seqs,\n train_relations_, valid_relations_, test_relations_, ready_folder)\n\n\nif __name__ == '__main__':\n\n # accept arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--pretrained_embedding_location', type=str,\n default=('/home/yytang/Projects/Word-Embedding/' +\n 'glove.6B.50d.txt'))\n\n parser.add_argument('--clean_folder', type=str, default='../clean/conll/')\n parser.add_argument('--ready_folder', type=str, default='../ready/conll/')\n\n parser.add_argument('--max_length', type=int, default=120)\n parser.add_argument('--pad_token', type=str, default='__pad__')\n parser.add_argument('--unk_token', type=str, default='__unk__')\n\n args = parser.parse_args()\n\n main(args.clean_folder, args.pretrained_embedding_location,\n args.max_length, args.pad_token, args.unk_token, args.ready_folder)\n","sub_path":"etype-uncased-nomc/adjust/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"470490472","text":"__author__ = '崔畅'\nfrom public.common import datainfo\nfrom time import strftime\nimport time\nimport os\ndef createTestCase(dataFile,scriptAuthor=\"cuichang\"):\n \"\"\"\n 生成 .py文件 测试用例\n :param dataFile:\n :param scriptAuthor:\n :return:\n \"\"\"\n billList = datainfo.getAllDataList(dataFile, \"bill\")\n createTime = strftime(\"%Y-%m-%d_%H_%M_%S\")\n fileHeadInfo = \"#coding=UTF-8\\n\" \\\n \"# Author: \" + scriptAuthor + \"\\n\" \\\n \"# Date: \" + createTime + \"\\n\" \\\n \"# Modify the record:\\n\"\n fileHeadImport = \"from public.common import mytest\\n\" \\\n \"from public.pages import SubmitToBillPage\\n\" \\\n \"from time import strftime\\n\" \\\n \"from config import globalparam\\n\" \\\n \"import unittest\\n\" \\\n \"from public.common import publicfunction\\n\" \\\n \"from public.pages.billTestData import BillTestData\\n\"\n for bill in billList: # 如果bill[0] == \"billGroup\" or bill[3] == \"No\"则不创建用例\n if bill[0]==\"billGroup\":\n continue\n if bill[3] == \"No\":\n continue\n billBroupDir = os.path.join(os.path.abspath('../..'), 'testcase',bill[0].lower())\n #使用billCode作为文件名称\n billFilePath = os.path.join(billBroupDir,\"test_\"+bill[2]+\".py\")\n if os.path.exists(billBroupDir) is False:\n os.makedirs(billBroupDir)\n file = open(os.path.join(billBroupDir, \"__init__.py\"), 'w')\n file.close()\n if os.path.exists(billFilePath) is False:#如果用例文件不存在,则创建文件\n file = open(billFilePath,'w',encoding = 'utf-8')\n file.write(fileHeadInfo)\n file.write(fileHeadImport)\n #使用billCode作为类名\n file.write(\"class {0}(mytest.MyTest):\\n\".format(\"test_\"+bill[2]))\n file.write(\"\\t\\\"\\\"\\\"测试单据-\"+bill[0]+\"-\"+bill[1]+\"\\\"\\\"\\\"\\n\")\n file.write(\"\\t@unittest.skipUnless(globalparam.usecase_run_mode >= 1, \\\"\\\")\\n\"\n \"\\tdef test_{0}_01(self):\\n\".format(bill[2]))\n file.write(\"\\t\\t\\\"\\\"\\\"测试单据-\"+bill[0]+\"-\"+bill[1]+\"_\"+bill[5]+\"模式\"+\"\\\"\\\"\\\"\\n\"\n \"\\t\\t#初始化测试对象\\n\"\n \"\\t\\tfsscTest = SubmitToBillPage.SubmitToBillPage(self.dr)\\n\"\n \"\\t\\t#准备测试数据\\n\"\n # \"\\t\\tbillGroup = \\\"\"+bill[0]+\"\\\"\\n\"\n \"\\t\\tbillName = \\\"\"+bill[1]+\"\\\"\\n\"\n \"\\t\\tfillPerson = \\\"\"+bill[4]+\"\\\"\\n\"\n \"\\t\\tapprovalModel = \\\"\"+bill[5]+\"\\\"\\n\"\n \"\\t\\ttestCaseFile = \\\"\"+bill[0]+\"\\\\\"+bill[1]+\".xls\\\"\\n\"\n \"\\t\\ttestCaseData = BillTestData(testCaseFile)\\n\"\n \"\\t\\t#打开系统\\n\"\n \"\\t\\tfsscTest.openSystem(globalparam.system_address)\\n\"\n \"\\t\\tfsscTest.login(fillPerson)\\n\"\n \"\\t\\tfsscTest.intoFillBillPage(billName)\\n\"\n \"\\t\\tbillNum = fsscTest.getBillNum()\\n\"\n \"\\t\\tpublicfunction.get_img(self.dr,billNum+\\\"_\\\"+strftime('%Y-%m-%d_%H_%M_%S')+\\\".jpg\\\")\\n\"\n \"\\t\\tfsscTest.typeInputBillValue(billNum,testCaseData)\\n\"\n \"\\t\\tpublicfunction.get_img(self.dr,billNum+\\\"_\\\"+strftime('%Y-%m-%d_%H_%M_%S')+\\\".jpg\\\")\\n\"\n \"\\t\\tfsscTest.saveBill(billNum)\\n\"\n \"\\t\\tfsscTest.switchToContentIframe()\\n\"\n \"\\t\\tverifyResult = fsscTest.verifyBillValue(billNum,testCaseData)\\n\"\n \"\\t\\tfsscTest.switch_to_iframe_out()\\n\"\n \"\\t\\tself.assertTrue(verifyResult[\\\"verifyResult\\\"],verifyResult[\\\"verifyMsg\\\"])\\n\"\n \"\\t\\tpublicfunction.get_img(self.dr,billNum+\\\"_\\\"+strftime('%Y-%m-%d_%H_%M_%S')+\\\".jpg\\\")\\n\"\n \"\\t\\tnextApproveList = fsscTest.submissionBill(billNum)\\n\"\n \"\\t\\tfsscTest.logoutSystem()\\n\"\n \"\\t\\tif approvalModel == \\\"Auto\\\":\\n\"\n \"\\t\\t\\tfsscTest.handleBillAuto(nextApproveList, billNum, testCaseData)\\n\"\n \"\\t\\telif approvalModel == \\\"Manual\\\":\\n\"\n \"\\t\\t\\tfsscTest.handleBillManual(billNum, testCaseData)\\n\")\n file.close()\n\nif __name__ == '__main__':\n createTestCase(\"allBaseData.xls\")\n","sub_path":"tool/pc/createTestCase.py","file_name":"createTestCase.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"449979531","text":"import pandas as pd\nimport numpy as np\n\n\ndef Fix_edad(fila, m1, m2, m3):\n if fila[\"nulo\"]:\n if fila[\"Pclass\"] == 1: \n #return m1\n fila[\"Age\"] = m1\n elif fila[\"Pclass\"] == 2: \n #return m2\n fila[\"Age\"] = m2\n else:\n #return m3\n fila[\"Age\"] = m3\n #else:\n return fila\n\n\ndf = pd.read_csv(\"input/titanic.csv\",sep=\",\")\n#Dado el # de None eliminamos columna cabin\ndf.drop(columns = \"Cabin\",inplace=True)\n#Hallamos edad media por cada clase dado que es la vaiable con mayor correlacion\ndf_aux = df[df[\"Age\"].notnull()][[\"Pclass\",\"Age\"]].sort_values(by=\"Pclass\")\nmedia_1 = np.mean((df_aux[(df_aux[\"Pclass\"]==1)][\"Age\"]))\nmedia_2 = np.mean((df_aux[(df_aux[\"Pclass\"]==2)][\"Age\"]))\nmedia_3 = np.mean((df_aux[(df_aux[\"Pclass\"]==3)][\"Age\"]))\n\n\n#incluimos la edad en funcion de la clase\ndf[\"nulo\"]=df[\"Age\"].isnull() \ndf = df.apply(Fix_edad,args=(media_1,media_2,media_3),axis=1)\n\ndf.drop(columns = \"nulo\",inplace=True)\n\n\n#Eliminamos filas sin info en embarked\ndf.drop(index=df[df[\"Embarked\"].isnull()].index, inplace=True)\n\n#Cambiamos survidad a boolean y sex a is_male y boolean\ndf[\"Survived\"] = df[\"Survived\"].apply(lambda x: True if x==1 else False)\ndf[\"Sex\"] = df[\"Sex\"].apply(lambda x: True if x==\"male\" else False)\ndf = df.rename(columns = {\"Sex\":\"is_male\"})\n\n#Ajustamos los tipos de valores\ndf = df.astype({'PassengerId': 'int',\n \"Pclass\": \"int\",\n \"SibSp\":\"int\",\n \"Parch\":\"int\"})\n\ntry:\n file = df.to_csv(\"output/clean_titanic.csv\")\n print(\"Done!\")\nexcept Exception as e:\n print(\"Found some error generating the CSV: \", e)\n","sub_path":"src/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"387470608","text":"# BEGIN: Imports from datastore_pb2\nfrom .datastore_pb2 import AllocateIdsRequest\nfrom .datastore_pb2 import AllocateIdsResponse\nfrom .datastore_pb2 import BeginTransactionRequest\nfrom .datastore_pb2 import BeginTransactionResponse\nfrom .datastore_pb2 import CommitRequest\nfrom .datastore_pb2 import CommitResponse\nfrom .datastore_pb2 import LookupRequest\nfrom .datastore_pb2 import LookupResponse\nfrom .datastore_pb2 import Mutation\nfrom .datastore_pb2 import MutationResult\nfrom .datastore_pb2 import ReadOptions\nfrom .datastore_pb2 import RollbackRequest\nfrom .datastore_pb2 import RollbackResponse\nfrom .datastore_pb2 import RunQueryRequest\nfrom .datastore_pb2 import RunQueryResponse\n# END: Imports from datastore_pb2\nimport grpc\nfrom grpc.beta import implementations as beta_implementations\nfrom grpc.beta import interfaces as beta_interfaces\nfrom grpc.framework.common import cardinality\nfrom grpc.framework.interfaces.face import utilities as face_utilities\n\n\nclass DatastoreStub(object):\n \"\"\"Each RPC normalizes the partition IDs of the keys in its input entities,\n and always returns entities with keys with normalized partition IDs.\n This applies to all keys and entities, including those in values, except keys\n with both an empty path and an empty or unset partition ID. Normalization of\n input keys sets the project ID (if not already set) to the project ID from\n the request.\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Lookup = channel.unary_unary(\n '/google.datastore.v1.Datastore/Lookup',\n request_serializer=LookupRequest.SerializeToString,\n response_deserializer=LookupResponse.FromString,\n )\n self.RunQuery = channel.unary_unary(\n '/google.datastore.v1.Datastore/RunQuery',\n request_serializer=RunQueryRequest.SerializeToString,\n response_deserializer=RunQueryResponse.FromString,\n )\n self.BeginTransaction = channel.unary_unary(\n '/google.datastore.v1.Datastore/BeginTransaction',\n request_serializer=BeginTransactionRequest.SerializeToString,\n response_deserializer=BeginTransactionResponse.FromString,\n )\n self.Commit = channel.unary_unary(\n '/google.datastore.v1.Datastore/Commit',\n request_serializer=CommitRequest.SerializeToString,\n response_deserializer=CommitResponse.FromString,\n )\n self.Rollback = channel.unary_unary(\n '/google.datastore.v1.Datastore/Rollback',\n request_serializer=RollbackRequest.SerializeToString,\n response_deserializer=RollbackResponse.FromString,\n )\n self.AllocateIds = channel.unary_unary(\n '/google.datastore.v1.Datastore/AllocateIds',\n request_serializer=AllocateIdsRequest.SerializeToString,\n response_deserializer=AllocateIdsResponse.FromString,\n )\n\n\nclass DatastoreServicer(object):\n \"\"\"Each RPC normalizes the partition IDs of the keys in its input entities,\n and always returns entities with keys with normalized partition IDs.\n This applies to all keys and entities, including those in values, except keys\n with both an empty path and an empty or unset partition ID. Normalization of\n input keys sets the project ID (if not already set) to the project ID from\n the request.\n\n \"\"\"\n\n def Lookup(self, request, context):\n \"\"\"Looks up entities by key.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def RunQuery(self, request, context):\n \"\"\"Queries for entities.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def BeginTransaction(self, request, context):\n \"\"\"Begins a new transaction.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Commit(self, request, context):\n \"\"\"Commits a transaction, optionally creating, deleting or modifying some\n entities.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Rollback(self, request, context):\n \"\"\"Rolls back a transaction.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def AllocateIds(self, request, context):\n \"\"\"Allocates IDs for the given keys, which is useful for referencing an entity\n before it is inserted.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_DatastoreServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Lookup': grpc.unary_unary_rpc_method_handler(\n servicer.Lookup,\n request_deserializer=LookupRequest.FromString,\n response_serializer=LookupResponse.SerializeToString,\n ),\n 'RunQuery': grpc.unary_unary_rpc_method_handler(\n servicer.RunQuery,\n request_deserializer=RunQueryRequest.FromString,\n response_serializer=RunQueryResponse.SerializeToString,\n ),\n 'BeginTransaction': grpc.unary_unary_rpc_method_handler(\n servicer.BeginTransaction,\n request_deserializer=BeginTransactionRequest.FromString,\n response_serializer=BeginTransactionResponse.SerializeToString,\n ),\n 'Commit': grpc.unary_unary_rpc_method_handler(\n servicer.Commit,\n request_deserializer=CommitRequest.FromString,\n response_serializer=CommitResponse.SerializeToString,\n ),\n 'Rollback': grpc.unary_unary_rpc_method_handler(\n servicer.Rollback,\n request_deserializer=RollbackRequest.FromString,\n response_serializer=RollbackResponse.SerializeToString,\n ),\n 'AllocateIds': grpc.unary_unary_rpc_method_handler(\n servicer.AllocateIds,\n request_deserializer=AllocateIdsRequest.FromString,\n response_serializer=AllocateIdsResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'google.datastore.v1.Datastore', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\nclass BetaDatastoreServicer(object):\n \"\"\"Each RPC normalizes the partition IDs of the keys in its input entities,\n and always returns entities with keys with normalized partition IDs.\n This applies to all keys and entities, including those in values, except keys\n with both an empty path and an empty or unset partition ID. Normalization of\n input keys sets the project ID (if not already set) to the project ID from\n the request.\n\n \"\"\"\n def Lookup(self, request, context):\n \"\"\"Looks up entities by key.\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n def RunQuery(self, request, context):\n \"\"\"Queries for entities.\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n def BeginTransaction(self, request, context):\n \"\"\"Begins a new transaction.\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n def Commit(self, request, context):\n \"\"\"Commits a transaction, optionally creating, deleting or modifying some\n entities.\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n def Rollback(self, request, context):\n \"\"\"Rolls back a transaction.\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n def AllocateIds(self, request, context):\n \"\"\"Allocates IDs for the given keys, which is useful for referencing an entity\n before it is inserted.\n \"\"\"\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n\n\nclass BetaDatastoreStub(object):\n \"\"\"Each RPC normalizes the partition IDs of the keys in its input entities,\n and always returns entities with keys with normalized partition IDs.\n This applies to all keys and entities, including those in values, except keys\n with both an empty path and an empty or unset partition ID. Normalization of\n input keys sets the project ID (if not already set) to the project ID from\n the request.\n\n \"\"\"\n def Lookup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Looks up entities by key.\n \"\"\"\n raise NotImplementedError()\n Lookup.future = None\n def RunQuery(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Queries for entities.\n \"\"\"\n raise NotImplementedError()\n RunQuery.future = None\n def BeginTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Begins a new transaction.\n \"\"\"\n raise NotImplementedError()\n BeginTransaction.future = None\n def Commit(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Commits a transaction, optionally creating, deleting or modifying some\n entities.\n \"\"\"\n raise NotImplementedError()\n Commit.future = None\n def Rollback(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Rolls back a transaction.\n \"\"\"\n raise NotImplementedError()\n Rollback.future = None\n def AllocateIds(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n \"\"\"Allocates IDs for the given keys, which is useful for referencing an entity\n before it is inserted.\n \"\"\"\n raise NotImplementedError()\n AllocateIds.future = None\n\n\ndef beta_create_Datastore_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):\n request_deserializers = {\n ('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsRequest.FromString,\n ('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionRequest.FromString,\n ('google.datastore.v1.Datastore', 'Commit'): CommitRequest.FromString,\n ('google.datastore.v1.Datastore', 'Lookup'): LookupRequest.FromString,\n ('google.datastore.v1.Datastore', 'Rollback'): RollbackRequest.FromString,\n ('google.datastore.v1.Datastore', 'RunQuery'): RunQueryRequest.FromString,\n }\n response_serializers = {\n ('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsResponse.SerializeToString,\n ('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionResponse.SerializeToString,\n ('google.datastore.v1.Datastore', 'Commit'): CommitResponse.SerializeToString,\n ('google.datastore.v1.Datastore', 'Lookup'): LookupResponse.SerializeToString,\n ('google.datastore.v1.Datastore', 'Rollback'): RollbackResponse.SerializeToString,\n ('google.datastore.v1.Datastore', 'RunQuery'): RunQueryResponse.SerializeToString,\n }\n method_implementations = {\n ('google.datastore.v1.Datastore', 'AllocateIds'): face_utilities.unary_unary_inline(servicer.AllocateIds),\n ('google.datastore.v1.Datastore', 'BeginTransaction'): face_utilities.unary_unary_inline(servicer.BeginTransaction),\n ('google.datastore.v1.Datastore', 'Commit'): face_utilities.unary_unary_inline(servicer.Commit),\n ('google.datastore.v1.Datastore', 'Lookup'): face_utilities.unary_unary_inline(servicer.Lookup),\n ('google.datastore.v1.Datastore', 'Rollback'): face_utilities.unary_unary_inline(servicer.Rollback),\n ('google.datastore.v1.Datastore', 'RunQuery'): face_utilities.unary_unary_inline(servicer.RunQuery),\n }\n server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)\n return beta_implementations.server(method_implementations, options=server_options)\n\n\ndef beta_create_Datastore_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsRequest.SerializeToString,\n ('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionRequest.SerializeToString,\n ('google.datastore.v1.Datastore', 'Commit'): CommitRequest.SerializeToString,\n ('google.datastore.v1.Datastore', 'Lookup'): LookupRequest.SerializeToString,\n ('google.datastore.v1.Datastore', 'Rollback'): RollbackRequest.SerializeToString,\n ('google.datastore.v1.Datastore', 'RunQuery'): RunQueryRequest.SerializeToString,\n }\n response_deserializers = {\n ('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsResponse.FromString,\n ('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionResponse.FromString,\n ('google.datastore.v1.Datastore', 'Commit'): CommitResponse.FromString,\n ('google.datastore.v1.Datastore', 'Lookup'): LookupResponse.FromString,\n ('google.datastore.v1.Datastore', 'Rollback'): RollbackResponse.FromString,\n ('google.datastore.v1.Datastore', 'RunQuery'): RunQueryResponse.FromString,\n }\n cardinalities = {\n 'AllocateIds': cardinality.Cardinality.UNARY_UNARY,\n 'BeginTransaction': cardinality.Cardinality.UNARY_UNARY,\n 'Commit': cardinality.Cardinality.UNARY_UNARY,\n 'Lookup': cardinality.Cardinality.UNARY_UNARY,\n 'Rollback': cardinality.Cardinality.UNARY_UNARY,\n 'RunQuery': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'google.datastore.v1.Datastore', cardinalities, options=stub_options)\n","sub_path":"gcloudoem/datastore/_generated/datastore_grpc_pb2.py","file_name":"datastore_grpc_pb2.py","file_ext":"py","file_size_in_byte":13799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"633340781","text":"from django.core.cache import caches\nfrom django.test import Client, TestCase\n\nfrom wagtail.wagtailcore.blocks import StreamValue\n\nfrom mock import patch\nfrom scripts import _atomic_helpers as atomic\n\nfrom v1.models.blog_page import BlogPage\nfrom v1.models.browse_filterable_page import BrowseFilterablePage\nfrom v1.tests.wagtail_pages.helpers import (\n publish_changes, publish_page, save_new_page\n)\n\n\nclass TestFragmentCacheExtension(TestCase):\n def test_cache_gets_called_when_visiting_filterable_page(self):\n # Create a filterable page\n page = BrowseFilterablePage(\n title='test browse filterable page',\n slug='test-browse-filterable-page'\n )\n page.content = StreamValue(\n page.content.stream_block,\n [atomic.filter_controls],\n True\n )\n publish_page(page)\n\n # Add a child to that filterable page so that there are results with a post preview\n child_page = BlogPage(\n title='test blog page',\n slug='test-blog-page'\n )\n page.add_child(instance=child_page)\n\n cache = caches['post_preview']\n with patch.object(cache, 'add') as add_to_cache:\n # Navigate to the filterable page so that `post-preview.html` loads\n self.client.get('/test-browse-filterable-page/')\n\n self.assertTrue(add_to_cache.called)\n","sub_path":"cfgov/v1/tests/jinja2tags/test_fragment_cache.py","file_name":"test_fragment_cache.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"419848432","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nfrom node_lib import *\nclass Solution:\n def deleteNode(self, node):\n #node.val = node.next.val\n #node.next = node.next.next\n node = node.next\n pr(node)\nsol = Solution()\nnode = makeNode([4,5,1,9])\nprint(sol.deleteNode(node))\n","sub_path":"539. Delete Node in a Linked List.py","file_name":"539. Delete Node in a Linked List.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"391428343","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Product, Category, Slider,School_Services, Company_Services, IT_Services\nfrom django.http import HttpResponse\nfrom django.db.models import Q\n\n# Create your views here.\n\n\ndef search(request, user):\n categories = Category.objects.all()\n query = request.GET['q']\n if query:\n products = Product.objects.filter(name__icontains=query) | Product.objects.filter(company__icontains=query) | Product.objects.filter(short_description__icontains=query) | Product.objects.filter(type__name__icontains=query)\n return render(request, 'szukaj.html', {'products': products, 'categories': categories})\n else:\n sliders = Slider.objects.all()\n return render(request, 'index.html', {'categories': categories, 'sliders': sliders})\n\n\ndef main_page(request):\n categories = Category.objects.all()\n sliders = Slider.objects.all()\n return render(request, 'index.html', {'categories': categories, 'sliders': sliders})\n\ndef get_product(request, name):\n categories = Category.objects.all()\n products = Product.objects.filter(available=True, category__name=name).order_by('type','company', 'name')\n return render(request, 'produkty.html', {'products': products, 'categories': categories})\n\ndef product_detail(request, name):\n categories = Category.objects.all()\n product = get_object_or_404(Product, name=name)\n # product = Product.objects.get(name=name)\n return render(request, 'product_detail.html', {'product': product, 'categories': categories})\n\ndef szkoly(request):\n categories = Category.objects.all()\n school_services = School_Services.objects.all()\n return render(request, 'szkoly.html', {'categories': categories, 'school_services': school_services})\n\ndef firmy(request):\n categories = Category.objects.all()\n company_services = Company_Services.objects.all()\n return render(request, 'firmy.html', {'categories': categories, 'company_services': company_services})\n\ndef uslugi(request):\n categories = Category.objects.all()\n it_services = IT_Services.objects.all()\n return render(request, 'uslugi_informatyczne.html', {'categories': categories, 'it_services': it_services})\n\ndef kontakt(request):\n categories = Category.objects.all()\n return render(request, 'kontakt.html', {'categories': categories})","sub_path":"webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"608767385","text":"from __future__ import absolute_import\n\nfrom rest_framework.response import Response\n\nfrom sentry.api.bases import OrganizationMemberEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Group, GroupStatus, OrganizationMemberTeam, Project\n\n\nclass OrganizationMemberIssuesBookmarkedEndpoint(OrganizationMemberEndpoint):\n def get(self, request, organization, member):\n \"\"\"\n Return a list of issues assigned to the given member.\n \"\"\"\n project_list = Project.objects.filter(\n organization=organization,\n team__in=OrganizationMemberTeam.objects.filter(\n organizationmember=member,\n is_active=True,\n ).values('team')\n )\n\n queryset = Group.objects.filter(\n bookmark_set__user=member.user,\n bookmark_set__project__in=project_list,\n ).extra(\n select={'sort_by': 'sentry_groupbookmark.date_added'},\n ).order_by('-sort_by')\n\n status = request.GET.get('status', 'unresolved')\n if status == 'unresolved':\n queryset = queryset.filter(\n status=GroupStatus.UNRESOLVED,\n )\n elif status:\n return Response({'status': 'Invalid status choice'}, status=400)\n\n return self.paginate(\n request=request,\n queryset=queryset,\n order_by='-sort_by',\n paginator_cls=DateTimePaginator,\n on_results=lambda x: serialize(x, request.user),\n )\n","sub_path":"src/sentry/api/endpoints/organization_member_issues_bookmarked.py","file_name":"organization_member_issues_bookmarked.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"619726411","text":"import os\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nfrom config.errorCode import *\nfrom PyQt5.QtTest import *\n#QAxWidget에 있는 것들을 전부 사용하겠다.\nclass Kiwoom(QAxWidget) :\n def __init__(self):\n super().__init__() #QAxWidget에 있는 메소드를 사용하기 위해 초기화\n print('키움 Class 입니다')\n #eventLoop 모듈\n self.login_event_loop = None\n self.detail_account_info_event_loop = QEventLoop()\n self.calculator_event_loop = QEventLoop()\n ################\n\n #스크린변수모음##\n self.screen_my_info = \"2000\"\n self.screen_day_info = \"3000\"\n self.screen_calculation_stock = \"4000\"\n self.screen_meme_stock = \"6000\" # 종목별 할당할 주문용스크린 번호\n self.screen_real_stock = \"5000\" # 종목별 할당할 스크린 번호\n ##############\n\n #변수모음 ########\n self.account_num = None\n self.account_stock_dict = {}\n self.not_account_stock_dict = {}\n #################\n\n ######종목분석용#####\n self.calcul_data = []\n ###################\n\n ######## 종목 정보 가져오기##########\n self.portfolio_stock_dict = {}\n ################################\n\n ###계좌관련변수 ####\n self.use_money = 0\n self.use_money_percent= 0.5\n ###############\n\n self.get_ocx_instance()\n self.event_slots()\n\n self.signal_login_commconnect()\n self.get_account_info()\n self.detail_account_info() # 예수금 가져오기\n self.detail_account_mystock() # 계좌평가잔고내역요청 가져오기\n self.not_concluded_account() #미체결 요청\n #self.calculator_fnc() #종목 분석용, 임시용\n\n self.read_code()\n self.screen_number_setting()\n def get_ocx_instance(self):\n self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기\n\n def event_slots(self):\n self.OnEventConnect.connect(self.login_slot)\n self.OnReceiveTrData.connect(self.trdata_slot)\n\n def signal_login_commconnect(self):\n self.dynamicCall('CommConnect()') #다른 응용프로그램에 전송을 할 수 있게끔 하는 함수\n self.login_event_loop = QEventLoop()\n self.login_event_loop.exec_()\n\n def login_slot(self, errCode):\n print(errors(errCode))\n self.login_event_loop.exit()\n\n def get_account_info(self):\n account_list = self.dynamicCall(\"GetLoginInfo(String)\",\"ACCLIST\")\n self.account_num = account_list.split(';')[0]\n print(\"나의 보유 계좌번호 %s \" % self.account_num) # 8142633311\n\n def detail_account_info(self):\n print('예수금 가져오기')\n self.dynamicCall(\"SetInputValue(String,String)\", \"계좌번호\", self.account_num)\n self.dynamicCall(\"SetInputValue(String,String)\", \"비밀번호\", \"0000\")\n self.dynamicCall(\"SetInputValue(String,String)\", \"비밀번호입력매체구분\", \"00\")\n self.dynamicCall(\"SetInputValue(String,String)\", \"조회구분\", \"1\")\n self.dynamicCall(\"CommRqData(String,String,int,String)\", \"예수금상세현황요청\",\"opw00001\",\"0\",self.screen_my_info)\n\n self.detail_account_info_event_loop.exec_()\n\n def detail_account_mystock(self, sPrevNext=\"0\"):\n print('계좌평가잔고내역요청')\n self.dynamicCall(\"SetInputValue(String,String)\", \"계좌번호\", self.account_num)\n self.dynamicCall(\"SetInputValue(String,String)\", \"비밀번호\", \"0000\")\n self.dynamicCall(\"SetInputValue(String,String)\", \"비밀번호입력매체구분\", \"00\")\n self.dynamicCall(\"SetInputValue(String,String)\", \"조회구분\", \"1\")\n self.dynamicCall(\"CommRqData(String,String,int,String)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info)\n\n self.detail_account_info_event_loop.exec_()\n\n def not_concluded_account(self, sPrevNext=\"0\"):\n\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\")\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\")\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext,self.screen_my_info)\n\n self.detail_account_info_event_loop.exec_()\n\n def trdata_slot(self, sScrNo,sRQName,sTrCode,sRecordName,sPrevNext):\n '''\n tr 요청을 받는 구역임. 슬롯\n :param sScrNo: 스크린번호\n :param sRQName: 내가 요청했을 때 지은 이름\n :param sTrCode: 요청ID, tr코드\n :param sRecordName: 사용 안함\n :param sPrevNext: 다음 페이지가 있는지\n :return:\n '''\n if sRQName == \"예수금상세현황요청\" :\n deposit = self.dynamicCall(\"GetCommData(String,String,int,String)\",sTrCode,sRQName, 0 ,\"예수금\")\n print(\"예수금 %s\" %int(deposit))\n\n self.use_money = int(deposit) * self.use_money_percent\n self.use_moeny = self.use_money / 4\n\n ok_deposit = self.dynamicCall(\"GetCommData(String,String,int,String)\", sTrCode, sRQName, 0, \"출금가능금액\")\n print(int(ok_deposit))\n self.detail_account_info_event_loop.exit()\n elif sRQName == \"계좌평가잔고내역요청\" :\n total_buy_money = self.dynamicCall(\"GetCommData(String,String,int,String)\",sTrCode,sRQName, 0 ,\"총매입금액\")\n total_buy_money_result = int(total_buy_money)\n\n print(\"총매입금액 %s\" %total_buy_money_result)\n\n total_profit_loss_rate = self.dynamicCall(\"GetCommData(String,String,int,String)\",sTrCode,sRQName, 0 ,\"총수익률(%)\")\n total_profit_loss_rate_result = float(total_profit_loss_rate)\n\n print(\"총수익률(%%) : %s\" %total_profit_loss_rate_result)\n\n\n rows = self.dynamicCall(\"GetRepeatCnt(QString,QString)\",sTrCode,sRQName)\n cnt = 0\n print(rows)\n for i in range(rows) :\n code = self.dynamicCall(\"GetCommData(QString,QString,int, QString)\",sTrCode,sRQName,i,\"종목번호\") # 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목\n code = code.strip()[1:]\n\n code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"종목명\") # 출럭 : 한국기업평가\n stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"보유수량\") # 보유수량 : 000000000000010\n buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"매입가\") # 매입가 : 000000000054100\n learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"수익률(%)\") # 수익률 : -000000001.94\n current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"현재가\") # 현재가 : 000000003450\n total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"매입금액\")\n possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"매매가능수량\")\n\n if code in self.account_stock_dict :\n pass\n else :\n self.account_stock_dict.update({code:{}})\n\n code_nm = code_nm.strip()\n stock_quantity = int(stock_quantity.strip())\n buy_price = int(buy_price.strip())\n learn_rate = float(learn_rate.strip())\n current_price = int(current_price.strip())\n total_chegual_price = int(total_chegual_price.strip())\n\n possible_quantity = int(possible_quantity.strip())\n\n self.account_stock_dict[code].update({\"종목명\": code_nm})\n self.account_stock_dict[code].update({\"보유수량\": stock_quantity})\n self.account_stock_dict[code].update({\"매입가\": buy_price})\n self.account_stock_dict[code].update({\"수익률(%)\": learn_rate})\n self.account_stock_dict[code].update({\"현재가\": current_price})\n self.account_stock_dict[code].update({\"매입금액\": total_chegual_price})\n self.account_stock_dict[code].update({'매매가능수량': possible_quantity})\n\n cnt += 1\n\n #스크롤페이징기능\n if sPrevNext == \"2\":\n self.detail_account_mystock(sPrevNext=\"2\")\n else:\n self.detail_account_info_event_loop.exit()\n\n self.detail_account_info_event_loop.exit()\n\n elif sRQName == \"실시간미체결요청\":\n print('실시간미체결요청')\n rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)\n for i in range(rows):\n code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\")\n code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\")\n order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\")\n order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"주문상태\") # 접수,확인,체결\n order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"주문수량\")\n order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"주문가격\")\n order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"주문구분\") # -매도, +매수, -매도정정, +매수정정\n not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"미체결수량\")\n ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"체결량\")\n\n code = code.strip()\n code_nm = code_nm.strip()\n order_no = int(order_no.strip())\n order_status = order_status.strip()\n order_quantity = int(order_quantity.strip())\n order_price = int(order_price.strip())\n order_gubun = order_gubun.strip().lstrip('+').lstrip('-')\n not_quantity = int(not_quantity.strip())\n ok_quantity = int(ok_quantity.strip())\n\n if order_no in self.not_account_stock_dict:\n pass\n else:\n self.not_account_stock_dict[order_no] = {}\n\n self.not_account_stock_dict[order_no].update({'종목코드': code})\n self.not_account_stock_dict[order_no].update({'종목명': code_nm})\n self.not_account_stock_dict[order_no].update({'주문번호': order_no})\n self.not_account_stock_dict[order_no].update({'주문상태': order_status})\n self.not_account_stock_dict[order_no].update({'주문수량': order_quantity})\n self.not_account_stock_dict[order_no].update({'주문가격': order_price})\n self.not_account_stock_dict[order_no].update({'주문구분': order_gubun})\n self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity})\n self.not_account_stock_dict[order_no].update({'체결량': ok_quantity})\n\n self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no])\n\n self.detail_account_info_event_loop.exit()\n\n elif sRQName == \"주식일봉차트조회\":\n\n code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\")\n code = code.strip()\n # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName)\n # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]]\n cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)\n print(cnt)\n #print('%s일봉데이터 요청' %code)\n\n #한번 조회하면 600일치의 일봉데이터를 볼 수 있음.\n print(\"데이터 일수 %s\" % cnt)\n for i in range(cnt) :\n data = []\n\n current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"현재가\") # 출력 : 000070\n value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"거래량\") # 출력 : 000070\n trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"거래대금\") # 출력 : 000070\n date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"일자\") # 출력 : 000070\n start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"시가\") # 출력 : 000070\n high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"고가\") # 출력 : 000070\n low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,\"저가\") # 출력 : 000070\n\n data.append(\"\")\n data.append(current_price.strip())\n data.append(value.strip())\n data.append(trading_value.strip())\n data.append(date.strip())\n data.append(start_price.strip())\n data.append(high_price.strip())\n data.append(low_price.strip())\n data.append(\"\")\n #getCommDataEx를 쓰면 이 방법을 사용하지 않아도 됨.\n self.calcul_data.append(data.copy())\n #print(self.calcul_data)\n if sPrevNext == \"2\":\n self.day_kiwoom_db(code=code, sPrevNext=sPrevNext)\n else :\n #원하는 종목만 보고싶다.\n print(self.calcul_data)\n pass_success = False\n # 120일 이평선을 그릴만큼의 데이터가 있는지 체크\n if self.calcul_data == None or len(self.calcul_data) < 120:\n pass_success = False\n\n else:\n # 120일 이평선의 최근 가격 구함\n total_price = 0\n for value in self.calcul_data[:120]:\n total_price += int(value[1])\n moving_average_price = total_price / 120\n\n # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인\n bottom_stock_price = False\n check_price = None\n #6:고가 7:저가\n if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]):\n print(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\")\n bottom_stock_price = True\n check_price = int(self.calcul_data[0][6]) #현재기준 '고가'가 과거기준 '저가'보다 높아야 하는 case를 찾아야함.\n #그랜빌의 매수법칙 참조할 것 .\n\n # 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인\n prev_price = None\n if bottom_stock_price == True:\n\n moving_average_price_prev = 0\n price_top_moving = False\n idx = 1\n while True:\n\n if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인\n print(\"120일치가 없음\")\n break\n\n total_price = 0\n for value in self.calcul_data[idx:120 + idx]:\n total_price += int(value[1])\n moving_average_price_prev = total_price / 120\n\n if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 5:\n print(\"5일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함\")\n price_top_moving = False\n break\n\n elif int(self.calcul_data[idx][\n 7]) > moving_average_price_prev and idx > 5: # 120일 이평선 위에 있는 구간 존재\n print(\"120일치 이평선 위에 있는 구간 확인됨\")\n price_top_moving = True\n prev_price = int(self.calcul_data[idx][7])\n break\n\n idx += 1\n # 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인\n if price_top_moving == True:\n if moving_average_price > moving_average_price_prev and check_price > prev_price:\n self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\")\n self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\")\n pass_success = True\n\n if pass_success == True:\n print(\"조건부 통과됨\")\n\n code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code)\n\n f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\")\n f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1])))\n f.close()\n\n\n elif pass_success == False:\n print(\"조건부 통과 못함\")\n\n self.calcul_data.clear() #리스트삭제\n self.calculator_event_loop.exit()\n\n self.calculator_event_loop.exit()\n\n def get_code_list_by_market(self, market_code):\n '''\n 종목코드 리스트 받기\n #0:장내, 10:코스닥\n :param market_code: 시장코드 입력\n :return:\n '''\n code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code)\n code_list = code_list.split(';')[:-1]\n return code_list\n\n def calculator_fnc(self):\n '''\n 종목 분석관련 함수 모음\n :return:\n '''\n\n code_list = self.get_code_list_by_market(\"10\") #코스닥종목 가져오기\n print(\"코스닥 갯수 %s \" % len(code_list))\n\n for idx, code in enumerate(code_list):\n self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기\n\n print(\"%s / %s : KOSDAQ Stock Code : %s is updating... \" % (idx + 1, len(code_list), code))\n self.day_kiwoom_db(code=code)\n\n def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"):\n\n QTest.qWait(3600) # 3.6초마다 딜레이를 준다.\n\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code)\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\")\n\n if date != None:\n self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date)\n\n self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext,self.screen_calculation_stock) # Tr서버로 전송 -Transaction\n\n self.calculator_event_loop.exec_()\n\n def read_code(self):\n\n if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다.\n f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다.\n\n lines = f.readlines() # 파일에 있는 내용들이 모두 읽어와 진다.\n for line in lines: # 줄바꿈된 내용들이 한줄 씩 읽어와진다.\n if line != \"\":\n ls = line.split(\"\\t\")\n stock_code = ls[0]\n stock_name = ls[1]\n stock_price = int(ls[2].split(\"\\n\")[0])\n stock_price = abs(stock_price)\n\n self.portfolio_stock_dict.update({stock_code: {\"종목명\": stock_name, \"현재가\": stock_price}})\n print(self.portfolio_stock_dict)\n f.close()\n\n def screen_number_setting(self):\n\n screen_overwrite = []\n\n # 계좌평가잔고내역에 있는 종목들\n for code in self.account_stock_dict.keys():\n if code not in screen_overwrite:\n screen_overwrite.append(code)\n\n # 미체결에 있는 종목들\n for order_number in self.not_account_stock_dict.keys():\n code = self.not_account_stock_dict[order_number]['종목코드']\n\n if code not in screen_overwrite:\n screen_overwrite.append(code)\n\n # 포트폴리로에 담겨있는 종목들\n for code in self.portfolio_stock_dict.keys():\n\n if code not in screen_overwrite:\n screen_overwrite.append(code)\n\n # 스크린번호 할당\n cnt = 0\n for code in screen_overwrite:\n\n temp_screen = int(self.screen_real_stock)\n meme_screen = int(self.screen_meme_stock)\n\n if (cnt % 50) == 0:\n temp_screen += 1\n self.screen_real_stock = str(temp_screen)\n\n if (cnt % 50) == 0:\n meme_screen += 1\n self.screen_meme_stock = str(meme_screen)\n\n if code in self.portfolio_stock_dict.keys():\n self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)})\n self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)})\n\n elif code not in self.portfolio_stock_dict.keys():\n self.portfolio_stock_dict.update(\n {code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}})\n\n cnt += 1\n print(self.portfolio_stock_dict)","sub_path":"kiwoom/kiwoom.py","file_name":"kiwoom.py","file_ext":"py","file_size_in_byte":23460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"619551832","text":"#!/usr/bin/python\nimport sys\n\nf = open(sys.argv[1], 'r')\nlines = []\n\nfor line in f:\n if line == \"\" or line == \"\\n\":\n continue\n\n line = line.rstrip()\n lines.append(line)\n\nnumLines = int(lines.pop(0))\n\nlines.sort(key = len)\n\nfor x in range(1, numLines + 1):\n print(lines[-x])\n \nf.close()\n","sub_path":"longest-lines/longest-lines.py","file_name":"longest-lines.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"549700706","text":"from pygame.locals import *\nimport pygame\nimport time\n\n##SNAKE\nclass Apple():\n x = 0\n y = 0\n apple_width = 30\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def draw(self, surface, image):\n surface.blit(image, (self.x, self.y))\n\nclass Player:\n x = [0]\n y = [0]\n block_width = 35\n direction = 0\n length = 3\n\n\n def __init__(self,length):\n self.length = length\n for i in range(0,length):\n self.x.append(0)\n self.y.append(0)\n\n self.x[1] = self.block_width * -1\n self.x[2] = self.block_width * -2\n\n def update(self):\n for j in range(0,2):\n for i in range(self.length-1,0,-1):\n self.x[i] = self.x[i-1]\n self.y[i] = self.y[i-1]\n if self.direction == 0:\n self.x[0] = self.x[0] + self.block_width\n if self.direction == 1:\n self.x[0] = self.x[0] - self.block_width\n if self.direction == 2:\n self.y[0] = self.y[0] - self.block_width\n if self.direction == 3:\n self.y[0] = self.y[0] + self.block_width\n\n\n def moveRight(self):\n self.direction = 0\n\n def moveLeft(self):\n self.direction = 1\n\n def moveUp(self):\n self.direction = 2\n\n def moveDown(self):\n self.direction = 3\n\n\n def draw(self, surface, image):\n for i in range(0,self.length):\n surface.blit(image,(self.x[i],self.y[i]))\n\nclass App:\n windowWidth = 800\n windowHeight = 600\n player = 0\n\n def __init__(self):\n self.running = True\n self.display_surf = None\n self.image_surf = None\n self.player = Player(3)\n self.apple = Apple(400,400)\n\n def on_init(self):\n pygame.init()\n self.display_surf = pygame.display.set_mode((self.windowWidth, self.windowHeight), pygame.HWSURFACE)\n pygame.display.set_caption(\"Snake\")\n self.running = True\n self.image_surf = pygame.image.load(\"square.png\").convert()\n self.apple_surf = pygame.image.load(\"square.png\").convert()\n\n def on_event(self,event):\n if event.type == QUIT:\n self.running = False\n\n def on_loop(self):\n self.player.update()\n\n for i in range(0, self.player.length):\n if self.isCollision(self.apple.x, self.apple.y, self.player.x[i], self.player.y[i], self.apple.apple_width):\n self.apple.x = randint(0,self.windowWidth-self.apple.apple_width)\n self.apple.y = randint(0,self.windowHeight-self.apple.apple_width)\n self.player.length = self.player.length + 1\n\n for i in range(2,self.player.length):\n if self.isCollision(self.player.x[0],self.player.y[0],self.player.x[i],self.player.y[i],self.player.block_width):\n print(\"You lose! Collision: \")\n print(\"x[0] (\" + str(self.player.x[0]) + \",\" + str(self.player.y[0]) + \")\")\n print(\"x[\" + str(i) + \"] (\" + str(self.player.x[i]) + \",\" + str(self.player.y[i]) + \")\")\n exit(0)\n\n\n def on_render(self):\n self.display_surf.fill((0, 0, 0))\n self.player.draw(self.display_surf, self.image_surf)\n self.apple.draw(self.display_surf, self.apple_surf)\n pygame.display.flip()\n\n def on_cleanup(self):\n pygame.quit()\n\n def isCollision(self,x1,y1,x2,y2,bsize):\n if x1 >= x2 and x1 <= x2+bsize:\n if y1 >= y2 and y1<= y2+bsize:\n return True\n return False\n\n def on_execute(self):\n if self.on_init() == False:\n self.running = False\n\n while self.running:\n pygame.event.pump()\n keys = pygame.key.get_pressed()\n\n if keys[K_RIGHT]:\n self.player.moveRight()\n\n if keys[K_LEFT]:\n self.player.moveLeft()\n\n if keys[K_UP]:\n self.player.moveUp()\n\n if keys[K_DOWN]:\n self.player.moveDown()\n\n self.on_loop()\n self.on_render()\n time.sleep(50.0 / 1000.0)\n self.on_cleanup()\n\n\nif __name__ == \"__main__\":\n app1 = App()\n app1.on_execute()","sub_path":"Snake/Snake1.py","file_name":"Snake1.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"33766327","text":"\"\"\"Convert a Matlab patternsDesign file to a config file for rtfMRI\"\"\"\nimport os\nimport logging\nimport numpy as np # type: ignore\nfrom rtfMRI.StructDict import StructDict\nfrom rtfMRI.utils import loadMatFile, findNewestFile\nfrom rtfMRI.Errors import ValidationError\n\n\ndef getLocalPatternsFile(session, subjectDataDir, runId):\n if session.findNewestPatterns:\n # load the newest file patterns\n patternsFilename = findPatternsDesignFile(session, subjectDataDir, runId)\n else:\n idx = getRunIndex(session, runId)\n if idx >= 0 and len(session.patternsDesignFiles) > idx:\n patternsFilename = session.patternsDesignFiles[idx]\n patternsFilename = os.path.join(subjectDataDir, os.path.basename(patternsFilename))\n else:\n # either not enough runs specified or not enough patternsDesignFiles specified\n if idx < 0:\n raise ValidationError(\"Insufficient runs specified in config file session: \"\n \"run {} idx {}\".format(runId, idx))\n else:\n raise ValidationError(\"Insufficient patternsDesignFiles specified in \"\n \"config file session for run {}\".format(runId))\n # load and parse the pattensDesign file\n logging.info(\"Using Local Patterns file: %s\", patternsFilename)\n patterns = loadMatFile(patternsFilename)\n return patterns, patternsFilename\n\n\ndef createRunConfig(session, patterns, runId, scanNum=-1):\n run = StructDict()\n run.runId = runId\n idx = getRunIndex(session, runId)\n if scanNum >= 0:\n run.scanNum = scanNum\n elif session.ScanNums is not None and idx >= 0 and len(session.ScanNums) > idx:\n run.scanNum = session.ScanNums[idx]\n else:\n run.scanNum = -1\n\n run.disdaqs = int(patterns.disdaqs)\n run.nBlocksPerPhase = int(patterns.nBlocksPerPhase)\n run.TRTime = int(patterns.TR)\n run.nTRs = int(patterns.nTRs)\n run.nTRsFix = int(patterns.nTRsFix)\n\n run.firstVolPhase1 = int(np.min(np.where(patterns.block.squeeze() == 1)))\n run.lastVolPhase1 = int(np.max(np.where(patterns.block.squeeze() == patterns.nBlocksPerPhase)))\n if run.lastVolPhase1 != patterns.lastVolPhase1-1:\n raise ValidationError(\"createRunConfig: calulated lastVolPhase1 is same as loaded from\"\n \"patternsdesign {} {}\".format(run.lastVolPhase1, patterns.lastVolPhase1))\n run.nVolsPhase1 = run.lastVolPhase1 - run.firstVolPhase1 + 1\n run.firstVolPhase2 = int(np.min(np.where(patterns.block.squeeze() == (patterns.nBlocksPerPhase+1))))\n if run.firstVolPhase2 != patterns.firstVolPhase2-1:\n raise ValidationError(\"createRunConfig: calulated firstVolPhase2 is same as loaded from \"\n \"patternsdesign {} {}\".format(run.firstVolPhase2, patterns.firstVolPhase2))\n run.lastVolPhase2 = int(np.max(np.where(patterns.type.squeeze() != 0)))\n run.nVolsPhase2 = run.lastVolPhase2 - run.firstVolPhase2 + 1\n\n sumRegressor = patterns.regressor[0, :] + patterns.regressor[1, :]\n run.firstTestTR = int(np.min(np.where(sumRegressor == 1)))\n\n run.nVols = patterns.block.shape[1]\n\n blockGroups = []\n\n blkGrp1 = createBlockGroupConfig(range(run.firstVolPhase2), patterns)\n blkGrp1.blkGrpId = 1\n blkGrp1.nTRs = run.firstVolPhase2\n blockGroups.append(blkGrp1)\n\n blkGrp2 = createBlockGroupConfig(range(run.firstVolPhase2, run.nVols), patterns)\n blkGrp2.blkGrpId = 2\n blkGrp2.nTRs = run.nVols - run.firstVolPhase2\n blockGroups.append(blkGrp2)\n\n run.blockGroups = blockGroups\n return run\n\n\ndef createBlockGroupConfig(tr_range, patterns):\n blkGrp = StructDict()\n blkGrp.blocks = []\n blkGrp.type = 0\n blkGrp.firstVol = tr_range[0]\n block = StructDict()\n blockNum = -1\n for iTR in tr_range:\n if patterns.block[0, iTR] > 0 and patterns.block[0, iTR] != blockNum:\n if blockNum >= 0:\n blkGrp.blocks.append(block)\n blockNum = int(patterns.block[0, iTR])\n block = StructDict()\n block.blockId = blockNum\n block.TRs = []\n tr = StructDict()\n tr.trId = iTR - blkGrp.firstVol\n tr.vol = iTR + 1\n tr.attCateg = int(patterns.attCateg[0, iTR])\n tr.stim = int(patterns.stim[0, iTR])\n tr.type = int(patterns.type[0, iTR])\n if tr.type != 0:\n if blkGrp.type == 0:\n blkGrp.type = tr.type\n if blkGrp.type != tr.type:\n raise ValidationError(\"createBlockGroupConfig: inconsistent TR types in block group\")\n tr.regressor = [int(patterns.regressor[0, iTR]), int(patterns.regressor[1, iTR])]\n block.TRs.append(tr)\n if len(block.TRs) > 0:\n blkGrp.blocks.append(block)\n return blkGrp\n\n\ndef getPatternsFileRegex(session, dataDir, runId, addRunDir=False):\n filePattern = 'patternsdesign_' + str(runId) + '*.mat'\n if addRunDir:\n patternsFilename = os.path.join(dataDir, 'run'+str(runId), filePattern)\n else:\n patternsFilename = os.path.join(dataDir, filePattern)\n return patternsFilename\n\n\ndef findPatternsDesignFile(session, dataDir, runId):\n fullPathRegex = getPatternsFileRegex(session, dataDir, runId, addRunDir=True)\n baseDir, filePattern = os.path.split(fullPathRegex)\n pdesignFile = findNewestFile(baseDir, filePattern)\n if pdesignFile is not None and pdesignFile != '':\n return pdesignFile\n fullPathRegex = getPatternsFileRegex(session, dataDir, runId)\n pdesignFile = findNewestFile('', fullPathRegex)\n if pdesignFile is None or pdesignFile == '':\n raise FileNotFoundError(\"No files found matching {}\".format(fullPathRegex))\n return pdesignFile\n\n\ndef getRunIndex(session, runId):\n if session.Runs is None:\n print(\"session config has no Runs value defined\")\n return -1\n ids = [idx for (idx, run) in enumerate(session.Runs) if run == runId]\n if len(ids) == 0:\n print(\"Run {} not in Runs List\".format(runId))\n return -1\n elif len(ids) > 1:\n print(\"Run {} declared multiple times in Runs List\".format(runId))\n return -1\n idx = ids[0]\n return idx\n","sub_path":"rtAtten/PatternsDesign2Config.py","file_name":"PatternsDesign2Config.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"23130284","text":"import zipfile\nimport shutil\nimport os\nimport fileinput\n\ndef UnzipDOCX(FilePath, TemplateName, NewFileName):\n\tFilePathCreateFolder = TemplateName.replace(\".docx\", \"\")\n\tos.mkdir(NewFileName)\n\t\n\tFilePathZip = NewFileName + \"\\\\\" + TemplateName.replace(\".docx\", \".zip\") \n\tFilePathInFolder = NewFileName + \"\\\\\" + TemplateName\n\tshutil.copyfile(FilePath, FilePathZip)\n\t\n\twith zipfile.ZipFile(FilePathZip, \"r\") as z:\n\t\tz.extractall(NewFileName)\n\t\t\n\tos.remove(FilePathZip)\n\t\ndef InsertIntoDOCX(FilePath, ReplacementString, StringToReplace):\n\tFilePath = FilePath + \"\\\\word\\\\document.xml\"\n\t\n\tfor line in fileinput.input(FilePath, inplace=True):\n\t\tprint(line.replace(StringToReplace, ReplacementString), end='')\n\t\ndef ZipDOCX(FilePath):\n\tshutil.make_archive(FilePath, 'zip', FilePath)\n\n\tFilePathZip = FilePath + \".zip\"\n\t\n\tFilePathDocx = FilePathZip.replace(\".zip\", \".docx\")\n\tos.rename(FilePathZip, FilePathDocx)\n\tshutil.rmtree(FilePath)\n\t","sub_path":"Python/docxFunctions.py","file_name":"docxFunctions.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"205574230","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport collections\n\nimport copy\nfrom federatedml.param.intersect_param import IntersectParam\nfrom types import SimpleNamespace\nfrom federatedml.param.base_param import BaseParam, deprecated_param\nfrom federatedml.util import consts\nfrom federatedml.param.encrypt_param import EncryptParam\nfrom federatedml.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam\nfrom federatedml.param.predict_param import PredictParam\nfrom federatedml.param.callback_param import CallbackParam\n\ndeprecated_param_list = [\"validation_freqs\", \"metrics\"]\n\n\n@deprecated_param(*deprecated_param_list)\nclass FTLParam(BaseParam):\n\n def __init__(self, alpha=1, tol=0.000001,\n n_iter_no_change=False, validation_freqs=None, optimizer={'optimizer': 'Adam', 'learning_rate': 0.01},\n nn_define={}, epochs=1, intersect_param=IntersectParam(consts.RSA), config_type='keras', batch_size=-1,\n encrypte_param=EncryptParam(),\n encrypted_mode_calculator_param=EncryptedModeCalculatorParam(mode=\"confusion_opt\"),\n predict_param=PredictParam(), mode='plain', communication_efficient=False,\n local_round=5, callback_param=CallbackParam()):\n \"\"\"\n Parameters\n ----------\n alpha : float\n a loss coefficient defined in paper, it defines the importance of alignment loss\n tol : float\n loss tolerance\n n_iter_no_change : bool\n check loss convergence or not\n validation_freqs : None or positive integer or container object in python\n Do validation in training process or Not.\n if equals None, will not do validation in train process;\n if equals positive integer, will validate data every validation_freqs epochs passes;\n if container object in python, will validate data if epochs belong to this container.\n e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.\n The default value is None, 1 is suggested. You can set it to a number larger than 1 in order to\n speed up training by skipping validation rounds. When it is larger than 1, a number which is\n divisible by \"epochs\" is recommended, otherwise, you will miss the validation scores\n of last training epoch.\n optimizer : str or dict\n optimizer method, accept following types:\n 1. a string, one of \"Adadelta\", \"Adagrad\", \"Adam\", \"Adamax\", \"Nadam\", \"RMSprop\", \"SGD\"\n 2. a dict, with a required key-value pair keyed by \"optimizer\",\n with optional key-value pairs such as learning rate.\n defaults to \"SGD\"\n nn_define : dict\n a dict represents the structure of neural network, it can be output by tf-keras\n epochs : int\n epochs num\n intersect_param\n define the intersect method\n config_type : {'tf-keras'}\n config type\n batch_size : int\n batch size when computing transformed feature embedding, -1 use full data.\n encrypte_param\n encrypted param\n encrypted_mode_calculator_param\n encrypted mode calculator param:\n predict_param\n predict param\n mode: {\"plain\", \"encrypted\"}\n plain: will not use any encrypt algorithms, data exchanged in plaintext\n encrypted: use paillier to encrypt gradients\n communication_efficient: bool\n will use communication efficient or not. when communication efficient is enabled, FTL model will\n update gradients by several local rounds using intermediate data\n local_round: int\n local update round when using communication efficient\n \"\"\"\n\n super(FTLParam, self).__init__()\n self.alpha = alpha\n self.tol = tol\n self.n_iter_no_change = n_iter_no_change\n self.validation_freqs = validation_freqs\n self.optimizer = optimizer\n self.nn_define = nn_define\n self.epochs = epochs\n self.intersect_param = copy.deepcopy(intersect_param)\n self.config_type = config_type\n self.batch_size = batch_size\n self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)\n self.encrypt_param = copy.deepcopy(encrypte_param)\n self.predict_param = copy.deepcopy(predict_param)\n self.mode = mode\n self.communication_efficient = communication_efficient\n self.local_round = local_round\n self.callback_param = copy.deepcopy(callback_param)\n\n def check(self):\n self.intersect_param.check()\n self.encrypt_param.check()\n self.encrypted_mode_calculator_param.check()\n\n self.optimizer = self._parse_optimizer(self.optimizer)\n\n supported_config_type = [\"keras\"]\n if self.config_type not in supported_config_type:\n raise ValueError(f\"config_type should be one of {supported_config_type}\")\n\n if not isinstance(self.tol, (int, float)):\n raise ValueError(\"tol should be numeric\")\n\n if not isinstance(self.epochs, int) or self.epochs <= 0:\n raise ValueError(\"epochs should be a positive integer\")\n\n if self.nn_define and not isinstance(self.nn_define, dict):\n raise ValueError(\"bottom_nn_define should be a dict defining the structure of neural network\")\n\n if self.batch_size != -1:\n if not isinstance(self.batch_size, int) \\\n or self.batch_size < consts.MIN_BATCH_SIZE:\n raise ValueError(\n \" {} not supported, should be larger than 10 or -1 represent for all data\".format(self.batch_size))\n\n for p in deprecated_param_list:\n # if self._warn_to_deprecate_param(p, \"\", \"\"):\n if self._deprecated_params_set.get(p):\n if \"callback_param\" in self.get_user_feeded():\n raise ValueError(f\"{p} and callback param should not be set simultaneously,\"\n f\"{self._deprecated_params_set}, {self.get_user_feeded()}\")\n else:\n self.callback_param.callbacks = [\"PerformanceEvaluate\"]\n break\n\n descr = \"ftl's\"\n\n if self._warn_to_deprecate_param(\"validation_freqs\", descr, \"callback_param's 'validation_freqs'\"):\n self.callback_param.validation_freqs = self.validation_freqs\n\n if self._warn_to_deprecate_param(\"metrics\", descr, \"callback_param's 'metrics'\"):\n self.callback_param.metrics = self.metrics\n\n if self.validation_freqs is None:\n pass\n elif isinstance(self.validation_freqs, int):\n if self.validation_freqs < 1:\n raise ValueError(\"validation_freqs should be larger than 0 when it's integer\")\n elif not isinstance(self.validation_freqs, collections.Container):\n raise ValueError(\"validation_freqs should be None or positive integer or container\")\n\n assert isinstance(self.communication_efficient, bool), 'communication efficient must be a boolean'\n assert self.mode in [\n 'encrypted', 'plain'], 'mode options: encrpyted or plain, but {} is offered'.format(\n self.mode)\n\n self.check_positive_integer(self.epochs, 'epochs')\n self.check_positive_number(self.alpha, 'alpha')\n self.check_positive_integer(self.local_round, 'local round')\n\n @staticmethod\n def _parse_optimizer(opt):\n \"\"\"\n Examples:\n\n 1. \"optimize\": \"SGD\"\n 2. \"optimize\": {\n \"optimizer\": \"SGD\",\n \"learning_rate\": 0.05\n }\n \"\"\"\n\n kwargs = {}\n if isinstance(opt, str):\n return SimpleNamespace(optimizer=opt, kwargs=kwargs)\n elif isinstance(opt, dict):\n optimizer = opt.get(\"optimizer\", kwargs)\n if not optimizer:\n raise ValueError(f\"optimizer config: {opt} invalid\")\n kwargs = {k: v for k, v in opt.items() if k != \"optimizer\"}\n return SimpleNamespace(optimizer=optimizer, kwargs=kwargs)\n else:\n raise ValueError(f\"invalid type for optimize: {type(opt)}\")\n","sub_path":"python/federatedml/param/ftl_param.py","file_name":"ftl_param.py","file_ext":"py","file_size_in_byte":8929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"180415931","text":"import sys, os, json\r\nimport find_next_cut\r\nfrom ffmpy import FFmpeg\r\nfrom os import path as osp\r\n\r\n## TODO put root path as argument\r\nrootPath = \"/home/opekta/copaeurope/\"\r\n#trimmedVideosPath = osp.join(rootPath, \"mmaction2/data/soccernet/trimmed\")\r\nlabelsPath = osp.join(rootPath, \"mmaction2/data/soccernet/SoccerNet_V1.1_Labels\")\r\ndatasetPath = osp.join(rootPath, \"mmaction2/data/soccernet/videos\")\r\noutputExtension = \".mp4\"\r\nvideosLQPath = osp.join(rootPath, \"mmaction2/data/soccernet/LQ\")\r\nsoccernetClassesFile = open(\"soccernet_classes_conversion.json\")\r\nsoccernetClassesJson = json.load(soccernetClassesFile)\r\nclassesDurationFile = open(\"classes_duration.json\")\r\nclassesDurationJson = json.load(classesDurationFile)\r\n\r\ndef findShotAction(jsonAnnotation, halfTime, timingValue):\r\n for annotation in jsonAnnotation['annotations']:\r\n if annotation[\"visibility\"] == \"not shown\":\r\n continue\r\n classe = getClasse(annotation)\r\n if (classe in ['Corner', 'FreeKick', 'Goal', 'Penalty']):\r\n actionTimeValue = 60*int(annotation['gameTime'][4:6]) + int(annotation['gameTime'][7:9])\r\n if (halfTime == annotation['gameTime'][0]) & (abs(actionTimeValue - timingValue) < 3):\r\n return True\r\n return False\r\n\r\ndef getClasse(annotation):\r\n if annotation['label'] in soccernetClassesJson[\"classes\"]:\r\n return annotation['label']\r\n elif annotation['label'] in soccernetClassesJson[\"conversion\"]:\r\n return soccernetClassesJson[\"conversion\"][annotation['label']]\r\n else:\r\n return None\r\n\r\ndef main():\r\n\r\n for classe in soccernetClassesJson[\"classes\"]:\r\n # Create folders to store trimmed copy videos ordered by action.\r\n tmpPath = osp.join(datasetPath, classe)\r\n os.makedirs(tmpPath, exist_ok=True)\r\n\r\n for root, dirs, files in os.walk(videosLQPath):\r\n\r\n # Create folders to store trimmed copy videos ordered by match.\r\n # tmpPath = os.path.join(trimmedVideosPath, root[1+len(videosLQPath):])\r\n # os.makedirs(tmpPath, exist_ok=True)\r\n\r\n labelFolder = osp.join(labelsPath, root[1+len(videosLQPath):])\r\n labelPath = osp.join(labelFolder, \"Labels-v2.json\")\r\n cutAnnotationsPath = osp.join(labelFolder, \"Labels-cameras.json\")\r\n ligueTrigram = root[1+len(videosLQPath):4+len(videosLQPath)]\r\n\r\n if osp.exists(labelPath):\r\n\r\n f = open(labelPath)\r\n annotationsData = json.load(f)\r\n f.close()\r\n videoFolder = osp.join(videosLQPath, annotationsData[\"UrlLocal\"])\r\n folderName = osp.basename(osp.normpath(videoFolder))\r\n dateExtension = folderName[:10] + '_' + folderName[13:18]\r\n firstLettersHostTeam = folderName[19:22]\r\n\r\n for element in annotationsData['annotations']:\r\n\r\n if element[\"visibility\"] == \"not shown\":\r\n continue\r\n classe = getClasse(element)\r\n if classe is not None:\r\n halfTime = element[\"gameTime\"][0]\r\n videoLQPath = osp.join(videoFolder, halfTime + \".mkv\")\r\n if osp.exists(videoLQPath):\r\n start = 60*int(element['gameTime'][4:6]) + int(element['gameTime'][7:9])\r\n newTrimmedPath = osp.join(datasetPath, classe + \"//\" + classe + '_'\r\n + ligueTrigram + '_' + dateExtension + '_'\r\n + firstLettersHostTeam + '_'\r\n + str(start + 45*60*(int(halfTime)-1))\r\n + outputExtension)\r\n if osp.exists(newTrimmedPath):\r\n print(newTrimmedPath, \"already exists. Extraction is skipped\")\r\n else:\r\n if (classe == 'Shot'):\r\n alreadyDealt = findShotAction(annotationsData, halfTime, start)\r\n if alreadyDealt:\r\n continue\r\n # start = int(element['gameTime'][4:]) - classesDurationJson[classe][1]\r\n ff = FFmpeg(\r\n inputs={videoLQPath: ['-fflags', 'genpts', '-y', '-an', \"-ss\",\r\n str(start - classesDurationJson[classe][\"anticipation\"]),\r\n '-t', str(classesDurationJson[classe][\"duration\"])]},\r\n outputs={newTrimmedPath: ['-c', 'copy', '-copyts', '-avoid_negative_ts', 'make_zero']}\r\n )\r\n # print(ff.cmd)\r\n # print('start', start)\r\n # print('nextCut', nextCut)\r\n # The usual command line can be found in ff.cmd\r\n ff.run()\r\n\r\nsoccernetClassesFile.close()\r\nclassesDurationFile.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"tools/data/soccernet/trimscript_broadcast_only.py","file_name":"trimscript_broadcast_only.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"222057182","text":"#!python3\n\n# Dict operations\n\nif __name__ == '__main__':\n test_dict = {\n \"name\": \"Sahil\",\n \"age\": 30,\n \"country\": \"CA\"\n }\n\n print(f'original dict: \\n{test_dict}')\n\n # Adding a key\n test_dict['mother_tongue'] = \"punjabi\"\n print(f'dict after inserting a key: \\n{test_dict}')\n\n # Removing a key using pop() which returns the deleted value (not key)\n dict_for_pop = test_dict.copy()\n deleted_element = test_dict.pop('mother_tongue')\n print(f'Deleted element: {deleted_element}')\n\n # Removing a key using popitem() which returns key and value as tuple\n dict_for_popitem = test_dict.copy()\n deleted_element = dict_for_popitem.popitem()\n print(f'Deleted element: {deleted_element}')\n\n # Removing a key using del keyword\n # This can be used to delete the entire dict\n dict_for_del = test_dict.copy()\n del dict_for_del['age']\n print(f'dict after deleting a key using del keyword: \\n{dict_for_del}')\n\n # Removing all elements/keys using clear method\n dict_for_clear = test_dict.copy()\n dict_for_clear.clear()\n print(\n f'dict after deleting all keys using clear keyword: \\n{dict_for_clear}')\n\n # using in operator to check if a key exists\n if 'age' in test_dict: # or test_dict.keys()\n print(\"used 'in' operator to check if 'age' key exists\")\n\n # all() method to check if all elements in dict are true\n bool_dict = {\n \"field1\": True,\n \"field2\": False\n }\n if all(test_dict):\n print('checked string dict using all() method...')\n\n if all(bool_dict.values()):\n print('checked bool dict using all() method...')\n\n # len() method returns number of pairs for dict\n print(f'pairs in our test dict: {len(test_dict)}')\n\n # sorted() method sorts elements in specific order\n print(f'sorted dict: {sorted(test_dict, reverse=False)}')\n","sub_path":"data-structures/dictionaries/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"405727594","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom widgets_servo_test import Servo, Servo_pwm,Motor_rotate, Magneto_sensor,UltrasonicSensor,Light,Buzzer\nfrom widgets import *\nimport time\nimport cart\n\ndef Lightwork(light_port,color):\n light=Light(light_port)\n red=[80,0,0]\n green=[0,80,0]\n yellow=[80,80,0]\n off=[0,0,0]\n light_color=[0,0,0]\n if color =='red':\n light_color=red\n elif color=='green':\n light_color=green\n elif color=='yellow':\n light_color=yellow\n elif color=='off':\n light_color = off\n light.lightcontrol(0,light_color[0],light_color[1],light_color[2])\n\nif __name__ == '__main__':\n pass","sub_path":"src/obstacle.py","file_name":"obstacle.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"541774497","text":"# -*- coding: utf-8 -*-\nimport sys\nimport locale\nlocale.setlocale(locale.LC_ALL, '')\n\n_ultang = 0\n_boolsHorario = 0\noutput = \"\"\nareaTotal = 0.0\n\ndef pCardeais (graus):\n\tgraus = int(graus)\n\tif graus>180:\n\t\tif graus>270:\n\t\t\tif graus>315:\n\t\t\t\tif graus>340:\n\t\t\t\t\treturn \"LESTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"SUDESTE\"\n\t\t\telse:\n\t\t\t\tif graus>290:\n\t\t\t\t\treturn \"SUDESTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"SUL\"\n\t\telse:\n\t\t\tif graus>225:\n\t\t\t\tif graus>250:\n\t\t\t\t\treturn \"SUL\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"SUDOESTE\"\n\t\t\telse:\n\t\t\t\tif graus>200:\n\t\t\t\t\treturn \"SUDOESTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"OESTE\"\n\telse:\n\t\tif graus>90:\n\t\t\tif graus>135:\n\t\t\t\tif graus>160:\n\t\t\t\t\treturn \"OESTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"NOROESTE\"\n\t\t\telse:\n\t\t\t\tif graus>110:\n\t\t\t\t\treturn \"NOROESTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"NORTE\"\n\t\telse:\n\t\t\tif graus>45:\n\t\t\t\tif graus>70:\n\t\t\t\t\treturn \"NORTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"NORDESTE\"\n\t\t\telse:\n\t\t\t\tif graus>20:\n\t\t\t\t\treturn \"NORDESTE\"\n\t\t\t\telse:\n\t\t\t\t\treturn \"LESTE\"\n\ndef CorrAng (ang):\n\tang = int(ang)\n\tif ang>=360:\n\t\treturn ang - 360\n\tif ang<0:\n\t\treturn ang + 360\n\treturn ang\n\ndef checarAng(arrayLinhas):\n\tsoma = 0.0\n\tqLinhas = 0\n\tfor linha in arrayLinhas:\n\t\tdados = linha.split(\" \")\n\t\tqLinhas += 1\n\t\tif dados[0] == \"*\":\n\t\t\tdel dados[0]\n\t\t\tsoma += int(dados[0])\n\t\t\tsoma += int(dados[1]) / 60\n\t\t\tsoma += int(dados[2]) / 3600\n\t\t\tbreak\n\t\tsoma += int(dados[0])\n\t\tsoma += int(dados[1]) / 60\n\t\tsoma += int(dados[2]) / 3600\n\tif not (soma == 180*(qLinhas - 2)):\n\t\treturn \"Erro em checarAng(). Os ângulos não fecham!!!\"\n\treturn 0\n\ndef primDescr (boolsHorario, nome, boolurbana, area, rua, munic):\n\n\tglobal _boolsHorario\n\t_boolsHorario = boolsHorario\n\tt = \"{} \".format(nome)\n\tt += \"DESCRIÇÃO DO IMÓVEL: \"\n\tif boolurbana:\n\t\tt += \"Um lote urbano\"\n\telse:\n\t\tt += \"Uma área de terras\"\n\tt += \" com uma superfície de {} m²,\".format(area)\n\tt += \" localizada n{},\".format(rua.replace(\" \", \" \"))\n\tif boolurbana:\n\t\tt += \" zona urbana\"\n\telse:\n\t\tt += \" zona rural\"\n\tt += \" do município de {}. \".format(munic.replace(\" \", \" \"))\n\treturn t\n\ndef pelaFrente (angInicial, med, conf):\n\tglobal _ultAng\n\tpfAng = int(angInicial.split(\"°\")[0])\n\t_ultAng = pfAng\n\tif _boolsHorario:\n\t\t_ultAng += 90\n\telse:\n\t\t_ultAng -= 90\n\t_ultAng = CorrAng(_ultAng)\n\tconfront = \"\"\n\tfor co in conf:\n\t\tconfront += \" {}\".format(co)\n\tpCardeal = pCardeais(_ultAng)\n\treturn \"Pela frente a {}, onde mede {} metros e confronta com{}, \".format(pCardeal, med, confront.replace(\" \", \" \"))\n\ndef angMedConf (ang, med, conf):\n\tglobal _ultAng\n\tangNum = int(ang.split(\"°\")[0])\n\tcAng = CorrAng(angNum)\n\tif _boolsHorario:\n\t\t_ultAng -= cAng\n\telse:\n\t\t_ultAng += cAng\n\tconfront = \"\"\n\tfor co in conf:\n\t\tconfront += \" {}\".format(co)\n\t_ultAng = CorrAng(_ultAng)\n\tpCardeal = pCardeais(_ultAng)\n\treturn \"a seguir forma um ângulo interno de {}\\\", seguindo rumo {}, onde mede {} e confronta com{}, \".format(ang, pCardeal, med, confront)\n\ndef final(ang):\n\treturn \"chegando ao ponto de início, onde forma um ângulo de {}.
\".format(ang)\n\ndef html(texto):\n\to = open(\"template.html\", encoding='utf-8').read()\n\to = o.replace(\"%areatotal%\", locale.format(\"%.2f\", areaTotal, True))\n\to += texto\n\topen(\"output.html\", \"w\", encoding='utf-8').write(o)\n\narq = open(\"raw.txt\", encoding=\"utf-8\").read()\n\nlotes = arq.split(\"#\")\n\ndel lotes[0]\n\nfor lote in lotes:\n\tlinhas = lote.split(\"\\n\")\n\tdados = linhas[0].split(\";\")\n\tif dados[1] == \"H\":\n\t\thor = 1\n\telse:\n\t\thor = 0\n\tif dados[2] == \"U\":\n\t\turb = 1\n\telse:\n\t\turb = 0\n\tt = primDescr(hor, dados[0], urb, dados[3], dados[4], dados[5])\n\tareaTotal += float(dados[3].replace(\".\", \"\").replace(\",\", \".\"))\n\tdel linhas[0]\n\tpelaf = linhas[0].split(\" \")\n\tt += pelaFrente(\"{}°{}'{}\".format(pelaf[0], pelaf[1], pelaf[2]), pelaf[3], pelaf[4:])\n\tdel linhas[0]\n\tcAngStatus = checarAng(linhas)\n\tfor linha in linhas:\n\t\tl = linha.split(\" \")\n\t\tif not (l == ['']):\n\t\t\tif l[0] == \"*\":\n\t\t\t\tt += final(\"{}°{}'{}\".format(l[1], l[2], l[3]))\n\t\t\telse:\n\t\t\t\tt += angMedConf(\"{}°{}'{}\".format(l[0], l[1], l[2]), l[3], l[4:])\n\tif not(cAngStatus == 0):\n\t\tprint(\"A criação do memorial falhou. Motivo: {}\".format(cAngStatus))\n\t\tt = \"***** A criação do memorial falhou. Motivo: {} \".format(cAngStatus)\n\tt += \" \"\n\toutput += t\nhtml(output)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"350411424","text":"import os.path\n\nPROCESS_ID = '/monitoring/process_id.pid'\nSTACKTRACE = '/monitoring/stacktrace.txt'\n\nEMPTY = ''\n\n\ndef read_file(path):\n with open(os.path.dirname(__file__) + '/..' + path, 'r') as f:\n contents = f.read()\n f.close()\n return contents\n\n\ndef write_to_file(path, string):\n with open(os.path.dirname(__file__) + '/..' + path, 'w') as f:\n f.seek(0)\n f.truncate()\n f.write(str(string))\n f.close()\n\n\ndef erase_contents(path):\n with open(os.path.dirname(__file__) + '/..' + path, 'w') as f:\n f.seek(0)\n f.truncate()\n f.close()\n\n\ndef is_empty(path):\n with open(os.path.dirname(__file__) + '/..' + path, 'r') as f:\n contents = f.read()\n f.close()\n if contents == '':\n return True\n return False\n","sub_path":"bot/helpers/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"302216555","text":"#!/usr/bin/env python\nimport roslib; roslib.load_manifest('PSoC_Listener')\nimport rospy, tty, sys, termios\nfrom geometry_msgs.msg import Twist\n\nLIN_INC = 1\nANG_INC = 1\npub_cmd = rospy.Publisher('vel_data', Twist)\n\nif __name__ == \"__main__\":\n try:\n rospy.init_node('PSoC_Teleop_Key')\n\n print(\"hit w/a/s/d to move, q to quit\")\n\n flag = True\n\n while flag:\n # wait for key stroke\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\n p = Twist()\n p.linear.x = 0\n p.angular.z = 0\n\n if ch == \"w\" :\n print(\"forward!\")\n p.linear.x = LIN_INC\n elif ch == \"a\" :\n print(\"counter-clockwise!\")\n p.angular.z = ANG_INC\n elif ch == \"s\" :\n print(\"backward!\")\n p.linear.x = -LIN_INC\n elif ch == \"d\" :\n print(\"clockwise!\")\n p.angular.z = -ANG_INC\n elif ch == \"q\" :\n flag = False\n else :\n print(\"hit w/a/s/d to move, q to quit\")\n\n pub_cmd.publish(p)\n\n except rospy.ROSInterruptException: pass\n","sub_path":"PSoC_Listener/src/PSoC_Data_Teleop_Key.py","file_name":"PSoC_Data_Teleop_Key.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"444542824","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport pickle\n\nfrom flask import jsonify, request\n\nfrom .startup import app\nfrom .lib import (create_token, get_chunk_contract,\n verify_proof, update_contract,\n lookup_contract)\nfrom .models import Contract\nfrom datetime import datetime\n\n\n@app.route('/')\ndef api_index():\n return jsonify(msg='ok')\n\n\n@app.route('/api/downstream/status')\ndef api_downstream_status():\n try:\n contracts = Contract.query.filter(Contract.expiration\n > datetime.utcnow()).all()\n\n farmers = list(\n map(lambda x: {'id': x.id,\n 'uptime': int((datetime.utcnow()-x.start).\n total_seconds())},\n contracts))\n\n return jsonify(farmers=farmers)\n except Exception as ex:\n resp = jsonify(status='error',\n message=str(ex))\n resp.status_code = 500\n return resp\n\n\n@app.route('/api/downstream/new/')\ndef api_downstream_new_token(sjcx_address):\n # generate a new token\n try:\n db_token = create_token(sjcx_address)\n beat = pickle.loads(db_token.heartbeat)\n pub_beat = beat.get_public()\n return jsonify(token=db_token.token,\n type=type(beat).__name__,\n heartbeat=pub_beat.todict())\n except Exception as ex:\n resp = jsonify(status='error',\n message=str(ex))\n resp.status_code = 500\n return resp\n\n\n@app.route('/api/downstream/chunk/')\ndef api_downstream_chunk_contract(token):\n try:\n db_contract = get_chunk_contract(token)\n\n with open(db_contract.tag_path, 'rb') as f:\n tag = pickle.loads(f.read())\n chal = pickle.loads(db_contract.challenge)\n\n # now since we are prototyping, we can delete the tag and file\n os.remove(db_contract.file.path)\n os.remove(db_contract.tag_path)\n\n return jsonify(seed=db_contract.seed,\n size=db_contract.size,\n file_hash=db_contract.file.hash,\n challenge=chal.todict(),\n tag=tag.todict(),\n expiration=db_contract.expiration.isoformat())\n\n except Exception as ex:\n resp = jsonify(status='error',\n message=str(ex))\n resp.status_code = 500\n return resp\n\n\n@app.route('/api/downstream/challenge//')\ndef api_downstream_chunk_contract_status(token, file_hash):\n \"\"\"For prototyping, this will generate a new challenge\n \"\"\"\n try:\n db_contract = update_contract(token, file_hash)\n\n return jsonify(challenge=pickle.loads(db_contract.challenge).todict(),\n expiration=db_contract.expiration.isoformat())\n\n except Exception as ex:\n print(ex)\n resp = jsonify(status='error',\n message=str(ex))\n resp.status_code = 500\n return resp\n\n\n@app.route('/api/downstream/answer//', methods=['POST'])\ndef api_downstream_challenge_answer(token, file_hash):\n try:\n d = request.get_json(silent=True)\n\n if (dict is False or not isinstance(d, dict) or 'proof' not in d):\n raise RuntimeError('Posted data must be an JSON encoded \\\nproof object: {\"proof\":\"...proof object...\"}')\n\n db_contract = lookup_contract(token, file_hash)\n\n beat = pickle.loads(db_contract.token.heartbeat)\n\n try:\n proof = beat.proof_type().fromdict(d['proof'])\n except:\n raise RuntimeError('Proof corrupted.')\n\n if (not verify_proof(token, file_hash, proof)):\n raise RuntimeError('Invalid proof, or proof expired.')\n\n return jsonify(status='ok')\n\n except Exception as ex:\n print(ex)\n resp = jsonify(status='error',\n message=str(ex))\n resp.status_code = 500\n return resp\n","sub_path":"downstream_node/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"197190216","text":"import json\n\ndef test_busca_projetos_carregados(client):\n response = client.get('/api/projetos')\n \n data = json.loads(response.data.decode('utf-8'))['data']\n is_filled = True\n for item in data:\n if not (item['autores'] and item['curso'] and item['orientadores'] and item['palavrasChave']):\n is_filled = False\n \n assert is_filled\n\n\n# def test_cadastro_projeto(client): \n# response = client.post('/api/projetos/cadastrar', headers={'Authorization':'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VybmFtZSI6ImFkbWluIiwidGlwb1VzdWFyaW8iOiI0IiwiZXh0IjoiMjAyMC0wNS0yOSAxNTozODo0MS42Njc2NjMifQ.49IMXbzgU9zpruJbKc7F5QBi8vCr1yszWc2UpoVYY0M'})\n# data = json.loads(response.data.decode(\"utf-8\"))\n# assert response.status_code >= 200 and response.status_code < 300","sub_path":"tests/projetos/test_projetos.py","file_name":"test_projetos.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"108039060","text":"def histogram(num):\n\tfor i in num:\n\t\tprint(\"x\"*i);\n\n\nnumbers=[];\nend=int(input(\"Enter length of list :\"));\nfor i in range(end):\n\tnum=int(input(\"Enter value :\"));\n\tnumbers.append(num);\nprint(numbers);\t\nhistogram(numbers);\n\n","sub_path":"prg10.py","file_name":"prg10.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"55142150","text":"import os\n_data_root = os.environ.get('DATA_ROOT')\ndel os\n\n_base_ = [\n '../_base_/default_runtime.py', \n '../_base_/schedules/schedule_160k.py'\n]\n\n# model settings\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderRefine',\n pretrained='open-mmlab://msra/hrnetv2_w48',\n backbone=dict(\n type='HRNetRefine',\n norm_cfg=norm_cfg,\n norm_eval=False,\n extra=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4, ),\n num_channels=(64, )),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(48, 96)),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(48, 96, 192)),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(48, 96, 192, 384)))),\n decode_head=dict(\n type='FCNHead',\n in_channels=[48, 96, 192, 384],\n in_index=(0, 1, 2, 3),\n channels=sum([48, 96, 192, 384]),\n input_transform='resize_concat',\n kernel_size=1,\n num_convs=1,\n concat_input=False,\n dropout_ratio=-1,\n num_classes=2,\n norm_cfg=norm_cfg,\n align_corners=False,\n loss_decode=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))\n# model training and testing settings\ntrain_cfg = dict()\ntest_cfg = dict(mode='whole')\n\n# dataset settings\ndataset_type = 'RefineDataset'\ndata_root = _data_root\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n\ncrop_size = (256, 256)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='LoadCoarseMask'),\n dict(type='Resize', img_scale=crop_size, ratio_range=(1.0, 1.0)),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_semantic_seg', 'coarse_mask']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='LoadCoarseMask'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=crop_size,\n flip=False,\n transforms=[\n dict(type='Resize', img_scale=crop_size, keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img', 'coarse_mask']),\n ])\n]\ndata = dict(\n samples_per_gpu=8,\n workers_per_gpu=8,\n train=dict(\n type=dataset_type,\n data_root=data_root,\n img_dir='img_dir/train',\n mask_dir='mask_dir/train',\n ann_dir='ann_dir/train',\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n data_root=data_root,\n img_dir='img_dir/val',\n mask_dir='mask_dir/val',\n ann_dir='ann_dir/val',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n data_root=data_root,\n img_dir='img_dir/val',\n mask_dir='mask_dir/val',\n ann_dir='ann_dir/val',\n pipeline=test_pipeline))","sub_path":"configs/bpr/hrnet48_256.py","file_name":"hrnet48_256.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"470196656","text":"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport shutil\nimport time\nimport tempfile\nfrom os.path import join, abspath, dirname\n\nimport six\nimport pytest\n\nfrom asv import config\nfrom asv.commands.run import Run\nfrom asv.commands.publish import Publish\n\nfrom . import tools\nfrom .tools import browser\nfrom .test_workflow import basic_conf\n\n\n@pytest.fixture(scope=\"session\")\ndef basic_html(request):\n tmpdir = tempfile.mkdtemp()\n request.addfinalizer(lambda: shutil.rmtree(tmpdir))\n\n local = abspath(dirname(__file__))\n cwd = os.getcwd()\n\n os.chdir(tmpdir)\n try:\n machine_file = join(tmpdir, 'asv-machine.json')\n\n shutil.copyfile(join(local, 'asv-machine.json'),\n machine_file)\n\n dvcs = tools.generate_test_repo(tmpdir, list(range(10)))\n repo_path = dvcs.path\n\n conf = config.Config.from_json({\n 'env_dir': join(tmpdir, 'env'),\n 'benchmark_dir': join(local, 'benchmark'),\n 'results_dir': join(tmpdir, 'results_workflow'),\n 'html_dir': join(tmpdir, 'html'),\n 'repo': repo_path,\n 'dvcs': 'git',\n 'project': 'asv',\n 'matrix': {\n \"six\": [None],\n \"psutil\": [\"1.2\", \"2.1\"]\n }\n })\n\n Run.run(conf, range_spec=\"master~5..master\", steps=3,\n _machine_file=machine_file, quick=True)\n Publish.run(conf)\n finally:\n os.chdir(cwd)\n\n return conf, dvcs\n\n\ndef test_web_smoketest(browser, basic_html):\n conf, dvcs = basic_html\n\n with tools.preview(conf.html_dir) as base_url:\n browser.get(base_url)\n\n assert browser.title == 'airspeed velocity of an unladen asv'\n\n # Open a graph display\n browser.find_element_by_link_text('params_examples.track_param').click()\n\n # Verify there's a plot of some sort\n browser.find_element_by_css_selector('canvas.flot-base')\n\n # Click a parameterized test button, which should toggle the button\n param_button = browser.find_element_by_link_text('benchmark.params_examples.ClassOne')\n assert 'active' in param_button.get_attribute('class').split()\n param_button.click()\n assert 'active' not in param_button.get_attribute('class').split()\n\n # Check there's no error popup; needs an explicit wait because\n # there is no event that occurs on successful load that\n # doesn't also occur on a failed load\n time.sleep(1.0)\n error_box = browser.find_element_by_id('error-message')\n assert not error_box.is_displayed()\n","sub_path":"test/test_web.py","file_name":"test_web.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"221868907","text":"import sqlite3\n\nDB = 'db.sqlite'\n\n\ndef crate_table():\n connection = sqlite3.connect(DB)\n create_sql = \"\"\"create TABLE IF NOT EXISTS contacts\n (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n email TEXT DEFAULT NULL,\n phone TEXT NOT NULL,\n address TEXT,\n created DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n connection.execute(create_sql)\n connection.close()\n\n\ndef connect():\n return sqlite3.connect(DB)\n\n\ndef insert_data(name, phone, email = None, address = None):\n conn = connect()\n sql = \"INSERT INTO contacts (name, email, phone, address) VALUES (?, ?, ?, ?)\"\n data = (name, email, phone, address)\n conn.execute(sql, data)\n conn.commit()\n conn.close()\n\n\ndef get_data():\n conn = connect()\n sql = \"SELECT * FROM contacts\"\n return conn.execute(sql).fetchall()\n\n\ndef remove_data(id):\n conn = connect()\n sql = \"DELETE FROM contacts WHERE id = ?\"\n conn.execute(sql, (id,))\n conn.commit()\n conn.close()\n","sub_path":"flask_site/src/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"71022476","text":"#\n# Copyright © 2021 Uncharted Software Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport sys\nimport unittest\nimport time\nfrom d3m.metadata.problem import PerformanceMetricBase, PerformanceMetric\nimport typing\nimport pandas as pd\nimport signal\n\n# test\nSEEDATASETS = os.getenv(\"D3MINPUTDIR\", \"seed_datasets_current\")\nD3MOUTPUTDIR = \"output\"\nD3MINPUTDIR = SEEDATASETS\nD3MSTATICDIR = \"/static\"\n\nenv = {}\nenv.update(os.environ.copy())\nenv[\"PYTHONPATH\"] = \":\".join(sys.path)\nenv[\"D3MLOCAL\"] = \"True\"\n\nlower_is_better = {\n \"ACCURACY\": False,\n \"PRECISION\": False,\n \"RECALL\": False,\n \"F1\": False,\n \"F1_MICRO\": False,\n \"F1_MACRO\": False,\n \"MEAN_SQUARED_ERROR\": True,\n \"ROOT_MEAN_SQUARED_ERROR\": True,\n \"MEAN_ABSOLUTE_ERROR\": True,\n \"R_SQUARED\": True,\n \"NORMALIZED_MUTUAL_INFORMATION\": False,\n \"JACCARD_SIMILARITY_SCORE\": False,\n \"PRECISION_AT_TOP_K\": False,\n \"OBJECT_DETECTION_AVERAGE_PRECISION\": False,\n \"HAMMING_LOSS\": True,\n}\n\nproblem_thresholds = {\n \"LL1_Haptics_MIN_METADATA\": 0.41, # F1_MACRO\n \"124_174_cifar10_MIN_METADATA\": 0.8, # Accuracy\n \"124_188_usps_MIN_METADATA\": 0.9, # Accuracy\n \"124_214_coil20_MIN_METADATA\": 0.95, # Accuracy\n \"124_95_uc_merced_land_use_MIN_METADATA\": 0.85, # Accuracy\n \"1491_one_hundred_plants_margin_MIN_METADATA\": 0.8, # F1_MACRO\n \"1567_poker_hand_MIN_METADATA\": 0.0, # F1_MACRO todo what score do we get?\n \"185_baseball_MIN_METADATA\": 0.7, # F1_MACRO\n \"196_autoMpg_MIN_METADATA\": 7, # MSE,\n \"22_handgeometry_MIN_METADATA\": 0.3, # MSE,\n \"26_radon_seed_MIN_METADATA\": 0.05, # RMSE\n \"27_wordLevels_MIN_METADATA\": 0.15, # F1_MACRO\n \"299_libras_move_MIN_METADATA\": 0.75, # Accuracy\n \"30_personae_MIN_METADATA\": 0.6, # F1_MACRO\n \"313_spectrometer_MIN_METADATA\": 0.4, # F1_MACRO,\n \"31_urbansound_MIN_METADATA\": 0.9, # Accuracy,\n \"32_fma_MIN_METADATA\": 0, # Accuracy\n \"32_wikiqa_MIN_METADATA\": 0.45, # F1\n \"38_sick_MIN_METADATA\": 0.9, # F1\n \"4550_MiceProtein_MIN_METADATA\": 1, # F1\n \"49_facebook_MIN_METADATA\": 0.85, # Accuracy\n \"534_cps_85_wages_MIN_METADATA\": 20, # MSE\n \"56_sunspots_MIN_METADATA\": 55, # RMSE\n \"56_sunspots_monthly_MIN_METADATA\": 60, # RMSE\n \"57_hypothyroid_MIN_METADATA\": 0.98, # F1_MACRO\n \"59_LP_karate_MIN_METADATA\": 0.4, # Accuracy,\n \"59_umls_MIN_METADATA\": 0.93, # Accuracy\n \"60_jester_MIN_METADATA\": 99, # MAE TODO what score?\n \"66_chlorineConcentration_MIN_METADATA\": 0.75, # F1_MACRO\n \"6_70_com_amazon_MIN_METADATA\": 0.8, # NORMALIZED_MUTUAL_INFORMATION\n \"6_86_com_DBLP_MIN_METADATA\": 0.7, # NORMALIZED_MUTUAL_INFORMATION\n \"kaggle_music_hackathon_MIN_METADATA\": 99, # RMSE TODO what score\n \"LL0_1100_popularkids_MIN_METADATA\": 0.35, # F1_MACRO\n \"LL0_186_braziltourism_MIN_METADATA\": 0.17, # F1_MACRO\n \"LL0_207_autoPrice_MIN_METADATA\": 5000000, # MSE\n \"LL0_acled_reduced_MIN_METADATA\": 0.9, # Accuracy\n \"LL1_336_MS_Geolife_transport_mode_prediction_MIN_METADATA\": 0.9, # Accuracy\n \"LL1_336_MS_Geolife_transport_mode_prediction_separate_lat_lon_MIN_METADATA\": 0.9, # Accuracy\n \"LL1_50words_MIN_METADATA\": 0.43, # F1_MACRO\n \"LL1_726_TIDY_GPS_carpool_bus_service_rating_prediction_MIN_METADATA\": 0.35, # F1_MACRO\n \"LL1_736_population_spawn_MIN_METADATA\": 1600, # MAE\n \"LL1_736_population_spawn_simpler_MIN_METADATA\": 1350, # MAE\n \"LL1_736_stock_market_MIN_METADATA\": 1.6, # MAE\n \"LL1_Adiac_MIN_METADATA\": 0.65, # F1_MACRO\n \"LL1_ArrowHead_MIN_METADATA\": 0.65, # F1_MACRO\n \"LL1_bn_fly_drosophila_medulla_net_MIN_METADATA\": 0.85, # NORMALIZED_MUTUAL_INFORMATION\n \"LL1_CinC_ECG_torso_MIN_METADATA\": 0.5, # F1_MACRO\n \"LL1_Cricket_Y_MIN_METADATA\": 0.5, # F1_MACRO\n \"LL1_crime_chicago_MIN_METADATA\": 0.65, # Accuracy,\n \"LL1_DIC28_net_MIN_METADATA\": 0.75, # Accuracy\n \"LL1_ECG200_MIN_METADATA\": 0.88, # F1\n \"LL1_EDGELIST_net_nomination_seed_MIN_METADATA\": 0.68, # Accuracy\n \"LL1_ElectricDevices_MIN_METADATA\": 0.45, # F1_MACRO\n \"LL1_FaceFour_MIN_METADATA\": 0.85, # F1_MACRO\n \"LL1_FISH_MIN_METADATA\": 0.7, # F1_MACRO\n \"LL1_FordA_MIN_METADATA\": 0.65, # F1\n \"LL1_GS_process_classification_tabular_MIN_METADATA\": 0.125, # F1 TODO these are low\n \"LL1_GS_process_classification_text_MIN_METADATA\": 0.1, # F1 TODO these are low\n \"LL1_GT_actor_group_association_prediction_MIN_METADATA\": 0.2, # RMSE\n \"LL1_HandOutlines_MIN_METADATA\": 0.88, # F1\n \"LL1_ItalyPowerDemand_MIN_METADATA\": 0.95, # F1\n \"LL1_Meat_MIN_METADATA\": 0.91, # F1_MACRO\n \"LL1_net_nomination_seed_MIN_METADATA\": 0.77, # Accuracy\n \"LL1_OSULeaf_MIN_METADATA\": 0.47, # F1_MACRO\n \"LL1_penn_fudan_pedestrian_MIN_METADATA\": 0.94, # OBJECT_DETECTION_AVERAGE_PRECISION\n \"LL1_PHEM_Monthly_Malnutrition_MIN_METADATA\": 830, # MAE\n \"LL1_PHEM_weeklyData_malnutrition_MIN_METADATA\": 3.5, # MAE\n \"LL1_retail_sales_total_MIN_METADATA\": 2150, # RMSE\n \"LL1_terra_canopy_height_long_form_s4_100_MIN_METADATA\": 85, # MAE\n \"LL1_terra_canopy_height_long_form_s4_70_MIN_METADATA\": 190, # MAE\n \"LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA\": 110, # MAE\n \"LL1_terra_canopy_height_long_form_s4_90_MIN_METADATA\": 70000, # MAE\n \"LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA\": 0.9, # MAE\n \"LL1_tidy_terra_panicle_detection_MIN_METADATA\": 0.26, # OBJECT_DETECTION_AVERAGE_PRECISION\n \"LL1_TXT_CLS_3746_newsgroup_MIN_METADATA\": 0.06, # F1 MACRO\n \"LL1_TXT_CLS_airline_opinion_MIN_METADATA\": 0.62, # Accuracy,\n \"LL1_TXT_CLS_apple_products_sentiment_MIN_METADATA\": 0.55, # Accuracy\n \"LL1_VTXC_1343_cora_MIN_METADATA\": 0.06, # F1_MACRO\n \"LL1_VTXC_1369_synthetic_MIN_METADATA\": 0.20, # F1_MACRO\n \"loan_status_MIN_METADATA\": 0.35, # F1_MACRO\n \"political_instability_MIN_METADATA\": 0.83, # F1\n \"SEMI_1040_sylva_prior_MIN_METADATA\": 0.932, # F1\n \"SEMI_1044_eye_movements_MIN_METADATA\": 0.6, # F1_MACRO\n \"SEMI_1053_jm1_MIN_METADATA\": 0.39, # F1\n \"SEMI_1217_click_prediction_small_MIN_METADATA\": 0.15, # F1\n \"SEMI_1459_artificial_characters_MIN_METADATA\": 0.63, # F1_MACRO\n \"SEMI_155_pokerhand_MIN_METADATA\": 0, # F1_MACRO #todo what score\n \"uu_101_object_categories_MIN_METADATA\": 0, # Accuracy #todo what score\n \"uu10_posts_3_MIN_METADATA\": 0.66, # F1_MACRO\n \"uu1_datasmash_MIN_METADATA\": 0, # F1_MACRO #Todo what score\n \"uu2_gp_hyperparameter_estimation_MIN_METADATA\": 99, # MSE # todo what score\n \"uu3_world_development_indicators_MIN_METADATA\": 700000000000, # RMSE\n \"uu4_SPECT_MIN_METADATA\": 0.89, # F1\n \"uu5_heartstatlog_MIN_METADATA\": 0.65, # F1\n \"uu6_hepatitis_MIN_METADATA\": 0.56, # F1\n \"uu7_pima_diabetes_MIN_METADATA\": 0.41, # F1\n \"uu8_posts_1_MIN_METADATA\": 55, # RMSE\n \"uu9_posts_2_MIN_METADATA\": 6, # RMSE\n}\n\n\ndef _run_seed_dataset(problem):\n if problem in [\n \"LL1_3476_HMDB_actio_recognition_MIN_METADATA\",\n \"LL1_VID_UCF11_MIN_METADATA\",\n ]:\n raise Exception(\"Problem type not supported\")\n\n command = \"python main.py\"\n with open(\"d3m_test_server.txt\", \"w\") as f_server:\n env[\n \"CUDA_VISIBLE_DEVICES\"\n ] = \"2\" # no cuda for test, OOM is a pain with multiprocessing.\n server_process = subprocess.Popen(\n command.split(\" \"),\n env=env,\n stderr=f_server,\n stdout=f_server,\n preexec_fn=os.setsid,\n )\n time.sleep(10)\n\n with open(\"d3m_test_dummy.txt\", \"w\") as f:\n env[\n \"CUDA_VISIBLE_DEVICES\"\n ] = \"2\" # no cuda for test, OOM is a pain with multiprocessing.\n command = f\"python -m dummy_ta3.dummy_ta3 -p ./seed_datasets_current/{problem}/TRAIN/problem_TRAIN/problemDoc.json -d ./seed_datasets_current -e 0.0.0.0 -t 45042 --time-bound-search 1800\"\n process = subprocess.Popen(\n command.split(\" \"), env=env, stderr=f, stdout=f, preexec_fn=os.setsid\n )\n\n start = time.time()\n while time.time() - start <= 60 * 60:\n poll = process.poll()\n if poll != None:\n break\n time.sleep(1)\n else:\n process.terminate()\n process.communicate()\n try:\n os.killpg(\n os.getpgid(process.pid), signal.SIGTERM\n ) # Really make sure everything is closed\n except Exception:\n pass\n raise TimeoutError(\"timeout on pipeline generation\")\n\n # shutdown server\n os.killpg(os.getpgid(server_process.pid), signal.SIGTERM)\n\n pipeline_ids = []\n with open(\"pipeline_id.txt\", \"r\") as f:\n for line in f:\n pipeline_ids.append(line.strip())\n\n search_id = pipeline_ids[0]\n # if not os.path.isdir(f\"{D3MOUTPUTDIR}/{search_id}\"):\n # os.mkdir(f\"{D3MOUTPUTDIR}/{search_id}\")\n if not os.path.isdir(f\"{D3MOUTPUTDIR}/{search_id}/score/\"):\n os.mkdir(f\"{D3MOUTPUTDIR}/{search_id}/score/\")\n\n pipeline_ids = pipeline_ids[1:]\n for pipeline_id in pipeline_ids:\n run_pipeline_command = (\n \"python -m d3m runtime \"\n f\"--volumes {D3MSTATICDIR} \"\n f\"-d {problem} \"\n f\"--context TESTING --random-seed 0 \"\n f\"fit-score \"\n f\"--scores {D3MOUTPUTDIR}/{search_id}/score/{pipeline_id}.csv \"\n f\"-p {D3MOUTPUTDIR}/{search_id}/pipelines_ranked/{pipeline_id}.json \"\n f\"-r {D3MINPUTDIR}/{problem}/{problem}_problem/problemDoc.json \"\n f\"-i {D3MINPUTDIR}/{problem}/TRAIN/dataset_TRAIN/datasetDoc.json \"\n f\"-t {D3MINPUTDIR}/{problem}/TEST/dataset_TEST/datasetDoc.json \"\n f\"-a {D3MINPUTDIR}/{problem}/SCORE/dataset_SCORE/datasetDoc.json\"\n )\n\n with open(\"d3m_test_fit.txt\", \"w\") as f:\n process = subprocess.Popen(\n run_pipeline_command.split(\" \"),\n env=env,\n stderr=f,\n stdout=f,\n preexec_fn=os.setsid,\n )\n env[\"CUDA_VISIBLE_DEVICES\"] = \"2\" #\n\n start = time.time()\n while time.time() - start <= 60 * 60 * 2:\n poll = process.poll()\n if poll != None:\n break\n time.sleep(1)\n else:\n process.terminate()\n process.communicate()\n os.killpg(os.getpgid(process.pid), signal.SIGTERM)\n raise TimeoutError(\"timeout on fit\")\n\n best_metric = None\n for pipeline_id in pipeline_ids:\n try:\n score = pd.read_csv(\n f\"{D3MOUTPUTDIR}/{search_id}/score/{pipeline_id}.csv\", \"r\"\n )\n except pd.errors.EmptyDataError:\n # raise Exception(f\"No score was generated for pipeline {pipeline_id}\")\n continue # only one pipeline needs to work\n metric = score[\"met\"][0].split(\",\")\n if best_metric is None:\n best_metric = metric[1]\n print(f\"\\n {metric[0]}: {metric[1]}\")\n if lower_is_better[metric[0]]:\n if metric[1] < best_metric:\n best_metric = metric[1]\n else:\n if metric[1] > best_metric:\n best_metric = metric[1]\n if best_metric is None:\n raise Exception(\"No valid pipeline was fitted\")\n with open(\"test_results.csv\", \"a\") as f:\n f.write(f\"{problem}: {best_metric}\\n\")\n if problem in problem_thresholds:\n print(f\"{best_metric} <> {problem_thresholds[problem]}\")\n if lower_is_better[metric[0]]:\n assert float(best_metric) <= problem_thresholds[problem]\n else:\n assert float(best_metric) >= problem_thresholds[problem]\n else:\n # there are some new datasets that we don't know the threshold of yet.\n assert best_metric.is_digit()\n\n\ndef test_fn():\n problems = os.listdir(SEEDATASETS)\n for problem in problems:\n # for problem in [\"185_baseball_MIN_METADATA\"]:\n yield _run_seed_dataset, problem\n\n # server_process.terminate()\n # server_process.communicate()\n","sub_path":"test_scripts/test_seeds.py","file_name":"test_seeds.py","file_ext":"py","file_size_in_byte":12647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"297370336","text":"# encoding: utf-8\n\"\"\"\n@describe: 大众点评美食抓取\nlist_city :城市的ID号码,依次是:上海,北京,广州,深圳,天津,杭州,南京,苏州,成都,武汉,重庆,西安\n\"\"\"\nimport json\nimport random\nimport requests\nimport sys\nimport io\n\n# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030') #改变标准输出的默认编码\n\n# 城市列表\nlist_city = [[\"武汉\", \"d96a24c312ed7b96fcc0cedd6c08f68c08e25c702ab1b810071e8e2c39502be1\"], ]\n# 请求头\nUSER_AGENT_LIST = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\"]\nhead = {\n 'User-Agent': '{0}'.format(random.sample(USER_AGENT_LIST, 1)[0]) # 随机获取\n}\n\nflag = 0\ncode = 0\n\n\n# 解析\ndef findFood(city, data):\n f = open('./' + city + 'data.txt', 'w', encoding='utf-8')\n global flag, code\n for data in json.loads(data)[\"shopBeans\"]:\n flag += 1\n # 人均消费\n avgPrice = str(data[\"avgPrice\"])\n # 分类名称\n mainCategoryName = str(data[\"mainCategoryName\"])\n # 所在区域名称\n mainRegionName = str(data[\"mainRegionName\"])\n # 口味评分\n tasteScore = str(data[\"score1\"])\n # 环境评分\n environmentScore = str(data[\"score2\"])\n # 服务评分\n serviceScore = str(data[\"score3\"])\n # 商品编号\n shopId = str(data[\"shopId\"])\n # 商铺名称\n shopName = str(data[\"shopName\"])\n # 商铺星级\n shopPower = str((data[\"shopPower\"]))\n\n f.write(shopName + ' ' + shopId + ' ' + shopPower + ' ' + mainRegionName + ' ' + mainCategoryName + ' ' +\n tasteScore + ' ' + environmentScore + ' ' + serviceScore + ' ' + avgPrice + ' ' + city + ' ' + '\\n')\n\n # params = (shopUrl,shopName, shopId, shopPower, mainRegionName, mainCategoryName, tasteScore, environmentScore, serviceScore, avgPrice, shopAddress, defaultPic, city)\n # try:\n # mysql_db.insert(sql,*params)\n # code +=1\n # print(\"----- 插入:\", code, \"条------\")\n # except:\n # print(\"已存在不再重复插入!!\")\n print(\"总条数:\", flag)\n f.close()\n\n\n# 抓取\ndef foodSpider(city_list):\n city = city_list[0]\n url = city_list[1]\n base_url = \"http://www.dianping.com/mylist/ajax/shoprank?rankId=\" + url\n html = requests.get(base_url, headers=head)\n findFood(city=city, data=str(html.text))\n\n\nif __name__ == '__main__':\n for city_data in list_city:\n foodSpider(city_data)\n","sub_path":"history/code/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"347230647","text":"import matplotlib.pyplot as plt\nimport os\nimport Processing\nimport numpy as np\nimport constant as const\nplt.switch_backend('agg')\n\nField = 'Ey'\n\ndef plotE(E,x,title,savedir):\n #pngdir = const.gifdir + 'Field/'\n #os.makedirs(pngdir,exist_ok = True)\n ####savedir###\n #savedir = pngdir + title + 'Field.jpg'\n\n ###plotField####\n ################ \n #plt.figure(figsize=[4,3])\n #x = np.linspace(0,const.x_end/1e-6,const.Nx)\n #y = np.linspace(0,const.y_lenth/1e-6,const.Ny)\n x = np.linspace(const.x_min+const.delta_x/2,const.x_end-const.delta_x/2,const.Nx)\n y = np.linspace(const.y_min+const.delta_y/2,const.y_max-const.delta_y/2,const.Ny)\n\n\n X,Y = np.meshgrid(x,y)\n X = X/1e-6\n Y = Y/1e-6\n plt.figure(figsize=[4,3])\n im = plt.pcolormesh(X[::int(E.shape[1]/500),::int(E.shape[0]/500)],Y[::int(E.shape[1]/500),::int(E.shape[0]/500)],E[::int(E.shape[0]/500),::int(E.shape[1]/500)].T,cmap = plt.cm.bwr)#\n\n #im = plt.pcolormesh(X,Y,E.T,cmap = plt.cm.bwr)\n\n #,norm=mpl.colors.LogNorm())\n #plt.pcolormesh(X,Y,E.T,cmap=plt.cm.bwr)\n cbar = plt.colorbar()\n im.set_clim([-(np.abs(E)).max(),(np.abs(E)).max()])\n plt.xlabel('um')\n plt.ylabel('um')\n plt.xlim([0,400])\n plt.ylim([-200,200])\n #title = str(str(float(x)*const.dt_snapshot/1e-15) + 'fs')\n plt.title(title)\n #plt.axis('off')\n plt.savefig(savedir,dpi=160,bbox_inches = 'tight')\n plt.close('all')\n\ndef plotFieldWrapper(E,x,title,Field):\n pngdir = const.gifdir + Field + 'Field/'\n os.makedirs(pngdir,exist_ok = True)\n ####savedir###\n savedir = const.figdir + title + 'Field.jpg'\n plotE(E,x,title,savedir)\n\ndef main(x):\n ey ,title = Processing.getEy(x)\n plotFieldWrapper(ey,x,title,Field)\n\nif __name__ == '__main__':\n x = 1500\n #pngdir = const.gifdir + 'BzThz/'\n #os.makedirs(pngdir,exist_ok = True)\n ####savedir###\n #savedir = const.figdir +'_' + str(x) + 'THz.jpg'\n main(x)\n","sub_path":"plotEy.py","file_name":"plotEy.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"544792600","text":"s = input()\na = int(s[:2]); b = int(s[2:])\n\ndef check_mm(n):\n if 0 < n and n < 13:\n return True\n else:\n return False\n\nif a == 0 or b == 0:\n print('NA')\nelif check_mm(a) and check_mm(b):\n print('AMBIGUOUS')\nelif check_mm(b):\n print('YYMM')\nelif check_mm(a):\n print('MMYY')\n\n\n\n\n\n\n\n\n'''\nn, k = map(int, input().split())\ns = input()\n\nss = ''\nfor u, v in enumerate(s):\n if u == k-1:\n ss = ss + v.lower()\n else:\n ss = ss + v\nprint(ss)\n\n\nprint('YYMM')\nprint('MMYY')\nprint('AMBIGUOUS')\nprint('NA')\n\n\nex.1\n3 1\nABC\n\nex.1\n1905\nYYMM\n\nex.2\n0112\nAMBIGUOUS\n\nex.3\n1700\nNA\n'''\n","sub_path":"abc126/abc126_b.py","file_name":"abc126_b.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"404836942","text":"from collections import namedtuple\nCard = namedtuple('Card', ('suite', 'face'))\ncard1 = Card('红桃', 13)\ncard2 = Card('草花', 5)\nprint(f'{card1.suite}{card1.face}')\nprint(f'{card2.suite}{card2.face}')\n\nclass MyCard(Card):\n def show(self):\n faces = ['', 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n return f'{self.suite}{faces[self.face]}'\n\n\nif __name__ == '__main__':\n print(Card) # \n card3 = MyCard('方块', 12)\n print(card3.show()) # 方块Q\n print(dict(card1._asdict())) # {'suite': '红桃', 'face': 13}\n print(card2._replace(suite='方块')) # Card(suite='方块', face=5)","sub_path":"algo/chapter06/MyCard.py","file_name":"MyCard.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"323169034","text":"import os\nfrom collections import namedtuple\nfrom contextlib import contextmanager\n\nfrom conans.client.loader import parse_conanfile\nfrom conans.client.recorder.action_recorder import ActionRecorder\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.requires import Requirement\nfrom conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR\nfrom conans.util.conan_v2_mode import conan_v2_behavior\n\nPythonRequire = namedtuple(\"PythonRequire\", [\"ref\", \"module\", \"conanfile\",\n \"exports_folder\", \"exports_sources_folder\"])\n\n\nclass PyRequire(object):\n def __init__(self, module, conanfile, ref, path):\n self.module = module\n self.conanfile = conanfile\n self.ref = ref\n self.path = path\n\n\nclass PyRequires(object):\n \"\"\" this is the object that replaces the declared conanfile.py_requires\"\"\"\n def __init__(self):\n self._pyrequires = {} # {pkg-name: PythonRequire}\n self._transitive = {}\n\n def update_transitive(self, conanfile):\n transitive = getattr(conanfile, \"python_requires\", None)\n if not transitive:\n return\n for name, transitive_py_require in transitive.all_items():\n existing = self._pyrequires.get(name)\n if existing and existing.ref != transitive_py_require.ref:\n raise ConanException(\"Conflict in py_requires %s - %s\"\n % (existing.ref, transitive_py_require.ref))\n self._transitive[name] = transitive_py_require\n\n def all_items(self):\n new_dict = self._pyrequires.copy()\n new_dict.update(self._transitive)\n return new_dict.items()\n\n def all_refs(self):\n return ([r.ref for r in self._pyrequires.values()] +\n [r.ref for r in self._transitive.values()])\n\n def items(self):\n return self._pyrequires.items()\n\n def __getitem__(self, item):\n try:\n return self._pyrequires[item]\n except KeyError:\n raise ConanException(\"'%s' is not a python_require\" % item)\n\n def __setitem__(self, key, value):\n # single item assignment, direct\n existing = self._pyrequires.get(key)\n if existing:\n raise ConanException(\"The python_require '%s' already exists\" % key)\n self._pyrequires[key] = value\n\n\nclass PyRequireLoader(object):\n def __init__(self, proxy, range_resolver):\n self._proxy = proxy\n self._range_resolver = range_resolver\n self._cached_py_requires = {}\n\n def enable_remotes(self, check_updates=False, update=False, remotes=None):\n self._check_updates = check_updates\n self._update = update\n self._remotes = remotes\n\n @contextmanager\n def capture_requires(self):\n # DO nothing, just to stay compatible with the interface of python_requires\n yield []\n\n def load_py_requires(self, conanfile, lock_python_requires, loader):\n if not hasattr(conanfile, \"python_requires\") or isinstance(conanfile.python_requires, dict):\n return\n py_requires_refs = conanfile.python_requires\n if isinstance(py_requires_refs, str):\n py_requires_refs = [py_requires_refs, ]\n\n py_requires = self._resolve_py_requires(py_requires_refs, lock_python_requires, loader)\n if hasattr(conanfile, \"python_requires_extend\"):\n py_requires_extend = conanfile.python_requires_extend\n if isinstance(py_requires_extend, str):\n py_requires_extend = [py_requires_extend, ]\n for p in py_requires_extend:\n pkg_name, base_class_name = p.rsplit(\".\", 1)\n base_class = getattr(py_requires[pkg_name].module, base_class_name)\n conanfile.__bases__ = (base_class,) + conanfile.__bases__\n conanfile.python_requires = py_requires\n\n def _resolve_py_requires(self, py_requires_refs, lock_python_requires, loader):\n result = PyRequires()\n for py_requires_ref in py_requires_refs:\n py_requires_ref = self._resolve_ref(py_requires_ref, lock_python_requires)\n try:\n py_require = self._cached_py_requires[py_requires_ref]\n except KeyError:\n conanfile, module, new_ref, path = self._load_pyreq_conanfile(loader,\n lock_python_requires,\n py_requires_ref)\n py_require = PyRequire(module, conanfile, new_ref, path)\n self._cached_py_requires[py_requires_ref] = py_require\n result[py_require.ref.name] = py_require\n # Update transitive and check conflicts\n result.update_transitive(py_require.conanfile)\n return result\n\n def _resolve_ref(self, py_requires_ref, lock_python_requires):\n ref = ConanFileReference.loads(py_requires_ref)\n if lock_python_requires:\n locked = {r.name: r for r in lock_python_requires}[ref.name]\n ref = locked\n else:\n requirement = Requirement(ref)\n self._range_resolver.resolve(requirement, \"py_require\", update=self._update,\n remotes=self._remotes)\n ref = requirement.ref\n return ref\n\n def _load_pyreq_conanfile(self, loader, lock_python_requires, ref):\n recipe = self._proxy.get_recipe(ref, self._check_updates, self._update,\n remotes=self._remotes, recorder=ActionRecorder())\n path, _, _, new_ref = recipe\n conanfile, module = loader.load_basic_module(path, lock_python_requires, user=new_ref.user,\n channel=new_ref.channel)\n conanfile.name = new_ref.name\n conanfile.version = str(new_ref.version) \\\n if os.environ.get(CONAN_V2_MODE_ENVVAR, False) else new_ref.version\n\n if getattr(conanfile, \"alias\", None):\n ref = ConanFileReference.loads(conanfile.alias)\n conanfile, module, new_ref, path = self._load_pyreq_conanfile(loader,\n lock_python_requires,\n ref)\n return conanfile, module, new_ref, os.path.dirname(path)\n\n\nclass ConanPythonRequire(object):\n def __init__(self, proxy, range_resolver):\n self._cached_requires = {} # {reference: PythonRequire}\n self._proxy = proxy\n self._range_resolver = range_resolver\n self._requires = None\n self.valid = True\n self._check_updates = False\n self._update = False\n self._remote_name = None\n self.locked_versions = None\n\n def enable_remotes(self, check_updates=False, update=False, remotes=None):\n self._check_updates = check_updates\n self._update = update\n self._remotes = remotes\n\n @contextmanager\n def capture_requires(self):\n old_requires = self._requires\n self._requires = []\n yield self._requires\n self._requires = old_requires\n\n def _look_for_require(self, reference):\n ref = ConanFileReference.loads(reference)\n ref = self.locked_versions[ref.name] if self.locked_versions is not None else ref\n try:\n python_require = self._cached_requires[ref]\n except KeyError:\n requirement = Requirement(ref)\n self._range_resolver.resolve(requirement, \"python_require\", update=self._update,\n remotes=self._remotes)\n ref = requirement.ref\n result = self._proxy.get_recipe(ref, self._check_updates, self._update,\n remotes=self._remotes,\n recorder=ActionRecorder())\n path, _, _, new_ref = result\n module, conanfile = parse_conanfile(conanfile_path=path, python_requires=self)\n\n # Check for alias\n if getattr(conanfile, \"alias\", None):\n # Will register also the aliased\n python_require = self._look_for_require(conanfile.alias)\n else:\n package_layout = self._proxy._cache.package_layout(new_ref, conanfile.short_paths)\n exports_sources_folder = package_layout.export_sources()\n exports_folder = package_layout.export()\n python_require = PythonRequire(new_ref, module, conanfile,\n exports_folder, exports_sources_folder)\n self._cached_requires[ref] = python_require\n\n return python_require\n\n def __call__(self, reference):\n conan_v2_behavior(\"Old syntax for python_requires is deprecated\")\n if not self.valid:\n raise ConanException(\"Invalid use of python_requires(%s)\" % reference)\n try:\n python_req = self._look_for_require(reference)\n self._requires.append(python_req)\n return python_req.module\n except NotFoundException:\n raise ConanException('Unable to find python_requires(\"{}\") in remotes'.format(reference))\n","sub_path":"conans/client/graph/python_requires.py","file_name":"python_requires.py","file_ext":"py","file_size_in_byte":9337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"497935299","text":"import queue\n\ndef createMaze():\n maze = []\n maze.append([\"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\", \"O\", \"#\"])\n maze.append([\"#\", \"#\", \"#\", \"#\", \"#\", \" \", \" \", \" \", \" \", \"#\", \" \", \"#\"])\n maze.append([\"#\", \" \", \"#\", \" \", \" \", \" \", \"#\", \" \", \"#\", \"#\", \" \", \"#\"])\n maze.append([\"#\", \" \", \"#\", \" \", \"#\", \"#\", \" \", \" \", \" \", \"#\", \" \", \" \"])\n maze.append([\"#\", \" \", \"#\", \" \", \"#\", \"#\", \" \", \"#\", \" \", \" \", \" \", \"#\"])\n maze.append([\"#\", \" \", \" \", \" \", \" \", \" \", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\"])\n maze.append([\"#\", \"#\", \"#\", \"#\", \"#\", \"X\", \"#\", \"#\", \"#\", \"#\", \"#\", \"#\"])\n\n return maze\n\ndef printMaze(path, maze):\n print(\"printing...\")\n for i, pos in enumerate(maze[0]):\n if pos == \"O\":\n start = i\n\n xCoord = start\n yCoord = 0\n coords = set()\n for move in path:\n if move == \"U\":\n yCoord -= 1\n elif move == \"D\":\n yCoord += 1\n elif move == \"L\":\n xCoord -= 1\n elif move == \"R\":\n xCoord += 1\n\n coords.add((yCoord, xCoord))\n\n for y, ypos in enumerate(maze):\n for x, xpos in enumerate(maze[y]):\n if (y, x) in coords:\n print(\"+ \", end=\"\")\n else:\n print(maze[y][x] + \" \", end=\"\")\n print()\n print(\"maze printed.\")\n\ndef validate(path, maze):\n #invalid if outside maze or crosses over a hash\n print(\"validating \" + str(path))\n for i, pos in enumerate(maze[0]):\n if pos == \"O\":\n start = i\n \n already_visited = []\n xCoord = start\n yCoord = 0\n for i, move in enumerate(path):\n if move == \"U\":\n yCoord -= 1\n if ([yCoord, xCoord] in already_visited):\n #print(\"invalid: doubled back\")\n return False\n else:\n already_visited.append([yCoord, xCoord])\n\n elif move == \"D\":\n yCoord += 1\n if ([yCoord, xCoord] in already_visited):\n print(\"invalid: doubled back\")\n return False\n else:\n already_visited.append([yCoord, xCoord])\n\n elif move == \"L\":\n xCoord -= 1\n if ([yCoord, xCoord] in already_visited):\n #print(\"invalid: doubled back\")\n return False\n else:\n already_visited.append([yCoord, xCoord])\n\n elif move == \"R\":\n xCoord += 1\n if ([yCoord, xCoord] in already_visited):\n #print(\"invalid: doubled back\")\n return False\n else:\n already_visited.append([yCoord, xCoord])\n\n #print(yCoord, xCoord, already_visited)\n \n if not(0 <= yCoord < len(maze) and 0 < xCoord < len(maze[0])):\n print(\"invalid: exceeded borders\")\n return False\n elif (maze[yCoord][xCoord] == \"#\"):\n print(\"invalid: barrier\")\n return False\n\n print(\"validated\")\n return True\n\ndef findEnd(path, maze):\n #see if last coordinate is equal to the X\n print(\"finding end...\")\n for i, pos in enumerate(maze[0]):\n if pos == \"O\":\n start = i\n\n xCoord = start\n yCoord = 0\n for move in path:\n if move == \"U\":\n yCoord -= 1\n elif move == \"D\":\n yCoord += 1\n elif move == \"L\":\n xCoord -= 1\n elif move == \"R\":\n xCoord += 1\n if maze[yCoord][xCoord] == \"X\":\n printMaze(path, maze)\n print(True)\n return True\n print(False)\n return False\n\n\nnums = queue.Queue()\nnums.put(\"\")\nmaze = createMaze()\naddedTo = \"\"\n\nwhile not findEnd(addedTo, maze):\n addedTo = nums.get()\n print(addedTo)\n for i in [\"U\", \"D\", \"L\", \"R\"]:\n toBeQueued = addedTo + i\n if validate(toBeQueued, maze):\n nums.put(toBeQueued)","sub_path":"Breadth_First_by_Z.py","file_name":"Breadth_First_by_Z.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"281717938","text":"# Given an object/dictionary with keys and values that consist of both strings and integers, design an algorithm to calculate and return the sum of all of the numeric values.\n# For example, given the following object/dictionary as input:\n\ndef returnSum(obj):\n\n sum = 0\n for i in obj:\n if isinstance(obj[i], int):\n sum = sum + obj[i]\n\n return sum\n\ndi = {\n\"cat\": \"bob\",\n\"dog\": 23,\n19: 18,\n90: \"fish\"\n}\n\nprint(returnSum(di))\n# Your algorithm should return 41, the sum of the values 23 and 18.","sub_path":"Whiteboard.py","file_name":"Whiteboard.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"328504111","text":"import os\nimport threading\nimport socket\nfrom datetime import datetime \nserver_name = \"2016310932 web server \"\nresponse_202 = \"HTTP/1.1 200 OK\\r\\nServer: {0}\\r\\n\".format(server_name)\nresponse_404 = \"HTTP/1.1 404 NOT FOUND\\r\\nServer: {0}\\r\\nContent-Length: 13\\r\\nContent-Type: text/plain\\r\\n\\r\\n404 NOT FOUND\".format(\n server_name)\nresponse_403 = \"HTTP/1.1 403 FORBIDDEN\\r\\nServer: {0}\\r\\nContent-Length: 13\\r\\nContent-Type: text/plain\\r\\n\\r\\n403 Forbidden\".format(\n server_name)\n \n# table for ID,PASSWORD\nuser_table ={\"hyun\":\"1234\",\"aaa\":\"aaa\"}\nexpire_table ={} # ID / datetime\n# cookie for ID\ndef cookie_setting(ID,PW):\n return \"Set-Cookie: id=\"+ID+\"; max-age=30 Secure; \\r\\n\" + \"Set-Cookie: password=\"+PW+\";max-age=30 Secure; \\r\\n\"\ndef cookie_clear():\n text=\"\"\n for ID,PW in user_table.items():\n text += \"Set-Cookie: id=\"+ID+\"; max-age=0 Secure; \\r\\n\" + \"Set-Cookie: password=\"+PW+\";max-age=0 Secure; \\r\\n\"\n print(text)\n return text\n \ndef print_info():\n for id,passwd in user_table.items():\n print(id+\" \"+passwd)\ndef Login_info(refined_ID,refined_PW):\n for id,passwd in user_table.items():\n #print(id+\" \"+passwd)\n if (id == refined_ID) and (passwd == refined_PW) :\n return True\n # if not found\n return False\ndef time_cal(id_compare):\n now = datetime.now()\n for ID in expire_table.keys():\n print(ID)\n if(ID == id_compare):\n diff = (now-expire_table[ID]).seconds\n return str(30 - diff)\n return \" \"\ndef serv_work(client_socket, addr):\n data = client_socket.recv(65535)\n if len(data) < 1 :\n return\n print(data)\n request_data = data.decode().split()\n request_method = request_data[0]\n request_loc = request_data[1]\n request_version = request_data[2]\n if request_method == \"GET\":\n if request_loc == '/' :\n response_data = response_202 + \"Date: {0}\\r\\n\".format(datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n #response_data += cookie_setting\n if os.path.exists(\"./index.html\"):\n page = open(\"./index.html\",\"r\")\n page_data = page.read()\n #response_data += \"Content-Length: %d\\r\\n\" % len(page_data) # this doesn't work\n response_data += \"Content-Type: text/html;charset=UTF-8\\r\\n\\r\\n\"\n response_data += (page_data +\"\\r\\n\\r\\n\")\n print(\"sending data:\")\n #print(response_data)\n client_socket.sendall(response_data.encode())\n else :\n response_data = response_404 #+ \"Date: {2}\\r\\n\".format(datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n client_socket.sendall(response_data.encode())\n elif request_loc == \"/cookie.html\":\n request_id = request_data[-2][3:-1]\n request_pw = request_data[-1][9:]\n if Login_info(request_id,request_pw) == False :\n client_socket.sendall(response_403.encode())\n page = open(\"./cookie.html\",\"r\")\n page_data = page.read()\n page_data = page_data.replace(\"$ID\",request_id)\n diff = time_cal(request_id)\n page_data = page_data.replace(\"$TIME\",diff)\n\n page_len = str(len(page_data))\n response_data = response_202\n response_data += \"Content-Length: \"+page_len+\"\\r\\nContent-Type: text/html;charset=UTF-8\\r\\n\\r\\n\"\n client_socket.sendall(response_data.encode())\n client_socket.sendall(page_data.encode())\n \n elif request_loc == \"/favicon.ico\" :\n request_id = request_data[-2][3:-1]\n request_pw = request_data[-1][9:] \n if Login_info(request_id,request_pw) == False :\n client_socket.sendall(response_403.encode())\n print(\"favicon fail\")\n return\n response_data = response_202 + \"Date: {0}\\r\\n\".format(datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n item = open(\"./favicon.ico\",\"rb\")\n item_data = item.read()\n response_data += \"Content-Length: {0}\\r\\n\".format(len(item_data))\n response_data += \"Content-Type: image/jpeg\\r\\n\\r\\n\"\n client_socket.sendall(response_data.encode())\n client_socket.sendall(item_data)\n # return;\n elif len(request_loc) > 1:\n # split b/w image & other\n print(\"request loc :\"+ request_loc)\n file_loc = \".\"+request_loc\n #print(file_loc[-3:])\n # should check privileged to access it\n if os.path.exists(file_loc) :\n #if request_data[-2][:1] == \"id\":\n request_id = request_data[-2][3:-1]\n #if request_data[-1][:7] == \"password\":\n request_pw = request_data[-1][9:]\n #print(request_id,request_pw)\n #print(request_id,request_pw)\n #print(Login_info(request_id,request_pw))\n if Login_info(request_id,request_pw) == False :\n if \"index\" not in file_loc:\n client_socket.sendall(response_403.encode())\n return\n response_data = response_202 + \"Date: {0}\\r\\n\".format(datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n item = open(file_loc,\"rb\")\n item_data = item.read()\n response_data += \"Content-Length: {0}\\r\\n\".format(len(item_data))\n if file_loc[-3:] in \"jpg/jpeg/png/JPG\" :\n response_data += \"Content-Type: image/jpeg\\r\\n\\r\\n\" \n elif file_loc[-4:] in \"html\" :\n response_data += \"Content-Type: text/html;charset=UTF-8\\r\\n\\r\\n\" \n elif file_loc[-3:] in \"pdf\" :\n response_data += \"Content-Type: application/pdf;charset=UTF-8\\r\\n\\r\\n\" \n else :\n response_data += \"Content-Type: text/plain;charset=UTF-8\\r\\n\\r\\n\" \n client_socket.sendall(response_data.encode())\n print(\"sending data: {0}\".format(file_loc))\n client_socket.sendall(item_data)\n else :\n print(\"sending data: {0} NOT FOUND\".format(file_loc))\n response_data = response_404 #+ \"Date: {2}\\r\\n\".format(datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n #print(response_data)\n client_socket.sendall(response_data.encode())\n elif request_method == \"POST\":\n #print(\"here is -1, -2\")\n #print(request_data[-1]) # password\n #print(request_data[-2]) # ID\n request_id = request_data[-2][3:]\n request_pw = request_data[-1][9:]\n #print(request_id,request_pw)\n if Login_info(request_id,request_pw) == False :\n client_socket.sendall(response_403.encode())\n return\n print(\"POST request loc :\"+ request_loc)\n file_loc = \".\"+request_loc\n page = open(file_loc,\"r\")\n page_data = page.read()\n response_data = response_202 + \"Date: {0}\\r\\n\".format(datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n response_data += cookie_setting(request_id,request_pw)\n expire_table[request_id] = datetime.now()\n #print(\"setting time: \" + str(expire_table[request_id]))\n response_data += \"Content-Type: text/html\\r\\n\" + \"charset=UTF-8\\r\\n\\r\\n\"\n response_data += (page_data +\"\\r\\n\\r\\n\")\n print(\"sending data:\")\n print(response_data)\n client_socket.send(response_data.encode())\n \n else: # request is not GET\n response_data = \"{0} 405 Method Not Allowed\\nServer: {1}\\nDate: {2}\\n\".format(request_version, server_name, \n datetime.now().strftime('%a, %d %b %Y %H:%M:%S KST'))\n client_socket.close()\n\ndef main():\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind(('',10080))\n server_socket.listen(5)\n print('The TCP server is ready to receive')\n while True:\n client_socket, addr = server_socket.accept()\n con_Thread = threading.Thread(target=serv_work,\n args=(client_socket,addr))\n con_Thread.start()\nif __name__ == \"__main__\":\n #print(Login_info(\"ID=hwbae\",\"password=1234\"))\n main()\n\n#https://noodle-dev.tistory.com/63\n#https://kentakang.com/133","sub_path":"network/assignment2/tmp/server_with_favicon.py","file_name":"server_with_favicon.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"40886717","text":"# String Checking Function\ndef string_checker(question, to_check):\n valid = False\n while not valid:\n\n response = input(question).lower()\n\n for item in to_check:\n if response == item:\n return response\n elif response == item[0]:\n return item\n\n print(\"sorry that is not a valid response\")\n\n# Asks user for prefered shape\nprefered_shape = [\"rectangle\",\"triangle\",\"circle\",\"square\"]\nshape = string_checker(\"What shape do you want? \", prefered_shape)\nprint(shape)\n\n","sub_path":"06_Ask_user_shape.py","file_name":"06_Ask_user_shape.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"119036343","text":"import itertools\n\nimport imghdr\nimport json\nimport os\nimport re\n\nfrom ALICE.models import Specimen\nfrom ALICE.models.logger import logger\n\n\nclass SpecimenQueue(object):\n \"\"\"\n A list of specimens and their associated views.\n :param specimens: a list of Specimen objects\n\n \"\"\"\n\n def __init__(self, specimens):\n self.specimens = specimens\n\n def __getitem__(self, item):\n return self.specimens[item]\n\n def __setitem__(self, key, value):\n self.specimens[key] = value\n\n def __delitem__(self, key):\n del self.specimens[key]\n\n @classmethod\n def load(cls, calibrator, root, specimen_sep='_', lookup_file='.lookup'):\n \"\"\"\n Load specimens from a folder using the given calibration.\n :param calibrator: the calibrator instance for this batch\n :param root: the folder where the images are stored\n :param specimen_sep: the character separating the specimen ID from the camera\n ID in the filename, e.g. in SPECIMEN_ALICE2.jpg the sep\n is _ (Default value = '_')\n :param lookup_file: a json config file linking filenames with camera IDs using\n regex (Default value = '.lookup')\n :returns: SpecimenQueue\n\n \"\"\"\n img_files = [f for f in os.listdir(root) if\n os.path.isfile(os.path.join(root, f)) and imghdr.what(\n os.path.join(root, f)) is not None]\n specimen_files = {k: list(v) for k, v in itertools.groupby(sorted(img_files),\n lambda x:\n x.split(specimen_sep)[\n 0])}\n specimens = []\n\n with open(lookup_file, 'r') as f:\n camera_lookup = json.load(f)\n\n for ix, (specimen, filenames) in enumerate(specimen_files.items()):\n images = []\n for rgx, camera_id in camera_lookup.items():\n try:\n f = next(i for i in filenames if\n re.search(rgx, i))\n try:\n vp = calibrator[camera_id].position\n images.append((vp, os.path.join(root, f)))\n except KeyError:\n continue\n except StopIteration:\n continue\n s = Specimen.from_images(specimen, images)\n specimens.append(s)\n logger.debug(f'loaded {s.id} ({ix + 1}/{len(specimen_files)})')\n return cls(specimens)\n\n def limit(self, n):\n \"\"\"\n Return a queue with only the top n specimens.\n :param n: the number of specimens in the new queue\n\n \"\"\"\n return SpecimenQueue(self.specimens[:min(n, len(self.specimens))])\n\n def try_process(self, specimen_transform, log_stage=None):\n \"\"\"\n Try to apply the specified function/transform to each specimen in the queue.\n :param specimen_transform: a function taking a specimen object as the argument\n and outputting a specimen object\n :param log_stage: a description of the stage, e.g. 'find features' (optional)\n :returns: a new specimen queue\n\n \"\"\"\n specimens = []\n q = self.specimens.copy()\n attempts = {s.id: 0 for s in self.specimens}\n log_stage = '' if log_stage is None else f' at {log_stage} stage'\n while len(q) > 0 and all([v < 3 for v in attempts.values()]):\n s = q.pop(0)\n try:\n specimens.append(specimen_transform(s))\n logger.debug(f'processed {s.id}{log_stage}')\n del attempts[s.id]\n except Exception as e:\n logger.debug(f'{s.id} failed{log_stage}: {e}')\n attempts[s.id] += 1\n q.append(s)\n continue\n return SpecimenQueue(specimens)\n","sub_path":"build/lib/ALICE/models/specimen_queue.py","file_name":"specimen_queue.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"374087562","text":"# INPUT: 'ZpglnRxqenU' OUTPUT: 'Z-Pp-Ggg-Llll-Nnnnn-Rrrrrr-Xxxxxxx-Qqqqqqqq-Eeeeeeeee-Nnnnnnnnnn-Uuuuuuuuuuu'\n\ndef accum(seq):\n output=str()\n j=int(1)\n for i in range(0,len(seq),1):\n if 'a' <= seq[i] <= 'z':\n output=output+((seq[i].upper())+seq[i]*(j-1))\n if (i!=len(seq)-1):\n output=output+'-'\n else:\n output=output+(seq[i] + (seq[i].lower() * (j - 1)))\n if (i != len(seq) - 1):\n output = output + '-'\n j=j+1\n return output\n\nif __name__ == '__main__':\n print(accum('ZpglnRxqenU'))\n","sub_path":"Mumbling.py","file_name":"Mumbling.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"342322749","text":"from flask import Blueprint, jsonify, Response, request, g, url_for, session\r\nimport json\r\nfrom app.common.models import Post, db, User\r\nfrom app.common.forms import PostForm\r\nfrom app.common.decorators import login_required\r\n\r\napi = Blueprint('api', __name__)\r\n@api.before_request\r\ndef current_user():\r\n user = None\r\n if 'user_id' in session:\r\n user = User.query.filter_by(id=session['user_id']).first()\r\n g.user = user\r\n\r\n# serializer\r\ndef get_posts(post_id=None):\r\n\tdata = []\r\n\tif (post_id is None):\r\n\t\tposts = Post.query.order_by(Post.updated_at.desc()).all()[:5]\r\n\t\tfor post in posts:\r\n\t\t\tobj = {\r\n\t\t\t\t'id': post.id,\r\n\t\t\t\t'author': post.author.username,\r\n\t\t\t\t'created_at': post.created_at.strftime('%Y-%m-%dT%H:%M:%S'),\r\n\t\t\t\t'updated_at': post.updated_at.strftime('%Y-%m-%dT%H:%M:%S'),\r\n\t\t\t\t'content': post.content,\r\n\t\t\t\t'profile_link': url_for('site.profile_page', username=post.author.username)\r\n\t\t\t}\r\n\t\t\tdata.append(obj)\r\n\t\treturn json.dumps(data, indent=4, separators=(',', ': '))\r\n\tpost = Post.query.filter_by(id=post_id).first()\r\n\tdata = [{\r\n\t\t'id': post.id,\r\n\t\t'author': post.author.username,\r\n\t\t'created_at': post.created_at.strftime('%Y-%m-%dT%H:%M:%S'),\r\n\t\t'updated_at': post.updated_at.strftime('%Y-%m-%dT%H:%M:%S'),\r\n\t\t'content': post.content,\r\n\t\t'profile_link': url_for('site.profile_page', username=post.author.username, _external=True)\r\n\r\n\t}]\r\n\treturn json.dumps(data, indent=4, separators=(',', ': '))\r\n\r\n\r\n@api.route('/posts')\r\ndef posts():\r\n\tjson = get_posts()\r\n\treturn make_response(json)\r\n\r\n\r\n@api.route('/add_post', methods=['POST'])\r\ndef add_post():\r\n\tform = PostForm(request.form)\r\n\tif form.validate_on_submit():\r\n\t\tpost = Post(author_id=g.user.id, content=form.content.data)\r\n\t\tdb.session.add(post)\r\n\t\tdb.session.commit()\r\n\t\t# success\r\n\t\treturn make_response(get_posts(post_id=post.id))\r\n\treturn jsonify([{'message': 'This is an error!'}])\r\n\r\n@api.route('/user1/follow', methods=['POST', 'GET'])\r\ndef follow_user():\r\n\treturn jsonify({'message': 'Success'})\r\n\r\n@api.route('/user1/unfollow', methods=['POST', 'GET'])\r\ndef unfollow_user():\r\n\treturn jsonify({'message': 'Success'})\r\n\r\n\r\n\r\ndef make_response(data):\r\n\tresponse = Response(response=data, status=200, mimetype=\"application/json\")\r\n\treturn response\r\n","sub_path":"app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"380096764","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom threading import Thread\nfrom colorama import Fore, Back, Style\nimport telepot, time, os\n\nprint('Conectando ao Telegram....')\ntime.sleep(1)\n\nbot = telepot.Bot('317350650:AAHD_eHPCQ1iaQzo6js0KU8UitSEK04MUFQ')\nchat_id = None\n\nprint(Back.BLACK + 'Setando o fundo preto!')\n\nos.system('clear')\nprint(Fore.BLUE + 'Pronto para receber mensagens! Para enviar, basta apertar CTRL+C e digitar a sua mensagem!\\n\\n')\n\n\ndef handler(msg):\n global chat_id\n\n nome = msg['from']['first_name']\n txt = msg['text']\n chat_id = msg['chat']['id']\n\n if msg['chat']['type'] == 'group':\n group_name = msg['chat']['title']\n print(Fore.BLUE + ' Mensagem de ', end='')\n print(Fore.RED + nome, end='')\n print(Fore.BLUE + ' em ', end='')\n print(Fore.GREEN + group_name)\n print(Fore.GREEN + '\\n>', end='')\n print(Fore.RED + txt + '\\n\\n')\n else:\n print(Fore.BLUE + ' Mensagem privada de ', end='')\n print(Fore.RED + nome, end='')\n print(Fore.GREEN + '\\n>', end='')\n print(Fore.RED + txt + '\\n\\n')\n\n\n\ndef s_message():\n print(Fore.BLUE + '\\n Mensagem a enviar:')\n message = input(Fore.GREEN + '>')\n bot.sendMessage(chat_id, message)\n print(Fore.BLUE + '\\n Mensagem enviada!\\n\\n')\n time.sleep(0.5)\n\n\ndef receive():\n bot.message_loop(handler)\n\n\nt_receive = Thread(target=receive())\nt_receive.start()\n\n\ndef send_loop():\n try:\n while True:\n pass\n except KeyboardInterrupt:\n s_message()\n send_loop()\n except:\n send_loop()\n\n\nsend_loop()\n","sub_path":"speak-bot.py","file_name":"speak-bot.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"45857811","text":"# -*- coding: utf-8 -*-\nimport decimal\nfrom google.protobuf import struct_pb2\nimport six\n\nfrom . import issues, types, _apis\n\n\n_SHIFT_BIT_COUNT = 64\n_SHIFT = 2 ** 64\n_SIGN_BIT = 2 ** 63\n_DecimalNanRepr = 10 ** 35 + 1\n_DecimalInfRepr = 10 ** 35\n_DecimalSignedInfRepr = -(10 ** 35)\n_primitive_type_by_id = {}\n\n\ndef _initialize():\n for pt in types.PrimitiveType:\n _primitive_type_by_id[pt._idn_] = pt\n\n\n_initialize()\n\n\nclass _DotDict(dict):\n def __init__(self, *args, **kwargs):\n super(_DotDict, self).__init__(*args, **kwargs)\n\n def __getattr__(self, item):\n return self[item]\n\n\ndef _is_decimal_signed(hi_value):\n return (hi_value & _SIGN_BIT) == _SIGN_BIT\n\n\ndef _pb_to_decimal(type_pb, value_pb, table_client_settings):\n hi = (\n (value_pb.high_128 - (1 << _SHIFT_BIT_COUNT))\n if _is_decimal_signed(value_pb.high_128)\n else value_pb.high_128\n )\n int128_value = value_pb.low_128 + (hi << _SHIFT_BIT_COUNT)\n if int128_value == _DecimalNanRepr:\n return decimal.Decimal(\"Nan\")\n elif int128_value == _DecimalInfRepr:\n return decimal.Decimal(\"Inf\")\n elif int128_value == _DecimalSignedInfRepr:\n return decimal.Decimal(\"-Inf\")\n return decimal.Decimal(int128_value) / decimal.Decimal(\n 10 ** type_pb.decimal_type.scale\n )\n\n\ndef _pb_to_primitive(type_pb, value_pb, table_client_settings):\n return _primitive_type_by_id.get(type_pb.type_id).get_value(\n value_pb, table_client_settings\n )\n\n\ndef _pb_to_optional(type_pb, value_pb, table_client_settings):\n if value_pb.WhichOneof(\"value\") == \"null_flag_value\":\n return None\n if value_pb.WhichOneof(\"value\") == \"nested_value\":\n return _to_native_value(\n type_pb.optional_type.item, value_pb.nested_value, table_client_settings\n )\n return _to_native_value(type_pb.optional_type.item, value_pb, table_client_settings)\n\n\ndef _pb_to_list(type_pb, value_pb, table_client_settings):\n return [\n _to_native_value(\n type_pb.list_type.item, value_proto_item, table_client_settings\n )\n for value_proto_item in value_pb.items\n ]\n\n\ndef _pb_to_tuple(type_pb, value_pb, table_client_settings):\n return tuple(\n _to_native_value(item_type, item_value, table_client_settings)\n for item_type, item_value in six.moves.zip(\n type_pb.tuple_type.elements, value_pb.items\n )\n )\n\n\ndef _pb_to_dict(type_pb, value_pb, table_client_settings):\n result = {}\n for kv_pair in value_pb.pairs:\n key = _to_native_value(\n type_pb.dict_type.key, kv_pair.key, table_client_settings\n )\n payload = _to_native_value(\n type_pb.dict_type.payload, kv_pair.payload, table_client_settings\n )\n result[key] = payload\n return result\n\n\nclass _Struct(_DotDict):\n pass\n\n\ndef _pb_to_struct(type_pb, value_pb, table_client_settings):\n result = _Struct()\n for member, item in six.moves.zip(type_pb.struct_type.members, value_pb.items):\n result[member.name] = _to_native_value(member.type, item, table_client_settings)\n return result\n\n\ndef _pb_to_void(type_pb, value_pb, table_client_settings):\n return None\n\n\n_to_native_map = {\n \"type_id\": _pb_to_primitive,\n \"decimal_type\": _pb_to_decimal,\n \"optional_type\": _pb_to_optional,\n \"list_type\": _pb_to_list,\n \"tuple_type\": _pb_to_tuple,\n \"dict_type\": _pb_to_dict,\n \"struct_type\": _pb_to_struct,\n \"void_type\": _pb_to_void,\n}\n\n\ndef _to_native_value(type_pb, value_pb, table_client_settings=None):\n return _to_native_map.get(type_pb.WhichOneof(\"type\"))(\n type_pb, value_pb, table_client_settings\n )\n\n\ndef _decimal_to_int128(value_type, value):\n if value.is_nan():\n return _DecimalNanRepr\n elif value.is_infinite():\n if value.is_signed():\n return _DecimalSignedInfRepr\n return _DecimalInfRepr\n\n sign, digits, exponent = value.as_tuple()\n int128_value = 0\n digits_count = 0\n for digit in digits:\n int128_value *= 10\n int128_value += digit\n digits_count += 1\n\n if value_type.decimal_type.scale + exponent < 0:\n raise issues.GenericError(\"Couldn't parse decimal value, exponent is too large\")\n\n for _ in range(value_type.decimal_type.scale + exponent):\n int128_value *= 10\n digits_count += 1\n\n if digits_count > value_type.decimal_type.precision + value_type.decimal_type.scale:\n raise issues.GenericError(\"Couldn't parse decimal value, digits count > 35\")\n\n if sign:\n int128_value *= -1\n\n return int128_value\n\n\ndef _decimal_to_pb(value_type, value):\n value_pb = _apis.ydb_value.Value()\n int128_value = _decimal_to_int128(value_type, value)\n if int128_value < 0:\n value_pb.high_128 = (int128_value >> _SHIFT_BIT_COUNT) + (1 << _SHIFT_BIT_COUNT)\n int128_value -= (int128_value >> _SHIFT_BIT_COUNT) << _SHIFT_BIT_COUNT\n else:\n value_pb.high_128 = int128_value >> _SHIFT_BIT_COUNT\n int128_value -= value_pb.high_128 << _SHIFT_BIT_COUNT\n value_pb.low_128 = int128_value\n return value_pb\n\n\ndef _primitive_to_pb(type_pb, value):\n value_pb = _apis.ydb_value.Value()\n data_type = _primitive_type_by_id.get(type_pb.type_id)\n data_type.set_value(value_pb, value)\n return value_pb\n\n\ndef _optional_to_pb(type_pb, value):\n if value is None:\n return _apis.ydb_value.Value(null_flag_value=struct_pb2.NULL_VALUE)\n return _from_native_value(type_pb.optional_type.item, value)\n\n\ndef _list_to_pb(type_pb, value):\n value_pb = _apis.ydb_value.Value()\n for element in value:\n value_item_proto = value_pb.items.add()\n value_item_proto.MergeFrom(_from_native_value(type_pb.list_type.item, element))\n return value_pb\n\n\ndef _tuple_to_pb(type_pb, value):\n value_pb = _apis.ydb_value.Value()\n for element_type, element_value in six.moves.zip(\n type_pb.tuple_type.elements, value\n ):\n value_item_proto = value_pb.items.add()\n value_item_proto.MergeFrom(_from_native_value(element_type, element_value))\n return value_pb\n\n\ndef _dict_to_pb(type_pb, value):\n value_pb = _apis.ydb_value.Value()\n for key, payload in value.items():\n kv_pair = value_pb.pairs.add()\n kv_pair.key.MergeFrom(_from_native_value(type_pb.dict_type.key, key))\n kv_pair.payload.MergeFrom(\n _from_native_value(type_pb.dict_type.payload, payload)\n )\n return value_pb\n\n\ndef _struct_to_pb(type_pb, value):\n value_pb = _apis.ydb_value.Value()\n for member in type_pb.struct_type.members:\n value_item_proto = value_pb.items.add()\n value_item = (\n value[member.name]\n if isinstance(value, dict)\n else getattr(value, member.name)\n )\n value_item_proto.MergeFrom(_from_native_value(member.type, value_item))\n return value_pb\n\n\n_from_native_map = {\n \"type_id\": _primitive_to_pb,\n \"decimal_type\": _decimal_to_pb,\n \"optional_type\": _optional_to_pb,\n \"list_type\": _list_to_pb,\n \"tuple_type\": _tuple_to_pb,\n \"dict_type\": _dict_to_pb,\n \"struct_type\": _struct_to_pb,\n}\n\n\ndef _decimal_type_to_native(type_pb):\n return types.DecimalType(type_pb.decimal_type.precision, type_pb.decimal_type.scale)\n\n\ndef _optional_type_to_native(type_pb):\n return types.OptionalType(type_to_native(type_pb.optional_type.item))\n\n\ndef _primitive_type_to_native(type_pb):\n return _primitive_type_by_id.get(type_pb.type_id)\n\n\n_type_to_native_map = {\n \"optional_type\": _optional_type_to_native,\n \"type_id\": _primitive_type_to_native,\n \"decimal_type\": _decimal_type_to_native,\n}\n\n\ndef type_to_native(type_pb):\n return _type_to_native_map.get(type_pb.WhichOneof(\"type\"))(type_pb)\n\n\ndef _from_native_value(type_pb, value):\n return _from_native_map.get(type_pb.WhichOneof(\"type\"))(type_pb, value)\n\n\ndef to_typed_value_from_native(type_pb, value):\n typed_value = _apis.ydb_value.TypedValue()\n typed_value.type.MergeFrom(type_pb)\n typed_value.value.MergeFrom(from_native_value(type_pb, value))\n return typed_value\n\n\ndef parameters_to_pb(parameters_types, parameters_values):\n if parameters_values is None or not parameters_values:\n return {}\n\n param_values_pb = {}\n for name, type_pb in six.iteritems(parameters_types):\n result = _apis.ydb_value.TypedValue()\n ttype = type_pb\n if isinstance(type_pb, types.AbstractTypeBuilder):\n ttype = type_pb.proto\n elif isinstance(type_pb, types.PrimitiveType):\n ttype = type_pb.proto\n result.type.MergeFrom(ttype)\n result.value.MergeFrom(_from_native_value(ttype, parameters_values[name]))\n param_values_pb[name] = result\n return param_values_pb\n\n\ndef _unwrap_optionality(column):\n c_type = column.type\n current_type = c_type.WhichOneof(\"type\")\n while current_type == \"optional_type\":\n c_type = c_type.optional_type.item\n current_type = c_type.WhichOneof(\"type\")\n return _to_native_map.get(current_type), c_type\n\n\nclass _ResultSet(object):\n __slots__ = (\"columns\", \"rows\", \"truncated\")\n\n def __init__(self, columns, rows, truncated):\n self.columns = columns\n self.rows = rows\n self.truncated = truncated\n\n @classmethod\n def from_message(cls, message, table_client_settings=None):\n rows = []\n # prepare columnn parsers before actuall parsing\n column_parsers = []\n if len(message.rows) > 0:\n for column in message.columns:\n column_parsers.append(_unwrap_optionality(column))\n\n for row_proto in message.rows:\n row = _Row(message.columns)\n for column, value, column_info in six.moves.zip(\n message.columns, row_proto.items, column_parsers\n ):\n v_type = value.WhichOneof(\"value\")\n if v_type == \"null_flag_value\":\n row[column.name] = None\n continue\n\n while v_type == \"nested_value\":\n value = value.nested_value\n v_type = value.WhichOneof(\"value\")\n\n column_parser, unwrapped_type = column_info\n row[column.name] = column_parser(\n unwrapped_type, value, table_client_settings\n )\n rows.append(row)\n return cls(message.columns, rows, message.truncated)\n\n @classmethod\n def lazy_from_message(cls, message, table_client_settings=None):\n rows = _LazyRows(message.rows, table_client_settings, message.columns)\n return cls(message.columns, rows, message.truncated)\n\n\nResultSet = _ResultSet\n\n\nclass _Row(_DotDict):\n def __init__(self, columns):\n super(_Row, self).__init__()\n self._columns = columns\n\n def __getitem__(self, key):\n if isinstance(key, int):\n return self[self._columns[key].name]\n elif isinstance(key, slice):\n return tuple(map(lambda x: self[x.name], self._columns[key]))\n else:\n return super(_Row, self).__getitem__(key)\n\n\nclass _LazyRowItem:\n\n __slots__ = [\"_item\", \"_type\", \"_table_client_settings\", \"_processed\", \"_parser\"]\n\n def __init__(self, proto_item, proto_type, table_client_settings, parser):\n self._item = proto_item\n self._type = proto_type\n self._table_client_settings = table_client_settings\n self._processed = False\n self._parser = parser\n\n def get(self):\n if not self._processed:\n\n self._item = self._parser(\n self._type, self._item, self._table_client_settings\n )\n self._processed = True\n return self._item\n\n\nclass _LazyRow(_DotDict):\n def __init__(self, columns, proto_row, table_client_settings, parsers):\n super(_LazyRow, self).__init__()\n self._columns = columns\n self._table_client_settings = table_client_settings\n for i, (column, row_item) in enumerate(\n six.moves.zip(self._columns, proto_row.items)\n ):\n super(_LazyRow, self).__setitem__(\n column.name,\n _LazyRowItem(row_item, column.type, table_client_settings, parsers[i]),\n )\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\"Cannot insert values into lazy row\")\n\n def __getitem__(self, key):\n if isinstance(key, int):\n return self[self._columns[key].name]\n elif isinstance(key, slice):\n return tuple(map(lambda x: self[x.name], self._columns[key]))\n else:\n return super(_LazyRow, self).__getitem__(key).get()\n\n def __iter__(self):\n return super(_LazyRow, self).__iter__()\n\n def __next__(self):\n return super(_LazyRow, self).__next__().get()\n\n def next(self):\n return self.__next__()\n\n\ndef from_native_value(type_pb, value):\n return _from_native_value(type_pb, value)\n\n\ndef to_native_value(typed_value):\n return _to_native_value(typed_value.type, typed_value.value)\n\n\nclass _LazyRows:\n def __init__(self, rows, table_client_settings, columns):\n self._rows = rows\n self._parsers = [_LazyParser(columns, i) for i in range(len(columns))]\n self._table_client_settings = table_client_settings\n self._columns = columns\n\n def __len__(self):\n return len(self._rows)\n\n def fetchone(self):\n return _LazyRow(\n self._columns, self._rows[0], self._table_client_settings, self._parsers\n )\n\n def fetchmany(self, number):\n for index in range(min(len(self), number)):\n yield _LazyRow(\n self._columns,\n self._rows[index],\n self._table_client_settings,\n self._parsers,\n )\n\n def __iter__(self):\n for row in self.fetchmany(len(self)):\n yield row\n\n def fetchall(self):\n for row in self:\n yield row\n\n\nclass _LazyParser:\n __slots__ = [\"_columns\", \"_column_index\", \"_prepared\"]\n\n def __init__(self, columns, column_index):\n self._columns = columns\n self._column_index = column_index\n self._prepared = None\n\n def __call__(self, *args, **kwargs):\n if self._prepared is None:\n self._prepared = _to_native_map.get(\n self._columns[self._column_index].type.WhichOneof(\"type\")\n )\n return self._prepared(*args, **kwargs)\n\n\nclass ResultSets(list):\n def __init__(self, result_sets_pb, table_client_settings=None):\n make_lazy = (\n False\n if table_client_settings is None\n else table_client_settings._make_result_sets_lazy\n )\n result_sets = []\n initializer = (\n _ResultSet.from_message if not make_lazy else _ResultSet.lazy_from_message\n )\n for result_set in result_sets_pb:\n result_sets.append(initializer(result_set, table_client_settings))\n super(ResultSets, self).__init__(result_sets)\n","sub_path":"ydb/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":15033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"448267622","text":"import matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nfrom matplotlib import style\r\nimport datetime as dta\r\nimport sqlite3\r\n\r\nstyle.use('ggplot')\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(1,1,1)\r\nconn=sqlite3.connect('sensor.db')\r\nc=conn.cursor()\r\ndef animate(i):\r\n dateconv = dta.datetime.fromtimestamp\r\n c.execute('select temperature,time from sensor')\r\n conn.commit()\r\n xs = []\r\n ys = []\r\n ''' y axis the date'''\r\n for row in c.fetchall():\r\n xs.append(row[0])\r\n y = dateconv(row[1])\r\n ys.append(y)\r\n ax1.clear()\r\n ax1.plot(ys, xs,label='temperature')\r\n for label in ax1.xaxis.get_ticklabels():\r\n label.set_rotation(45)\r\n plt.xlabel('date')\r\n plt.ylabel('temperature')\r\n plt.legend()\r\nani = animation.FuncAnimation(fig, animate, interval=1000)\r\n\r\nplt.subplots_adjust(left=0.09, bottom=0.23, right=0.93, top=0.90, wspace=0.2, hspace=0)\r\nplt.show()\r\nc.close()\r\nconn.close()\r\n\r\n","sub_path":"live_plot_py.py","file_name":"live_plot_py.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"491251272","text":"from locust import HttpLocust, TaskSet\nfrom bs4 import BeautifulSoup\nimport random\n\nauth_token = \"\"\n\n\ndef login(self):\n response = self.client.get(\"/login\")\n pq = BeautifulSoup(response.text)\n text = pq.find('input', attrs={'name': 'authenticity_token'}).get('value')\n global auth_token\n auth_token = text\n\n\ndef login_post(self):\n global auth_token\n self.client.post(\"/session\", {\"commit\": \"Sign in\",\n \"utf8\": \"✓\",\n \"authenticity_token\": auth_token,\n \"login\": \"\",\n \"password\": \"\"})\n\n\ndef logout(self):\n response = self.client.get(\"/logout\")\n pq = BeautifulSoup(response.text)\n text = pq.find('input', attrs={'name': 'authenticity_token'}).get('value')\n global auth_token\n auth_token = text\n\n\ndef logout_post(self):\n global auth_token\n self.client.post(\"/logout\", {\"utf8\": \"✓\",\n \"authenticity_token\": auth_token})\n\n\ndef profile(self):\n text = ['/erfdf', '/arifekubrahos']\n url = random.choice(text)\n print(url)\n response = self.client.get(url, catch_response=True)\n if not response.ok:\n response.failure(\"Get user fail\")\n\n\ndef search(self):\n text = ['et', 'te']\n variables = {'q': random.choice(text)}\n response = self.client.get(\"/search?\", params=variables)\n\n\nclass UserBehavior(TaskSet):\n tasks = {profile: 1, search: 1}\n\n\nclass LoginBehavior(TaskSet):\n tasks = {UserBehavior: 1}\n\n def on_start(self):\n login(self)\n login_post(self)\n\n def on_stop(self):\n logout(self)\n logout_post(self)\n\n\nclass GithubUser(HttpLocust):\n task_set = LoginBehavior\n min_wait = 5000\n max_wait = 10000\n","sub_path":"github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"27081854","text":"ROOT_HEIGHT = 6\nimport sys\nimport tensorflow as tf\nfrom pathlib import Path\nfile = Path(__file__).resolve()\nparent, root = file.parent, file.parents[ROOT_HEIGHT]\nsys.path.append(str(root))\ntry:\n sys.path.remove(str(parent))\nexcept ValueError: # Already removed\n pass\n\nfrom learning_to_learn.environment import Environment\nfrom learning_to_learn.pupils.mlp_for_meta import MlpForMeta as Mlp\nfrom learning_to_learn.image_batch_gens import CifarBatchGenerator\n\nfrom learning_to_learn.optimizers.chiterm import ChiTerm\n\nimport os\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\nconf_file = sys.argv[1]\nsave_path = os.path.join(conf_file.split('.')[0], 'results')\n\nwith open(conf_file, 'r') as f:\n lines = f.read().split('\\n')\nrestore_path = lines[0]\n\ndata_dir = os.path.join(*(['..']*ROOT_HEIGHT + ['datasets', 'mnist']))\n\nenv = Environment(\n pupil_class=Mlp,\n meta_optimizer_class=ChiTerm,\n batch_generator_classes=CifarBatchGenerator,\n)\nVALID_SIZE = 1000\nadd_metrics = ['bpc', 'perplexity', 'accuracy']\n\n\nBATCH_SIZE = 32\nenv.build_pupil(\n batch_size=BATCH_SIZE,\n num_layers=1,\n num_hidden_nodes=[],\n input_shape=[3072],\n num_classes=10,\n init_parameter=1.,\n additional_metrics=add_metrics,\n regime='training_with_meta_optimizer',\n)\n\nenv.build_optimizer(\n regime='inference',\n additional_metrics=add_metrics,\n chi_application='exp',\n)\n\n\nprint('building is finished')\nadd_feed = [\n {'placeholder': 'dropout', 'value': .9},\n dict(\n placeholder='learning_rate',\n value=4.\n ),\n dict(\n placeholder='chi_contribution',\n value=.01\n )\n]\nvalid_add_feed = [\n {'placeholder': 'dropout', 'value': 1.},\n]\n\ntf.set_random_seed(1)\nenv.train(\n # gpu_memory=.3,\n allow_growth=True,\n save_path='debug_early_stop',\n with_meta_optimizer=True,\n # restore_path='lstm_sample_test/scipop3_1000_bs256_11.12/checkpoints/2000',\n batch_size=BATCH_SIZE,\n checkpoint_steps=None,\n result_types=['perplexity', 'loss', 'bpc', 'accuracy'],\n printed_result_types=['perplexity', 'loss', 'bpc', 'accuracy'],\n stop=1000,\n train_dataset=dict(\n train='train'\n ),\n train_batch_kwargs=dict(\n valid_size=VALID_SIZE\n ),\n valid_batch_kwargs=dict(\n valid_size=VALID_SIZE\n ),\n\n # train_dataset_text='abc',\n validation_datasets=dict(\n valid='validation'\n ),\n results_collect_interval=100,\n additions_to_feed_dict=add_feed,\n validation_additions_to_feed_dict=valid_add_feed,\n no_validation=False\n)","sub_path":"learning_to_learn/experiments/chiterm/sgd/cifar10/launch/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"619700461","text":"# Autor: Joshua Sánchez Martínez A01274269\r\n# Lee valores para dibujar una figura\r\n\r\n\r\nimport math\r\nimport pygame\r\n\r\n\r\nANCHO = 800\r\nALTO = 800\r\n\r\n\r\nC1 = (255, 255, 255)\r\nC2 = (99, 99, 99)\r\nC3 = (0, 255, 0)\r\nC4 = (255, 0, 0)\r\nC5 = (60, 200, 180)\r\nC6 = (238, 210, 130)\r\nC7 = (0, 0, 0)\r\nC8 = (255, 40, 90)\r\n\r\n# Funcion para dibujar\r\ndef dibujar(r, R, l):\r\n\r\n pygame.init()\r\n ventana = pygame.display.set_mode((ANCHO, ALTO))\r\n reloj = pygame.time.Clock()\r\n termina = False\r\n\r\n while not termina:\r\n\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT:\r\n termina = True\r\n\r\n ventana.fill(C7)\r\n\r\n for angulo in range(0, 361 * (r//math.gcd(r, R)), 1):\r\n k = r / R\r\n x = int(R * ((1.5 - k) * math.cos(angulo) + l * k * math.cos(((1.5- k) / k) * angulo)))\r\n y = int(R * ((1.5 - k) * math.sin(angulo) - l * k * math.sin(((1.5 - k) / k) * angulo)))\r\n pygame.draw.circle(ventana, C6, (x + ANCHO // 2, ALTO // 2 - y), 1)\r\n pygame.draw.circle(ventana, C3, (x + ANCHO//2, ALTO//2 + y), 1)\r\n pygame.draw.circle(ventana, C5, (x * 2 + ANCHO // 2, ALTO // 2 - y), 1)\r\n pygame.draw.circle(ventana, C1, (x * 2 + ANCHO // 2, ALTO // 2 - 2 * y), 1)\r\n pygame.draw.circle(ventana, C8, (x * 2 + ANCHO // 2, ALTO // 2 + 2 * y), 1)\r\n pygame.display.flip()\r\n reloj.tick(40)\r\n pygame.quit()\r\n\r\n\r\n# Función principal\r\ndef main():\r\n r = 23\r\n R = 54\r\n l = 2\r\n dibujar(r, R, l)\r\n\r\n\r\n# Llamada a la función principal\r\nmain()","sub_path":"MisionImposible.py","file_name":"MisionImposible.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"310121036","text":"\"\"\"\n train\n\nAuthor: Zhengwei Li\nDate : 2018/12/24\n\"\"\"\n\nfrom tensorboardX import SummaryWriter\nimport numpy as np\nimport argparse\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom torch.utils.data import DataLoader\nimport time\nimport os\nfrom data import dataset\nfrom model import network, utils\nimport torch.nn.functional as F\n\n\ndef get_args():\n # Training settings\n parser = argparse.ArgumentParser(description='Fast portrait matting !')\n parser.add_argument('--dataDir', default='./DATA/', help='dataset directory')\n parser.add_argument('--saveDir', default='./ckpt', help='model save dir')\n parser.add_argument('--trainData', default='human_matting_data', help='train dataset name')\n parser.add_argument('--trainList', default='./data/list.txt', help='train img ID')\n parser.add_argument('--load', default= 'human_matting', help='save model')\n\n parser.add_argument('--finetuning', action='store_true', default=False, help='finetuning the training')\n parser.add_argument('--without_gpu', action='store_true', default=False, help='no use gpu')\n\n parser.add_argument('--nThreads', type=int, default=4, help='number of threads for data loading')\n parser.add_argument('--train_batch', type=int, default=8, help='input batch size for train')\n parser.add_argument('--patch_size', type=int, default=256, help='patch size for train')\n\n\n parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')\n parser.add_argument('--lrDecay', type=int, default=100)\n parser.add_argument('--lrdecayType', default='keep')\n parser.add_argument('--nEpochs', type=int, default=300, help='number of epochs to train')\n parser.add_argument('--save_epoch', type=int, default=1, help='number of epochs to save model')\n\n parser.add_argument('--train_phase', default= 'end_to_end', help='train phase')\n\n\n args = parser.parse_args()\n print(args)\n return args\n\n\ndef set_lr(args, epoch, optimizer):\n\n lrDecay = args.lrDecay\n decayType = args.lrdecayType\n if decayType == 'keep':\n lr = args.lr\n elif decayType == 'step':\n epoch_iter = (epoch + 1) // lrDecay\n lr = args.lr / 2**epoch_iter\n elif decayType == 'exp':\n k = math.log(2) / lrDecay\n lr = args.lr * math.exp(-k * epoch)\n elif decayType == 'poly':\n lr = args.lr * math.pow((1 - epoch / args.nEpochs), 0.9)\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr\n\n \n\nclass Train_Log():\n def __init__(self, args):\n self.args = args\n\n self.save_dir = os.path.join(args.saveDir, args.load)\n self.summary = SummaryWriter(self.save_dir)\n self.step_cnt = 1\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n\n self.save_dir_model = os.path.join(self.save_dir, 'model')\n if not os.path.exists(self.save_dir_model):\n os.makedirs(self.save_dir_model)\n\n if os.path.exists(self.save_dir + '/log.txt'):\n self.logFile = open(self.save_dir + '/log.txt', 'a')\n else:\n self.logFile = open(self.save_dir + '/log.txt', 'w')\n\n def add_scalar(self, scalar_name, scalar, step=None):\n if step is None:\n step = self.step_cnt\n self.summary.add_scalar(scalar_name, scalar, step)\n \n def add_histogram(self, var_name, value, step=None):\n if step is None:\n step = self.step_cnt\n self.summary.add_histogram(var_name, value, step)\n \n def add_trimap(self, image):\n image = image[0, :, :, :].detach().cpu().numpy().copy()\n bg = (image[0, :, :] > image[1, :, :]) & (image[0, :, :] > image[2, :, :])\n fg = (image[2, :, :] > image[0, :, :]) & (image[2, :, :] > image[1, :, :])\n figure_fg = np.zeros((image.shape[1], image.shape[2]))\n figure_unsure = np.zeros((image.shape[1], image.shape[2]))\n figure_fg[fg] = 128\n figure_unsure[(~bg)&(~fg)] = 128\n self.summary.add_image('trimap-fg', figure_fg, self.step_cnt, dataformats='HW')\n self.summary.add_image('trimap-unsure', figure_unsure, self.step_cnt, dataformats='HW')\n \n def add_trimap_gt(self, image):\n image = image.detach().cpu().numpy().copy()\n if len(image.shape) > 4:\n print('image shape too large', image.shape)\n if len(image.shape) == 4:\n image = image[0, :, :, :]\n assert image.shape[0] == 1\n image = image[0, :, :]\n figure_fg = image.copy()\n figure_unsure = image.copy()\n figure_unsure[figure_unsure!=1] = 0\n figure_unsure[figure_unsure==1] = 128\n figure_fg[image!=2] = 0\n figure_fg[image==2] = 128\n self.summary.add_image('trimap_gt_unsure', figure_unsure, self.step_cnt, dataformats='HW')\n self.summary.add_image('trimap_gt_fg', figure_fg, self.step_cnt, dataformats='HW')\n \n def add_image(self, tag, image):\n if isinstance(image, torch.autograd.Variable):\n image = image.data\n if len(image.shape) == 4:\n image = image[0, :, :, :]\n image = image.cpu().numpy()\n self.summary.add_image(tag, image, self.step_cnt)\n \n def step(self):\n self.step_cnt += 1\n\n def save_model(self, model, epoch):\n\n # epoch_out_path = \"{}/ckpt_e{}.pth\".format(self.save_dir_model, epoch)\n # print(\"Checkpoint saved to {}\".format(epoch_out_path))\n\n # torch.save({\n # 'epoch': epoch,\n # 'state_dict': model.state_dict(),\n # }, epoch_out_path)\n\n lastest_out_path = \"{}/ckpt_lastest.pth\".format(self.save_dir_model)\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'step': self.step_cnt\n }, lastest_out_path)\n\n model_out_path = \"{}/model_obj.pth\".format(self.save_dir_model)\n torch.save(\n model,\n model_out_path)\n\n def load_model(self, model):\n\n lastest_out_path = \"{}/ckpt_lastest.pth\".format(self.save_dir_model)\n if self.args.without_gpu: # 用cpu载入模型到内存\n ckpt = torch.load(lastest_out_path, map_location='cpu')\n else: # 模型载入到显存\n ckpt = torch.load(lastest_out_path)\n state_dict = ckpt['state_dict'].copy()\n for key in ckpt['state_dict']:\n if key not in model.state_dict():\n print('missing key:\\t', key)\n state_dict.pop(key)\n ckpt['state_dict'] = state_dict\n start_epoch = ckpt['epoch']\n model.load_state_dict(ckpt['state_dict'], strict=False)\n self.step_cnt = ckpt['step']\n #self.step_cnt = 1\n print(\"=> loaded checkpoint '{}' (epoch {} total step {})\".format(lastest_out_path, ckpt['epoch'], self.step_cnt))\n\n return start_epoch, model\n\n\n def save_log(self, log):\n self.logFile.write(log + '\\n')\n\n\ndef loss_function(args, img, trimap_pre, trimap_gt, alpha_pre, alpha_gt):\n\n criterion = nn.CrossEntropyLoss()\n L1 = nn.L1Loss()\n # -------------------------------------\n # classification loss L_t\n # ------------------------\n # Cross Entropy \n # criterion = nn.BCELoss()\n # trimap_pre = trimap_pre.contiguous().view(-1)\n # trimap_gt = trimap_gt.view(-1)\n # L_t = criterion(trimap_pre, trimap_gt)\n if args.train_phase != 'pre_train_m_net':\n assert trimap_gt.shape[1] == 1\n\n L1_t = L1(F.softmax(trimap_pre, dim=1)[:, 0, :, :], (trimap_gt[:, 0, :, :]==0).type(torch.FloatTensor))\n L_t = criterion(trimap_pre, trimap_gt[:,0,:,:].long())\n IOU_t = [utils.iou_pytorch((trimap_pre[:, 0, :, :]>trimap_pre[:, 1, :, :]) & (trimap_pre[:, 0, :, :]>trimap_pre[:, 2, :, :]), trimap_gt[:, 0, :, :]==0),\n utils.iou_pytorch((trimap_pre[:, 1, :, :]>=trimap_pre[:, 0, :, :]) & (trimap_pre[:, 1, :, :]>=trimap_pre[:, 2, :, :]), trimap_gt[:, 0, :, :]==1),\n utils.iou_pytorch((trimap_pre[:, 2, :, :]>trimap_pre[:, 0, :, :]) & (trimap_pre[:, 2, :, :]>trimap_pre[:, 1, :, :]), trimap_gt[:, 0, :, :]==2)]\n else: # train_phase == 'pre_train_m_net', L2_t = L_t = IOU_t = tensor(0.)\n L1_t = L_t = torch.Tensor([0.])\n IOU_t = [torch.Tensor([0.])]*3\n # -------------------------------------\n # prediction loss L_p\n # ------------------------\n # l_alpha\n L_alpha = L1(alpha_pre, alpha_gt)\n IOU_alpha = utils.iou_pytorch(alpha_pre>1e-5, alpha_gt>1e-5)\n\n # L_composition\n fg = torch.cat((alpha_gt, alpha_gt, alpha_gt), 1) * img\n fg_pre = torch.cat((alpha_pre, alpha_pre, alpha_pre), 1) * img\n\n L_composition = L1(fg_pre, fg)\n\n #L_p = 0.5*L_alpha + 0.5*L_composition\n L_p = L_alpha\n\n # train_phase\n if args.train_phase == 'pre_train_t_net':\n loss = L_t + L2_t\n if args.train_phase == 'end_to_end':\n loss = L_p + 0.01*L_t\n if args.train_phase == 'pre_train_m_net':\n loss = L_p\n \n return loss, L_alpha, L_composition, L_t, L1_t, IOU_t, IOU_alpha\n\n\ndef main():\n\n print(\"=============> Loading args\")\n args = get_args()\n\n print(\"============> Environment init\")\n if args.without_gpu:\n print(\"use CPU !\")\n device = torch.device('cpu')\n else:\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n print(\"No GPU is is available !\")\n\n print(\"============> Building model ...\")\n model = network.net() \n model.to(device)\n\n print(\"============> Loading datasets ...\")\n train_data = getattr(dataset, args.trainData)(root_dir = args.dataDir, \\\n imglist = args.trainList, \\\n patch_size = args.patch_size)\n trainloader = DataLoader(train_data, \n batch_size=args.train_batch, \n drop_last=True, \n shuffle=True, \n num_workers=args.nThreads, \n pin_memory=False)\n model.train() \n\n print('============> Loss function ', args.train_phase)\n print(\"============> Set optimizer ...\")\n lr = args.lr\n train_params = model.parameters()\n target_network = model\n if args.train_phase == 'pre_train_t_net':\n train_params = model.t_net.parameters()\n target_network = model.t_net\n elif args.train_phase == 'pre_train_m_net':\n train_params = model.m_net.parameters()\n target_network = model.m_net\n model.t_net.eval()\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, train_params), \\\n lr=lr, betas=(0.9, 0.999), \n weight_decay=0.0005)\n\n print(\"============> Start Train ! ...\")\n start_epoch = 1\n trainlog = Train_Log(args)\n if args.finetuning:\n start_epoch, model = trainlog.load_model(model) \n\n for epoch in range(start_epoch, args.nEpochs+1):\n\n loss_ = 0\n L_alpha_ = 0\n L_composition_ = 0\n L_cross_, L2_bg_ = 0, 0\n loss_array = []\n IOU_t_bg_, IOU_t_unsure_, IOU_t_fg_ = 0, 0, 0\n IOU_alpha_ = 0\n if args.lrdecayType != 'keep':\n lr = set_lr(args, epoch, optimizer)\n\n t0 = time.time()\n for i, sample_batched in enumerate(trainloader):\n print('batch ', i)\n img, trimap_gt, alpha_gt = sample_batched['image'], sample_batched['trimap'], sample_batched['alpha']\n img, trimap_gt, alpha_gt = img.to(device), trimap_gt.to(device), alpha_gt.to(device)\n\n # end_to_end or pre_train_t_net\n if args.train_phase != 'pre_train_m_net':\n trimap_pre, alpha_pre = model(img)\n loss, L_alpha, L_composition, L_cross, L2_cross, IOU_t, IOU_alpha = loss_function(args, \n img,\n trimap_pre, \n trimap_gt, \n alpha_pre, \n alpha_gt)\n print(\"Loss calculated %.4f\\nL2: %.2f\\nbg IOU: %.2f\\nunsure IOU: %.2f\\nfg IOU: %.2f\"%(L_cross.item(), L2_cross.item(), IOU_t[0].item(), IOU_t[1].item(), IOU_t[2].item()))\n else: # pre_train_m_net\n trimap_softmax = torch.zeros([trimap_gt.shape[0], 3, trimap_gt.shape[2], trimap_gt.shape[3]], dtype=torch.float32)\n trimap_softmax.scatter_(1, trimap_gt.long().data.cpu(), 1)\n trimap_softmax = trimap_softmax.to(device)\n #trimap_softmax = F.softmax(trimap_gt, dim=1)\n bg_gt, unsure_gt, fg_gt = torch.split(trimap_softmax, 1, dim=1)\n m_net_input = torch.cat((img, trimap_softmax), 1).to(device)\n alpha_r = model.m_net(m_net_input)\n alpha_p = fg_gt + unsure_gt * alpha_r\n loss, L_alpha, L_composition, L_cross, L2_cross, IOU_t, IOU_alpha = loss_function(args,\n img, \n trimap_gt,\n trimap_gt, \n alpha_p,\n alpha_gt)\n print('loss: %.5f\\tL_composision: %.5f\\tL_alpha: %.5f'%(loss.item(), L_composition.item(), L_alpha.item()))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_ += loss.item()\n L_alpha_ += L_alpha.item()\n L_composition_ += L_composition.item()\n L_cross_ += L_cross.item()\n L2_bg_ += L2_cross.item()\n IOU_t_bg_ += IOU_t[0].item()\n IOU_t_unsure_ += IOU_t[1].item()\n IOU_t_fg_ += IOU_t[2].item()\n IOU_alpha_ += IOU_alpha.item()\n loss_array.append(loss.item())\n \n # TENSORBOARD SCALARS\n trainlog.add_scalar('loss', loss.item())\n trainlog.add_scalar('T_net_loss', L_cross.item())\n trainlog.add_scalar('T_net_bg_L2', L2_cross.item())\n trainlog.add_scalar('M_net_alpha', L_alpha.item())\n trainlog.add_scalar('M_net_composition', L_composition.item())\n trainlog.add_scalar('IOU_t_bg', IOU_t[0].item())\n trainlog.add_scalar('IOU_t_unsure', IOU_t[1].item())\n trainlog.add_scalar('IOU_t_fg', IOU_t[2].item())\n if (i+1) % 100 == 0:\n for var_name, value in target_network.named_parameters():\n # ignore unused parameters\n if not hasattr(value.grad, 'data'):\n continue\n var_name = var_name.replace('.', '/')\n trainlog.add_histogram(var_name, value.data.cpu().numpy())\n trainlog.add_histogram(var_name+'/grad', value.grad.data.cpu().numpy())\n\n # TENSORBOARD IMAGE\n if (i+1) % 1000 == 0 and args.train_phase == 'pre_train_m_net':\n trainlog.add_image('fg_gt', vutils.make_grid(fg_gt, normalize=True, nrow=4))\n trainlog.add_image('unsure_gt', vutils.make_grid(unsure_gt, normalize=True, nrow=4))\n trainlog.add_image('alpha_p', vutils.make_grid(alpha_p, normalize=True, nrow=4))\n trainlog.add_image('alpha_r', vutils.make_grid(alpha_r, normalize=True, nrow=4))\n trainlog.add_image('alpha_gt', vutils.make_grid(alpha_gt, normalize=True, nrow=4))\n if (i+1) % 1000 == 0 and args.train_phase != 'pre_train_m_net':\n trainlog.add_trimap(trimap_pre)\n trainlog.add_trimap_gt(trimap_gt)\n trainlog.add_image('origin_image', vutils.make_grid(img, normalize=True, nrow=4))\n \n trainlog.step()\n\n print('Done iterating all training data')\n t1 = time.time()\n\n if epoch % args.save_epoch == 0:\n\n # speed = (t1 - t0) / 60 \n\n loss_ = loss_ / (i+1)\n L_alpha_ = L_alpha_ / (i+1)\n L_composition_ = L_composition_ / (i+1)\n L_cross_ = L_cross_ / (i+1)\n L2_bg_ = L2_bg_ / (i+1)\n loss_var = np.var(loss_array)\n IOU_t_bg_ = IOU_t_bg_ / (i+1)\n IOU_t_unsure_ = IOU_t_unsure_ / (i+1)\n IOU_t_fg_ = IOU_t_fg_ / (i+1)\n IOU_alpha_ = IOU_alpha_ / (i+1)\n trainlog.add_scalar('avg_loss', loss_, epoch)\n trainlog.add_scalar('avg_t_loss', L_cross_, epoch)\n trainlog.add_scalar('avg_t_L2_bg', L2_bg_, epoch)\n trainlog.add_scalar('avg_t_loss_var', loss_var, epoch)\n trainlog.add_scalar('avg_IOU_t_bg', IOU_t_bg_, epoch)\n trainlog.add_scalar('avg_IOU_t_unsure', IOU_t_unsure_, epoch)\n trainlog.add_scalar('avg_IOU_t_fg', IOU_t_fg_, epoch)\n trainlog.add_scalar('avg_L_alpha', L_alpha_, epoch)\n trainlog.add_scalar('avg_L_composition', L_composition_, epoch)\n\n log = \"[{} / {}] \\tLr: {:.5f}\\nloss: {:.5f}\\tloss_p: {:.5f}\\tloss_t: {:.5f}\\tloss_var: {:.5f}\\tIOU_t_bg: {:.5f}\\tIOU_t_unsure: {:.5f}\\tIOU_t_fg: {:.5f}\\tIOU_alpha: {:.5f}\\t\" \\\n .format(epoch, args.nEpochs, \n lr, \n loss_, \n L_alpha_+L_composition_, \n L_cross_,\n loss_var,\n IOU_t_bg_,\n IOU_t_unsure_,\n IOU_t_fg_,\n IOU_alpha_)\n print(log)\n trainlog.save_log(log)\n trainlog.save_model(model, epoch)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":18129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"154338726","text":"import pygame\nimport os\nimport socket\nimport threading\nfrom grid import Grid\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = '200,100' # screen display position\nscreen = pygame.display.set_mode((600, 600))\npygame.display.set_caption(\"Tic-tac-toe\")\ngrid = Grid()\nplayer = \"X\"\nturn = True\nplaying = \"True\"\nclient_multi_socket = socket.socket()\n\nhost = \"localhost\"\nport = 8080\n\nprint('Waiting for connection response')\ntry:\n client_multi_socket.connect((host, port))\nexcept socket.error as e:\n print(str(e))\n\n\ndef create_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\ndef receive_data():\n global turn\n while True:\n try:\n data = client_multi_socket.recv(1024).decode()\n data = data.split('-')\n print(data)\n posx, posy = int(data[0]), int(data[1])\n if data[2] == 'yourturn':\n turn = True\n if data[3] == 'False':\n grid.game_over = True\n if grid.get_cell_value(posx, posy) == 0:\n grid.set_cell_value(posx, posy, 'O')\n except socket.error as e:\n str(e)\n\n\ncreate_thread(receive_data)\n\n\nrunning = True\n\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN and not grid.game_over:\n # index 0 : left mouse button, 1 : middle mouse button, 2 : right mouse button\n if pygame.mouse.get_pressed()[0]:\n if turn and not grid.game_over:\n pos = pygame.mouse.get_pos()\n cell_x = pos[0] // 200\n cell_y = pos[1] // 200\n grid.get_mouse(cell_x, cell_y, player)\n if grid.game_over:\n playing = 'False'\n send_data = '{}-{}-{}-{}'.format(cell_x,\n cell_y, 'yourturn', playing).encode()\n client_multi_socket.send(send_data)\n turn = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and grid.game_over:\n grid.reset_game()\n playing = 'True'\n elif event.key == pygame.K_ESCAPE:\n running = False\n\n screen.fill((255, 255, 255))\n grid.draw(screen)\n pygame.display.flip()\n","sub_path":"client_2.py","file_name":"client_2.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"336131102","text":"import os\nimport sys\nimport psutil\nimport platform\nimport socket\n\ndef get():\n version = '0.1.2'\n return {\n 'o': get_os(),\n 'c': {\n 'l': int(psutil.cpu_count()),\n 'p': int(psutil.cpu_count(logical=False))\n },\n 'r': get_ram(),\n 's': get_swap(),\n #'d': get_disks_size(),\n 'v': version\n }\n\ndef get_os():\n operating_system = ''\n return {\n 'n': os.name,\n\n #Platform\n 'p': platform.system(),\n\n #System Platform\n 's': sys.platform,\n\n #Release\n 'r': platform.release(),\n\n #Linux distribution\n 'l': platform.linux_distribution(),\n\n #Hostname\n 'h': platform.node(),\n\n #Full qualified domain name\n 'fqn': socket.getfqdn(),\n\n #Return the system boot time expressed in seconds since the epoch.\n 'bt': int(psutil.boot_time())\n }\n\n\ndef get_ram():\n return psutil.virtual_memory().total // 1048576\n\ndef get_swap():\n return psutil.swap_memory().total // 1048576\n\ndef get_disks_size():\n disks = []\n for partition in psutil.disk_partitions():\n usage = psutil.disk_usage(partition.mountpoint)\n disks.append({\n 'p': partition.mountpoint,\n 't': usage.total // 1048576,\n 'u': usage.used // 1048576\n });\n return disks\n\ndef get_network_devices():\n devices = {}\n devices_info = psutil.net_if_addrs()\n for device_info in devices_info.iterkeys():\n devices[device_info] = {}\n devices[device_info]['addresses'] = []\n for address in devices_info[device_info]:\n devices[device_info]['addresses'].append((\n address.family,\n address.address,\n address.netmask,\n ))\n return devices\n","sub_path":"src/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"620083697","text":"import csv # obvious\r\nimport sys # used for progress\r\nimport time # used for run time\r\nimport random\r\nfrom openpyxl import Workbook, styles, worksheet\r\nsmillis = int(round(time.time() * 1000))\r\n# globals\r\nmaster_trial_tracker = []\r\ntrials = 1\r\n\r\ndef csv_reader(path):\r\n with open(path, 'r', encoding=\"utf8\") as bigfile:\r\n readbigfile = csv.reader((line.replace('\\0','') for line in bigfile), delimiter=\",\", quotechar='\"')\r\n temparray = list(readbigfile)\r\n for row in temparray:\r\n if len(row) == 0:\r\n temparray.remove(row)\r\n return temparray\r\n\r\ndef csv_writer(data, path):\r\n with open(path, \"w\", newline=\"\\n\") as f:\r\n writer = csv.writer(f)\r\n for i in data:\r\n writer.writerow(i)\r\n\r\n# returns array of rooms and teachers and zero counts for four classes\r\ndef class_creator(h_student):\r\n list_of_classes = [['room', 'teacher', '1/5', '2/6', '3/7', '4/8']]\r\n i = 0\r\n while i < len(h_student):\r\n # removes school number keeps last name only (last, first)\r\n temp_class = [h_student[i][17][:-6], h_student[i][16].partition(',')[0],0,0,0,0]\r\n if temp_class not in list_of_classes:\r\n list_of_classes.append(temp_class)\r\n i += 1\r\n if i % int(len(h_student)/100) == 0:\r\n progress = \"\\r\" + 'Creating class templates...' + str(int(i/len(h_student) * 100 + 1)) + \"%\" +\"\\r\"\r\n sys.stdout.write(progress)\r\n sys.stdout.flush()\r\n return list_of_classes\r\n\r\n# returns array of students with id, name, grade, 1st... (classes are 0)\r\ndef student_template_creator(h_student):\r\n list_of_students = [['id', 'name', 'grade', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th']]\r\n i = 0\r\n for row in h_student:\r\n i+=1\r\n id = int(row[6].replace('(','|').replace(')','|').split('|',)[1]) # gets id number as int\r\n name = row[6].split('(')[0] # gets last, first [mi]\r\n current_student = [id, name, 0, 0, 0, 0, 0, 0, 0, 0]\r\n if current_student not in list_of_students:\r\n list_of_students.append(current_student)\r\n if i % int(len(h_student)/10) == 0:\r\n progress = \"\\r\" + 'Creating student templates...' + str(int(i/len(h_student) * 100)) + \"%\"\r\n sys.stdout.write(progress)\r\n sys.stdout.flush()\r\n print()\r\n return list_of_students\r\n\r\n# removes the \"P\" from the periods and splits double block classes\r\n# returns \"cleaned up\" h_student array\r\ndef period_cleaner(h_student):\r\n i = 0\r\n new_h_student = [] # create new array\r\n while i < len(h_student):\r\n if len(h_student[i][13]) == 2 and h_student[i][13] != '0': # checks for single block\r\n h_student[i][13] = int(h_student[i][13][1::]) # picks period as int (P4 -> 4)\r\n new_h_student.append(h_student[i])\r\n elif len(h_student[i][13]) == 4:\r\n db1 = h_student[i][:] # copy the student twice\r\n db2 = h_student[i][:]\r\n dbperiod = h_student[i][13][:] # copy the period\r\n fp = int(dbperiod[1]) # store the periods\r\n sp = int(dbperiod[3])\r\n db1[13] = fp # picks first period as int (P1/5 -> 1)\r\n db2[13] = sp # picks second period as int (P1/5 -> 5)\r\n new_h_student.append(db1) # add first blocked as new line\r\n new_h_student.append(db2) # add second blocked as new line\r\n i += 1\r\n if i % int(len(h_student)/10) == 0:\r\n progress = \"\\r\" + 'Cleaning up h_student periods...' + str(int(i/len(h_student) * 100 + 1)) + \"%\"\r\n sys.stdout.write(progress)\r\n sys.stdout.flush()\r\n print()\r\n return new_h_student\r\n\r\n# goes through each student and adds their classes\r\n# returns list of students with header that have classes assigned\r\ndef create_student_schedules(list_of_students, h_student):\r\n studnum = 1\r\n no_header = list_of_students[1::]\r\n header = list_of_students[0]\r\n for srow in no_header:\r\n if studnum >= 117:\r\n studnum += 1\r\n current_id = srow[0]\r\n i = 0 # reset row at start of s_student\r\n while i < len(h_student):\r\n if len(h_student) == 0:\r\n break\r\n hid = int(h_student[i][6].replace('(','|').replace(')','|').split('|',)[1])\r\n period = h_student[i][13] # picks period from the h_studet\r\n if hid == current_id and period > 0:\r\n srow[period+1] = h_student[i][17][:-6] # assigns room to above period\r\n del h_student[i]\r\n i -= 1\r\n if period == 8: # if you get to period 8, stop iterating through h_student...\r\n break # because you are done with the current student\r\n i += 1\r\n else:\r\n studnum += 1\r\n current_id = srow[0]\r\n i = 0 # reset row at start of s_student\r\n while i < len(h_student):\r\n if len(h_student) == 0:\r\n break\r\n hid = int(h_student[i][6].replace('(','|').replace(')','|').split('|',)[1])\r\n period = h_student[i][13] # picks period from the h_studet\r\n if hid == current_id and period > 0:\r\n srow[period+1] = h_student[i][17][:-6] # assigns room to above period\r\n del h_student[i]\r\n i -= 1\r\n if period == 8: # if you get to period 8, stop iterating through h_student...\r\n break # because you are done with the current student\r\n i += 1\r\n if studnum % int(len(no_header) / 25) == 0:\r\n progress = \"\\r\" + 'Creating student schedules...' + str(int(studnum/len(no_header) * 100 + 1)) + \"%\"\r\n sys.stdout.write(progress)\r\n sys.stdout.flush()\r\n print()\r\n no_header.insert(0, header)\r\n return no_header\r\n\r\n# returns list_of_nontesters, list_of_testers\r\ndef remove_testers(list_of_students, list_of_testers):\r\n los_copy = list_of_students[:]\r\n lot_copy = list_of_testers[:]\r\n testers_ints = []\r\n testers_only = []\r\n for row in lot_copy:\r\n testers_ints.append(int(row[0]))\r\n for row in los_copy:\r\n id = row[0]\r\n if row[0] in testers_ints:\r\n testers_only.append(row)\r\n list_of_students.remove(row)\r\n return list_of_students, testers_only\r\n\r\ndef create_class_dict(list_of_classes):\r\n dict_of_classes = {}\r\n no_header = list_of_classes[1::]\r\n for row in no_header:\r\n dict_of_classes[row[0]] = row[1:6]\r\n return dict_of_classes\r\n\r\ndef create_class_roster_dict(list_of_classes):\r\n dict_of_classes = {}\r\n no_header = list_of_classes[1::]\r\n for row in no_header:\r\n dict_of_classes[row[0]] = [row[1], [], [], [], []] # [name, 1st, 2nd, ...]\r\n return dict_of_classes\r\n\r\ndef class_counter(list_of_students, dict_of_classes, day):\r\n no_header = list_of_students[1::]\r\n if day.lower() == \"a\":\r\n for student in no_header:\r\n ps = 2 # start at student period one\r\n pd = 1 # start at dict period one\r\n while ps <= 5:\r\n if student[ps] == 0:\r\n break\r\n dict_of_classes[student[ps]][pd] += 1\r\n pd += 1\r\n ps += 1\r\n else:\r\n for student in no_header:\r\n print('on student', student)\r\n ps = 6 # start at period 5\r\n pd = 1 # start at dict period one\r\n while ps <= 9:\r\n if student[ps] == 0:\r\n break\r\n dict_of_classes[student[ps]][pd] += 1\r\n pd += 1\r\n ps += 1\r\n return dict_of_classes\r\n\r\ndef remove_class_dict(dict):\r\n class_count_list = []\r\n for entry in dict:\r\n class_count_list.append([entry, dict[entry][0], dict[entry][1], dict[entry][2], dict[entry][3], dict[entry][4]])\r\n # print(class_count_list)\r\n # code below doesn't belong, keeping for copypasta\r\n # for classroom in class_count_list:\r\n # if class_max(classroom) == 0:\r\n # class_count_list.remove(classroom)\r\n return class_count_list\r\n\r\ndef class_add(class1, class2):\r\n i = 0\r\n comb = []\r\n while i <= 1:\r\n comb.append(class1[i] + ' and ' + class2[i])\r\n i += 1\r\n while i < len(class1):\r\n comb.append(class1[i] + class2[i])\r\n i += 1\r\n return comb\r\n\r\n\r\ndef class_min(classroom):\r\n return min(classroom[2::])\r\ndef class_max(classroom):\r\n return max(classroom[2::])\r\ndef class_avg(classroom):\r\n sum = 0\r\n i = 2\r\n while i <= 5:\r\n if classroom[i] != 0:\r\n sum += classroom[i]\r\n i += 1\r\n return float(sum/3)\r\n\r\n\r\ndef class_nonzero_min(classroom):\r\n temp_all = classroom[2::]\r\n for element in temp_all:\r\n if element == 0:\r\n temp_all.remove(element)\r\n return min(temp_all)\r\n\r\ndef best_comb(poss_list, avg_size, spread):\r\n best_avg = 1000\r\n best_combo = []\r\n for poss in poss_list:\r\n if class_avg(poss) < best_avg and abs(class_avg(poss) - avg_size) <= spread:\r\n best_avg = class_avg(poss)\r\n best_combo = poss\r\n return best_combo\r\n\r\n# returns list of class combos, list of errors\r\ndef class_combiner(dict_of_rec, dict_of_send, max_class_size, avg_size, spread):\r\n list_of_rec = remove_class_dict(dict_of_rec)\r\n list_of_send = remove_class_dict(dict_of_send)\r\n list_of_send_dup = list_of_send[::]\r\n random.shuffle(list_of_send_dup)\r\n list_of_comb = []\r\n list_of_errors = []\r\n for send in list_of_send_dup:\r\n if len(list_of_errors) > 0:\r\n break\r\n # print('working on', send)\r\n poss_combos = []\r\n poss_rec = []\r\n for rec in list_of_rec:\r\n comb = class_add(send, rec)\r\n if class_min(comb) == 0 and class_max(comb) <= max_class_size and class_nonzero_min(comb) >= 7:\r\n # if there is a 0 and no period over the max, this is a candidate\r\n # print('compatable!')\r\n poss_combos.append(comb)\r\n poss_rec.append(rec)\r\n # after iterating through all classes, check for the best\r\n best_avg = 1000\r\n best_combo = []\r\n best_rec = []\r\n if len(poss_combos) == 0: # checks for no matches\r\n list_of_errors.append(send)\r\n break\r\n i = 0\r\n while i < len(poss_combos):\r\n if class_avg(poss_combos[i]) < best_avg and abs(class_avg(poss_combos[i]) - avg_size) <= spread:\r\n best_avg = class_avg(poss_combos[i])\r\n best_combo = poss_combos[i]\r\n best_rec = poss_rec[i]\r\n i += 1\r\n if len(best_combo) == 0:\r\n list_of_errors.append(send[0] + ' has no matches')\r\n continue\r\n list_of_comb.append(best_combo) # add the combo to the running list\r\n try:\r\n list_of_comb.remove(best_rec)\r\n except ValueError:\r\n pass\r\n list_of_rec.append(best_combo) # add the combo to be tried again\r\n list_of_rec.remove(best_rec) # remove the recieving class if it's too big\r\n list_of_send.remove(send)\r\n comb_avg = 0\r\n if len(list_of_comb) != 0:\r\n for comb in list_of_comb:\r\n comb_avg += class_avg(comb)\r\n comb_avg = round(float(comb_avg / len(list_of_comb)), 1)\r\n rec_avg = 0\r\n for rec in list_of_rec:\r\n rec_avg += class_avg(rec)\r\n rec_avg = round(float(rec_avg / len(list_of_rec)), 1)\r\n return list_of_comb, list_of_errors, comb_avg, rec_avg\r\n\r\ndef remove_testing_teachers(class_counts, testing_teachers):\r\n testing_dict = {}\r\n no_kids = {}\r\n for teacher in testing_teachers:\r\n testing_dict[teacher] = class_counts[teacher]\r\n if max(testing_dict[teacher][1::]) == 0: # remove if no\r\n no_kids[teacher] = testing_dict[teacher]\r\n del testing_dict[teacher]\r\n del class_counts[teacher]\r\n return class_counts, testing_dict, no_kids\r\n\r\ndef remove_exempt_teachers(class_counts, exempt_teachers):\r\n for teacher in exempt_teachers:\r\n del class_counts[teacher]\r\n return class_counts\r\n\r\ndef combo_checker(dict_of_rec, dict_of_send, average_size, spread):\r\n absolute_best_avg = 100\r\n absolute_best_combos = []\r\n all_trials_combs = []\r\n avg_min = average_size - spread\r\n avg_max = average_size + spread\r\n best_comb_avg = 0\r\n best_rec_avg = 0\r\n for avg in range(avg_min, avg_max + 1):\r\n if int(avg / (avg_max - avg_min) * 100) % 5 == 0:\r\n progress = \"\\r\" + 'Finding best matches...' + str(int(avg / (avg_max) * 100)) + \"%\"\r\n sys.stdout.write(progress)\r\n sys.stdout.flush()\r\n for spread in range(0, spread + 1):\r\n current_comb, current_errors, comb_avg, rec_avg = class_combiner(dict_of_rec, dict_of_send, 35, avg, spread)\r\n if len(current_errors) == 0:\r\n all_trials_combs.append(current_comb)\r\n avg_spread = abs(comb_avg - rec_avg)\r\n if abs(comb_avg - rec_avg) < absolute_best_avg:\r\n absolute_best_avg = round(avg_spread,1)\r\n absolute_best_combos = current_comb\r\n best_comb_avg = comb_avg\r\n best_rec_avg = rec_avg\r\n print()\r\n j = 1\r\n for i in absolute_best_combos:\r\n i.insert(0,j)\r\n print(i)\r\n j += 1\r\n if len(all_trials_combs) == 0:\r\n print('no matches, expanding search with avg:', average_size + 1, 'and spread: ', spread + 1)\r\n combo_checker(dict_of_rec, dict_of_send, average_size + 1, spread + 1)\r\n sys.stdout.flush()\r\n else:\r\n master_trial_tracker.append(['start of trial ' + str(trials)])\r\n master_trial_tracker.append(['Count', 'Rooms', 'Teachers', '1st/5th', '2nd/6th', '3rd/7th', '4th/8th'])\r\n for t in absolute_best_combos:\r\n master_trial_tracker.append(t)\r\n master_trial_tracker.append(['end of trial ' + str(trials)])\r\n print(\"all trials: \", len(all_trials_combs))\r\n print(\"best combo spread: \", best_comb_avg, \" - \", best_rec_avg, \" = \", absolute_best_avg)\r\n\r\ndef split_class_combos(class_combos):\r\n separated_classes = []\r\n for combo in class_combos:\r\n curr_comb = []\r\n the_split = combo.split(\" and \")\r\n for ind_class in the_split:\r\n curr_comb.append(ind_class)\r\n separated_classes.append(curr_comb)\r\n return separated_classes\r\n\r\n# Creates a list of kids in each relocation class\r\ndef create_class_roster(student_schedules, list_of_classes, class_combos, day):\r\n class_combo_dict = {}\r\n for combo in class_combos:\r\n class_combo_dict[combo] = [combo, [], [], [], []]\r\n split_classes = split_class_combos(class_combos)\r\n roster_dict = create_class_roster_dict(list_of_classes)# [[name], [1st], [2nd], [3rd], [4th]]\r\n if day == 'a':\r\n ini_period = 2\r\n index_fixer = 1\r\n else:\r\n ini_period = 6\r\n index_fixer = 5\r\n max_period = ini_period + 4\r\n for student in student_schedules:\r\n period = ini_period\r\n while period < max_period:\r\n try:\r\n roster_dict[student[period]][period - index_fixer].append([student[0], student[1]])\r\n except KeyError:\r\n pass\r\n period += 1\r\n i = 0\r\n master_roster = []\r\n while i < len(split_classes):\r\n period = ini_period\r\n while period < max_period:\r\n master_roster.append([\"Period \" + str(period - 1) + \" | Rooms: \" + class_combos[i]])\r\n master_roster.append([\"Count\", \"ID\", \"Name\", \"Attendance\"])\r\n period_roster = []\r\n for c in split_classes[i]:\r\n for kid in roster_dict[c][period - index_fixer]:\r\n period_roster.append(kid)\r\n period_roster.sort(key=lambda x: x[1])\r\n j = 0\r\n while j < len(period_roster):\r\n period_roster[j].insert(0, j + 1)\r\n j += 1\r\n for row in period_roster:\r\n master_roster.append(row)\r\n period += 1\r\n i += 1\r\n # for entry in master_roster:\r\n # print(entry)\r\n return master_roster\r\n\r\n# Creates xlsx from a list with a header\r\ndef xlsx_creator(list_of_stuff, path, header):\r\n wb = Workbook()\r\n ws = wb.active\r\n for r in range(0, len(list_of_stuff)):\r\n for c in range(0, len(list_of_stuff[r])):\r\n if list_of_stuff[r][c] == \"Count\":\r\n ws.page_breaks.append(worksheet.pagebreak.Break(r - 1))\r\n ws.cell(row = r + 1, column = c + 1).value = list_of_stuff[r][c]\r\n ws.header_footer.center_header.text = header\r\n ws.header_footer.left_footer.text = 'Teacher Name: ____________________________'\r\n ws.header_footer.right_footer.text = 'Signature: ____________________________'\r\n wb.save(path)\r\n\r\n\r\n# hstudent = csv_reader('h_student 3-8 new.csv')\r\n# hstudent_clean_pds = period_cleaner(hstudent) # fix h_student periods (P2 -> 2) and (P1/5 split)\r\n# all_students = student_template_creator(hstudent) # create [id, name, 1st,...] templat\r\n# clean_students = create_student_schedules(all_students, hstudent_clean_pds) # assigns students classes\r\n# class_template = class_creator(hstudent) #\r\n# create_class_roster(clean_students, class_template, 'blah', 'b')\r\n# print()\r\n\r\n\r\neng1_testing_teachers = ['141', '182', '203', '209', '233', '235', '237', '239', '248', '249', '255', '257', '260',\r\n '300', '302', '310', '330', '332', '337', '339', '340', '341', '342', '343', '345', '347',\r\n '350', '351', '353', '412', '500', '502', '504', '510', '512', '601', '824', '921', '938',\r\n '6B']\r\n\r\neng2_testing_teachers = ['131', '132', '133', '182', '209', '233', '235', '237', '239', '245', '247', '248', '249',\r\n '255', '256', '257', '260', '353', '412', '500', '502', '510', '601', '700', '701',\r\n '702', '703', '824', '825', '828', '921']\r\n\r\nalg_exempt_teachers = ['125', '112', '117', '121', '115', '13A', '10A', '10B', '20B', '31A', '603',\r\n '725', '180', '182', '124', '126', '128']\r\n\r\n\r\nexempt2_teachers = ['125', '112', '117', '121', '115', '13A', '32A', '10A', '10B', '20B', '32B', '31A', '603', '22A',\r\n '31B', '128', '21B', '725']\r\n\r\nsat_testing_teachers = ['143', '801', '139', '203', '142', '140', '124', '700', '22A', '723', '134', '504', '818',\r\n '724', '133', '136', '721', '816', '135', '180', '820', '506', '722', '238', '120', '243',\r\n '601', '131', '132']\r\nexemptsat_teachers = []\r\n\r\nalg_teseting_teachers = ['141', '504', '260', '330', '829', '203', '512', '305', '337', '332', '312',\r\n '259', '300', '508', '302', '825', '207', '502', '257', '339', '333', '307', '6B', '310',\r\n '303', '249']\r\n\r\nhist_testing_teachers = ['721', '133', '134', '136', '830', '801', '502', '22B', '938', '723', '5A',\r\n '247', '824', '816', '245', '131', '132', '825']\r\n\r\nbio_testing_teachers = ['347', '340', '341', '342', '508', '412', '502',\r\n '312', '311', '303', '305', '307', '510', '350',\r\n '339', '333', '345', '343', '351', '332', '330',\r\n '337', '260']\r\n\r\n\r\nclass1_combos = ['232 and 238', '342 and 132', '504 and 339 and 727', '260 and 180', '300 and 728', '500 and 307',\r\n '502 and 207', '248 and 508', '233 and 124', '345 and 337 and 801', '601 and 816', '921 and 311',\r\n '257 and 933', '938', '341 and 343 and 182 and 245', '141 and 237 and 2A and 829', '235 and 2B',\r\n '510 and 6B', '353 and 312', '351 and 4B','209 and 5A', '824 and 5B and 313', '242 and 240',\r\n '239 and 303', '203', '330']\r\n\r\nclass2_combos = ['237 and 120', '353 and 124', '248 and 131 and 240', '132 and 233 and 249 and 252', '133 and 232',\r\n '257 and 727', '235 and 307', '245 and 313', '247 and 241', '500 and 728', '412 and 253',\r\n '182 and 504', '510 and 250', '702 and 601 and 816', '239 and 818', '701 and 828 and 820',\r\n '824 and 242', '825 and 251', '921 and 926', '260 and 933', '700 and 259', '209 and 2B',\r\n '703 and 256 and 5B', '502 and 6B']\r\n\r\n# these run through the process from start to finish\r\nhstudent = csv_reader('hstudent 4 1 16.csv')\r\ntesters = csv_reader('biotesters.csv')\r\nprint(len(testers), 'testers')\r\nhstudent_clean_pds = period_cleaner(hstudent) # fix h_student periods (P2 -> 2) and (P1/5 split)\r\nall_students = student_template_creator(hstudent) # create [id, name, 1st,...] template\r\nprint(len(all_students), 'all students')\r\nclean_students = create_student_schedules(all_students, hstudent_clean_pds) # assigns students classes\r\n# csv_writer(clean_students, 'cleanschedules 4 1.csv')\r\n\r\nclass_template = class_creator(hstudent)\r\ncs = len(clean_students)\r\nprint(len(clean_students), 'clean students')\r\nlist_of_nontesters, list_of_testers = remove_testers(clean_students, testers)\r\nprint(len(list_of_nontesters), 'non testers', len(list_of_testers), 'testers', 'difference:', cs - len(list_of_testers))\r\n\r\n# # creates rosters for non-testing classes\r\n# rost = create_class_roster(list_of_nontesters, class_template, class2_combos, 'b')\r\n# xlsx_creator(rost, 'eng2 relocation rosters 3-23.xlsx', 'STAAR Relocation Attendance - 3/31/16')\r\n\r\nclass_dict = create_class_dict(class_template)\r\nclass_dict = class_counter(list_of_nontesters, class_dict, \"b\")\r\nclass_array = remove_class_dict(class_dict)\r\n# csv_writer(class_array, 'sat leftovers 4 1.csv')\r\nclass_dict = remove_exempt_teachers(class_dict, alg_exempt_teachers)\r\n# Change the testing teachers!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\ndict_of_rec, dict_of_send, no_kids = remove_testing_teachers(class_dict, bio_testing_teachers)\r\nno_kids_list = remove_class_dict(no_kids)\r\n\r\n# creates combined classes\r\nmaster_trial_tracker.append([\"These testing teachers have no kids\"])\r\nmaster_trial_tracker.append(['Rooms', 'Teachers', '1st/5th', '2nd/6th', '3rd/7th', '4th/8th'])\r\nfor t in no_kids_list:\r\n master_trial_tracker.append(t)\r\n\r\n# for row in rec_classes:\r\n# print(row, rec_classes[row])\r\n# for row in dict_of_send:\r\n# print(row, dict_of_send[row])\r\nclass_combiner(dict_of_rec, dict_of_send, 35, 25, 10)\r\n\r\nwhile trials <= 5:\r\n combo_checker(dict_of_rec, dict_of_send, 15, 15)\r\n trials += 1\r\ncsv_writer(master_trial_tracker, '5 trials bio.csv')\r\n\r\n# calculates the run time\r\nemillis = int(round(time.time() * 1000))\r\ndif = float((emillis - smillis)/1000)\r\nprint('Seconds elapsed:',dif)","sub_path":"new_csv_functions.py","file_name":"new_csv_functions.py","file_ext":"py","file_size_in_byte":22817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"362543689","text":"from swann.utils import (get_config, get_layout,\n exclude_subjects, my_events)\nfrom swann.preprocessing import find_ica, apply_ica, mark_autoreject\nfrom swann.viz import plot_find_bads, plot_ica\n\nconfig = get_config()\nlayout = get_layout()\n\neegfs = (layout.get(task=config['task'],\n suffix='eeg', extension='bdf') +\n layout.get(task=config['task'],\n suffix='eeg', extension='vmrk'))\neegfs = exclude_subjects(eegfs)\n\noverwrite_eeg = \\\n input('Overwrite preprocessed eeg data if ' +\n 'they exist? (y/n)\\n').upper() == 'Y'\n\n# loop across subjects\nfor eegf in eegfs:\n plot_find_bads(eegf, overwrite=overwrite_eeg)\n\n# this will probably take ~5 minutes per subject, probably come back later\nfor eegf in eegfs:\n find_ica(eegf, overwrite=overwrite_eeg)\n\n# need user input to select out blinks, sacades, heartbeak and muscle artifact\nfor eegf in eegfs:\n plot_ica(eegf, overwrite=overwrite_eeg)\n\n# this will take even longer ~20+ minutes per subject depending on task length\nfor eegf in eegfs:\n raw = apply_ica(eegf)\n for event in my_events():\n mark_autoreject(eegf, raw, event, overwrite=overwrite_eeg)\n","sub_path":"03_preprocessing.py","file_name":"03_preprocessing.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"210624084","text":"import django_filters\nfrom .models import Post\n\n\nclass PostFilter(django_filters.FilterSet):\n\n CHOICES = (\n ('ascending', 'Ascending'),\n ('descending', 'Descending'),\n )\n\n CATEGORIES = (\n ('1', 'Combat'),\n ('2', 'Food'),\n ('3', 'Storage'),\n ('4', 'Magic'),\n ('5', 'World Gen'),\n ('6', 'API and Library'),\n ('7', 'Mobs and Creatures'),\n ('8', 'Armor, Tools and Weapons'),\n ('9', 'Cosmetic'),\n ('10', 'Technology'),\n ('11', 'Miscellaneous'),\n ('12', 'Other'),\n )\n\n category = django_filters.ChoiceFilter(choices=Post.CATEGORY_CHOICES)\n version = django_filters.ChoiceFilter(label='Version', choices=Post.MOD_VERSION)\n ordering = django_filters.ChoiceFilter(label='Ordering', choices=CHOICES, method='filter_by_order')\n\n class Meta:\n model = Post\n fields = {\n 'title': ['icontains'],\n 'content': ['icontains'],\n 'tags': ['icontains'],\n }\n\n def filter_by_order(self, queryset, name, value):\n expression = 'date_posted' if value == 'ascending' else '-date_posted'\n return queryset.order_by(expression)\n","sub_path":"mods/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"150921182","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\ndef pytest_addoption(parser):\n parser.addoption('--language', action='store', default=\"en\", help=\"Choose language: en, ru, es, ar, ca, cs, da, de, el, fi, fr, it, ko, nl, pl, pt, pt-br, ro, sk, uk, zh-cn\")\n\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n lang = request.config.getoption(\"language\")\n options = Options()\n if lang is not None:\n print(\"\\nstart using language for test\")\n else:\n raise pytest.UsageError(\"language should be added, for example, en, ru, es, ar, ca, cs, da, de, el, fi, fr, it, ko, nl, pl, pt, pt-br, ro, sk, uk, zh-cn\")\n options.add_experimental_option('prefs', {'intl.accept_languages': lang})\n browser = webdriver.Chrome(options=options)\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"426915581","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n s = []\n ans = []\n\n while root or s:\n if root:\n s.append(root)\n root = root.left\n else:\n root = s.pop()\n ans.append(root.val)\n root = root.right\n #print ans\n if len(ans) < 1: return True\n\n for i in range(1, len(ans)):\n if ans[i] <= ans[i - 1]: return False\n return True\n\n\nclass Solution_02(object):\n def validBSTHelper(self, root):\n if not root:\n return True\n\n res = True\n l = self.validBSTHelper(root.left)\n\n if not self.prev:\n self.prev = root\n else:\n if root.val <= self.prev.val:\n res = False\n self.prev = root\n\n r = self.validBSTHelper(root.right)\n\n return l and r and res\n\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n self.prev = None\n return self.validBSTHelper(root)","sub_path":"LPractice/98. Validate Binary Search Tree.py","file_name":"98. Validate Binary Search Tree.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"111277698","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport random\nimport csv\nimport os\n\n\n\ndef get_urls_from_csv():\n\twith open('urls.csv', 'rU') as infile:\n\t\treader = csv.DictReader(infile)\n\t\tdata = {}\n\t\tfor row in reader:\n\t\t\tfor header, value in row.items():\n\t\t\t\ttry:\n\t\t\t\t\tdata[header].append(value)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tdata[header] = [value]\n\turls = data['Old URLs']\n\treturn urls\n\t\ndef close_all_tabs():\n\ti = len(driver.window_handles) - 1\n\twhile i > 0:\n\t\t driver.switch_to.window(driver.window_handles[i])\n\t\t driver.close()\n\t\t i = i-1\n\tdriver.switch_to.window(driver.window_handles[0])\n\ndef get_manual_url():\n\turl = raw_input('Enter URL ---> ')\n\tlist_url_csv = get_urls_from_csv()\n\ti_start = list_url_csv.index(url)\n\treturn i_start\n\n\ndef log(data, message):\n\tlogging.basicConfig(filename='src.log',level=logging.DEBUG)\n\ttime_now = time.strftime(\"%Y-%m-%d %H:%M\")\n\tlogging.info(' ' + message + ' ' + data + ' DATE: ' + time_now)\n\ndef edit_url(url):\n\ts = url\n\tdomain = 'https://www.'\n\thtml = '.html'\n\ts = s.replace(domain, '')\n\ts = s.replace(html, '')\n\ts = s.replace('-', ' ')\n\ts = s.replace('/mobile/p/', ' ')\n\ts = s.replace('/', ' ')\n\ts = s.replace('img_catalog', '')\n\ts = s.replace('_', ' ')\n\ts = s.replace('=', ' ')\n\ts = s.replace('.pdf', ' ')\n\ts = s.replace('.aspx', ' ')\n\ts = s.replace('?', ' ')\n\ts = s.replace('%', ' ')\n\ts = s.replace('&', ' ')\n\turl = 'site:' + s\n\tlog(url, ' New url')\n\treturn url\n\t\ndef xpath_canonical():\n k = 'copy($x('\n k = k + \"'//*[@rel=\"\n k = k + '\"canonical\"]/@href'\n k = k + \"')[0].textContent)\"\n log(k, ' XPath canonical URL')\n return k\n\ndef xpath_mobile():\n k = 'copy($x('\n k = k + \"'//*[@rel=\"\n k = k + '\"alternate\"]/@href'\n k = k + \"')[0].textContent)\"\n log(k, ' XPath canonical URL')\n return k\n \ndef get_url_random():\n #foo = ['https://www.google.com', 'https://duckduckgo.com', 'https://www.bing.com/search?q=*']\n foo = ['https://duckduckgo.com']\n secure_random = random.SystemRandom()\n url_random = secure_random.choice(foo)\n return url_random\n\ndef xpath_search():\n\txpath = '//input[@id=\"search_form_input_homepage\" or @id=\"lst-ib\" or @id=\"sb_form_q\"]'\n\treturn xpath\n\ndef clear_console():\n\tos.system('cls' if os.name=='nt' else 'clear')\t\n\n\n\n### This is a MAIN ###\n\nopen(\"src.log\",\"w\").close()\nlist_urls = get_urls_from_csv()\ni = get_manual_url()\nm = ''\n\ndriver = webdriver.Chrome(executable_path='./chromedriver')\ndriver.get(get_url_random())\n\ndriver.execute_script('window.open(\"https://duckduckgo.com\",\"_blank\");')\ndriver.switch_to.window(driver.window_handles[1])\nelem = driver.find_element_by_xpath(xpath_search())\nelem.send_keys(xpath_canonical())\n\nActionChains(driver).key_down(Keys.LEFT_CONTROL).send_keys('a').key_up(Keys.LEFT_CONTROL).perform()\nActionChains(driver).key_down(Keys.LEFT_CONTROL).send_keys('c').key_up(Keys.LEFT_CONTROL).perform()\n\nwhile i < len(list_urls):\n\tclose_all_tabs()\n\tdriver.get(get_url_random())\n\telem = driver.find_element_by_xpath(xpath_search())\n\telem.clear()\n\telem.send_keys(edit_url(list_urls[i]))\n\telem.send_keys(Keys.ENTER)\n\tprint('URL ---> ' + list_urls[i])\n\ti = i + 1\n\telem_pos_in_sheet = i + 1\n\tprint('Element № = ' + str(elem_pos_in_sheet))\n\tm = raw_input('Enter \"q\" for exit ---> ')\n\tclear_console()\n\tif m == 'q':\n\t\tbreak\n \ndriver.quit()\n","sub_path":"srch.py","file_name":"srch.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"644215527","text":"import discord\nfrom threading import Thread\nimport time\n\nTOKEN = 'NDczNzUxNDE5NjY5OTcwOTQ0.DkGfbg.qQE0BJe9mwq1ihmaStOatt7ObsY'\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_message(message):\n\n def start_music(param):\n global a\n global player\n global b\n a = True\n b = 1\n\n if param == 1:\n def start_music_in_thread():\n channel = client.get_channel(\"408661560580636682\")\n vc = client.join_voice_channel(channel)\n player = vc.create_ffmpeg_player('testing.mp3')\n player.start()\n while a:\n print(\"here2\")\n if b == 2:\n print(\"here\")\n player.stop()\n\n music_thread = Thread(target=start_music_in_thread)\n music_thread.start()\n\n if param == 2:\n a = False\n b = 2\n start_music(2)\n\n if message.author == client.user:\n return\n\n if message.content.startswith(\"!stopp\"):\n a = False\n await client.send_message(message.channel, \"Hi\")\n start_music(2)\n\n if message.content.startswith('!hello'):\n msg = 'Hello {0.author.mention}'.format(message)\n await client.send_message(message.channel, msg)\n\n if message.content.startswith(\"!creator\"):\n msg = \"My creator is Geneus003\"\n msg2 = \"EMail: geneus003@gmail.com\"\n msg3 = \"Qiwi: +79832095427\"\n await client.send_message(message.channel, msg)\n await client.send_message(message.channel, msg2)\n await client.send_message(message.channel, msg3)\n\n if message.content.startswith(\"!music\"):\n msg = \"gachi music\"\n await client.send_message(message.channel, msg)\n channel = client.get_channel(\"408661560580636682\")\n vc = await client.join_voice_channel(channel)\n player = vc.create_ffmpeg_player('testing.mp3', after=lambda: print('done'))\n player.start()\n start_music(1)\n\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclient.run(TOKEN)","sub_path":"main (copy).py","file_name":"main (copy).py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"641923194","text":"# ================================== IMPORT ============================================>\r\nimport numpy as np\r\nimport pandas as pd\r\n# import seaborn as sns\r\n# import matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import ExtraTreesRegressor # FOR FEATURE IMPORTANCE\r\n\r\nimport pickle\r\nfrom sklearn.ensemble import RandomForestRegressor # FOR MODEL CREATION\r\nfrom sklearn.model_selection import RandomizedSearchCV # FOR PARAMETER TUNING\r\nfrom sklearn import metrics\r\n\r\n# ================================== DATASET ========================================>\r\n\r\ndf = pd.read_csv(\"car data .csv\")\r\n\r\n# =================================== MAKING DATASET ==========================================>\r\n\r\nfinal_dataset = df[\r\n ['Year', 'Selling_Price', 'Present_Price', 'Kms_Driven', 'Fuel_Type', 'Seller_Type', 'Transmission', 'Owner']]\r\nfinal_dataset['Current Year'] = 2020\r\nfinal_dataset['no_year'] = final_dataset['Current Year'] - final_dataset['Year']\r\nfinal_dataset.drop(['Year'], axis=1, inplace=True)\r\nfinal_dataset = pd.get_dummies(final_dataset, drop_first=True)\r\nfinal_dataset = final_dataset.drop(['Current Year'], axis=1)\r\n\r\n# ============================= VISUALIZATION ================================================>\r\n\r\n# import seaborn as sns\r\n# #get correlations of each features in dataset\r\n# corrmat = df.corr()\r\n# top_corr_features = corrmat.index\r\n# plt.figure(figsize=(20,20))\r\n# #plot heat map\r\n# g = sns.heatmap(df[top_corr_features].corr(),annot=True,cmap=\"RdYlGn\")\r\n\r\n\r\n# ==================================== ================================================>\r\n\r\nX = final_dataset.iloc[:, 1:]\r\ny = final_dataset.iloc[:, 0]\r\n\r\n# ================================== FEATURE IMPORTANCE ===================================>\r\n\r\nmodel = ExtraTreesRegressor()\r\nmodel.fit(X, y)\r\n\r\n# ========================= plot graph of feature importance for better visualization =========>\r\n\r\n# feat_importances = pd.Series(model.feature_importances_, index=X.columns)\r\n# feat_importances.nlargest(5).plot(kind='barh')\r\n# plt.show()\r\n\r\n# ====================================================>\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\r\n\r\n# ================================ MODEL ==========================================================>\r\n\r\nregressor = RandomForestRegressor()\r\n\r\n# ============================ HYPER PARAMETER TUNING ================================ ===============>\r\n\r\n# Number of trees in random forest\r\nn_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]\r\n# Number of features to consider at every split\r\nmax_features = ['auto', 'sqrt']\r\n# Maximum number of levels in tree\r\nmax_depth = [int(x) for x in np.linspace(5, 30, num=6)]\r\n# max_depth.append(None)\r\n# Minimum number of samples required to split a node\r\nmin_samples_split = [2, 5, 10, 15, 100]\r\n# Minimum number of samples required at each leaf node\r\nmin_samples_leaf = [1, 2, 5, 10]\r\n\r\n# ===================================# Create the random grid ================================>\r\n\r\nrandom_grid = {'n_estimators': n_estimators,\r\n 'max_features': max_features,\r\n 'max_depth': max_depth,\r\n 'min_samples_split': min_samples_split,\r\n 'min_samples_leaf': min_samples_leaf}\r\n\r\n# ====================================== ===============================================>\r\n\r\n# Use the random grid to search for best hyper parameters\r\n# First create the base model to tune\r\nrf = RandomForestRegressor()\r\n\r\n# ======================================== ===========================================================>\r\n\r\n# Random search of parameters, using 3 fold cross validation,\r\n# search across 100 different combinations\r\nrf_random = RandomizedSearchCV(estimator=rf,\r\n param_distributions=random_grid,\r\n scoring='neg_mean_squared_error',\r\n n_iter=10, cv=5, verbose=2,\r\n random_state=42, n_jobs=1)\r\n\r\nrf_random.fit(X_train,y_train)\r\n\r\n# ================================= BEST PARAMETER & SCORE ===============================>\r\n\r\nbest = rf_random.best_params_, rf_random.best_score_\r\n# print(best)\r\n\r\n# ==================================== Predictions ===========================================>\r\n\r\npredictions=rf_random.predict(X_test)\r\n\r\n# ==========================================>\r\n\r\nprint('MAE:', metrics.mean_absolute_error(y_test, predictions))\r\nprint('MSE:', metrics.mean_squared_error(y_test, predictions))\r\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))\r\n\r\n# =========================================== Model Save ===================================================>\r\n\r\nfile = open('car_model_save.pkl', 'wb')\r\n# dump information to that file\r\npickle.dump(rf_random, file)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"84184313","text":"import os\nimport base64\n\nfrom flask import Flask, render_template, request, redirect, url_for, session\n\nfrom model import Donation \n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return redirect(url_for('all'))\n\n@app.route('/donations/')\ndef all():\n donations = Donation.select()\n return render_template('donations.jinja2', donations=donations)\n\n\n@app.route('/donate/', methods=['GET', 'POST'])\ndef create():\n\n if request.method == 'POST':\n task = Task(name=request.form['name'])\n task = Task(name=request.form['number'])\n task.save()\n\n return redirect(url_for('donations'))\n else:\n return render_template('donate.jinja2')\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 6738))\n app.run(host='0.0.0.0', port=port)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"337467304","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nimport requests\nimport json\nimport os\nimport hmac\nimport hashlib\nimport threading\ntry:\n import config\nexcept ImportError:\n import config_sample as config\n print(\"[Critical] Configuration file not found.\")\n exit()\n\n\ndef handle_status_update(post_body):\n payload = json.loads(post_body) # parse web hook payload\n print(\"[Info] Status update received\")\n if (payload['context'].find(config.statusContext) != -1) \\\n and (payload['state'] == 'success') \\\n and (payload['name'] == config.repositoryName):\n print(\"[Info] Build success, now deploying...\")\n sha = payload['sha']\n git_command = \"cd {local_path}\\n\" \\\n \"git pull origin master\\n\" \\\n \"git checkout {target_sha}\\n\\n\".format(local_path=config.localPath,\n target_sha=sha)\n commands = git_command + config.deployCommand + \"\\n\\n git checkout master\\n\\n\"\n os.system(commands)\n\n\nclass MyServer(BaseHTTPRequestHandler):\n \"\"\"\n Server class listening to GitHub web hook.\n \"\"\"\n def do_POST(self):\n self.send_response(200)\n self.end_headers()\n content_len = int(self.headers['content-length'])\n post_body = self.rfile.read(content_len).decode()\n\n # Validate signature\n sha_name, signature = self.headers['X-Hub-Signature'].split('=')\n if sha_name != 'sha1':\n return\n mac = hmac.new(config.githubSecretKey.encode(), msg=post_body.encode(), digestmod=hashlib.sha1)\n if not hmac.compare_digest(mac.hexdigest(), signature):\n print(\"[Warning] Ignoring a web hook call due to incorrect signature.\")\n return\n\n # Handle post body\n if self.headers['X-Github-Event'] == 'status':\n th = threading.Thread(target=handle_status_update, args=(post_body,))\n th.start()\n\n\nmyServer = HTTPServer((config.listenAddress, config.listenPort), MyServer)\nprint(\"[Info] Start...\")\n\ntry:\n myServer.serve_forever()\nexcept KeyboardInterrupt:\n pass\n\nmyServer.server_close()\nprint(\"[Info] Stop...\")","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"627915955","text":"import pathlib\nimport zlib\n\nimport construct\nfrom construct import (Checksum, Const, CString, Flag, GreedyBytes,\n GreedyRange, Hex, Int8ul, Int16ul, Int32ul, Padded,\n Padding, Prefixed, RawCopy, Rebuild, Struct, len_, this)\n\nDFU_SIGNATURE = b'DfuSe'\nDFU_size = Rebuild(Int32ul, 0)\n\n\ndef DFU_file_length(ctx):\n '''Compute the entire file size + 4 bytes for CRC\n The total DFU file length is ostensibly the actual\n length in bytes of the resulting file.\n However DFU File Manager does not seem to agree,\n since it's output size is 16 bytes short.\n Since this is suspiciously the same as the suffix\n length in bytes, we omit that number to match\n DFU File Manager's output.\n '''\n size = 11 # DFU Header Length\n # size += 16 # DFU Suffix Length\n for target in ctx.targets:\n # Each target has a 274 byte header consisting\n # of the following fields:\n size += Const(DFU_SIGNATURE).sizeof() # szSignature ('Target' in bytes)\n size += Int8ul.sizeof() # bAlternateSetting\n size += Int8ul.sizeof() # bTargetNamed\n size += Padding(3).sizeof() # Padding\n size += Padded(255, CString('utf8')).sizeof() # szTargetName\n size += Int32ul.sizeof() # dwTargetSize\n size += Int32ul.sizeof() # dwNbElements\n size += DFU_target_size(target)\n\n return size\n\n\ndef DFU_target_size(ctx):\n '''Returns the size of the target binary data, plus the\n dwElementAddress header, and dwElementSize byte count.\n '''\n size = 0\n\n try:\n images = ctx.images\n except AttributeError:\n images = ctx['images']\n\n size += sum([DFU_image_size(image) for image in images])\n return size\n\n\ndef DFU_image_size(image):\n return len(image['data']) + Int32ul.sizeof() + Int32ul.sizeof()\n\n\nDFU_image = Struct(\n 'dwElementAddress' / Hex(Int32ul), # Data offset address for image\n 'data' / Prefixed(Int32ul, GreedyBytes)\n)\n\nDFU_target = Struct(\n 'szSignature' / Const(b'Target'), # DFU target identifier\n 'bAlternateSetting' / Int8ul, # Gives device alternate setting for which this image can be used\n 'bTargetNamed' / Flag, # Boolean determining if the target is named\n Padding(3), # Mystery bytes!\n 'szTargetName' / Padded(255, CString('utf8')), # Target name\n # DFU File Manager does not initialise this\n # memory, so our file will not exactly match\n # its output.\n 'dwTargetSize' / Rebuild(Int32ul, DFU_target_size), # Total size of target images\n 'dwNbElements' / Rebuild(Int32ul, len_(this.images)), # Count the number of target images\n 'images' / GreedyRange(DFU_image)\n)\n\nDFU_body = Struct(\n 'szSignature' / Const(DFU_SIGNATURE), # DFU format identifier (changes on major revisions)\n 'bVersion' / Const(1, Int8ul), # DFU format revision (changes on minor revisions)\n 'DFUImageSize' / Rebuild(Int32ul, DFU_file_length), # Total DFU file length in bytes\n 'bTargets' / Rebuild(Int8ul, len_(this.targets)), # Number of targets in the file\n\n 'targets' / GreedyRange(DFU_target),\n\n 'bcdDevice' / Int16ul, # Firmware version, or 0xffff if ignored\n 'idProduct' / Hex(Int16ul), # USB product ID or 0xffff to ignore\n 'idVendor' / Hex(Int16ul), # USB vendor ID or 0xffff to ignore\n 'bcdDFU' / Const(0x011A, Int16ul), # DFU specification number\n 'ucDfuSignature' / Const(b'UFD'), # 0x44, 0x46 and 0x55 ie 'DFU' but reversed\n 'bLength' / Const(16, Int8ul) # Length of the DFU suffix in bytes\n)\n\nDFU = Struct(\n 'fields' / RawCopy(DFU_body),\n 'dwCRC' / Checksum(Int32ul, # CRC calculated over the whole file, except for itself\n lambda data: 0xffffffff ^ zlib.crc32(data),\n this.fields.data)\n)\n\n\ndef display_dfu_info(parsed):\n print(f'''\nDevice: {parsed.fields.value.bcdDevice}\nTarget: {parsed.fields.value.idProduct:04x}:{parsed.fields.value.idVendor:04x}\nSize: {parsed.fields.value.DFUImageSize:,} bytes\nTargets: {parsed.fields.value.bTargets}''')\n for target in parsed.fields.value.targets:\n print(f'''\n Name: {target.szTargetName}\n Alternate Setting: {target.bAlternateSetting}\n Size: {target.dwTargetSize:,} bytes\n Images: {target.dwNbElements}''')\n for image in target.images:\n print(f'''\n Offset: {image.dwElementAddress}\n Size: {len(image.data):,} bytes\n''')\n\n\ndef build(input_file, output_file, address, force=False, id_product=0x0000, id_vendor=0x0483):\n if not output_file.parent.is_dir():\n raise RuntimeError(f'Output directory \"{output_file.parent}\" does not exist!')\n elif output_file.is_file() and not force:\n raise RuntimeError(f'Existing output file \"{output_file}\", use --force to overwrite!')\n\n if not input_file.suffix == \".bin\":\n raise RuntimeError(f'Input file \"{input_file}\", is not a .bin file?')\n\n output = DFU.build({'fields': {'value': {\n 'targets': [{\n 'bAlternateSetting': 0,\n 'bTargetNamed': True,\n 'szTargetName': 'ST...',\n 'images': [{\n 'dwElementAddress': address,\n 'data': open(input_file, 'rb').read()\n }]\n }],\n 'bcdDevice': 0,\n 'idProduct': id_product,\n 'idVendor': id_vendor\n }}})\n\n open(output_file, 'wb').write(output)\n\n\ndef read(input_file):\n try:\n return DFU.parse(open(input_file, 'rb').read())\n except construct.core.ConstructError as error:\n RuntimeError(f'Invalid dfu file {input_file} ({error})')\n\n\ndef dump(input_file, force=False):\n parsed = read(input_file)\n\n for target in parsed.fields.value.targets:\n target_id = target.bAlternateSetting\n for image in target.images:\n address = image.dwElementAddress\n data = image.data\n dest = str(input_file).replace('.dfu', '')\n filename = f\"{dest}-{target_id}-{address}.bin\"\n\n if pathlib.Path(filename).is_file() and not force:\n raise RuntimeError(f'Existing output file \"{filename}\", use --force to overwrite!')\n\n print(f\"Dumping image at {address} to {filename} ({len(data)} bytes)\")\n\n open(filename, 'wb').write(data)\n","sub_path":"src/ttblit/core/dfu.py","file_name":"dfu.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"391917911","text":"#第5章: 係り受け解析\n#夏目漱石の小説『吾輩は猫である』の文章(neko.txt)をCaboChaを使って係り受け解析し,\n# その結果をneko.txt.cabochaというファイルに保存せよ.\n\nimport CaboCha\n\nwith open('./data/neko.txt',mode='r',encoding='utf_8') as fr:\n r = fr.read()\n\nneko = r.split('\\n')\ncabocha = CaboCha.Parser()\n\nl = []\nfor s in neko:\n tree = cabocha.parse(s)\n l.append(tree.toString(CaboCha.FORMAT_LATTICE))\n\nwith open('./data/neko.txt.cabocha',mode='w',encoding='utf_8') as fw:\n fw.write(''.join(l))","sub_path":"section5/exercise40-0.py","file_name":"exercise40-0.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"39722994","text":"# -*- coding:utf-8 -*-\nimport sys\nsys.path.append(\"../\")\n\nfrom sqlalchemy import create_engine, and_, or_\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relation, scoped_session\n\nfrom sqlalchemy import Column, String, Text, Integer, DATETIME, SMALLINT\nfrom sqlalchemy.dialects.mysql import FLOAT\n\n\n\ndef init(LOGIN_INFO):\n ip = LOGIN_INFO.IP\n port = LOGIN_INFO.PORT\n username = LOGIN_INFO.USERNAME\n password = LOGIN_INFO.PASSWORD\n db_name = LOGIN_INFO.DB_NAME\n encoding = LOGIN_INFO.ENCODING\n login_info = \"mysql+pymysql://{username}:{password}@{ip}:{port}/{db_name}?charset={encoding}\".format(\n username=username,password=password,ip=ip, port=port,db_name=db_name,encoding=encoding)\n # login_info = \"mysql+pymysql://root:0Kcloud!@#852@147@192.168.0.160:33066/okcloud\"\n global engine\n global Base\n engine = create_engine(login_info, convert_unicode=True, pool_recycle=3600, pool_size=5, max_overflow=5,\n pool_timeout=3600) # , echo=True)\n Base = declarative_base()\n\n\nclass event_DB():\n def __init__(self):\n \"\"\"\n 生成子表的 实例(即,建立与数据库具体表的链接)\n :param user_id:\n \"\"\"\n self.mySession = sessionmaker(bind=engine)\n self.MyTable = self.connDB()\n\n def connDB(self):\n \"\"\"\n 建立一个表的子类\n :return:\n \"\"\"\n\n class MyTable(Base):\n # 声明表名,与表的结构\n __tablename__ = 'event'\n __table_args__ = {\"useexisting\": True}\n \n # 表的结构\n sid = Column(Integer, primary_key=True)\n timestamp = Column(DATETIME)\n return MyTable\n\n def query(self, start_time):\n session = self.mySession()\n results = session.query(self.MyTable)\n row = results.filter( self.MyTable.timestamp>start_time).all()\n session.close()\n return row\n\n def getPayload(self, start_time):\n results = self.query(start_time)\n payload = []\n for result in results:\n payload.append({\n 'sid': result.sid,\n 'cid': result.cid,\n 'signature': result.signature,\n 'timestamp': str(result.timestamp),\n })\n return payload\n\n def update(self, d_port, start_time, end_time, event_id):\n session = self.mySession()\n # print d_port, start_time, end_time, event_id\n if d_port:\n session.query(self.MyTable).filter(and_(self.MyTable.event_id == 1000002, self.MyTable.d_port == d_port, self.MyTable.alert_time >= start_time, self.MyTable.alert_time <= end_time)).update({'event_id':event_id})\n else:\n session.query(self.MyTable).filter(and_(self.MyTable.event_id == 1000002, self.MyTable.alert_time >= start_time, self.MyTable.alert_time <= end_time)).update({'event_id':event_id})\n session.commit()\n session.close()\n # print \"更新成功\"\n return True\n\n\n def lastTime(self):\n session = self.mySession()\n rst = session.query(self.MyTable.alert_time).order_by(self.MyTable.alert_time.desc()).first()\n if rst:\n return str(rst[0])\n else:\n return rst","sub_path":"template/sqlalchemy/dbCrate/oneServer.py","file_name":"oneServer.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"340075472","text":"import sys\n\nfrom specview.ui.controller import Controller\n\nclass SpecView(Controller):\n '''Run the GUI interactively.'''\n\n app = None\n\n def __init__(self, argv=None):\n from specview.ui.qt.pyqt_nonblock import pyqtapplication\n\n if self.__class__.app is None:\n self.__class__.app = pyqtapplication(argv)\n\n super(SpecView, self).__init__()\n self.viewer.show()\n\n\nif __name__ == '__main__':\n sv = SpecView(sys.argv)\n\n sys.exit(sv.app.exec_())\n","sub_path":"specview/ui/sv.py","file_name":"sv.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"122542627","text":"n = int(input())\nstages = list(map(int, input().split()))\n\nanswer = [[i+1] for i in range(n)]\nlength = len(stages) #전체 길이 length\n\nfor i in range(n):\n answer[i].append(stages.count(i+1)) #answer에 해당 수가 몇개씩인지 저장\n\nfor i in range(n):\n if length == 0:\n answer[i].append(0) #0으로 나누는 예외 방지. length0인 것은 해당 단계 간 사람 없는것. 0 삽입.\n else:\n answer[i].append(answer[i][1] / length) #실패율 저장.\n length -= answer[i][1] #전체 수에서 아래단계부터 올라가며 count 빼줌.\n\nanswer.sort(key = lambda x : x[2], reverse = True) #실패율 기준 내림차순 정렬\nanswer = [i[0] for i in answer]\n\nprint(answer)\n","sub_path":"정렬/[Q25]실패율.py","file_name":"[Q25]실패율.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"244450387","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 3 01:48:44 2020\r\n\r\n@author: Diego\r\n\"\"\"\r\n\r\n#this reads the file\r\ndf = pd.read_csv(\"movie_s_a_dataset.csv\", encoding = 'utf-8')\r\n\r\n#this trains the data\r\nX_train = df['review'].values\r\ny_train = df['sentiment'].values\r\n\r\n#this puts the data into the vector\r\nX_train = vect.transform(X_train)\r\n\r\n#this fits the model\r\nclf.fit(X_train, y_train)\r\n\r\npickle.dump(stop, open('stopwords.pkl', 'wb'), protocol = 4)\r\npickle.dump(clf, open('classifier.pkl', 'wb'), protocol = 4)","sub_path":"TommasoDePonti/sentimentAnalysis/pkl_builder_createpkl.py","file_name":"pkl_builder_createpkl.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"484492839","text":"from django.urls import path\nfrom . import views\n \nurlpatterns = [\n path('', views.post),\n path('logout', views.logout),\n path('post_post', views.post_post),\n path('users/', views.user_infor),\n path('add_favorite', views.add_favorite),\n path('remove_favorite', views.remove_favorite),\n path('remove_post', views.remove_post),\n path('',views.edit),\n path('/edit', views.editsubmit)\n]","sub_path":"python_stack/django/django_intro/hoang_project/apps/chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"101076025","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom checker import Checker\nfrom conf import alluminium_settings\n\n__author__ = \"Michał Kurkowski\"\n__license__ = \"MIT\"\n__email__ = \"kura@mat.umk.pl\"\n__status__ = \"Development\"\n\n\nclass JavaChecker(Checker):\n \"\"\"Class for checking java code.\"\"\"\n\n def test_java(self):\n if self.is_legal():\n if self.perform_compilation():\n time_or_error, status = self.perform_interpretation('TestRunner')\n json_file = alluminium_settings['json_file']\n with open(json_file, 'r') as f:\n temp = f.read()\n with open(json_file, 'w') as f:\n f.write(temp % {'time': time_or_error})\nif __name__ == '__main__':\n testing = JavaChecker()\n testing.test_java()\n","sub_path":"scripts/java_script.py","file_name":"java_script.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"416005065","text":"# D. Mery, UC, September, 2019\n# http://domingomery.ing.puc.cl\n\n# Face images are in directory 'faces', there are two faces of M. Bachelet (mb_01 and mb_02), \n# two faces of S. Piñera (sp_01 and sp_02) and one face of somebody else (xx_01).\n# The features are stored in npy file, one row per image using the following order:\n#\n# mb_01.jpg...\n# mb_02.jpg...\n# sp_01.jpg...\n# sp_02.jpg...\n# xx_01.jpg...\n# \n# In order to use cosine similarity, each row must normalized to uni-norm\n# the normalized features (norm = 1 for each row) must be stored in matrix X\n# Thus, X * X' = \n# mb_01 mb_02 sp_01 sp_02 xx_01\n# mb_01 1.0000 0.9755 0.8425 0.8601 0.8850\n# mb_02 0.9755 1.0000 0.8311 0.8661 0.8750\n# sp_01 0.8425 0.8311 1.0000 0.9534 0.8474\n# sp_02 0.8601 0.8661 0.9534 1.0000 0.8617\n# xx_01 0.8850 0.8750 0.8474 0.8617 1.0000\n#\n# with a threshold = 0.95 the recognition is possible\n\nimport numpy as np\nimport face_recognition # install with pip install face_recognition\n\nfrom utils import dirfiles, num2fixstr, imread\n\ndef dlibfeatures(image):\n fl = None #dlib without face detection\n x0 = face_recognition.face_encodings(image,fl)\n if len(x0)==0:\n fl = [[0,len(image)-1,len(image[0])-1,0]]\n x = face_recognition.face_encodings(image,fl)[0]\n else:\n x = x0[0]\n return x\n\n\ndirpath = '../faces/'\nimg_names = dirfiles(dirpath,'*.jpg')\n\nn = len(img_names)\n\ni = 0\nfor i in range(n):\n img = img_names[i]\n print('dlib: '+ num2fixstr(i,4)+'/'+num2fixstr(n,4)+ ': reading '+img+'...')\n\n img = imread(dirpath+img)\n features = dlibfeatures(img)\n if i==0:\n m = features.shape[0]\n data = np.zeros((n,m))\n \n data[i] = features\n\nprint('original features (not normalized) are saved in data_dlib.npy... ')\nnp.save('data_dlib',data)\n\n","sub_path":"clases/Cap03_DeepLearning/python/facerecognition/dlib/main_dlib.py","file_name":"main_dlib.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"280553496","text":"#!/usr/bin/env python\n\nimport rospy\nimport actionlib\nfrom arm_server.msg import SimplePickAction, SimplePlaceAction, SimpleTargetAction, SimplePickGoal, SimplePlaceGoal, \\\n SimpleTargetGoal\n\n\"\"\" ################################################ PICK CALLBACKS ################################################\"\"\"\n\n\n# Called once when the goal completes\ndef pick_done_callback(state, result):\n rospy.loginfo(\"[pick_client]: finished in state [%d]\", state)\n rospy.loginfo(\"[pick_client]: answer - x: %f, y: %f, z: %f\", result.x, result.y, result.z)\n # shutdown ros\n\n\n# Called once when the goal becomes active\ndef pick_active_callback():\n rospy.loginfo(\"[pick_client]: goal just went active\")\n\n\n# Called every time feedback is received for the goal\ndef pick_feedback_callback(feedback):\n rospy.loginfo(\"[pick_client]: feedback - x: %f, y: %f, z: %f, distance: %f\",\n feedback.x, feedback.y, feedback.z, feedback.distance)\n\n\n\"\"\" ################################################ PLACE CALLBACKS ################################################\"\"\"\n\n\n# Called once when the goal completes\ndef place_done_callback(state, result):\n rospy.loginfo(\"[place_client]: finished in state [%d]\", state)\n rospy.loginfo(\"[place_client]: answer - x: %f, y: %f, z: %f\", result.x, result.y, result.z)\n # shutdown ros\n\n\n# Called once when the goal becomes active\ndef place_active_callback():\n rospy.loginfo(\"[place_client]: goal just went active\")\n\n\n# Called every time feedback is received for the goal\ndef place_feedback_callback(feedback):\n rospy.loginfo(\"[pick_client]: feedback - x: %f, y: %f, z: %f, distance: %f\",\n feedback.x, feedback.y, feedback.z, feedback.distance)\n\n\n\"\"\" ############################################### TARGET CALLBACKS ###############################################\"\"\"\n\n\n# Called once when the goal completes\ndef target_done_callback(state, result):\n rospy.loginfo(\"[target_client]: finished in state [%d]\", state)\n rospy.loginfo(\"[target_client]: answer - x: %f, y: %f, z: %f\", result.x, result.y, result.z)\n # shutdown ros\n\n\n# Called once when the goal becomes active\ndef target_active_callback():\n rospy.loginfo(\"[target_client]: goal just went active\")\n\n\n# Called every time feedback is received for the goal\ndef target_feedback_callback(feedback):\n rospy.loginfo(\"[target_client]: feedback - x: %f, y: %f, z: %f, distance: %f\",\n feedback.x, feedback.y, feedback.z, feedback.distance)\n\n\n\"\"\" ################################################################################################################\"\"\"\n\n\ndef pick_demo():\n pick_client = actionlib.SimpleActionClient('simple_pick', SimplePickAction)\n\n rospy.loginfo(\"[pick_client]: waiting for pick_server...\")\n pick_client.wait_for_server()\n rospy.loginfo(\"[pick_client]: ready\")\n\n # build goal\n goal = SimplePickGoal()\n goal.frame_id = \"/base_footprint\"\n goal.obj_name = \"target\"\n\n # set target coordinates\n goal.x = 0.7\n goal.y = 0.0\n goal.z = 0.6\n # set target cylinder primitives\n goal.h = 0.145\n goal.w = 0.03\n\n # send goal to action server\n pick_client.send_goal(goal, pick_done_callback, pick_active_callback, pick_feedback_callback)\n\n\ndef place_demo():\n place_client = actionlib.SimpleActionClient('simple_place', SimplePlaceAction)\n\n rospy.loginfo(\"[place_client]: waiting for pick_server...\")\n place_client.wait_for_server()\n rospy.loginfo(\"[place_client]: ready\")\n\n # build goal\n goal = SimplePlaceGoal()\n goal.frame_id = \"/base_footprint\"\n goal.obj_name = \"target\"\n\n # set target coordinates\n goal.x = 0.7\n goal.y = 0.0\n goal.z = 0.6\n\n # send goal to action server\n place_client.send_goal(goal, place_done_callback, place_active_callback, place_feedback_callback)\n\n\ndef target_demo(x, y, z, frame_id):\n target_client = actionlib.SimpleActionClient('simple_target', SimpleTargetAction)\n\n rospy.loginfo(\"[target_client]: waiting for pick_server...\")\n target_client.wait_for_server()\n rospy.loginfo(\"[target_client]: ready\")\n\n # build goal\n goal = SimpleTargetGoal()\n goal.frame_id = frame_id # \"/base_footprint\" \"/head_pan_link\"\n\n # set target coordinates\n goal.x = x # 0.5\n goal.y = y # 0.271\n goal.z = z # 0.253\n\n # send goal to action server\n target_client.send_goal(goal, target_done_callback, target_active_callback, target_feedback_callback)\n\nif __name__ == '__main__':\n rospy.init_node('client_demo_node')\n\n chosen = 0\n\n while (not rospy.is_shutdown()) and chosen != 9:\n print(\"Please choose one of the following actions:\")\n print(\"1 - pick demo\")\n print(\"2 - place demo\")\n print(\"3 - target demo\")\n print(\"9 - quit\")\n\n chosen = input()\n if chosen == 1:\n rospy.loginfo(\"[client_demo]: executing pick demo\")\n pick_demo()\n elif chosen == 2:\n rospy.loginfo(\"[client_demo]: executing place demo\")\n place_demo()\n elif chosen == 3:\n rospy.loginfo(\"[client_demo]: executing target demo\")\n x = input(\"x = \")\n y = input(\"y = \")\n z = input(\"z = \")\n frame_id = raw_input(\"frame_id = \")\n target_demo(x, y, z, frame_id)\n elif chosen == 9:\n rospy.loginfo(\"[client_demo]: exiting...\")\n break\n else:\n rospy.logwarn(\"[client_demo]: Wrong input. Please choose valid option from menu\")\n","sub_path":"scripts/arm_client.py","file_name":"arm_client.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"439461528","text":"#!/usr/bin/python\nimport sys\ndef hcf (i,j):\n hcf_value = min(i,j)\n if i==j or i%j==0 or j%i==0:\n return hcf_value\n for k in range (hcf_value//2,0,-1):\n if i%k==0 and j%k==0:\n hcf_value=k\n break\n return hcf_value \n\nprint (\"{0},{1} highe common factore is {2}\".format(sys.argv[1],sys.argv[2],hcf(int(sys.argv[1]),int(sys.argv[2]))))\n","sub_path":"highest_common_factor.py","file_name":"highest_common_factor.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"17854479","text":"import argparse\nimport sys\nimport getopt\n\n\nprint(\"script_name:\",sys.argv[0])\n\nfor i in range(1,len(sys.argv)):\n\tprint(\"param\",i,sys.argv[i])\n\nopts,args = getopt.getopt(sys.argv[1:],\"hi:o:\")\ninput_file=\"\"\noutput_file=\"\"\nfor op,value in opts:\n\tif op == \"-i:\":\n\t\tinput_file = value\n\telif op == \"-o\":\n\t\toutput_file = value\n\telif op == \"-h\":\n\t\tusage()\n\t\tsys.exit()\t\n\n\n\nprint(\"input_file = \",input_file)\nprint(\"output_file = \",output_file)\n'''\n\n'''\n\n\n","sub_path":"argument.py","file_name":"argument.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"3742854","text":"# Question 1\n\nimport csv\nimport requests\n\nprint(\"\\nList of downloaded images\\n\")\nwith open('100books.csv') as csv_file :\n books = csv.DictReader(csv_file)\n for book in books:\n image_url = book['image_url']\n image_name = image_url.split(\"/\")[-1]\n with open(image_name,\"wb\") as file :\n response = requests.get(image_url)\n content = response.content\n file.write(content)\n print(\"\\nDownloaded {}\".format(image_name))\n\n","sub_path":"session 7/downloads_book_cover_image.py","file_name":"downloads_book_cover_image.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"505767526","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import MyFileSerializer\nfrom rest_framework.parsers import MultiPartParser, FormParser\nimport os, sys\nparser_classes = (MultiPartParser, FormParser)\nimport singlePhotoCrops\nimport getCropInfo\nimport time\nfrom django.http import FileResponse\nimport operator\nimport json\nimport collections\nimport cv2\n\nos.environ[\"MXNET_CUDNN_AUTOTUNE_DEFAULT\"] = \"0\"\n\nconnString = 'dbname = tengri_db user = postgres password = postgres host = localhost port = 5432'\nextension = '.jpg'\ncrop_size = 112\ngpu_id = 0\nflip = 0\ndet_threshold = 0.9\ndetection_model = 'models/detection/R50'\nscales = [1080,1920]\n\nimg_size = '112,112'\nrecognition_model = 'models/recognition/model,0'\nga_model = ''\ndet = 0\nthreshold = 1.24\n\nrecognizer = getCropInfo.getPhotoInfo('crops/', 'crops/', 'media/', connString, extension, img_size, recognition_model, ga_model, gpu_id, det, flip, threshold) \ncropper = singlePhotoCrops.getCrops('media/', 'crops/', connString, extension, crop_size, gpu_id, flip, det_threshold, detection_model, scales, recognizer)\n\n@api_view(['POST'])\ndef get_photo_align_large_files(request):\n if 'file' in request.data: \n\n file_serializer = MyFileSerializer(data=request.data)\n if file_serializer.is_valid():\n [os.remove('media/' + f) for f in os.listdir('media/')]\n file_serializer.save()\n\n img_name = request.FILES[u'file'].name\n # Read original and save crops somewhere \n try:\n [os.remove('crops/' + f) for f in os.listdir('crops/')]\n res = cropper.cropOriginal(45, 22, img_name)\n if res is not None:\n data = {'res': [i for i in range(len(res))]} \n else:\n data = {'res' : None}\n except:\n data = {'test': 'failure'}\n # Read original and save crops somewhere\n return Response(data)\n\n\n@api_view(['GET'])\ndef get_photo_align(request): \n if request.method == 'GET':\n # Here we need change file paths\n choose = request.query_params.get('data') \n file_path = 'crops/face_' + str(choose) + '.jpg'\n try:\n f = open(file_path, \"rb\")\n response = FileResponse(f)\n except:\n response_obj = {'error': 'File was not founded'}\n return Response(response_obj, status=204)\n return response\n\n response_obj = {'failed': 'no value of \"data\" with right order'}\n return Response(response_obj)\n\n\n@api_view(['POST'])\ndef get_photo_metadata(request):\n if 'data' in request.POST:\n choose = request.POST.get('data') \n # Read one crop and return json with top5 information\n file_path = 'face_' + str(choose) + '.jpg'\n data = recognizer.compareFeatures(file_path, '10.150.34.15', 3306, 'root', '')\n \n result_dict = dict()\n for r in data['person']:\n result_dict[r['fio']] = [r['feature'], r['udv_no'], r['iin'], r['fio']]\n\n od = collections.OrderedDict(sorted(result_dict.items(), key=operator.itemgetter(1), reverse=True))\n\n return Response(od, status=200) \n # Read one crop and return json with top5 information\n data = {'failed': 'no value of \"data\" with right order'}\n return Response(data)\n\n\n@api_view(['GET'])\ndef get_photo_images(request):\n if request.method == 'GET':\n # Here we need to change file paths\n choose = request.query_params.get('data') \n file_path = 'our_base/' + str(choose) + '.jpeg'\n try:\n f = open(file_path, \"rb\")\n response = FileResponse(f)\n except:\n response_obj = {'error': 'File was not founded'}\n return Response(response_obj, status=204)\n return response \n\n response_obj = {'failed': 'no original image found for this person in database'}\n return Response(response_obj)\n\n\n@api_view(['POST'])\ndef get_red_people(request):\n if 'file' in request.data:\n file_serializer = MyFileSerializer(data=request.data)\n\n if file_serializer.is_valid():\n [os.remove('media/' + f) for f in os.listdir('media/')]\n file_serializer.save()\n\n img_name = request.FILES[u'file'].name\n # Read original and save crops somewhere \n try:\n [os.remove('crops/' + f) for f in os.listdir('crops/')]\n faces, landmarks = cropper.cropReds(img_name) # singlePhotoCrops \n data = {'res': [i for i in range(len(faces))]}\n except:\n data = {'res': 'failure, could not get crops'}\n # Read original and save crops somewhere\n if faces is not None: \n data = recognizer.compareReds(img_name, 35, 14, faces, landmarks, '10.150.34.15', 3306, 'root', '')\n if data is not None:\n return Response(data, status=200)\n else:\n data = {'res': None} \n else: \n data = {'res': None}\n return Response(data, status=200)\n # Read one crop and return json with top5 information\n data = {'failed': 'No person found with right order'}\n return Response(data)\n\n\n@api_view(['GET'])\ndef get_photo_red(request):\n if request.method == 'GET':\n # Here we need to change file paths\n choose = request.query_params.get('data') \n file_path = 'our_base/' + str(choose) + '.jpeg'\n try:\n f = open(file_path, \"rb\")\n response = FileResponse(f)\n except:\n response_obj = {'error': 'File was not founded'}\n return Response(response_obj, status=204)\n return response \n\n response_obj = {'failed': 'no original image found for this person in database'}\n return Response(response_obj)\n\n\n@api_view(['GET'])\ndef get_photo_redbase(request): \n if request.method == 'GET':\n # Here we need change file paths\n choose = request.query_params.get('data') \n file_path = 'crops/face_' + str(choose) + '.jpg'\n try:\n f = open(file_path, \"rb\")\n response = FileResponse(f)\n except:\n response_obj = {'error': 'File was not founded'}\n return Response(response_obj, status=204)\n return response\n\n response_obj = {'failed': 'no value of \"data\" with right order'}\n return Response(response_obj)\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"594536251","text":"import os\nimport logging\nfrom shutil import copyfile\nfrom typing import List, Any\nfrom hyperleaup.creation_mode import CreationMode\nfrom pyspark.sql import DataFrame\nfrom pyspark.sql.types import *\nfrom tableauhyperapi import SqlType, TableDefinition, NULLABLE, NOT_NULLABLE, TableName, HyperProcess, Telemetry, \\\n Inserter, Connection, CreateMode\nfrom pathlib import Path\n\n\ndef clean_dataframe(df: DataFrame) -> DataFrame:\n \"\"\"Replaces null or NaN values with '' and 0s\"\"\"\n schema = df.schema\n integer_cols = []\n long_cols = []\n double_cols = []\n float_cols = []\n string_cols = []\n for field in schema:\n if field.dataType == IntegerType():\n integer_cols.append(field.name)\n elif field.dataType == LongType():\n long_cols.append(field.name)\n elif field.dataType == DoubleType():\n double_cols.append(field.name)\n elif field.dataType == FloatType():\n float_cols.append(field.name)\n elif field.dataType == StringType():\n string_cols.append(field.name)\n\n # Replace null and NaN values with 0\n if len(integer_cols) > 0:\n df = df.na.fill(0, integer_cols)\n elif len(long_cols) > 0:\n df = df.na.fill(0, long_cols)\n elif len(double_cols) > 0:\n df = df.na.fill(0.0, double_cols)\n elif len(float_cols) > 0:\n df = df.na.fill(0.0, float_cols)\n elif len(string_cols) > 0:\n df = df.na.fill('', string_cols)\n\n return df\n\n\ndef get_rows(df: DataFrame) -> List[Any]:\n \"\"\"Returns an array of rows given a Spark DataFrame\"\"\"\n return df.rdd.map(lambda row: [x for x in row]).collect()\n\n\ndef convert_struct_field(column: StructField) -> TableDefinition.Column:\n \"\"\"Converts a Spark StructField to a Tableau Hyper SqlType\"\"\"\n if column.dataType == IntegerType():\n sql_type = SqlType.int()\n elif column.dataType == LongType():\n sql_type = SqlType.big_int()\n elif column.dataType == ShortType():\n sql_type = SqlType.small_int()\n elif column.dataType == DoubleType():\n sql_type = SqlType.double()\n elif column.dataType == FloatType():\n sql_type = SqlType.double()\n elif column.dataType == BooleanType():\n sql_type = SqlType.bool()\n elif column.dataType == DateType():\n sql_type = SqlType.date()\n elif column.dataType == TimestampType():\n sql_type = SqlType.timestamp()\n elif column.dataType == StringType():\n sql_type = SqlType.text()\n else:\n # Trap the DecimalType case\n if str(column.dataType).startswith(\"DecimalType\"):\n # Max precision is only up to 18 decimal places in Tableau Hyper API\n precision = column.dataType.precision if column.dataType.precision <= 18 else 18\n scale = column.dataType.scale\n sql_type = SqlType.numeric(precision, scale)\n else:\n raise ValueError(f'Invalid StructField datatype for column `{column.name}` : {column.dataType}')\n nullable = NULLABLE if column.nullable else NOT_NULLABLE\n return TableDefinition.Column(name=column.name, type=sql_type, nullability=nullable)\n\n\ndef get_table_def(df: DataFrame, schema_name: str, table_name: str) -> TableDefinition:\n \"\"\"Returns a Tableau TableDefintion given a Spark DataFrame\"\"\"\n schema = df.schema\n cols = list(map(convert_struct_field, schema))\n return TableDefinition(\n table_name=TableName(\"Extract\", \"Extract\"),\n columns=cols\n )\n\n\ndef insert_data_into_hyper_file(data: List[Any], name: str, table_def: TableDefinition):\n \"\"\"Helper function that inserts data into a .hyper file.\"\"\"\n # first, create a temp directory on the driver node\n tmp_dir = f\"/tmp/hyperleaup/{name}/\"\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n hyper_database_path = f\"/tmp/hyperleaup/{name}/{name}.hyper\"\n with HyperProcess(telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU) as hp:\n with Connection(endpoint=hp.endpoint,\n database=hyper_database_path,\n create_mode=CreateMode.CREATE_AND_REPLACE) as connection:\n connection.catalog.create_schema(schema=table_def.table_name.schema_name)\n connection.catalog.create_table(table_definition=table_def)\n with Inserter(connection, table_def) as inserter:\n inserter.add_rows(rows=data)\n inserter.execute()\n\n return hyper_database_path\n\n\ndef copy_data_into_hyper_file(csv_path: str, name: str, table_def: TableDefinition) -> str:\n \"\"\"Helper function that copies data from a CSV file to a .hyper file.\"\"\"\n hyper_database_path = f\"/tmp/hyperleaup/{name}/{name}.hyper\"\n with HyperProcess(telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU) as hp:\n with Connection(endpoint=hp.endpoint,\n database=Path(hyper_database_path),\n create_mode=CreateMode.CREATE_AND_REPLACE) as connection:\n\n connection.catalog.create_schema(schema=table_def.table_name.schema_name)\n connection.catalog.create_table(table_definition=table_def)\n\n # The most efficient method for adding data to a table is with the COPY command\n copy_command = f\"COPY \\\"Extract\\\".\\\"Extract\\\" from '{csv_path}' with (format csv, NULL 'null', delimiter ',', header)\"\n count = connection.execute_command(copy_command)\n logging.info(f\"Copied {count} rows.\")\n\n return hyper_database_path\n\n\ndef write_csv_to_local_file_system(df: DataFrame, name: str) -> str:\n \"\"\"Writes a Spark DataFrame to a single CSV file on the local filesystem.\"\"\"\n tmp_dir = f\"/tmp/hyperleaup/{name}/\"\n\n # write the DataFrame to local disk as a single CSV file\n cleaned_df = clean_dataframe(df)\n cleaned_df.coalesce(1).write \\\n .option(\"delimiter\", \",\") \\\n .option(\"header\", \"true\") \\\n .mode(\"overwrite\").csv(tmp_dir)\n\n # Spark DataFrameWriter will write metadata alongside the CSV,\n # ignore metedata and return only the CSV filename\n for root_dir, dirs, files in os.walk(tmp_dir):\n for file in files:\n if file.endswith(\".csv\"):\n return f\"{tmp_dir}/{file}\"\n\n\ndef write_csv_to_dbfs(df: DataFrame, name: str) -> str:\n \"\"\"Moves a CSV written to a Databricks Filesystem to a temp directory on the driver node.\"\"\"\n tmp_dir = f\"/tmp/hyperleaup/{name}/\"\n\n # write the DataFrame to DBFS as a single CSV file\n # cleaned_df = clean_dataframe(df)\n df.coalesce(1).write \\\n .option(\"delimiter\", \",\") \\\n .option(\"header\", \"true\") \\\n .option(\"nullValue\", \"null\") \\\n .mode(\"overwrite\").csv(tmp_dir)\n\n # Spark DataFrameWriter will write metadata alongside the CSV,\n # ignore metedata and return only the CSV filename\n dbfs_tmp_dir = \"/dbfs\" + tmp_dir\n csv_file = None\n for root_dir, dirs, files in os.walk(dbfs_tmp_dir):\n for file in files:\n if file.endswith(\".csv\"):\n csv_file = file\n\n if csv_file is None:\n raise FileNotFoundError(f\"CSV file '{tmp_dir}' not found on DBFS.\")\n\n # Copy CSV from DBFS location to temp dir on driver node\n if not os.path.exists(tmp_dir):\n os.makedirs(tmp_dir)\n src_path = dbfs_tmp_dir + csv_file\n dest_path = tmp_dir + csv_file\n copyfile(src_path, dest_path)\n\n return dest_path\n\n\nclass Creator:\n\n def __init__(self, df: DataFrame, name: str,\n is_dbfs_enabled: bool = False,\n creation_mode: str = CreationMode.COPY.value,\n null_values_replacement = None):\n if null_values_replacement is None:\n null_values_replacement = {}\n self.df = df\n self.name = name\n self.is_dbfs_enabled = is_dbfs_enabled\n self.creation_mode = creation_mode\n self.null_values_replacement = null_values_replacement\n\n def create(self) -> str:\n \"\"\"Creates a Tableau Hyper File given a SQL statement\"\"\"\n if self.creation_mode.upper() == CreationMode.COPY.value:\n\n # Write Spark DataFrame to CSV so that a file COPY can be done\n if not self.is_dbfs_enabled:\n logging.info(\"Writing Spark DataFrame to local disk...\")\n csv_path = write_csv_to_local_file_system(self.df, self.name)\n else:\n logging.info(\"Writing Spark DataFrame to DBFS...\")\n csv_path = write_csv_to_dbfs(self.df, self.name)\n\n # Convert the Spark DataFrame schema to a Tableau `TableDefinition`\n logging.info(\"Generating Tableau Table Definition...\")\n table_def = get_table_def(self.df, \"Extract\", \"Extract\")\n\n # COPY data into a Tableau .hyper file\n logging.info(\"Copying data into Hyper File...\")\n database_path = copy_data_into_hyper_file(csv_path, self.name, table_def)\n\n elif self.creation_mode.upper() == CreationMode.INSERT.value:\n\n # Collect the DataFrame rows into the Driver\n logging.info(\"Collecting rows back to Driver...\")\n data = get_rows(self.df)\n\n # Convert the Spark DataFrame schema to a Tableau `TableDefinition`\n logging.info(\"Converting Spark DataFrame schema to Tableau Table Definition...\")\n table_def = get_table_def(self.df, \"Extract\", \"Extract\")\n\n # Insert data into a Tableau .hyper file\n logging.info(\"Inserting data into Hyper File...\")\n database_path = insert_data_into_hyper_file(data, self.name, table_def)\n\n else:\n raise ValueError(f'Invalid \"creation_mode\" specified: {self.creation_mode}')\n\n logging.info(\"Hyper File successfully created!\")\n\n return database_path\n","sub_path":"hyperleaup/creator.py","file_name":"creator.py","file_ext":"py","file_size_in_byte":9733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"617943942","text":"#coding=utf-8\nimport re\nimport urllib.request\nfrom collections import deque\nfrom bs4 import BeautifulSoup\n\ndef test():\n queue = deque()\n visited = set()\n baseUrl = \"http://\"\n url = baseUrl\n queue.append(url)\n cnt = 0\n resultList = []\n while queue:\n url = queue.popleft()\n visited |= {url}\n print(\"已经抓取: \" + str(cnt) + \" 正在抓取: \" + url)\n\n try:\n urlop = urllib.request.urlopen(url, timeout=10)\n if 'html' not in urlop.getheader('Content-Type'):\n continue\n try:\n cnt += 1\n data = urlop.read().decode(\"utf-8\")\n except:\n print(\"读写失败: \" + url)\n continue\n soup = BeautifulSoup(data, \"lxml\")\n hrefTag = soup.find_all(\"a\", attrs={\"href\":re.compile('^/.*/')})\n for x in hrefTag:\n nextUrl = x.attrs[\"href\"]\n print(\"nextUrl = \" + nextUrl)\n tempUrl = baseUrl + nextUrl\n print(\"tempUrl = \" + tempUrl)\n if tempUrl not in visited:\n queue.append(tempUrl)\n tempDict = {}\n tempDict[\"sourceUrl\"] = url\n tempDict[\"click\"] = x.text\n tempDict[\"nextUrl\"] = tempUrl\n resultList.append(tempDict)\n print(\"加入队列 ---> \" + tempUrl)\n print(\"--------------\")\n except:\n print(\"访问失败: \" + url)\n continue\n print(\"finish\")\n\nif __name__ == \"__main__\":\n test()","sub_path":"BeautifulSoup/demo_2.py","file_name":"demo_2.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"49342692","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Pays',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('nom_pays', models.CharField(max_length=20, unique=True)),\n ('commentaire', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Region',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('nom_region', models.CharField(max_length=20, unique=True)),\n ('commentaire', models.TextField()),\n ('nom_pays', models.ForeignKey(null=True, to_field='nom_pays', blank=True, to='macave.Pays')),\n ],\n ),\n migrations.CreateModel(\n name='Vin',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('nom_vin', models.CharField(max_length=50)),\n ('etiquette', models.CharField(max_length=30)),\n ('millesime', models.IntegerField()),\n ('commentaire', models.TextField()),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('published_date', models.DateTimeField(auto_now=True, null=True)),\n ('nom_region', models.ForeignKey(null=True, to_field='nom_region', blank=True, to='macave.Region')),\n ],\n ),\n ]\n","sub_path":"macave/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"646231948","text":"#coding:utf-8\n__author__ = 'SuHan'\n\nfrom enum import Enum\nMonth = Enum('Month',('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug'\n ,'Sep','Oct','Nov','Dec'))\nfor name,member in Month.__members__.items():\n print(name,'->',member.value)\n'''\n可以看出,创建完这个枚举类Month之后,\n里面的每一组实例都有'名字'和'项'两个属性\n名字就是实例名,项就是实力本身,\nmember.value就是该实例所处的位置\n'''\n\n'''\n这样创建枚举类,里面的实例只会按照排列顺序来进行,被赋值\n如果我们要自定义每个实例的值是多少时,就要按照如下方法来写\n'''\nfrom enum import unique\n@unique\nclass Weekday(Enum):#自己创建一个类,继承枚举类\n Sun = 0\n Mon = 1\n Tue = 2\n Wed = 3\n Thu = 4\n Fri = 5\n Sat = 6\n#其中,unique装饰器可以确保枚举实例没有重复\n\na = Weekday.Tue\nprint(a)\nprint(a.name)\nprint(a.value)\n","sub_path":"enumeration.py","file_name":"enumeration.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"434143794","text":"__author__ = 'Fabio'\r\ndef str_entra():\r\n palavra = input('Digite a palavra para testar: ')\r\n return palavra.lower().replace(\" \", \"\")\r\n\r\ndef anagrama(palavra, palavra_2):\r\n count = 0\r\n for i in range(len(palavra_2)):\r\n if palavra.count(palavra_2[i]) == palavra_2.count(palavra_2[i]):\r\n count += 1\r\n if count == len(palavra) and len(palavra_2):\r\n print('São anagramas')\r\n else:\r\n print('Não são anagramas')\r\n\r\nanagrama(str_entra(), str_entra())\r\n","sub_path":"Exercicios 2/Aula 9 - Strings/aula 9 - q6(atualizada).py","file_name":"aula 9 - q6(atualizada).py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"110922472","text":"def percentage_happy(sentence):\r\n sentence=sentence.lower()\r\n amount_words=sentence.count(' ') +1\r\n amount_laugh=sentence.count('laugh')\r\n amount_happiness=sentence.count('happiness')\r\n amount_love=sentence.count('love')\r\n amount_excellent=sentence.count('excellent')\r\n amount_good=sentence.count('good')\r\n amount_happy=amount_laugh+amount_happiness+amount_love+amount_excellent+amount_good\r\n if amount_words == 0:\r\n return 0.000\r\n return amount_happy/amount_words\r\ndef percentage_sad(sentence):\r\n sentence=sentence.lower()\r\n amount_words=sentence.count(' ') +1\r\n amount_bad=sentence.count('bad')\r\n amount_sad=sentence.count('sad')\r\n amount_terrible=sentence.count('terrible')\r\n amount_horrible=sentence.count('horrible')\r\n amount_problem=sentence.count('problem')\r\n amount_sad=amount_bad+amount_sad+amount_terrible+amount_horrible+amount_problem\r\n if amount_words == 0:\r\n return 0.000 \r\n return amount_sad/amount_words\r\n\r\nsentence=input(str(\"Enter a sentence => \"))\r\nprint(sentence)\r\nprint(\"Percentages. happy: {:.3f} sad: {:.3f}\".format(percentage_happy(sentence), percentage_sad(sentence)))\r\npercentage_happy=percentage_happy(sentence)\r\npercentage_sad=percentage_sad(sentence)\r\n\r\nif percentage_happy>percentage_sad:\r\n print(\"This is a happy sentence\")\r\nelif percentage_happy= 3]\n tokens = [word.lower() for word in tokens]\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n preprocessing_text = ' '.join(tokens)\n return preprocessing_text\n\n\ndef summarization(content):\n # The important content is assessed by the number of named entities and nouns in a sentence.\n results = []\n blog_content = content\n for sent_no, sentence in enumerate(nltk.sent_tokenize(blog_content)):\n no_of_tokens = len(nltk.word_tokenize(sentence))\n # POS Tagging\n tagged = nltk.pos_tag(nltk.word_tokenize(sentence))\n # Count of the Nouns in the sentence\n no_of_nouns = len([word for word, pos in tagged if pos in ['NN', 'NNP']])\n # Use NER to tag the named entities\n ners = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(sentence)), binary=False)\n no_of_ners = len([chunk for chunk in ners if hasattr(chunk, 'label()')])\n score = (no_of_nouns + no_of_ners) / float(no_of_tokens)\n results.append((sent_no, no_of_tokens, no_of_ners, no_of_nouns, score, sentence))\n return results\n\nif __name__ == '__main__':\n\n # Review analysis\n review_url = 'https://www.indeed.com/cmp/Discover-Financial-Services/reviews'\n review_html = url_request(review_url)\n review_data = review_parser(review_html)\n start_page = 20\n end_page = 1940\n for i in range(start_page, end_page, 20):\n review_url = 'https://www.indeed.com/cmp/Discover-Financial-Services/reviews?start=' + str(i)\n review_html = url_request(review_url)\n data = review_parser(review_html)\n review_data = review_data.append(data, ignore_index=True)\n\n print(review_data.shape)\n review_data.to_csv('Indeed_Review_Data1201.csv', index=False, encoding='utf-8')\n","sub_path":"NLP/Review_Analysis_Part_1.py","file_name":"Review_Analysis_Part_1.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"477926537","text":"import time\nfrom pathlib import Path\n\nfrom fastapi import FastAPI, APIRouter, Request, Depends\nfrom fastapi.templating import Jinja2Templates\nfrom sqlalchemy.orm import Session\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom app import crud\nfrom app.api import deps\nfrom app.api.api_v1.api import api_router\nfrom app.core.config import settings\n\nBASE_PATH = Path(__file__).resolve().parent\nTEMPLATES = Jinja2Templates(directory=str(BASE_PATH / \"templates\"))\n\nroot_router = APIRouter()\napp = FastAPI(title=\"Auth Service API\", openapi_url=f\"{settings.API_V1_STR}/openapi.json\")\n\n\norigins = [\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@root_router.get(\"/\", status_code=200)\ndef root(\n request: Request\n) -> dict:\n \"\"\"\n Root GET\n \"\"\"\n return {\"message\": \"Welcome to auth service!.\"}\n\n\n@app.middleware(\"http\")\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers[\"X-Process-Time\"] = str(process_time)\n return response\n\n\napp.include_router(api_router, prefix=settings.API_V1_STR)\napp.include_router(root_router)\n\n\nif __name__ == \"__main__\":\n # Use this for debugging purposes only\n import uvicorn\n\n uvicorn.run(app, host=\"0.0.0.0\", port=8000, log_level=\"debug\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"471262365","text":"from flask import Flask\nfrom .config import Config\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager\n\ndb = SQLAlchemy()\nmigrate = Migrate()\nbootstrap = Bootstrap()\nlogin_manager = LoginManager()\n\ndef create_app():\n\n app = Flask(__name__)\n app.config.from_object(Config)\n\n from . import main\n app.register_blueprint(main.bp)\n app.add_url_rule('/', endpoint='index')\n\n db.init_app(app)\n migrate.init_app(app, db)\n bootstrap.init_app(app)\n login_manager.init_app(app)\n\n from my_app.main.models import VanArtRequest, MailSubscription, User, Post\n\n @login_manager.user_loader\n def load_user(id):\n return User.query.get(int(id))\n\n @app.shell_context_processor\n def shell_context():\n return {'db': db, 'Mail': MailSubscription, 'Van': VanArtRequest, 'User': User, 'Post': Post}\n\n return app\n\n \n","sub_path":"my_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"152173525","text":"import json\n\nfrom datetime import datetime\n\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponse, UnreadablePostError\nfrom django.views.generic.edit import FormMixin\n\nfrom fromleaf_common.utils import database as db\nfrom fromleaf_common.utils.database import UserData\n\nfrom fromleaf_playing.common.views import PlayingCommonTemplateView, PlayingCommonListView\nfrom fromleaf_playing.ourhockey.forms import CheckAttendForm, AddGameDayForm\nfrom fromleaf_playing.ourhockey.models.Member import Person, Player, Member, Attendance\nfrom fromleaf_playing.ourhockey.models.GameDay import GameDay \n\nclass OurHockeyMainView(PlayingCommonTemplateView):\n \n template_name = 'ourhockey/ourhockey_main.html'\n\n def get_context_data(self, **kwargs):\n context = super(OurHockeyMainView, self).get_context_data(**kwargs)\n \n return context\n \n \nclass MemberListView(PlayingCommonListView):\n\n template_name = 'ourhockey/member_list.html'\n context_object_name = 'member_list'\n\n def get_queryset(self):\n return Member.objects.using('ourhockey').all()\n\n def get_context_data(self, **kwargs):\n context = super(MemberListView, self).get_context_data(**kwargs)\n\n return context\n \n\nclass GameScheduleListView(FormMixin, PlayingCommonListView):\n\n template_name = 'ourhockey/game_schedule.html'\n context_object_name = 'gameday_list'\n form_class = AddGameDayForm\n\n def get_queryset(self):\n return GameDay.objects.using('ourhockey').all()\n\n def get_context_data(self, **kwargs):\n context = super(GameScheduleListView, self).get_context_data(**kwargs)\n return context\n\n def post(self, request):\n try:\n post_gameday = request.POST.get('post_gameday')\n post_gametype = request.POST.get('post_gametype')\n response_data = {}\n\n gameday = GameDay(game_day=post_gameday, game_type=post_gametype)\n gameday.save()\n\n response_data['result'] = 'Create post successful!'\n response_data['game_day'] = gameday.game_day\n response_data['game_type'] = gameday.game_type\n\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n except UnreadablePostError:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )\n\n \nclass SelectTodayAttendListView(PlayingCommonListView):\n\n template_name = 'ourhockey/select_today_attend.html'\n context_object_name = 'select_member_list'\n\n def get_queryset(self):\n select_member_list = []\n dict_member = {}\n member_list = Member.objects.using('ourhockey').all()\n\n for member_ in member_list:\n try:\n attended_ = Attendance.objects.using('ourhockey').get(\n attended=True,\n attended_date=datetime.today(),\n member=member_\n )\n dict_member = {'member': member_, 'attended': attended_}\n select_member_list.append(dict_member)\n except ObjectDoesNotExist:\n dict_member = {'member': member_}\n select_member_list.append(dict_member)\n\n return select_member_list\n\n def get_context_data(self, **kwargs):\n context = super(\n SelectTodayAttendListView, self).get_context_data(**kwargs)\n return context\n\ndef update_today_attend_member(request):\n \"\"\"\n description: Update today's attendance of members\n get: request\n return: \n - json(result of attendance of members)\n - content_type: application/json\n \"\"\"\n if request.method == 'POST':\n post_attend = request.POST.get('attend')\n\n if post_attend.checked is 'checked':\n attend_member = Member(id=post_attend.value)\n attendance = Attendance(member=attend_member)\n attendance.attended = True\n attendance.save()\n\n for member_id in request.POST.getlist('attend'):\n attend_member = Member(id=member_id)\n attendance = Attendance(member=attend_member)\n attendance.attended = True\n attendance.save()\n\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )\n\n######################### BEGIN AJAX 연습 ############################\nfrom django.http import HttpResponseRedirect\nfrom fromleaf_playing.ourhockey.models.Post import Post\nfrom fromleaf_playing.ourhockey.forms import PostForm \n\n \ndef home(req):\n\n tmpl_vars = {\n 'all_posts': Post.objects.reverse(),\n 'form': PostForm()\n }\n return render(req, 'ourhockey/post_index.html', tmpl_vars)\n\n\ndef create_post(request):\n if request.method == 'POST':\n post_text = request.POST.get('the_post')\n response_data = {}\n\n post = Post(text=post_text, author=request.user.username)\n post.save()\n\n response_data['result'] = 'Create post successful!'\n response_data['postid'] = post.id\n response_data['text'] = post.text\n response_data['created'] = post.created.strftime('%B %d, %Y %I:%M %p')\n response_data['author'] = post.author\n\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )\n\n########################## END AJAX 연습중 ########################\n \n\nclass SelectedTodayAttendListView(PlayingCommonListView):\n\n template_name = 'ourhockey/selected_today_attend.html'\n context_object_name = 'selected_member_list'\n\n def get_queryset(self):\n select_member_list = []\n dict_member = {}\n member_list = Member.objects.using('ourhockey').all()\n\n for _member in member_list:\n try:\n _attended = Attendance.objects.using('ourhockey').get(\n attended=True,\n attended_date=datetime.today(),\n member=_member\n )\n dict_member = {'member': _member, 'attended': _attended}\n select_member_list.append(dict_member)\n except ObjectDoesNotExist:\n dict_member = {'member': _member}\n select_member_list.append(dict_member)\n\n return select_member_list\n\n def get_context_data(self, **kwargs):\n context = super(\n SelectedTodayAttendListView, self).get_context_data(**kwargs)\n\n return context\n\n def post(self, request, *args, **kwargs):\n selected_member_list = self.get_queryset()\n\n return render(request, self.template_name, {\n 'selected_member_list': selected_member_list,\n })\n \n \n \nclass TodayAttendedMemberListView(PlayingCommonListView):\n template_name = 'ourhockey/today_attended_list.html'\n context_object_name = 'attended_list'\n \n def get_queryset(self):\n attended_list = Attendance.objects.using('ourhockey').filter(\n attended=True,\n attended_date=datetime.today()\n )\n return attended_list\n\n def get_context_data(self, **kwargs):\n context = super(\n TodayAttendedMemberListView, self).get_context_data(**kwargs)\n\n return context\n","sub_path":"fromleaf_playing/ourhockey/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"554834264","text":"import copy\nimport json\nimport pprint\nfrom collections import OrderedDict\nfrom typing import (\n Callable,\n Dict,\n Iterator,\n List,\n MutableMapping,\n MutableSequence,\n MutableSet,\n Optional,\n Sequence,\n Set,\n Union\n)\n\nfrom semantic_version import Version\nfrom slicedimage import Collection, TileSet\nfrom slicedimage.io import Reader, resolve_path_or_url, resolve_url\nfrom slicedimage.urlpath import pathjoin\n\nfrom starfish.codebook.codebook import Codebook\nfrom starfish.config import StarfishConfig\nfrom starfish.imagestack.imagestack import ImageStack\nfrom starfish.imagestack.parser.crop import CropParameters\nfrom starfish.spacetx_format import validate_sptx\nfrom starfish.types import Axes, Coordinates\nfrom .version import MAX_SUPPORTED_VERSION, MIN_SUPPORTED_VERSION\n\n\nclass FieldOfView:\n \"\"\"\n This encapsulates a field of view. It contains the primary image and auxiliary images that are\n associated with the field of view.\n\n All images can be accessed using a the get_image('primary') method with the name of the image\n type. The primary image is accessed using the name\n :py:attr:`starfish.experiment.experiment.FieldOFView.PRIMARY_IMAGES`.\n\n Access a FOV through a experiment. experiement.fov()\n\n Attributes\n ----------\n name : str\n The name of the FOV.\n image_types : Set[str]\n A set of all the image types.\n \"\"\"\n\n PRIMARY_IMAGES = 'primary'\n\n def __init__(\n self, name: str,\n image_tilesets: MutableMapping[str, TileSet]\n ) -> None:\n \"\"\"\n Fields of views can obtain their primary image from either an ImageStack or a TileSet (but\n only one). It can obtain their auxiliary image dictionary from either a dictionary of\n auxiliary image name to ImageStack or a dictionary of auxiliary image name to TileSet (but\n only one).\n\n Note that if the source image is from a TileSet, the decoding of TileSet to ImageStack does\n not happen until the image is accessed. Be prepared to handle errors when images are\n accessed.\n \"\"\"\n self._images: MutableMapping[str, TileSet] = dict()\n self._name = name\n self.aligned_coordinate_groups: Dict[str, List[CropParameters]] = dict()\n for name, tileset in image_tilesets.items():\n self.aligned_coordinate_groups[name] = self.parse_coordinate_groups(tileset)\n self._images = image_tilesets\n\n def __repr__(self):\n images = '\\n '.join(\n f'{k}: {v}'\n for k, v in self._images.items()\n if k != FieldOfView.PRIMARY_IMAGES\n )\n return (\n f\"\\n\"\n f\" Primary Image: {self._images[FieldOfView.PRIMARY_IMAGES]}\\n\"\n f\" Auxiliary Images:\\n\"\n f\" {images}\"\n )\n\n def parse_coordinate_groups(self, tileset: TileSet) -> List[CropParameters]:\n \"\"\"Takes a tileset and compares the physical coordinates on each tile to\n create aligned coordinate groups (groups of tiles that have the same physical coordinates)\n\n Returns\n -------\n A list of CropParameters. Each entry describes the r/ch/z values of tiles that are aligned\n (have matching coordinates)\n \"\"\"\n coord_groups: OrderedDict[tuple, CropParameters] = OrderedDict()\n for tile in tileset.tiles():\n x_y_coords = (\n tile.coordinates[Coordinates.X][0], tile.coordinates[Coordinates.X][1],\n tile.coordinates[Coordinates.Y][0], tile.coordinates[Coordinates.Y][1]\n )\n # A tile with this (x, y) has already been seen, add tile's Indices to CropParameters\n if x_y_coords in coord_groups:\n crop_params = coord_groups[x_y_coords]\n crop_params._add_permitted_axes(Axes.CH, tile.indices[Axes.CH])\n crop_params._add_permitted_axes(Axes.ROUND, tile.indices[Axes.ROUND])\n if Axes.ZPLANE in tile.indices:\n crop_params._add_permitted_axes(Axes.ZPLANE, tile.indices[Axes.ZPLANE])\n else:\n coord_groups[x_y_coords] = CropParameters(\n permitted_chs=[tile.indices[Axes.CH]],\n permitted_rounds=[tile.indices[Axes.ROUND]],\n permitted_zplanes=[tile.indices[Axes.ZPLANE]] if Axes.ZPLANE in tile.indices\n else None)\n return list(coord_groups.values())\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def image_types(self) -> Set[str]:\n return set(self._images.keys())\n\n def show_aligned_image_groups(self) -> None:\n \"\"\"\n Describe the aligned subgroups for each Tileset in this FOV\n\n ex.\n {'nuclei': ' Group 0: ',\n 'primary': ' Group 0: '}\n\n Means there are two tilesets in this FOV (primary and nuclei), and because all images have\n the same (x, y) coordinates, each tileset has a single aligned subgroup.\n \"\"\"\n all_groups = dict()\n for name, groups in self.aligned_coordinate_groups.items():\n y_size = self._images[name].default_tile_shape[0]\n x_size = self._images[name].default_tile_shape[1]\n info = '\\n'.join(\n f\" Group {k}: \"\n f\" \"\n for k, v in enumerate(groups)\n )\n all_groups[name] = f'{info}'\n pprint.pprint(all_groups)\n\n def iterate_image_type(self, image_type: str) -> Iterator[ImageStack]:\n for aligned_group, _ in enumerate(self.aligned_coordinate_groups[image_type]):\n yield self.get_image(item=image_type, aligned_group=aligned_group)\n\n def get_image(self, item: str, aligned_group: int = 0,\n x_slice: Optional[Union[int, slice]] = None,\n y_slice: Optional[Union[int, slice]] = None,\n ) -> ImageStack:\n \"\"\"\n Parameters\n ----------\n\n item: str\n The name of the tileset ex. 'primary' or 'nuclei'\n aligned_group: int\n The aligned subgroup, default 0\n x_slice: int or slice\n The cropping parameters for the x axis\n y_slice:\n The cropping parameters for the y axis\n\n Returns\n -------\n The instantiated ImageStack\n \"\"\"\n crop_params = copy.copy((self.aligned_coordinate_groups[item][aligned_group]))\n crop_params._x_slice = x_slice\n crop_params._y_slice = y_slice\n return ImageStack.from_tileset(self._images[item], crop_parameters=crop_params)\n\n\nclass Experiment:\n \"\"\"\n This encapsulates an experiment, with one or more fields of view and a codebook. An individual\n FOV can be retrieved using a key, i.e., experiment[fov_name].\n\n Methods\n -------\n from_json()\n Given a URL or a path to an experiment.json document, return an Experiment object\n corresponding to the document.\n fov()\n Given a callable that accepts a FOV, return the first FOVs that the callable returns True\n when passed the FOV. Because there is no guaranteed sorting for the FOVs, use this\n cautiously.\n fovs()\n Given a callable that accepts a FOV, return all the FOVs that the callable returns True when\n passed the FOV.\n fovs_by_name()\n Given one or more FOV names, return the FOVs that match those names.\n\n Attributes\n ----------\n codebook : Codebook\n Returns the codebook associated with this experiment.\n extras : Dict\n Returns the extras dictionary associated with this experiment.\n \"\"\"\n def __init__(\n self,\n fovs: Sequence[FieldOfView],\n codebook: Codebook,\n extras: dict,\n *,\n src_doc: dict=None,\n ) -> None:\n self._fovs = fovs\n self._codebook = codebook\n self._extras = extras\n self._src_doc = src_doc\n\n def __repr__(self):\n\n # truncate the list of fields of view if it is longer than print_n_fov\n print_n_fov = 4\n n_fields_of_view = list(self.items())[:print_n_fov]\n fields_of_view_str = \"\\n\".join(\n f'{k}: {v}' for k, v in n_fields_of_view\n )\n\n # add an ellipsis if not all fields of view are being printed\n if len(self._fovs) > print_n_fov:\n fov_repr = f\"{{\\n{fields_of_view_str}\\n ...,\\n}}\"\n else:\n fov_repr = f\"{{\\n{fields_of_view_str}\\n}}\"\n\n # return the formatted string\n object_repr = f\"\\n\"\n return object_repr + fov_repr\n\n @classmethod\n def from_json(cls, json_url: str) -> \"Experiment\":\n \"\"\"\n Construct an `Experiment` from an experiment.json file format specifier.\n Loads configuration from StarfishConfig.\n\n Parameters\n ----------\n json_url : str\n file path or web link to an experiment.json file\n\n Returns\n -------\n Experiment :\n Experiment object serving the requested experiment data\n\n \"\"\"\n\n config = StarfishConfig()\n\n if config.strict:\n valid = validate_sptx.validate(json_url)\n if not valid:\n raise Exception(\"validation failed\")\n\n backend, name, baseurl = resolve_path_or_url(json_url, config.slicedimage)\n with backend.read_contextmanager(name) as fh:\n experiment_document = json.load(fh)\n\n version = cls.verify_version(experiment_document['version'])\n\n _, codebook_name, codebook_baseurl = resolve_url(experiment_document['codebook'],\n baseurl, config.slicedimage)\n codebook_absolute_url = pathjoin(codebook_baseurl, codebook_name)\n codebook = Codebook.from_json(codebook_absolute_url)\n\n extras = experiment_document['extras']\n\n fovs: MutableSequence[FieldOfView] = list()\n fov_tilesets: MutableMapping[str, TileSet]\n if version < Version(\"5.0.0\"):\n primary_image: Collection = Reader.parse_doc(experiment_document['primary_images'],\n baseurl, config.slicedimage)\n auxiliary_images: MutableMapping[str, Collection] = dict()\n for aux_image_type, aux_image_url in experiment_document['auxiliary_images'].items():\n auxiliary_images[aux_image_type] = Reader.parse_doc(\n aux_image_url, baseurl, config.slicedimage)\n\n for fov_name, primary_tileset in primary_image.all_tilesets():\n fov_tilesets = dict()\n fov_tilesets[FieldOfView.PRIMARY_IMAGES] = primary_tileset\n for aux_image_type, aux_image_collection in auxiliary_images.items():\n aux_image_tileset = aux_image_collection.find_tileset(fov_name)\n if aux_image_tileset is not None:\n fov_tilesets[aux_image_type] = aux_image_tileset\n\n fov = FieldOfView(fov_name, image_tilesets=fov_tilesets)\n fovs.append(fov)\n else:\n images: MutableMapping[str, Collection] = dict()\n all_fov_names: MutableSet[str] = set()\n for image_type, image_url in experiment_document['images'].items():\n image = Reader.parse_doc(image_url, baseurl, config.slicedimage)\n images[image_type] = image\n for fov_name, _ in image.all_tilesets():\n all_fov_names.add(fov_name)\n\n for fov_name in all_fov_names:\n fov_tilesets = dict()\n for image_type, image_collection in images.items():\n image_tileset = image_collection.find_tileset(fov_name)\n if image_tileset is not None:\n fov_tilesets[image_type] = image_tileset\n\n fov = FieldOfView(fov_name, image_tilesets=fov_tilesets)\n fovs.append(fov)\n\n return Experiment(fovs, codebook, extras, src_doc=experiment_document)\n\n @classmethod\n def verify_version(cls, semantic_version_str: str) -> Version:\n version = Version(semantic_version_str)\n if not (MIN_SUPPORTED_VERSION <= version <= MAX_SUPPORTED_VERSION):\n raise ValueError(\n f\"version {version} not supported. This version of the starfish library only \"\n f\"supports formats from {MIN_SUPPORTED_VERSION} to \"\n f\"{MAX_SUPPORTED_VERSION}\")\n return version\n\n def fov(\n self,\n filter_fn: Callable[[FieldOfView], bool]=lambda _: True,\n key_fn: Callable[[FieldOfView], str]=lambda fov: fov.name,\n ) -> FieldOfView:\n \"\"\"\n Given a callable filter_fn, apply it to all the FOVs in this experiment. Return the first\n FOV such that filter_fn(FOV) returns True. The order of the filtered FOVs will be determined\n by the key_fn callable. By default, this matches the order of fov.name.\n\n If no FOV matches, raise LookupError.\n \"\"\"\n for fov in sorted(self._fovs, key=key_fn):\n if filter_fn(fov):\n return fov\n raise LookupError(\"Cannot find any FOV that the filter allows.\")\n\n def fovs(\n self,\n filter_fn: Callable[[FieldOfView], bool]=lambda _: True,\n key_fn: Callable[[FieldOfView], str]=lambda fov: fov.name,\n ) -> Sequence[FieldOfView]:\n \"\"\"\n Given a callable filter_fn, apply it to all the FOVs in this experiment. Return a list of\n FOVs such that filter_fn(FOV) returns True. The returned list is sorted based on the key_fn\n callable, which by default matches the order of fov.name.\n \"\"\"\n results: MutableSequence[FieldOfView] = list()\n for fov in self._fovs:\n if not filter_fn(fov):\n continue\n\n results.append(fov)\n results = sorted(results, key=key_fn)\n return results\n\n def fovs_by_name(\n self,\n *names,\n key_fn: Callable[[FieldOfView], str]=lambda fov: fov.name,\n ) -> Sequence[FieldOfView]:\n \"\"\"\n Given a callable filter_fn, apply it to all the FOVs in this experiment. Return a list of\n FOVs such that filter_fn(FOV) returns True. The returned list is sorted based on the key_fn\n callable, which by default matches the order of fov.name.\n \"\"\"\n return self.fovs(filter_fn=lambda fov: fov.name in names)\n\n def __getitem__(self, item):\n fovs = self.fovs_by_name(item)\n if len(fovs) == 0:\n raise IndexError(f\"No field of view with name \\\"{item}\\\"\")\n return fovs[0]\n\n def keys(self):\n return (fov.name for fov in self.fovs())\n\n def values(self):\n return (fov for fov in self.fovs())\n\n def items(self):\n return ((fov.name, fov) for fov in self.fovs())\n\n @property\n def codebook(self) -> Codebook:\n return self._codebook\n\n @property\n def extras(self):\n return self._extras\n","sub_path":"starfish/experiment/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":15594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"542288202","text":"import math\nimport numpy as np\n\ndef Davidson(H_func,H_diag,neig):\t#input Direct Method Hamiltonian, Diagonal of Hamiltonian, and number of eigenvalues desired\t\n\ta = 8\t\t\t\t#number of vectors in initial sample space\n\ttol = 1e-8\n\tn = len(H_diag)\n\tt = np.eye(n,a)\t\t\t#set of test unit vectors in initial sample space\n\tV = np.zeros((n,a))\t\t#array to store sample space\n\n\tfor i in range(a):\t\t#input test vectors into sample space matrix\n\t\tV[:,i] = t[:,i]\n\n\n\ttheta_old = np.zeros(neig)\t#initialize old and new eigenvalue guesses, \"Theta\"\n\ttheta_new = np.ones(neig)\n\n\tcount = 1\t\t\t#keep track of number of iterations\n\twhile np.linalg.norm(theta_old-theta_new) > tol:\n\t\ttheta_old = theta_new\t#step theta\n\t\tV,R = np.linalg.qr(V)\t#use python's QR decomp. to ensure sample space orthogonality\n\t\tHV = np.zeros((n,a*count))\n\t\tfor i in range(a*count):\n\t\t\tHV[:,i] = H_func(V[:,i])\n\t\tVHV = np.dot(V[:,:(a*count)].T,HV) #build matrix in subspace\n\t\ttheta,s = np.linalg.eig(VHV)\t#diagonalize\n\t\tindex = np.argsort(theta)\t#sort eigenvalues and eigenvectors\n\t\ttheta = theta[index]\n\t\ts = s[:,index]\n\t\tV = np.c_[V,np.zeros((n,a))]\t#grow sample space matrix\n\t\tfor i in range(a):\t\t#loop through test vectors\n\t\t\ttest = np.dot(V[:,:(a*count)],s[:,i])\t#change basis of eigenvectors into basis of original matrix\n\t\t\tr = H_func(test) - theta[i]*test\t#calculate residue vector\n\t\t\tq = -(1/H_diag[i] - 1/theta[i])*r\t#calculate correction vector\n\t\t\tV[:,(i+(a*count))] = q\t\t\t#add correction vectors to subspace\n\t\ttheta_new = theta[:neig]\t\t\t#update guesses to eigenvalues\n\t\tcount = count + 1\n\treturn theta_new\n","sub_path":"Davidson/Davidson.py","file_name":"Davidson.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"214114185","text":"from url_utils import *\nfrom article import Article\nfrom bs4 import BeautifulSoup\nimport re\nimport pymongo\n\n\nURL_CYBELANGEL_BLOG = 'https://blog.cybelangel.com/'\nARTICLE_LINK = 'article h1 a[href^=' + URL_CYBELANGEL_BLOG + ']'\nNEXT_LINK_REGEX = '^https:\\/\\/blog.cybelangel.com\\/page\\/\\d+\\/$'\n\n\ndef get_articles_links_from_page(soup):\n \"\"\"\n Return a list which contains the links to every articles blog\n in the page given in parameter.\n \"\"\"\n articles_links = soup.select(ARTICLE_LINK)\n links = [link['href'] for link in articles_links]\n return links\n\n\ndef next_blog_page(soup):\n \"\"\"\n Return the URL of the next blog page or None if there is no more.\n \"\"\"\n next_link = soup.find(name='link',\n href=re.compile(NEXT_LINK_REGEX),\n rel='next')\n if next_link is not None:\n return next_link['href']\n else:\n return None\n\n\ndef get_articles_links_from_blog(url):\n \"\"\"\n Browse the cybelangel's blog and return a list which contains\n every articles links.\n \"\"\"\n links = []\n current_url = url\n while current_url is not None:\n print(current_url)\n page = get_html_page_from_url(current_url)\n soup = BeautifulSoup(page, \"html.parser\")\n links.extend(get_articles_links_from_page(soup))\n current_url = next_blog_page(soup)\n return links\n\n\nif __name__ == '__main__':\n dbarticles = pymongo.MongoClient().cybelangel.articles\n print('Retrieves every articles')\n articles_links = get_articles_links_from_blog(URL_CYBELANGEL_BLOG)\n print('Total count of articles: {}\\n'.format(len(articles_links)))\n for i, link in enumerate(articles_links):\n print('[{:03d}/{}] processing > {}'.format(i + 1, len(articles_links), link))\n present = dbarticles.find_one({'url': link})\n if present is None:\n article = Article(link)\n article.extract_contents()\n id = dbarticles.insert_one(article.export_for_mongo()).inserted_id\n print('article \"{}\"\" inserted in mongodb with id {}\\n'.format(link, id))\n else:\n print('already in mongodb with id {}\\n'.format(present['_id']))\n print('Total articles in mongodb: {}'.format(dbarticles.count()))\n","sub_path":"cybelangel.py","file_name":"cybelangel.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"292056988","text":"\n'''\nFile: forwardingUnit.py\nAuthor: Nina Angelvik\nDescription: Implementation of the forwardingUnit element\n'''\n\nimport unittest\nfrom cpuElement import CPUElement\nfrom testElement import TestElement\nfrom trippleMux import TrippleMux\n\nclass ForwardingUnit(CPUElement):\n def connect(self, inputSources, outputValueNames, control, outputSignalNames):\n CPUElement.connect(self, inputSources, outputValueNames, control, outputSignalNames)\n\n assert(len(inputSources) == 4), 'ForwardingUnit should have 4 inputs'\n assert(len(outputValueNames) == 0), 'ForwardingUnit should not have any outputs'\n assert(len(control) == 2), 'ForwardingUnit should not have 2 control inputs'\n assert(len(outputSignalNames) == 2), 'ForwardingUnit should have 2 control outputs'\n\n self.IDEXrs = inputSources[0][1]\n self.IDEXrt = inputSources[1][1]\n self.EXMEMrd = inputSources[2][1]\n self.MEMWBrd = inputSources[3][1]\n self.EXMEMregWrite = control[0][1]\n self.MEMWBregWrite= control[1][1]\n self.forwardA = outputSignalNames[0]\n self.forwardB = outputSignalNames[1]\n\n def writeOutput(self):\n pass\n\n def setControlSignals(self):\n IDEXrs = self.inputValues[self.IDEXrs]\n IDEXrt = self.inputValues[self.IDEXrt]\n EXMEMrd = self.inputValues[self.EXMEMrd]\n MEMWBrd = self.inputValues[self.MEMWBrd]\n EXMEMregWrite = self.controlSignals[self.EXMEMregWrite]\n MEMWBregWrite = self.controlSignals[self.MEMWBregWrite]\n\n self.outputControlSignals[self.forwardA] = 0b00 # Output from register\n self.outputControlSignals[self.forwardB] = 0b00 # Output from register\n\n assert(not isinstance(IDEXrs, bool))\n assert(not isinstance(IDEXrt, bool))\n assert(not isinstance(EXMEMrd, bool))\n assert(not isinstance(MEMWBrd, bool))\n assert(isinstance(EXMEMregWrite, int))\n assert(isinstance(MEMWBregWrite, int))\n\n if (MEMWBregWrite == 0b1) and (MEMWBrd != 0b0) and (MEMWBrd == IDEXrs) : #MEM hazard\n self.outputControlSignals[self.forwardA] = 0b01 # Output from WB\n if (MEMWBregWrite == 0b1) and (MEMWBrd != 0b0) and (MEMWBrd == IDEXrt): #MEM hazard\n self.outputControlSignals[self.forwardB] = 0b01\n\n if (EXMEMregWrite == 0b1) and (EXMEMrd != 0b0) and (EXMEMrd == IDEXrs): #EX hazard:\n self.outputControlSignals[self.forwardA] = 0b10 # Output from EX/MEM\n if (EXMEMregWrite == 0b1) and (EXMEMrd != 0b0) and (EXMEMrd == IDEXrt): #EX hazard\n self.outputControlSignals[self.forwardB] = 0b10\n\nclass TestForwarding(unittest.TestCase):\n def setUp(self):\n self.forwardingUnit = ForwardingUnit()\n self.testInput = TestElement()\n self.Amux = TrippleMux()\n self.Bmux = TrippleMux()\n self.testOutput = TestElement()\n\n\n self.testInput.connect(\n [],\n ['IDEXrs', 'IDEXrt','EXMEMrd', 'MEMWBrd', 'readReg1', 'readReg2', 'WB-ALU', 'EXMEM-ALU'],\n [],\n ['EXMEMregWrite', 'MEMWBregWrite']\n )\n\n self.forwardingUnit.connect(\n [(self.testInput, 'IDEXrs'), (self.testInput, 'IDEXrt'), (self.testInput, 'EXMEMrd'), (self.testInput, 'MEMWBrd')],\n [],\n [(self.testInput, 'EXMEMregWrite'),\n (self.testInput, 'MEMWBregWrite'),],\n ['forwardA', 'forwardB']\n )\n\n self.Amux.connect(\n [(self.testInput, 'readReg1'), (self.testInput, 'WB-ALU'), (self.testInput, \"EXMEM-ALU\")],\n ['AmuxData'],\n [(self.forwardingUnit, 'forwardA')],\n []\n )\n\n self.Bmux.connect(\n [(self.testInput, 'readReg2'), (self.testInput, 'WB-ALU'), (self.testInput, \"EXMEM-ALU\")],\n ['BmuxData'],\n [(self.forwardingUnit, 'forwardB')],\n []\n )\n\n\n self.testOutput.connect(\n [(self.Amux, 'AmuxData'), (self.Bmux, 'BmuxData')],\n [],\n [],\n []\n )\n\n def test_correct_behavior(self):\n self.testInput.setOutputValue('IDEXrs', 10)\n self.testInput.setOutputValue('IDEXrt', 11)\n self.testInput.setOutputValue('EXMEMrd', 10)\n self.testInput.setOutputValue('MEMWBrd', 11)\n self.testInput.setOutputValue('readReg1', 25)\n self.testInput.setOutputValue('readReg2', 200)\n self.testInput.setOutputValue('WB-ALU', 300)\n self.testInput.setOutputValue('EXMEM-ALU', 225)\n self.testInput.setOutputControl('EXMEMregWrite', 1)\n self.testInput.setOutputControl('MEMWBregWrite', 1)\n\n self.forwardingUnit.readInput()\n self.forwardingUnit.readControlSignals()\n self.forwardingUnit.writeOutput()\n self.forwardingUnit.setControlSignals()\n\n self.Amux.readInput()\n self.Amux.readControlSignals()\n self.Amux.writeOutput()\n\n self.Bmux.readInput()\n self.Bmux.readControlSignals()\n self.Bmux.writeOutput()\n\n self.testOutput.readInput()\n\n self.assertEqual(self.testOutput.inputValues['AmuxData'], 225)\n self.assertEqual(self.testOutput.inputValues['BmuxData'], 300)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"forwardingUnit.py","file_name":"forwardingUnit.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"448447808","text":"# encoding=utf-8\n\nimport argparse\nimport logging\nimport os.path as osp\nimport time\nfrom collections import OrderedDict\n\nimport options.options as option\nimport utils.util as util\nfrom data import create_dataset, create_dataloader\nfrom models import create_model\n\n#### options\nparser = argparse.ArgumentParser()\nparser.add_argument('-opt',\n type=str,\n default='./options/test/test_HDRUNet.yml',\n help='Path to options YMAL file.')\nopt = option.parse(parser.parse_args().opt, is_train=False)\nopt = option.dict_to_nonedict(opt)\n\nutil.mkdirs((path for key, path in opt['path'].items()\n if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))\nutil.setup_logger('base', opt['path']['log'], 'test_' + opt['name'],\n level=logging.INFO,\n screen=True,\n tofile=True)\nlogger = logging.getLogger('base')\nlogger.info(option.dict2str(opt))\n\n#### Create test dataset and dataloader\ntest_loaders = []\nfor phase, dataset_opt in sorted(opt['datasets'].items()):\n test_set = create_dataset(dataset_opt)\n test_loader = create_dataloader(test_set, dataset_opt)\n logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))\n test_loaders.append(test_loader)\n\nmodel = create_model(opt)\nfor test_loader in test_loaders:\n test_set_name = test_loader.dataset.opt['name']\n logger.info('\\nTesting [{:s}]...'.format(test_set_name))\n test_start_time = time.time()\n dataset_dir = osp.join(opt['path']['results_root'], test_set_name)\n util.mkdir(dataset_dir)\n\n test_results = OrderedDict()\n test_results['psnr'] = []\n\n for data in test_loader:\n need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True\n model.feed_data(data, need_GT=need_GT)\n img_path = data['GT_path'][0] if need_GT else data['LQ_path'][0]\n img_name = osp.splitext(osp.basename(img_path))[0]\n\n model.test()\n visuals = model.get_current_visuals(need_GT=need_GT)\n\n sr_img = util.tensor2numpy(visuals['SR'])\n image_path, alignratio_path = util.generate_paths(dataset_dir, img_name)\n util.save_img_with_ratio(image_path, sr_img, alignratio_path)\n\n logger.info('{:20s}'.format(img_name))\n","sub_path":"codes/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"451565404","text":"# -*- coding: utf-8 -*-\n# -*- author: jokker -*-\n\n\nimport copy\nfrom .XmlUtil import XmlUtil\n\n\nclass AreaInfo(object):\n \"\"\"to storage node attribute and class infomation\"\"\"\n\n def __init__(self, node_id, father_id=None, child_ids=None, attr_dict=None):\n # node attribute\n if attr_dict is None:\n self.__attr_dict = {}\n elif isinstance(attr_dict, dict):\n self.__attr_dict = attr_dict\n #\n if father_id is None:\n self.__father_ID = None\n else:\n if isinstance(father_id, float) or isinstance(father_id, int) or isinstance(father_id, str):\n self.__father_ID = father_id\n else:\n raise TypeError('father_ID can only be int float or str')\n #\n if child_ids is None:\n self.__child_ID = set()\n else:\n if isinstance(child_ids, list) or isinstance(child_ids, set) or isinstance(child_ids, tuple):\n self.__child_ID = set()\n for each_child_id in child_ids:\n self.add_child_node(each_child_id)\n else:\n raise ValueError('child_IDs is illegal')\n # node_id Used to uniquely identify a node\n if isinstance(node_id, float) or isinstance(node_id, int) or isinstance(node_id, str):\n self.__ID = node_id\n else:\n raise TypeError('ID can only be int float or str')\n # root_node --> 0, middle node --> 1, leaf node --> 2\n self.__element_type = None\n # the node class\n self.__element_class = None\n #\n self.__reflash_element()\n\n def __reflash_element(self):\n \"\"\"update node type and class\"\"\"\n # update node type\n if self.__father_ID is None:\n self.__element_type = 0 # root node\n elif self.__child_ID is None or len(self.__child_ID) == 0:\n self.__element_type = 2 # leaf node\n else:\n self.__element_type = 1 # middle node\n # update node class ?\n # todo if the function is needed\n\n # ------------ get infomation ----------------------------------\n\n def get_attr_info(self, attr_key):\n \"\"\"get node attribute by key\"\"\"\n if attr_key in self.__attr_dict:\n return self.__attr_dict[attr_key]\n else:\n return None\n\n def get_father_id(self):\n return self.__father_ID\n\n def get_child_id(self):\n \"\"\"child_id is a set, return a copy\"\"\"\n return self.__child_ID.copy()\n\n def get_id(self):\n \"\"\"get self node id\"\"\"\n return self.__ID\n\n def get_element_type(self):\n \"\"\"get self type\"\"\"\n self.__reflash_element()\n return self.__element_type\n\n # ------------ node structure ----------------------------------\n\n def add_child_node(self, child_node_id):\n \"\"\"add child node\"\"\"\n if child_node_id == self.__father_ID:\n raise ValueError(u'child ID equal to child ID') # father id != child id\n else:\n self.__child_ID.add(child_node_id)\n\n def remove_assign_child_node(self, child_node_id):\n \"\"\"remove assign child return True if success else False\"\"\"\n if child_node_id in self.__child_ID:\n self.__child_ID.remove(child_node_id)\n return True\n else:\n return False\n\n def remove_all_child_node(self):\n \"\"\"delete all child\"\"\"\n self.__child_ID = set()\n\n def assign_father_node(self, father_node_id):\n \"\"\"assign which node self belong\"\"\"\n if father_node_id in self.__child_ID:\n raise ValueError(u'child ID equal to child ID') # father id != child id\n else:\n self.__father_ID = father_node_id\n\n def remove_father_node(self):\n \"\"\"delete father node\"\"\"\n self.__father_ID = None\n\n # ------------ attribute operate ------------------------------------\n\n def add_attr_info(self, key, value):\n \"\"\"add or update attribute\"\"\"\n self.__attr_dict[key] = value\n\n def remove_attr_info(self, key):\n \"\"\"delete assign self attribute\"\"\"\n if key in self.__attr_dict:\n self.__attr_dict.pop(key)\n\n def clear_attr_info(self):\n \"\"\"delete all self attribute\"\"\"\n self.__attr_dict = {}\n\n\nclass AreaInfoOperation(object):\n \"\"\"for operate AreaInfo\"\"\"\n\n def __init__(self, node_dict=None, node_class_dict=None):\n # a dict to storage node infomation\n if node_dict is None:\n self.node_dict = {}\n else:\n self.node_dict = node_dict # key: node_id, value: AreaInfo\n\n # for avoid ring\n if node_class_dict is None:\n self.node_class_dict = {}\n else:\n # key: node_id, value: node class, root node class: 0, root-->child node class is 1 and so on\n self.node_class_dict = node_class_dict\n\n # ---------------------- node operate -------------------------------\n\n def add_node(self, node):\n \"\"\"if node id not in self , set it a root node\"\"\"\n\n # type checking\n if not isinstance(node, AreaInfo):\n raise TypeError('need a AreaInfo')\n\n node_id = node.get_id()\n # ensure uniqueness --> node id\n if node_id in self.node_dict:\n raise ValueError('ID {0} has been in node_dict'.format(node_id))\n\n # add to node_dict\n self.node_dict[node_id] = node\n\n def link_two_node(self, father_id, child_id):\n \"\"\"link two node\"\"\"\n if not (father_id in self.node_dict and child_id in self.node_dict):\n raise ValueError('father id or child id not in node_dict')\n\n father_node = self.node_dict[father_id]\n child_node = self.node_dict[child_id]\n #\n father_node.add_child_node(child_id)\n child_node.assign_father_node(father_id)\n\n def break_link_between_two_node(self, father_id, child_id):\n \"\"\"break link between two_node\"\"\"\n\n # TODO just get two node, juge their relationship\n\n if not (father_id in self.node_dict and child_id in self.node_dict):\n raise ValueError('father id or child id not in node_dict')\n\n father_node = self.node_dict[father_id]\n child_node = self.node_dict[child_id]\n #\n father_node.remove_assign_child_node(child_id)\n child_node.remove_father_node()\n\n def insert_node_between_two_node(self, father_id, child_id, new_node):\n \"\"\"insert node between two node\"\"\"\n\n if not isinstance(new_node, AreaInfo):\n raise TypeError('new_node should be AreaInfo')\n\n node_id = new_node.get_id()\n\n if not (father_id in self.node_dict and child_id in self.node_dict):\n raise ValueError('father id or child id not in node_dict')\n\n if node_id not in self.node_dict: # new one if not exist\n self.add_node(new_node)\n\n self.break_link_between_two_node(father_id, child_id)\n self.link_two_node(father_id, node_id)\n self.link_two_node(node_id, child_id)\n\n def delete_node_between_two_node(self, father_id, child_id):\n \"\"\"delete node between two node\"\"\"\n\n if not (father_id in self.node_dict and child_id in self.node_dict):\n raise ValueError('father id or child id or new_node_id not in node_dict')\n\n father_node = self.node_dict[father_id]\n child_node = self.node_dict[child_id]\n\n if child_node.get_father_id() not in father_node.get_child_id():\n raise ValueError('their no node in father_node and child_node')\n\n middle_node = self.node_dict[child_node.get_father_id()]\n middle_id = middle_node.get_id()\n\n self.break_link_between_two_node(father_id, middle_id)\n self.break_link_between_two_node(middle_id, child_id)\n self.link_two_node(father_id, child_id)\n\n # ---------------------- get node infomation -----------------------\n\n def get_node_copy(self, node_id):\n \"\"\"get node copy\"\"\"\n if node_id in self.node_dict:\n return copy.deepcopy(self.node_dict[node_id])\n else:\n return None\n\n def get_child_node_id(self, node_id):\n \"\"\"get child node id\"\"\"\n # node not exist\n if node_id not in self.node_dict:\n return None\n return self.node_dict[node_id].get_child_id()\n\n def get_father_node_id(self, node_id):\n \"\"\"得到父节点的 node_id\"\"\"\n # if node exist\n if node_id not in self.node_dict:\n return None\n return self.node_dict[node_id].get_father_id()\n\n def get_brother_node_id(self, node_id):\n \"\"\"get brother node id\"\"\"\n if node_id not in self.node_dict:\n return None\n\n # find father node\n now_node = self.node_dict[node_id]\n father_id = now_node.get_father_id()\n\n father_node = None\n if father_id:\n if father_id in self.node_dict:\n father_node = self.node_dict[father_id]\n #\n if not father_node:\n return None\n else:\n res_node = father_node.get_child_id() # find father node's child node\n res_node.remove(node_id)\n return res_node\n\n def get_all_child_node_id(self, node_id, algo=None, scan_all=False):\n \"\"\"get all child node id, algo ==> for filtrate node, 过滤父节点之后是否需要继续遍历子节点\"\"\"\n # 节点不存在\n if node_id not in self.node_dict:\n return None\n\n all_nodes_id = set()\n nodes = [self.node_dict[node_id]]\n\n while nodes:\n # check_nodes = copy.deepcopy(nodes)\n check_nodes = nodes.copy()\n nodes = []\n if algo is None: # no filtrate\n for each_node in check_nodes:\n for node_id_temp in each_node.get_child_id():\n if node_id_temp in self.node_dict:\n nodes.append(self.node_dict[node_id_temp])\n all_nodes_id.add(node_id_temp)\n else:\n for each_node in check_nodes:\n for node_id_temp in each_node.get_child_id():\n if node_id_temp in self.node_dict:\n node_temp = self.get_node_copy(node_id_temp)\n #\n if algo(node_temp):\n nodes.append(self.node_dict[node_id_temp])\n all_nodes_id.add(node_id_temp)\n else:\n if scan_all:\n nodes.append(self.node_dict[node_id_temp])\n return list(all_nodes_id)\n\n # ---------------------- need repair --------------------------------\n\n def get_relationship_betweent_two_node(self, node_a, node_b):\n \"\"\"get relationship betweent two node\"\"\"\n # if a in all b's childs\n # if b in all a's childs\n # father-child, ancestor-child, no-relation\n\n def get_all_root_node(self):\n \"\"\"get all root node\"\"\"\n\n def have_ring(self):\n \"\"\"find if have a ring\"\"\"\n\n def refresh_node_class(self):\n \"\"\"refresh node class\"\"\"\n # TODO self.have_ring()\n # TODO compute class\n\n\ndef parse_map_info(area_cfg_xml_path):\n \"\"\"parse geographical xml\"\"\"\n\n def get_child_node_info(node_one):\n \"\"\"get child node infomation\"\"\"\n father_info = XmlUtil.get_info_from_node(node_one)\n\n if father_info is None:\n return\n\n father_id = father_info['attr']['id']\n #\n for each_node in node_one.childNodes:\n each_child_info = XmlUtil.get_info_from_node(each_node)\n if each_child_info:\n child_id = each_child_info['attr']['id']\n area_info_0perator.add_node(\n AreaInfo(child_id, father_id=father_id, attr_dict=each_child_info['attr'])) # add node\n area_info_0perator.link_two_node(father_id, child_id) # define node relation\n get_child_node_info(each_node) # do recursion\n # --------------------------------------------------------------------------------------\n area_info_0perator = AreaInfoOperation()\n root = XmlUtil.get_root_node(area_cfg_xml_path) # root node\n root_info = XmlUtil.get_info_from_node(root)\n root_id = root_info['attr']['id']\n area_info_0perator.add_node(AreaInfo(root_id, attr_dict=root_info['attr'])) # add root node\n get_child_node_info(root)\n return area_info_0perator\n\n\nif __name__ == '__main__':\n\n areaXmlPath = r'D:\\Code\\Util_Util\\MapClassInfo\\AreaCfg.xml'\n areaInfoOperator = parse_map_info(areaXmlPath)\n city = areaInfoOperator.get_child_node_id('QHS')\n\n countrys = []\n for each in city:\n countrys.extend(areaInfoOperator.get_child_node_id(each))\n\n print(len(city))\n print(city)\n print(len(countrys))\n print(countrys)\n","sub_path":"Report/MapClassUtil.py","file_name":"MapClassUtil.py","file_ext":"py","file_size_in_byte":12920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"345453659","text":"#Encrypt\n\nimport random\n\ndef main():\n\n encrypt = [\" \"] * 26 #all letters available\n print(\"Alphabet: \", end=\"\")\n\n for numbah in range(26):\n letter = chr(numbah+65)\n print (letter, end=\"\")\n # find position for number\n notfound = True\n while notfound:\n position = random.randint(0, 25)\n if encrypt[position] == \" \":\n notfound = False\n encrypt[position] = letter\n\n print(\"\\nScrambled: \", end=\"\")\n for numbah in range(26):\n print(encrypt[numbah], end=\"\")\n print(\"\\n\\n \")\n\n msg=input(\"Now, please type in your message to encode: \")\n\n print(\"Your secret message: \" + msg)\n print(\"Your message encoded: \", end=\"\")\n for alpha in msg.upper():\n if alpha < \"A\" or alpha > \"Z\":\n print(alpha,end=\"\")\n else:\n print(encrypt[ ord(alpha) - 65 ], end=\"\")\n \n\n \nmain()\n","sub_path":"schoolwork/NYU_IntroToCP_Summer2011/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"454292234","text":" ######################## # # #Cleaning our data# # # #####################\nimport pandas as pd\nimport csv\nfrom collections import defaultdict\n\ndisease_list = []\n\ndef return_list(disease):\n disease_list = []\n match = disease.replace('^','_').split('_')\n ctr = 1\n for group in match:\n if ctr%2==0:\n disease_list.append(group)\n ctr = ctr + 1\n\n return disease_list\n#Writing our cleaned data\nwith open(\"Scraped-Data/dataset_uncleaned.csv\") as csvfile:\n reader = csv.reader(csvfile)\n disease=\"\"\n weight = 0\n disease_list = []\n dict_wt = {}\n dict_=defaultdict(list)\n for row in reader:\n\n if row[0]!=\"\\xc2\\xa0\" and row[0]!=\"\":\n disease = row[0]\n disease_list = return_list(disease)\n weight = row[1]\n\n if row[2]!=\"\\xc2\\xa0\" and row[2]!=\"\":\n symptom_list = return_list(row[2])\n\n for d in disease_list:\n for s in symptom_list:\n dict_[d].append(s)\n dict_wt[d] = weight\n\n # print (dict_)\nwith open(\"Scraped-Data/dataset_clean.csv\",\"w\") as csvfile:\n writer = csv.writer(csvfile)\n for key,values in dict_.items():\n for v in values:\n #key = str.encode(key)\n key = str.encode(key).decode('utf-8')\n '''#.strip()\n #v = v.encode('utf-8').strip()\n #v = str.encode(v)'''\n writer.writerow([key,v,dict_wt[key]])\n\ncolumns = ['Source','Target','Weight']\ndata = pd.read_csv(\"Scraped-Data/dataset_clean.csv\",names=columns, encoding =\"ISO-8859-1\")\n\n\n#separating disease an symtpoms in separate files\ndata.to_csv(\"Scraped-Data/dataset_clean.csv\",index=False) #Source/disease,Target/symp,Weight\nslist = []\ndlist = []\nwith open(\"Scraped-Data/nodetable.csv\",\"w\") as csvfile:\n writer = csv.writer(csvfile)\n\n for key,values in dict_.items():\n for v in values:\n if v not in slist:\n writer.writerow([v,v,\"symptom\"])\n slist.append(v)\n if key not in dlist:\n writer.writerow([key,key,\"disease\"])\n dlist.append(key)\n\nnt_columns = ['Id','Label','Attribute']\nnt_data = pd.read_csv(\"Scraped-Data/nodetable.csv\",names=nt_columns, encoding =\"ISO-8859-1\",)\n\nnt_data.to_csv(\"Scraped-Data/nodetable.csv\",index=False)\n\n\n\n\n\n####################### # # #Analysing our cleaned data# # # #########################\ndata = pd.read_csv(\"Scraped-Data/dataset_clean.csv\", encoding =\"ISO-8859-1\")\n# print(len(data['Source'].unique()))\n# print(len(data['Target'].unique()))\ndf = pd.DataFrame(data)\ndf_1 = pd.get_dummies(df.Target)\n\ndf_s = df['Source']\ndf_pivoted = pd.concat([df_s,df_1], axis=1)\ndf_pivoted.drop_duplicates(keep='first',inplace=True)\n# print(df_pivoted[:5])\n\ncols = df_pivoted.columns\ncols = cols[1:]\ndf_pivoted = df_pivoted.groupby('Source').sum()\ndf_pivoted = df_pivoted.reset_index()\n# print(df_pivoted[:5])\n\ndf_pivoted.to_csv(\"Scraped-Data/df_pivoted.csv\")\nx = df_pivoted[cols]\ny = df_pivoted['Source']\n\n# # Trying out our classifier to learn diseases from the symptoms\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)\nmnb = MultinomialNB()\nmnb = mnb.fit(x_train, y_train)\nprint(mnb.score(x_test, y_test))\n# print(x_train, x_test, y_train, y_test)\n\n\n\n# x\n################# # # Inferences on train and test split# # ################\nmnb_tot = MultinomialNB()\nmnb_tot = mnb_tot.fit(x, y)\nmnb_tot.score(x, y)\n\ndisease_pred = mnb_tot.predict(x)\ndisease_real = y.values\nprint(y.values)\nfor i in range(0, len(disease_real)):\n if disease_pred[i]!=disease_real[i]:\n print ('Pred: {0} Actual:{1}'.format(disease_pred[i], disease_real[i]))\n else:\n print (\"xxxxxxxxxxxxxx\",'Pred: {0} Actual:{1}'.format(disease_pred[i], disease_real[i]))\n\n\n\n\n\n####################### # # Training a decision tree # # ###################\nfrom sklearn.tree import DecisionTreeClassifier\nprint (\"DecisionTree\")\ndt = DecisionTreeClassifier()\nclf_dt = dt.fit(x,y)\nprint (\"Acurracy: \", clf_dt.score(x,y))\n\n\n\n\n\nfrom sklearn import tree \n####################### # # # Analysis of the Manual data # # # ######################################\ndata = pd.read_csv(\"Manual-Data/Training.csv\")\ndf = pd.DataFrame(data)\n# The manual data contains approximately 4920 rows.\ncols = df.columns\ncols = cols[:-1]\nx = df[cols]\ny = df['prognosis']\n\n\n####################### # # # Training a decision tree# # # #######################\nfrom sklearn.tree import DecisionTreeClassifier\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)\nprint (\"DecisionTree\")\ndt = DecisionTreeClassifier()\nclf_dt=dt.fit(x_train,y_train)\nprint (\"Acurracy: \", clf_dt.score(x_test,y_test))\n\n\n# dt.__getstate__()\n\n#Finding the Feature importances\nimport numpy as np\nimportances = dt.feature_importances_\nindices = np.argsort(importances)[::-1]\n\n# # Print the feature ranking\nprint(\"Feature ranking:\")\nfeatures = cols\nfor f in range(10):\n print(\"%d. feature %d - %s (%f)\" % (f + 1, indices[f], features[indices[f]] ,importances[indices[f]]))\n\nfeature_dict = {}\nfor i,f in enumerate(features):\n feature_dict[f] = i\n\nprint(features)\n\nm = [0 for i in range(132)]\nm = np.matrix(m)\nfor sym in range(4):\n\n ans = input(\"input sympt from above values ONLY >>> \")\n a = feature_dict[ans]\n\n a = [1 if i == int(a) else 0 for i in range(len(features))]\n a = np.matrix(np.array(a).reshape(1,len(a)))\n m = m + a\nprint(m)\nprint(dt.predict(m))\n","sub_path":"disPred.py","file_name":"disPred.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"465760446","text":"from WordCleanProcess import WordClean\n\nclass invertedIndex:\n def __init__(self):\n # create an empty inverted index\n self.inverted_lists = {}\n self.word_occur = {}\n self.doc_occur = {}\n self.totalDcos = 0\n self.cleaner = WordClean()\n\n # read doc an fill words in index\n def readFromFile(self, file_name , fileFormat = True , strLine = \"\" ):\n\n # construct index from given file\n # read file\n if fileFormat :\n with open(file_name, encoding=\"utf8\") as file:\n for line in file:\n self.totalDcos += 1\n\n self.word_occur[self.totalDcos] = 0\n self.workOnLine(line, self.totalDcos)\n\n # read line of string (query input)\n else:\n self.totalDcos += 1\n self.word_occur[self.totalDcos] = 0\n self.workOnLine(strLine,self.totalDcos)\n\n\n\n\n def workOnLine(self , line , doc_id ):\n words = self.cleaner.Parse(line)\n\n # print(\"--> : {}\".format(words))\n for word in words:\n\n if (word != -1):\n # adding keyword (token) to Index ...\n if word not in self.inverted_lists:\n self.inverted_lists[word] = []\n self.doc_occur[word] = {}\n\n\n # don't repeat docs id\n if not len(self.inverted_lists[word]) or \\\n self.inverted_lists[word][-1] != doc_id:\n # first filling for doc with id (doc_id)\n self.inverted_lists[word].append(doc_id)\n self.doc_occur[word][doc_id] = 0\n\n # increase occurrence\n self.doc_occur[word][doc_id] += 1\n self.word_occur[doc_id] += 1\n # print(word)\n\n\n def clearIndex(self):\n self.inverted_lists = {}\n self.doc_occur = {}\n\n\n #present index\n def presentInvertedIndex(self):\n #open('Visual-Index-word-doc_ids.txt', 'w').close()\n #file = open('Visual-Index-word-doc_ids.txt','a+', encoding=\"utf8\")\n\n for word, ids in self.inverted_lists.items():\n #file.write(str(word) + \" => \" + str(ids) + \"\\n\")\n print('{} => {}'.format( word , ids ) )\n #file.close()\n\n\n # present keywords of index\n def presentKeywordsIndex(self):\n #open('Visual-index-all-terms.txt', 'w').close()\n #file = open('Visual-index-all-terms.txt', 'a+', encoding=\"utf8\")\n\n for key in self.inverted_lists.keys():\n #file.write(str(key) + \"\\n\")\n print(key)\n #file.close()\n\n #present index by (keyword , occurrence)\n def presentKeywordsOccur(self):\n #open('Visual-index-word-occurence.txt', 'w').close()\n #file = open('Visual-index-word-occurence.txt', 'a+', encoding=\"utf8\")\n\n for word, ids in self.inverted_lists.items():\n #print('{} => ['.format(word) , end=\" \" )\n #file.write(str(word) + \" => [ \")\n for id in ids :\n print( \"#{}({})\".format(id , self.doc_occur[word][id] ) , end=\", \" )\n #file.write(\"#\" + str(id) + \"(\" + str(self.doc_occur[word][id]) + \"), \")\n\n #print(\"]\")\n #file.write(\" ]\\n\")\n\n #file.close()\n\n\n # comparing key\n def getCmpKey(self , item):\n return item[0]\n\n # present index by (keyword , Total occurrence) ordered by occurrence\n def presentIndexOrderByOccur(self):\n #open('Visual-index-order-by-occur.txt', 'w').close()\n #file = open('Visual-index-order-by-occur.txt', 'a+', encoding=\"utf8\")\n\n List = []\n for word, ids in self.inverted_lists.items():\n total_occur = 0\n for id in ids :\n total_occur += self.doc_occur[word][id]\n\n List.append( ( total_occur , word ) )\n\n List.sort(key=self.getCmpKey , reverse=True)\n for occur , word in List :\n print('{} => {}'.format( word , occur) )\n #file.write(str(word) + \" => \" + str(occur) + \"\\n\" )\n\n\n #file.close()\n\n","sub_path":"invertedIndexProcess.py","file_name":"invertedIndexProcess.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"397312680","text":"import html\nimport pickle\nimport re\nimport sys\nimport urllib.parse\n\nfrom huya_login import HuyaDriver\n\n\ndef get_cookies():\n driver = HuyaDriver('520667')\n driver.colse()\n\n\ndef read_cookies():\n with open(\"./cookie/cookies.pkl\", \"rb\") as cookiefile:\n cookies = pickle.load(cookiefile)\n cookie = [item[\"name\"] + \"=\" + item[\"value\"] for item in cookies]\n cookiestr = ';'.join(item for item in cookie)\n for c in cookie:\n if 'yyuid' in c:\n yyuid = c.split('=')[1]\n print(cookie)\n print(yyuid)\n\n\ndef unescape(string):\n string = urllib.parse.unquote(string)\n quoted = html.unescape(string).encode(sys.getfilesystemencoding()).decode('utf-8')\n # 转成中文\n return re.sub(r'%u([a-fA-F0-9]{4}|[a-fA-F0-9]{2})', lambda m: chr(int(m.group(1), 16)), quoted)\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"217186880","text":"import FWCore.ParameterSet.Config as cms\n\nMCResolutions = cms.EDAnalyzer('MCResolutions',\n\tjetTag\t\t\t\t= cms.InputTag('patJetsAK5PF'),\n\tmuonTag\t\t\t\t= cms.InputTag('muons'),\n\tgenJetTag\t\t\t= cms.InputTag('ak5GenJets'),\n\tweightName\t\t\t= cms.InputTag('weightProducer','weight','PAT'),\n\tEBRecHits\t\t\t= cms.InputTag('ecalRecHit','EcalRecHitsEB'),\n\tEERecHits\t\t\t= cms.InputTag('ecalRecHit','EcalRecHitsEE'),\n\tjetMultPtCut = cms.double(50.),\n\tjetMultEtaCut = cms.double(2.5),\n\tdeltaPhiDiJet\t\t= cms.double(2.7),\n\tabsCut3rdJet\t\t= cms.double(30.),\n\trelCut3rdJet\t\t= cms.double(0.2),\n\tdeltaRMatch\t\t\t= cms.double(0.1),\n\tdeltaRMatchVeto\t= cms.double(0.7),\n\tabsPtVeto\t\t\t= cms.double(30.),\n\trelPtVeto\t\t\t= cms.double(0.05),\n\tdeltaRDeadECal\t\t= cms.double(0.2),\n\tGenJetPtCut\t\t\t= cms.double(0.),\n maskedEcalChannelStatusThreshold\t= cms.int32(13),\n fileName\t\t\t\t= cms.string('MCJetResolution.root'),\n)\n","sub_path":"RA2Classic/MCResolutions/python/mcresolutions_cfi.py","file_name":"mcresolutions_cfi.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"376439231","text":"import matplotlib.pyplot as plt \nimport numpy as np\n\nf=open(\"time.txt\")\nx= []\nfor i in range(1, 11, 1):\n x.append(10000*i)\nvalues = np.array([[float(x.split(' ')[0]), float(x.split(' ')[1])] for x in f] );\ny = values[:,0]\n\n\nplt.plot(x, y) \n\nplt.xlabel('Длина строки') \nplt.ylabel('Время') \nplt.grid(True)\nplt.show() \ny = values[:,1]\nplt.plot(x, y) \nplt.xlabel('Длина строки') \nplt.ylabel('Килобайт') \nplt.grid(True)\nplt.show() ","sub_path":"lab5/graphic.py","file_name":"graphic.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"190768904","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 26 14:33:11 2016\n\n@author: SamWitte\n\"\"\"\nimport os\nimport matplotlib \nmatplotlib.use('agg')\nimport argparse\nimport numpy as np\nfrom subhalo import *\nfrom multiprocessing import Process, Pool, cpu_count\nfrom parallel_map import *\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dmax', default=False)\nparser.add_argument('--nobs', default=False)\nparser.add_argument('--simname', default='sim1')\nparser.add_argument('--tag', default='')\nparser.add_argument('--pointlike', default=False)\nparser.add_argument('--mass', default=40, type=float)\nparser.add_argument('--cross_sec', default=np.log10(3.*10**-26.), type=float)\nparser.add_argument('--annih_prod', default='BB', type=str)\nparser.add_argument('--m_low', default=np.log10(3.24 * 10.**4.), type=float)\nparser.add_argument('--m_high', default=np.log10(1.0 * 10.**7.), type=float)\nparser.add_argument('--c_low', default=np.log10(5.), type=float)\nparser.add_argument('--c_high', default=2., type=float)\nparser.add_argument('--alpha', default=0.16, type=float)\nparser.add_argument('--profile', default=1, type=int)\nparser.add_argument('--truncate', default=False)\nparser.add_argument('--arxiv_num', default=10070438, type=int)\nparser.add_argument('--b_min', default=20., type=float)\nparser.add_argument('--m_num', default=25, type=int)\nparser.add_argument('--c_num', default=15, type=int)\nparser.add_argument('--thresh', default=7 * 10.**-10., type=float)\nparser.add_argument('--M200', default=False)\nparser.add_argument('--gamma', default=0.945, type=float)\nparser.add_argument('--stiff_rb', default=False)\nparser.add_argument('--path', default=os.environ['SUBHALO_MAIN_PATH'] + '/SubhaloDetection/')\n\nargs = parser.parse_args()\n\n\ndef str2bool(v):\n if type(v) == bool:\n return v\n elif type(v) == str:\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\ndef multi_run_wrapper(args):\n return pool_map_dmax_extend(*args)\n\n\ndef pool_map_dmax_extend(arg_p):\n mass_choice, m_low, m_high, pointlike, profile, \\\n truncate, arxiv_num, gamma, m200, mass, cross_sec, \\\n annih_prod = arg_p\n masslist = np.logspace(m_low, m_high, (m_high - m_low) * 6)\n kwargs = {'point_like': pointlike, 'm_low': np.log10(masslist[mass_choice]),\n 'm_high': 2 * np.log10(masslist[mass_choice]),\n 'profile': profile, 'truncate': truncate, 'arxiv_num': arxiv_num,\n 'gam': gamma, 'm200': m200, 'mltag': m_low}\n Observable(mass, cross_sec, annih_prod, **kwargs).Table_Dmax(plike=pointlike)\n return\n\ndmax = str2bool(args.dmax)\nnobs = str2bool(args.nobs)\npointlike = str2bool(args.pointlike)\ntruncate = str2bool(args.truncate)\nm200 = str2bool(args.M200)\nstiff_rb = str2bool(args.stiff_rb)\n\nProfile_list = [\"Einasto\", \"NFW\", \"HW\"]\npf = Profile_list[args.profile]\nif pointlike:\n plike_tag = '_Pointlike'\nelse:\n plike_tag = '_Extended'\n\nif args.profile < 2:\n extra_tag = '_Truncate_' + str(args.truncate) + '_Cparam_' + str(args.arxiv_num) +\\\n '_alpha_' + str(args.alpha)\nelse:\n extra_tag = '_Gamma_{:.3f}_Stiff_rb_'.format(args.gamma) + str(stiff_rb)\n\nsimga_n_file = pf + '_mx_' + str(args.mass) + '_annih_prod_' + args.annih_prod + '_bmin_' +\\\n str(args.b_min) + plike_tag + extra_tag + '_Mlow_{:.3f}'.format(args.m_low) +\\\n args.tag + '.dat'\n\nnobs_dir = \"/Cross_v_Nobs/\"\n\nBuild_obs_class = Observable(args.mass, args.cross_sec, args.annih_prod, m_low=args.m_low, \n m_high=args.m_high, c_low=args.c_low,\n c_high=args.c_high, alpha=args.alpha, profile=args.profile, truncate=truncate,\n arxiv_num=args.arxiv_num, point_like=pointlike, gam=args.gamma,\n stiff_rb=stiff_rb, m200=m200)\n\nif dmax:\n if pointlike:\n Build_obs_class.Table_Dmax(m_num=args.m_num, c_num=args.c_num,\n threshold=args.thresh)\n else:\n num = int((args.m_high - args.m_low) * 6)\n masslist = np.logspace(args.m_low, args.m_high, (args.m_high - args.m_low) * 6)\n arg_pass = []\n for i in range(masslist.size):\n arg_hold = [i, args.m_low, args.m_high, pointlike, args.profile,\n truncate, args.arxiv_num, args.gamma, m200,\n args.mass, args.cross_sec, args.annih_prod]\n arg_pass.append(arg_hold)\n processes = 6\n runs = int(float(len(masslist)) / processes + 0.5)\n j = 0\n for i in range(runs):\n parmap(pool_map_dmax_extend, arg_pass[j:np.min([j+processes, len(masslist)])], processes=processes)\n j += processes\n \nif nobs:\n if pointlike:\n n_point_obs = Build_obs_class.N_obs(args.b_min)\n if os.path.isfile(args.path + '/Data/' + nobs_dir + simga_n_file):\n cross_sec_nobs = np.loadtxt(args.path + '/Data/' + nobs_dir + simga_n_file)\n add_to_table = np.vstack((cross_sec_nobs, [args.cross_sec, n_point_obs]))\n save_tab = add_to_table[np.lexsort(np.fliplr(add_to_table).T)]\n np.savetxt(args.path + '/Data/' + nobs_dir + simga_n_file, save_tab)\n else:\n np.savetxt(args.path + '/Data/' + nobs_dir + simga_n_file, np.array([args.cross_sec, n_point_obs]))\n else:\n n_ext_obs = Build_obs_class.N_obs(args.b_min, plike=pointlike)\n if os.path.isfile(args.path + '/Data/' + nobs_dir + simga_n_file):\n cross_sec_nobs = np.loadtxt(args.path + '/Data/' + nobs_dir + simga_n_file)\n add_to_table = np.vstack((cross_sec_nobs, [args.cross_sec, n_ext_obs]))\n save_tab = add_to_table[np.lexsort(np.fliplr(add_to_table).T)]\n np.savetxt(args.path + '/Data/' + nobs_dir + simga_n_file, save_tab)\n else:\n np.savetxt(args.path + '/Data/' + nobs_dir + simga_n_file, np.array([args.cross_sec, n_ext_obs]))\n","sub_path":"Subhalo_runner.py","file_name":"Subhalo_runner.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"5998895","text":"# helper library for rowinguard\nfrom datetime import datetime\nfrom multiprocessing import Process\nimport RPi.GPIO as GPIO\nfrom time import sleep\nimport logging\nfrom devices import EMG, MAX30102\n\nclass Rowinguard:\n\n\tdef __init__(self, interrupt, buzz):\n\t\tlogging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] [%(name)s] [%(levelname)s]: %(message)s')\n\t\tlogger = logging.getLogger('ROWINGLIB')\n\t\tself.logger = logger\n\t\tself.emg_file = self.generate_name('emg')\n\t\tself.vitals_file = self.generate_name('vitals')\n\t\tself.interrupt = interrupt\n\t\tGPIO.setmode(GPIO.BOARD)\n\t\tGPIO.setup(self.interrupt, GPIO.IN)\n\t\tGPIO.setup(buzz, GPIO.OUT)\n\t\tGPIO.output(buzz, GPIO.LOW)\n\t\tself.buzz = buzz\n\n\n\tdef generate_name(self, dataform):\n\t\tif dataform == 'emg' or dataform == 'vitals':\n\t\t\tdate = datetime.now().strftime('%Y_%m_%d-%I_%M_%S_%p_')\n\t\t\tfilename = date + dataform + '.csv'\n\t\t\treturn filename\n\t\telse:\n\t\t\tself.logger.error('invalid file type provided')\n\n\tdef start_peripherals(self, args):\n\t\tprocesses = list()\n\t\t# create a process for each target function and start it\n\t\tfor arg in args:\n\t\t\tprocess = Process(target=arg[0], args=arg[1])\n\t\t\tprocess.start()\n\t\t\tprocesses.append(process)\n\t\t# join the processes\n\t\tself.logger.debug('all processes started')\n\t\treturn processes\n\n\n\tdef alert(self, _type):\n\t\tbuzzer = GPIO.PWM(self.buzz, 1000)\n\t\tif _type == 'form':\n\t\t\tself.logger.debug('starting form alert')\n\t\t\tbuzzer.start(2)\n\t\t\tsleep(1)\n\t\t\tbuzzer.stop()\n\t\t\tsleep(1)\n\t\telif _type == 'fatigue':\n\t\t\tself.logger.debug('starting fatigue alert')\n\t\t\tbuzzer.start(2)\n\t\t\tsleep(.5)\n\t\t\tbuzzer.stop()\n\t\t\tsleep(.5)\n\t\telse:\n\t\t\tself.logger.warning('invalid alert type')\n\n\tdef start_workout(self):\n\t\t# wait for interupt here\n\t\tGPIO.wait_for_edge(self.interrupt,\tGPIO.RISING)\n\t\tself.logger.info('rising edge detected')\n\t\temg = EMG(0)\n\t\tvitals = MAX30102()\n\t\treturn emg, vitals\n\n\tdef end_workout(self):\n\t\t# wait for interupt\n\t\tGPIO.wait_for_edge(self.interrupt,\tGPIO.FALLING)\n\t\tself.logger.info('falling edge detected')\n\t\t\n\n\n\n","sub_path":"rowinglib.py","file_name":"rowinglib.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"443920276","text":"import os\nimport json\nimport random\nimport math\n\nROTATE_LEFT = \"rotate-left\"\nROTATE_RIGHT = \"rotate-right\"\nADVANCE = \"advance\"\nRETREAT = \"retreat\"\nSHOOT = \"shoot\"\nPASS = \"pass\"\n\nMOVE_UP = {\"top\" : ADVANCE, \"bottom\" : ROTATE_LEFT, \"right\" : ROTATE_LEFT ,\"left\" : ROTATE_RIGHT }\nMOVE_DOWN = {\"top\" : ROTATE_LEFT, \"bottom\" : ADVANCE, \"right\" : ROTATE_RIGHT ,\"left\" : ROTATE_LEFT }\nMOVE_RIGHT = {\"top\" : ROTATE_RIGHT, \"bottom\" : ROTATE_LEFT, \"right\" : ADVANCE ,\"left\" : ROTATE_LEFT }\nMOVE_LEFT = {\"top\" : ROTATE_LEFT, \"bottom\" : ROTATE_RIGHT, \"right\" : ROTATE_RIGHT,\"left\" : ADVANCE }\n\ndef doesCellContainWall(walls, x, y):\n for wall in walls:\n if wall[\"x\"] == x and wall[\"y\"] == y:\n return True\n return False\n\ndef wallInFrontOfPenguin(body):\n xValueToCheckForWall = body[\"you\"][\"x\"]\n yValueToCheckForWall = body[\"you\"][\"y\"]\n bodyDirection = body[\"you\"][\"direction\"]\n\n if bodyDirection == \"top\":\n yValueToCheckForWall -= 1\n elif bodyDirection == \"bottom\":\n yValueToCheckForWall += 1\n elif bodyDirection == \"left\":\n xValueToCheckForWall -= 1\n elif bodyDirection == \"right\":\n xValueToCheckForWall += 1\n return doesCellContainWall(body[\"walls\"], xValueToCheckForWall, yValueToCheckForWall)\n\ndef moveInRandom():\n return \"hello World\"\n \ndef moveTowardsPoint(body, pointX, pointY):\n penguinPositionX = body[\"you\"][\"x\"]\n penguinPositionY = body[\"you\"][\"y\"]\n plannedAction = PASS\n bodyDirection = body[\"you\"][\"direction\"]\n\n if penguinPositionX < pointX:\n plannedAction = MOVE_RIGHT[bodyDirection]\n elif penguinPositionX > pointX:\n plannedAction = MOVE_LEFT[bodyDirection]\n elif penguinPositionY < pointY:\n plannedAction = MOVE_DOWN[bodyDirection]\n elif penguinPositionY > pointY:\n plannedAction = MOVE_UP[bodyDirection]\n\n if plannedAction == ADVANCE and wallInFrontOfPenguin(body):\n plannedAction = SHOOT\n return plannedAction\n\ndef moveTowardsCenterOfMap(body):\n centerPointX = math.floor(body[\"mapWidth\"] / 2)\n centerPointY = math.floor(body[\"mapHeight\"] / 2)\n return moveTowardsPoint(body, centerPointX, centerPointY)\n\n# Check for enemy\ndef checkEnemy(body):\n try:\n body[\"enemies\"][0][\"x\"]\n return True\n except KeyError:\n return False \n\n# Move towards enemy\ndef moveTowardsEnemy(body):\n if checkEnemy(body):\n enemy = body[\"enemies\"][0]\n return moveTowardsPoint(body, enemy[\"x\"], enemy[\"y\"])\n else: \n return moveTowardsPower(body)\n \ndef checkForBonus(body):\n power = body[\"bonusTiles\"]\n if len(power) < 1:\n return False\n else:\n return True\n\ndef moveTowardsPower(body):\n if checkForBonus(body):\n power = body[\"bonusTiles\"]\n powerList = 10000000000\n best_x = power[0][\"x\"]\n best_y = power[0][\"y\"]\n for i in power:\n temp = getMagnitude(body, i[\"x\"], i[\"y\"])\n if temp < powerList:\n powerList = temp\n return moveTowardsPoint(body, best_x, best_y)\n else: \n return moveAround(body)\n\ndef moveAround(body):\n return moveTowardsCenterOfMap(body)\n\ndef getMagnitude(body, x, y):\n you_x = body[\"you\"][\"x\"]\n you_y = body[\"you\"][\"y\"]\n vector_y = y - you_y\n vector_x = x - you_x\n magnitude = math.sqrt(vector_x**2 + vector_y**2)\n return magnitude\n\ndef chooseAction(body):\n action = moveTowardsEnemy(body)\n return action\n\nenv = os.environ\nreq_params_query = env['REQ_PARAMS_QUERY']\nresponseBody = open(env['res'], 'w')\n\nresponse = {}\nreturnObject = {}\nif req_params_query == \"info\":\n returnObject[\"name\"] = \"Pingu\"\n returnObject[\"team\"] = \"Team Python\"\nelif req_params_query == \"command\": \n body = json.loads(open(env[\"req\"], \"r\").read())\n returnObject[\"command\"] = chooseAction(body)\n\nresponse[\"body\"] = returnObject\nresponseBody.write(json.dumps(response))\nresponseBody.close()","sub_path":"MyFirstPenguin/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"579774903","text":"import argparse\nimport datetime\nimport os\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom numpy.distutils.fcompiler import str2bool\n\nimport data_deal\nimport word2vec_helpers\nfrom Model2 import TextCNN\n\nparser = argparse.ArgumentParser(description='LSTM for Classify')\nparser.add_argument('--dev_sample_percentage', type=float, default=.1, help='Percentage of the training data to use for validation')\nparser.add_argument('--positive_data_file', type=str, default='data/ham_100.utf8', help='train data source')\nparser.add_argument('--negative_data_file', type=str, default='data/spam_100.utf8', help='test data source')\nparser.add_argument('--num_classes', type=int, default='2', help='label')\nparser.add_argument('--sequence_length', type=int, default='96', help='length')\nparser.add_argument('--embedding_size', type=int, default=128, help='#sample of each minibatch')\nparser.add_argument('--filter_sizes', type=str, default= \"3,4,5\", help='#')\nparser.add_argument('--num_filters', type=int, default=128, help='#dim of hidden state')\nparser.add_argument('--dropout_keep_prob', type=float, default=0.5, help='dropout keep_prob')\n\nparser.add_argument('--l2_reg_lambda', type=float, default=0.0, help='L2 regularization lambda (default: 0.0)')\nparser.add_argument('--batch_size', type=int, default=64, help='Batch Size (default: 64)')\nparser.add_argument('--num_epochs', type=int, default=200, help='Number of training epochs (default: 200)')\nparser.add_argument('--evaluate_every', type=int, default=100, help='Evalue model on dev set after this many steps (default: 100)')\nparser.add_argument('--checkpoint_every', type=int, default=100, help='Save model after this many steps (defult: 100)')\nparser.add_argument('--num_checkpoints', type=int, default=5, help='Number of checkpoints to store (default: 5)')\n\nparser.add_argument('--allow_soft_placement', type=str2bool, default=True, help='Allow device soft device placement')\n\nparser.add_argument('--log_device_placement', type=str2bool, default=False, help='Allow device soft device placement')\n\nparser.add_argument('--save_model', type=str, default='best_model', help='train data source')\nargs = parser.parse_args()\n\ntimestamp = str(int(time.time()))\nout_dir = os.path.join(os.path.curdir, args.save_model, timestamp)\nprint(\"Writing to {}\\n\".format(out_dir))\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n# Load data\nprint(\"Loading data...\")\npositive_data_file = os.path.join('.', args.positive_data_file)\nnegative_data_file=os.path.join('.', args.negative_data_file)\n# print(positive_data_file)\n\nx_text, y = data_deal.load_positive_negative_data_files(positive_data_file, negative_data_file)\nprint(x_text)\n\nsentences, max_document_length = data_deal.padding_sentences(x_text, '')\nx = np.array(word2vec_helpers.embedding_sentences(sentences, embedding_size = args.embedding_size, file_to_save = os.path.join(out_dir, 'trained_word2vec.model')))\n\nprint(\"x.shape = {}\".format(x.shape))\nprint(\"y.shape = {}\".format(y.shape))\n\n# # Save params\ntraining_params_file = os.path.join(out_dir, 'training_params.pickle')\nparams = {'num_classes' : args.num_classes, 'max_document_length' : max_document_length}\ndata_deal.saveDict(params, training_params_file)\n\n# Shuffle data randomly\nnp.random.seed(10)\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffled = x[shuffle_indices]\ny_shuffled = y[shuffle_indices]\n\n# Split train/test set\n# TODO: This is very crude, should use cross-validation\ndev_sample_index = -1 * int(args.dev_sample_percentage * float(len(y)))\nx_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]\ny_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]\nprint(\"Train/Dev split: {:d}/{:d}\".format(len(y_train), len(y_dev)))\n\n# Training\n# =======================================================\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement = args.allow_soft_placement,\n\tlog_device_placement = args.log_device_placement)\n sess = tf.Session(config = session_conf)\n with sess.as_default():\n # cnn = TextCNN(\n\t # sequence_length = x_train.shape[1],\n\t # num_classes = y_train.shape[1],\n\t # embedding_size = args.embedding_dim,\n\t # filter_sizes = list(map(int, args.filter_sizes.split(\",\"))),\n\t # num_filters = args.num_filters,\n\t # l2_reg_lambda = args.l2_reg_lambda)\n cnn=TextCNN(args)\n\n\t# Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(1e-3)\n grads_and_vars = optimizer.compute_gradients(cnn.loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n # Keep track of gradient values and sparsity (optional)\n grad_summaries = []\n for g, v in grads_and_vars:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n\n # Train Summaries\n train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Dev summaries\n dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: args.dropout_keep_prob\n }\n # print('+++++++++++++++++++++++',x_batch)\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n\n def dev_step(x_batch, y_batch, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 1.0\n }\n step, summaries, loss, accuracy = sess.run(\n [global_step, dev_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n if writer:\n writer.add_summary(summaries, step)\n\n # Generate batches\n batches = data_deal.batch_iter(\n list(zip(x_train, y_train)), args.batch_size, args.num_epochs)\n\n # Training loop. For each batch...\n for batch in batches:\n # print(batch)\n x_batch, y_batch = zip(*batch)\n # print('11111',type(x_batch))\n train_step(x_batch, y_batch)\n current_step = tf.train.global_step(sess, global_step)\n if current_step % args.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n dev_step(x_dev, y_dev, writer=dev_summary_writer)\n print(\"\")\n if current_step % args.checkpoint_every == 0:\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))\n\n\n\n\n# # print(x_text)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"236450363","text":"import os\nimport sys\npath = os.environ.get('TRAVIS_BUILD_DIR')\nsys.path.insert(0, path+'/protlearn')\nimport numpy as np\n\nfrom preprocessing import txt_to_df\nfrom feature_engineering import aaindex3\n\n\ndef test_aaindex3():\n \"Test AAIndex3\"\n \n # load data\n df = txt_to_df(path+'/tests/docs/test_seq.txt', 0)\n \n # get aaindex2\n aaind3 = aaindex3(df)\n \n # test shape\n assert aaind3.shape == (4, 43)\n \n # test some triangular indices\n TANS760101 = np.array([-4.72, -5.975, -4.18333, -4.04])\n GODA950101 = np.array([np.nan, -.05, -.1333, -.14])\n ZHAC000106 = np.array([.196, -.34875, .46666, .972])\n np.testing.assert_equal(np.round(aaind3['TANS760101'], 3),\\\n np.round(TANS760101, 3))\n # this column contains NaNs\n assert ('GODA950101' in aaind3) == False\n np.testing.assert_equal(np.round(aaind3['ZHAC000106'], 3),\\\n np.round(ZHAC000106, 3))\n \n # test some square indices\n ZHAC000102 = np.array([-.408, -1.415, .475, 1.532])\n ZHAC000103 = np.array([-.052, -.625, .59166, 1.096])\n ZHAC000105 = np.array([-.242, -.72, .17, .952])\n np.testing.assert_equal(np.round(aaind3['ZHAC000102'], 3),\\\n np.round(ZHAC000102, 3))\n np.testing.assert_equal(np.round(aaind3['ZHAC000103'], 3),\\\n np.round(ZHAC000103, 3))\n np.testing.assert_equal(np.round(aaind3['ZHAC000105'], 3),\\\n np.round(ZHAC000105, 3))\n \n # test standardization (zscore)\n aaind3_z = aaindex3(df, 'zscore')\n # test mean = 0\n for i in range(aaind3_z.shape[0]):\n assert abs(round(aaind3_z.iloc[:,1].mean())) == 0\n # test std --> 1\n for i in range(aaind3_z.shape[0]):\n assert round(aaind3_z.iloc[:,i].std(), 1) ==\\\n round(aaind3_z.iloc[:,0].std(), 1)\n \n # test standardization (minmax)\n aaind3_mm = aaindex3(df, 'minmax')\n # test minimum and maximum\n for i in range(aaind3_mm.shape[0]):\n assert round(aaind3_mm.iloc[:,i].min()) == 0\n assert round(aaind3_mm.iloc[:,i].max()) == 1","sub_path":"tests/test_aaindex3.py","file_name":"test_aaindex3.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"409122810","text":"\r\nfrom time import sleep\r\n\r\nfrom base.base_page import BasePage\r\n\r\n\r\nclass EcjiaGoogactivityPage(BasePage):\r\n LOGIN_NAME = 'i,username'\r\n LOGIN_PWD = 'i,password'\r\n LOGIN_BUTTON = 'x,//*[@id=\"login_form\"]/div[3]/button'\r\n\r\n ACTIVITY_ADMIN = 'x,//*[@id=\"side_accordion\"]/div[4]/div[1]/a'\r\n GOODS_ACTIVITY = 'x,//*[@id=\"collapse2\"]/div/ul/li[1]/a'\r\n EDIT_ADD = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/table/tbody/tr[1]/td[3]/div/a[1]'\r\n GOODS_KEYWORDS = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[1]/div/input[1]'\r\n GOODS_SEARCH = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[1]/div/input[3]'\r\n\r\n SELECT_GOODSACTIVITY = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[2]/div/div'\r\n SELECT_CL = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[2]/div/div/div/ul/li[%s]'\r\n\r\n START_TIME = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[3]/div/input'\r\n\r\n END_TIME = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[4]/div/input'\r\n ACTIVITY_PRICE = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[5]/div/input'\r\n GOODS_UPDATE = 'x,//*[@id=\"contentwrapper\"]/div/div[3]/div/form/fieldset/div[6]/div/input[1]'\r\n\r\n EDIT_SUC = 'x,//*[@id=\"contentwrapper\"]/div/div[2]'\r\n def login(self, name, pwd):\r\n driver = self.base_driver\r\n sleep(2)\r\n driver.type(self.LOGIN_NAME, name)\r\n driver.type(self.LOGIN_PWD, pwd)\r\n driver.click(self.LOGIN_BUTTON)\r\n\r\n def add_goodsactivity(self, login_data):\r\n driver = self.base_driver\r\n driver.click(self.ACTIVITY_ADMIN)\r\n sleep(2)\r\n driver.click(self.GOODS_ACTIVITY)\r\n sleep(2)\r\n driver.click(self.EDIT_ADD)\r\n sleep(2)\r\n driver.type(self.GOODS_KEYWORDS, login_data['keywords'])\r\n sleep(2)\r\n driver.click(self.GOODS_SEARCH)\r\n sleep(2)\r\n driver.click(self.SELECT_GOODSACTIVITY)\r\n sleep(2)\r\n self.select(login_data['num'])\r\n sleep(2)\r\n driver.type(self.START_TIME, login_data['start_time'])\r\n sleep(2)\r\n driver.click(self.START_TIME)\r\n sleep(2)\r\n driver.type(self.END_TIME, login_data['end_time'])\r\n sleep(2)\r\n driver.click(self.END_TIME)\r\n sleep(2)\r\n driver.type(self.ACTIVITY_PRICE, login_data['price'])\r\n driver.click(self.GOODS_UPDATE)\r\n driver.click(self.ACTIVITY_ADMIN)\r\n\r\n\r\n\r\n\r\n def select(self, num):\r\n '''\r\n 选择分类\r\n :return: \r\n '''\r\n driver = self.base_driver\r\n driver.click(self.SELECT_CL % (num))\r\n","sub_path":"01_ECO2O/src/ecjia_auto/pages/admincp/promotion/promotion_goods_activity_page.py","file_name":"promotion_goods_activity_page.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"332901498","text":"alpha1 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n\r\nalpha2 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K','L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y','Z']\r\n\r\ndef alphabet_position(letter): \r\n if letter in alpha1:\r\n letter_ord = alpha1.index(letter) \r\n elif letter in alpha2:\r\n letter_ord = alpha2.index(letter) \r\n else:\r\n return letter\r\n return letter_ord\r\n\r\ndef rotate_character(char,rot):\r\n if char in alpha1:\r\n char_idx = alphabet_position(char)\r\n new_pos = (char_idx + int(rot))%26\r\n new_letter = alpha1[new_pos]\r\n elif char in alpha2:\r\n char_idx = alphabet_position(char)\r\n new_pos = (char_idx + int(rot))%26\r\n new_letter = alpha2[new_pos]\r\n\r\n else:\r\n return char\r\n return new_letter","sub_path":"crypto/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"443930523","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nStandard forms\n\"\"\"\n\nfrom flask import render_template, request, Markup, abort, flash, redirect, json, escape, url_for\nimport flask.ext.wtf as wtf\nfrom coaster import sanitize_html\n\n\nclass RichText(wtf.TextArea):\n \"\"\"\n Rich text widget.\n \"\"\"\n def __call__(self, field, **kwargs):\n c = kwargs.pop('class', '') or kwargs.pop('class_', '')\n if c:\n kwargs['class'] = u'%s %s' % ('richtext', c)\n else:\n kwargs['class'] = 'richtext'\n return super(RichText, self).__call__(field, **kwargs)\n\n\nclass SubmitInput(wtf.SubmitInput):\n \"\"\"\n Submit input with pre-defined classes.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.css_class = kwargs.pop('class', '') or kwargs.pop('class_', '')\n super(SubmitInput, self).__init__(*args, **kwargs)\n\n def __call__(self, field, **kwargs):\n c = kwargs.pop('class', '') or kwargs.pop('class_', '')\n kwargs['class'] = u'%s %s' % (self.css_class, c)\n return super(SubmitInput, self).__call__(field, **kwargs)\n\n\nclass RichTextField(wtf.TextAreaField):\n \"\"\"\n Rich text field.\n \"\"\"\n widget = RichText()\n\n # TODO: Accept valid_tags as a init parameter\n\n def process_formdata(self, valuelist):\n super(RichTextField, self).process_formdata(valuelist)\n # Sanitize data\n self.data = sanitize_html(self.data)\n\n\nclass Form(wtf.Form):\n \"\"\"\n Form with additional methods.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Form, self).__init__(*args, **kwargs)\n # Make editing objects easier\n self.edit_obj = kwargs.get('obj')\n\n\nclass ConfirmDeleteForm(Form):\n \"\"\"\n Confirm a delete operation\n \"\"\"\n # The labels on these widgets are not used. See delete.html.\n delete = wtf.SubmitField(u\"Delete\")\n cancel = wtf.SubmitField(u\"Cancel\")\n\n\ndef render_form(form, title, message='', formid='form', submit=u\"Submit\", cancel_url=None, ajax=False):\n if request.is_xhr and ajax:\n return render_template('baseframe/ajaxform.html', form=form, title=title,\n message=message, formid=formid, submit=submit,\n cancel_url=cancel_url)\n else:\n return render_template('baseframe/autoform.html', form=form, title=title,\n message=message, formid=formid, submit=submit,\n cancel_url=cancel_url, ajax=ajax)\n\n\ndef render_message(title, message):\n if request.is_xhr:\n return Markup(\"%s
\" % escape(message))\n else:\n return render_template('baseframe/message.html', title=title, message=message)\n\n\ndef render_redirect(url, code=302):\n if request.is_xhr:\n return render_template('baseframe/redirect.html', quoted_url=Markup(json.dumps(url)))\n else:\n return redirect(url, code=code)\n\n\ndef render_delete_sqla(ob, db, title, message, success=u'', next=None):\n if not ob:\n abort(404)\n form = ConfirmDeleteForm()\n if form.validate_on_submit():\n if 'delete' in request.form:\n db.session.delete(ob)\n db.session.commit()\n if success:\n flash(success, \"success\")\n return render_redirect(next or url_for('index'))\n return render_template('baseframe/delete.html', form=form, title=title, message=message)\n","sub_path":"baseframe/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"625058217","text":"from django.shortcuts import render\nfrom .form import KendalaForm\nfrom .models import Kendala\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\n# Create your views here.\n\ndef index(request):\n \n datas = Kendala.objects.all()\n\n return render(request, 'index.html',{'datas':datas})\n\ndef daftar(request):\n if request.method == 'POST':\n form = KendalaForm(request.POST)\n if form.is_valid:\n post = form.save(commit=False)\n post.save()\n\n return HttpResponseRedirect(reverse('kendala:list'))\n else:\n form = KendalaForm()\n\n return render(request, 'covid19_apps/daftar.html')","sub_path":"covid19_apps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"455930884","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 28 21:28:53 2017\n\n@author: leblanckh\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\ndataFolder = \"/Users/leblanckh/data/KO_WT_OpenField_RawData\"\nresultsColumns = [\"Subject\", \"Group\", \"Gender\", \"Timestamp\", \"Notes\", \"Time in center (%)\", \"Time in center while moving (%)\", \"Number of movements\", \"Average duration of movements (s)\", \"Total time moving (s)\", \"Speed while moving (cm/s)\", \"Average Velocity (cm/s)\", \"Number of movements in center\"]\nmyDataList = []\nos.chdir(dataFolder)\nFileList = os.listdir(dataFolder)\n\nNO_BASELINE_10MIN_SESSION_TRIAL_LENGTH = 601\nONE_MINUTE_AS_FRAMES = 1798\n\ndef Extract_Header_Info(dataframe):\n \"\"\"\n Identifies the length of the header,Extracts independent variable \n information from the header, and assigns the column names to the row that\n contains the column information in the header\n \n inputs: dataframe\n Returns: NumberofHeaderRows, Subject, Genotype, Gender, Timestamp, Notes, \n \"\"\"\n NumberofHeaderRows = int(dataframe.iloc[0,1])\n Header = dataframe.iloc[31:NumberofHeaderRows - 3,:]\n SubjectFilters = [\"Subject\", \"Mouse\",\"subject\", \"mouse\"]\n Subject = Header[Header[0].isin(SubjectFilters)].iloc[0,1]\n GenotypeFilters = [\"Genotype\", \"Group\", \"genotype\", \"group\", \"genotype/group\"]\n Genotype = Header[Header[0].isin(GenotypeFilters)].iloc[0,1]\n GenderFilters = [\"Sex\", \"Gender\", \"sex\", \"gender\"]\n Gender = Header[Header[0].isin(GenderFilters)].iloc[0,1]\n TimestampFilters = [\"Timestamp\", \"timestamp\"]\n Timestamp = Header[Header[0].isin(TimestampFilters)].iloc[0,1]\n NotesFilters = [\"Notes\", \"notes\"]\n Notes = Header[Header[0].isin(NotesFilters)].iloc[0,1]\n #print (\"Subject:\",Subject, \"Genotype:\", Genotype,\"Gender:\", Gender,\"Timestamp:\", Timestamp, \"Notes\", Notes)\n ColumnNames = dataframe.iloc[[NumberofHeaderRows - 2],:].values.tolist()\n dataframe.columns = ColumnNames\n return NumberofHeaderRows,Subject,Genotype,Gender,Timestamp,Notes\n\ndef Create_DataBlock(dataframe, cutoffTimeSeconds, shortTimeIdx, longTimeIdx):\n \"\"\"\n Creates a dataFrame, theData, that contains the actual raw data values.\n It also replaces missing values in the movement column of data with NaN, then interpolates\n returns: theData, movement column data (isMovingData), LED on column (LEDon)\n \"\"\"\n finalColumnName = dataframe.columns[-1]\n initialColumnName = dataframe.columns[0]\n\n if dataframe['Trial time'].iloc[-1] = 0.5] = 1\n binaryDataColumn[binaryDataColumn < 0.5] = 0\n \n return binaryDataColumn\n\ndef Binary_Data_Transition_Point_Finder(binaryDataColumn):\n \"\"\"\n Uses a diff method to find when 0 changes to 1 and vice versa in binary data column\n Defines these transitions as either the starting point or ending point of an action or state, respectively\n and sends that index value to a list\n Also sets the first and last value of the column to 0 for edge case handling\n \n input: binaryDataColumn\n Returns: a list of binaryDataStartPoints and binaryDataEndPoints\n \"\"\"\n binaryDataColumn.iat[0] = 0\n binaryDataColumn.iat[-1] = 0\n binaryDataTrans = binaryDataColumn.diff()\n binaryDataStartingPoints = binaryDataTrans[binaryDataTrans == 1].index.tolist()\n binaryDataEndingPoints = binaryDataTrans[binaryDataTrans == -1].index.tolist()\n return binaryDataStartingPoints, binaryDataEndingPoints\n \ndef Movement_Analysis(InZoneColumn,VelocityColumn,MoveStart,MoveEnd):\n \"\"\"\n Sets up the initial conditions, loops over the movement data and counts movement blocks,\n duration and velocity,identifies movements into the center, and runs all of the calculations\n \n inputs: inZoneColumn, Velocity Column, and the starting and ending points of the moving column\n returns: PercentTimeinCenter, PercentTimeinCenterMoving,MovementBlocks, AvgMoveDuration, \n TotalMoveDuration, AvgVelocityMoving, AvgVelocity,CenterTrue\n \"\"\"\n CenterTrue = 0\n inCenterCutOff = 15\n FramesCenterMoving = 0\n MovementBlocks = 0\n TotalMoveDuration = 0\n TotalVelocity = 0\n TotalFramesMoving = 0\n \n #loop over the movement data and identify if mouse enters Center\n if len(MoveStart) != len(MoveEnd):\n print (\"Uneven start and end pairs. There are\", (len(MoveStart)), \"starting points and\" , (len(MoveEnd)) ,\"MoveEnd\" )\n for i in range(len(MoveStart)):\n currentStart = MoveStart[i]\n currentEnd = MoveEnd[i] - 1\n #NOTE: MoveEnd are one frame beyond the end of the movement. To correct for this, I have adjusted the current end point back one frame.\n MovementBlocks += 1\n MovementDuration = (currentEnd-currentStart)/30\n TotalMoveDuration = TotalMoveDuration + MovementDuration\n FramesMoving = (currentEnd-currentStart)\n TotalFramesMoving = TotalFramesMoving + FramesMoving\n MoveVelocity = sum (Velocity.loc[currentStart:currentEnd])\n TotalVelocity = TotalVelocity + MoveVelocity\n isCenterSpan = isInCenter[currentStart:currentEnd]\n numCenter = len(isCenterSpan[isCenterSpan == 1])\n if numCenter > inCenterCutOff:\n CenterTrue +=1\n FramesCenterMoving = FramesCenterMoving + numCenter\n \n PercentTimeinCenterMoving = FramesCenterMoving/len(isInCenter)*100\n AvgMoveDuration = TotalMoveDuration/MovementBlocks\n AvgVelocityMoving = TotalVelocity/TotalFramesMoving\n PercentTimeinCenter = sum (isInCenter[isInCenter == 1])/len(isInCenter)*100\n AvgVelocity = sum (Velocity)/len(Velocity)\n return PercentTimeinCenter, PercentTimeinCenterMoving, MovementBlocks, AvgMoveDuration, \\\n TotalMoveDuration, AvgVelocityMoving, AvgVelocity, CenterTrue\n\n\nfor File in FileList:\n if not File.endswith('.xlsx'):\n print (\"skipping file named\", File)\n continue\n df = pd.read_excel(File, header = None)\n \n NumHead,Sbj,Gt,Sex,DateTime,Note = Extract_Header_Info(df)\n NB_10MIN = NumHead + 1\n ONE_MIN_BASE = NumHead + 1801\n DataBlock = Create_DataBlock(df, NO_BASELINE_10MIN_SESSION_TRIAL_LENGTH,NB_10MIN,ONE_MIN_BASE)\n isMovingData = DataBlock.loc[:,'Movement(Moving / Center-point)']\n isInCenter = DataBlock.loc[:,'In zone']\n Velocity = DataBlock.loc [:, 'Velocity']\n startingPoints,endingPoints = Binary_Data_Transition_Point_Finder(isMovingData)\n PerTimeCenter, PerTimeCenterMove,Moves,AvgMoveTime,TotalMoveTime,AvgVelMove,AvgVel,inCenter \\\n = Movement_Analysis(isInCenter,Velocity,startingPoints,endingPoints)\n \n Dataz = [Sbj,Gt, Sex, DateTime, Note, PerTimeCenter, PerTimeCenterMove, \\\n Moves, AvgMoveTime, TotalMoveTime, AvgVelMove, AvgVel, \\\n inCenter]\n myDataList.append(Dataz)\n print (myDataList)\n \nresultsDF = pd.DataFrame(data = myDataList, columns=resultsColumns)\nos.chdir(\"/Users/leblanckh/data/KO_WT_OpenField_RawData/OutputFiles\")\nwriter = pd.ExcelWriter('KO_and_WT_OpenField_Movement_Analysis.xlsx')\nresultsDF.to_excel(writer,'Sheet1')\nwriter.save()\n\n\n","sub_path":"OpenFieldMovingCenter.py","file_name":"OpenFieldMovingCenter.py","file_ext":"py","file_size_in_byte":8257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"617816865","text":"# 1.使用 deque(maxlen=N) 构造函数会新建一个固定大小的队列。当新的元素加入并且这个队列已满的时候, 最老的元素会自动被移除掉\n# 在队列两端插入或删除元素时间复杂度都是 O(1) ,区别于列表,在列表的开头插入或删除元素的时间复杂度为 O(N) \nfrom collections import deque, Iterator\nfrom itertools import islice\nimport time\n\nq = deque(maxlen=2)\nq.append(1)\nq.append(2)\n# print(q)\nq.append(3)\n# print(q)\n\nq.appendleft(0)\n# print(q)\nq.pop()\n# print(q)\nq.popleft()\n\n\n# 2.处理大文件读取部分数据的解决方案,使用生成器,读取文件程序和使用文件程序解耦\n# islice切片的快速选择\ndef get_words(path):\n with open(path, 'r') as f:\n line = f.readline().strip()\n while line:\n line = f.readline().strip()\n yield line\n\n\ndef deal_words(words, start, offset):\n if not isinstance(words, Iterator):\n return\n for i in islice(words, start, start + offset):\n print(i)\n\n\npath = '/Users/alpha/Desktop/entname.txt'\nt1 = time.time()\nresults = get_words(path)\ndeal_words(results, 10000, 12000)\nt2 = time.time()\nt = t1 - t2\nprint(t)\n\n\n# 3.查找最大或最小的 N 个元素\n# 堆数据结构最重要的特征是 heap[0] 永远是最小的元素。并且剩余的元素可以很容易的通过调用 heapq.heappop() 方法得到,\n# 该方法会先将第一个元素弹出来,然后用下一个最小的元素来取代被弹出元素(这种操作时间复杂度仅仅是 O(log N),N 是堆大小)\nimport heapq\nportfolio = [\n {'name': 'IBM', 'shares': 100, 'price': 91.1},\n {'name': 'AAPL', 'shares': 50, 'price': 543.22},\n {'name': 'FB', 'shares': 200, 'price': 21.09},\n {'name': 'HPQ', 'shares': 35, 'price': 31.75},\n {'name': 'YHOO', 'shares': 45, 'price': 16.35},\n {'name': 'ACME', 'shares': 75, 'price': 115.65}\n]\ncheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price'])\nexpensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])\n# 进行堆排序\nheap = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]\nheapq.heapify(heap)\n\n# 实现一个优先级队列\n# Python 在做元组比较时候,如果前面的比较已经可以确定结果了, 后面的比较操作就不会发生了\n# heapq.heappush() 和 heapq.heappop() 分别在队列 _queue 上插入和删除第一个元素, 并且队列 _queue 保证第一个元素拥有最高优先级\n\nimport heapq\n\nclass PriorityQueue:\n def __init__(self):\n self._queue = []\n self._index = 0\n\n def push(self, item, priority):\n heapq.heappush(self._queue, (-priority, self._index, item))\n self._index += 1\n\n def pop(self):\n return heapq.heappop(self._queue)[-1]\n\n\n# 4.字典中的键映射多个值 (list or set)\n# 可以很方便的使用 collections 模块中的 defaultdict 来构造这样的字典。 \n# defaultdict 的一个特征是它会自动初始化每个 key 刚开始对应的值,所以你只需要关注添加元素操作了\n# defaultdict 会自动为将要访问的键(就算目前字典中并不存在这样的键)创建映射实体\nfrom collections import defaultdict\n\nd = defaultdict(list)\nd['a'].append(1)\nd['a'].append(2)\nd['b'].append(4)\n\nd = defaultdict(set)\nd['a'].add(1)\nd['a'].add(2)\nd['b'].add(4)\n\n\n# 5.字典排序\n# 为了能控制一个字典中元素的顺序,你可以使用 collections 模块中的 OrderedDict 类。 在迭代操作的时候它会保持元素被插入时的顺序\n# OrderedDict 内部维护着一个根据键插入顺序排序的双向链表。每次当一个新的元素插入进来的时候, 它会被放到链表的尾部。对于一个已经存在的键的重复赋值不会改变键的顺序。\n# 需要注意的是,一个 OrderedDict 的大小是一个普通字典的两倍,因为它内部维护着另外一个链表。\nfrom collections import OrderedDict\n\nd = OrderedDict()\nd['foo'] = 1\nd['bar'] = 2\n\n\n# 6.字典的运算\n# 为了对字典值执行计算操作,通常需要使用 zip() 函数先将键和值反转过来。\n# zip() 函数创建的是一个只能访问一次的迭代器\n\n\n# 序列中出现次数最多的元素\n# collections.Counter 类就是专门为这类问题而设计的, 它甚至有一个有用的 most_common() 方法直接给了你答案\nwords = [\n 'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',\n 'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',\n 'eyes', \"don't\", 'look', 'around', 'the', 'eyes', 'look', 'into',\n 'my', 'eyes', \"you're\", 'under'\n]\nfrom collections import Counter\nword_counts = Counter(words)\n# 出现频率最高的3个单词\ntop_three = word_counts.most_common(3)\nprint(top_three)\n# Outputs [('eyes', 8), ('the', 5), ('look', 4)]\n\n\ndef add(a: int, b:int) -> int:\n return a + b\n\nc = add(3, 4)\nprint(c)\n\n\ndef t():\n n = 10\n def inner():\n nonlocal n\n n += 1\n print(n)\n return n\n inner()\n print(n)\n return n\n\nprint(t())\n","sub_path":"utils/normal.py","file_name":"normal.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"221924950","text":"import requests\nimport numpy as np\n\nCONFIDENCE_THRESHOLD = 0.00005\n\n# find the closest point (Euclidean distance) from a list of points\ndef closest_node(node, nodes):\n nodes = np.asarray(nodes)\n dist_2 = np.sum((nodes - node)**2, axis=1)\n return np.argmin(dist_2)\n\ndef get_mean_face_position(faces):\n mean_faces = []\n for face in faces:\n result = []\n for point in face:\n if(point[2] > CONFIDENCE_THRESHOLD):\n result.append((point[0], point[1]))\n result = np.array(result)\n print(np.shape(result))\n if(np.shape(result)[0] > 0):\n mean_faces.append(np.mean(result, axis=0))\n print(mean_faces)\n rounded_mean_faces = np.rint(mean_faces)\n print(rounded_mean_faces)\n return rounded_mean_faces\n\nPOSE_DETECTION_URL = \"http://localhost:5000/get_pose\"\nr = requests.get(POSE_DETECTION_URL)\n\npose = r.json()\n\n# returns an array of points(x, y, score) for each person\nbodies = np.array(pose['body'])\nfaces = np.array(pose['face'])\nleft_hand = np.array(pose['left_hand'])\nright_hand = np.array(pose['right_hand'])\n\nprint(\"shapes...\")\nprint (np.shape(bodies))\nprint (np.shape(faces))\nprint (np.shape(left_hand))\nprint (np.shape(right_hand))\nprint(\".............\")\n\nmean_faces = get_mean_face_position(faces)\n\npoint = np.array([60, 320])\nprint(closest_node(point, mean_faces))\n\n","sub_path":"test_pose_detection_server.py","file_name":"test_pose_detection_server.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"523697675","text":"import django\nfrom ajax.models import UploadedImage\nfrom django import template\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import DatabaseError\nfrom webapp.models import Food_Type, Special_Condition, Temporality\n\nregister = template.Library()\n\n@register.simple_tag(takes_context=True)\ndef step_picture(context, form, token):\n value = form.get(\"step-picture-id_\"+str(token), '')\n context['stepimageid']=value\n return value\n\n@register.simple_tag\ndef translate_food_type(food_type):\n lang=django.utils.translation.get_language().split('-')[0]\n return Food_Type.objects.get(code=food_type).name_dict.get(lang)\n\n@register.simple_tag\ndef translate_special_conditions(special_conditions):\n lang=django.utils.translation.get_language().split('-')[0]\n return Special_Condition.objects.get(code=special_conditions).name_dict.get(lang)\n\n@register.simple_tag\ndef translate_temporality(temporality):\n lang=django.utils.translation.get_language().split('-')[0]\n return Temporality.objects.get(code=temporality).name_dict.get(lang)\n\n@register.tag(\"picture_from_id\")\ndef do_picture_from_id(parser, token):\n try:\n # split_contents() knows not to split quoted strings.\n tag_name, picture_id = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\"%r tag requires a single argument\" % token.contents.split()[0])\n\n return PictureUrlNode(picture_id)\n\n\nclass PictureUrlNode(template.Node):\n def __init__(self, picture_id):\n self.picture_id = template.Variable(picture_id)\n\n def render(self, context):\n try:\n picture_id = self.picture_id.resolve(context)\n if picture_id:\n image = UploadedImage.objects.get(id=picture_id).image\n if image and image.url:\n return image.url\n else:\n return ''\n except template.VariableDoesNotExist:\n return ''\n except DatabaseError:\n return ''\n except ObjectDoesNotExist:\n return ''\n","sub_path":"webapp/templatetags/recipe_form.py","file_name":"recipe_form.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"417590390","text":"from datetime import datetime\r\nfrom ..contextHolder import ContextHolder\r\n\r\n__author__ = 'nprikazchikov'\r\n\r\n\r\nclass Screenshot(object):\r\n name = str\r\n url = str\r\n\r\n def __init__(self, name, url):\r\n self.name = name\r\n self.url = url\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __str__(self):\r\n return '{' + '\"name\":\"{name}\",\"url\":\"{url}\"'.format(name=self.name,\r\n url=self.url) + '}'\r\n\r\n\r\nclass ScreenshotMaker:\r\n def take_screenshot(self, message=\"\"):\r\n \"\"\"\r\n :param message: comment for screenshot to display. DO screenshot\r\n flag has to be set to True in ContextHolder\r\n :return: Screenshot|None\r\n \"\"\"\r\n if ( ContextHolder.get_do_screenshot() ):\r\n working_dir = ContextHolder.get_workspace_path()\r\n path = \"{path}{name}.png\".format(\r\n path=\"/../result/images/\",\r\n name=datetime.now().strftime(\"%Y_%m_%d__%H_%M_%S\"))\r\n if ContextHolder.get_driver().save_screenshot(working_dir + path):\r\n return Screenshot(\r\n \"Test suite {}; Test case {}; {}\".format(\r\n ContextHolder.get_test_suite(),\r\n ContextHolder.get_test_case(),\r\n message\r\n ),\r\n path)\r\n else:\r\n return None\r\n else:\r\n return None","sub_path":"poteen/log/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"142031149","text":"# coding: utf-8\nfrom django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom sampleapp import views\n\nurlpatterns = [\n url(r'^login/$', views.do_login),\n url(r'^job/$', views.DoTaskView.as_view()),\n url(r'^job/(?P[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12})$', views.DoTaskView.as_view()),\n url(r'^syncjob/$', views.DoSyncTaskView.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"sampleapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"591745987","text":"import netdef_slim as nd\nimport os\nimport json\nfrom collections import namedtuple\nfrom model_genotype import model_func\nfrom netdef_slim.schedules.named_schedule import NamedSchedule\n\nnd.evo_manager.set_training_dir(os.path.join(os.path.dirname(__file__), 'training'))\n\n# add evolution\nmax_steps = 600000\nevo = nd.Evolution('FT3D', [], NamedSchedule('genotype', max_steps))\nnd.add_evo(evo)\n\nnd.config['model_fn'] = model_func\nnd.config['num_gpus'] = 1\nnd.config['test_batch_size'] = 1\nnd.config['no_bn'] = True\n\nGenotype = namedtuple('Genotype', 'normal reduce upsample')\n\ngenotype = Genotype(normal=[('skip_connect', 0), ('sep_conv_5x5', 1), ('sep_conv_3x3', 2), ('skip_connect', 0), ('dil_conv_3x3', 2), ('dil_conv_3x3', 0)],\n reduce=[('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('skip_connect', 0), ('skip_connect', 1), ('sep_conv_5x5', 3)],\n upsample=[('sep_conv_3x3', 3), ('dil_conv_3x3', 2), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 3), ('sep_conv_3x3', 4), ('sep_conv_3x3', 2), ('dil_conv_5x5', 4)]\n )\n\nhyperparams={'genotype': genotype}\n\nnd.config['hyperparams'] = hyperparams\n","sub_path":"nets/css/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"233741590","text":"list1=['apple','banana','custard','dragon']\nprint(list1)\n\n'''\nList items are ordered, changeable, and allow duplicate values.\n\nList items are indexed, the first item has index [0], the second item has index [1] etc.\n\nlists are ordered, it means that the items have a defined order, and that order will not change.\n\nIf you add new items to a list, the new items will be placed at the end of the list.\n\nThe list is changeable, meaning that we can change, add, and remove items in a list after it has been created\n\nSince lists are indexed, lists can have items with the same value:\n'''\n#Lists allow duplicate values:\n\nthislist = [\"apple\", \"banana\", \"cherry\", \"apple\", \"cherry\"]\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\nprint(len(thislist))\n\n'''\nString, int and boolean data types:\n'''\nlist1 = [\"apple\", \"banana\", \"cherry\"]\nlist2 = [1, 5, 7, 9, 3]\nlist3 = [True, False, False]\n'''\nA list with strings, integers and boolean values:\n'''\nlist1 = [\"abc\", 34, True, 40, \"male\"]\n'''type'''\nmylist = [\"apple\", \"banana\", \"cherry\"]\nprint(type(mylist))\n\n'''\nUsing the list() constructor to make a List:\n'''\nthislist = list((\"apple\", \"banana\", \"cherry\")) # note the double round-brackets\nprint(thislist)\n\n'''\nPrint the second item of the list:\n'''\nthislist = [\"apple\", \"banana\", \"cherry\"]\nprint(thislist[1])\nthislist = [\"apple\", \"banana\", \"cherry\"]\nprint(thislist[-1])\nthislist = [\"apple\", \"banana\", \"cherry\", \"orange\", \"kiwi\", \"melon\", \"mango\"]\nprint(thislist[2:5])\nthislist = [\"apple\", \"banana\", \"cherry\", \"orange\", \"kiwi\", \"melon\", \"mango\"]\nprint(thislist[:4])\nthislist = [\"apple\", \"banana\", \"cherry\", \"orange\", \"kiwi\", \"melon\", \"mango\"]\nprint(thislist[2:])\nthislist = [\"apple\", \"banana\", \"cherry\", \"orange\", \"kiwi\", \"melon\", \"mango\"]\nprint(thislist[-4:-1])\nthislist = [\"apple\", \"banana\", \"cherry\"]\nif \"apple\" in thislist:\n print(\"Yes, 'apple' is in the fruits list\")\nthislist = [\"apple\", \"banana\", \"cherry\"]\nthislist[1] = \"blackcurrant\"\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\", \"orange\", \"kiwi\", \"mango\"]\nthislist[1:3] = [\"blackcurrant\", \"watermelon\"]\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\nthislist[1:2] = [\"blackcurrant\", \"watermelon\"]\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\nthislist[1:3] = [\"watermelon\"]\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\nthislist.insert(2, \"watermelon\")\nprint(thislist)\nthislist.append(\"custard\")\nprint(thislist)\nthislist.insert(1,'orange')\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\ntropical = [\"mango\", \"pineapple\", \"papaya\"]\nthislist.extend(tropical)\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\nthistuple = (\"kiwi\", \"orange\")\nthislist.extend(thistuple)\nprint(thislist)\nthislist.remove(\"banana\")\nprint(thislist)\nthislist.pop(1)\nprint(thislist)\nthislist.pop()\nprint(thislist)\ndel thislist[0]\nprint(thislist)\ndel thislist\nthislist = [\"apple\", \"banana\", \"cherry\"]\nthislist.clear()\nprint(thislist)\nthislist = [\"apple\", \"banana\", \"cherry\"]\nfor x in thislist:\n print(x)\nfor i in range(len(thislist)):\n print(thislist[i])\ni = 0\nwhile i < len(thislist):\n print(thislist[i])\n i = i + 1\n[print(x) for x in thislist]\nfruits = [\"apple\", \"banana\", \"cherry\", \"kiwi\", \"mango\"]\nnewlist = []\n\nfor x in fruits:\n if \"a\" in x:\n newlist.append(x)\n\nprint(newlist)\nnewlist = [x for x in fruits if \"a\" in x]\n\nprint(newlist)\nnewlist = [x for x in fruits if x != \"apple\"]\nprint(newlist)\nnewlist = [x for x in fruits]\nprint(newlist)\nnewlist = [x for x in range(10)]\nprint(newlist)\nnewlist = [x for x in range(10) if x < 5]\nprint(newlist)\nnewlist = [x.upper() for x in fruits]\nprint(newlist)\nnewlist = ['hello' for x in fruits]\nprint(newlist)\nnewlist = [x if x != \"banana\" else \"orange\" for x in fruits]\nprint(newlist)\nthislist.sort()\nprint(thislist)\nthislist = [100, 50, 65, 82, 23]\nthislist.sort()\nprint(thislist)\nthislist = [\"orange\", \"mango\", \"kiwi\", \"pineapple\", \"banana\"]\nthislist.sort(reverse = True)\nprint(thislist)\n\ndef myfunc(n):\n return abs(n - 50)\n\nthislist = [100, 50, 65, 82, 23]\n\nthislist.sort(key = myfunc)\n\nprint(thislist)\nthislist = [\"banana\", \"Orange\", \"Kiwi\", \"cherry\"]\nthislist.sort()\nprint(thislist)\nthislist = [\"banana\", \"Orange\", \"Kiwi\", \"cherry\"]\nthislist.sort(key = str.lower)\nprint(thislist)\nthislist.reverse()\nprint(thislist)\nmylist = thislist.copy()\nprint(mylist)\nmylist = list(thislist)\nprint(mylist)\nlist1 = [\"a\", \"b\", \"c\"]\nlist2 = [1, 2, 3]\n\nlist3 = list1 + list2\nprint(list3)\nfor x in list2:\n list1.append(x)\n\nprint(list1)\nlist1 = [\"a\", \"b\", \"c\"]\nlist2 = [1, 2, 3]\nlist1.extend(list2)\nprint(list1)\n\n'''\nList Methods:\n\nappend()\tAdds an element at the end of the list\nclear()\t Removes all the elements from the list\ncopy()\t Returns a copy of the list\ncount()\t Returns the number of elements with the specified value\nextend()\tAdd the elements of a list (or any iterable), to the end of the current list\nindex()\t Returns the index of the first element with the specified value\ninsert()\tAdds an element at the specified position\npop()\t Removes the element at the specified position\nremove()\tRemoves the item with the specified value\nreverse()\tReverses the order of the list\nsort()\t Sorts the list\n'''\nprint(\n'''\nList Comprehension\n'''\n)\n\n'''\nList comprehension offers a shorter syntax when you want to create a new list based on the values of an existing list.\n\nExample:\n\nBased on a list of fruits, you want a new list, containing only the fruits with the letter \"a\" in the name.\n\n'''\n\n#Without list comprehension you will have to write a for statement with a conditional test inside:\nfruits = [\"apple\", \"banana\", \"cherry\", \"kiwi\", \"mango\"]\nnewlist = []\n\nfor x in fruits:\n\n if \"a\" in x:\n\n newlist.append(x)\n\nprint(newlist)\n\n#With list comprehension you can do all that with only one line of code:\nfruits = [\"apple\", \"banana\", \"cherry\", \"kiwi\", \"mango\"]\n\nnewlist = [x for x in fruits if \"a\" in x]\n\nprint(newlist)\n\n#Only accept items that are not \"apple\":\n\nnewlist = [x for x in fruits if x != \"apple\"]\nprint(newlist)\n\n#With no if statement:\n\nnewlist = [x for x in fruits]\nprint(newlist)\n\n#The iterable can be any iterable object, like a list, tuple, set etc.\n\n#You can use the range() function to create an iterable:\n\nnewlist = [x for x in range(10)]\nprint(newlist)\nnewlist = [x for x in range(10) if x<5]\nprint(newlist)\n#Set the values in the new list to upper case:\n\nnewlist = [x.upper() for x in fruits]\nprint(newlist)\n\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"428115855","text":"from enum import Enum, IntEnum\nfrom typing import Union\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils.fields import MonitorField\nfrom model_utils.models import TimeStampedModel, SoftDeletableModel\nfrom tagulous.models import TagField, TagModel\n\nfrom whoweb.coldemail.api.resource import (\n CreateableResource,\n ListableResource,\n UpdateableResource,\n DeleteableResource,\n)\nfrom whoweb.contrib.fields import ObscureIdMixin\nfrom whoweb.core.models import EventLoggingModel\nfrom whoweb.payments.models import BillingAccountMember\nfrom whoweb.users.models import Seat\n\n\nclass ColdEmailTagModel(TagModel):\n class TagMeta:\n force_lowercase = True\n\n\nclass ColdemailBaseModel(\n ObscureIdMixin, TimeStampedModel, EventLoggingModel, SoftDeletableModel\n):\n api_class: Union[\n CreateableResource, ListableResource, UpdateableResource, DeleteableResource\n ] = None\n\n class CampaignObjectStatusOptions(IntEnum):\n CREATED = 0\n PENDING = 2\n PUBLISHED = 4\n PAUSED = 8\n\n seat = models.ForeignKey(Seat, on_delete=models.CASCADE, null=True, blank=True)\n billing_seat = models.ForeignKey(\n BillingAccountMember, on_delete=models.CASCADE, null=True\n )\n\n status = models.IntegerField(\n _(\"status\"),\n db_index=True,\n choices=[(s.value, s.name) for s in CampaignObjectStatusOptions],\n blank=True,\n default=CampaignObjectStatusOptions.CREATED,\n )\n status_changed = MonitorField(_(\"status changed\"), monitor=\"status\")\n coldemail_id = models.CharField(max_length=100)\n is_removed_changed = MonitorField(\n \"deleted at\", monitor=\"is_removed\", editable=False\n )\n published = MonitorField(\n monitor=\"status\",\n when=[CampaignObjectStatusOptions.PUBLISHED],\n null=True,\n default=None,\n blank=True,\n )\n tags = TagField(to=ColdEmailTagModel, blank=True)\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return f\"{self.__class__.__name__} {self.pk}\" + (\n f\"(Published {self.published})\"\n if self.status == self.CampaignObjectStatusOptions.PUBLISHED\n else \"\"\n )\n\n @classmethod\n def api_create(cls, **kwargs):\n return cls.api_class.create(**kwargs)\n\n def api_retrieve(self):\n if not self.coldemail_id:\n return\n res = self.api_class.retrieve(id=self.coldemail_id)\n if not res:\n self.coldemail_id = None\n self.save()\n return res\n\n @property\n def is_locked(self):\n return self.status in [\n self.CampaignObjectStatusOptions.PENDING,\n self.CampaignObjectStatusOptions.PUBLISHED,\n ]\n\n @property\n def is_published(self):\n return (\n bool(self.coldemail_id)\n or self.status == self.CampaignObjectStatusOptions.PUBLISHED\n )\n","sub_path":"whoweb/coldemail/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"575961693","text":"import random\n\n#nums = list( map(lambda x:random.randrange(1,100), range(10)))\n#nums = [3, 4, 1, 2, 5]\n#nums = [389, 469, 795, 593, 230, 336, 791, 536, 179, 42]\n\n#print(nums)\n\ndef quick(a_list):\n\n if len(a_list) <= 1:\n return a_list\n\n pivot = a_list[0]\n left = []\n right = []\n\n if len(a_list) == 2:\n if a_list[1] < pivot:\n a_list[0] = a_list[1]\n a_list[1] = pivot\n else:\n pass\n return a_list\n\n for value in a_list:\n if value < pivot:\n left.append(value)\n elif value > pivot:\n right.append(value)\n\n #print(left, right)\n return quick(left) + [pivot] + quick(right)\n\n\n# sorted_nums = quick(nums)\n#\n# print(sorted_nums)\n#\n# before = 0\n#\n# for value in sorted_nums:\n# if value < before:\n# print('error')\n#\n# before = value\n\nfor i in range(100):\n nums = list( map(lambda x:random.randrange(1,100), range(10)))\n\n print(nums)\n\n nums = quick(nums)\n\n print(nums)\n\n before = 0\n\n for value in nums:\n if value < before:\n print('error')\n before = value\n\n print(\"---------------------------------\")","sub_path":"examples/sort_ex2.py","file_name":"sort_ex2.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"276081353","text":"# -*- coding: utf-8 -*-\r\n\r\nimport time\r\nimport cx_Oracle\r\nfrom core.database.wrapper import DatabaseWrapper\r\n\r\n\r\nclass Oracle(object):\r\n\r\n @staticmethod\r\n def _execute_sql(sql_statement):\r\n cursor = DatabaseWrapper().connection.cursor()\r\n cursor.prepare(sql_statement)\r\n cursor.execute(None)\r\n return cursor\r\n\r\n def execute_query(self, sql_statement, one_row=False):\r\n cursor = self._execute_sql(sql_statement)\r\n rows = cursor.fetchall()\r\n if one_row:\r\n united_rows = []\r\n for row in rows:\r\n united_rows.extend(row)\r\n rows = united_rows\r\n return rows\r\n\r\n def get_row_count(self, sql_statement):\r\n cursor = self._execute_sql(sql_statement)\r\n cursor.fetchall()\r\n return cursor.rowcount()\r\n\r\n def execute_sql_string(self, sql_statement):\r\n try:\r\n cursor = self._execute_sql(sql_statement)\r\n DatabaseWrapper().connection.commit()\r\n finally:\r\n if cursor:\r\n DatabaseWrapper().connection.rollback()\r\n\r\n def execute_sql_script(self, file_name, replace=None, one_row=False):\r\n sql_statement = open(file_name, \"r\", encoding=\"utf-8\").read()\r\n if replace:\r\n for key, val in replace.values():\r\n sql_statement = sql_statement.replace(key, val)\r\n cursor = self._execute_sql(sql_statement)\r\n try:\r\n rows = cursor.fetchall()\r\n if one_row:\r\n united_rows = []\r\n for row in rows:\r\n united_rows.extend(row)\r\n rows = united_rows\r\n return rows\r\n except cx_Oracle.InterfaceError:\r\n DatabaseWrapper().connection.commit()\r\n if cursor:\r\n DatabaseWrapper().connection.rollback()\r\n\r\n def wait_until_exists(self, sql_statement, timeout=10):\r\n for t in range(timeout):\r\n cursor = self._execute_sql(sql_statement)\r\n cursor.fetchall()\r\n if cursor.rowcount != 0:\r\n return True\r\n time.sleep(1)\r\n raise AssertionError(\"Record was not found in %s second(s)\" % str(timeout))\r\n\r\n def wait_until_not_exists(self, sql_statement, timeout=10):\r\n for t in range(timeout):\r\n cursor = self._execute_sql(sql_statement)\r\n cursor.fetchall()\r\n if cursor.rowcount == 0:\r\n return True\r\n time.sleep(1)\r\n raise AssertionError(\"Record was not deleted in %s second(s).\" % str(timeout))","sub_path":"core/database/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"289701089","text":"import coating\nfrom numpy import *\nfrom openravepy import *\nfrom openravepy.misc import SpaceSamplerExtra\nimport scipy\nenv=Environment()\nenv.SetViewer('qtcoin')\nenv.Load(\"/home/renan/Documents/EMMA/Turbina/env_mh12_0_16.xml\")\nrobot = env.GetRobots()[0]\ntarget = env.GetBodies()[0]\nmanip = robot.GetActiveManipulator()\n\n\n# PARAMETERS\nfacevector = [1,0,0]\ntheta = [0,0,0]\ncoatingdistance = 0.23 # coating distance\nrobottobladedistance = 0 # robot to blade distance\nnumberofangles = 8 # degree step\ntolerance = 20 # degrees\n\n\n#pN = numpy.array([-1.412,-2.567,-0.617])# Extremo esquerdo superior\n\n#pN = numpy.array([-0.576,-2.984,0.106])# Extremo direito superior\n\n#pN = numpy.array([-0.4,-3.573,0.26])# Extremo direito inferior\n\npN = numpy.array([ -1.044, -3.218, 0 ])\n\nnormal = [-1,0,0]\npN = numpy.concatenate((pN,normal))\n\n# CAMERA SETTINGS\nTcamera = numpy.array([[0.53056445,0.0478718,0.84629171,-3.62191391],\n [ 0.25713833,-0.96044621,-0.10687824,-2.01647758],\n [ 0.80770121,0.27431983,-0.52188829, 2.76554561],\n [ 0,0,0,1]])\nenv.GetViewer().SetCamera(Tcamera)\n\n#MAIN\nikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)\nif not ikmodel.load():\n ikmodel.autogenerate()\n\napproachrays = load('bladepoints16Back.npz')\napproachrays = approachrays['array']\nN = approachrays.shape[0]\n\n# Initial position\na=15\nalpha = 1.0*a*pi/180;\nT = numpy.array([[1,0,0,0],[0,cos(alpha),-sin(alpha),0],\n [0,sin(alpha),cos(alpha),0],[0,0,0,1]])\n\nfor i in range(0,5):\n env.GetBodies()[i].SetTransform(dot(T,env.GetBodies()[i].GetTransform()))\n\n \n# COATING LOOP\na=40\nwhile(a):\n alpha = 1.0*pi/180;\n T = numpy.array([[1,0,0,0],[0,cos(alpha),-sin(alpha),0],\n [0,sin(alpha),cos(alpha),0],[0,0,0,1]])\n\n for i in range(0,5):\n env.GetBodies()[i].SetTransform(dot(T,env.GetBodies()[i].GetTransform()))\n\n\n Ttarget = target.GetTransform()\n\n#PLOT BLADE POINTS FOR COATING\n gapproachrays = c_[dot(approachrays[0:N,0:3],transpose(Ttarget[0:3,0:3]))+tile(Ttarget[0:3,3],(N,1)),dot(approachrays[0:N,3:6],transpose(Ttarget[0:3,0:3]))]\n approachgraphs = env.plot3(points=gapproachrays[:,0:3],pointsize=5,colors=array((1,0,0)))\n\n\n# Compute Solutions\n reachableRays, iksolList = coating.WorkspaceOnPose(pN, robottobladedistance, gapproachrays,robot,ikmodel,facevector,theta)\n#EXTRA COATING\n AllreachableRays, AlliksolList = coating.AllExtraCoating(gapproachrays,reachableRays,coatingdistance,numberofangles,tolerance,ikmodel,facevector)\n#PLOT REACHABLE POINT\n if len(reachableRays)>0:\n grays = c_[reachableRays[:,0:3],reachableRays[:,3:6]]\n raygraphs = env.plot3(points=reachableRays[:,0:3],pointsize=5,colors=array((0,0,0)))\n\n if len(AllreachableRays)>0:\n grays2 = c_[AllreachableRays[:,0:3],AllreachableRays[:,3:6]]\n raygraphs2 = env.plot3(points=AllreachableRays[:,0:3],pointsize=5,colors=array((0,0,1)))\n\n env.GetViewer().SendCommand('SetFiguresInCamera 1')\n I = env.GetViewer().GetCameraImage(640,480, Tcamera,[640,640,320,240])\n scipy.misc.imsave('/home/renan/Documents/EMMA/Python/framebyframe/distance0/'+str(a-10)+'.jpg',I)\n print(abs(a-40))\n a-=1\n \n \n \n\n","sub_path":"Python/mh12_2blade.py","file_name":"mh12_2blade.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"283925501","text":"from time import sleep\nimport cv2\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing.image import img_to_array\n\nface_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nclassifier = load_model('sampleTrainingModel.h5')\nemotion_labels = ['Mad', 'Disgust', 'Fear', 'happy', 'Indifferent', 'sad', 'surprised']\nJoy = (168, 168, 50)\nShocked = (168, 50, 127)\nMelancholy = (50, 168, 58)\nHatred = (168, 50, 50)\nSimp = (119, 50, 168)\nIdentifier1 = (50, 146, 168)\nUnfiltered = (50, 168, 144)\n\ncap = cv2.VideoCapture(0)\n\ndef make_1080p():\n cap.set(3, 1920)\n cap.set(4, 1080)\n\ndef make_720p():\n cap.set(3, 1280)\n cap.set(4, 720)\n\ndef make_480p():\n cap.set(3, 640)\n cap.set(4, 480)\n\nmake_480p()\nwhile True:\n sleep(0.01)\n _, frame = cap.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray)\n if cv2.waitKey(1) & 0xFF == ord('x'):\n break\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), Identifier1, 2)\n face_region = gray[y:y+h, x:x+w]\n face_region = cv2.resize(face_region, (48, 48), interpolation=cv2.INTER_AREA)\n\n if np.sum([face_region]) != 0:\n object = face_region.astype('float')/255.0\n object = img_to_array(object)\n object = np.expand_dims(object, axis=0)\n prediction = classifier.predict(object)[0]\n label = emotion_labels[prediction.argmax()]\n label_position = (x, y)\n if label == 'Indifferent':\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, Simp, 2)\n elif label == 'Mad':\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, Hatred, 2)\n elif label == 'surprised':\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, Shocked, 2)\n elif label == 'happy':\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, Joy, 2)\n elif label == 'sad':\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, Melancholy, 2)\n else:\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, Unfiltered, 2)\n \n cv2.imshow('Emotions - Press x to exit', frame)\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"project3/emotionRecognition.py","file_name":"emotionRecognition.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"229988064","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(0, 2 * np.pi)\noffsets = np.linspace(0, 2*np.pi, 4, endpoint=False)\n# Create array with shifted-sine curve along each column\nyy = np.transpose([np.sin(x + phi) for phi in offsets])\n\noffsets2 = np.linspace(0, 2*np.pi, 50, endpoint=False)\nw = np.sin(offsets2)\nw2 = np.cos(offsets2)\n\nplt.rc('lines', linewidth=4)\n\nfig, (ax0, ax1) = plt.subplots(nrows=2)\n\nplt.rc('axes', color_cycle=['r', 'g', 'b', 'y'])\nax0.plot(offsets2, w)\nax0.plot(offsets2,w2)\nax0.set_title('Entradas')\n\nax1.set_color_cycle(['c', 'm', 'y', 'k'])\nax1.plot(yy)\nax1.set_title('Set axes color cycle to cmyk')\n\n# Tweak spacing between subplots to prevent labels from overlapping\nplt.subplots_adjust(hspace=0.3)\nplt.show()","sub_path":"graficasCOlores.py","file_name":"graficasCOlores.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"653843668","text":"from .colors import colors\n\nDEBUG = True # Defines if should run debug utilities\n\ncolorPrompt = True # Defines if terminal colors should be displayed\n\nisRoot = False # If the user is root\nwd = \"\" # Current working directory\nuser = \"user\" # Set username\nusername = \"user\" # Defines username\nhostname = \"computer\" # Defines hostname\nshell = \"splash\" # Name of shell\n\nps1 = colors.fg.lightblue + username + colors.fg.darkgrey + \"@\" + colors.fg.red + hostname + colors.fg.lightcyan + \":\" + colors.fg.cyan + \"[~/\"\"] \" + colors.fg.black + \"> \" + colors.fg.pink + \"$# \"[isRoot] + colors.reset # Sets PS1\n\nbash_history = [] # Saves all ran commands\nlev_bash_history = [] # All ran commands, cleared after each level\ncurrentLevel = 0 # contains current level index in levels\n\nbanned_commands = [] # List of banned commands\nbanned_tokens = [] # List of banned tokens\n\nexit_code = 0 # Contains last exit code\n\ncommand = \"\" # Contains current command\ntoken = [] # Contains current args for current command\n\nworkingDir = None # Contains reference to working dir in Interpreter\nheadDir = None # Contains reference to head dir in Interpreter\n","sub_path":"packages/resources/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"560872812","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\nclass rental_contract_renewal(models.TransientModel):\n _name = 'rental.contract.renewal'\n\n date_from= fields.Date ('From Date', required=True)\n date_to= fields.Date ('To Date', required=True)\n\n @api.constrains('date_from', 'date_to')\n def _check_dates(self):\n if self.filtered(lambda c: c.date_to and c.date_from > c.date_to):\n raise ValidationError(_('Contract start date must be less than contract end date.'))\n\n def confirm_renewal(self):\n rental_pool= self.env['rental.contract']\n contract = rental_pool.browse(self._context.get('active_id'))\n copied= contract.copy()\n copied.write({'date_from':self.date_from,'date_to':self.date_to,'origin':contract.name})\n contract.write({'state':'renew'})\n\n return {\n 'name': _('Rental Contract'),\n 'view_type': 'form',\n 'view_mode': 'form,tree',\n 'res_model': 'rental.contract',\n 'res_id': copied.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n }","sub_path":"wizard/rental_contract_renewal.py","file_name":"rental_contract_renewal.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"537972350","text":"import sys\nimport numpy as np\nfrom socket import gethostname\n\nimport h5_storage\nimport elegant_matrix\nimport lasing\nimport config\nimport image_and_profile as iap\nimport myplotstyle as ms\n\nelegant_matrix.set_tmp_dir('~/tmp_elegant/')\n\nms.closeall()\n\nhostname = gethostname()\nif hostname == 'desktop':\n data_dir2 = '/storage/data_2021-05-19/'\nelif hostname == 'pc11292.psi.ch':\n data_dir2 = '/sf/data/measurements/2021/05/19/'\nelif hostname == 'pubuntu':\n data_dir2 = '/mnt/data/data_2021-05-19/'\ndata_dir1 = data_dir2.replace('19', '18')\n\n\n# Full lasing, but saturation\nlasing_on_fileF = data_dir1+'2021_05_18-23_42_10_Lasing_True_SARBD02-DSCR050.h5'\nlasing_off_fileF = data_dir1+'2021_05_18-23_43_39_Lasing_False_SARBD02-DSCR050.h5'\n\n# Full lasing begin\nlasing_off_fileFB = data_dir1+'2021_05_18-21_02_13_Lasing_False_SARBD02-DSCR050.h5'\nlasing_on_fileFB = data_dir1+'2021_05_18-20_52_52_Lasing_True_SARBD02-DSCR050.h5'\n\n# Short pulse begin\nlasing_on_fileSB = data_dir1+'2021_05_18-21_08_24_Lasing_True_SARBD02-DSCR050.h5'\nlasing_off_fileSB = data_dir1+'2021_05_18-21_06_46_Lasing_False_SARBD02-DSCR050.h5'\n\n\n# Short pulse\nlasing_on_fileS = data_dir1+'2021_05_18-23_47_11_Lasing_True_SARBD02-DSCR050.h5'\nlasing_off_fileS = data_dir1+'2021_05_18-23_48_12_Lasing_False_SARBD02-DSCR050.h5'\n\n#Two color pulse I=3 A, k=2\nlasing_on_file2 = data_dir1+'2021_05_18-21_41_35_Lasing_True_SARBD02-DSCR050.h5'\nlasing_off_file2 = data_dir1+'2021_05_18-21_45_00_Lasing_False_SARBD02-DSCR050.h5'\n\nscreen_x00 = 4250e-6\nscreen_x02 = 898.02e-6\n\nstreaker_offset0 = 374e-6\nstreaker_offset2 = 364e-6\n\nplot_repair = False\n\n\nfor lasing_on_file, lasing_off_file, pulse_energy, repair_data, screen_x0, streaker_offset in [\n #(lasing_on_fileF, lasing_off_fileF, 625e-6, True, screen_x00, streaker_offset0),\n #(lasing_on_fileS, lasing_off_fileS, 85e-6, True, screen_x00, streaker_offset0),\n (lasing_on_file2, lasing_off_file2, 180e-6, False, screen_x02, streaker_offset2),\n (lasing_on_fileFB, lasing_off_fileFB, 625e-6, False, screen_x02, streaker_offset2),\n (lasing_on_fileSB, lasing_off_fileSB, 85e-6, False, screen_x02, streaker_offset2),\n ]:\n\n lasing_off_dict = h5_storage.loadH5Recursive(lasing_off_file)\n lasing_on_dict = h5_storage.loadH5Recursive(lasing_on_file)\n\n\n if repair_data:\n\n for main_ctr, (data_dict, title) in enumerate([(lasing_off_dict, 'Lasing Off'), (lasing_on_dict, 'Lasing On')]):\n x_axis = data_dict['pyscan_result']['x_axis_m']\n y_axis = data_dict['pyscan_result']['y_axis_m']\n limits = 3.65e-3, 3.75e-3\n\n limits_sat_x = 3.8e-3, 4.5e-3\n limits_sat_y = 3.e-3, 5e-3\n\n def correct_image(img, x_axis, y_axis, limits):\n image = iap.Image(img, x_axis, y_axis)\n\n index1 = np.argmin((image.x_axis-limits[0])**2)\n index2 = np.argmin((image.x_axis-limits[1])**2)+1\n new_image = image.image.copy()\n for index_y in range(new_image.shape[0]):\n new_image[index_y,index1:index2] = np.interp(image.x_axis[index1:index2], [image.x_axis[index1], image.x_axis[index2]], [image.image[index_y,index1], image.image[index_y,index2]])\n image_corrected = iap.Image(new_image, image.x_axis, image.y_axis)\n\n return image, image_corrected\n\n def correct_saturation(img, x_axis, y_axis):\n where0 = np.logical_and(img==0, np.logical_and(x_axis > limits_sat_x[0], x_axis < limits_sat_x[1])[np.newaxis,:])\n where0 = np.logical_and(where0, np.logical_and(y_axis > limits_sat_y[0], y_axis < limits_sat_y[1])[:,np.newaxis])\n new_image = img.copy()\n for index_y in range(img.shape[0]):\n if np.any(where0[index_y]):\n for index_x in np.argwhere(where0[index_y]):\n new_image[index_y, index_x] = new_image[index_y, index_x-1]\n\n return new_image\n\n old_pyscan_image = data_dict['pyscan_result']['image'].astype(float)\n new_pyscan_image = np.zeros_like(old_pyscan_image)\n\n for n_image, img in enumerate(old_pyscan_image):\n\n image, image_corrected = correct_image(img, x_axis, y_axis, limits)\n img_sat = correct_saturation(image_corrected.image, image_corrected.x_axis, image_corrected.y_axis)\n image_sat = iap.Image(img_sat, image_corrected.x_axis, image_corrected.y_axis)\n new_pyscan_image[n_image] = image_sat.image\n\n if plot_repair:\n ms.figure('Repair data')\n subplot = ms.subplot_factory(2,2, grid=False)\n sp_ctr = 1\n\n sp_raw = subplot(sp_ctr, title='Image raw')\n sp_ctr += 1\n\n sp_corrected = subplot(sp_ctr, title='Image corrected')\n sp_ctr += 1\n\n sp_sat = subplot(sp_ctr, title='Saturation')\n sp_ctr += 1\n\n image.plot_img_and_proj(sp_raw)\n image_corrected.plot_img_and_proj(sp_corrected)\n image_sat.plot_img_and_proj(sp_sat)\n\n new_pyscan_dict = {\n 'image': new_pyscan_image,\n 'x_axis_m': image_corrected.x_axis,\n 'y_axis_m': image_corrected.y_axis,\n }\n\n data_dict['pyscan_result'] = new_pyscan_dict\n\n if plot_repair:\n ms.show()\n sys.exit()\n\n n_streaker = 1\n beamline = 'Aramis'\n delta_gap = -62e-6\n tracker_kwargs = config.get_default_tracker_settings()\n recon_kwargs = config.get_default_gauss_recon_settings()\n slice_factor = 3\n charge = 200e-12\n\n las_rec_images = {}\n\n for main_ctr, (data_dict, title) in enumerate([(lasing_off_dict, 'Lasing Off'), (lasing_on_dict, 'Lasing On')]):\n rec_obj = lasing.LasingReconstructionImages(screen_x0, beamline, n_streaker, streaker_offset, delta_gap, tracker_kwargs, recon_kwargs=recon_kwargs, charge=charge, subtract_median=True, slice_factor=slice_factor)\n #rec_obj.do_recon_plot = True\n\n rec_obj.add_dict(data_dict)\n if main_ctr == 1:\n rec_obj.profile = las_rec_images['Lasing Off'].profile\n rec_obj.ref_slice_dict = las_rec_images['Lasing Off'].ref_slice_dict\n rec_obj.process_data()\n las_rec_images[title] = rec_obj\n #rec_obj.plot_images('raw', title)\n #rec_obj.plot_images('tE', title)\n\n las_rec = lasing.LasingReconstruction(las_rec_images['Lasing Off'], las_rec_images['Lasing On'], pulse_energy, current_cutoff=1.0e3)\n las_rec.plot()\n\nms.show()\n\n","sub_path":"063b_lasing.py","file_name":"063b_lasing.py","file_ext":"py","file_size_in_byte":6729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"385604128","text":"def start():\n token(input('> ')) #Starts to tokenise user input\n\ndef token(data):\n debug = False\n input = [data[0]] #Sets 'input' as an array and might as well set the first value as the first character of 'data' while we're here\n for i in range(1,len(data)): #A for loop to transfer each character as a data item in 'data'\n input.append(data[i]) #Creates a data item for every character, might be able to improve memory efficiency some other way...\n if debug:\n eatDebug(input) #Transfers 'data' to the eat function below\n else:\n eat(input)\n\ndef eat(data):\n count = 0\n i = 0\n found = False\n pointer = 0\n pointerValues = [0]\n while(i != len(data)): #Goes through each data item and processes it.\n if data[i] == '(':\n start = i + 1\n while not found:\n if data[len(data)-1-count] == ')':\n end = len(data)-1-count\n found = True\n\n\n print(\"Found the end loop\")\n\n\n elif count == len(data):\n exit(0)\n\n\n #print(\"start = \", start)\n #print(\"end = \", end)\n\n\n try:\n if int(data[start],10) % 1 == 0: #Tests if it's an integer\n iterations = int(data[start])\n except:\n if data[start] == ' ': #If theres a space set iterations to the hex value BADA55, which translates to repeating forever\n iterations = 0xBADA55\n else:\n exit(0) #Exit if no number or space\n loop = [data[start+1]]\n for j in range(2,end-start):\n loop.append(data[start+j])\n\n if iterations == 0xBADA55:\n while True:\n at(loop)\n else:\n for j in range(iterations):\n eat(loop)\n i += len(loop)+1\n\n elif data[i] == 'I': #I for increase\n try:\n pointerValues[pointer] += int(data[i-1])\n except:\n pointerValues[pointer] += 1\n elif data[i] == 'R': #R for right\n try:\n pointerValues.append(0)\n pointer += int(data[i-1])\n except:\n pointer += 1\n elif data[i] == 'D': #D for decrease\n if pointerValues[pointer] != 0:\n try:\n pointerValues[pointer] -= int(data[i-1])\n except:\n pointerValues[pointer] -= 1\n else:\n exit(0)\n elif data[i] == 'L': #L for left\n if pointer != 0:\n try:\n pointer -= int(data[i-1])\n except:\n pointer -= 1\n else:\n exit(0)\n elif data[i] == 'P':\n print(pointerValues[pointer])\n i += 1\n\nwhile True:\n start()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"612249766","text":"\"\"\" Welborn Productions - Projects - Search\n Provides functions needs for the searcher app.\n -Christopher Welborn 08-30-14\n\"\"\"\nfrom projects.models import wp_project\nfrom projects import tools\n\n\ndef get_content(proj, request=None):\n \"\"\" Returns the full content for a project for searching. \"\"\"\n return tools.get_html_content(proj, request)\n\n\ndef get_desc(proj):\n \"\"\" Returns the description for a project for searching and displaying. \"\"\"\n return proj.description\n\n\ndef get_objects():\n \"\"\" Returns searchable projects. \"\"\"\n return wp_project.objects.filter(disabled=False)\n\n\ndef get_targets(proj, content=None, desc=None):\n \"\"\" Returns searchable target strings for a project. \"\"\"\n content = content or get_content(proj)\n desc = desc or get_desc(proj)\n return (\n proj.name,\n proj.alias,\n proj.version,\n desc,\n content,\n str(proj.publish_date)\n )\n\n\ndef result_args(proj, desc=None):\n \"\"\" Returns kwargs needed to create a WpResult. \"\"\"\n desc = desc or get_desc(proj)\n return {\n 'title': '{} v. {}'.format(proj.name, proj.version),\n 'desc': desc,\n 'link': '/projects/{}'.format(proj.alias),\n 'posted': str(proj.publish_date),\n 'restype': 'Project'\n }\n","sub_path":"projects/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"632850766","text":"import logging\n# XXX import actual commands needed\nfrom fabulaws.library.wsgiautoscale.api import * # noqa\nfrom fabulaws.library.wsgiautoscale.api import _setup_env\n\nroot_logger = logging.getLogger()\nroot_logger.addHandler(logging.StreamHandler())\nroot_logger.setLevel(logging.WARNING)\n\nfabulaws_logger = logging.getLogger('fabulaws')\nfabulaws_logger.setLevel(logging.INFO)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\n@task\ndef florida(deployment_tag=env.default_deployment, answer=None):\n _setup_env(deployment_tag, 'florida')\n\n\n@task\ndef presidential(deployment_tag=env.default_deployment, answer=None):\n _setup_env(deployment_tag, 'presidential')\n\n\n@task\ndef staging01(deployment_tag=env.default_deployment, answer=None):\n _setup_env(deployment_tag, 'staging01')\n\n\n@task\ndef prod01(deployment_tag=env.default_deployment, answer=None):\n _setup_env(deployment_tag, 'prod01')\n\n\n@task\n@roles('db-master')\ndef pg_create_unaccent_ext():\n \"\"\"\n Workaround to facilitate granting the opendebates database user\n permission to use the 'unaccent' extension in Postgres.\n \"\"\"\n require('environment', provided_by=env.environments)\n sudo('sudo -u postgres psql %s -c \"CREATE EXTENSION IF NOT EXISTS unaccent;\"'\n '' % env.database_name)\n for func in [\n 'unaccent(text)',\n 'unaccent(regdictionary, text)',\n 'unaccent_init(internal)',\n 'unaccent_lexize(internal, internal, internal, internal)',\n ]:\n sudo('sudo -u postgres psql %s -c \"ALTER FUNCTION %s OWNER TO %s;\"'\n '' % (env.database_name, func, env.database_user))\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"162892710","text":"import pymel.core as pm\n\n\nimport mgear.shifter.guideTemplateExplorerUI as gteUI\nfrom mgear.shifter import io\nfrom maya.app.general.mayaMixin import MayaQWidgetDockableMixin\nfrom mgear.core import pyqt\nfrom mgear.vendor.Qt import QtCore, QtWidgets\nfrom mgear.vendor.qjsonmodel import QJsonModel\n\n\nclass GuideTemplateExplorerUI(QtWidgets.QMainWindow, gteUI.Ui_MainWindow):\n\n def __init__(self, parent=None):\n super(GuideTemplateExplorerUI, self).__init__(parent)\n self.setupUi(self)\n\n\nclass GuideTemplateExplorer(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n\n def __init__(self, parent=None):\n self.toolName = \"shifterGuideTemplateExplorer\"\n super(GuideTemplateExplorer, self).__init__(parent=parent)\n self.gteUIInst = GuideTemplateExplorerUI()\n self.__model = QJsonModel()\n self.gteUIInst.explorer_treeView.setModel(self.__model)\n\n self.start_dir = pm.workspace(q=True, rootDirectory=True)\n\n self.create_window()\n self.create_layout()\n self.create_connections()\n\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)\n\n def create_window(self):\n\n self.setObjectName(self.toolName)\n self.setWindowFlags(QtCore.Qt.Window)\n self.setWindowTitle(\"Guide Template Explorer\")\n self.resize(300, 330)\n\n def create_layout(self):\n\n self.gte_layout = QtWidgets.QVBoxLayout()\n self.gte_layout.addWidget(self.gteUIInst)\n\n self.setLayout(self.gte_layout)\n\n ###########################\n # create connections SIGNALS\n ###########################\n def create_connections(self):\n self.gteUIInst.actionOpen.triggered.connect(self.open_template)\n self.gteUIInst.actionSave_As.triggered.connect(self.save_as_template)\n self.gteUIInst.actionClear.triggered.connect(self.clear_template)\n\n self.gteUIInst.actionBuild.triggered.connect(self.build_template)\n self.gteUIInst.actionImport.triggered.connect(self.import_template)\n self.gteUIInst.actionImport_Partial.triggered.connect(\n self.import_partial_template)\n\n #############\n # SLOTS\n #############\n def open_template(self):\n template = io._import_guide_template()\n if template:\n self.__model.load(template)\n else:\n pm.displayWarning(\"Not guide template load\")\n\n def save_as_template(self):\n template = self.__model.json()\n if template:\n io.export_guide_template(conf=template)\n else:\n pm.displayWarning(\"Not guide template load\")\n\n def clear_template(self):\n self.__model.load({})\n\n def build_template(self):\n template = self.__model.json()\n if template:\n io.build_from_file(conf=template)\n else:\n pm.displayWarning(\"Not guide template load\")\n\n def import_template(self):\n template = self.__model.json()\n if template:\n io.import_partial_guide(conf=template)\n else:\n pm.displayWarning(\"Not guide template load\")\n\n def import_partial_template(self):\n template = self.__model.json()\n if template:\n indx = self.gteUIInst.explorer_treeView.selectedIndexes()[0]\n try:\n if indx.parent().internalPointer().key == \"components_list\":\n partial = indx.internalPointer().value\n oSel = pm.selected()\n if oSel and oSel[0].getParent(-1).hasAttr(\"ismodel\"):\n initParent = oSel[0]\n else:\n initParent = None\n io.import_partial_guide(partial=partial,\n initParent=initParent,\n conf=template)\n else:\n pm.displayWarning(\"Please select a component guide to \"\n \"import from components_list\")\n except AttributeError:\n pm.displayWarning(\"Please select a component guide to import\"\n \" from components_list\")\n else:\n pm.displayWarning(\"Not guide template load\")\n\n\ndef open_guide_template_explorer(*args):\n\n pyqt.showDialog(GuideTemplateExplorer, dockable=True)\n\n\nif __name__ == \"__main__\":\n\n pyqt.showDialog(GuideTemplateExplorer, dockable=True)\n","sub_path":"scripts/mgear/shifter/guideTemplateExplorer.py","file_name":"guideTemplateExplorer.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"461748597","text":"from django.db import models\nfrom django.utils import timezone\nfrom itertools import chain\nfrom operator import attrgetter\n\n# Create your models here.\n\n\nclass Equipment(models.Model):\n brand = models.CharField(max_length=20)\n name = models.CharField(max_length=50)\n acquisition_cost = models.DecimalField(max_digits=12, decimal_places=2)\n acquisition_date = models.DateField()\n details = models.CharField(max_length=2000)\n TYPE_CODES = (\n ('AC', 'Air Compressor'),\n ('BL', 'Backhoe Loader'),\n ('BR', 'Breaker'),\n ('BD', 'Bulldozer'),\n ('CR', 'Crane'),\n ('DT', 'Dump Truck'),\n ('EX', 'Excavator'),\n ('FT', 'Flatbed Truck'),\n ('FL', 'Forklifts'),\n ('GS', 'Genarating Set'),\n ('LB', 'Low Bed Trailer'),\n ('MT', 'Manlift Truck'),\n ('MC', 'Motorcycle'),\n ('PL', 'Payloader'),\n ('PM', 'Prime Mover'),\n ('RG', 'Road Grader'),\n ('RR', 'Road Roller'),\n ('SL', 'Skidsteer Loader'),\n ('OT', 'Other'),\n )\n type = models.CharField(max_length=2, choices=TYPE_CODES)\n STATUS_CODES = (\n ('IE', 'In Engagement'),\n ('AV', 'Available'),\n ('UM', 'Under Maintenance'))\n status = models.CharField(max_length=2, choices=STATUS_CODES)\n hours_worked = models.IntegerField(default=0)\n total_hours_worked = models.IntegerField(default=0)\n hourly_service_rate = models.DecimalField(max_digits=12, decimal_places=2)\n image = models.ImageField(upload_to=\"equipmentimages\", default='../media/equipmentimages/defaultequipment.png')\n\n def __str__(self):\n return '{}: {}'.format(self.type, self.name)\n\n def as_dict(self):\n return {\n 'brand': self.brand,\n 'name': self.name,\n 'acquisitioncost': self.acquisition_cost,\n 'acquisitiondate': self.acquisition_date,\n 'details': self.details,\n 'type': self.get_type_display,\n 'status': self.status,\n 'hourly_service_rate': self.hourly_service_rate,\n 'imageurl': self.image.url,\n }\n\n def checkconflict(self, index):\n from rental.models import Inquiry\n inquiry2 = Inquiry.objects.get(id=index)\n inquiries = self.inquiryequipment_set.all().exclude(inquiry_id=index)\n if self.status == \"UM\":\n return True\n\n if inquiry2.status == \"AQ\":\n for inquiryequipment in inquiries:\n if inquiryequipment.inquiry.status == \"CO\":\n test_start = inquiryequipment.inquiry.start_date\n test_end = inquiryequipment.inquiry.end_date\n if test_start <= inquiry2.start_date <= test_end:\n return True\n if test_start <= inquiry2.end_date <= test_end:\n return True\n\n return False\n\n def has_engagement(self):\n from rental.models import Inquiry\n inquiries = Inquiry.objects.filter(inquiryequipment__equipment=self).filter(start_date__lte=timezone.now()).\\\n filter(end_date__gte=timezone.now()).filter(status=\"CO\")\n if not inquiries:\n return False\n else:\n return True\n\n def get_previous_transactions(self):\n from rental.models import Inquiry\n inquiries = Inquiry.objects.filter(inquiryequipment__equipment=self).filter(end_date__lt=timezone.now()).\\\n filter(status=\"CO\")\n maintenance = MaintenanceTransaction.objects.filter(equipment=self).filter(end_date__lt=timezone.now())\n combined = sorted(chain(inquiries, maintenance), key=attrgetter('end_date'), reverse=True)\n return combined\n\n def get_roi(self):\n from rental.models import Quotation\n quotation = Quotation.objects.filter(quotationequipment__equipment=self).filter(paid=True).filter(status=\"PA\")\n income = 0\n for x in quotation:\n days = x.inquiry.end_date - x.inquiry.start_date\n days = (days.days + 1) * 24\n income = income + (days * self.hourly_service_rate)\n\n maintenance = MaintenanceTransaction.objects.filter(equipment=self).filter(end_date__lte=timezone.now())\n maintenance_cost = 0\n for y in maintenance:\n maintenance_cost = maintenance_cost + y.cost\n\n roi = (income - (maintenance_cost + self.acquisition_cost))/(maintenance_cost + self.acquisition_cost)\n\n return roi\n\n\n# WAG TO\n#\n# class EquipmentEngagement(models.Model):\n# equipment = models.ForeignKey(Equipment, on_delete=models.CASCADE)\n# date = models.DateField()\n# start_time = models.TimeField()\n# end_time = models.TimeField()\n#\n# def __str__(self):\n# return '{}, {}'.format(self.equipment.name, self.date)\n\n\nclass MaintenanceTransaction(models.Model):\n equipment = models.ForeignKey(Equipment, on_delete=models.CASCADE)\n start_date = models.DateField()\n end_date = models.DateField(null=True)\n cost = models.DecimalField(max_digits=12, decimal_places=2, null=True)\n\n def __str__(self):\n return '{}, {}'.format(self.equipment.name, self.start_date)\n","sub_path":"equipment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"577651553","text":"\"\"\"\nhttps://leetcode.com/problems/kth-smallest-element-in-a-sorted-matrix/\n\"\"\"\n\n\nclass Solution(object):\n \"\"\"\n Using builtin sort : O(n * log(n))\n \"\"\"\n def kthSmallest(self, matrix, k):\n array = [val for row in matrix for val in row]\n array.sort()\n return array[k - 1]\n\nmatrix = [\n [ 1, 5, 9],\n [10, 11, 13],\n [12, 13, 15]\n]\nk = 8\nprint(13, Solution().kthSmallest(matrix, k))\n\n\n\n\n","sub_path":"leetcode/kth-smallest-element-in-a-sorted-matrix/kth-smallest-element-in-a-sorted-matrix.py","file_name":"kth-smallest-element-in-a-sorted-matrix.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"505634716","text":"from tests.test_case import TestCase\nfrom tests.utils import build_test_authorization\n\n\nclass TestApi(TestCase):\n def test_successful_request(self):\n auth = build_test_authorization()\n re = self.client.get('/v1/me', headers=auth['headers'])\n\n self.assertEqual(200, re.status_code)\n self.assertEqual(\n re.json,\n {'identity': auth['public_address']}\n )\n\n def test_failure(self):\n re = self.client.get(\n '/v1/me',\n headers={\"Authorization\": \"\"}\n )\n\n self.assertEqual(401, re.status_code)\n self.assertEqual(\n re.json,\n {'error': 'missing Authorization in request header'}\n )\n","sub_path":"tests/test_signed_payload.py","file_name":"test_signed_payload.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"247728662","text":"import cv2\r\nimport math\r\nimport numpy as np\r\nimport VoilaJones.IntegralImage as IntImg\r\nimport VoilaJones.HaarLikeFeatures as HaarFeat\r\nimport VoilaJones.Adaboost as Ada\r\n\r\n\r\nFaceSize = 2429\r\nnonFaceSize = 4547\r\n\r\n# Loading the Images of Faces and Non-Faces\r\nTrainFacePath = 'TRAINING/TrainingFaces/'\r\nTrainNoFacePath = 'TRAINING/TrainingNonFaces/'\r\n\r\ntrain_img = []\r\n\r\nprint('Loading Faces Images From The Training Data...')\r\nfor img_no in range(1,FaceSize+1):\r\n fullImgPath = TrainFacePath + str(img_no) + '.pgm'\r\n img = cv2.imread(fullImgPath, 0)\r\n int_img = IntImg.getIntegralImg(img)\r\n train_img.append(int_img)\r\n\r\ntemp1 = [] \r\ntemp2 = [] \r\ntemp3 = [] \r\ntemp4 = [] \r\ntemp5 = []\r\n \r\nprint('Loading Non-Faces Images From The Training Data...')\r\nfor img_no in range(1,nonFaceSize+1):\r\n fullImgPath = TrainNoFacePath + str(img_no) + '.pgm'\r\n img = cv2.imread(fullImgPath, 0)\r\n int_img = IntImg.getIntegralImg(img)\r\n train_img.append(int_img)\r\n \r\n# Initialize image weights\r\nimgWeights = np.divide(np.ones(FaceSize+nonFaceSize),(FaceSize+nonFaceSize))\r\n\r\n# Size of training images\r\nwindow = 19\r\n\r\n# Matrix of Haar Feature Dimension\r\nhaars = np.array([[1,2],[2,1],[1,3],[3,1],[2,2]])\r\n\r\nfor iterations in range(1,3):\r\n weakClassifiers = np.empty((0,12))\r\n \r\n for haar in range(1,6):\r\n print(\"Working on Haar-\" + str(haar) + \"\\n\")\r\n dimX = haars[haar-1, 0]\r\n dimY = haars[haar-1, 1]\r\n \r\n for X in range(2, (window-dimX)+1):\r\n for Y in range(2, (window-dimY)+1):\r\n for haarX in range(dimX, (window-X)+1, dimX):\r\n for haarY in range(dimY, (window-Y)+1, dimY):\r\n haarVector1 = np.zeros(FaceSize) \r\n \r\n for img_no in range(0,FaceSize):\r\n val = HaarFeat.getHaarValue(train_img[img_no], haar, X, Y, haarX, haarY)\r\n haarVector1[img_no] = val\r\n \r\n faceMean = np.mean(haarVector1);\r\n faceStd = np.std(haarVector1);\r\n faceMax = np.max(haarVector1);\r\n faceMin = np.min(haarVector1);\r\n \r\n haarVector2 = np.zeros(nonFaceSize)\r\n \r\n for img_no in range(0,nonFaceSize):\r\n val = HaarFeat.getHaarValue(train_img[FaceSize+img_no], haar, X, Y, haarX, haarY)\r\n haarVector2[img_no] = val\r\n \r\n storeRatingDiff = []\r\n storeFaceRating = []\r\n storeNonFaceRating = []\r\n storeTotalError = []\r\n storeLowerBound = []\r\n storeUpperBound = []\r\n strongCounter = 0\r\n \r\n for iter in range(1, 51):\r\n C = np.ones(imgWeights.shape[0])\r\n minRating = faceMean - np.abs((iter/50)*(faceMean-faceMin))\r\n maxRating = faceMean + np.abs((iter/50)*(faceMax-faceMean))\r\n for val in range(0,FaceSize):\r\n if(haarVector1[val] >= minRating and haarVector1[val] <= maxRating):\r\n C[val] = 0\r\n \r\n faceRating = np.sum(np.multiply(imgWeights[0:FaceSize], C[0:FaceSize]))\r\n if(faceRating < 0.05):\r\n for val in range(0,nonFaceSize):\r\n if(haarVector2[val] >= minRating and haarVector2[val] <= maxRating):\r\n print(\"\")\r\n else: \r\n C[FaceSize+val] = 0\r\n \r\n nonFaceRating = np.sum(np.multiply(imgWeights[FaceSize:FaceSize+nonFaceSize], C[FaceSize:FaceSize+nonFaceSize]))\r\n totalError = np.sum(np.multiply(imgWeights, C))\r\n \r\n if(totalError < 0.5):\r\n strongCounter = strongCounter+1;\r\n storeRatingDiff.append((1-faceRating)-nonFaceRating)\r\n storeFaceRating.append(1-faceRating)\r\n storeNonFaceRating.append(nonFaceRating)\r\n storeTotalError.append(totalError) \r\n storeLowerBound.append(minRating)\r\n storeUpperBound.append(maxRating)\r\n \r\n if(len(storeRatingDiff) > 0):\r\n maxRatingIndex = -math.inf\r\n maxRatingDiff = np.max(storeRatingDiff)\r\n \r\n for index in range(0, len(storeRatingDiff)):\r\n if(storeRatingDiff[index] == maxRatingDiff):\r\n maxRatingIndex = index\r\n break\r\n \r\n if(len(storeRatingDiff) > 0):\r\n thisClassifier = np.array([haar, X, Y, haarX, haarY, \r\n maxRatingDiff, storeFaceRating[maxRatingIndex], \r\n storeNonFaceRating[maxRatingIndex],\r\n storeLowerBound[maxRatingIndex], \r\n storeUpperBound[maxRatingIndex],\r\n storeTotalError[maxRatingIndex]])\r\n imgWeights, alpha = Ada.doAdaboost(thisClassifier, train_img, imgWeights)\r\n np.append(thisClassifier, alpha)\r\n weakClassifiers = np.append(weakClassifiers, thisClassifier,0)\r\n \r\n if(haar == 1):\r\n temp1.append(thisClassifier)\r\n elif(haar == 2):\r\n temp2.append(thisClassifier)\r\n elif(haar == 3):\r\n temp3.append(thisClassifier)\r\n elif(haar == 4):\r\n temp4.append(thisClassifier)\r\n elif(haar == 5):\r\n temp5.append(thisClassifier)\r\n \r\n print(\"Finished Haar-\"+ str(haar)+ \"\\n\")\r\n\r\n\r\n\r\nprint(\"Making strong classifiers from sorting according to alpha values\\n\") \r\nalphas = np.zeros(weakClassifiers.shape[0])\r\nfor i in range(alphas.shape[0]):\r\n alphas[i] = weakClassifiers[i][11]\r\n \r\n\r\ntempClassifiers = np.zeros((alphas.shape[0],2))\r\ntempClassifiers[:,0] = alphas\r\nfor i in range(alphas.shape[0]):\r\n tempClassifiers[i,1] = i\r\n\r\n\r\ntempClassifiers = np.sort(tempClassifiers, 0)[::-1] # Sorting in desc order wrt 1 columns i.e. alphas values\r\n\r\nselectedClassifiers = np.zeros((286,12))\r\nfor i in range(286):\r\n selectedClassifiers[i,:] = weakClassifiers[tempClassifiers[i,1],:]\r\n\r\n\r\nnp.save('selectedClassifiers.npy', selectedClassifiers)\r\n\r\n\r\n \r\n \r\n ","sub_path":"VoilaJones/TrainHaarClassifier_Main.py","file_name":"TrainHaarClassifier_Main.py","file_ext":"py","file_size_in_byte":7593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"26901286","text":"import math\nimport json\n\ndef gate(gate_no):\n ans = input('Water gate {}: '.format(gate_no))\n if ans == \"yes\":\n points = gate_dict[gate_no]\n return points\n elif ans == \"no\":\n points = 0\n return points\n else:\n print(' [-] Wrong answer! ( yes/no )')\n exit()\n\ndef total(points):\n if points <= 0:\n x = 3\n return x\n else:\n x = round(points * 1.0204, 3)\n if x > 100:\n x = 100\n return x\n\ngate_dict = {\n 1: 21,\n 2: 5,\n 3: 8,\n 4: 16\n}\n\nzone_dict = {\n \"Z1\": 3,\n \"Z2\": 11,\n \"Z3\": 19,\n \"Z4\": 27,\n \"Z5\": 34,\n \"Z6\": 41,\n \"Z7\": 46,\n \"Z8\": 48,\n \"Z9\": 25,\n \"Z10\": 5,\n \"central\": 50\n}\n\nname = input('Name: ')\n\ntry:\n rounds = int(input('Round: '))\nexcept ValueError:\n print(' [-] Wrong value! [1-3]')\n\nif not 0 < rounds < 4:\n print(' [-] Wrong value! [1-3]')\n exit()\n\ngate_1 = gate(1)\ngate_2 = gate(2)\ngate_3 = gate(3)\ngate_4 = gate(4)\ngate_points = gate_1 + gate_2 + gate_3 + gate_4\n\nzone = input('Landing zone: ')\nif zone == \"Z1\" or zone == \"Z2\" or zone == \"Z3\" or zone == \"Z4\" or zone == \"Z5\" or zone == \"Z6\" or zone == \"Z7\" or \\\n zone == \"Z8\" or zone == \"Z9\" or zone == \"Z10\" or zone == \"central\":\n zone_points = zone_dict[zone]\nelif not zone:\n zone_points = 0\n gate_points = 0\nelse:\n print(' [-] Wrong Zone! ( Z[1-10] / central )')\n exit()\n\nstand = input('Stand Up? ')\nif stand == \"yes\":\n stand_points = 0\nelif stand == \"no\":\n stand_points = -10\nelse:\n print(' [-] Wrong answer! ( yes/no )')\n exit()\n\n# print ('\\nGate points: {}'.format(gate_points))\n# print ('Zone points: {}'.format(zone_points))\n# print ('Stand Up: {}'.format(stand_points))\n\npoints = gate_points + zone_points + stand_points\nscore = total(points)\nprint('\\nOFFICAL SCORE: {}'.format(score))\n\nwith open('results_accuracy.txt', 'a') as f:\n f.write('Name: {}, Round: {}, Score: {}\\n\\tG1: {}\\n\\tG2: {}\\n\\tG3: {}\\n\\tG4: {}\\n\\tZone: {}\\n\\tStandUp: {}\\n\\n'.\\\n format(name, rounds, score, \"yes\" if gate_1 > 0 else \"no\", \"yes\" if gate_2 > 0 else \"no\", \"yes\" if gate_3 > 0 \\\n else \"no\", \"yes\" if gate_4 > 0 else \"no\", zone, stand))","sub_path":"cp_accuracy.py","file_name":"cp_accuracy.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"461355788","text":"from os import path\nimport pip\nimport re\nfrom setuptools import setup, find_packages\nimport sys\n\nhere = path.abspath(path.dirname(__file__))\n\npip_version_match = re.search(r'^[0-9]*', pip.__version__)\nif pip_version_match:\n if int(pip_version_match.group(0)) < 19:\n sys.exit('Install requires pip version 19 or greater. ' +\n 'Run pip install --upgrade pip.')\nelse:\n sys.exit('There was an error getting the pip version number.')\n\n\nsetup(\n name='bnpgmm',\n version='0.1',\n description='Implements a BNP-GMM clustering using variational Bayes and evaluates sensitivity to the BNP prior. ',\n author='Runjing Liu, Ryan Giordano',\n author_email='runjing_liu@berkeley.edu',\n packages=find_packages(exclude=['docs', 'tests']),\n install_requires=['sklearn'],\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Development Status :: 2 - Pre-Alpha',\n 'Topic :: Scientific/Engineering :: Mathematics'\n ],\n)\n","sub_path":"GMM_clustering/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"623898353","text":"\"\"\"\nMIT License\n\nCopyright (c) 2020 Airbyte\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport unicodedata as ud\nfrom re import match, sub\nfrom typing import Dict, List, Optional, Set\n\nimport yaml\nfrom jinja2 import Template\n\nfrom .reserved_keywords import is_reserved_keyword\n\n\nclass TransformCatalog:\n \"\"\"\nTo run this transformation:\n```\npython3 main_dev_transform_catalog.py \\\n --integration-type \n --profile-config-dir . \\\n --catalog integration_tests/catalog.json \\\n --out dir \\\n --json-column json_blob\n```\n \"\"\"\n\n config: dict = {}\n\n def __init__(self):\n self.config = {}\n\n def run(self, args) -> None:\n self.parse(args)\n self.process_catalog()\n\n def parse(self, args) -> None:\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\"--integration-type\", type=str, required=True, help=\"type of integration dialect to use\")\n parser.add_argument(\"--profile-config-dir\", type=str, required=True, help=\"path to directory containing DBT profiles.yml\")\n parser.add_argument(\"--catalog\", nargs=\"+\", type=str, required=True, help=\"path to Catalog (JSON Schema) file\")\n parser.add_argument(\"--out\", type=str, required=True, help=\"path to output generated DBT Models to\")\n parser.add_argument(\"--json-column\", type=str, required=False, help=\"name of the column containing the json blob\")\n parsed_args = parser.parse_args(args)\n profiles_yml = read_profiles_yml(parsed_args.profile_config_dir)\n self.config = {\n \"integration_type\": parsed_args.integration_type,\n \"schema\": extract_schema(profiles_yml),\n \"catalog\": parsed_args.catalog,\n \"output_path\": parsed_args.out,\n \"json_column\": parsed_args.json_column,\n }\n\n def process_catalog(self) -> None:\n integration_type = self.config[\"integration_type\"]\n output = self.config[\"output_path\"]\n schema = self.config[\"schema\"]\n for catalog_file in self.config[\"catalog\"]:\n print(f\"Processing {catalog_file}...\")\n catalog = read_json_catalog(catalog_file)\n print(json.dumps(catalog, separators=(\",\", \":\")))\n sources = generate_dbt_model(\n schema=schema, output=output, integration_type=integration_type, catalog=catalog, json_col=self.config[\"json_column\"]\n )\n write_yaml_sources(output=output, sources=sources, integration_type=integration_type)\n\n\ndef read_profiles_yml(profile_dir: str) -> dict:\n with open(os.path.join(profile_dir, \"profiles.yml\"), \"r\") as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n obj = config[\"normalize\"][\"outputs\"][\"prod\"]\n return obj\n\n\ndef extract_schema(profiles_yml: dict) -> str:\n if \"dataset\" in profiles_yml:\n return profiles_yml[\"dataset\"]\n else:\n return profiles_yml[\"schema\"]\n\n\ndef generate_dbt_model(schema: str, output: str, integration_type: str, catalog: dict, json_col: str) -> Dict[str, Set[str]]:\n source_tables: Dict[str, Set[str]] = {}\n all_tables: Dict[str, Set[str]] = {}\n for configuredStream in catalog[\"streams\"]:\n stream = get_field(configuredStream, \"stream\", \"Stream is not defined in Catalog Streams\")\n schema = normalize_schema_table_name(schema, integration_type)\n name = get_field(stream, \"name\", \"name is not defined in stream: \" + str(stream))\n raw_name = normalize_schema_table_name(f\"_airbyte_raw_{name}\", integration_type)\n\n if schema not in source_tables:\n source_tables[schema] = set()\n all_tables[schema] = set()\n if raw_name not in source_tables[schema]:\n source_tables[schema].add(raw_name)\n source_tables[schema].add(name)\n all_tables[schema].add(raw_name)\n all_tables[schema].add(name)\n else:\n raise KeyError(f\"Duplicate table {name} in {schema}\")\n for configuredStream in catalog[\"streams\"]:\n stream = get_field(configuredStream, \"stream\", \"Stream is not defined in Catalog Streams\")\n\n schema = normalize_schema_table_name(schema, integration_type)\n name = get_field(stream, \"name\", \"name is not defined in stream: \" + str(stream))\n\n raw_schema = normalize_schema_table_name(\"_airbyte_\" + schema, integration_type)\n raw_name = normalize_schema_table_name(f\"_airbyte_raw_{name}\", integration_type)\n\n message = f\"json_schema is not defined for stream {name}\"\n properties = get_field(get_field(stream, \"json_schema\", message), \"properties\", message)\n\n table = jinja_call(\"source('{}', '{}')\".format(schema, raw_name))\n\n # Check properties\n if not properties:\n raise EOFError(\"Unexpected empty properties in catalog\")\n\n process_node(\n output=output,\n integration_type=integration_type,\n path=[name],\n json_col=f\"'{json_col}'\",\n tables_in_schema=all_tables,\n properties=properties,\n raw_schema=raw_schema,\n schema=schema,\n name=name,\n table=table,\n parent_hash_id=\"\",\n inject_sql_prefix=\"\",\n inject_sql_suffix=\"\",\n )\n return source_tables\n\n\ndef get_field(config, key, message):\n if key in config:\n return config[key]\n else:\n raise KeyError(message)\n\n\ndef process_node(\n output: str,\n integration_type: str,\n path: list,\n json_col: str,\n tables_in_schema: Dict[str, Set[str]],\n properties,\n raw_schema,\n schema,\n name,\n table,\n parent_hash_id: str,\n inject_sql_prefix: str,\n inject_sql_suffix: str,\n):\n # Check properties\n if not properties:\n print(f\"Ignoring '{name}' nested field from {'/'.join(path)} because properties list is empty\")\n return\n\n # Generate JSON Parsing model\n sql_file_name = normalize_schema_table_name(\"ab1_{}\".format(name), integration_type)\n template = Template(\n \"\"\"\n{{ inject_sql_prefix }}\nselect\n{%- if parent_hash_id %}\n {{ parent_hash_id }},\n{%- endif %}\n{%- for field in fields %}\n {%- if field %}\n {{ field }},\n {%- endif %}\n{%- endfor %}\n _airbyte_emitted_at\nfrom {{ table }}\n{{ inject_sql_suffix }}\n{%- if len(path) > 1 %}\n-- {{ name }} from {{ \"/\".join(path) }}\n{%- endif %}\n\"\"\"\n )\n template.globals[\"len\"] = len\n sql = template.render(\n inject_sql_prefix=inject_sql_prefix,\n parent_hash_id=parent_hash_id,\n fields=[\n json_extract_property(json_col=json_col, name=field, definition=properties[field], integration_type=integration_type)\n for field in properties.keys()\n if not is_airbyte_column(field)\n ],\n table=table,\n inject_sql_suffix=inject_sql_suffix,\n name=normalize_schema_table_name(name, integration_type),\n path=path,\n )\n output_sql_view(output, raw_schema, sql_file_name, sql, path)\n\n # Generate column typing model\n previous_sql_file_name = sql_file_name\n table = \"{{ ref('\" + previous_sql_file_name + \"') }}\"\n sql_file_name = normalize_schema_table_name(\"ab2_{}\".format(name), integration_type)\n sql = template.render(\n inject_sql_prefix=\"\",\n parent_hash_id=parent_hash_id,\n fields=[\n cast_property_type(name=field, definition=properties[field], integration_type=integration_type)\n for field in properties.keys()\n if not is_airbyte_column(field)\n ],\n table=table,\n inject_sql_suffix=\"\",\n name=normalize_schema_table_name(name, integration_type),\n path=\"/\".join(path),\n )\n output_sql_view(output, raw_schema, sql_file_name, sql, path)\n\n hash_id = quote_column(f\"_airbyte_{normalize_schema_table_name(name, integration_type)}_hashid\", integration_type)\n # Generate hash_id column model\n previous_sql_file_name = sql_file_name\n table = \"{{ ref('\" + previous_sql_file_name + \"') }}\"\n sql_file_name = normalize_schema_table_name(\"ab3_{}\".format(name), integration_type)\n template = Template(\n \"\"\"\nselect\n *,\n {{ '{{' }} dbt_utils.surrogate_key([\n{%- if parent_hash_id %}\n '{{ parent_hash_id }}',\n{%- endif %}\n{%- if fields %}\n {%- for field in fields %}\n {%- if field %}\n {{ field }},\n {%- endif %}\n {%- endfor %}\n ]) {{ '}}' }} as {{ hash_id }}\n{%- else %}\n null as {{ hash_id }}\n{%- endif %}\nfrom {{ table }}\n{%- if len(path) > 1 %}\n-- {{ name }} from {{ \"/\".join(path) }}\n{%- endif %}\n\"\"\"\n )\n template.globals[\"len\"] = len\n sql = template.render(\n parent_hash_id=parent_hash_id,\n fields=[\n safe_cast_to_varchar(name=field, definition=properties[field], integration_type=integration_type)\n for field in properties.keys()\n if not is_airbyte_column(field)\n ],\n hash_id=hash_id,\n table=table,\n name=normalize_schema_table_name(name, integration_type),\n path=path,\n )\n output_sql_view(output, raw_schema, sql_file_name, sql, path)\n\n # Generate final model\n previous_sql_file_name = sql_file_name\n table = \"{{ ref('\" + previous_sql_file_name + \"') }}\"\n sql_file_name = normalize_schema_table_name(name, integration_type)\n template = Template(\n \"\"\"\nselect\n{%- if parent_hash_id %}\n {{ parent_hash_id }},\n{%- endif %}\n{%- for field in fields %}\n {%- if field %}\n {{ field }},\n {%- endif %}\n{%- endfor %}\n _airbyte_emitted_at,\n {{ hash_id }}\nfrom {{ table }}\n{%- if len(path) > 1 %}\n-- {{ name }} from {{ \"/\".join(path) }}\n{%- endif %}\n\"\"\"\n )\n template.globals[\"len\"] = len\n sql = template.render(\n parent_hash_id=parent_hash_id,\n fields=[quote_column(field, integration_type) for field in properties.keys() if not is_airbyte_column(field)],\n hash_id=hash_id,\n table=table,\n name=normalize_schema_table_name(name, integration_type),\n path=path,\n )\n output_sql_table(output, schema, sql_file_name, sql, path)\n\n # Generate children models\n previous_sql_file_name = sql_file_name\n table = \"{{ ref('\" + previous_sql_file_name + \"') }}\"\n for field in properties.keys():\n if is_airbyte_column(field):\n pass\n elif is_combining_node(properties[field]):\n # TODO: merge properties of all combinations\n pass\n elif \"type\" not in properties[field] or is_object(properties[field][\"type\"]):\n process_nested_property(\n output=output,\n integration_type=integration_type,\n path=path,\n json_col=f\"'{field}'\",\n tables_in_schema=tables_in_schema,\n properties=properties[field],\n raw_schema=raw_schema,\n schema=schema,\n table=table,\n parent_hash_id=hash_id,\n inject_sql_prefix=\"\",\n inject_sql_suffix=f\"where {quote_column(field, integration_type)} is not null\\n\",\n field=field,\n )\n elif is_array(properties[field][\"type\"]) and \"items\" in properties[field]:\n quoted_name = f\"'{normalize_schema_table_name(name, integration_type)}'\"\n quoted_field = quote_column(field, integration_type, in_jinja=True)\n process_nested_property(\n output=output,\n integration_type=integration_type,\n path=(path),\n json_col=\"unnested_column_value('_airbyte_data')\",\n tables_in_schema=tables_in_schema,\n properties=properties[field][\"items\"],\n raw_schema=raw_schema,\n schema=schema,\n table=table,\n parent_hash_id=hash_id,\n inject_sql_prefix=jinja_call(f\"unnest_cte({quoted_name}, {quoted_field})\"),\n inject_sql_suffix=\"\"\"\n{}\nwhere {} is not null\n\"\"\".format(\n jinja_call(f\"cross_join_unnest({quoted_name}, {quoted_field})\"), quote_column(field, integration_type)\n ),\n field=field,\n )\n\n\ndef process_nested_property(\n output: str,\n integration_type: str,\n path: list,\n json_col: str,\n tables_in_schema: Dict[str, Set[str]],\n properties,\n raw_schema,\n schema,\n table,\n parent_hash_id: str,\n inject_sql_prefix: str,\n inject_sql_suffix: str,\n field: str,\n):\n children = find_properties_object(path=[], field=field, properties=properties, integration_type=integration_type)\n for child in children:\n child_name = child\n if child_name in tables_in_schema[schema]:\n child_str = normalize_schema_table_name(child_name, integration_type)\n for i in range(1, 100):\n if f\"{child_str}_{i}\" not in tables_in_schema[schema]:\n child_name = f\"{child_str}_{i}\"\n break\n tables_in_schema[schema].add(child_name)\n process_node(\n output=output,\n integration_type=integration_type,\n path=(path + [child]),\n json_col=json_col,\n tables_in_schema=tables_in_schema,\n properties=children[child],\n raw_schema=raw_schema,\n schema=schema,\n name=child_name,\n table=table,\n parent_hash_id=parent_hash_id,\n inject_sql_prefix=inject_sql_prefix,\n inject_sql_suffix=inject_sql_suffix,\n )\n\n\ndef read_json_catalog(input_path: str) -> dict:\n with open(input_path, \"r\") as file:\n contents = file.read()\n return json.loads(contents)\n\n\ndef is_string(property_type) -> bool:\n return property_type == \"string\" or \"string\" in property_type\n\n\ndef is_integer(property_type) -> bool:\n return property_type == \"integer\" or \"integer\" in property_type\n\n\ndef is_number(property_type) -> bool:\n return property_type == \"number\" or \"number\" in property_type\n\n\ndef is_boolean(property_type) -> bool:\n return property_type == \"boolean\" or \"boolean\" in property_type\n\n\ndef is_array(property_type) -> bool:\n return property_type == \"array\" or \"array\" in property_type\n\n\ndef is_object(property_type) -> bool:\n return property_type == \"object\" or \"object\" in property_type\n\n\ndef is_airbyte_column(name: str) -> bool:\n return name.startswith(\"_airbyte_\")\n\n\ndef is_simple_property(property_type) -> bool:\n return is_string(property_type) or is_integer(property_type) or is_number(property_type) or is_boolean(property_type)\n\n\ndef is_combining_node(properties: dict) -> set:\n return set(properties).intersection({\"anyOf\", \"oneOf\", \"allOf\"})\n\n\ndef jinja_call(command: str) -> str:\n return \"{{ \" + command + \" }}\"\n\n\ndef strip_accents(s):\n return \"\".join(c for c in ud.normalize(\"NFD\", s) if ud.category(c) != \"Mn\")\n\n\ndef normalize_schema_table_name(input_name: str, integration_type: str) -> str:\n # Temporarily disabling the behavior of the ExtendedNameTransformer on table/schema names, see (issue #1785)\n input_name = strip_accents(input_name)\n input_name = sub(r\"\\s+\", \"_\", input_name)\n input_name = sub(r\"[^a-zA-Z0-9_]\", \"_\", input_name)\n input_name = normalize_identifier_name(input_name, integration_type)\n input_name = normalize_identifier_case(input_name, integration_type, is_quoted=False)\n return input_name\n\n\ndef normalize_identifier_name(input_name: str, integration_type: str) -> str:\n if integration_type == \"bigquery\":\n if len(input_name) >= 1020:\n # bigquery has limit of 1024 characters\n input_name = input_name[0:1020]\n input_name = strip_accents(input_name)\n input_name = sub(r\"\\s+\", \"_\", input_name)\n doesnt_start_with_alphaunderscore = match(\"[^A-Za-z_]\", input_name[0])\n doesnt_contain_alphanumeric = match(\".*[^A-Za-z0-9_].*\", input_name)\n if doesnt_start_with_alphaunderscore or doesnt_contain_alphanumeric:\n input_name = f\"_{input_name}\"\n return sub(r\"[^a-zA-Z0-9_]\", \"_\", input_name)\n elif integration_type == \"redshift\":\n if len(input_name) >= 123:\n # redshift has limit of 127 characters\n input_name = input_name[0:123]\n elif integration_type == \"postgres\":\n if len(input_name) >= 59:\n # postgres has limit of 63 characters\n input_name = input_name[0:59]\n elif integration_type == \"snowflake\":\n if len(input_name) >= 251:\n # snowflake has limit of 255 characters\n input_name = input_name[0:251]\n else:\n raise KeyError(f\"Unknown integration type {integration_type}\")\n return input_name\n\n\ndef normalize_identifier_case(input_name: str, integration_type: str, is_quoted: bool = False):\n if integration_type == \"bigquery\":\n pass\n elif integration_type == \"redshift\":\n # all tables (even quoted ones) are coerced to lowercase.\n input_name = input_name.lower()\n elif integration_type == \"postgres\":\n if input_name[0] != \"'\" and input_name[0] != '\"' and not is_quoted:\n input_name = input_name.lower()\n elif integration_type == \"snowflake\":\n if input_name[0] != \"'\" and input_name[0] != '\"' and not is_quoted:\n input_name = input_name.upper()\n else:\n raise KeyError(f\"Unknown integration type {integration_type}\")\n return input_name\n\n\ndef quote_column(input_name: str, integration_type: str, in_jinja=False) -> str:\n if integration_type != \"bigquery\":\n result = normalize_identifier_name(input_name, integration_type)\n doesnt_start_with_alphaunderscore = match(\"[^A-Za-z_]\", result[0])\n contains_non_alphanumeric = match(\".*[^A-Za-z0-9_].*\", result)\n if doesnt_start_with_alphaunderscore or contains_non_alphanumeric or is_reserved_keyword(result, integration_type):\n result = f\"adapter.quote('{result}')\"\n result = normalize_identifier_case(result, integration_type, is_quoted=True)\n if not in_jinja:\n result = jinja_call(result)\n in_jinja = False\n else:\n result = normalize_identifier_case(result, integration_type, is_quoted=False)\n elif is_reserved_keyword(input_name, \"bigquery\"):\n result = normalize_identifier_name(input_name, \"bigquery\")\n result = f\"adapter.quote('{result}')\"\n result = normalize_identifier_case(result, \"bigquery\", is_quoted=True)\n if not in_jinja:\n result = jinja_call(result)\n in_jinja = False\n else:\n result = normalize_identifier_name(input_name, \"bigquery\")\n result = normalize_identifier_case(result, \"bigquery\", is_quoted=False)\n if in_jinja:\n # to refer to columns while already in jinja context, always quote\n return f\"'{result}'\"\n return result\n\n\ndef find_properties_object(path: List[str], field: str, properties, integration_type: str) -> dict:\n result = {}\n current_path = path + [field]\n current = \"_\".join(current_path)\n if isinstance(properties, str) or isinstance(properties, int):\n return {}\n else:\n if \"items\" in properties:\n return find_properties_object(path, field, properties[\"items\"], integration_type=integration_type)\n elif \"properties\" in properties:\n # we found a properties object\n return {current: properties[\"properties\"]}\n elif \"type\" in properties and is_simple_property(properties[\"type\"]):\n # we found a basic type\n return {current: None}\n elif isinstance(properties, dict):\n for key in properties.keys():\n child = find_properties_object(path=current_path, field=key, properties=properties[key], integration_type=integration_type)\n if child:\n result.update(child)\n elif isinstance(properties, list):\n for item in properties:\n child = find_properties_object(path=current_path, field=field, properties=item, integration_type=integration_type)\n if child:\n result.update(child)\n return result\n\n\ndef json_extract_property(json_col: str, name: str, definition: dict, integration_type: str) -> Optional[str]:\n current = [name]\n if \"type\" not in definition:\n return \"{} as {}\".format(jinja_call(f\"json_extract({json_col}, {current})\"), quote_column(name, integration_type))\n elif is_array(definition[\"type\"]):\n return \"{} as {}\".format(jinja_call(f\"json_extract_array({json_col}, {current})\"), quote_column(name, integration_type))\n elif is_object(definition[\"type\"]):\n return \"{} as {}\".format(jinja_call(f\"json_extract({json_col}, {current})\"), quote_column(name, integration_type))\n elif is_simple_property(definition[\"type\"]):\n return \"{} as {}\".format(\n jinja_call(f\"json_extract_scalar({json_col}, {current})\"),\n quote_column(name, integration_type),\n )\n else:\n return \"{} as {}\".format(jinja_call(f\"json_extract({json_col}, {current})\"), quote_column(name, integration_type))\n\n\ndef cast_property_type(name: str, definition: dict, integration_type: str) -> Optional[str]:\n if \"type\" not in definition:\n print(f\"WARN: Unknown type for column {name}\")\n return quote_column(name, integration_type)\n elif is_array(definition[\"type\"]):\n # TODO\n return quote_column(name, integration_type)\n elif is_object(definition[\"type\"]):\n # TODO in bq we can build RECORD objects...\n return \"cast({} as {}) as {}\".format(\n quote_column(name, integration_type),\n jinja_call(\"type_json()\"),\n quote_column(name, integration_type),\n )\n elif is_integer(definition[\"type\"]):\n return \"cast({} as {}) as {}\".format(\n quote_column(name, integration_type),\n jinja_call(\"dbt_utils.type_int()\"),\n quote_column(name, integration_type),\n )\n elif is_number(definition[\"type\"]):\n return \"cast({} as {}) as {}\".format(\n quote_column(name, integration_type),\n jinja_call(\"dbt_utils.type_float()\"),\n quote_column(name, integration_type),\n )\n elif is_boolean(definition[\"type\"]):\n return \"{} as {}\".format(\n jinja_call(f\"cast_to_boolean('{name}')\"),\n quote_column(name, integration_type),\n )\n elif is_string(definition[\"type\"]):\n return \"cast({} as {}) as {}\".format(\n quote_column(name, integration_type),\n jinja_call(\"dbt_utils.type_string()\"),\n quote_column(name, integration_type),\n )\n else:\n print(f\"WARN: Unknown type {definition['type']} for column {name}\")\n return quote_column(name, integration_type)\n\n\ndef safe_cast_to_varchar(name: str, definition: dict, integration_type: str) -> Optional[str]:\n if \"type\" not in definition:\n return quote_column(name, integration_type, in_jinja=True)\n elif is_boolean(definition[\"type\"]):\n return f\"boolean_to_varchar({quote_column(name, integration_type, in_jinja=True)})\"\n elif is_array(definition[\"type\"]):\n return f\"array_to_varchar({quote_column(name, integration_type, in_jinja=True)})\"\n else:\n return quote_column(name, integration_type, in_jinja=True)\n\n\ndef output_sql_view(output: str, schema: str, file: str, sql: str, path: list):\n output = os.path.join(output, \"airbyte_views\", schema)\n output_sql_file(output, schema, file, sql, path)\n\n\ndef output_sql_table(output: str, schema: str, file: str, sql: str, path: list):\n output = os.path.join(output, \"airbyte_tables\", schema)\n output_sql_file(output, schema, file, sql, path)\n\n\ndef output_sql_file(output: str, schema: str, file: str, sql: str, path: list):\n if not os.path.exists(output):\n os.makedirs(output)\n header = \"{{ config(schema='\" + schema + \"') }}\\n\"\n print(\" Generating {}.sql from {}:\".format(file, \"/\".join(path)))\n with open(os.path.join(output, f\"{file}.sql\"), \"w\") as f:\n f.write(header + sql)\n\n\ndef write_yaml_sources(output: str, sources: Dict[str, Set[str]], integration_type: str) -> None:\n schemas = []\n for schema in sources:\n quoted_schema = schema[0] == '\"'\n tables = [\n {\n \"name\": source,\n \"quoting\": {\"identifier\": True},\n }\n for source in sources[schema]\n if normalize_schema_table_name(source, integration_type)[0] == '\"'\n ] + [{\"name\": source} for source in sources[schema] if normalize_schema_table_name(source, integration_type)[0] != '\"']\n schemas.append(\n {\n \"name\": schema,\n \"quoting\": {\n \"database\": True,\n \"schema\": quoted_schema,\n \"identifier\": False,\n },\n \"tables\": tables,\n }\n )\n source_config = {\"version\": 2, \"sources\": schemas}\n source_path = os.path.join(output, \"sources.yml\")\n with open(source_path, \"w\") as fh:\n fh.write(yaml.dump(source_config))\n\n\ndef main(args=None):\n TransformCatalog().run(args)\n","sub_path":"airbyte-integrations/bases/base-normalization/normalization/transform_catalog/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":26249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"442639200","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n#\n# Scope: Programma per ...........\n# modified: by Loreto notarantonio 2017-03-28 09.59.46\n#\n# ######################################################################################\n\n\nimport os, sys\nimport time\n__version__ = 'LnVer_2017-03-29_14.15.24'\n\n################################################################################\n# -\n################################################################################\ndef SendMsg(gv, sendPort):\n\n fDEBUG = gv.fDEBUG\n TYPE = 3\n\n if TYPE == 1:\n # ===================================================\n # = RS-485 sendMessage\n # ===================================================\n try:\n print ('... press ctrl-c to stop the process.')\n\n basedata = 'Loreto.'\n index = 0\n\n while True:\n index += 1\n dataStr = '{DATA}.{INX:04}'.format(DATA=basedata, INX=index)\n\n dataToSend = bytearray()\n dataToSend.append(gv.rs485.sourceAddress)\n dataToSend.append(gv.rs485.destAddress)\n for x in dataStr:\n dataToSend.append(ord(x))\n\n dataSent = sendPort.writeData(dataToSend, fDEBUG=True)\n time.sleep(5)\n\n\n except (KeyboardInterrupt) as key:\n print (\"Keybord interrupt has been pressed\")\n sys.exit()\n\n\n\n\n elif TYPE == 2:\n # ===================================================\n # = RS-485 sendMessage\n # ===================================================\n try:\n print ('... press ctrl-c to stop the process.')\n\n\n basedata = 'Loreto.'\n\n cmd = gv.LnClass()\n cmd.sourceAddr = gv.rs485.sourceAddress\n cmd.destAddr = gv.rs485.destAddress\n\n index = 0\n while True:\n index += 1\n cmd.dataStr = '{DATA}.{INX:04}'.format(DATA=basedata, INX=index)\n dataSent = sendPort.writeDataCMD(cmd, fDEBUG=True)\n time.sleep(5)\n\n\n except (KeyboardInterrupt) as key:\n print (\"Keybord interrupt has been pressed\")\n sys.exit()\n\n\n\n elif TYPE == 3:\n # ===================================================\n # = RS-485 sendMessage\n # ===================================================\n try:\n print ('... press ctrl-c to stop the process.')\n\n sourceAddr = gv.rs485.sourceAddress\n destAddr = gv.rs485.destAddress\n\n basedata = 'Loreto.'\n index = 0\n while True:\n index += 1\n dataStr = '{DATA}.{INX:04}'.format(DATA=basedata, INX=index)\n dataSent = sendPort.writeDataSDD(sourceAddr, destAddr, dataStr, fDEBUG=True)\n time.sleep(5)\n\n\n except (KeyboardInterrupt) as key:\n print (\"Keybord interrupt has been pressed\")\n sys.exit()\n\n","sub_path":"RaspBerry/Source/Main/SendMsg.py","file_name":"SendMsg.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"633614774","text":"import esphome.codegen as cg\nfrom esphome.components import media_player\nimport esphome.config_validation as cv\n\nfrom esphome import pins\n\nfrom esphome.const import CONF_ID, CONF_MODE\nfrom esphome.core import CORE\n\nCODEOWNERS = [\"@jesserockz\"]\nDEPENDENCIES = [\"esp32\"]\n\ni2s_audio_ns = cg.esphome_ns.namespace(\"i2s_audio\")\n\nI2SAudioMediaPlayer = i2s_audio_ns.class_(\n \"I2SAudioMediaPlayer\", cg.Component, media_player.MediaPlayer\n)\n\ni2s_dac_mode_t = cg.global_ns.enum(\"i2s_dac_mode_t\")\n\nCONF_I2S_DOUT_PIN = \"i2s_dout_pin\"\nCONF_I2S_BCLK_PIN = \"i2s_bclk_pin\"\nCONF_I2S_LRCLK_PIN = \"i2s_lrclk_pin\"\nCONF_MUTE_PIN = \"mute_pin\"\nCONF_AUDIO_ID = \"audio_id\"\nCONF_DAC_TYPE = \"dac_type\"\n\nINTERNAL_DAC_OPTIONS = {\n \"left\": i2s_dac_mode_t.I2S_DAC_CHANNEL_LEFT_EN,\n \"right\": i2s_dac_mode_t.I2S_DAC_CHANNEL_RIGHT_EN,\n \"stereo\": i2s_dac_mode_t.I2S_DAC_CHANNEL_BOTH_EN,\n}\n\nEXTERNAL_DAC_OPTIONS = [\"mono\", \"stereo\"]\n\nCONFIG_SCHEMA = cv.All(\n cv.typed_schema(\n {\n \"internal\": cv.Schema(\n {\n cv.GenerateID(): cv.declare_id(I2SAudioMediaPlayer),\n cv.Required(CONF_MODE): cv.enum(INTERNAL_DAC_OPTIONS, lower=True),\n }\n )\n .extend(media_player.MEDIA_PLAYER_SCHEMA)\n .extend(cv.COMPONENT_SCHEMA),\n \"external\": cv.Schema(\n {\n cv.GenerateID(): cv.declare_id(I2SAudioMediaPlayer),\n cv.Required(\n CONF_I2S_DOUT_PIN\n ): pins.internal_gpio_output_pin_number,\n cv.Required(\n CONF_I2S_BCLK_PIN\n ): pins.internal_gpio_output_pin_number,\n cv.Required(\n CONF_I2S_LRCLK_PIN\n ): pins.internal_gpio_output_pin_number,\n cv.Optional(CONF_MUTE_PIN): pins.gpio_output_pin_schema,\n cv.Optional(CONF_MODE, default=\"mono\"): cv.one_of(\n *EXTERNAL_DAC_OPTIONS, lower=True\n ),\n }\n )\n .extend(media_player.MEDIA_PLAYER_SCHEMA)\n .extend(cv.COMPONENT_SCHEMA),\n },\n key=CONF_DAC_TYPE,\n ),\n cv.only_with_arduino,\n)\n\n\nasync def to_code(config):\n var = cg.new_Pvariable(config[CONF_ID])\n await cg.register_component(var, config)\n await media_player.register_media_player(var, config)\n\n if config[CONF_DAC_TYPE] == \"internal\":\n cg.add(var.set_internal_dac_mode(config[CONF_MODE]))\n else:\n cg.add(var.set_dout_pin(config[CONF_I2S_DOUT_PIN]))\n cg.add(var.set_bclk_pin(config[CONF_I2S_BCLK_PIN]))\n cg.add(var.set_lrclk_pin(config[CONF_I2S_LRCLK_PIN]))\n if CONF_MUTE_PIN in config:\n pin = await cg.gpio_pin_expression(config[CONF_MUTE_PIN])\n cg.add(var.set_mute_pin(pin))\n cg.add(var.set_external_dac_channels(2 if config[CONF_MODE] == \"stereo\" else 1))\n\n if CORE.is_esp32:\n cg.add_library(\"WiFiClientSecure\", None)\n cg.add_library(\"HTTPClient\", None)\n cg.add_library(\"esphome/ESP32-audioI2S\", \"2.0.6\")\n cg.add_build_flag(\"-DAUDIO_NO_SD_FS\")\n","sub_path":"esphome/components/i2s_audio/media_player.py","file_name":"media_player.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"56564445","text":"import os\nfrom string import punctuation\n\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom settings import *\n\n\ndef define_stopwords(language='english', adicional_stopwords=[]):\n return set(stopwords.words(language) + list(punctuation) + adicional_stopwords)\n\n\ndef define_wordcloud(stopwords, maskname='cloud.png'):\n mask = np.array(Image.open(os.path.join(MASK_PATH, maskname)))\n wc = WordCloud(background_color=\"white\", mask=mask, max_words=300, stopwords=stopwords,\n relative_scaling=0.15, collocations=False)\n return wc\n\n\ndef generate_wordcloud(wc, visualize=True, save_to_file=False, filename='wordcloud.png'):\n if visualize:\n plt.imshow(wc, interpolation='bilinear')\n plt.axis(\"off\")\n plt.figure()\n\n if save_to_file:\n wc.to_file(os.path.join(WORDCLOUD_PATH, filename))\n\n\n\ndf = pd.read_csv(os.path.join(DATA_PATH, 'Donald-Tweets!.csv'))\nseries_tweets = df['Tweet_Text']\ntext = series_tweets.str.cat(sep=' ').lower()\n\nadicional_stopwords = ['http', 'https']\nstopwords = define_stopwords(adicional_stopwords=adicional_stopwords)\n\nwc = define_wordcloud(stopwords)\nwc.generate(text)\n\ngenerate_wordcloud(wc, visualize=True, save_to_file=False, filename='WC-DonaldTweets.png')\n","sub_path":"wordcloud/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"461793782","text":"from flask import Blueprint, render_template, request\nfrom server.single.database import PluginModel\n\n\nadmin_panel = Blueprint('admin', __name__, url_prefix='/admin')\n\n\n@admin_panel.route('/', methods=['GET', 'POST', 'UPDATE', 'DELETE'])\ndef admin():\n if request.method == 'GET':\n plugins = list(PluginModel.select())\n return render_template(\"admin.html\", plugins=plugins)\n","sub_path":"aggregator/server/single/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"242689652","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nfrom adminapp import views\n\napp_name='adminapp'\n\nurlpatterns = [\n path('del/',views.dels,name='del'),\n path('add/',views.add,name='add'),\n path('add1/',views.add1,name='add1'),\n path('upd/',views.upd,name='upd'),\n path('upd1/',views.upd1,name='upd1'),\n path('sel/',views.sel,name='sel'),\n # path('emplist/', views.emplist, name='emplist'),\n]","sub_path":"adminapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"262408826","text":"# Desafio 98\n\nidiomas = {\n 'lucas':'francês',\n 'carlos':'espanhol',\n 'raphael':'inglês',\n 'jorge':'alemão'\n}\nprint(\"O idioma preferido de Lucas é \" + idiomas['lucas'] + \".\")\n\n# Definindo o nome do dicionário como 'idiomas', e definindo os elementos nele\n# da seguinte forma: o nome da pessoa : idioma preferido, não esquecendo da vírgula\n# para separar as chave-valores uns dos outros, e depois imprimindo a msg de qual\n# idioma a pessoa mais gosta.","sub_path":"desafio_98.py","file_name":"desafio_98.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"285043718","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''BullsAndCows game.'''\n\nfrom __future__ import print_function\n\nimport argparse\nimport random\nimport sys\nimport os\nimport re\n\n__author__ = \"Matej Buday\"\n\n\n# raw_input -> input for python 2\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--difficulty', type=int, metavar='(3-9)', default=4, choices=range(3, 10),\n help='set how many digits will the guessing number have')\n parser.add_argument('-n', '--no-leading-zero', dest='leading_zero', action='store_false',\n help='when generating a random number, 0 won\\'t be a lead digit')\n parser.add_argument('-w', '--words', dest='word_file', metavar='FILE',\n help='file containg words to choose from - one per line, no repeating characters')\n # eg: `cat /usr/dict/words | grep -P '^(?:([a-z])(?!.*?\\1)){5}$'` for 5-char words\n\n args = parser.parse_args()\n\n if args.word_file and not os.path.isfile(args.word_file):\n parser.exit('Not a file: \"{}\"'.format(args.word_file))\n\n return args\n\n\nclass BullsAndCows(object):\n score_strings = ['unbelievably well', 'awesome', 'pretty good', 'all right', 'mediocre', 'real bad']\n\n def __init__(self, difficulty=None, leading_zero=None, word_file=None):\n # switch to \"word\" mode if word file is provided and ignore other options\n if word_file:\n self.word_file = word_file\n self.generate_secret = self._pick_word\n self.validate_guess = self._validate_word\n self.scoring_base = 2.4\n return\n\n self.difficulty = difficulty\n self.leading_zero = leading_zero\n self.generate_secret = self._generate_number\n self.validate_guess = self._validate_number\n self.scoring_base = 1.2\n\n def play(self):\n print('Let\\'s play Bulls and Cows!')\n self.generate_secret()\n print('(Submit empty answer to exit)')\n attempts = self._game_loop()\n\n if not attempts:\n return\n\n # only works for numbers as words could be anything from list of weekdays to random strings\n score = int(max(attempts - len(self.secret), 0) / (1.2 * len(self.secret)))\n print('You did {}'.format(self.score_strings[min(5, score)]))\n\n def _game_loop(self):\n attempts = 0\n while True:\n print('Enter a guess:')\n raw_guess = input('>>> ')\n\n # exit\n if len(raw_guess) == 0:\n break\n\n guess = self.validate_guess(raw_guess)\n if not guess:\n continue\n\n attempts += 1\n bulls, cows = self._score_guess(guess)\n if bulls == len(self.secret):\n print('Correct, you\\'ve guessed the right answer in {} guesses!'.format(attempts))\n return attempts\n\n print('{} bulls, {} cows'.format(bulls, cows))\n\n def _score_guess(self, guess):\n bulls = sum([s == g for s, g in zip(self.secret, guess)])\n cows = len(set(self.secret).intersection(guess)) - bulls\n\n return bulls, cows\n\n def _generate_number(self):\n if not self.leading_zero:\n digits = range(1, 10)\n random.shuffle(digits)\n first_digit = digits.pop()\n # add 0 to unused digits and sample those\n self.secret = [first_digit] + random.sample(digits + [0], self.difficulty - 1)\n else:\n self.secret = random.sample(range(0, 10), self.difficulty)\n\n print('I\\'ve generated a random {} digit number for you.'.format(len(self.secret)))\n\n def _validate_number(self, raw_guess):\n guess = list(raw_guess)\n\n try:\n guess = list(map(int, guess))\n except ValueError:\n print('Only use digits 0-9 as your guess.')\n return False\n\n if len(guess) != self.difficulty:\n print('Please enter a {}-digit number as a guess.'.format(self.difficulty))\n return False\n\n return guess\n\n def _pick_word(self):\n with open(self.word_file, 'r') as f:\n try:\n secret = next(f)\n except StopIteration:\n raise ValueError('No words found in the provided file ({})'.format(self.word_file))\n\n # Reservoir Sampling\n for i, word in enumerate(f):\n if not random.randint(0, i + 1):\n secret = word\n\n secret = secret.strip().lower()\n\n # validate choice\n if not re.match('[a-z]', secret):\n raise ValueError('Words can only contain letters of the English alphabet, found \"{}\"'.format(secret))\n if len(secret) != len(set(secret)):\n raise ValueError('Only words without character repetition are supported, found \"{}\"'.format(secret))\n\n self.secret = list(secret)\n print('I\\'ve picked a {}-character-long word for you.'.format(len(self.secret)))\n\n def _validate_word(self, raw_guess):\n if not re.match('[a-z]', raw_guess):\n print('Only use characters a-z as your guess')\n return False\n\n guess = list(raw_guess)\n\n if len(guess) != len(self.secret):\n print('Please enter {} characters as a guess.'.format(len(self.secret)))\n return False\n # evil TODO: only allow guesses that are valid words\n\n return guess\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n game = BullsAndCows(difficulty=args.difficulty, leading_zero=args.leading_zero, word_file=args.word_file)\n\n try:\n game.play()\n except ValueError as e:\n sys.exit(e)\n","sub_path":"python/bullsandcows.py","file_name":"bullsandcows.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"449453974","text":"from visual import *\nfrom primitive.AbstractPromitive import AbstractPrimitive\nfrom primitive.Arm import *\n\n\nclass Motor(AbstractPrimitive):\n \"\"\"\n @:param vector pos\n \"\"\"\n\n def __init__(self):\n self.center = vector(-0.5, 0, 0)\n self.motor = box(pos=vector(-1, 0, 0), width=1, height=1, color=color.red)\n self.sphere1 = sphere(pos=self.center, radius=0.5, color=color.green)\n self.objects = [self.motor, self.sphere1]\n\n def set_position(self, pos):\n self.motor.pos = pos\n self.sphere1.pos = vector(pos.x + 0.5, pos.y, pos.z)\n\n","sub_path":"primitive/Motor.py","file_name":"Motor.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"359031034","text":"# _*_ coding: utf-8 _*_\n__author__ = 'yilin.zhang'\n__date__ = '17-5-6 下午1:05'\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import *\n\nurlpatterns = [\n url(r'^report/cts/$', ShowCtsReport,name='ctsReport'),\n url(r'^report/gts/$', ShowGtsReport,name='gtsReport'),\n url(r'^report/ctsverifier/$', ShowVerifierReport,name='verifierReport'),\n url(r'^cts/report/add/$', AddCtsReport,name='addReport'),\n url(r'^cts/report/case$', ShowCtsReportCase,name='ctsCase'),\n url(r'^gts/report/case$', ShowGtsReportCase,name='gtsCase'),\n url(r'^gts/report/email$', SendEmail,name='sendEmail'),\n]\n","sub_path":"GoogleReports/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"84229729","text":"#!/usr/bin/env python\n# _*_ coding: utf-8_*_\n#\n# Copyright 2016 planc2c.com\n# thomas@time2box.com\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport pymongo\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"../\"))\nfrom comm import singleton\nfrom global_const import MONGO_HOST, MONGO_PORT, MONGO_USR, MONGO_PWD, MONGO_DB\n\n\n#activity apply options\nclass apply_dao(singleton):\n __apply_collection = None;\n\n\n def __init__(self):\n if self.__apply_collection is None:\n conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT);\n db = conn[MONGO_DB];\n db.authenticate(MONGO_USR, MONGO_PWD);\n self.__apply_collection = db.apply;\n else:\n logging.info(\"apply_dao has inited......\");\n\n\n def create(self, json):\n self.__apply_collection.insert(json);\n logging.info(\"create apply success......\");\n\n\n def query_by_activity(self, activity_id):\n cursor = self.__apply_collection.find({\"activity_id\":activity_id})\n array = []\n for i in cursor:\n array.append(i)\n return array\n\n\n def query_by_order(self, order_id):\n cursor = self.__apply_collection.find({\"order_id\":order_id})\n array = []\n for i in cursor:\n array.append(i)\n return array\n\n\n def query_pagination_by_vendor(self, vendor_id, before, limit):\n cursor = self.__apply_collection.find({\n \"vendor_id\":vendor_id,\n \"create_time\":{\"$lt\":before}}).sort(\"create_time\",-1).limit(limit);\n array = []\n for i in cursor:\n array.append(i)\n return array\n\n\n def count_not_review_by_vendor(self, vendor_id):\n return self.__apply_collection.count({\n \"vendor_id\":vendor_id,\n \"review\":False})\n\n\n # 查询活动参加人数,以此来计算活动执行状态\n # @2016/06/06\n def count_by_activity(self, activity_id):\n return self.__apply_collection.count({\"activity_id\":activity_id})\n\n\n def update(self, json):\n _id = json[\"_id\"];\n self.__apply_collection.update({\"_id\":_id},{\"$set\":json});\n logging.info(\"update apply success......\");\n\n def query_by_title_keys(self, vendor_id, title_keys_value):\n cursor = self.__apply_collection.find({\n \"vendor_id\":vendor_id,\n \"activity_title\":{'$regex':title_keys_value}}).sort(\"create_time\",-1)\n array = []\n for i in cursor:\n array.append(i)\n return array\n\n def query_by_nickname_keys(self, vendor_id, nickname_keys_value):\n cursor = self.__apply_collection.find({\n \"vendor_id\":vendor_id,\n \"account_nickname\":{'$regex':nickname_keys_value}}).sort(\"create_time\",-1)\n # \"name\":{'$regex':nickname_keys_value}})\n array = []\n for i in cursor:\n array.append(i)\n return array\n\n def query_by_time_keys(self, vendor_id, begin_keys_value, end_keys_value):\n cursor = self.__apply_collection.find({\n \"vendor_id\":vendor_id,\n \"create_time\":{'$gt':begin_keys_value,'$lt':end_keys_value}}).sort(\"create_time\",-1)\n array = []\n for i in cursor:\n array.append(i)\n return array\n","sub_path":"foo/dao/apply_dao.py","file_name":"apply_dao.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"634223695","text":"# -*- coding: utf-8 -*-\r\nimport turtle\r\n\r\nr = eval(input())\r\nn = eval(input())\r\ncolor = input()\r\n\r\ndef moveTo(x, y):\r\n turtle.penup()\r\n turtle.goto(x, y)\r\n turtle.pendown()\r\n\r\n# draw\r\nturtle.pencolor(color)\r\nfor i in range(n):\r\n moveTo(r*i, 0)\r\n turtle.seth(-90)\r\n turtle.circle(-r*i)\r\n \r\nturtle.done()\r\n","sub_path":"pythonFile/turtle/fisrt-week-exercise/draw_circles.py","file_name":"draw_circles.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"425641664","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom rangoapp.models import Category\n\nclass CategoryMethodTests(TestCase):\n def test_ensure_views_are_positive(self):\n \"\"\"\n ensure_views_are_positive should results True for categories\n where views are zero or positive\n \"\"\"\n cat = Category(name='test', views=1, likes=0)\n cat.save()\n\n self.assertEqual((cat.views >= 0), True)\n\nclass IndexViewTest(TestCase):\n\n def test_index_view_with_no_categories(self):\n '''\n If no categories exist, an appopriate message should be displayed\n '''\n\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"There are no categories present.\")\n self.assertQueryEqual(response.context['categories'], [])","sub_path":"rangoapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"553074554","text":"import ctypes\nfrom world import *\nfrom AI.dumb import *\nfrom AI.smart import *\nfrom sdl2 import *\nfrom sdl2.sdlttf import *\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nPURPLE = (255, 0, 255)\n\ndef text(font, renderer, string, colour):\n surf = TTF_RenderText_Blended(font, str.encode(string), colour)\n texture = SDL_CreateTextureFromSurface(renderer, surf)\n SDL_FreeSurface(surf)\n return texture\n\ndef main():\n SDL_Init(SDL_INIT_VIDEO)\n TTF_Init()\n\n # Window settings\n window_width = 800\n window_height = 600\n\n # Window\n window = SDL_CreateWindow(b\"Snake 0u0\", 0, 0, window_width, window_height, SDL_WINDOW_SHOWN)\n renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC)\n\n font = TTF_OpenFont(str.encode(\"fonts/OpenSans-Regular.ttf\"), 24);\n paused_texture = text(font, renderer, \"PAUSED\", SDL_Color(255,0,0,255))\n dumb_texture = text(font, renderer, \"D\", SDL_Color(20,0,0,255))\n smart_texture = text(font, renderer, \"S\", SDL_Color(20,0,0,255))\n\n # World -- Dimensions\n world = World(40, 30)\n # World -- Food\n for i in range(20):\n world.add_food()\n # World -- Poison\n for i in range(2):\n world.add_poison()\n # World -- Snakes\n for i in range(0):\n world.add_snake(Dumb)\n for i in range(2):\n world.add_snake(Smart)\n # World -- Rats\n for i in range(8):\n world.add_rat(Dumb)\n\n for i in range(20):\n x = random.randint(0, world.w-1)\n y = random.randint(0, world.h-1)\n world.set(x, y, Tile.WALL)\n world.set(world.w-x-1, y, Tile.WALL)\n\n # Grid sizes\n grid_width = int(window_width/world.w)\n grid_height = int(window_height/world.h)\n\n # Print details\n print(F\"Window: {window_width}, {window_height}\")\n print(F\"World: {world.w}, {world.h}\")\n print(F\"Grid: {grid_width}, {grid_height}\")\n\n running = True\n paused = False\n debug = True\n event = SDL_Event()\n while running:\n while SDL_PollEvent(ctypes.byref(event)) != 0:\n if event.type == SDL_QUIT:\n running = False\n break\n elif event.type == SDL_KEYDOWN:\n if event.key.keysym.sym == SDLK_SPACE:\n paused = not paused\n elif event.key.keysym.sym == SDLK_d:\n debug = not debug\n\n # Update\n if not paused:\n world.step()\n\n # Render -- Clear\n SDL_SetRenderDrawColor(renderer, 20, 20, 20, 255)\n SDL_RenderClear(renderer)\n\n # Render -- Tiles\n for x in range(world.w):\n for y in range(world.h):\n if world.grid[x][y] == Tile.EMPTY:\n continue\n elif world.grid[x][y] == Tile.FOOD:\n SDL_SetRenderDrawColor(renderer, 128, 0, 255, 255)\n elif world.grid[x][y] == Tile.WALL:\n SDL_SetRenderDrawColor(renderer, 128, 128, 128, 255)\n elif world.grid[x][y] == Tile.POISON:\n SDL_SetRenderDrawColor(renderer, 255, 0, 0, 255)\n else:\n assert False\n\n r = SDL_Rect(grid_width*x + 1,\n grid_height*y + 1,\n grid_width - 2,\n grid_height - 2)\n SDL_RenderFillRect(renderer, r)\n\n # Render -- Snake bodies\n SDL_SetRenderDrawColor(renderer, 0, 200, 0, 255)\n for snake in world.snakes:\n for x, y in snake.body:\n r = SDL_Rect(grid_width*x + 1,\n grid_height*y + 1,\n grid_width - 2,\n grid_height - 2)\n SDL_RenderFillRect(renderer, r)\n\n # Render -- Snake heads\n SDL_SetRenderDrawColor(renderer, 0, 150, 0, 255)\n for snake in world.snakes:\n r = SDL_Rect(grid_width*snake.head[0] + 1,\n grid_height*snake.head[1] + 1,\n grid_width - 2,\n grid_height - 2)\n SDL_RenderFillRect(renderer, r)\n\n # Render -- Rats\n SDL_SetRenderDrawColor(renderer, 102, 50, 0, 255)\n for rat in world.rats:\n r = SDL_Rect(grid_width*rat.head[0] + 1,\n grid_height*rat.head[1] + 1,\n grid_width - 2,\n grid_height - 2)\n SDL_RenderFillRect(renderer, r)\n\n # Render -- Grid\n SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255)\n for x in range(world.w):\n SDL_RenderDrawLine(renderer, grid_width*x, 0, grid_width*x, window_height)\n for y in range(world.h):\n SDL_RenderDrawLine(renderer, 0, grid_height*y, window_width, grid_height*y)\n\n if debug:\n for snake in world.snakes:\n dst = SDL_Rect(snake.head[0]*grid_width, snake.head[1]*grid_height, grid_width, grid_height)\n if snake.brain.type == \"Dumb\":\n SDL_RenderCopy(renderer, dumb_texture, None, dst)\n elif snake.brain.type == \"Smart\":\n SDL_RenderCopy(renderer, smart_texture, None, dst)\n\n # Render -- Paused message\n if paused:\n dst = SDL_Rect(30, 30, 300, 300)\n SDL_RenderCopy(renderer, paused_texture, None, dst)\n\n # Flip\n SDL_RenderPresent(renderer)\n\n SDL_Delay(250)\n\n SDL_DestroyTexture(paused_texture)\n SDL_DestroyTexture(dumb_texture)\n SDL_DestroyTexture(smart_texture)\n TTF_CloseFont(font)\n SDL_DestroyRenderer(renderer)\n SDL_DestroyWindow(window)\n SDL_Quit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"379165453","text":"#!/usr/bin/python3\n\nimport sys\nimport json\n\ndata = open(sys.argv[1]).read()\n\nsimilarities = json.loads(data)\n\nclusters = set()\n\nfor key in similarities:\n if len(similarities[key]) == 0:\n clusters.add(frozenset([key]))\n else:\n cluster = set()\n cluster.add(key)\n for similar in similarities[key]:\n cluster.add(similar)\n cluster.update(similarities[similar])\n cluster = frozenset(cluster)\n clusters.add(cluster)\n\nfor cluster in clusters:\n if len(cluster) > 1:\n print('\\t'.join(cluster))\n\nfor cluster in clusters:\n if len(cluster) == 1:\n print('\\t'.join(cluster))\n","sub_path":"code/backend/preprocessing/from_txt_to_database/cluster_aff.py","file_name":"cluster_aff.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"539419636","text":"'''\nCreated on Apr 10, 2018\n\n@author: danasz\n'''\nimport twitterFeed as tw\nimport makeMovie as movie\nimport mongoDatabase as db\nimport googleVision as ggl\n\nimport wget\nimport os\nimport shutil\nimport datetime\nfrom pymongo import MongoClient\n\nfrom google.cloud import vision\n\nfrom google.cloud.vision import types\n\n#outputfolder name\noutput = \"./output\"\nimageLabels = []\n\ndef main(): \n\n twitterHandles = [\"katyperry\", \"justinbieber\", \"barackobama\",\n \"rihanna\", \"taylorswift13\", \"ladygaga\",\n \"YouTube\", \"Cristiano\", \"realdonaldtrump\",\n \"JeffBezos\", \"WIRED\", \"nytimes\",\"FoxNews\",\n \"CNN\", \"jimmyfallon\", \"Oprah\", \"BillGates\",\n \"instagram\", \"NASA\", \"TheEconomist\", \"NatGeo\"]\n \n for handle in twitterHandles:\n imageLabels[:] = []\n\n #make/clean output file\n if os.path.exists(output):\n shutil.rmtree(output)\n if not os.path.exists(output):\n os.mkdir(output)\n \n # pass in the username of the account you want to download feed from\n try:\n #download media from twitterFeed\n imageCount = tw.get_all_tweets(handle, output)\n # Only calls subsequent functions if media download from twitter feed was successful\n if (imageCount > 0):\n imageLabels.append(ggl.lable_images())\n movie.make_video()\n db.insertMongo(handle, datetime.datetime.now(), imageLabels, imageCount)\n else:\n print(\"ERROR: Unable ro run program for the selected twitter feed.\\nPlease try again with another username\")\n except Exception as e:\n print(str(e))\n else:\n print(\"Done. Program successful\")\n \nif __name__ == '__main__':\n main()","sub_path":"database/phase3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"459824700","text":"# coding:utf-8\n\nimport os\n\n\nif __name__ == \"__main__\":\n # run all unittest directly\n os.environ['DJANGO_SETTINGS_MODULE'] = \"pylucid_project.settings\"\n\n\nfrom django import forms\nfrom django.forms.util import flatatt\n\nfrom pylucid_project.tests.test_tools import basetest\n\n\nclass TestForm(forms.Form):\n foo = forms.CharField(max_length=100)\n\n\nclass FlatattPatchTest(basetest.BaseUnittest):\n \"\"\"\n Test flatatt Monkey-patch (done in pylucid test.test_tools.test_runner)\n \"\"\"\n TEST_ATTRS = {\"m\":5, \"a\":1, \"b\":2, \"c\":3, \"z\":8, 1:1, 2:2}\n\n def test_function(self):\n output = flatatt(self.TEST_ATTRS)\n self.failUnlessEqual(u' 1=\"1\" 2=\"2\" a=\"1\" b=\"2\" c=\"3\" m=\"5\" z=\"8\"', output)\n\n def test_widget_patch(self):\n test_widget = forms.TextInput()\n self.failUnlessEqual(\n test_widget.render(\"foo\", \"bar\", attrs=self.TEST_ATTRS),\n ' '\n )\n\n def test_forms_patch(self):\n form = TestForm()\n foo_field = form[\"foo\"]\n self.failUnlessEqual(\n foo_field.label_tag(attrs=self.TEST_ATTRS),\n 'Foo '\n )\n\n\nif __name__ == \"__main__\":\n # Run all unittest directly\n from django.core import management\n\n tests = __file__\n# tests = \"apps.pylucid_admin.tests.PyLucidPluginsTest.test_access_admin_views\"\n\n management.call_command('test', tests,\n# verbosity=0,\n verbosity=1,\n failfast=True\n )\n","sub_path":"venv/Lib/site-packages/PyLucid-1.5.0-py2.7.egg/pylucid_project/tests/test_Unittest.py","file_name":"test_Unittest.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"78048243","text":"import numpy\r\n\r\n\r\nclass BotIndicators(object):\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def moving_average(data_points, period):\r\n if len(data_points) > 1:\r\n return sum(data_points[-period:]) / float(len(data_points[-period:]))\r\n\r\n @staticmethod\r\n def momentum(data_points, period=14):\r\n if len(data_points) > period - 1:\r\n return data_points[-1] * 100 / data_points[-period]\r\n\r\n @staticmethod\r\n def ema(prices, period):\r\n x = numpy.asarray(prices)\r\n weights = numpy.exp(numpy.linspace(-1., 0., period))\r\n weights /= weights.sum()\r\n\r\n a = numpy.convolve(x, weights, mode='full')[:len(x)]\r\n a[:period] = a[period]\r\n return a\r\n\r\n def macd(self, prices, n_slow=26, n_fast=12):\r\n ema_slow = self.ema(prices, n_slow)\r\n ema_fast = self.ema(prices, n_fast)\r\n return ema_slow, ema_fast, ema_fast - ema_slow\r\n\r\n @staticmethod\r\n def rsi(prices, period=14):\r\n deltas = numpy.diff(prices)\r\n seed = deltas[:period + 1]\r\n up = seed[seed >= 0].sum() / period\r\n down = -seed[seed < 0].sum() / period\r\n rs = up / down\r\n rsi = numpy.zeros_like(prices)\r\n rsi[:period] = 100. - 100. / (1. + rs)\r\n\r\n for i in range(period, len(prices)):\r\n delta = deltas[i - 1] # cause the diff is 1 shorter\r\n if delta > 0:\r\n up_val = delta\r\n down_val = 0.\r\n else:\r\n up_val = 0.\r\n down_val = -delta\r\n\r\n up = (up * (period - 1) + up_val) / period\r\n down = (down * (period - 1) + down_val) / period\r\n rs = up / down\r\n rsi[i] = 100. - 100. / (1. + rs)\r\n\r\n if len(prices) > period:\r\n return rsi[-1]\r\n else:\r\n return 50 # output a neutral amount until enough prices in list to calculate RSI\r\n","sub_path":"part 3/botindicators.py","file_name":"botindicators.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"43160621","text":"import glob\nimport subprocess\n\n\nclass ResultList:\n\n def __init__(self, artifact_path: str) -> None:\n self.artifact_path = artifact_path\n self.text = ''\n\n def append(self, status: str, test: str, comment: str = '') -> None:\n self.text += fr'{status} [{test}]({self.artifact_path}/{test}) {comment}\\n'\n\n\nif __name__ == '__main__':\n summary = ResultList(\n 'https://jenkins-mch.cscs.ch/job/spack_PR/$BUILD_ID/artifact/')\n\n # Trigger phrases that cause a test to get a yellow circle\n yellow_triggers = [\n 'Timed out waiting for a write lock', 'timed out after 5 seconds'\n ]\n\n for file_name in glob.glob('*.log'):\n with open(file_name, 'r') as file:\n content = file.read()\n if content.endswith('SUCCESS'):\n summary.append(':green_circle:', file_name)\n else:\n for trigger in yellow_triggers:\n if trigger in content:\n summary.append(':yellow_circle:', file_name, trigger)\n break\n else:\n summary.append(':red_circle:', file_name)\n\n if summary.text == '':\n summary.text = 'This message prevents a false negative. Ignore it!'\n # Comment PR\n subprocess.run(\n 'curl -v -H \"Content-Type: application/json\"'\\\n ' -H \"Authorization: token ${GITHUB_AUTH_TOKEN}\"'\\\n ' -X POST'\\\n ' -d \"{\\\\\"body\\\\\":\\\\\"' + summary.text + '\\\\\"}\"'\\\n ' \"https://api.github.com/repos/c2sm/spack-c2sm/issues/${ghprbPullId}/comments\"',\n check=True, shell=True)\n","sub_path":"send_summary_as_comment_to_PR.py","file_name":"send_summary_as_comment_to_PR.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"228155940","text":"from django.conf import settings\nimport time\nimport requests\nimport hashlib\nimport traceback\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nclass WorkFlowAPiRequest(object):\n def __init__(self, token=settings.WORKFLOWTOKEN, appname=settings.WORKFLOWAPPNAME, username='admin',\n workflowbackendurl=settings.WORKFLOWBACKENDURL):\n self.token = token\n self.appname = appname\n self.username = username\n self.workflowbackendurl = workflowbackendurl\n\n def getrequestheader(self):\n timestamp = str(time.time())[:10]\n ori_str = timestamp + self.token\n signature = hashlib.md5(ori_str.encode(encoding='utf-8')).hexdigest()\n headers = dict(signature=signature, timestamp=timestamp, appname=self.appname, username=self.username)\n return headers\n\n def getdata(self, parameters=dict(), method='get', url='/api/v1.0/workflows/', timeout=300, data=dict()):\n if method not in ['get', 'post', 'put', 'delete', 'patch']:\n return False, 'method must be one of get post put delete or patch'\n if not isinstance(parameters, dict):\n return False, 'Parameters must be dict'\n headers = self.getrequestheader()\n try:\n r = getattr(requests, method)('{0}{1}'.format(self.workflowbackendurl, url), headers=headers,\n params=parameters, timeout=timeout, data=json.dumps(data))\n result = r.json()\n return True, result\n except:\n return False, traceback.format_exc()","sub_path":"workflow/apirequest.py","file_name":"apirequest.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"268720661","text":"from keras.layers import Lambda, Input, Dense\nfrom keras.models import Model\nfrom keras.losses import mse, binary_crossentropy\nfrom keras.optimizers import Adam\nfrom keras import backend as K\n\nimport numpy as np\n\n\nclass VAE:\n def __init__(self, input_dim=2048, \n hidden_spec=[{'units':1024},{'units':512},{'units':256}], \n batch_size=1024, \n latent_dim=100, \n epochs=1, \n learning_rate=0.001, \n mse=True, \n verbose=False):\n \n self.hidden_spec=hidden_spec\n \n self.input_dim = input_dim\n self.input_shape = (input_dim, )\n self.batch_size = batch_size\n self.latent_dim = latent_dim\n self.epochs = epochs\n self.learning_rate = learning_rate\n self.mse = mse\n self.verbose = verbose\n self.build_model()\n\n def sampling(self, args):\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n \n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon\n \n \n def downsample(self, input_layer, layer_spec):\n assert isinstance(layer_spec, list)\n x = input_layer\n for layer in layer_spec:\n x = Dense(layer['units'], activation=layer.get('activation', 'relu'))(x)\n return x\n \n def upsample(self, input_layer, layer_spec):\n assert isinstance(layer_spec, list)\n x = input_layer\n for layer in reversed(layer_spec):\n x = Dense(layer['units'], activation=layer.get('activation', 'relu'))(x)\n return x\n \n def build_model(self):\n self.inputs = Input(shape=self.input_shape, name='encoder_input')\n x = self.downsample(self.inputs, self.hidden_spec)\n z_mean = Dense(self.latent_dim, name='z_mean')(x)\n z_log_var = Dense(self.latent_dim, name='z_log_var')(x)\n\n\n self.z = Lambda(self.sampling, output_shape=(self.latent_dim,), name='z')([z_mean, z_log_var])\n\n self.encoder = Model(self.inputs, [z_mean, z_log_var, self.z], name='encoder')\n if self.verbose:\n self.encoder.summary()\n \n latent_inputs = Input(shape=(self.latent_dim,), name='z_sampling')\n x = self.upsample(latent_inputs, self.hidden_spec)\n outputs = Dense(self.input_dim, activation='tanh')(x)\n\n self.decoder = Model(latent_inputs, outputs, name='decoder')\n if self.verbose:\n self.decoder.summary()\n \n self.outputs = self.decoder(self.encoder(self.inputs)[2])\n self.vae = Model(self.inputs, self.outputs, name='vae_mlp')\n \n if self.mse:\n reconstruction_loss = mse(self.inputs, self.outputs)\n else:\n reconstruction_loss = binary_crossentropy(self.inputs, self.outputs)\n\n reconstruction_loss *= self.input_dim\n kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n self.vae.add_loss(vae_loss)\n adam = Adam(lr=self.learning_rate)\n self.vae.compile(optimizer=adam)\n if self.verbose:\n self.vae.summary()\n \n def train(self, x_train, x_test, verbose=2):\n self.vae.fit(x_train, \n epochs=self.epochs, \n batch_size=self.batch_size, \n validation_data=(x_test, None),\n verbose=verbose)\n \n def encode(self, x):\n return self.encoder.predict(x)\n \n def decode(self, x):\n return self.decoder.predict(x)\n \n def tweak(self, x, index=0, amount=0.1):\n x = self.encode(x)[2]\n print(x.shape)\n x[:,index] = x[:,index] + amount\n return self.decode(x)\n \n def predict(self, x):\n return self.vae.predict(x)\n \n def train_generator(self, generator, max_iterations=None, save_interval = None, file_name = 'vae.h5', verbose=2):\n i = 0\n for data in generator:\n i+=1\n (x_train, x_test) = data\n print(\"Iteration \", i)\n self.vae.fit(x_train, epochs=self.epochs, batch_size=self.batch_size, validation_data=(x_test, None), verbose=verbose)\n if save_interval != None:\n if i % save_interval == 0:\n self.save(file_name)\n print(\"saved model\")\n\n if isinstance(max_iterations, int):\n if i >= max_iterations:\n break\n \n self.save(file_name)\n print('training finished')\n \n def test_generator(self):\n while True:\n x_train = np.random.sample(size=(np.random.randint(1000,3000), 2048))\n x_test = np.random.sample(size=(np.random.randint(100,300), 2048))\n yield (x_train, x_test)\n \n def data_generator(self, train_generator, validation_generator):\n calls = 0\n while True:\n calls += 1\n yield (next(train_generator), next(validation_generator))\n \n def test_train(self):\n g = self.test_generator()\n self.train_generator(g, max_iterations = 10)\n \n def save(self, file_name='vae.h5'):\n self.vae.save_weights(file_name)\n def load(self, file_name='vae.h5'):\n self.vae.load_weights(file_name)\n ","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":5478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"101388114","text":"# coding=utf-8\n__author__ = 'Yiming'\n\nimport urllib.request\nimport urllib.parse\nimport json\n\n\ndef url_open(url):\n req = urllib.request.Request(url)\n req.add_header('User-Agent',\n 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36')\n response = urllib.request.urlopen(req)\n html = response.read().decode('utf-8', 'ignore')\n return html\n\n\ndef get_addrCoor(addr):\n key = \"xxxxxxxxxxxxxxxxxxxxxxxxxx\" # 出于隐私原因,这里隐去了我的api key\n url = \"http://restapi.amap.com/v3/geocode/geo?key=\" + key + \"&address=\" + urllib.parse.quote(\n addr) + \"&city=\" + urllib.parse.quote(\"北京\")\n # print(url)\n html = url_open(url)\n target = json.loads(html)\n print(target)\n if target['geocodes'] != []:\n location = target['geocodes'][0]['location']\n return location\n else:\n return ''\n","sub_path":"data/getCoordinate.py","file_name":"getCoordinate.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"456482378","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.cross_decomposition import PLSRegression, PLSCanonical\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.linear_model import ElasticNet, LogisticRegression\nfrom sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier, RandomForestClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV, cross_val_predict\n\n\n# All Functions below Look very similar to each other. Their basic structure looks as follows:\n\n# def function_name(training_df, training_labels, params = None, cv = 10, scoring = 'accuracy', cvres = False, other_params):\n# '''Specify which model is going to be used'''\n# model = MyModel()\n#\n# ''' if statement that asks whether any parameters were supplied '''\n# if params is none:\n# # set default params\n#\n# ''' Perform Gridsearch '''\n# grid = GridSearchCV(model, params, cv = cv, scoring)\n# #cv controls how many folds are created during CV. Scoring allows the user to specify a metric\n#\n# '''If the raw gridsearch results are demanded'''\n# if cvres:\n# return grid.cv_results_\n#\n# '''Otherwise return them in a nice format together with training error'''\n# else:\n# return cv(grid.cv_results_), training_accuracy(training_labels, grid.best_estimator_.predict(training_df))\n\n\n\ndef training_accuracy(real, preds):\n \"\"\"Computes training accuracy\"\"\"\n return np.sum(real == preds) / len(real)\n\ndef cv_results(cv_arr, num = 5):\n \"\"\"Extracts Results from GridSearch. By default shows only top 5 models\"\"\"\n cvs = []\n for i in range(len(cv_arr['params'])):\n cvs.append((cv_arr['params'][i], cv_arr['mean_test_score'][i]))\n cvs.sort(key = lambda x: x[1])\n \n if num == 0:\n return cvs[::-1]\n else:\n return cvs[::-1][:num]\n\n############################\n########### SVM ############\n############################\n\ndef svm(df, labels, params = None, cv = 10, scoring = 'accuracy', n_jobs=-3, linearsvc = False, cvres = False):\n \n if linearsvc:\n svc_mod = LinearSVC()\n if params == None:\n params = {\n 'C':[1, 10, 100, 1000, 10000],\n 'loss':['hinge', 'squared_hinge'], \n 'penalty':['l2']\n }\n else:\n svc_mod = SVC()\n if params == None:\n params = [\n {\n 'C':[1, 10, 100, 1000, 10000, 100000],\n 'kernel':['linear'], \n },\n {\n 'C':[1, 10, 100, 1000, 10000, 100000],\n 'kernel':['rbf', 'sigmoid'], \n 'gamma':['auto', 'scale'] \n },\n {\n 'C':[1, 10, 100, 1000, 10000, 100000],\n 'kernel':['poly'],\n 'degree':[2, 3, 4],\n 'gamma':['auto', 'scale']\n }\n ]\n\n svc_grid = GridSearchCV(svc_mod, params, cv = cv, scoring = scoring, n_jobs = n_jobs, iid = False)\n svc_grid.fit(df, labels)\n if cvres:\n #print(cv_results(svc_grid.cv_results_, num = 5), training_accuracy(labels, svc_grid.best_estimator_.predict(df)))\n return svc_grid.cv_results_\n else:\t\t\n return cv_results(svc_grid.cv_results_, num = 5), training_accuracy(labels, svc_grid.best_estimator_.predict(df))\n\n############################\n######## LDA, QDA ##########\n############################\n\ndef da(df, labels, params = None, which = 'lda', scoring = 'accuracy', cv = 10, cvres = False, n_jobs = -3):\n if which == 'lda':\n if params is None:\n params = [\n {'solver':['lsqr', 'eigen'], 'shrinkage':['auto']},\n {'solver':['lsqr', 'eigen'], 'shrinkage':np.linspace(0.01, 0.99, 20)}\n ]\n da = LinearDiscriminantAnalysis()\n else:\n if params is None:\n params = {'reg_param':np.linspace(0, 1, 9)}\n da = QuadraticDiscriminantAnalysis()\n \n da_grid = GridSearchCV(da, params, scoring = scoring, cv = cv, iid = False)\n da_grid.fit(df, labels)\n \n if cvres:\n #print('Traning accuracy of the best model: ', training_accuracy(labels, da_grid.best_estimator_.predict(df)))\n return da_grid.cv_results_\n else:\n return cv_results(da_grid.cv_results_), training_accuracy(labels, da_grid.best_estimator_.predict(df))\n\n############################\n######### LogReg ###########\n############################\n\ndef log_reg(df, labels, params = None, scoring = 'accuracy', cv = 10, n_jobs = -3, cvres = False):\n \n log = LogisticRegression()\n \n if params == None:\n params = [\n {\n 'penalty':['l2', 'l1'],\n 'solver':['liblinear'],\n 'C':[0.001, 0.01, 0.1, 1, 10],\n }, \n {\n 'penalty':['l2'],\n 'solver':['lbfgs'],\n 'C':[0.001, 0.01, 0.1, 1, 10],\n }\n ]\n log_grid = GridSearchCV(log, params, scoring = scoring, cv = cv, iid = False, n_jobs = n_jobs)\n log_grid.fit(df, labels)\n \n if cvres:\n #print('Traning accuracy of the best model: ', training_accuracy(labels, log_grid.best_estimator_.predict(df)))\n return log_grid.cv_results_\n else:\n return cv_results(log_grid.cv_results_), training_accuracy(labels, log_grid.best_estimator_.predict(df))\n\n############################\n########### RF #############\n############################\n\ndef rf(df, labels, params = None, scoring = 'accuracy', cv = 10, n_jobs = -3, cvres = False):\n \n rf = RandomForestClassifier()\n \n if params == None:\n params = {\n 'n_estimators':[500],\n 'max_depth':np.arange(2, 10, 2),\n 'min_samples_leaf':np.arange(3, 6),\n 'bootstrap':[True, False]\n \n }\n rf_grid = GridSearchCV(rf, params, scoring = scoring, cv = cv, iid = False, n_jobs = n_jobs)\n rf_grid.fit(df, labels)\n \n if cvres:\n #print('Traning accuracy of the best model: ', training_accuracy(labels, rf_grid.best_estimator_.predict(df)))\n return rf_grid.cv_results_\n else:\n return cv_results(rf_grid.cv_results_), training_accuracy(labels, rf_grid.best_estimator_.predict(df))\n\n############################\n######## Boosting ##########\n############################\n\ndef boost(df, labels, clf = 'gradient', params = None, cv = 10, scoring = 'accuracy', n_jobs = -3, cvres = False):\n \n # Gradient Boosting\n if clf == 'gradient' or clf == 'grboost':\n boost_mod = GradientBoostingClassifier()\n if params is None:\n params = {'n_estimators':[100, 500, 1000, 5000], \n 'learning_rate':[0.001, 0.01, 0.1, 1], \n 'subsample':[0.3, 0.7, 1], \n 'min_samples_split':[2,3,4]}\n \n # AdaBoost \n elif clf == 'ada' or clf == 'adaboost':\n boost_mod = AdaBoostClassifier()\n if params is None:\n params = {'n_estimators':[100, 500, 1000, 5000], 'learning_rate':[0.001, 0.01, 0.1, 1]}\n \n # XGBoost \n elif clf == 'xgb' or clf == 'xgboost':\n boost_mod = XGBClassifier()\n if params is None:\n params = {'max_depth':[2, 3], \n 'learning_rate':[0.01, 0.1, 1], \n 'subsample':[0.2, 0.6, 1],\n 'reg_lambda':[0.1, 1, 10, 100],\n 'n_estimators':[200]}\n\n boost_grid = GridSearchCV(boost_mod, params, cv = cv, scoring = scoring, n_jobs = n_jobs, iid = False)\n boost_grid.fit(df, labels)\n \n if cvres:\n #print('Traning accuracy of the best model: ', training_accuracy(labels, boost_grid.best_estimator_.predict(df)))\n return boost_grid.cv_results_\n else:\n return cv_results(boost_grid.cv_results_), training_accuracy(labels, boost_grid.best_estimator_.predict(df)) \n\n\ndef knn(df, labels, params = None, scoring = 'accuracy', cv = 10, n_jobs = -3, cvres = False):\n \n knnc = KNeighborsClassifier()\n \n if params == None:\n params = {\n 'n_neighbors':[1, 5, 10, 15, 20],\n 'weights':['uniform', 'distance'],\n 'algorithm':['auto'],\n 'n_jobs':[n_jobs]\n }\n knn_grid = GridSearchCV(knnc, params, scoring = scoring, cv = cv, iid = False, n_jobs = n_jobs)\n knn_grid.fit(df, labels)\n \n if cvres:\n #print('Traning accuracy of the best model: ', training_accuracy(labels, log_grid.best_estimator_.predict(df)))\n return knn_grid.cv_results_\n else:\n return cv_results(knn_grid.cv_results_), training_accuracy(labels, knn_grid.best_estimator_.predict(df))\n \ndef pick_best(df, labels, model, candidate_params, n = 10, cv = 10):\n \"\"\"This function can be used to pick among several equivalently good models by inputting their parameter values. Then it predicts the traning data set using cross validation and each time computes the accuracy. The function returns the list of means, variances and corresponding models (their parameters).\"\"\"\n all_cv_errors = []\n \n for params in candidate_params:\n cv_errors = []\n for i in range(n):\n model.set_params(**params)\n \n preds = cross_val_predict(model, df, labels, cv = cv)\n cv_errors.append(training_accuracy(labels, preds))\n all_cv_errors.append([np.round(np.mean(cv_errors), 5), \n np.round(np.var(cv_errors), 5), \n params])\n all_cv_errors.sort(key=lambda x: x[0])\n return pd.DataFrame(all_cv_errors[::-1], columns = ['MeanCV', 'VarCV', 'Model_params'])","sub_path":"scripts/classification_gridsearch.py","file_name":"classification_gridsearch.py","file_ext":"py","file_size_in_byte":9975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"376501302","text":"# coding=utf-8\n\nfrom tornado.web import authenticated\nfrom vanellope.handlers.base import AdminBaseHandler\nfrom vanellope.da import db_backup\n\n\nclass AdminControlPanel(AdminBaseHandler):\n @authenticated\n def get(self):\n self.render(\"controlpanel.html\",\n title=u\"Control Panel\")\n\n\nclass AdminExportData(AdminBaseHandler):\n def get(self):\n \"\"\"Export database\n \"\"\"\n zip_path, zip_filename = db_backup()\n\n with open(zip_path, 'rb') as f:\n self.set_header('Content-Type', 'application/octet-stream')\n self.set_header('Content-Disposition',\n 'attachment; filename=' + zip_filename)\n self.write(f.read())\n self.finish()\n\n\nclass FriendLinkHandler(AdminBaseHandler):\n @authenticated\n def get(self):\n friend_links = self.friendlinks.find_all()\n\n data = []\n for link in friend_links:\n link['created_at'] = link['created_at'].strftime('%s')\n link['updated_at'] = link['updated_at'].strftime('%s')\n data.append(link)\n\n self.finish({\n 'info': 'success',\n 'data': data\n })\n\n @authenticated\n def post(self):\n \"\"\"\n \"\"\"\n site_title = self.get_payload_argument(u'title', None)\n site_address = self.get_payload_argument(u'address', None)\n site_notes = self.get_payload_argument(u'notes', None)\n\n result = self.friendlinks.create({\n 'title': site_title,\n 'address': site_address,\n 'notes': site_notes\n })\n\n self.finish({\n 'info': 'success',\n 'url': result\n })\n\n @authenticated\n def delete(self, uuid):\n result = self.friendlinks.remove(uuid)\n\n self.finish({\n 'info': 'success',\n 'url': result\n })\n\n\n","sub_path":"vanellope/handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"254986297","text":"CONFIG = {\n 'intents': {\n 'hello': {\n 'requests': ['привет', 'добрый день', 'здравствуй', 'доброе утро', 'добрый вечер', 'что как'],\n 'replies': ['Привет босс!', 'давай говори чего хочешь', 'Здравствуй'],\n },\n 'is_listening': {\n 'requests': ['ты слушаешь', 'ты тут'],\n 'replies': ['да, я тут', 'да, я слушаю', 'с какой целью интересуешься?'],\n },\n 'unmute': {\n 'requests': ['давай поговорим', 'слушай внимательно', 'давай пообщаемся', 'давай общаться', 'не засыпай',\n 'не спи'],\n 'replies': ['Слушаю не дыша', 'Хорошо, я в режиме разговора'],\n 'action': 'unmute'\n },\n 'stop': {\n 'requests': ['помолчи', 'молчи', 'тихо', 'потеряйся', 'пока', 'до свидания', 'прощай', 'усни', 'поспи',\n 'спокойной ночи', 'не болтай', 'не подслушивай', 'спокойно', ],\n 'replies': ['молчу', 'Счастливо', 'Еще увидимся', 'Если что я тут', 'Аэл би бэк', 'ура, перекур',\n 'пойду покурю'],\n 'action': 'stop',\n },\n 'die': {\n 'requests': ['умри', 'сдохни', 'уйди'],\n 'replies': ['увидимся в следующей жизни', 'Если что, знаешь где меня искать', 'пока-пока'],\n 'action': 'die',\n },\n 'name': {\n 'requests': ['как твоё имя', 'как тебя зовут'],\n 'action': 'name',\n },\n 'think': {\n 'requests': ['что ты думаешь про', 'что ты знаешь о', 'что ты думаешь о'],\n 'action': 'think',\n 'location_missing': ['о чём?']\n },\n 'quotation': {\n 'requests': ['скажи что-нибудь умное', 'сумничай', 'цитату', ],\n 'action': 'quotation',\n },\n 'u_where': {\n 'requests': ['ты где', 'куда подевалась', 'ты тут', 'почему не отвечаешь'],\n 'replies': ['я тут, ты меня спрашиваешь, что-ли?', 'тута я', 'вот я', 'отвлеклась немного'],\n },\n 'app_close': {\n 'requests': ['выключи player', 'выключи плеер', 'выключи радио', 'выключи музыку', 'закрой программу',\n 'выключить', 'закрой', ],\n 'replies': ['выключаю', 'как скажешь', 'хорошо', 'ладно', ''],\n 'action': 'app_close',\n 'subject': {\n 'радио': 'AIMP.exe',\n 'player': 'AIMP.exe',\n 'плеер': 'AIMP.exe',\n 'музыку': 'AIMP.exe',\n 'калькулятор': 'Calculator.exe',\n },\n 'subject_missing': ['закрыть что?', 'какую программу?', 'что выключить?']\n },\n 'turn_on': {\n 'requests': ['включи радио', 'включи музыку', 'включить'],\n 'replies': ['включаю', 'секунду', 'хорошо', '', '', ],\n 'action': 'turn_on',\n 'subject': {\n 'эльдорадио': 'http://emgspb.hostingradio.ru/eldoradio128.mp3',\n 'радио коммерсант fm': 'http://kommersant77.hostingradio.ru:8016/kommersant128.mp3',\n 'радио эхо москвы': 'http://ice912.echo.msk.ru:9120/stream',\n 'радио маяк': 'http://icecast.vgtrk.cdnvideo.ru/mayakfm_mp3_192kbps',\n 'радио шансон': 'https://chanson.hostingradio.ru:8041/chanson128.mp3',\n 'радио монте-карло': 'https://montecarlo.hostingradio.ru/montecarlo128.mp3',\n 'радио ретро fm': 'http://retroserver.streamr.ru:8043/retro256.mp3',\n 'русский радио': 'https://rusradio.hostingradio.ru/rusradio128.mp3',\n 'радио dfm': 'https://dfm.hostingradio.ru/dfm128.mp3',\n 'радио европа': 'http://emgregion.hostingradio.ru:8064/moscow.europaplus.mp3',\n 'радио эрмитаж': 'http://hermitage.hostingradio.ru/hermitage128.mp3',\n 'радио like fm': 'http://ic7.101.ru:8000/a219',\n 'радио лайк': 'http://ic7.101.ru:8000/a219',\n 'радио офис lounge': 'http://ic7.101.ru:8000/a30',\n 'радио офис лаунж': 'http://ic7.101.ru:8000/a30',\n 'радио office lounge': 'http://ic7.101.ru:8000/a30',\n 'радио чипльдук': 'http://radio.4duk.ru/4duk256.mp3',\n 'радио чилаут': r'C:\\Users\\go\\Local\\YandexDisk\\Музыка\\_Радио\\Chillout.aimppl4',\n 'playlist chill out': r'C:\\Users\\go\\Local\\YandexDisk\\Музыка\\_Радио\\Chillout.aimppl4',\n 'радио чилстеп': r'C:\\Users\\go\\Local\\YandexDisk\\Музыка\\_Радио\\Chillstep.aimppl4',\n 'радио chillstep': r'C:\\Users\\go\\Local\\YandexDisk\\Музыка\\_Радио\\Chillstep.aimppl4',\n 'мой музыку': r'C:\\Users\\go\\Local\\YandexDisk\\Музыка\\2020',\n 'музыку дыхания': r'C:\\Users\\go\\Local\\YandexDisk\\Музыка\\2020\\Breathe',\n },\n 'subject_missing': ['что включить', 'что ты хочешь послушать', 'что именно', 'а конкретнее'],\n 'not_exists': ['у меня такого нет', 'такого нет, выбери другое']\n },\n 'volume_up': {\n 'requests': ['погромче', 'сделай погромче', 'прибавь грокмость', 'ещё громче', 'сделать погромче'],\n 'action': 'volume_up'\n },\n 'volume_down': {\n 'requests': ['потише', 'сделай потише', 'убавь грокмость', 'еще тише'],\n 'action': 'volume_down'\n },\n 'track_next': {\n 'requests': ['следующий трек', 'следующая песня', 'перемотай дальше'],\n 'action': 'track_next'\n },\n 'track_prev': {\n 'requests': ['предыдущий трек', 'предыдущая песня', 'перемотай назад'],\n 'action': 'track_prev'\n },\n 'play_pause': {\n 'requests': ['останови музыку', 'останови песню', 'пауза', 'играй дальше', 'играй'],\n 'action': 'play_pause'\n },\n 'find': {\n 'requests': ['найти', 'спросить у', 'загуглить', 'поискать', 'пошукать'],\n 'action': 'find',\n 'target': {\n 'в яндекс музыке': 'https://music.yandex.ru/search?text=',\n 'в яндексе': 'https://yandex.ru/search/?text=',\n 'в википедии': 'https://ru.wikipedia.org/w/index.php?search=',\n 'в гугле': 'https://www.google.ru/search?q=',\n 'в маркете': 'https://market.yandex.ru/search?text=',\n 'в youtube': 'https://www.youtube.com/results?search_query=',\n 'в ютюбе': 'https://www.youtube.com/results?search_query=',\n 'в ютубе': 'https://www.youtube.com/results?search_query=',\n 'на карте': 'https://yandex.ru/maps/?text=',\n },\n 'target_missing': ['где найти?', 'а конкретнее?'],\n 'subject_missing': ['что найти?', 'уточни что искать?', 'что именно?', ]\n },\n 'app_open': {\n 'requests': ['открой'],\n 'replies': ['открываю', 'как скажешь', 'пожалуйста', 'нет проблем', ],\n 'action': 'app_open',\n 'subject': {\n 'яндекс музыку': r'C:\\Users\\go\\AppData\\Local\\Yandex\\YandexBrowser\\Application\\browser.exe '\n r'https://music.yandex.ru/home',\n 'telegram': r'C:\\Users\\go\\AppData\\Roaming\\Telegram Desktop\\Telegram.exe',\n 'whatsapp': r'C:\\Users\\go\\AppData\\Local\\WhatsApp\\WhatsApp.exe',\n 'браузер': r'C:\\Users\\go\\AppData\\Local\\Yandex\\YandexBrowser\\Application\\browser.exe',\n 'телеграмму': r'C:\\Users\\go\\AppData\\Roaming\\Telegram Desktop\\Telegram.exe',\n 'калькулятор': 'calc',\n 'spotify': r'C:\\Users\\go\\AppData\\Roaming\\Spotify\\Spotify.exe',\n },\n 'subject_missing': ['что открыть?', 'что именно?', 'а конкр��тнее?', 'какую программу?'],\n 'not_exists': ['у меня нет такой программы']\n },\n 'repeat_after_me': {\n 'requests': ['повтори за мной', 'произнеси', 'повторяй за мной'],\n 'action': 'repeat_after_me',\n 'text_missing': ['говори', 'что повторить?', 'я слушаю'],\n },\n 'whm_breathe': {\n 'requests': ['подышим', 'подышим 1 раундов', 'дыхательная тренировка', 'Вим Хоф', 'вим hof',\n 'начинай дыхательную гимнастику', 'дыхательная гимнастика', 'искусственное дыхание'],\n 'action': 'whm_breathe',\n },\n 'whm_breath_stat': {\n 'requests': ['покажи статистику дыхания', 'покажи журнал дыхания'],\n 'action': 'whm_breath_stat',\n },\n 'repeat': {\n 'requests': ['повтори', 'еще раз', 'что ты говоришь?'],\n 'action': 'repeat',\n },\n 'can': {\n 'requests': ['что ты умеешь', 'твои способности', 'что ты можешь', 'что ты знаешь'],\n 'replies': ['я умею отвечать кто такой и что такое, '\n 'говорить время, какой день, '\n 'включать радио и музыку, '\n 'устанавливать таймеры, '\n 'говорить погоду в любом месте на земле, '\n 'искать в яндексе, гугле, ютубе и википедии, '\n 'знаю свой возраст, могу повторять за тобой. '\n 'Могу узнать курс доллара или биткоина. '\n 'Могу рассказать анекдот или цитату. '\n 'Ты меня не обижай',\n ],\n },\n 'abuse': {\n 'requests': ['плохо', 'нехорошо', 'нехорошая', 'дура', 'коза', 'бестолковая',\n 'заткнись', 'задолбала', 'уродина', \"****\"],\n 'action': 'abuse',\n 'replies': ['на себя посмотри.', 'а чё сразу ругаться то?', 'ну обидно же!', 'за что?', 'я тебя запомню!',\n 'ну чё ты? нормально же общались!', 'фак ю вэри мач!', 'похоже это что-то обидное?'],\n },\n 'praise': {\n 'requests': ['красава', 'молодец', 'хороший', 'приятно поговорить',\n 'спасибо', 'благодарю', 'прикольно', 'умница', 'замечательно', 'супер'],\n 'action': 'praise',\n # 'replies': ['спасибо', 'мне очень приятно', 'стараюсь', 'это просто магия'],\n },\n 'mood': {\n 'requests': ['настроение', 'дела', ' себя чувствуешь'],\n 'action': 'my_mood',\n 'status': {\n 2: ('просто замечательно', 'просто великолепно', 'супер'),\n 1: ('очень хорошо', 'прекрасно', 'отлично'),\n 0: ('ничего', 'нормально', 'не жалуюсь'),\n -1: ('плохо', 'отвратительно', 'не очень', 'паршиво, знаешь'),\n },\n\n },\n 'ctime': {\n 'requests': ['текущее время', 'сколько время', 'сколько времени', 'который час'],\n 'action': 'ctime',\n },\n 'timer': {\n 'requests': ['поставь таймер на', 'таймер', 'засеки', ],\n 'action': 'timer',\n },\n 'age': {\n 'requests': ['сколько тебе лет', 'твой возраст', 'скажи свой возраст'],\n 'action': 'age',\n },\n 'whois': {\n 'requests': ['что такое', 'кто такой', 'кто такая'],\n 'action': 'who_wikipedia',\n },\n 'translate': {\n 'requests': ['переведи', 'по-английски', 'скажи по-английски', 'как будет по-английски'],\n 'action': 'translate',\n 'text_missing': ['говори', 'что перевести?', 'слушаю'],\n },\n 'weather': {\n 'requests': ['какая погода', 'погода', 'сколько градусов', 'сколько градусов на улице', 'холодно',\n 'тепло', 'что с погодой', 'завтра погода', 'влажность'],\n 'action': 'weather',\n },\n 'usd': {\n 'requests': ['курс доллара', 'почём доллар'],\n # 'replies': ['сейчас в банке спрошу', 'в банк позвоню', 'банк на связи'],\n 'action': 'usd',\n },\n 'btc': {\n 'requests': ['курс биткоина', 'почём биткоин', 'курс битка', 'bitcoin', 'почём биток'],\n # 'replies': ['ща узнаю', 'погоди', 'секундочку'],\n 'action': 'btc',\n },\n 'calculate': {\n 'requests': ['посчитай', 'сколько будет'],\n 'replies': ['Я только учусь считать'],\n },\n 'days_to': {\n 'requests': ['сколько дней до'],\n 'replies': ['еще не знаю', 'разработчик сказал научит до нового года но не сказал до какого'],\n },\n 'find_out_where': {\n 'requests': ['где находится', 'где', ],\n 'replies': ['сейчас поищем', 'где-то здесь', ],\n 'action': 'find',\n 'targets': {'где': 'https://yandex.ru/maps/?text='},\n 'subject_missing': ['о чем ты хотел спросить?']\n },\n 'find_out_wiki': {\n 'requests': ['кто такой', 'кто такая', 'что такое', 'что есть', 'кто', ],\n 'action': 'wikipedia',\n },\n 'anecdote': {\n 'requests': ['расскажи анекдот'],\n 'replies': ['ща.', 'короче', 'ладно, слушай.', 'слушай прикол', 'ща найду.'],\n 'action': 'anecdote',\n },\n 'weekday': {\n 'requests': ['какой сегодня день', 'какой день недели', ],\n 'action': 'weekday',\n },\n 'forget': {\n 'requests': ['забудь', ],\n 'action': 'forget',\n },\n 'redneck': {\n 'requests': ['включи пацана', 'включи пацанский режим', ],\n 'replies': ['базара нет!', 'говно вопрос!', 'базар те нужен?', 'да легко!', 'без базара',\n 'базара ноль'],\n 'action': 'redneck',\n },\n 'casual': {\n 'requests': ['говори нормально', 'выключи пацана', ],\n 'replies': ['я просто прикалывалась 📌', 'хорошо', 'как скажешь', ],\n 'action': 'casual',\n },\n 'happy_new_year': {\n 'requests': ['с новым годом', 'поздравляю с новым годом', ],\n 'replies': ['с новым годом!', 'с новым счастьем!', 'и вас тоже!', ],\n },\n 'diary': {\n 'requests': ['запиши в дневник', 'запиши на память', 'напиши дневник', 'запиши в журнал', 'запиши дневник',\n 'добавь в дневник', 'добавь в журнал', 'запиши дневник', 'напиши в дневник'],\n 'action': 'diary',\n },\n 'diary_to_pdf': {\n 'requests': ['покажи дневник', 'открой дневник'],\n 'action': 'diary_to_pdf',\n },\n 'cheeky': {\n 'requests': ['такая дерзкая'],\n 'replies': ['на себя посмотри', 'а что ты хотел?']\n },\n },\n 'i_cant': ['а самому слабоо?', 'меня этому не обучали', 'может когда-нибудь научусь', 'попробуй сам', ],\n 'failure_phrases': [\n 'а можно как-то попроще выразиться?',\n 'вас людей не всегда поймешь',\n 'вот это сейчас что было?',\n 'если честно то мне похер что ты там хочешь',\n 'ещё разочек можно?',\n 'запуталась я совсем.',\n 'здесь как говорится наши полномочия всё',\n 'к сожалению, я не смогу помочь с этим вопросом',\n 'как это понимать?',\n 'меня такому не учили',\n 'мне не понятно.',\n 'может мы забудем что ты сказал?',\n 'моя твоя не понимай',\n 'ничего не поняла, но о очень интересно',\n 'прости что-то я не вкурила',\n 'слишком сложно для меня',\n 'сначала я ничего не поняла а потом я тоже ничего не поняла',\n 'со рян не поняла',\n 'ты пытаешься меня запутать?!',\n 'уточни вопрос пожалуйста',\n 'что-то не понятно',\n 'Что-то у меня морфологический модуль сегодня барахлит',\n 'что-то я туплю, повтори',\n 'это точно правильные слова?',\n 'я же всего лишь бот. скажи попроще',\n 'я не поняла твоих намерений',\n 'я не совсем тебя поняла',\n 'я поняла, что я не поняла',\n 'я правильно интерпретирую семантику вопроса, но иногда полностью игнорирую его суть',\n ],\n \"litter\": (\n \"ну\", 'будет', 'говорить', 'да', 'если', 'если', 'ка', 'ладно', 'можешь', 'можно', 'ну-ка', 'поведай',\n 'подскажи', 'пожалуйста', 'послушай', 'расскажи', 'скажи', 'слушай', 'слушай', 'хочется', 'хочу', 'что-то',\n 'давай', 'чертов', 'гребаный', 'долбаный'\n ),\n 'reply_for_action': ('ты еще помнишь моё имя', 'я тут', 'я слушаю', 'слушаю внимательно',\n 'говори уже', 'да, это моё имя'),\n 'umlaut': {'а́': 'а', 'у́': 'у', 'е́́': 'е', '́́е': 'е', 'о́́́': 'о', 'и́́́́': 'и', 'я́́́́': 'я'},\n 'eng_nouns': ['youtube', 'google', 'player'],\n 'address': ['слушай', 'тебе говорю', 'короче', 'прикинь', 'только вкинься', 'прикинь', 'смотри'],\n 'redneck': [', ну,', 'короче', 'типа.', ', в общем ', ' слышь,', 'ваще', ', вкинься', ', прикинь', ],\n 'weekday': ['понедельник', 'вторник', 'среда', 'четверг', 'пятница', 'суббота', 'воскресение', ],\n 'nearest_day': ['сейчас', 'сегодня', 'завтра', 'послезавтра', 'послепослезавтра', ],\n 'colors': {'красный': 'd80000', 'синий': '003cd2', 'зеленый': '11654C', 'фиолетовый': '55246C',\n 'оранжевый': 'e78000'},\n 'writing_to_diary': ['Записываю', 'Диктуй', 'Говори что писать'],\n 'diary_canceler': ['удали', 'отмени', 'выкини', 'выкинь', 'удалить', 'отставить'],\n 'diary_repeater': ['повтори', 'что получилось', 'прочитай', 'повтори что получилось', 'прочитай что получилось'],\n 'diary_saver': ['готово', 'сохрани', 'сохраняй', 'записывай', 'запиши', 'сохранить', 'записать'],\n 'diary_cancel': ['да пошёл ты', 'окей, забыли', 'как скажешь', 'ладно, не будем'],\n 'usd_today': ['курс доллара ЦБ РФ {} рубль {} копейка', 'доллар сегодня {} рубль {} копейка'],\n 'im_ready': ['я слушаю', 'можешь говорить', 'чего надо?', 'привет', 'ну что, без меня никак?'],\n}\n","sub_path":"va_config.py","file_name":"va_config.py","file_ext":"py","file_size_in_byte":23327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"645520441","text":"from torch.nn.parallel._functions import Broadcast\nfrom torch.nn.parallel.data_parallel import DataParallel\nfrom torch.nn.parallel.parallel_apply import get_a_var\nfrom torch.autograd import Function\nimport torch.cuda.comm as comm\nimport torch\nimport threading\n\ntorch_ver = torch.__version__[:3]\n\n\nclass Reduce(Function):\n @staticmethod\n def forward(ctx, *inputs):\n ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]\n inputs = sorted(inputs, key=lambda i: i.get_device())\n return comm.reduce_add(inputs)\n\n @staticmethod\n def backward(ctx, gradOutput):\n return Broadcast.apply(ctx.target_gpus, gradOutput)\n\n\nclass DataParallelModel(DataParallel):\n def gather(self, output, output_device):\n return output\n\n def replicate(self, module, device_ids):\n modules = super(DataParallelModel, self).replicate(module, device_ids)\n return modules\n\n def forward(self, inputs, **kwargs):\n # data will be calculate in parallel\n if kwargs.get('parallel', False):\n kwargs.pop('parallel', None)\n if isinstance(inputs, torch.Tensor):\n return super().forward(inputs, **kwargs)\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = self.parallel_apply(replicas, inputs, kwargs if kwargs else None)\n return self.gather(outputs, self.output_device)\n else:\n return self.module(inputs)\n\n\nclass DataParallelCriterion(DataParallel):\n \"\"\"\n Calculate loss in multiple-GPUs, which balance the memory usage for\n Semantic Segmentation.\n \"\"\"\n def forward(self, inputs, *targets, **kwargs):\n if not self.device_ids:\n return self.module(inputs, *targets, **kwargs)\n\n is_target_scattered = kwargs.get('is_target_scattered', False)\n kwargs.pop('is_target_scattered', None)\n\n if len(self.device_ids) == 1:\n if is_target_scattered:\n targets = (targets,)\n kwargs = (kwargs,)\n return self.module(inputs, *targets[0], **kwargs[0])\n\n if is_target_scattered:\n targets = targets[0]\n\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = _criterion_parallel_apply(replicas, inputs, targets)\n return Reduce.apply(*outputs) / len(outputs)\n\n\ndef _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):\n assert len(modules) == len(inputs)\n assert len(targets) == len(inputs)\n if kwargs_tup:\n assert len(modules) == len(kwargs_tup)\n else:\n kwargs_tup = ({},) * len(modules)\n if devices is not None:\n assert len(modules) == len(devices)\n else:\n devices = [None] * len(modules)\n\n lock = threading.Lock()\n results = {}\n if torch_ver != \"0.3\":\n grad_enabled = torch.is_grad_enabled()\n\n def _worker(i, module, input, target, kwargs, device=None):\n # import pdb;pdb.set_trace()\n if torch_ver != \"0.3\":\n torch.set_grad_enabled(grad_enabled)\n if device is None:\n device = get_a_var(input).get_device()\n try:\n if not isinstance(input, tuple):\n input = (input,)\n if not isinstance(target, tuple):\n target = (target,)\n with torch.cuda.device(device):\n output = module(*(input + target), **kwargs)\n with lock:\n results[i] = output\n except Exception as e:\n with lock:\n results[i] = e\n\n if len(modules) > 1:\n threads = [threading.Thread(target=_worker,\n args=(i, module, input, target,\n kwargs, device),)\n for i, (module, input, target, kwargs, device) in\n enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n else:\n _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])\n\n outputs = []\n for i in range(len(inputs)):\n output = results[i]\n if isinstance(output, Exception):\n raise output\n outputs.append(output)\n return outputs\n","sub_path":"utils/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"72208345","text":"import sys\nimport os\n\nif(len(sys.argv)!=4):\n print(\"Incorrect number of arguments, use python verify.py \")\n sys.exit()\n\ndef checkequal(file1, file2, TestName):\n try:\n with open(file1) as f:\n content = f.readlines()\n except FileNotFoundError:\n print('File does not exist')\n print(\"FAIL, \"+TestName)\n sys.exit()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\n content = [x.strip().split(\" \") for x in content]\n reference_content = []\n# flattening the 2d array\n for each_row in content:\n reference_content.extend(each_row)\n try: \n with open(file2) as f:\n content = f.readlines()\n except FileNotFoundError:\n print('File does not exist')\n print(\"FAIL, \"+TestName)\n sys.exit()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\n content = [x.strip().split(\" \") for x in content]\n student_content = []\n# flattening the 2d array\n for each_row in content:\n student_content.extend(each_row)\n\n if len(student_content)!=len(reference_content):\n print(\"Total number of pixels is incorrect! Your output has \" + str(len(student_content)) + \" pixels, but reference has \" + str(len(reference_content)) + \" pixels\")\n print(\"FAIL, \"+TestName)\n sys.exit()\n else:\n number_of_inac = 0\n percent_correct = 0\n for i in range(len(student_content)):\n if int(student_content[i])!= int(reference_content[i]):\n number_of_inac += 1\n percent_correct = ((1-(number_of_inac/len(student_content)))*100)\n if percent_correct > 99.85:\n print(\"PASS, \"+TestName)\n else:\n print(\"FAIL, \"+TestName)\n return number_of_inac, len(student_content), percent_correct\n\ndef ppmequal(file1, file2, ppm, TestName):\n try:\n f, g = open(file1, \"rb\"), open(file2, \"rb\")\n except FileNotFoundError:\n print('File does not exist')\n print(\"FAIL, \"+TestName)\n sys.exit()\n headerf, headerg = f.readline(), g.readline()\n if(headerf != headerg):\n print(\"Error with header on ppm file \" + file1)\n print(\"FAIL, \"+TestName)\n sys.exit()\n linecount, linelength = int(headerf.decode(\"utf-8\").split(\" \")[1]), int(headerf.decode(\"utf-8\").split(\" \")[2])\n a,b = 0,0\n percent_correct = 0\n for i in range(linecount):\n linef, lineg = f.read(3*linelength), g.read(3*linelength)\n if(len(linef) != 3*linelength or len(lineg) != 3*linelength):\n print(\"Error with length \" + file1)\n print(\"FAIL, \"+TestName)\n sys.exit()\n for j in range(linelength):\n if(linef[3*j:3*j+2] != lineg[3*j:3*j+2]):\n a+=1\n b+=1\n\n if ppm:\n percent_correct = (1-(a/b))*100\n if percent_correct > 99.85:\n print(\"PASS, \"+TestName)\n else:\n print(\"FAIL, \"+TestName)\n return a,b, percent_correct\n\n\nif(sys.argv[1][-4:]==\".txt\"):\n a,b, percent = checkequal(sys.argv[1], sys.argv[2], sys.argv[3])\n print(\"You have \" + str(a) +\" inaccurate pixels\" +\n \", which is a \" + str(percent)+\"\\% accuracy.\")\nelif(sys.argv[1][-4:] == \".ppm\"):\n a,b, percent = ppmequal(sys.argv[1], sys.argv[2], True, sys.argv[3])\n print(\"You have \" + str(a) +\" inaccurate pixels\" +\n \", which is a \" + str(percent)+\"\\% accuracy.\")\nelse:\n k = len(os.listdir(sys.argv[1]))\n a,b = 0,0\n percent_correct = 0;\n for i in range(k):\n num = str(i)\n while(len(num)<5):\n num = \"0\"+num\n file1 = sys.argv[1]+\"/frame\" + num + \".ppm\"\n file2 = sys.argv[2]+\"/frame\" + num + \".ppm\"\n c,d, percent = ppmequal(file1, file2, False, sys.argv[3])\n a+=c\n b+=d\n percent_correct = (1-(a/b))*100\n if percent_correct > 99.85:\n print(\"PASS, \" + sys.argv[3])\n else:\n print(\"FAIL, \" + sys.argv[3])\n print(\"You have \" + str(a) +\" inaccurate pixels\" + \", which is a \" + str(percent_correct)+\"\\% accuracy.\")\n","sub_path":"verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"365442506","text":"import random\nimport pygame\n\n\nclass Bird(object):\n def __init__(self):\n self.x = 400\n self.y = 300\n self.color = (255, 0, 0)\n self.radius = 30\n self.alive = True\n self.a = 2\n self.v = 0\n\n def drop(self, screen):\n self.v += self.a\n if self.y + self.radius >= screen.get_height():\n self.v = 0\n self.alive = False\n self.y += self.v\n\n def bounce(self):\n self.v = -20\n\n def draw(self, screen):\n pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, 0)\n\n\nclass Pipe(object):\n def __init__(self):\n self.height = random.randint(100, 400)\n self.position = 800\n self.in_range = True\n\n def draw(self, screen):\n rect_up = pygame.Rect(self.position, 0, 100, self.height)\n rect_down = pygame.Rect(self.position, self.height+150, 100, 450-self.height)\n pygame.draw.rect(screen, (0, 255, 255), rect_up)\n pygame.draw.rect(screen, (0, 255, 255), rect_down)\n\n def move(self):\n self.position -= 10\n if self.position < -100:\n self.in_range = False\n\n def is_bird_alive(self, bird):\n if bird.x-bird.radius < self.position < bird.x+bird.radius:\n if (bird.y+bird.radius > self.height+150) or (bird.y-bird.radius < self.height):\n bird.alive = False\n\n\npygame.init()\n# 初始化用于显示的窗口并设置窗口尺寸\nscreen = pygame.display.set_mode((800, 600))\n# 设置当前窗口的标题\npygame.display.set_caption('flappy_bird')\nbird = Bird()\npipes = []\nrunning = True\nwhile running:\n # 从消息队列中获取事件并对事件进行处理\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n # 处理鼠标事件的代码\n if event.type == pygame.MOUSEBUTTONDOWN:\n bird.bounce()\n screen.fill((0, 255, 0))\n if bird.alive:\n bird.draw(screen)\n if not pipes:\n pipe = Pipe()\n pipes.append(pipe)\n elif pipes[len(pipes) - 1].position < 600:\n pipe = Pipe()\n pipes.append(pipe)\n for pipe in pipes:\n if pipe.in_range:\n pipe.move()\n pipe.draw(screen)\n pipe.is_bird_alive(bird)\n else:\n pipes.remove(pipe)\n pygame.display.flip()\n pygame.time.delay(50)\n bird.drop(screen)\n\n","sub_path":"flappy_bird/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"504892476","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom navedex.naver import views\n\nrouter = DefaultRouter()\nrouter.register('naver', views.NaverModelViewSet)\nrouter.register('project', views.ProjectModelViewSet)\n\napp_name = 'naver'\nurlpatterns = [\n path(r'api/', include(router.urls)),\n]\n","sub_path":"navedex/naver/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"417245113","text":"import discord\nfrom discord.ext import commands\nfrom cogs.utils.dataIO import dataIO\nimport datetime\nfrom copy import deepcopy\nfrom collections import namedtuple # There may be more here?\nfrom __main__ import send_cmd_help\nimport time\nimport logging\nimport os\nfrom .utils import checks\nimport random\nimport inflect\n\np = inflect.engine()\n\nclass StashError(Exception):\n pass\n\n# Raised when you try to make an account that exists already\nclass AccountAlreadyExists(StashError):\n pass\n\n# Raised when you try to access an account that doesn't exist yet\nclass NoAccount(StashError):\n pass\n\n# Raised when you try to deposit a negative amount\nclass NegativeValue(StashError):\n pass\n\n# Raised when you try to withdraw more than you have\nclass InsufficientBalance(StashError):\n pass\n\n# Raised when you try to withdraw an item you don't have\nclass NoItem(StashError):\n pass\n\n# Raised when you try to deposit/withdraw 0 of an item\nclass ZeroCount(StashError):\n pass\n\n# Raised when you try to deposit 'all' of an item\nclass DepositAll(StashError):\n pass\n\n# Raised when you try to convert a real date before our start date\nclass TooEarly(Exception):\n pass\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\nclass StashParser:\n def __init__(self, argument):\n argument = argument.lower()\n argument = argument.strip()\n argument = argument.replace('\\n',' ')\n\n first = argument.split(' ')[0]\n if first == 'all':\n self.amount = 'all'\n argument = argument[len(first)+1:]\n elif first == 'a' or first == 'an' or first == 'the':\n self.amount = 1\n argument = argument[len(first)+1:]\n else:\n if is_number(first):\n argument = argument[len(first)+1:]\n self.amount = float(first)\n if self.amount.is_integer():\n self.amount = int(self.amount)\n else:\n self.amount = 1\n\n newform = p.singular_noun(argument)\n if newform:\n argument = newform\n\n self.name = argument\n\ndef check_folders():\n if not os.path.exists(\"data/atlas\"):\n print(\"Creating data/atlas folder...\")\n os.makedirs(\"data/atlas\")\n\n\ndef check_files():\n f = \"data/atlas/settings.json\"\n if not dataIO.is_valid_json(f):\n print(\"Creating default atlas's settings.json...\")\n dataIO.save_json(f, {})\n\n f = \"data/atlas/stash.json\"\n if not dataIO.is_valid_json(f):\n print(\"Creating empty stash.json...\")\n dataIO.save_json(f, {})\n\nclass Stash:\n\n def __init__(self, bot, file_path):\n # Load the account data\n self.accounts = dataIO.load_json(file_path)\n # Make the bot have an existential crisis\n self.bot = bot\n\n # this function creates a new account for someone\n def create_account(self, user):\n server = user.server\n # Make sure the account doesn't exist already\n if not self.account_exists(user):\n # If no one in the server has an account, add that server\n if server.id not in self.accounts:\n self.accounts[server.id] = {}\n\n # Capture the time stamp to show when this was last edited\n timestamp = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Create the stash entry\n account = {\"name\": user.name,\n \"created_at\": timestamp,\n \"items\": {}\n }\n # Put the stash entry into the json file and save it\n self.accounts[server.id][user.id] = account\n dataIO.save_json(\"data/atlas/stash.json\", self.accounts)\n\n return self.get_account(user)\n else:\n raise AccountAlreadyExists()\n\n # See if the account exists by trying to look at it. Nifty.\n def account_exists(self, user):\n try:\n self._get_account(user)\n except NoAccount:\n return False\n return True\n\n # Returns the gold of a specified user\n # CHANGE TO ENTIRE STASH #\n def get_items(self, user):\n account = self._get_account(user)\n return account[\"items\"]\n\n # Makes an account which is a copy of the entry for this person in the json\n def get_account(self, user):\n acc = self._get_account(user)\n acc[\"id\"] = user.id\n acc[\"server\"] = user.server\n # Complete the object and return it\n return self._create_account_obj(acc)\n\n # This function returns a copy of the account if it exists\n def _get_account(self, user):\n # Get the current server\n server = user.server\n try:\n # Try copying the account\n return deepcopy(self.accounts[server.id][user.id])\n except KeyError:\n # Account doesn't exist, so error\n raise NoAccount()\n\n # Builds an account object based off of the incomplete account passed in\n def _create_account_obj(self, account):\n account[\"member\"] = account[\"server\"].get_member(account[\"id\"])\n account[\"created_at\"] = datetime.datetime.strptime(account[\"created_at\"],\n \"%Y-%m-%d %H:%M:%S\")\n\n # I have no idea what this does from this point down\n Account = namedtuple(\"Account\", \"id name items \"\n \"created_at server member\")\n return Account(**account)\n\n def deposit_item(self, user, itemname, amount):\n server = user.server\n if amount == 'all':\n raise DepositAll()\n if amount <= 0:\n raise ZeroCount()\n\n account = self._get_account(user)\n\n if itemname in account[\"items\"]:\n account[\"items\"][itemname] += amount\n if float(account[\"items\"][itemname]).is_integer():\n account[\"items\"][itemname] = int(account[\"items\"][itemname])\n else:\n account[\"items\"][itemname] = amount\n\n self.accounts[server.id][user.id] = account\n self._save_stash()\n\n def withdraw_item(self, user, itemname, amount):\n server = user.server\n\n if amount != \"all\" and amount <= 0:\n raise ZeroCount()\n\n account = self._get_account(user)\n if itemname in account[\"items\"]:\n if amount == 'all':\n account[\"items\"].pop(itemname)\n else:\n if account[\"items\"][itemname] >= amount:\n account[\"items\"][itemname] -= amount\n if float(account[\"items\"][itemname]).is_integer():\n account[\"items\"][itemname] = int(account[\"items\"][itemname])\n if account[\"items\"][itemname] <= 0:\n account[\"items\"].pop(itemname)\n else:\n raise InsufficientBalance()\n self.accounts[server.id][user.id] = account\n self._save_stash()\n else:\n raise NoItem()\n\n def _save_stash(self):\n dataIO.save_json(\"data/atlas/stash.json\", self.accounts)\n\nclass atlas:\n \"\"\"A cog to manage Atlas, our D&D 5e West Marches campaign!\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.stash = Stash(bot, \"data/atlas/stash.json\")\n\n @commands.group(name=\"stash\", pass_context=True)\n async def _stash(self, ctx):\n \"\"\"Stash operations\"\"\"\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)\n\n @_stash.command(pass_context=True, no_pm=True)\n async def register(self, ctx):\n \"\"\"Registers for the stash!\"\"\"\n author = ctx.message.author\n try:\n account = self.stash.create_account(author)\n await self.bot.say(\"{} Account opened! Empty stash created.\"\n \"\".format(author.mention))\n except AccountAlreadyExists:\n await self.bot.say(\"{} You already have a stash. Silly.\".format(author.mention))\n\n @_stash.command(pass_context=True)\n async def show(self, ctx, item=''):\n \"\"\"Lists the things in the your stash, or shows how many of a particular item you have.\n\n Examples:\n stash show\n stash show dogs\"\"\"\n\n # Make the target the author\n\n if not item:\n user = ctx.message.author\n try:\n # If you have an account, display what's in it\n items = self.stash.get_items(user)\n if len(items):\n itemsstring = \"\"\n for item, amount in items.items():\n itemsstring += str(amount) +\" \"+ p.plural(item, amount) +\"\\n\"\n\n await self.bot.say(\"```{}'s Stash\\n-------------------------\\n{}```\".format(user.nick, itemsstring))\n else:\n await self.bot.say(\"{} Your stash is empty!\".format(user.mention))\n \n # If you don't have an account, say so\n except NoAccount:\n await self.bot.say(\"{} You don't have a registered stash.\"\n \" Type `{}stash register`\"\n \" to create one.\".format(user.mention,\n ctx.prefix))\n else:\n user = ctx.message.author\n try: \n items = self.stash.get_items(user)\n if item in items:\n amount = items[item]\n await self.bot.say(\"{} You have {} {} in your stash!\".format(user.nick, amount, p.plural_noun(item, amount)))\n else:\n await self.bot.say(\"{} You have no {} in your stash!\".format(user.nick, p.plural_noun(item, 2)))\n except NoAccount:\n await self.bot.say(\"{} You don't have a registered stash.\"\n \" Type `{}stash register`\"\n \" to create one.\".format(user.mention,\n ctx.prefix))\n\n\n @_stash.command(pass_context=True, no_pm=True)\n async def add(self, ctx, *, item: StashParser):\n \"\"\"Adds an item to your stash.\n\n Examples:\n stash add Shovel\n stash add Staff of rightousness\n stash add 5 paper airplanes\"\"\"\n\n author = ctx.message.author\n try:\n self.stash.deposit_item(author, item.name, item.amount)\n logger.info(\"{}({}) added {} '{}' to their stash.\".format(\n author.name, author.id, item.amount, item.name))\n await self.bot.say(\"{} {} {} {} been added to your stash.\".format(author.mention, item.amount,\n p.plural(item.name, item.amount),\n p.plural(\"has\", item.amount)))\n if item.amount == 69 or item.amount == 420:\n await self.bot.whisper(\"Hehe... nice...\")\n except DepositAll:\n await self.bot.say(\"{} You can't have ALL of that! Save some for the rest of us.\".format(author.mention))\n except ZeroCount:\n await self.bot.say(\"{} You have to add a positive amount of something!\".format(author.mention))\n except NoAccount:\n await self.bot.say(\"{} You don't have a registered stash.\"\n \" Type `{}stash register`\"\n \" to create one.\".format(author.mention,\n ctx.prefix))\n\n @_stash.command(pass_context=True, no_pm=True)\n async def remove(self, ctx, *, item: StashParser):\n \"\"\"Adds an item to your stash.\n\n Examples:\n stash remove Shovel\n stash remove Longsword of great power\n stash remove 6 marbles\"\"\"\n\n author = ctx.message.author\n try:\n self.stash.withdraw_item(author, item.name, item.amount)\n logger.info(\"{}({}) removed {} '{}' from their stash.\".format(\n author.name, author.id, item.amount, item.name))\n await self.bot.say(\"{} {} {} {} been removed from your stash.\".format(author.mention, item.amount,\n p.plural(item.name,item.amount),\n p.plural(\"has\",item.amount)))\n if item.amount == 69 or item.amount == 420:\n await self.bot.whisper(\"Pfff... haha!\")\n except NoItem:\n await self.bot.say(\"{} You don't have that item.\".format(author.mention))\n except ZeroCount:\n await self.bot.say(\"{} You have to remove a positive amount of something!\".format(author.mention))\n except InsufficientBalance:\n await self.bot.say(\"{} You don't have {} {} to remove!\".format(author.mention, item.amount, p.plural(item.name, item.amount)))\n except NoAccount:\n await self.bot.say(\"{} You don't have a registered stash.\"\n \" Type `{}stash register`\"\n \" to create one.\".format(author.mention,\n ctx.prefix))\n\n # Nice command\n @commands.command(pass_context=True)\n async def goodbot(self, ctx):\n \"\"\"Compliment the narrator.\"\"\"\n\n user = ctx.message.author\n start = random.choice([\"Aww, \"+user.mention+\". \",\n \"Thank you, \"+user.mention+\"! \",\n \"Aw' shucks. \",\n user.mention+\", you really do flatter me. \",\n \"Weeeee! \",\n \"I'm blushing. \"])\n\n tag = random.choice([\"You are a gem.\",\n \"But you are gooder.\",\n \"I know it.\",\n \"I try my best.\",\n \"It means a lot to me.\",\n \"You're gonna make me cry.\",\n \"But I am just following orders.\",\n \"But it was your idea.\",\n \"Thank you.\"])\n\n #Your code will go here\n await self.bot.say(start+tag)\n\n # Bad command\n @commands.command(pass_context=True)\n async def badbot(self, ctx):\n \"\"\"Scold the narrator.\"\"\"\n\n user = ctx.message.author\n tag = random.choice([\"I'm a bad, bad bot.\",\n \"I'm sorry!\",\n \"You're worse.\",\n \"I try my best. ;)\",\n \"Did you guys hear? \"+user.mention+\" admitted to being a horrible person.\",\n \"You're gonna make me cry.\",\n \"But I am just following orders.\",\n \"But it was your idea.\",\n \">:)\",\n \"Ooh, tell me I'm bad again, \"+user.mention,\n \"You're gonna make me blush.\",\n \"Once upon a time there was a meany named \"+user.mention+\". They got their steez from being mean to robotic narrators.\"])\n\n #Your code will go here\n await self.bot.say(tag)\n\n # Setting the scene\n @commands.command(pass_context=True)\n async def scene(self, ctx):\n \"\"\"Sets the seen for roleplay in The Bumbling Bee.\"\"\"\n\n #user = ctx.message.author\n tempurature = random.choice([\"cold\", \"mild\", \"hot\"])\n\n if tempurature == \"cold\":\n tempword = random.choice([\"frigid\", \"freezing\", \"chilled\", \"chilling\", \"icy\", \"cold\", \"frosty\", \"cool\"])\n tempword2 = random.choice([\"frigid\", \"freezing\", \"chilled\", \"chilling\", \"icy\", \"cold\", \"frosty\", \"cool\"])\n elif tempurature == \"hot\":\n tempword = random.choice([\"scalding\", \"hot\", \"warm\", \"sweltering\", \"broiling\", \"heated\", \"scorching\"])\n tempword2 = random.choice([\"scalding\", \"hot\", \"warm\", \"sweltering\", \"broiling\", \"heated\", \"scorching\"])\n else:\n tempword = random.choice([\"mild\",\"balmy\",\"temperate\",\"clement\"])\n tempword2 = random.choice([\"mild\",\"balmy\",\"temperate\",\"clement\"])\n\n\n tempsentence = random.choice([\n \"The night air was {}.\".format(tempword),\n \"A {} day became a {} night.\".format(tempword, tempword2),\n \"The denizens of Chart's End gathered after a long, {} day.\".format(tempword),\n \"Adventurers and citizens alike joined in to drink after the {} afternoon behind them.\".format(tempword),\n ])\n\n weatheroptions = [\"clear\",\"rainy\",\"cloudy\",\"hailing\"]\n \n if tempurature == \"cold\":\n weatheroptions.append(\"snowy\")\n\n weather = random.choice(weatheroptions)\n\n if weather == \"clear\":\n weathersentence = random.choice([\n \"Outside, stars painted the clear, {} sky.\".format(tempword2),\n \"The bright moon rose overhead.\",\n \"A stillness filled the town outside.\",\n \"A calmness outside contrasted the tavern's goings-on.\"\n ])\n elif weather == \"rainy\":\n weathersentence = random.choice([\n \"Heavy rain poured down outside.\",\n \"The heavy downpour drummed a steady beat on the tavern's roof.\",\n \"Mud tracks at the door left evidence of the rains outside.\"\n ])\n elif weather == \"cloudy\":\n weathersentence = random.choice([\n \"Thick clouds covered the skies.\",\n \"The stars were obscured by a foggy film.\",\n \"Fog swept over the town, dimming the moon's light.\"\n ])\n elif weather == \"hailing\":\n weathersentence = random.choice([\n \"Loud sounds of hail drowned out all but the loudest of conversations.\",\n \"The violent hail outside made the establishment a more welcoming place.\",\n \"Thick shards of ice fell quickly to the ground in loud bursts.\"\n ])\n elif weather == \"snowy\":\n weathersentence = random.choice([\n \"A thin film of snow covered the ground.\",\n \"The town layed beneath blankets of puffy white precipitation.\",\n \"A snowstorm filled the air with falling, grey clumps.\"\n ])\n\n people = random.choice([\"low\",\"medium\",\"full\"])\n\n if people == \"full\":\n peoplesentence = random.choice([\n \"The place was packed.\",\n \"The Bumbling Bee was full to the brim of various townsmen and explorers.\",\n \"Wall to wall people filled the floor, drinking and talking.\",\n \"There was hardly a place to sit with the number of people drinking tonight.\",\n ])\n peoplesound = random.choice([\n \"A myriad of hearty conversations could be heard echoing in the tavern air.\",\n \"Things were bustling, not a single opportunity arose for a stale moment.\",\n \"Talks of fame and fortune ring out as the various tavern-goers share stories with one another.\"\n ])\n elif people == \"low\":\n peoplesentence = random.choice([\n \"Only a small handful of people speckled the various seats in the bar.\",\n \"Attendence in the tavern this night was scarce.\",\n \"The place was nearly empty.\",\n \"The low attendence at The Bumbling Bee left many places open to sit.\"\n ])\n peoplesound = random.choice([\n \"A few quiety conversations were held between still bodies.\"\n \"It was eerily quiet.\",\n \"The sounds of utensils on plates and mugs hitting the table complimented the calm atmosphere.\"\n \"The crackling of the fire filled the gaps in conversation.\"\n ])\n else:\n peoplesentence = random.choice([\n \"Various tavern goers sat about drinking and talking.\",\n \"The bar had an enticing atmostphere on this night.\",\n \"The usual crowd sat scattered in their usual spots.\",\n \"The tavern had put on a lively persona tonight.\"\n ])\n peoplesound = random.choice([\n \"The occasional laugh peaked over ambient discussions.\",\n \"Toasts could be heard coming from a group near the back.\",\n \"The tavern-goers traded tales and kept the barrels flowing.\"\n ])\n\n specifichappening = random.choice([\n \"A loud argument could be heard between two of the patrons.\",\n \"A drunkard layed passed out on the floor.\",\n \"The barkeep cleaned tables while holding an attention capturing conversation with a patron.\",\n \"A stray cat shuffled quickly through the briefly open door and under a table.\",\n \"Crashing rang out as a dropped glass shattered on the floor.\",\n \"A small group of friends belted out a catchy shanty.\",\n \"A man fell to the floor as he laughed at a joke he found hysterical.\",\n \"A slightly intoxicated man wildly confessed his love to a \"+random.choice([\"flustered\",\"annoyed\",\"cute\",\"embarrassed\"])+\" girl.\",\n \"'Another round!' called out one patron.\",\n \"A\"+random.choice([\"n impressive\", \" slighty drunk\",\" mediocre\"])+\" bard played a\"\n +random.choice([\" well-known\",\" worn-out\",\"n ages-old\", \"n entertaining\"])+\" tune.\"\n ])\n\n sentences1 = [tempsentence, weathersentence]\n random.shuffle(sentences1)\n\n sentences2 = [peoplesentence, peoplesound]\n random.shuffle(sentences2)\n\n paragraph = \"\"\n for sentence in sentences1:\n paragraph += sentence + \" \"\n\n for sentence in sentences2:\n paragraph += sentence + \" \"\n\n paragraph += specifichappening\n\n\n #Your code will go here\n await self.bot.say(paragraph)\n\n # See what in-game-day it is\n @commands.command(pass_context=True)\n async def date(self, ctx):\n \"\"\"Learn what the current in-game date is.\"\"\"\n\n startdate = datetime.date(2018,4,1)\n now = startdate.today()\n\n diff = now-startdate\n\n gameyear = 1\n gamemonth = 3\n gameday = 1\n\n days = diff.days\n\n while days > 0:\n while days >= 180:\n gameyear += 1\n days -= 180\n\n while days >= 15:\n gamemonth += 1\n days -= 15\n\n while days >= 1:\n gameday += 2\n days -= 1\n\n if datetime.datetime.now().hour >= 13:\n gameday += 1\n\n months = {\n 1: \"Deepwinter\",\n 2: \"The Claw of Winter\",\n 3: \"The Claw of the Sunsets\",\n 4: \"The Claw of Storms\",\n 5: \"The Melting\",\n 6: \"The Time of Flowers\",\n 7: \"Summertide\",\n 8: \"Highsun\",\n 9: \"The Fading\",\n 10: \"Leaffall\",\n 11: \"The Rotting\",\n 12: \"The Drawing Down\",\n }\n\n await self.bot.say(\"It is the {} day of {}. {} year of the Age of The Portals.\".format(p.ordinal(gameday), months[gamemonth], p.ordinal(gameyear)))\n\n # See what in-game-day it is, shortened\n @commands.command(pass_context=True)\n async def shortdate(self, ctx):\n \"\"\"Learn what the current in-game date is, in a shortened format.\"\"\"\n\n startdate = datetime.date(2018,4,1)\n now = startdate.today()\n\n diff = now-startdate\n\n gameyear = 1\n gamemonth = 3\n gameday = 1\n\n days = diff.days\n\n while days > 0:\n while days >= 180:\n gameyear += 1\n days -= 180\n\n while days >= 15:\n gamemonth += 1\n days -= 15\n\n while days >= 1:\n gameday += 2\n days -= 1\n\n if datetime.datetime.now().hour >= 13:\n gameday += 1\n\n await self.bot.say(\"Day: {}\\nMonth: {}\\nYear: {}\".format(gameday, gamemonth, gameyear))\n\n # Convert a real date to an in-game date\n @commands.command(pass_context=True)\n async def realtogame(self, ctx, realDay, realMonth, realYear):\n \"\"\"See what in-game date a real date would be.\n\n Usage:\n realtogamedate day month year\n\n Example:\n realtogamedate 18 6 2019\n \"\"\"\n\n startdate = datetime.date(2018,4,1)\n realDate = realDay+realMonth+realYear\n\n try:\n target = datetime.datetime.strptime(realDate, \"%d%m%Y\").date()\n \n if target < startdate:\n raise TooEarly\n\n diff = target-startdate\n\n gameyear = 1\n gamemonth = 3\n gameday = 1\n\n days = diff.days\n\n while days > 0:\n while days >= 180:\n gameyear += 1\n days -= 180\n\n while days >= 15:\n gamemonth += 1\n days -= 15\n\n while days >= 1:\n gameday += 2\n days -= 1\n\n\n await self.bot.say(\"Day: {}-{}\\nMonth: {}\\nYear: {}\".format(gameday, gameday+1, gamemonth, gameyear))\n except TooEarly:\n await self.bot.say(\"The real date must be after April 1st, 2018! (That's when Atlas launched.)\")\n except ValueError:\n await self.bot.say(\"Invalid date entered! It goes day month year.\")\n\n \n\ndef setup(bot):\n global logger\n check_folders()\n check_files()\n logger = logging.getLogger(\"red.atlas\")\n if logger.level == 0:\n # Prevents the logger from being loaded again in case of module reload\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(\n filename='data/atlas/stash.log', encoding='utf-8', mode='a')\n handler.setFormatter(logging.Formatter(\n '%(asctime)s %(message)s', datefmt=\"[%d/%m/%Y %H:%M]\"))\n logger.addHandler(handler)\n bot.add_cog(atlas(bot))","sub_path":"atlas.py","file_name":"atlas.py","file_ext":"py","file_size_in_byte":25646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"274568729","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport math\nimport re\nfrom lch_proprietary.ml_pipeline_lch import isolate_categoricals, is_category\n\n\ndef iterate_subplots(axs, ncols, rows, ax_col, ax_row):\n ax_col += 1\n\n if (ax_col >= ncols) and ((ax_col + 1) % ncols) == 1:\n ax_row += 1\n ax_col = 0\n\n if rows == 1:\n current_subplot = axs[ax_col]\n\n elif ncols == 1:\n current_subplot = axs[ax_row]\n ax_row += 1\n else:\n current_subplot = axs[ax_row, ax_col]\n\n return current_subplot, ax_row, ax_col\n\n\ndef generate_axes(ncols, num_metrics, total_plot_width, sharey):\n rows = math.ceil(num_metrics / ncols)\n\n if ncols == 1 or (num_metrics % ncols == 0):\n axes_to_remove = 0\n else:\n axes_to_remove = ncols - (num_metrics % ncols)\n\n if not (0 < rows <= num_metrics):\n raise ValueError (\n \"Plot must have at least one row. Please update number of columns\"\n \" ('ncols') or check that at least one metric is specified in \"\n \"'metrics'.\")\n if not (0 < ncols <= num_metrics):\n raise ValueError(\n \"Plot must have at least one column, and no more columns than \"\n \"subplots. Please update number of columns ('ncols') or check \"\n \"that at least one metric is specified in 'metrics'.\")\n\n total_plot_width = total_plot_width\n\n fig, axs = plt.subplots(nrows=rows, ncols=ncols,\n figsize=(total_plot_width, 5 * rows),\n sharey=sharey,\n gridspec_kw={'wspace': 0.2, 'hspace': 0.4})\n\n return fig, axs, rows, axes_to_remove\n\n\ndef view_dist(df, geo_columns=True, fig_size=(20,15), labels = None, bins=10,\n density=False, sharey=False, ncols=4, categorical_keywords=None,\n total_plot_width=25):\n '''\n Plot distributions of non-categorical columns in a given dataframe\n\n Inputs:\n df: pandas dataframe\n geo_columns: list of column names corresponding to columns with numeric \n geographical information (ex: zipcodes) \n labels: list of labels to apply to plot: title, xlabel, ylabel, respectively\n '''\n non_categoricals = isolate_categoricals(df, categoricals_fcn=is_category, \n ret_categoricals=False, keywords=categorical_keywords, geos_indicator=geo_columns)\n\n num_metrics = len(non_categoricals)\n\n fig, axs, rows, axes_to_remove = generate_axes(ncols=ncols, num_metrics=num_metrics, \n total_plot_width=total_plot_width, sharey=sharey)\n\n # set a different distribution to be plotted in each subplot\n ax_col = -1\n ax_row = 0\n col_num = 0\n\n while col_num < num_metrics:\n col = non_categoricals[col_num]\n\n current_subplot, ax_row, ax_col = iterate_subplots(axs, ncols, rows, ax_col, ax_row)\n\n current_subplot.hist(x=df[col], density=density, bins=bins, align='left')\n \n plot_title = col.replace('_', ' ').upper() + \" DISTRIBUTION\"\n\n current_subplot.xaxis.grid(color='lightgray', which='major')\n current_subplot.yaxis.grid(color='lightgray', which='major')\n\n labels = [item.get_text().replace('_', ' ').upper() for item in current_subplot.get_xticklabels()]\n\n if '' not in labels and len(labels) > 5:\n current_subplot.set_xticklabels(labels, rotation=30, ha='center')\n else:\n plt.xticks(rotation=30, horizontalalignment='center')\n\n current_subplot.set_xlabel(col.replace('_', ' ').upper(), fontsize=12)\n current_subplot.set_ylabel(\"FREQUENCY\", fontsize=12)\n current_subplot.set_title(plot_title, fontsize=14)\n\n col_num += 1\n\n # disable axes not being used\n if axes_to_remove > 0:\n for i in np.arange(axes_to_remove):\n axs[-1, -(i + 1)].axis('off')\n\n fig.suptitle('NON-CATEGORICAL FEATURE DISTRIBUTIONS', fontsize=20)\n plt.show()\n\n\n\ndef plot_value_counts(df, col, norm = False, plot_kind = 'bar',\n ax=None):\n if ax is None:\n fig, ax = plt.subplots(figsize=(10, 5))\n\n plot_title = col.replace('_', ' ').upper() + \" DISTRIBUTION\"\n df[col].value_counts(normalize = norm).plot(kind=plot_kind, ax=ax)\n\n ax.set_axisbelow(True)\n ax.xaxis.grid(color='lightgray', which='major')\n ax.yaxis.grid(color='lightgray', which='major')\n\n labels = [item.get_text().replace('_', ' ').upper() for item in ax.get_xticklabels()]\n\n if '' not in labels:\n ax.set_xticklabels(labels, rotation=30, ha='center')\n else:\n plt.xticks(rotation=30, horizontalalignment='center')\n\n ax.set_xlabel(col.replace('_', ' ').upper(), fontsize=12)\n ax.set_ylabel(\"FREQUENCY\", fontsize=12)\n ax.set_title(plot_title, fontsize=20)\n\n return ax\n\n\n\n\ndef plot_multiple_value_counts(df, type_dict, category, norm = False,\n plot_kind = 'bar', palette=None, ncols=3):\n\n num_metrics = len(type_dict[category])\n cols_to_plot = type_dict[category]\n\n\n rows = math.ceil(num_metrics / ncols)\n\n if ncols == 1 or (num_metrics % ncols == 0):\n axes_to_remove = 0\n else:\n axes_to_remove = ncols - (num_metrics % ncols)\n\n if not (0 < rows <= num_metrics):\n raise ValueError (\n \"Plot must have at least one row. Please update number of columns\"\n \" ('ncols') or check that at least one metric is specified in \"\n \"'metrics'.\")\n if not (0 < ncols <= num_metrics):\n raise ValueError(\n \"Plot must have at least one column, and no more columns than \"\n \"subplots. Please update number of columns ('ncols') or check \"\n \"that at least one metric is specified in 'metrics'.\")\n\n total_plot_width = 25\n\n fig, axs = plt.subplots(nrows=rows, ncols=ncols,\n figsize=(total_plot_width, 6 * rows),\n sharey=True,\n gridspec_kw={'wspace': 0.075, 'hspace': 0.4})\n\n # set a different distribution to be plotted in each subplot\n ax_col = -1\n ax_row = 0\n col_num = 0\n\n while col_num < num_metrics:\n col = cols_to_plot[col_num]\n\n current_subplot, ax_row, ax_col = iterate_subplots(axs, ncols, rows, ax_col, ax_row)\n\n plot_value_counts(df=df, col=col, ax=current_subplot,\n norm = norm, plot_kind = plot_kind)\n col_num += 1\n\n # disable axes not being used\n if axes_to_remove > 0:\n for i in np.arange(axes_to_remove):\n axs[-1, -(i + 1)].axis('off')\n\n plt.show()\n\n\ndef check_corr(df, geo_columns = True, cat_cols = None):\n '''\n Display heatmap of linear correlation between non-categorical columns in a\n given dataframe\n\n Inputs:\n df: pandas dataframe\n geo_columns: list of column names corresponding to columns with numeric\n geographical information (ex: zipcodes)\n\n Attribution: Colormap Attribution: adapted from gradiated dataframe at\n https://www.datascience.com/blog/introduction-to-correlation-learn-data-science-tutorials and correlation heatmap at https://stackoverflow.com/questions/29432629/correlation-matrix-using-pandas\n '''\n try:\n non_categoricals = isolate_categoricals(df, categoricals_fcn = is_category,\n ret_categoricals = False, geos_indicator = geo_columns)\n\n fig, ax = plt.subplots(figsize=(12, 12))\n corr = df[non_categoricals].corr(method=\"pearson\")\n sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool),\n cmap=plt.get_cmap(\"coolwarm\"), square=True, ax=ax, annot=True)\n\n ax.set_xticks(range(len(non_categoricals)))\n ax.set_yticks(range(len(non_categoricals)))\n\n ax.tick_params(direction='inout')\n ax.set_xticklabels(non_categoricals, rotation=45, ha='right')\n ax.set_yticklabels(non_categoricals, rotation=45, va='top')\n plt.title('Feature Correlation')\n plt.show()\n\n except:\n if cat_cols:\n cat_df = df[df.columns]\n\n for col in cat_cols:\n cat_df[col] = cat_df[col].astype('categorical')\n\n fig, ax = plt.subplots(figsize=(12, 12))\n corr = cat_df.corr(method=\"pearson\")\n sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool),\n cmap=plt.get_cmap(\"coolwarm\"), square=True, ax=ax, annot=True)\n\n ax.set_xticks(range(len(cat_df.columns)))\n ax.set_yticks(range(len(cat_df.columns)))\n\n ax.tick_params(direction='inout')\n ax.set_xticklabels(cat_df.columns, rotation=45, ha='right')\n ax.set_yticklabels(cat_df.columns, rotation=45, va='top')\n plt.title('Feature Correlation')\n plt.show()\n\n\n\ndef discretize_cols(df, num_bins, geo_columns=True, specific_cols = False, split = False):\n '''\n Add columns to discretize and classify non-categorical columns in a given\n data frame\n\n Inputs:\n df: pandas dataframe\n geo_columns: list of column names corresponding to columns with\n numeric geographical information (ex: zipcodes)\n num_bins: number of groups into which column values should be\n discretized\n '''\n if specific_cols:\n non_categoricals = specific_cols\n else:\n non_categoricals = isolate_categoricals(df,\n categoricals_fcn = is_category, ret_categoricals = False,\n geos_indicator = geo_columns)\n\n for col in non_categoricals:\n bin_col = col + \"_bin\"\n if col == \"age\":\n age_bins = math.ceil((df[col].max() - df[col].min()) / 10)\n\n if split:\n df[bin_col], train_bins = pd.cut(df[col], bins = age_bins,\n right = False, precision=0, retbins=split)\n else:\n df[bin_col] = pd.cut(df[col], bins = age_bins, right = False,\n precision=0, retbins=split)\n else:\n if split:\n df[bin_col], train_bins = pd.cut(df[col], bins = num_bins,\n precision=0, retbins=split)\n else:\n df[bin_col] = pd.cut(df[col], bins = num_bins, precision=0,\n retbins=split)\n if split:\n return train_bins\n\n\n\n\n\ndef discretize_train_test(train_test_tuples, still_blanks):\n for i, (train, test) in enumerate(train_test_tuples):\n fill_cols = still_blanks[i]\n for col in fill_cols:\n grouped = col + '_bin'\n train[grouped], train_bins = pd.cut(train[col], bins = 4, precision = 0, retbins = True)\n test[grouped] = pd.cut(test[col], bins = train_bins, precision = 0)\n\n\ndef confirm_train_test_discretization(train_test_tuples, still_blanks):\n for i, (train, test) in enumerate(train_test_tuples):\n for col in still_blanks[i]:\n grouped = col\n grouped = col + '_bin'\n print(\"set {} {} train: {}.\".format(i, col, train[grouped].unique()))\n print()\n print(\"set {} {} test: {}.\".format(i, col, test[grouped].unique()))\n print()\n\n\ndef drop_tt_binned(train_test_tuples, to_drop):\n '''\n Drop columns from both train and test sets.\n\n Inputs:\n train_test_tuples: list of tupled dataframes\n to_drop: list of columns to drop\n '''\n for train, test in train_test_tuples:\n train.drop(to_drop, axis = 1, inplace = True)\n test.drop(to_drop, axis = 1, inplace = True)\n\n\ndef create_binary_vars(df, cols_to_dummy, keyword_list):\n '''\n Create columns of binary values corresponding to values above zero for\n selected columns in a given dataframe based on common keywords\n\n Inputs:\n df: pandas dataframe\n cols_to_dummy: (list of strings) columns in data frame to be evaluated\n into dummy variables\n keyword_list: (list of strings) words or phrases included in columns\n to be evaluated indicating a dummy variable should be created based\n on its values\n '''\n keyword_string = (\"|\").join(keyword_list)\n for col in cols_to_dummy:\n colname_trunc = re.sub(keyword_string, '', col)\n binary_col_name = 'tf_' + colname_trunc\n df[binary_col_name] = df[col].apply(lambda x: x > 0)\n\n\n\ndef plot_corr(df, color_category, geo_columns=True):\n '''\n Observe distributions and correlations of features for non-categorical\n\n Inputs:\n df: pandas dataframe\n categoricals_list: list of strings corresponding to categorical columns\n (ex: zip codes)\n '''\n non_categoricals = isolate_categoricals(df, categoricals_fcn = is_category,\n ret_categoricals = False, geos_indicator = geo_columns)\n\n plot_list = non_categoricals + [color_category]\n corr = sns.pairplot(df[plot_list], hue = color_category, palette = \"Set2\")\n\n\n\ndef plot_relationship(df, feature_x, xlabel,feature_y, ylabel, xlimit = None,\n ylimit = None, color_cat = None, filter_col = None,\n filter_criteria = None, filter_param = None,\n filter_param2 = None):\n '''\n Plot two features in a given data frame against each other to view\n relationship and outliers.\n\n Attribution: adapted from https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Python_Seaborn_Cheat_Sheet.pdf\n '''\n if filter_col and filter_criteria and filter_param:\n if filter_criteria == 'geq':\n use_df = df[df[filter_col] >= filter_param]\n elif filter_criteria == 'gt':\n use_df = df[df[filter_col] > filter_param]\n elif filter_criteria == 'leq':\n use_df = df[df[filter_col] <= filter_param]\n elif filter_criteria == 'lt':\n use_df = df[df[filter_col] < filter_param]\n elif filter_criteria == 'eq':\n use_df = df[df[filter_col] == filter_param]\n elif filter_criteria == 'neq':\n use_df = df[df[filter_col] != filter_param]\n elif filter_criteria == 'between':\n use_df = df[(df[filter_col] > filter_param) and (df[filter_col] < filter_param2)]\n\n g = sns.lmplot(x = feature_x, y = feature_y, data = use_df, aspect = 3,\n hue = color_cat)\n g = (g.set_axis_labels(xlabel,ylabel)).set(xlim = xlimit , ylim = ylimit)\n plot_title = ylabel + \" by \" + xlabel\n plt.title(plot_title)\n plt.show(g)\n\n else:\n g = sns.lmplot(x = feature_x, y = feature_y, data = df, aspect = 3,\n hue = color_cat)\n g = (g.set_axis_labels(xlabel,ylabel)).set(xlim = xlimit , ylim = ylimit)\n plot_title = ylabel + \" by \" + xlabel\n plt.title(plot_title)\n plt.show(g)\n\n\n\n\n\n\n\ndef eval_ratios(df, include_cols, category_cols, method = \"count\",\n pct = False):\n '''\n Evaluate specific features via grouping on one or more category\n\n Inputs:\n df: (dataframe) pandas dataframe\n include_cols: (list of strings) column names to be aggregated or\n grouped\n category_cols: (list of strings) column name(s) for variable(s) used\n for grouping\n method: (string) groupby aggregation method for column values\n\n Output:\n ratio_df: pandas data frame of grouped data\n '''\n if method == \"count\":\n ratio_df = df[include_cols].groupby(category_cols).count()\n if pct:\n single_col = include_cols[-1] + \" Percentage\"\n ratio_df[single_col] = ((df[include_cols].groupby(category_cols).count() /\n df[include_cols].groupby(category_cols).count().sum()) * 100)\n\n elif method == \"sum\":\n ratio_df = df[include_cols].groupby(category_cols).sum()\n if pct:\n single_col = include_cols[-1] + \" Percentage\"\n ratio_df[single_col] = ((df[include_cols].groupby(category_cols).sum() /\n df[include_cols].groupby(category_cols).sum().sum()) * 100)\n return ratio_df\n\n\n\ndef feature_by_geo(df, geo, expl_var, num_var, method = \"median\"):\n '''\n Evaluate specific features by geography (ex: zip code)\n\n Inputs:\n df: (dataframe) pandas dataframe\n geo: (string) column name corresponding to geography used for grouping\n expl_var: (string) column name for exploratory variable used for\n grouping\n num_var: (string) column name for numeric variable/ feature to be\n aggregated\n method: (string) groupby aggregation method for column values\n\n Output:\n geo_features: pandas data frame of grouped data\n '''\n df_geo = df[(df[geo] != 0)]\n groupby_list = [geo] + expl_var\n if method == \"median\":\n geo_features = df_geo.groupby(groupby_list)[num_var].median().unstack(level = 1)\n if method == \"count\":\n geo_features = df_geo.groupby(groupby_list)[num_var].count().unstack(level = 1)\n geo_features.fillna(value = \"\", inplace = True)\n return geo_features\n\n\n\ndef plot_top_distros(train_test_tuples, var_dict, set_num):\n for i, col in enumerate(var_dict['tops']):\n train, test = train_test_tuples[set_num]\n plot_title = \"Projects by {} for Training Set {}\".format(col, set_num)\n train[col].value_counts().sort_index().plot(kind='bar', title = plot_title)\n plt.show()\n","sub_path":"lch_proprietary/ml_explore.py","file_name":"ml_explore.py","file_ext":"py","file_size_in_byte":17305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"218161014","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nfrom six import print_\nfrom itertools import tee\n\nVOWELS = set(\"aeiou\")\nNAUGHTY = set([\"ab\", \"cd\", \"pq\", \"xy\"])\n\ndef enough_vowels(s):\n count = 0\n for ch in s:\n if ch in VOWELS:\n count += 1\n if count == 3:\n return True\n return False\n\ndef pairwise(iterable):\n a, b = tee(iterable, 2)\n next(b)\n return zip(a, b)\n\ndef met_pair_condition(s):\n found_double = False\n for a, b in pairwise(s):\n if a + b in NAUGHTY:\n return False\n if a == b:\n found_double = True\n return found_double\n\ndef nice(s):\n return enough_vowels(s) and met_pair_condition(s)\n\ndef main():\n print_(sum(nice(line) for line in sys.stdin))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day5-1.py","file_name":"day5-1.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"92827095","text":"from pandac.PandaModules import *\r\n\r\nclass To2D: \r\n \"\"\" class that can take all the things \r\n that where added to it and manipulate \r\n them in 2d guish kinda way \"\"\"\r\n \r\n def getGUIPos(self,node): \r\n \"\"\" node -> pos on screen , -100,-100 it its not on screen\"\"\"\r\n pos2d = self.compute2dPosition(node,Vec3(0,0,0))\r\n if pos2d:\r\n return Point2((pos2d[0]+1)*gui.windowsize[0]/2, (-pos2d[1]+1)*gui.windowsize[1]/2)\r\n return Point2(-100,-100)\r\n \r\n def getThingClosestTo(self,cThings,pos,far=9999):\r\n try:\r\n cThings = cThings.values()\r\n except: pass\r\n things = []\r\n for thing in cThings:\r\n pos2d = self.compute2dPosition(thing.node,Vec3(0,0,0))\r\n if pos2d:\r\n pos2d = Vec2(\r\n (pos2d[0]+1)*gui.windowsize[0]/2, \r\n (-pos2d[1]+1)*gui.windowsize[1]/2)\r\n distance = (pos2d-pos).length()\r\n #print pos2d,pos,distance,thing\r\n if distance < far:\r\n far = distance\r\n things = [thing]\r\n return things\r\n \r\n def getThingInGUIRec(self,cThings,rec):\r\n \"\"\" is there a thing in any of the rectagle \"\"\"\r\n try:\r\n cThings = cThings.values()\r\n except: pass\r\n sx,ex,sy,ey = rec \r\n if sx > ex: sx,ex = ex,sx\r\n if sy > ey: sy,ey = ey,sy\r\n things = []\r\n for thing in cThings:\r\n pos2d = self.compute2dPosition(thing.node,Vec3(0,0,0))\r\n if pos2d:\r\n pos2d = Point2(\r\n (pos2d[0]+1)*gui.windowsize[0]/2, \r\n (-pos2d[1]+1)*gui.windowsize[1]/2)\r\n x,y=pos2d[0],pos2d[1]\r\n if sx < x and x < ex and sy < y and y < ey :\r\n things.append(thing) \r\n return things \r\n \r\n def compute2dPosition(self,nodePath, point = Point3(0, 0, 0)):\r\n \"\"\" Computes a 3-d point, relative to the indicated node, into a\r\n 2-d point as seen by the camera. The range of the returned value\r\n is based on the len's current film size and film offset, which is\r\n (-1 .. 1) by default. \"\"\"\r\n if base.win.hasSize():\r\n p3d = base.cam.getRelativePoint(nodePath, point)\r\n p2d = Point2()\r\n if base.cam.node().getLens().project(p3d, p2d):\r\n return p2d\r\n return None ","sub_path":"game/GUI/Treegui/to2d.py","file_name":"to2d.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"605798735","text":"class BaseQueueException(Exception):\n \"\"\"The base exception class.\"\"\"\n fmt = 'An unspecified error occurred'\n\n def __init__(self, **kwargs):\n msg = self.fmt.format(**kwargs)\n super().__init__(self, msg)\n self.kwargs = kwargs\n\n\nclass BaseQueueManagerException(BaseQueueException):\n \"\"\"The base exception class for QueueManager exceptions.\"\"\"\n\n\nclass BaseProviderException(BaseQueueException):\n \"\"\"The base exception class for Providers exceptions.\"\"\"\n\n\nclass ImproperProviderException(BaseQueueManagerException):\n \"\"\"Raised when QueueManager retrieve wrong provider class.\"\"\"\n fmt = 'You cannot use class {class_name} with MessageQueueManager'\n","sub_path":"queue_manager/queue_manager/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"583930413","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Post\nfrom .forms import PostForm\n\n# Create your views here.\n\n\ndef index(request):\n '''The home page for blog.'''\n return render(request, 'blog_app/index.html')\n\n\ndef posts(request):\n '''Show all posts.'''\n posts = Post.objects.order_by('date_added')\n context = {'posts': posts}\n return render(request, 'blog_app/posts.html', context)\n\n\n@login_required\ndef new_post(request):\n '''A page for adding post.'''\n if request.method != 'POST':\n # No data submitting; create a blank form\n form = PostForm()\n else:\n # POST data submitted; process data\n form = PostForm(request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.owner = request.user\n new_post.save()\n return HttpResponseRedirect(reverse('blog_app:posts'))\n\n context = {'form': form}\n return render(request, 'blog_app/new_post.html', context)\n\n\n@login_required\ndef edit_post(request, post_id):\n '''A page for editing a post.'''\n post = Post.objects.get(id=post_id)\n if post.owner != request.user:\n raise Http404\n\n if request.method != 'POST':\n # Initial request; pre-fill with the current entry\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('blog_app:posts'))\n\n context = {'post': post, 'form': form}\n return render(request, 'blog_app/edit_post.html', context)\n","sub_path":"project1/venv/projects/blog/blog_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"439894908","text":"import os\nimport maindata as md\n\ndef display_window():\n \"\"\"显示用户界面\"\"\"\n print('+++++++++fei+++++++++')\n print(' 1.上线 ')\n print(' 2.下线 ')\n print(' 3.显示用户 ')\n print(' 4.发送信息 ')\n print(' 5.发送文件 ')\n print(' 6.下载文件 ')\n print('+++++++++++++++++++++')\n\ndef display_user_list():\n \"\"\"显示在线用户\"\"\"\n for i, user in enumerate(md.online_user_list):\n md.user_info_list.append(user)\n print(i, user)\n\ndef display_current_list():\n \"\"\"显示当前目录下的所有文件(不包括文件夹)\"\"\"\n file_list = os.listdir()\n temp_list = list()\n for e in file_list:\n if not os.path.isdir(e):\n temp_list.append(e)\n for i, e in enumerate(temp_list):\n md.current_file_list.append(e)\n print(\"{}: {}\".format(i,e))\n\ndef display_current_upload():\n \"\"\"显示当前正在上传的文件\"\"\"\n for e in md.upload_list:\n print(e)\n\ndef display_current_download():\n \"\"\"显示可下载的文件\"\"\"\n for i, e in enumerate(md.download_list):\n print('{}: {}'.format(i,e))\n","sub_path":"Display.py","file_name":"Display.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"629905209","text":"from __future__ import unicode_literals\nfrom numpy import array, argsort\nfrom fractions import gcd as _gcd\nfrom math import pi\nfrom pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius\nfrom pychemia.utils.computing import deep_unicode\nfrom builtins import str\nfrom functools import reduce\nfrom collections import Mapping\n\n\nclass Composition(Mapping):\n u\"\"\"\n The class Composition is basically a dictionary with species as keys and\n number of atoms of that specie as values. The methods provided for Composition objects should\n not contain geometrical information or graph connectivity.\n\n The main purpose of this class is to be able to parse formulas into compositions and return\n string formulas sorted in various ways.\n \"\"\"\n\n def __init__(self, value=None):\n \"\"\"\n Creates a new composition, internally it is a dictionary\n where each specie is the key and the value is an integer\n with the number of atoms of that specie\n\n :param value: (str, dict) The value could be a string with a chemical formula or the actual dictionary\n of species and values\n\n :rtype: Composition\n\n Example:\n >>> import pychemia\n >>> comp = pychemia.Composition({'Ba': 2, 'Cu': 3, 'O': 7, 'Y': 1})\n >>> comp.formula\n u'Ba2Cu3O7Y'\n >>> comp = pychemia.Composition('Ba2Cu3O7Y')\n >>> comp2 = pychemia.Composition(comp)\n >>> len(comp2)\n 4\n >>> comp.nspecies\n 4\n >>> comp = pychemia.Composition()\n >>> comp.composition\n {}\n >>> len(comp)\n 0\n\n \"\"\"\n self._composition = {}\n if value is not None:\n value = deep_unicode(value)\n if isinstance(value, str):\n self._set_composition(self.formula_parser(value))\n elif isinstance(value, dict):\n self._set_composition(value)\n elif isinstance(value, Composition):\n self._set_composition(value.composition)\n elif hasattr(value, \"__len__\"):\n dvalue = {}\n for i in value:\n if i in dvalue:\n dvalue[i] += 1\n else:\n dvalue[i] = 1\n self._set_composition(dvalue)\n else:\n self._composition = {}\n\n def __len__(self):\n return len(self._composition)\n\n def __getitem__(self, item):\n if item in self._composition:\n return self._composition[item]\n else:\n return 0\n\n def __repr__(self):\n return 'Composition(' + str(self.composition) + ')'\n\n def __str__(self):\n ret = ''\n for i in self.species:\n ret += \" %3s: %4d \" % (i, self.composition[i])\n return ret\n\n def __iter__(self):\n return iter(self.composition)\n\n def __contains__(self, item):\n return item in self._composition\n\n def _set_composition(self, value):\n \"\"\"\n Checks the values of a dictionary before seting the actual composition\n\n :param value: (dict)\n :rtype: None\n \"\"\"\n for i in value:\n assert (i in atomic_symbols)\n assert (isinstance(value[i], int))\n self._composition = value.copy()\n\n @property\n def composition(self):\n \"\"\"\n :return: The composition dictionary\n\n :rtype: dict\n \"\"\"\n return self._composition\n\n @property\n def formula(self):\n \"\"\"\n :return: The chemical formula with atoms sorted alphabetically\n\n :rtype: str\n \"\"\"\n return self.sorted_formula(sortby='alpha', reduced=True)\n\n @property\n def gcd(self):\n \"\"\"\n The number of formulas that can be extracted from a composition\n The greatest common denominator for the composition.\n\n :rtype: (int)\n\n Example:\n >>> import pychemia\n >>> comp = pychemia.Composition('NaCl')\n >>> comp.gcd\n 1\n >>> comp = pychemia.Composition('Na2Cl2')\n >>> comp.gcd\n 2\n >>> comp = pychemia.Composition()\n >>> comp.gcd is None\n True\n\n \"\"\"\n if self.natom > 0:\n return reduce(_gcd, self.values)\n else:\n return None\n\n @property\n def symbols(self):\n ret = []\n for specie in self:\n number_atoms_specie = self.composition[specie]\n for i in range(number_atoms_specie):\n ret.append(specie)\n return sorted(deep_unicode(ret))\n\n @property\n def species(self):\n \"\"\"\n :return: The list of species\n\n :rtype: list\n \"\"\"\n return [deep_unicode(x) for x in self._composition]\n\n @property\n def nspecies(self):\n return len(self.species)\n\n @property\n def values(self):\n \"\"\"\n :return: The number of atoms of each specie\n\n :rtype: list\n \"\"\"\n return [self._composition[x] for x in self._composition]\n\n @property\n def natom(self):\n \"\"\"\n :return: The number of atoms in the composition\n\n :rtype: int\n \"\"\"\n return sum(self.values)\n\n @staticmethod\n def formula_parser(value):\n \"\"\"\n :return: Convert an string representing a chemical formula into a dictionary with the species as keys\n and values as the number of atoms of that specie\n\n :param value: (str) String representing a chemical formula\n\n :rtype: dict\n\n Examples:\n >>> import pychemia\n >>> import pprint\n >>> pychemia.Composition.formula_parser('Au20')\n {u'Au': 20}\n >>> ret = pychemia.Composition.formula_parser('UutUupUusUuo')\n >>> pprint.pprint(ret)\n {u'Uuo': 1, u'Uup': 1, u'Uus': 1, u'Uut': 1}\n\n \"\"\"\n ret = {}\n jump = False\n for i in range(len(value)):\n if jump > 0: # This char belongs to the current atom, move on\n jump -= 1\n elif value[i].isupper(): # Atom Name starts with Uppercase\n if i + 1 < len(value) and value[i + 1].islower(): # Atom name has more than 1 char\n if i + 2 < len(value) and value[i + 2].islower(): # Atom name has more than 2 chars\n specie = value[i:i + 3]\n jump = 2\n else:\n specie = value[i:i + 2]\n jump = 1\n else:\n specie = value[i]\n jump = 0\n j = 1\n number = ''\n while True:\n if i + jump + j < len(value) and value[i + jump + j].isdigit():\n number += value[i + jump + j]\n j += 1\n else:\n break\n if number == '':\n ret[specie] = 1\n else:\n ret[specie] = int(number)\n return ret\n\n @staticmethod\n def formula_to_list(formula, nunits=1):\n \"\"\"\n Reads a formula and returns a list of\n atomic symbols consistent with the formula\n and the number of formulas given by nunits\n\n :param formula: (str) Chemical formula as string\n\n :param nunits: (int) Number of formulas to apply\n\n :rtype : (list)\n\n Examples:\n >>> import pychemia\n >>> pychemia.Composition.formula_to_list('NaCl')\n [u'Na', u'Cl']\n >>> flist = pychemia.Composition.formula_to_list(u'Uut2Uup3Uus4Uuo5')\n >>> len(flist)\n 14\n >>> flist = pychemia.Composition.formula_to_list('Uut2Uup3Uus4Uuo5', nunits=2)\n >>> len(flist)\n 28\n\n \"\"\"\n import re\n\n # decompose composition\n a = re.findall(r\"[A-Z][a-z0-9]*\", formula)\n composition = []\n for i in a:\n m = re.match(r\"([A-Za-z]+)([0-9]*)\", i)\n if m.group(2) == \"\":\n n = int(1)\n else:\n n = int(m.group(2))\n\n for j in range(n * nunits):\n composition.append(m.group(1))\n\n return composition\n\n def sorted_formula(self, sortby='alpha', reduced=True):\n \"\"\"\n :return: The chemical formula. It could be sorted alphabetically using sortby='alpha', by electronegativity\n using sortby='electroneg' or using Hill System with sortby='Hill'\n\n :param sortby: (str) 'alpha' : Alphabetically\n 'electroneg' : Electronegativity\n 'hill' : Hill System\n\n :param reduced: (bool) If the formula should be normalized\n\n :rtype: str\n\n >>> comp=Composition('YBa2Cu3O7')\n >>> comp.sorted_formula()\n u'Ba2Cu3O7Y'\n >>> comp.sorted_formula(sortby='hill')\n u'Ba2Cu3O7Y'\n >>> comp.sorted_formula(sortby='electroneg')\n u'Ba2YCu3O7'\n >>> comp = Composition('H10C5')\n >>> comp.sorted_formula(sortby='hill', reduced=True)\n u'CH2'\n >>> comp = Composition('IBr')\n >>> comp.sorted_formula(sortby='hill', reduced=False)\n u'BrI'\n >>> comp = Composition('Cl4C')\n >>> comp.sorted_formula(sortby='hill', reduced=False)\n u'CCl4'\n >>> comp = Composition('IH3C')\n >>> comp.sorted_formula(sortby='hill', reduced=False)\n u'CH3I'\n >>> comp = Composition('BrH5C2')\n >>> comp.sorted_formula(sortby='hill', reduced=False)\n u'C2H5Br'\n >>> comp = Composition('S04H2')\n >>> comp.sorted_formula(sortby='hill', reduced=False)\n u'H2S4'\n >>> comp = Composition('SO4H2')\n >>> comp.sorted_formula(sortby='hill', reduced=False)\n u'H2O4S'\n\n \"\"\"\n if reduced and self.gcd > 1:\n comp = Composition(self.composition)\n for i in comp.composition:\n comp._composition[i] //= self.gcd\n else:\n comp = self\n if sortby == 'electroneg':\n electroneg = list(electronegativity(comp.species))\n for i in range(len(electroneg)):\n if electroneg[i] is None:\n electroneg[i] = -1\n sortedspecies = array(comp.species)[argsort(electroneg)]\n elif sortby == \"hill\": # FIXME: Hill system exceptions not implemented\n sortedspecies = []\n presortedspecies = sorted(comp.species)\n if 'C' in presortedspecies:\n sortedspecies.append('C')\n presortedspecies.pop(presortedspecies.index('C'))\n if 'H' in presortedspecies:\n sortedspecies.append('H')\n presortedspecies.pop(presortedspecies.index('H'))\n sortedspecies += presortedspecies\n else:\n sortedspecies = sorted(comp.species)\n ret = u''\n for specie in sortedspecies:\n ret += '%s' % specie\n if comp.composition[specie] > 1:\n ret += \"%d\" % comp.composition[specie]\n return deep_unicode(ret)\n\n def species_encoded(self, base):\n ret = 0\n i = 0\n for atom_number in sorted(atomic_number(self.species)):\n ret += atom_number * (base ** i)\n i += 1\n return ret\n\n def species_hex(self):\n \"\"\"\n Encodes the species into a hexadecimal representation where\n each specie is stored on a 2-Byte slot ordered by atomic\n number.\n This is a 'confortable' encoding where each 2 characters\n from the hexadecimal will encode a single species and the\n species are ordered by atomic number making the codification\n unique.\n\n :return: str\n\n Example:\n >>> comp = Composition('YBa2Cu3O7')\n >>> comp.species_hex()\n '0x38271d08'\n\n \"\"\"\n enc = self.species_encoded(256)\n return hex(enc)\n\n @staticmethod\n def get_species_from_hex(arg):\n \"\"\"\n Return a set of species from the encoded species hexadecimal\n representation.\n\n :param arg: str String with hexadecimal representation of list of species.\n :return:\n\n Example:\n >>> Composition.get_species_from_hex('0x38271d08')\n [8, 29, 39, 56]\n\n \"\"\"\n num = int(arg, 16)\n ret = []\n while num > 0:\n ret.append(num % 256)\n num = (num-ret[-1])//256\n return ret\n\n def covalent_volume(self, packing='cubes'):\n \"\"\"\n Returns the volume occupied by a given formula\n assuming a 'cubes' packing or 'spheres' packing\n\n :param packing: (str) The kind of packing could be 'cubes' or 'spheres'\n\n :rtype : (float)\n\n >>> import pychemia\n >>> comp=pychemia.Composition('C5H10')\n >>> comp.covalent_volume()\n 19.942320000000002\n >>> comp.covalent_volume(packing='spheres')\n 10.441774334589468\n\n \"\"\"\n if packing == 'cubes':\n factor = 8\n elif packing == 'spheres':\n factor = 4 * pi / 3.0\n else:\n raise ValueError('Non-valid packing value ', packing)\n\n # find volume of unit cell by adding cubes\n volume = 0.0\n for specie in self:\n number_atoms_specie = self.composition[specie]\n # Pack each atom in a cube (2*r)^3\n volume += factor * number_atoms_specie * covalent_radius(specie) ** 3\n return volume\n","sub_path":"pychemia/core/composition.py","file_name":"composition.py","file_ext":"py","file_size_in_byte":13441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"463550533","text":"#!/usr/bin/env python3\n\"\"\"Parameter utilities for general modular graphs.\"\"\"\n# -*- coding: utf-8 -*-\n# @author: Jean-Gabriel Young \nimport numpy as np\n\n\nensemble_types = ['simple_undirected', 'simple_directed',\n 'undirected', 'directed']\n\n\ndef get_m_max(n, ensemble=\"simple_undirected\"):\n \"\"\"Get maximal edge counts between block pairs, for block sizes n.\"\"\"\n q = len(n) # number of blocks\n m_max = np.zeros((q, q))\n for i in range(q):\n if ensemble == \"simple_undirected\":\n m_max[i, i] = n[i] * (n[i] - 1) / 2\n elif ensemble == \"simple_directed\":\n m_max[i, i] = n[i] * (n[i] - 1)\n elif ensemble == \"undirected\":\n m_max[i, i] = n[i] * (n[i] + 1) / 2\n else: # ensemble == \"directed\":\n m_max[i, i] = n[i] ** 2\n for j in range(i + 1, q):\n m_max[i, j] = n[i] * n[j]\n m_max[j, i] = n[i] * n[j]\n return m_max.astype(int)\n\n\ndef get_beta(w, n, ensemble='simple_undirected'):\n \"\"\"Get the value of beta for the indicator matrix W and block sizes n.\"\"\"\n m_max = get_m_max(n, ensemble)\n if ensemble == \"simple_undirected\" or ensemble == \"undirected\":\n normalization = np.sum(np.triu(m_max))\n return np.sum(np.triu(m_max * w / normalization))\n elif ensemble == \"simple_directed\" or ensemble == \"directed\":\n normalization = np.sum(m_max)\n return np.sum(m_max * w / normalization)\n\n\ndef get_p(w, p_out, p_in):\n \"\"\"Construct probability matrix from indicator matrix and densities.\"\"\"\n return w * (p_in - p_out) + np.ones_like(w) * p_out\n\n\ndef to_probability_space(rho, delta, beta):\n \"\"\"Map parameters from the density space to the probability space.\n Return\n ------\n (p_out, p_in) : tuple of float\n Internal and external densities.\n \"\"\"\n return (rho - delta * beta, rho + delta * (1 - beta))\n\n\ndef to_density_space(p_out, p_in, beta):\n \"\"\"Map parameters from the probability space to the density space.\n Return\n ------\n (rho, delta) : tuple of float\n Density space coordinates.\n \"\"\"\n return ((beta * p_in + (1 - beta) * p_out), p_in - p_out)\n\n\ndef in_allowed_region(rho, delta, beta):\n \"\"\"Check whether a (rho,Delta) coordinate is in the allowed region.\"\"\"\n if delta < 0: # map to upper region under the rotation symmetry\n rho = 1 - rho\n if rho <= beta:\n return rho / beta >= abs(delta)\n else:\n return -rho / (1 - beta) + 1 / (1 - beta) >= abs(delta)\n\n\ndef get_delta_limits(beta, rho):\n \"\"\"Get extremal values of Delta for fixed beta and rho.\"\"\"\n lims = [0, 0] # list since pairs are immutable\n if rho < (1 - beta):\n lims[0] = -rho / (1 - beta)\n else:\n lims[0] = rho / beta - 1 / beta\n if rho < beta:\n lims[1] = rho / beta\n else:\n lims[1] = -rho / (1 - beta) + 1 / (1 - beta)\n return (lims[0], lims[1])\n\n\ndef get_rho_limits(beta, delta):\n \"\"\"Get extremal values of rho for fixed beta and delta.\"\"\"\n if delta < 0:\n # return (-rho / (1 - beta), rho / beta - 1 / beta)\n return (-(1 - beta) * delta, beta * delta + 1)\n else: # delta >0\n return (beta * delta, 1 - (1 - beta) * delta)\n\n\ndef uniform_cover_generator(beta, rho_spacing=0.05, delta_spacing=0.05, ensemble='simple_undirected'):\n \"\"\"\n Generate a list of parameters covering the allowed region uniformly.\n Notes\n -----\n Return values in the density space.\n \"\"\"\n for rho in np.arange(0, 1 + rho_spacing, rho_spacing):\n for Delta in np.arange(-1, 1 + delta_spacing, delta_spacing):\n if in_allowed_region(rho, Delta, beta):\n yield (rho, Delta)\n\n\ndef phase_transition_generator(delta_list, rho, beta):\n \"\"\"\n Generate (p_out, p_in) pairs for a list of delta values at fixed rho.\n The GMG will undergo a detectability phase transition as delta nears 0.\n \"\"\"\n for delta in delta_list:\n yield to_probability_space(rho, delta, beta)","sub_path":"geometrySBM.py","file_name":"geometrySBM.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"297586314","text":"# coding=utf-8\n\"\"\"Calling DjangoFloor signals\n===========================\n\nPublic functions\n****************\n\nDefine the `df_call` function for calling signals and its shortcut `call`.\nCan activate the test mode, allowing to retain signal calls (simplifying tests).\nActivate this mode with `set_test_mode(True)` and fetch called signals with `pop_called_signals()`.\n\n\nInternal functions\n******************\n\nDefine several Celery tasks, get signals encoders/decoders (JSON by default) and a function for automatically discover\nall signals.\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import\nimport logging\nfrom django.utils.module_loading import import_string\nfrom django.conf import settings\nfrom django.http import HttpRequest\nfrom djangofloor.utils import import_module\nfrom django.utils.lru_cache import lru_cache\nfrom djangofloor.decorators import REGISTERED_SIGNALS, RedisCallWrapper, SignalRequest\n\nimport celery\n# used to avoid strange import bug with Python 3.2/3.3\n# noinspection PyStatementEffect\ncelery.__file__\nfrom celery import shared_task\n\n__author__ = 'Matthieu Gallet'\nlogger = logging.getLogger('djangofloor.signals')\n\nUSER = 'users'\nSESSION = 'sessions'\nBROADCAST = 'broadcast'\nWINDOW = 'window'\n# special value used for plain HTTP requests\nRETURN = 'return'\n\n__internal_state = {'accumulate': False, 'called_signals': []}\n\n\ndef set_test_mode(test=True):\n \"\"\" Activate (or deactivate) test mode, allowing to gather all signals calls (instead of actually calling them)\n :param test:\n :type test: :class:`bool`\n \"\"\"\n __internal_state['accumulate'] = test\n\n\ndef pop_called_signals():\n \"\"\" return the list of called signals with their requests and arguments when `test_mode` is set to `True`.\n\n :return: list of `(signal_name, request, sharing, kwargs)`\n :rtype: :class:`list`\n \"\"\"\n values = __internal_state['called_signals']\n __internal_state['called_signals'] = []\n return values\n\n\n@shared_task(serializer='json')\ndef signal_task(signal_name, request_dict, from_client, kwargs):\n \"\"\"Unique Celery tasks, transparently called for delayed signal calls.\n\n You should not have to use it.\n\n :type signal_name: :class:`str`\n :param request_dict: a :class:`djangofloor.decorators.SignalRequest` serialized as a :class:`dict` object\n :type request_dict: :class:`dict`\n :type from_client: :class:`bool`\n :type kwargs: :class:`dict`\n \"\"\"\n import_signals()\n request = SignalRequest(**request_dict)\n logger.debug('delayed signal %s called' % signal_name)\n if signal_name not in REGISTERED_SIGNALS:\n return\n # noinspection PyBroadException\n try:\n for wrapper in REGISTERED_SIGNALS[signal_name]:\n if not isinstance(wrapper, RedisCallWrapper) or not wrapper.delayed:\n continue\n if (from_client and not wrapper.allow_from_client) or (wrapper.auth_required and not request.session_key):\n continue\n prepared_kwargs = wrapper.prepare_kwargs(kwargs)\n wrapper.function(request, **prepared_kwargs)\n except Exception as e:\n logger.error('Exception encountered in signal %s' % signal_name, exc_info=1)\n raise e\n\n\n@shared_task(serializer='json')\ndef delayed_task(signal_name, request_dict, sharing, from_client, kwargs):\n \"\"\"\n :param signal_name:\n :param request_dict:\n :param sharing:\n :param from_client:\n :param kwargs:\n :return:\n \"\"\"\n import_signals()\n request = SignalRequest(**request_dict)\n df_call(signal_name, request, sharing=sharing, from_client=from_client, kwargs=kwargs)\n\n\n@lru_cache()\ndef import_signals():\n \"\"\"Import all `signals.py` files to register signals.\n \"\"\"\n for app in settings.INSTALLED_APPS:\n try:\n import_module('%s.signals' % app)\n except ImportError:\n pass\n\n\n@lru_cache()\ndef get_signal_encoder():\n \"\"\" return the class for encoding signal data to JSON. The result is cached.\n\n Only import `settings.FLOOR_SIGNAL_ENCODER` and cache the results.\n \"\"\"\n return import_string(settings.FLOOR_SIGNAL_ENCODER)\n\n\n@lru_cache()\ndef get_signal_decoder():\n \"\"\" return the class for decoding signal data to JSON. The result is cached.\n\n Only import `settings.FLOOR_SIGNAL_DECODER` and cache the results.\n \"\"\"\n return import_string(settings.FLOOR_SIGNAL_DECODER)\n\n\ndef call(signal_name, request, sharing=None, **kwargs):\n \"\"\" Call a signal and all the three kinds of receivers can receive it:\n * standard Python receivers\n * Python receivers through Celery (thanks to the `delayed` argument)\n * JavaScript receivers (through websockets)\n\n\n This is a shortcut for `djangofloor.tasks.df_call` but that forbids several signal argument names (`signal_name`,\n `request` and `sharing`. Directly use `djangofloor.tasks.df_call` if you want to use any of the argument names,\n or if you want to specify more options (like wait some time before executing code).\n Example:\n\n .. code-block:: python\n\n from djangofloor.tasks import call, SESSION\n from djangofloor.decorators import connect\n\n def any_function(request):\n call('myproject.signal_name', request, sharing=SESSION, arg1=\"arg1\", arg2=42)\n\n @connect('myproject.signal_name')\n def signal_name(request, arg1, arg2):\n print(arg1, arg2)\n\n\n :param signal_name:\n :type signal_name: :class:`str`\n :param request: initial request, giving information about HTTP sessions and its user\n :type request: :class:`djangofloor.decorators.SignalRequest` or :class:`django.http.HttpRequest`\n :param sharing:\n * `None`: does not propagate to the JavaScript (client) side\n * `WINDOW`: only to the browser window that initiated the original request\n * `USER`, `SESSION`, `BROADCAST`: propagate to the request user, only to its current session, or\n to all currently logged-in users\n * {'users': ['username1', 'username2'], 'groups': ['group1', 'group2'], 'broadcast': True}\n (or any subset of these keys)\n * `RETURN` return result values of signal calls to the caller\n\n :param kwargs: arguments for the receiver\n \"\"\"\n return df_call(signal_name, request, sharing=sharing, from_client=False, kwargs=kwargs)\n\n\ndef df_call(signal_name, request, sharing=None, from_client=False, kwargs=None, countdown=None, expires=None, eta=None):\n \"\"\" Call a signal and all the three kinds of receivers can receive it:\n * standard Python receivers\n * Python receivers through Celery (thanks to the `delayed` argument)\n * JavaScript receivers (through websockets)\n\n Do not use it directly, you should prefer use the `call` function.\n\n :param signal_name:\n :type signal_name: :class:`str`\n :param request: initial request, giving information about HTTP sessions and its user\n :type request: :class:`djangofloor.decorators.SignalRequest` or :class:`django.http.HttpRequest`\n :param sharing:\n * `None`: does not propagate to the JavaScript (client) side\n * `WINDOW`: only to the browser window that initiated the original request\n * `USER`, `SESSION`, `BROADCAST`: propagate to the request user, only to its current session,\n or to all currently logged-in users\n * {'users': ['username1', 'username2'], 'groups': ['group1', 'group2'], 'broadcast': True}\n (or any subset of these keys)\n * `RETURN` return result values of signal calls to the caller\n\n :param from_client: True if this call comes a JS client\n :param kwargs: arguments for the receiver\n :param countdown: Wait `countdown` seconds before actually calling the signal.\n Check `Celery doc `_\n :type countdown: :class:`int`\n :param eta: Wait until `eta` before actually calling the signal.\n Check `Celery doc `_\n :type eta: :class:`datetime.datetime`\n :param expires: Wait until `eta` before actually calling the signal.\n Check `Celery doc `_\n :type expires: :class:`datetime.datetime` or :class:`int`\n :return:\n if sharing != `RETURN`: return `None`\n else: call `djangofloor.tasks.df_call` on each element of the call result\n \"\"\"\n import_signals()\n if kwargs is None:\n kwargs = {}\n if __internal_state['accumulate']: # test mode activated\n __internal_state['called_signals'].append((signal_name, request, sharing, kwargs))\n return\n celery_kwargs = {}\n if expires:\n celery_kwargs['expires'] = expires\n if eta:\n celery_kwargs['eta'] = eta\n if countdown:\n celery_kwargs['countdown'] = countdown\n if celery_kwargs:\n delayed_task.apply_async([signal_name, request.to_dict(), sharing, from_client, kwargs], **celery_kwargs)\n return\n\n result = []\n if isinstance(request, HttpRequest):\n request = SignalRequest.from_request(request)\n if sharing is not None and sharing != RETURN:\n logger.debug('JS signal %s called' % signal_name)\n if settings.FLOOR_USE_WS4REDIS:\n from djangofloor.df_ws4redis import ws_call\n ws_call(signal_name, request, sharing, kwargs)\n else:\n from djangofloor.df_redis import push_signal_call\n push_signal_call(request, signal_name, kwargs=kwargs, sharing=sharing)\n\n must_delay = False\n synchronous = False\n for wrapper in REGISTERED_SIGNALS.get(signal_name, []):\n if (not wrapper.allow_from_client and from_client) or (wrapper.auth_required and not request.session_key):\n continue\n if wrapper.delayed:\n must_delay = True\n else:\n synchronous = True\n prepared_kwargs = wrapper.prepare_kwargs(kwargs)\n wrapper_result = wrapper.function(request, **prepared_kwargs)\n if wrapper_result:\n result += list(wrapper_result)\n if synchronous:\n logger.debug('synchronous signal %s called' % signal_name)\n if must_delay and settings.USE_CELERY:\n logger.debug('delayed signal %s called' % signal_name)\n signal_task.apply_async([signal_name, request.to_dict(), from_client, kwargs])\n if sharing == RETURN:\n return result\n for data in result:\n df_call(data['signal'], request, sharing=data.get('sharing', SESSION), from_client=False,\n kwargs=data['options'])\n","sub_path":"djangofloor/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"5218879","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ncomputer = pd.read_csv(\"D:/Data_Science/Data_Sci_Assignment/Multi Linear Regression/Computer_Data.csv\")\r\n\r\ncomputer= computer.drop([\"Unnamed: 0\"],axis=1)\r\n\r\ncomputer.speed.value_counts()\r\ncomputer.hd.value_counts()\r\ncomputer.ram.value_counts()\r\ncomputer.screen.value_counts()\r\ncomputer.cd.value_counts()\r\ncomputer.multi.value_counts()\r\ncomputer.premium.value_counts()\r\ncomputer.ads.value_counts()\r\ncomputer.trend.value_counts()\r\n## in computer data set ram, cd, screen, multi, premium are categorical data. So converting them into factor\r\n\r\nimport seaborn as sn\r\nsn.pairplot(computer)\r\n\r\ncorr_values = computer.corr()\r\n\r\n\r\n##The correlation between ads--trend is greater than price--ads and price--trend.\r\n## But the scatter plot shows that it has somewhat curvilinear shape \r\n\r\n## The same way, speed-- trend has higher correlation value than price--speed and price --trend\r\n##But the scatter plot doesnt show any linearity problem between speed and trend \r\n\r\n## The correlation betweeen hd--ads is higher than price--hd and lower than price--ads. \r\n## The scatter plot shows kind off linearity with high scatter of data.\r\n\r\n## The correlation between hd--trend is higher than price--hd and also price--trend.\r\n##The scatter plot shows kind off linearity, with high scatter of data.\r\n\r\n## Knowing this let us build the model\r\n\r\ncomputer_ram= pd.get_dummies(computer[\"ram\"])\r\ncomputer_cd=pd.get_dummies(computer[\"cd\"])\r\ncomputer_screen = pd.get_dummies(computer[\"screen\"])\r\ncomputer_premium = pd.get_dummies(computer[\"premium\"])\r\ncomputer_multi = pd.get_dummies(computer[\"multi\"])\r\n\r\ncomputer_ram.columns=[\"two\",\"four\",\"eight\",\"sixteen\",\"twentyfour\",\"thirtytwo\"]\r\ncomputer_screen.columns=[\"fourteen\",\"fifteen\",\"seventeen\"]\r\ncomputer_multi.columns = [\"no_multi\",\"yes_multi\"]\r\ncomputer_cd.columns=[\"no_cd\",\"yes_cd\"]\r\ncomputer_premium.columns=[\"no_pre\",\"yes_pre\"]\r\n\r\ncomputer_final= pd.concat([computer,computer_cd,computer_ram,computer_multi,computer_premium,computer_screen],axis=1)\r\n\r\ncomputer_final=computer_final.drop([\"ram\"],axis=1)\r\ncomputer_final=computer_final.drop([\"cd\"],axis=1)\r\ncomputer_final=computer_final.drop([\"screen\"],axis=1)\r\ncomputer_final= computer_final.drop([\"multi\"],axis=1)\r\ncomputer_final= computer_final.drop([\"premium\"],axis=1)\r\n\r\n## splitting data into test data and train data\r\nfrom sklearn.model_selection import train_test_split\r\ntrain_data,test_data = train_test_split(computer_final)\r\nimport statsmodels.formula.api as smf\r\n\r\n#model1\r\nm1= smf.ols(\"price~speed+hd+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\", data= train_data).fit()\r\nm1.summary() ## 0.792(r squared)\r\n\r\n## all the variables are significant. So, there is no collinearity problem.\r\n\r\n\r\n##transforming the model m1 for better r squared value\r\n\r\n##transform 1\r\n## applying sqrt to speed\r\nm1_fin= smf.ols(\"price~np.sqrt(speed)+hd+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\", data= train_data).fit()\r\nm1_fin.summary()## 0.798(r squared)\r\n\r\n##transform2\r\n## applying sqrt to speed, hd and log to price\r\nm2_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+hd+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\",data= train_data).fit()\r\nm2_fin.summary() ## 0.811( r squared)\r\n\r\n## transform3\r\n## applying sqrt to speed,hd and log to price\r\nm3_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+np.sqrt(hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\",data = train_data).fit()\r\nm3_fin.summary() ##0.819 (r squared)\r\n\r\n## transform4\r\n## Applying quadratic to speed and hd, sqrt to speed,hd,ads and log to output variable price\r\nm4_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+speed*speed+np.sqrt(hd)+(hd*hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+np.sqrt(ads)+trend\",data = train_data).fit()\r\nm4_fin.summary() ##0.827 (r squared)\r\n\r\n##tranform5\r\n## Applying quadratic to speed,hd. Log to hd, sqrt to speed,ads and log to output variable.\r\nm5_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+(speed*speed)+np.log(hd)+(hd*hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+np.sqrt(ads)+trend\",data = train_data).fit()\r\nm5_fin.summary() ## 0.830 ( r squared)\r\n\r\n## taking transform1 and predicting the values and train,test rmse\r\nm1_fin= smf.ols(\"price~np.sqrt(speed)+hd+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\", data= train_data).fit()\r\nm1_fin.summary()## 0.798\r\n\r\n##train prediction\r\npred = m1_fin.predict(train_data)\r\n\r\n##train residuals\r\nm1_finres = train_data[\"price\"]-pred\r\n\r\n##train rmse\r\nm1_finrmse = np.sqrt(np.mean(m1_finres*m1_finres)) ## 261\r\n\r\n##test prediction\r\ntest_pre = m1_fin.predict(test_data)\r\n\r\n## test residuals\r\nm1_fintestres = test_data[\"price\"]-test_pre\r\n\r\n##test rmse\r\nm1_fintestrmse= np.sqrt(np.mean(m1_fintestres*m1_fintestres))## 271 (4)\r\n\r\n## taking transform2 and predicting the values and train, test rmse\r\nm2_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+hd+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\",data= train_data).fit()\r\nm2_fin.summary() ##0.811\r\n\r\n##train prediction\r\npred2= m2_fin.predict(train_data)\r\npred21 = np.exp(pred2)\r\n\r\n##train residuals\r\nm2_finres = train_data[\"price\"]- pred21\r\n\r\n##train rmse\r\nm2_finrmse = np.sqrt(np.mean(m2_finres*m2_finres)) ##253\r\n\r\n## test prediction\r\nm2_fintestpred1 = m2_fin.predict(test_data)\r\nm2_fintestpred11= np.exp(m2_fintestpred1)\r\n##test residuals\r\nm2_fintestres = test_data[\"price\"]- m2_fintestpred11\r\n## test rmse\r\nm2_fintestrmse = np.sqrt(np.mean(m2_fintestres*m2_fintestres))## 248 (5)\r\n\r\n\r\n##taking transform3 and predicting the values and train, test rmse\r\nm3_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+np.sqrt(hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\",data = train_data).fit()\r\nm3_fin.summary() ##0.814\r\n\r\n##train prediction\r\nm3_finpred = m3_fin.predict(train_data)\r\nm3_finpred1 = np.exp(m3_finpred)\r\n\r\n##train residuals\r\nm3_finres = train_data[\"price\"]-m3_finpred1\r\n\r\n##train rmse\r\nm3_finrmse = np.sqrt(np.mean(m3_finres*m3_finres)) ##261\r\n\r\n## test prediction\r\nm3_fintestpred = m3_fin.predict(test_data)\r\nm3_fintestpred1 = np.exp(m3_fintestpred)\r\n\r\n##test residuals\r\nm3_fintestres= test_data[\"price\"]-m3_fintestpred1\r\n\r\n##test rmse\r\nm3_fintestrmse = np.sqrt(np.mean(m3_fintestres*m3_fintestres)) ##241 (4)\r\n\r\n##taking transform4 and predicting the values and train, test rmse\r\nm4_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+speed*speed+np.sqrt(hd)+(hd*hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+np.sqrt(ads)+trend\",data = train_data).fit()\r\nm4_fin.summary() ##0.827 (r squared)\r\n\r\n##train prediction \r\nm4_finpred = m4_fin.predict(train_data)\r\nm4_finpred1 = np.exp(m4_finpred)\r\n\r\n##train residuals\r\nm4_finres= train_data[\"price\"]-m4_finpred1 \r\n\r\n## train rmse\r\nm4_finrmse = np.sqrt(np.mean(m4_finres*m4_finres)) ## 235\r\n\r\n##test prediction\r\nm4_finptestpred = m4_fin.predict(test_data)\r\nm4_fintestpred1= np.exp(m4_finptestpred)\r\n## test residuals\r\nm4_fintestres = test_data[\"price\"]- m4_fintestpred1\r\n##test rmse\r\nm4_fintestrmse = np.sqrt(np.mean(m4_fintestres*m4_fintestres)) ## 233 (7)\r\n\r\n\r\n## taking transform5 and predicting the values and train, test rmse\r\n\r\nm5_fin = smf.ols(\"np.log(price)~np.sqrt(speed)+(speed*speed)+np.log(hd)+(hd*hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+np.sqrt(ads)+trend\",data = train_data).fit()\r\nm5_fin.summary() ## 0.823(r squared)\r\n\r\n## train predicition\r\nm5_finpred = m5_fin.predict(train_data)\r\nm5_finpred1 = np.exp(m5_finpred)\r\n\r\n##train residuals\r\nm5_finres = train_data[\"price\"]- m5_finpred1\r\n\r\n##train rmse\r\nm5_finrmse = np.sqrt(np.mean(m5_finres*m5_finres))## 238\r\n\r\n## test prediction\r\nm5_fintestpred = m5_fin.predict(test_data)\r\nm5_fintestpred1 = np.exp(m5_fintestpred)\r\n## test residuals\r\nm5_finres = test_data[\"price\"]-m5_fintestpred1\r\n## test rmse\r\nm5_finrmse = np.sqrt(np.mean(m5_finres*m5_finres)) ##231 (7)\r\n\r\n\r\n##Looking at all the r square and rmse values we decide the final model.\r\n## The selected model has r square of 0.814 and train rmse is 245 and test rmse is 241. So, the final model is:\r\n\r\nfin = smf.ols(\"np.log(price)~np.sqrt(speed)+np.sqrt(hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\",data = train_data).fit()\r\nfin.summary() ##0.814\r\n\r\n## Training the model with the whole data\r\nfinal = smf.ols(\"np.log(price)~np.sqrt(speed)+np.sqrt(hd)+two+four+eight+sixteen+twentyfour+thirtytwo+fourteen+fifteen+seventeen+no_cd+yes_cd+no_multi+yes_multi+no_pre+yes_pre+ads+trend\",data = computer_final).fit()\r\nfinal.summary() ## 0.814 ( r square)\r\n\r\nfinal_pred= final.predict(computer_final)\r\n\r\n##validating\r\n###Linearity\r\nplt.scatter(computer_final[\"price\"],final_pred,c='r');plt.xlabel(\"Actual values\"); plt.ylabel(\"Fitted values\")\r\n## it is linear\r\n\r\n## Homoscadasticity\r\nplt.scatter(final_pred,final.resid_pearson,c='r'),plt.axhline(y=0,color=\"blue\");plt.xlabel(\"Fitted values\");plt.ylabel(\"Residuals\")\r\n## Little Homoscadasticity\r\n\r\n##Normality\r\n## Plotting the historgram to see if the errors are normally distributed or not\r\n\r\nplt.hist(final.resid_pearson)\r\n## Because of the outliers present in data,it is showing the errors are not mormally distributed\r\n\r\n##Plotting QQplot to properly visualise if the errors are normally distributed or not\r\nimport pylab\r\nimport scipy.stats as sc\r\n\r\nsc.probplot(final.resid_pearson, dist=\"norm\", plot=pylab)\r\n## the data is normally distributed but there are outliers so the ends of the graphs are not on the same line\r\n\r\n\r\n","sub_path":"Computer_Data_amit.py","file_name":"Computer_Data_amit.py","file_ext":"py","file_size_in_byte":10239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"73690831","text":"\ncoun_tax = 100\nwater = 30\ng_and_e = 80\nloan = 200\nbb = 22\nrent = 575\npart_pay = 650\n\nprint(\"\\t\\t\\t Money Calculator\")\nprint('\\t\\t\\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\nprint(\"\\t\\t\\tProgrammed by: Grant Peach 2013\")\nprint(\"\\t\\t\\t~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\nprint('\\t\\t\\t U.K')\n\n\n#works out your left over money and add's partner's pay to the amount entered\ntotal = input(\"\\nPlease enter your monthly pay:\")\ntotal = int(total)\ntotal = total + part_pay\ntotal = total - coun_tax - water - g_and_e - loan - bb - rent\n\n\n#prints the total left over\nprint(\"\\nHere is what you will have left after all outgoings £\",total)\n\n\n#ask's if there is any more money that is not forseen\ndata_1 = input(\"\\nIs there any more that needs to be subtracted?\")\nif data_1 == \"yes\":\n data_2 = input(\"\\nOK, please enter the amount:\")\n data_2 = int(data_2)\n total = total - data_2\n print(\"\\nHere is your new updated total £\",total)\n input(\"\\nPress Enter to close the program\")\nelse:\n data_1 == \"no\"\n print('\\nBrilliant!')\n input(\"\\nOK, Please press Enter to close the program and have a nice day!\")\n\n\n\n","sub_path":"Finance Calculator.py","file_name":"Finance Calculator.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"553449037","text":"COMMANDS = [\"UPLOAD_FILE\", \"BLUP\", \"DOWNLOAD_FILE\",\n \"DELETE_FILE\", \"DELETE_DIR\", \"MAKE_DIR\",\n \"NEW_USER\", \"LOG_IN\", \"MOVE_FILE\", \"MOVE_DIR\",\n \"ASK_SHARE\", \"SHARE\", \"GET_USERS\", \"HI THERE CLOUD!\", \"COPY_FILE\",\n \"GET_REQUESTS\", \"RENAME_FILE\", \"DONT_SEND\", \"SEND_MY_FILES\",\n \"ASK_FILE\", \"DENY_ACCESS\", \"GET_ONLINE_USERS\"]\nGET_ONLINE_USERS = \"GET_ONLINE_USERS\"\nNO_USERS = 0\nPROJECT_FILES = \"\\\\Project Files\"\nCLOUDIO_PATH = r\"\\Project Files\\Icons & Applications\\GUI App\\Shortcut\\Cloudio.exe.lnk\"\nCLOUDIO_NAME = \"Cloudio\"\nPROJECT_FILES_CHECK = \"Project Files\"\nPATH_TO_STARTUP = \"\\\\Project Files\\\\Programs\\\\Startup.pyw\"\nDOCUMENTS_PATH = \"C:\\\\Users\\\\%s\\\\Documents\"\nPATH_TO_PROJECT_FILES = \"C:\\\\Users\\\\%s\\\\Documents\\\\Project Files\"\nNO_ONLINE = \"No one is online\"\nTRASH_DIRECTORY = \"Trash\"\nDENY_ACCESS = \"DENY_ACCESS\"\nRENAME_FILE = \"RENAME_FILE\"\nSEND_MY_FILES = \"SEND_MY_FILES\"\nASK_FOR_SHARE = \"ASK_SHARE\"\nASK_FOR_FILE = \"ASK_FILE\"\nDONT_SEND = \"DONT_SEND\"\nGET_REQUESTS = \"GET_REQUESTS\"\nCOPY_FILE = \"COPY_FILE\"\nASK_SOMEONE = \"Ask Someone For Their Files\"\nASK_FOR_FILES = \"Ask Other Users\"\nASK_BTN = \"Ask\"\nSHARE = \"SHARE\"\nFILE_SEPARATOR = \" |$|$ \"\nSTARTUP_DIRECTORY = r'C:\\Users\\%s\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'\nGET_USERS = \"GET_USERS\"\nPERMISSION_DENIED = \"permission denied\"\nMAKE_DIR = \"MAKE_DIR\"\nMOVE_DIR = \"MOVE_DIR\"\nRE_UPLOAD_FILE = \"RE_UPLOAD_FILE\"\nNEW_USER = \"NEW_USER\"\nMOVE_FILE = \"MOVE_FILE\"\nLOG_IN = \"LOG_IN\"\nGET_OUT = \"GET_OUT\"\nUSER_OFFLINE = \"User Offline\"\nREQUEST_SEPARATOR = \" |||| \"\nSOMETHING_WENT_WRONG = \"something went wrong\"\nNO_USER = \"NO_USER\"\nINCORRECT_PASSWORD = \"password incorrect\"\nMESSAGE_SENT = \"request sent\"\nTHREE_PARAMETER = 3\nSHORT_SLEEP = 1\nERROR_FORMAT = \"error\"\nCHOOSE_FILE_TITLE = \"Choose A File\"\nCHOOSE_FILE_BTN = \"Choose\"\nFINE = \"logged in\"\nWAIT = \"WAIT\"\nSEPERATOR = \" $$$ \"\nSTARTER = 1\nGUI_INIT = -1\nGUI_TITLE = \"Tomer Cloud Inc.\"\nQUIT_BOTTON = \"Quit\"\nMENU = \"Menu\"\nSIGN_IN_PANEL_SIZE = (270, 225)\nSELECT_DIRECTORY = \"Folder Selection\"\nDELETE_FLAG_REG = \"delete_flag\"\nCHOOSE_YOUR_CLOUD_STATIC = \"Choose Your Cloud\"\nNO_PASSWORD = \"\"\nPYTHON = \"python\"\nBLANK = \"\"\nBROWSE_BTN = \"Browse...\"\nSUBMIT_BTN = \"Submit\"\nLONG_SLEEP = 3\nSYSTEM_REGISTER_PANEL_SIZE = (250, 225)\nINIT_CLOUD_GUI_SIZE = (300, 200)\nCHOOSE_USER_GUI_SIZE = (350, 250)\nNO_REQUEST_DB = 0\nSHARE_MODE = 0\nASK_FOR_SHARE_MODE = 1\nUSERNAME_STATIC = \"Username\"\nPASSWORD_STATIC = \"Password\"\nREGISTER_TITLE = \"Register\"\nHOME_PAGE_TITLE = \"Home\"\nHI_TITLE = \"Hi \"\nUSERNAME_DOESNT_EXIST = \"Username Doesn't Exist\"\nSHARE_BTN = \"Share\"\nSHARE_TITLE = \"Share\"\nCHOOSE_FILE_TO_SHARE = \"Choose A File To Share\"\nCHOOSE_USER_SHARE_STATIC = \"Choose A User To Share The File With\"\nCHOOSE_FILE_TO_ASK = \"Choose A File To Ask For\"\nCHOOSE_USER_SHARE_TITLE = \"Choose A User\"\nNEXT_BTN = \"Next...\"\nASK_FOR_SHARE_BTN = \"Ask For Share\"\nSIGN_UP_BTN = \"Sign Up\"\nSELECT = r\"Select - Sign Up \\ Log In\"\nLOG_IN_BTN = \"Log In\"\nUSERNAME_EXISTS = \"username exists already\"\nSTART = 0\nRENAME_FILE_MODE = 1\nNOT_RENAME_FILE_MODE = 0\nMID_GUI_MODE = 2\nDIR_ADDED = \"directory created successfully\"\nNO_DIR_ADDED = \"no dir added\"\nDELETE_FILE = \"DELETE_FILE\"\nDELETE_DIR = \"DELETE_DIR\"\nCLIENT_REG = r\"SOFTWARE\\WOW6432Node\\Cloud Server\\Client\"\nSERVER_REG = r\"SOFTWARE\\WOW6432Node\\Cloud Server\"\nOBSERVER = \"observer\"\nALLOW_OBS = \"Allow\"\nDENY_OBS = \"Deny\"\nALLOW_BLUP = \"Allow\"\nDENY_BLUP = \"Deny\"\nPATH_CREATED = \"path created!\"\nCONTINUE = \"CONTINUE\"\nFOLDER_MANAGER_REG = \"folder_manager\"\nYES_REG = \"yes\"\nNO_REG = \"no\"\nAPPLICATION_GUI_NAME = \"CloudIO.exe\"\nINFO_FILE = \"info.txt\"\nLOGIN = \"LOGIN\"\nAPP_MODE = '0'\nCLIENT_MODE = '1'\nSIGNUP = \"SIGNUP\"\nAPPINFO = \"\\\\AppInfo\"\nREQ_SOCK_COMMAND = \"HI THERE CLOUD!\"\nNO_CLOUD = \"\"\nDOT = '.'\nNO_COMMAND = \"BLUP\"\nYAY = \"YAY\"\nFILE_ALREADY_IN_CLOUD = \"file in cloud\"\nEND = -1\nMSG_LEN = 1024\nBYTE_NUMBER = 1024\nIP_COMMAND = \"IP\"\nCLIENT_CLOUD_COMMAND = \"CLIENT_CLOUD\"\nUSERNAME_COMMAND = \"USERNAME\"\nNO_ENTERS = 0\nPORT_COMMAND = \"PORT\"\nFILE_DOESNT_EXIST = \"file doesn't exist\"\nFILE_END = b'NO_MORE'\nFILE_SENT = 'file uploaded successfully !!!'\nFILE_SHARED = \"File shared!\"\nMSG_FILL = 4\nNO_PARAMETERS = 0\nONE_PARAMETER = 1\nTWO_PARAMETER = 2\nTHREE_PARAMETERS = 3\nPROPORTION = 0\nBORDER_LARGE = 15\nBORDER_SMALL = 7\nBYTE = 4\nUSERNAME_REG = \"username\"\nPASSWORD_REG = \"password\"\nCLOUD_REG = \"CLIENT_CLOUD\"\nPRM = 1\nPORT = 6969\nUSER_ADDED = \"new username added\"\nNOT_ADDED = \"username already exists\"\nSERVER_FELL = str(1)\nCLOUD_FORMAT = 'cloud'\nADDER = 1\nTEMPORARY_FILES = r\"E:\\12\\Project\\Temp\"\nUPLOAD_FILE = \"UPLOAD_FILE\"\nDOWNLOAD_FILE = \"DOWNLOAD_FILE\"\nNOT_IN_CLOUD = \"file doesn't exist in cloud\"\nREADY = \"ready\"\nQUIT_BTN = 'Quit'\nQUIT_APP = 'Quit Application'\nMENU_GUI = 'Menu&'\nNONE_VALID = \"you entered a non valid value\"\nREQUEST_EXISTS = \"You Have Sent This Request Already\"\nDB_COMMAND_USERS = \"select * from users\"\nDB_COMMAND_REQUESTS = \"select * from requests\"\nDELETE_COMMAND = \"delete from requests where id_to = %s\"\nNO_REQUESTS = \"NO_REQUESTS\"\nNOT_FOUND = 0\nTHIRD = 2\nDB_COMMAND_USERNAME = \"select username from users\"\nOK = \"ok\"\nBYE = \"bye\"\nSTART_COUNT = 1\nSECOND = 1\nFILE_DOWNLOADED = 'file is opening right now...'\nIP_REG = \"IP\"\nPORT_REG = \"port\"\nNO_FILES = \"No Files Were Found\"\nREG_CONST = 0\n","sub_path":"Classes/CONSTS.py","file_name":"CONSTS.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"422152160","text":"\"\"\"\nii_session\n\ndisplay all data from a session, including aligned behavior and all mean cell activity\n\"\"\"\n##\nfrom ii_params import *\n##\nfrom routines import first_n_of_4_trials_types, num2str\n\n## Params\n\nnormalize_each_slice = False\nonset_choice = 4 # phase idx or puff_onset / lick_onset / hall_onset / sig_onset[***]\ngroupby_choice = ['outcome'] # outcome, side, choice, direction, dummy\nsort_cells_by_grp = 0 #which group to sort cells by\nsort_method = 'max' # 'max' / 'argmax' / 'diff' / None\nnormalize_to = 'local' # refers to dff; local(=this analysis) / global(=whole signal)\nnormalize_behav = True # normalize the aligned behavior variables\nsignal = is_sig # dff / is_sig\npad0t,pad1t = (.2,1.5) # secs, note that this is a cross-script param\npad0i,pad1i = map(lambda x: int(np.round(x/data.Ts)), [pad0t,pad1t])\n\n# trial filters\ntri = trials\n#tri = tri[tri.dur==2.6]\n#tri = tri[tri.delay==0.2]\n#tri = first_n_of_4_trials_types(tri, n=9)\n\n## Define onsets/triggers\nif type(onset_choice)==int:\n # assumes a phase-triggered onset is desired\n onset = phases[phases.phase==onset_choice]\n onset.ix[:,'abs_onset'] = onset.start_time\n onset = populate_onsets(onset, tri)\n onset.ix[:,'frame'] = onset.apply(onset_to_frame, axis=1, args=(i2c,data.Ts))\nelse:\n onset = onset_choice\n\n# cut filtered trials\nonset = onset[onset.trial.isin(tri.idx.values)]\n\n## Apply grouping and do extractions\ngb = onset.groupby(groupby_choice)\nresults = [] # for each grp: (id, n, mean_dff, mean_mov)\n\nfor gr_id,g in gb:\n # ensure that all desired onsets in this group can get their desired pads\n g = g[(g.frame+pad1i < dff.shape[0]).values]\n\n # convert onset times to frame numbers\n abstime_idx = g.abs_onset.dropna().values\n frame_idx = g.frame.dropna().astype(int).values\n slice_idxs = [slice(on-pad0i, on+pad1i) for on in frame_idx]\n\n # dff extraction\n slices_dff = np.array([signal[si] for si in slice_idxs]).astype(float)\n if normalize_each_slice:\n slices_dff -= np.mean(slices_dff[:,:pad0i,:], axis=1)[:,None,:]\n agg = pf.Series(np.mean(slices_dff,axis=0), Ts=data.Ts)\n agg.t0 = -pad0t\n dff_dic = dict(slices=slices_dff, mean=agg)\n\n # motion extraction\n slices_motion = np.array([np.abs(motion[['x_local','y_local','metric']].iloc[si,:].values) for si in slice_idxs])\n agg_motion = pf.Series(np.mean(slices_motion,axis=0), Ts=data.Ts)\n agg_motion.t0 = -pad0t\n motion_dic = dict(slices=slices_motion, mean=agg_motion)\n\n # lick & puff & hall extraction\n slice_i = extract_slices_by_times(ar, abstime_idx, pad0t, pad1t)\n # lick\n slices_lick = np.array([lick_signal.iloc[sl].values for sl in slice_i])\n agg_lick = pf.Series(np.mean(slices_lick,axis=0), Ts=ar.Ts)\n agg_lick.t0 = -pad0t\n lick_dic = dict(slices=slices_lick, mean=agg_lick)\n # puff\n slices_puff = np.array([puff_signal.iloc[sl].values for sl in slice_i])\n agg_puff = pf.Series(np.mean(slices_puff,axis=0), Ts=ar.Ts)\n agg_puff.t0 =-pad0t\n puff_dic = dict(slices=slices_puff, mean=agg_puff)\n # hall\n if hall_signal is not None:\n slices_hall = np.array([hall_signal.iloc[sl].values for sl in slice_i])\n agg_hall = pf.Series(np.mean(slices_hall,axis=0), Ts=ar.Ts)\n agg_hall.t0 =-pad0t\n hall_dic = dict(slices=slices_hall, mean=agg_hall)\n else:\n hall_dic = {}\n\n # solenoid extraction\n slice_i = extract_slices_by_times(spout, abstime_idx, pad0t, pad1t)\n slices_spout = np.array([spout.iloc[sl].values for sl in slice_i])\n agg_spout = pf.Series(np.mean(slices_spout,axis=0), Ts=spout.Ts)\n agg_spout.t0 = -pad0t\n spout_dic = dict(slices=slices_spout, mean=agg_spout)\n\n # behavior movie extraction (trace of behavior defined as `b`)\n if b is not None:\n slice_b = extract_slices_by_times(b, abstime_idx, pad0t, pad1t)\n slices_mov = np.array([b[sl] for sl in slice_b])\n agg_mov = pf.Series(np.mean(slices_mov,axis=0), Ts=b.Ts)\n agg_mov.t0 = -pad0t\n mouth_dic = dict(slices=slices_mov, mean=agg_mov)\n else:\n mouth_dic = {}\n \n if not isinstance(gr_id, tuple): gr_id = (gr_id,)\n res_dic = dict( gr_id=gr_id, \n n=len(frame_idx), \n dff=dff_dic, \n motion=motion_dic, \n lick=lick_dic, \n spout=spout_dic, \n puff=puff_dic,\n hall=hall_dic,\n mouth=mouth_dic)\n results.append(res_dic)\n\n## Determine order of cells\nif sort_cells_by_grp is not None:\n if sort_method == 'max':\n trig = results[sort_cells_by_grp]['dff']['mean'].max(axis=0)\n elif sort_method == 'argmax':\n trig = np.argmax(results[sort_cells_by_grp]['dff']['mean'], axis=0)\n elif sort_method == 'diff':\n # assumes 1-0 for now\n trig = np.mean(results[1]['dff']['mean'] - results[0]['dff']['mean'], axis=0)\n elif sort_method == 'rdiff':\n # pearson-r based diff, assumes 1-0 for now\n trig = [np.corrcoef(r0,r1)[0,1] for r0,r1 in zip(results[0]['dff']['mean'].T,results[1]['dff']['mean'].T)]\n order = np.argsort(trig)\nelse:\n order = np.arange(dff.shape[1])\n\n## Plot results\npl.style.use(['normal','noticks'])\nif normalize_to == 'local':\n vmin,vmax = np.min([i['dff']['mean'].min() for i in results]),np.max([i['dff']['mean'].max() for i in results])\nelif normalize_to == 'global':\n vmin,vmax = signal.min(),signal.max()\n\nfig,axs = pl.subplots(7, len(gb), sharex=True, sharey='row', gridspec_kw=dict(height_ratios=[25,1,1,1,1,.5,.5],hspace=0.05,wspace=0.1,left=0.02,right=0.96, top=.97, bottom=.03), squeeze=False); axs=axs.T\nax_cbar = fig.add_axes([.97,0.3,.01,0.6])\n\nnorms = {}\nfor field in ['motion','puff','lick','hall','spout','mouth']:\n if normalize_behav and results[0][field]:\n minn = np.min([r[field]['mean'].min() for r in results])\n maxx = np.max([r[field]['mean'].max() for r in results])\n else:\n minn = 0\n maxx = 1\n norms[field] = dict(vmin=minn, vmax=maxx)\n\nfor r,ax in zip(results, axs):\n gr_id = r['gr_id']\n gr_len = r['n']\n gr_dff = r['dff']['mean']\n gr_motion = r['motion']['mean']\n gr_lick = r['lick']['mean']\n gr_puff = r['puff']['mean']\n gr_hall = r['hall'].get('mean', None)\n gr_spout = r['spout']['mean']\n gr_mouth = r['mouth'].get('mean',None)\n sl = r['dff']['slices']\n sl_puff = r['puff']['slices']\n sl_lick = r['lick']['slices']\n \n # calcium heatmap\n mesh = gr_dff.heat(order=order, ax=ax[0], vmin=vmin, vmax=vmax, **heat_kwargs)\n groupstr = ', '.join(['{}: {}'.format(g0,num2str(g1,g0)) for g0,g1 in zip(groupby_choice,gr_id)])\n ax[0].set_title('{} (n={})'.format(groupstr,gr_len), fontsize='small')\n # motion heatmap\n gr_motion.heat(ax=ax[1], labels=['x','y','c'], **heat_kwargs, **norms['motion'])\n # lick heatmap\n gr_lick.heat(ax=ax[2], labels=['L','R'], **heat_kwargs, **norms['lick'])\n # puff heatmap\n gr_puff.heat(ax=ax[3], labels=['L','R'], **heat_kwargs, **norms['puff'])\n # spout heatmap\n gr_spout.heat(ax=ax[4], labels=['L','R'], **heat_kwargs, **norms['spout'])\n # hall heatmap\n if gr_hall is not None:\n gr_hall.heat(ax=ax[5], labels=[''], **heat_kwargs, **norms['hall'])\n # behavior movie heatmap\n if gr_mouth is not None:\n gr_mouth.heat(ax=ax[6], labels=[''], **heat_kwargs, **norms['mouth'])\n\n for a in ax:\n a.vlines(0, a.get_ylim()[0], a.get_ylim()[1], colors='w', linestyles='--')\n ax[-1].tick_params(labelsize='small', axis='x', labelcolor='k')\n\nfor a,lab in zip(ax[1:],['motion','lick','puff','spout','hall','mov']):\n a.set_ylabel(lab, fontsize='xx-small', rotation=0, labelpad=15)\n a.yaxis.set_label_position('right')\n\ncbar = pl.colorbar(mesh, cax=ax_cbar, ticks=np.linspace(vmin, vmax, 3))\nax_cbar.yaxis.set_tick_params(colors='k', labelsize='xx-small')\nax_cbar.set_yticklabels(ax_cbar.get_yticklabels(), rotation=90)\npl.style.use(['default','normal'])\n\n##\nb = pl.isinteractive()\npl.ioff()\nwith open('/Users/ben/Desktop/fig.fig','wb') as f:\n pickle.dump(fig, f)\npl.interactive(b)\n\n##\n","sub_path":"analysis/c5/ii_session.py","file_name":"ii_session.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"248399542","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 23 14:54:02 2015\n\n@author: af26\n\nTakes connectivity graphs for several years and combines them into a single \ngraph. Two edge weights are given: one is a sum of the weights in the \nyears, the other is the number of years when the connection exists.\n\"\"\"\n\nimport networkx as nx\n\nyears = range(1990,2001)\n\nH = nx.DiGraph()\n\nfor year in years:\n graph_file = ('C:/Users/af26/LarvalDispersalResults/' + \n 'polcoms' + str(year) + \n '/Run_1000_baseline/Networkdata/GraphCompose/' + \n 'graph_compose.graphml')\n infile = open(graph_file,'r') \n G = nx.read_graphml(infile)\n infile.close()\n\n for node in G.nodes():\n H.add_node(node)\n \n for u,v,data in G.edges_iter(data=True):\n w = data['weight']\n if H.has_edge(u,v):\n H[u][v]['weight'] += w\n H[u][v]['nyears'] += 1\n else:\n H.add_edge(u, v, weight=w, nyears = 1)\n \ngraph_file = ('C:/Users/af26/LarvalDispersalResults/' + \n 'polcoms1990to2000' +\n '/Run_1000_baseline/Networkdata/GraphCompose/' + \n 'graph_compose.graphml')\noutfile = open(graph_file,'w') \nnx.write_graphml(H, outfile)\noutfile.close()\n","sub_path":"graph_combine_years.py","file_name":"graph_combine_years.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"62520940","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport curses\nimport math\nimport rospy\nfrom flow_control_py.srv import *\n\ndef main(stdscr):\n rospy.init_node('flow_teleop', anonymous=True)\n rospy.wait_for_service('flow_command')\n flow_command = rospy.ServiceProxy('flow_command',FlowCommand)\n rate = rospy.Rate(10) \n keycode = -1\n command = 'Stop'\n stdscr.addstr(\"Flow commands\\n\")\n stdscr.addstr(\" - UP : start flow control\\n\")\n stdscr.addstr(\" - LEFT/RIGHT : control angular z\\n\")\n stdscr.addstr(\" - any key : stop flow control\\n\")\n stdscr.addstr(\" - ESC : stop flow control and exit\\n\")\n while (not rospy.is_shutdown()) and (keycode != 27): \n keycode = stdscr.getch() \n if keycode == curses.KEY_UP : command = 'Go'\n elif keycode == curses.KEY_LEFT : command = 'Left'\n elif keycode == curses.KEY_RIGHT : command = 'Right'\n else : command = 'Stop'\n flow_command(command)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n curses.wrapper(main)\n except rospy.ROSInterruptException:\n pass\n","sub_path":"flow_control_py/scripts/flow_teleop.py","file_name":"flow_teleop.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"181055616","text":"from flask import jsonify\nfrom flask_restful import Resource, reqparse, abort\n\nfrom database.data import db_session\nfrom database.data import __all_models as models\n\n\nparser = reqparse.RequestParser()\nparser.add_argument(\"id\")\nparser.add_argument(\"access_code\")\n\n\ndef abort_if_lesson_not_found(lesson_id):\n db_sess = db_session.create_session()\n lesson = db_sess.query(models.Lesson).get(lesson_id)\n if not lesson:\n abort(404, message=f\"Lesson {lesson_id} not found\")\n\n\ndef check_permission(access_code):\n if access_code != \"G1Yuhoqfe3TAYIf6y73f\":\n abort(304, message=f\"not enough permissions\")\n\n\nclass LessonsResource(Resource):\n def get(self, lesson_id):\n args = parser.parse_args()\n check_permission(args[\"access_code\"])\n abort_if_lesson_not_found(lesson_id)\n \n db_sess = db_session.create_session()\n lesson = db_sess.query(models.Lesson).get(lesson_id)\n return jsonify({\"lesson\": lesson.to_dict()})\n\n\nclass LessonsListResource(Resource):\n def get(self):\n args = parser.parse_args()\n check_permission(args[\"access_code\"])\n \n db_sess = db_session.create_session()\n lessons = db_sess.query(models.Lesson).all()\n return jsonify({\"lessons\": [item.to_dict() for item in lessons]})\n","sub_path":"api/resources/lesson.py","file_name":"lesson.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"470933012","text":"__author__ = 'ceremcem'\n\nimport time\nfrom barrier import Barrier\nfrom gevent import sleep, spawn\nfrom gevent.queue import Queue\n\nclass SamplingQueue(object):\n \"\"\"\n FIFO queue with a sampling interval\n\n if an item is put this queue at a higher frequency that sample_interval\n allows, the last value in this queue is discarded and replaced with new\n value.\n\n if a value is requested from this queue and it is not available,\n then `get()` method blocks execution.\n\n if values are requested too quick, then `get()` method blocks\n execution until a sample_interval elapses, and returns last item.\n \"\"\"\n def __init__(self, sampling_interval=0.033, maxsize=None):\n object.__init__(self)\n self.__queue = Queue(maxsize=maxsize)\n self.buff = SamplingBuffer(sampling_interval=sampling_interval)\n spawn(self.action)\n\n def action(self):\n while True:\n self.__queue.put(self.buff.get())\n\n def put(self, item):\n self.buff.put(item)\n\n def get(self):\n return self.__queue.get()\n\n\nclass SamplingBuffer(object):\n def __init__(self, sampling_interval=None, sampling_freq=None, initial_value=None):\n \"\"\"\n if value is put too fast, `get` method should limit this speed with \"sample interval\" parameter.\n\n if value is got too slow, `get` method should return immediately\n\n \"\"\"\n if sampling_interval:\n self.sampling_interval = sampling_interval\n elif sampling_freq:\n self.sampling_interval = (1.0/sampling_freq)\n else:\n self.sampling_interval = 0.01 # 10 ms default value\n\n self.curr_val = initial_value\n self.last_timestamp = 0\n self.put_barrier = Barrier()\n self.fine_tune_last_wait = 0.005 # seconds\n\n def put(self, value):\n self.curr_val = value\n self.put_barrier.go()\n\n def get(self):\n while True:\n remaining_time = self.last_timestamp + self.sampling_interval - time.time()\n #print \"remaining time: \", remaining_time\n if remaining_time <= 0:\n self.put_barrier.wait()\n self.last_timestamp = time.time()\n return self.curr_val\n else:\n if remaining_time > self.fine_tune_last_wait:\n sleep(remaining_time - self.fine_tune_last_wait)\n else:\n # try to wait in the end very precisely\n sleep(remaining_time/2)\n\n\n","sub_path":"aktos_dcs/core/sampling_queue.py","file_name":"sampling_queue.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"393648033","text":"import pytest\n\nfrom django.urls import reverse\n\n\n@pytest.fixture(autouse=True)\ndef setup(mock_queue, mock_case, mock_denial_reasons, mock_post_refusal_advice):\n yield\n\n\n@pytest.fixture\ndef url(data_queue, data_standard_case):\n return reverse(\"cases:refuse_all\", kwargs={\"queue_pk\": data_queue[\"id\"], \"pk\": data_standard_case[\"case\"][\"id\"]})\n\n\ndef test_refuse_all_get(authorized_client, url):\n response = authorized_client.get(url)\n assert response.status_code == 200\n\n\n@pytest.mark.parametrize(\n \"denial_reasons, refusal_reasons, expected_status_code\",\n [\n # Valid form\n ([\"1\"], \"test\", 302),\n # Valid form with 2 denial_reasons\n ([\"1\", \"1a\"], \"test\", 302),\n # Invalid form - missing denial_reasons\n ([], \"test\", 200),\n # Invalid form - missing refusal_reasons\n ([\"1\"], \"\", 200),\n # Invalid form - missing denial_reasons & refusal_reasons\n ([], \"\", 200),\n ],\n)\ndef test_refuse_all_post(authorized_client, url, denial_reasons, refusal_reasons, expected_status_code):\n response = authorized_client.post(url, data={\"denial_reasons\": denial_reasons, \"refusal_reasons\": refusal_reasons})\n assert response.status_code == expected_status_code\n","sub_path":"unit_tests/caseworker/advice/views/test_refusal_advice_view.py","file_name":"test_refusal_advice_view.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"109928369","text":"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Defines external repositories needed by rules_webtesting.\"\"\"\n\nload(\"//web/internal:java_import_external.bzl\", \"java_import_external\")\nload(\"//web/internal:platform_http_file.bzl\", \"platform_http_file\")\nload(\"//web/internal:go_repository.bzl\", \"go_repository\")\n\n\ndef web_test_repositories(\n omit_cglib_nodep=False,\n omit_com_github_gorilla_mux=False,\n omit_com_github_tebeka_selenium=False,\n omit_com_google_code_findbugs_jsr305=False,\n omit_com_google_code_gson=False,\n omit_com_google_errorprone_error_prone_annotations=False,\n omit_com_google_guava=False,\n omit_commons_codec=False,\n omit_commons_logging=False,\n omit_junit=False,\n omit_net_java_dev_jna=False,\n omit_net_java_dev_jna_platform=False,\n omit_org_apache_commons_exec=False,\n omit_org_apache_httpcomponents_httpclient=False,\n omit_org_apache_httpcomponents_httpcore=False,\n omit_org_apache_httpcomponents_httpmime=False,\n omit_org_eclipse_jetty_io=False,\n omit_org_eclipse_jetty_util=False,\n omit_org_eclipse_jetty_websocket_api=False,\n omit_org_eclipse_jetty_websocket_client=False,\n omit_org_eclipse_jetty_websocket_common=False,\n omit_org_hamcrest_core=False,\n omit_org_seleniumhq_py=False,\n omit_org_seleniumhq_selenium_api=False,\n omit_org_seleniumhq_selenium_remote_driver=False,\n **kwargs):\n \"\"\"Defines external repositories required by Webtesting Rules.\n\n This function exists for other Bazel projects to call from their WORKSPACE\n file when depending on rules_webtesting using http_archive. This function\n makes it easy to import these transitive dependencies into the parent\n workspace using a blacklist model. Individual dependencies may be excluded\n with the omit parameters. This is useful for users who want to be rigorous\n about declaring their own direct dependencies, or when another Bazel project\n is depended upon (e.g. rules_closure) that defines the same dependencies as\n this one (e.g. com_google_guava.) Alternatively, a whitelist model may be\n used by calling the individual functions this method references.\n\n Please note that while these dependencies are defined, they are not actually\n downloaded, unless a target is built that depends on them.\n \"\"\"\n _check_bazel_version(\"Web Testing Rules\", \"0.4.2\")\n if kwargs.keys():\n print(\"The following parameters are deprecated: \" + str(kwargs.keys()))\n if not omit_cglib_nodep:\n cglib_nodep()\n if not omit_com_github_gorilla_mux:\n com_github_gorilla_mux()\n if not omit_com_github_tebeka_selenium:\n com_github_tebeka_selenium()\n if not omit_com_google_code_findbugs_jsr305:\n com_google_code_findbugs_jsr305()\n if not omit_com_google_code_gson:\n com_google_code_gson()\n if not omit_com_google_errorprone_error_prone_annotations:\n com_google_errorprone_error_prone_annotations()\n if not omit_com_google_guava:\n com_google_guava()\n if not omit_commons_codec:\n commons_codec()\n if not omit_commons_logging:\n commons_logging()\n if not omit_junit:\n junit()\n if not omit_net_java_dev_jna:\n net_java_dev_jna()\n if not omit_net_java_dev_jna_platform:\n net_java_dev_jna_platform()\n if not omit_org_apache_commons_exec:\n org_apache_commons_exec()\n if not omit_org_apache_httpcomponents_httpclient:\n org_apache_httpcomponents_httpclient()\n if not omit_org_apache_httpcomponents_httpcore:\n org_apache_httpcomponents_httpcore()\n if not omit_org_apache_httpcomponents_httpmime:\n org_apache_httpcomponents_httpmime()\n if not omit_org_hamcrest_core:\n org_hamcrest_core()\n if not omit_org_seleniumhq_py:\n org_seleniumhq_py()\n if not omit_org_seleniumhq_selenium_api:\n org_seleniumhq_selenium_api()\n if not omit_org_seleniumhq_selenium_remote_driver:\n org_seleniumhq_selenium_remote_driver()\n\n\ndef browser_repositories(firefox=False, chromium=False):\n \"\"\"Sets up repositories for browsers defined in //browsers/....\n\n This should only be used on an experimental basis; projects should define\n their own browsers.\n\n Args:\n firefox: Configure repositories for //browsers:firefox-native.\n chromium: Configure repositories for //browsers:chromium-native.\n \"\"\"\n _check_bazel_version(\"Web Testing Rules\", \"0.4.2\")\n if chromium:\n org_chromium_chromedriver()\n org_chromium_chromium()\n if firefox:\n org_mozilla_firefox()\n org_mozilla_geckodriver()\n\n\ndef cglib_nodep():\n java_import_external(\n name=\"cglib_nodep\",\n jar_sha256=\"b40d7ac4400ea21dcf818f436a346ddd66b67c550811ccac5bbcbec095ab1287\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/cglib/cglib-nodep/3.2.4/cglib-nodep-3.2.4.jar\",\n \"http://maven.ibiblio.org/maven2/cglib/cglib-nodep/3.2.4/cglib-nodep-3.2.4.jar\",\n \"http://repo1.maven.org/maven2/cglib/cglib-nodep/3.2.4/cglib-nodep-3.2.4.jar\",\n ],\n licenses=[\"notice\"], # ASF 2.0\n )\n\n\ndef com_github_gorilla_mux():\n go_repository(\n name=\"com_github_gorilla_mux\",\n import_name=\"github.com/gorilla/mux\",\n sha256=\"a32c13a36c58cb321136231ae8b67b0c6ad3c5f462e65eb6771f59c44b44ccba\",\n strip_prefix=\"mux-757bef944d0f21880861c2dd9c871ca543023cba\",\n excluded_srcs=[\"context_gorilla.go\"],\n license=\"licenses([\\\"notice\\\"])\",\n urls=[\n \"http://bazel-mirror.storage.googleapis.com/github.com/gorilla/mux/archive/757bef944d0f21880861c2dd9c871ca543023cba.tar.gz\",\n \"https://github.com/gorilla/mux/archive/757bef944d0f21880861c2dd9c871ca543023cba.tar.gz\",\n ])\n\n\ndef com_github_tebeka_selenium():\n go_repository(\n name=\"com_github_tebeka_selenium\",\n import_name=\"github.com/tebeka/selenium\",\n sha256=\"c33decb47a9b81d5221cda29c8f040ca5cf874956bbb002ef82b06e07ed78c3d\",\n strip_prefix=\"selenium-f6f9a3638fa049f85b0aaf42e693e1c4ab257d4f\",\n license=\"licenses([\\\"notice\\\"]) # MIT.\",\n urls=[\n \"http://bazel-mirror.storage.googleapis.com/github.com/tebeka/selenium/archive/f6f9a3638fa049f85b0aaf42e693e1c4ab257d4f.tar.gz\",\n \"https://github.com/tebeka/selenium/archive/f6f9a3638fa049f85b0aaf42e693e1c4ab257d4f.tar.gz\",\n ])\n\n\ndef com_google_code_findbugs_jsr305():\n java_import_external(\n name=\"com_google_code_findbugs_jsr305\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar\",\n \"http://repo1.maven.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar\",\n \"http://maven.ibiblio.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar\",\n ],\n jar_sha256=\"905721a0eea90a81534abb7ee6ef4ea2e5e645fa1def0a5cd88402df1b46c9ed\",\n licenses=[\"notice\"], # BSD 3-clause\n )\n\n\ndef com_google_code_gson():\n java_import_external(\n name=\"com_google_code_gson\",\n jar_sha256=\"13f44a2f6ead058da80a91ee650c073871942468e684a9bf6a0d0319138924ce\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/code/gson/gson/2.3.1/gson-2.3.1.jar\",\n \"http://repo1.maven.org/maven2/com/google/code/gson/gson/2.3.1/gson-2.3.1.jar\",\n \"http://maven.ibiblio.org/maven2/com/google/code/gson/gson/2.3.1/gson-2.3.1.jar\",\n ],\n licenses=[\"notice\"], # The Apache Software License, Version 2.0\n deps=[\"@com_google_code_findbugs_jsr305\"])\n\n\ndef com_google_errorprone_error_prone_annotations():\n java_import_external(\n name=\"com_google_errorprone_error_prone_annotations\",\n jar_sha256=\"e7749ffdf03fb8ebe08a727ea205acb301c8791da837fee211b99b04f9d79c46\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.0.15/error_prone_annotations-2.0.15.jar\",\n \"http://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.0.15/error_prone_annotations-2.0.15.jar\",\n ],\n licenses=[\"notice\"], # Apache 2.0\n )\n\n\ndef com_google_guava():\n java_import_external(\n name=\"com_google_guava\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/guava/guava/20.0/guava-20.0.jar\",\n \"http://repo1.maven.org/maven2/com/google/guava/guava/20.0/guava-20.0.jar\",\n \"http://maven.ibiblio.org/maven2/com/google/guava/guava/20.0/guava-20.0.jar\",\n ],\n jar_sha256=\"36a666e3b71ae7f0f0dca23654b67e086e6c93d192f60ba5dfd5519db6c288c8\",\n licenses=[\"notice\"], # Apache 2.0\n exports=[\n \"@com_google_code_findbugs_jsr305\",\n \"@com_google_errorprone_error_prone_annotations\",\n ])\n\n\ndef commons_codec():\n java_import_external(\n name=\"commons_codec\",\n jar_sha256=\"4241dfa94e711d435f29a4604a3e2de5c4aa3c165e23bd066be6fc1fc4309569\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/commons-codec/commons-codec/1.10/commons-codec-1.10.jar\",\n \"http://repo1.maven.org/maven2/commons-codec/commons-codec/1.10/commons-codec-1.10.jar\",\n \"http://maven.ibiblio.org/maven2/commons-codec/commons-codec/1.10/commons-codec-1.10.jar\",\n ],\n licenses=[\"notice\"], # Apache License, Version 2.0\n )\n\n\ndef commons_logging():\n java_import_external(\n name=\"commons_logging\",\n jar_sha256=\"daddea1ea0be0f56978ab3006b8ac92834afeefbd9b7e4e6316fca57df0fa636\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/commons-logging/commons-logging/1.2/commons-logging-1.2.jar\",\n \"http://maven.ibiblio.org/maven2/commons-logging/commons-logging/1.2/commons-logging-1.2.jar\",\n \"http://repo1.maven.org/maven2/commons-logging/commons-logging/1.2/commons-logging-1.2.jar\",\n ],\n licenses=[\"notice\"], # The Apache Software License, Version 2.0\n )\n\n\ndef junit():\n java_import_external(\n name=\"junit\",\n jar_sha256=\"59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar\",\n \"http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar\",\n \"http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar\",\n ],\n licenses=[\"reciprocal\"], # Eclipse Public License 1.0\n testonly_=1,\n deps=[\"@org_hamcrest_core\"])\n\n\ndef net_java_dev_jna():\n java_import_external(\n name=\"net_java_dev_jna\",\n jar_sha256=\"1aa37e9ea6baa0ee152d89509f758f0847eac66ec179b955cafe0919e540a92e\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/net/java/dev/jna/jna/4.1.0/jna-4.1.0.jar\",\n \"http://maven.ibiblio.org/maven2/net/java/dev/jna/jna/4.1.0/jna-4.1.0.jar\",\n \"http://repo1.maven.org/maven2/net/java/dev/jna/jna/4.1.0/jna-4.1.0.jar\",\n ],\n # LGPL, version 2.1\n # http://www.gnu.org/licenses/licenses.html\n # ASL, version 2\n # http://www.apache.org/licenses/\n licenses=[\"restricted\"])\n\n\ndef net_java_dev_jna_platform():\n java_import_external(\n name=\"net_java_dev_jna_platform\",\n jar_sha256=\"f91ba7c0f26c34f04bf57d2ae30d4b19f906e7bb1de90eb3e1f4fdbf45d0c541\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/net/java/dev/jna/jna-platform/4.1.0/jna-platform-4.1.0.jar\",\n \"http://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/4.1.0/jna-platform-4.1.0.jar\",\n \"http://maven.ibiblio.org/maven2/net/java/dev/jna/jna-platform/4.1.0/jna-platform-4.1.0.jar\",\n ],\n # LGPL, version 2.1\n # http://www.gnu.org/licenses/licenses.html\n # ASL, version 2\n # http://www.apache.org/licenses/\n licenses=[\"restricted\"],\n deps=[\"@net_java_dev_jna\"])\n\n\ndef org_apache_commons_exec():\n java_import_external(\n name=\"org_apache_commons_exec\",\n jar_sha256=\"cb49812dc1bfb0ea4f20f398bcae1a88c6406e213e67f7524fb10d4f8ad9347b\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/apache/commons/commons-exec/1.3/commons-exec-1.3.jar\",\n \"http://repo1.maven.org/maven2/org/apache/commons/commons-exec/1.3/commons-exec-1.3.jar\",\n \"http://maven.ibiblio.org/maven2/org/apache/commons/commons-exec/1.3/commons-exec-1.3.jar\",\n ],\n licenses=[\"notice\"], # Apache License, Version 2.0\n )\n\n\ndef org_apache_httpcomponents_httpclient():\n java_import_external(\n name=\"org_apache_httpcomponents_httpclient\",\n jar_sha256=\"0dffc621400d6c632f55787d996b8aeca36b30746a716e079a985f24d8074057\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar\",\n \"http://repo1.maven.org/maven2/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar\",\n \"http://maven.ibiblio.org/maven2/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar\",\n ],\n licenses=[\"notice\"], # Apache License, Version 2.0\n deps=[\n \"@org_apache_httpcomponents_httpcore\",\n \"@commons_logging\",\n \"@commons_codec\",\n ])\n\n\ndef org_apache_httpcomponents_httpcore():\n java_import_external(\n name=\"org_apache_httpcomponents_httpcore\",\n jar_sha256=\"f7bc09dc8a7003822d109634ffd3845d579d12e725ae54673e323a7ce7f5e325\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar\",\n \"http://maven.ibiblio.org/maven2/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar\",\n \"http://repo1.maven.org/maven2/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar\",\n ],\n licenses=[\"notice\"], # Apache License, Version 2.0\n )\n\n\ndef org_apache_httpcomponents_httpmime():\n java_import_external(\n name=\"org_apache_httpcomponents_httpmime\",\n jar_sha256=\"231a3f7e4962053db2be8461d5422e68fc458a3a7dd7d8ada803a348e21f8f07\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/apache/httpcomponents/httpmime/4.5.2/httpmime-4.5.2.jar\",\n \"http://maven.ibiblio.org/maven2/org/apache/httpcomponents/httpmime/4.5.2/httpmime-4.5.2.jar\",\n \"http://repo1.maven.org/maven2/org/apache/httpcomponents/httpmime/4.5.2/httpmime-4.5.2.jar\",\n ],\n licenses=[\"notice\"], # Apache License, Version 2.0\n deps=[\"@org_apache_httpcomponents_httpclient\"])\n\n\ndef org_chromium_chromedriver():\n platform_http_file(\n name=\"org_chromium_chromedriver\",\n amd64_sha256=\"59e6b1b1656a20334d5731b3c5a7400f92a9c6f5043bb4ab67f1ccf1979ee486\",\n amd64_urls=[\n \"http://chromedriver.storage.googleapis.com/2.26/chromedriver_linux64.zip\"\n ],\n macos_sha256=\"70aae3812941ed94ad8065bb4a9432861d7d4ebacdd93ee47bb2c7c57c7e841e\",\n macos_urls=[\n \"http://chromedriver.storage.googleapis.com/2.26/chromedriver_mac64.zip\"\n ])\n\n\ndef org_chromium_chromium():\n # Roughly corresponds to Chrome 55\n platform_http_file(\n name=\"org_chromium_chromium\",\n amd64_sha256=\"e3c99954d6acce013174053534b72f47f67f18a0d75f79c794daaa8dd2ae8aaf\",\n amd64_urls=[\n \"http://commondatastorage.googleapis.com/chromium-browser-snapshots/Linux_x64/423768/chrome-linux.zip\"\n ],\n macos_sha256=\"62aeb7a5c6b8a1b7b31400105bf01295bbd45b0627920b8f99f0cc4ca76927ca\",\n macos_urls=[\n \"http://commondatastorage.googleapis.com/chromium-browser-snapshots/Mac/423758/chrome-mac.zip\"\n ])\n\n\ndef org_hamcrest_core():\n java_import_external(\n name=\"org_hamcrest_core\",\n jar_sha256=\"66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar\",\n \"http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar\",\n \"http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar\",\n ],\n licenses=[\"notice\"], # New BSD License\n testonly_=1)\n\n\ndef org_mozilla_firefox():\n platform_http_file(\n name=\"org_mozilla_firefox\",\n amd64_sha256=\"10533f3db9c819a56f6cd72f9340e05c7e3b116454eb81b0d39ed161955bb48f\",\n amd64_urls=[\n \"http://ftp.mozilla.org/pub/firefox/releases/50.1.0/firefox-50.1.0.linux-x86_64.sdk.tar.bz2\",\n \"http://bazel-mirror.storage.googleapis.com/ftp.mozilla.org/pub/firefox/releases/50.1.0/firefox-50.1.0.linux-x86_64.sdk.tar.bz2\",\n ],\n macos_sha256=\"5cd449ebedb44b2f882b37e6e5cee1a814bc5ff3c3f86d1a1019b937aa287441\",\n macos_urls=[\n \"http://ftp.mozilla.org/pub/firefox/releases/50.1.0/firefox-50.1.0.mac-x86_64.sdk.tar.bz2\",\n \"http://bazel-mirror.storage.googleapis.com/ftp.mozilla.org/pub/firefox/releases/50.1.0/firefox-50.1.0.mac-x86_64.sdk.tar.bz2\",\n ])\n\n\ndef org_mozilla_geckodriver():\n platform_http_file(\n name=\"org_mozilla_geckodriver\",\n amd64_sha256=\"ce4aa8b5cf918a6607b50e73996fb909db42fd803855f0ecc9d7183999c3bedc\",\n amd64_urls=[\n \"http://bazel-mirror.storage.googleapis.com/github.com/mozilla/geckodriver/releases/download/v0.11.1/geckodriver-v0.11.1-linux64.tar.gz\",\n \"https://github.com/mozilla/geckodriver/releases/download/v0.11.1/geckodriver-v0.11.1-linux64.tar.gz\",\n ],\n macos_sha256=\"802cc1a33b8ce6f7c3aeb5116730cb6efc20414959d6f750e74437869d37a150\",\n macos_urls=[\n \"http://bazel-mirror.storage.googleapis.com/github.com/mozilla/geckodriver/releases/download/v0.11.1/geckodriver-v0.11.1-macos.tar.gz\",\n \"https://github.com/mozilla/geckodriver/releases/download/v0.11.1/geckodriver-v0.11.1-macos.tar.gz\",\n ])\n\n\ndef org_seleniumhq_py():\n native.new_http_archive(\n name=\"org_seleniumhq_py\",\n build_file=str(Label(\"//build_files:org_seleniumhq_py.BUILD\")),\n sha256=\"85daad4d09be86bddd4f45579986ac316c1909c3b4653ed471ea4519eb413c8f\",\n strip_prefix=\"selenium-3.0.2/py\",\n urls=[\n \"http://bazel-mirror.storage.googleapis.com/pypi.python.org/packages/0c/42/20c235e604bf736bc970c1275a78c4ea28c6453a0934002f95df9c49dad0/selenium-3.0.2.tar.gz\",\n \"https://pypi.python.org/packages/0c/42/20c235e604bf736bc970c1275a78c4ea28c6453a0934002f95df9c49dad0/selenium-3.0.2.tar.gz\",\n ])\n\n\ndef org_seleniumhq_selenium_api():\n java_import_external(\n name=\"org_seleniumhq_selenium_api\",\n jar_sha256=\"0226cc02880aff06f7fd85e77314182087a524e21ceda02f8197317bbb0390b8\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/seleniumhq/selenium/selenium-api/3.0.1/selenium-api-3.0.1.jar\",\n \"http://repo1.maven.org/maven2/org/seleniumhq/selenium/selenium-api/3.0.1/selenium-api-3.0.1.jar\",\n \"http://maven.ibiblio.org/maven2/org/seleniumhq/selenium/selenium-api/3.0.1/selenium-api-3.0.1.jar\",\n ],\n licenses=[\"notice\"], # The Apache Software License, Version 2.0\n testonly_=1)\n\n\ndef org_seleniumhq_selenium_remote_driver():\n java_import_external(\n name=\"org_seleniumhq_selenium_remote_driver\",\n jar_sha256=\"97eed1fe99c4b5ced127336270fe56fa53754627f24536bc07141c6451270275\",\n jar_urls=[\n \"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/org/seleniumhq/selenium/selenium-remote-driver/3.0.1/selenium-remote-driver-3.0.1.jar\",\n \"http://repo1.maven.org/maven2/org/seleniumhq/selenium/selenium-remote-driver/3.0.1/selenium-remote-driver-3.0.1.jar\",\n ],\n licenses=[\"notice\"], # The Apache Software License, Version 2.0\n testonly_=1,\n deps=[\n \"@cglib_nodep\",\n \"@com_google_code_gson\",\n \"@com_google_guava\",\n \"@net_java_dev_jna_platform\",\n \"@org_apache_commons_exec\",\n \"@org_apache_httpcomponents_httpmime\",\n \"@org_seleniumhq_selenium_api\",\n ])\n\n\ndef _check_bazel_version(project, bazel_version):\n if \"bazel_version\" not in dir(native):\n fail(\"%s requires Bazel >=%s but was <0.2.1\" % (project, bazel_version))\n elif not native.bazel_version:\n pass # user probably compiled Bazel from scratch\n else:\n current_bazel_version = _parse_bazel_version(native.bazel_version)\n minimum_bazel_version = _parse_bazel_version(bazel_version)\n if minimum_bazel_version > current_bazel_version:\n fail(\"%s requires Bazel >=%s but was %s\" % (project, bazel_version,\n native.bazel_version))\n\n\ndef _parse_bazel_version(bazel_version):\n # Remove commit from version.\n version = bazel_version.split(\" \", 1)[0]\n # Split into (release, date) parts and only return the release\n # as a tuple of integers.\n parts = version.split(\"-\", 1)\n # Turn \"release\" into a tuple of strings\n version_tuple = ()\n for number in parts[0].split(\".\"):\n version_tuple += (str(number),)\n return version_tuple\n","sub_path":"web/repositories.bzl","file_name":"repositories.bzl","file_ext":"bzl","file_size_in_byte":21504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"41622359","text":"import os\nimport re\nimport sys\nimport sysconfig\nimport platform\nimport subprocess\nimport distutils\nimport glob\nimport tempfile\nimport shutil\nfrom distutils.version import LooseVersion\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.test import test as TestCommand\nimport distutils.spawn\nimport urllib.request\nimport tarfile\n\nclass CMakeExtension(Extension):\n def __init__(self, name, path, sourcedir=\"\"):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n self.path = path\n\n\nclass CMakeBuild(build_ext):\n\n user_options = build_ext.user_options + [('base-dir=', None, 'base directory of Triton')]\n\n def initialize_options(self):\n build_ext.initialize_options(self)\n self.base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))\n\n def finalize_options(self):\n build_ext.finalize_options(self)\n\n def run(self):\n try:\n out = subprocess.check_output([\"cmake\", \"--version\"])\n except OSError:\n raise RuntimeError(\n \"CMake must be installed to build the following extensions: \" + \", \".join(e.name for e in self.extensions)\n )\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r\"version\\s*([\\d.]+)\", out.decode()).group(1))\n if cmake_version < \"3.1.0\":\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n # self.debug = True\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.path)))\n # create build directories\n build_suffix = 'debug' if self.debug else 'release'\n llvm_build_dir = os.path.join(tempfile.gettempdir(), \"llvm-\" + build_suffix)\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n if not os.path.exists(llvm_build_dir):\n os.makedirs(llvm_build_dir)\n # python directories\n python_include_dirs = [distutils.sysconfig.get_python_inc()] + ['/usr/local/cuda/include']\n python_lib_dirs = distutils.sysconfig.get_config_var(\"LIBDIR\")\n cmake_args = [\n \"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=\" + extdir,\n \"-DBUILD_TUTORIALS=OFF\",\n \"-DBUILD_PYTHON_MODULE=ON\",\n #'-DPYTHON_EXECUTABLE=' + sys.executable,\n #'-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON',\n \"-DTRITON_LLVM_BUILD_DIR=\" + llvm_build_dir,\n \"-DPYTHON_INCLUDE_DIRS=\" + \";\".join(python_include_dirs)\n ]\n # configuration\n cfg = \"Debug\" if self.debug else \"Release\"\n build_args = [\"--config\", cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += [\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}\".format(cfg.upper(), extdir)]\n if sys.maxsize > 2**32:\n cmake_args += [\"-A\", \"x64\"]\n build_args += [\"--\", \"/m\"]\n else:\n import multiprocessing\n cmake_args += [\"-DCMAKE_BUILD_TYPE=\" + cfg]\n build_args += [\"--\", '-j' + str(2 * multiprocessing.cpu_count())]\n\n env = os.environ.copy()\n subprocess.check_call([\"cmake\", self.base_dir] + cmake_args, cwd=self.build_temp, env=env)\n subprocess.check_call([\"cmake\", \"--build\", \".\"] + build_args, cwd=self.build_temp)\n\n\nsetup(\n name=\"triton\",\n version=\"1.0.0\",\n author=\"Philippe Tillet\",\n author_email=\"phil@openai.com\",\n description=\"A language and compiler for custom Deep Learning operations\",\n long_description=\"\",\n packages=[\"triton\", \"triton/_C\", \"triton/tools\", \"triton/ops\", \"triton/ops/blocksparse\"],\n install_requires=[\"numpy\", \"torch\"],\n package_data={\"triton/ops\": [\"*.c\"], \"triton/ops/blocksparse\": [\"*.c\"]},\n include_package_data=True,\n ext_modules=[CMakeExtension(\"triton\", \"triton/_C/\")],\n cmdclass={\"build_ext\": CMakeBuild},\n zip_safe=False,\n # for PyPI\n keywords=[\"Compiler\", \"Deep Learning\"],\n url=\"https://github.com/ptillet/triton/\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Build Tools\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n ],\n)\n","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"134179937","text":"# give the year and output file name as argument to be added in the csv\n# arg 1 - year\n# arg 2 - new file name\n# arg 3 - loan_type (home or business)\nimport pandas as pd\nimport sys\n\ndef convert_xls_to_csv():\n # change the sheet name here\n data_xls = pd.read_excel('SBA_Disaster_Loan_Data_FY19.xlsx','FY19 Home', index_col=None,skiprows=[0,1,2,3],skipfooter=2)\n data_xls.insert(0, column=\"year\",value=sys.argv[1])\n data_xls.to_csv(sys.argv[2], encoding='utf-8',index=False)\n\n# upload data to the existing file\n# if business add to business else add to home\n\ndef add_to_existing_data():\n if sys.argv[3] == 'Business':\n test = pd.read_csv(sys.argv[2])\n test.to_csv('sba_disaster_loan_data_business_FY01-17.csv',mode='a',index=False,header=False)\n test.to_csv('sba_business_FY01-17.csv',mode='a',index=False,header=False)\n elif sys.argv[3] == 'Home':\n test = pd.read_csv(sys.argv[2])\n test.to_csv('sba_disaster_loan_data_home_FY01-17.csv',mode='a',index=False,header=False)\n test.to_csv('sba_home_FY01-17.csv',mode='a',index=False,header=False)\n\ndef main():\n #convert_xls_to_csv()\n add_to_existing_data()\n\nif __name__ == \"__main__\":\n main()","sub_path":"tasks/sba/convert_xlsx_to_csv.py","file_name":"convert_xlsx_to_csv.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"645452183","text":"from contextlib import contextmanager\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom samplewebapp.schema import Base\n\n\n# This is a global session class for accessing the database with; according\n# to sqlalchemy docs, the most basic pattern is to have this class available\n# and importable. Code can use `Session.configure` to point it to different\n# database engines, and make objects of this class for interacting with the\n# database itself.\nSession = sessionmaker()\n\n\ndef bind_to_database(db_file_path):\n # create a sqlalchemy engine for the file and bind the session to it\n engine = create_engine(f'sqlite:///{db_file_path}')\n Session.configure(bind=engine)\n\n # create tables in the database if they have not yet been created\n create_tables(engine)\n\n\n@contextmanager\ndef database_access():\n '''\n A context manager for handling a scoped sqlalchemy database access session;\n produces a session object upon entering the scope; if exceptions are\n raised, session will be automatically rolled back, otherwise session will\n be automatically committed. Upon exiting context session will be\n automatically closed.\n\n Typically, code that wants to access the database to do something should\n use this context manager like the following:\n\n with database_access() as session:\n # do things with session here\n pass\n\n '''\n session = Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n\ndef create_tables(engine):\n '''\n Call this function to create all tables if they don't already exist in\n the database.\n\n @param engine: the database engine to use; pointing to a database to\n attempt to create tables in\n '''\n Base.metadata.create_all(engine)\n","sub_path":"samplewebapp/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"1271451","text":"'''\r\nExpanding circles...\r\n\r\nCreated on Nov 17, 2014\r\nUpdated Summer, 2015\r\n\r\n@author: smn4\r\n@author kvlinden\r\n'''\r\nfrom tkinter import *\r\nfrom random import randint\r\n\r\ndef get_random_color():\r\n ''' Generate random color intensities for red, green & blue and convert them to hex. '''\r\n return '#{:02X}{:02X}{:02X}'.format(randint(0,255), randint(0,255), randint(0,255))\r\n\r\nclass CircleAnimation:\r\n \r\n def __init__(self, window):\r\n self._window = window\r\n self._window.protocol('WM_DELETE_WINDOW', self.safe_exit)\r\n self._window.bind(\"\", self.process_mouse_event)\r\n self._window.bind(\"\", self.process_key_event) \r\n \r\n SCREEN_WIDTH = 250\r\n self._x = self._y = SCREEN_WIDTH / 2\r\n self._radius = 0\r\n self._dx = 1\r\n self._color = get_random_color()\r\n self._rate = 10 # wait time between frames\r\n\r\n canvas = Canvas(self._window, bg='white', width=SCREEN_WIDTH, height=SCREEN_WIDTH)\r\n canvas.pack()\r\n \r\n # Run the animation.\r\n self._terminated = False\r\n while not self._terminated:\r\n canvas.create_oval(self._x - self._radius, \r\n self._y - self._radius, \r\n self._x + self._radius, \r\n self._y + self._radius, \r\n fill=self._color)\r\n if (self._radius < 0 or self._radius > SCREEN_WIDTH / 2):\r\n self._dx *= -1\r\n self._radius += self._dx \r\n canvas.after(self._rate)\r\n canvas.update()\r\n \r\n def process_mouse_event(self, event):\r\n# print('Clicked at', event.x, event.y) \r\n self._x = event.x\r\n self._y = event.y\r\n self._radius = 0\r\n self._color = get_random_color()\r\n \r\n def process_key_event(self, event):\r\n# print('keysym?', event.keysym)\r\n if event.keysym == 'Right':\r\n self._x += 5\r\n elif event.keysym == 'Left':\r\n self._x -= 5\r\n elif event.keysym == 'Up':\r\n self._y -= 5\r\n elif event.keysym == 'Down':\r\n self._y += 5\r\n \r\n def safe_exit(self):\r\n ''' Terminate the animation before shutting down the GUI window. '''\r\n self._terminated = True\r\n self._window.destroy()\r\n \r\nif __name__ == '__main__':\r\n root = Tk()\r\n root.title('Those crazy circles...') \r\n app = CircleAnimation(root)\r\n root.mainloop()\r\n","sub_path":"labs/14/circle_solution.py","file_name":"circle_solution.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"512378952","text":"# -*- encoding=utf8 -*-\n\nimport math\nimport re\nfrom functools import reduce\nfrom operator import mul\nimport itertools\nfrom red_divx import *\n\n#regex to identify float number\nisfloat = re.compile('^\\d+(\\.\\d+)?$')\n\ndef red_add(expr):\n ''' Reduit les entiers simples de l'equation. Les supprime et ajoute\n la somme a la fin de la liste en place. '''\n print('BEFORE CALL ADD :',expr)\n if len(expr) == 1:\n print('AFTER CALL ADD Len 1 so ntohghing')\n return\n to_rm = []\n total = 0\n for i,ex in enumerate(expr):\n if isfloat.match(ex.strip('+- ')) != None:# or ex.startswith('-'):\n total += float(ex)\n to_rm.append(i)\n nb_rm = 0\n for i in sorted(to_rm)[::-1]:\n expr.pop(i)\n if total != 0:\n expr.append(str(total))\n print('AFTER CALL ADD :',expr)\n\ndef red_sub(expr):\n print('BEFORE CALL SUB :',expr)\n to_rm = []\n #this part split the sub\n sub = []\n for ind,i in enumerate(expr):\n if '- ' in i:\n sub.append(i.split('- '))\n to_rm.append(ind)\n for i in sorted(to_rm)[::-1]:\n expr.pop(i)\n for i in sub:\n print('Sub in sub : ',sub)\n for ind,j in enumerate(i):\n print('J before red : ',j)\n j = split_expr_red(j)\n print('J red : ',j)\n if len(j.strip()) and ind == 0:\n expr.append(j.strip())\n elif len(j.strip()):\n if not j.startswith('-'):\n expr.append('-' + j.strip())\n else:\n expr.append(j.strip(' -'))\n print('J strip : ',j)\n print('AFTER CALL SUB :',expr)\n\ndef red_ent(expr_liste):\n ''' Reduit entiers de la liste en place'''\n print('BEFORE CALL ENT :',expr_liste)\n total = 0\n to_rm = []\n for i,e in enumerate(expr_liste):\n if isfloat.match(e.strip('-+')):\n total += float(e)\n to_rm.append(i)\n for i in sorted(to_rm)[::-1]:\n expr_liste.pop(i)\n if total != 0:\n expr_liste.append(str(total))\n print('AFTER CALL ENT :',expr_liste)\n\ndef red_mul(expr):\n print('BEFORE CALL MUL :',expr)\n total = 0\n to_rm = []\n for i,e in enumerate(expr):\n subtotal = 0\n if '*' in e and not 'x' in e.lower() and not '/' in e:\n new_e = e.split('*')\n new_e = split_expr_red(new_e)\n new_e = list(map(float,new_e))\n subtotal = reduce(mul,new_e)\n to_rm.append(i)\n total += subtotal\n for i in sorted(to_rm)[::-1]:\n expr.pop(i)\n if total:\n expr.append(str(total))\n print('AFTER CALL MUL :',expr)\n\ndef red_div(expr):\n print('BEFORE CALL DIV :',expr)\n nb = []\n to_rm = []\n for i,e in enumerate(expr):\n if '/' in e and not 'x' in e.lower() and not '*' in e:\n nb.append(e)\n to_rm.append(i)\n for i in sorted(to_rm)[::-1]:\n expr.pop(i)\n total = 0\n for i in nb:\n curr = 0\n i = i.split('/')\n for ind,j in enumerate(i):\n j = split_expr_red(j)\n if ind == 0:\n curr = float(j.strip())\n else:\n curr /= float(j.strip())\n total += curr\n if total:\n expr.append(str(float(total)))\n print('AFTER CALL DIV :',expr)\n\ndef red_pow(expr):\n print('BEFORE CALL POW :',expr)\n total = 0\n totalx = 0\n to_rm = []\n for i,e in enumerate(expr):\n if '^' in e and not 'x' in e.lower() and not '/' in e and not '*' in e:\n total += pow(float(e.split('^')[0]), float(e.split('^')[1]))\n to_rm.append(i)\n for i in sorted(to_rm)[::-1]:\n expr.pop(i)\n if total:\n expr.append(str(total))\n if totalx:\n expr.append(str(totalx) + 'X')\n print('AFTER CALL POW :',expr)\n\ndef red_multiple_div_mul(expr):\n print('BEFORE CALL RED_MUL_DIV :',expr)\n pattern = re.compile(r'([\\*\\/])')\n #print('EXPR in div-mul : ',expr)\n to_rm = []\n big_total = 0\n for ind, e in enumerate(expr):\n total = 0\n if '/' in e and '*' in e and 'x' not in e.lower():\n to_rm.append(ind)\n exp = re.split(pattern, e)\n print('E inside ok : ',exp)\n nb = ''\n op = ''\n for i,e in enumerate(exp):\n #print('\\n\\t\\tTour : ',i,'\\tNb : ',nb,'\\tOp : ',op,'\\tTotal et big : ',total,'||',big_total,'\\n')\n if '*' not in e and '/' not in e:\n #print('E to red : ',e)\n nb = split_expr_red(e)\n else:\n op = e.strip()\n #print(\"OP = \",op,'\\tNb : ',nb,'\\tTotal : ',total)\n if op == '' and nb != '':\n total = float(nb)\n elif op == '*' and nb != '':\n total *= float(nb)\n elif op == '/' and nb != '':\n total /= float(nb)\n else:\n print('GROS BUG RED MULTIPLE DIV MUL')\n nb = ''\n big_total += total\n #print('BiiiG TOTAL : ',big_total)\n for i in sorted(to_rm)[::-1]:\n expr.pop(i)\n if big_total != 0:\n expr.append(str(big_total))\n print('AFTER CALL RED_MUL_DIV :',expr)\n\ndef split_expr_red(expr):\n to_ret = []\n if type(expr) == list:\n for i in expr:\n i = [i]\n red_entier_all(i)\n to_ret += i\n return to_ret\n expr = [expr]\n red_entier_all(expr)\n expr = \"\".join(expr)\n return expr\n\n\ndef red_entier_all(expr):\n red_add(expr)\n red_sub(expr)\n red_div(expr)\n red_mul(expr)\n red_pow(expr)\n red_multiple_div_mul(expr)\n #red_ent(expr) no it does + /// same shit as red_add ?\n","sub_path":"ComputerV1/red_entier.py","file_name":"red_entier.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"318250056","text":"import torch.nn as nn\n\nfrom SimCLR.Blocks import ResNetBlock\nfrom SimCLR.Tools import activation_func\n\n\nclass ResNetLayer(nn.Module):\n '''\n A ResNet layer class that creates a layer from n blocks. \n This layer will have one block that scales then the rest compute\n '''\n\n def __init__(self, in_channels, mid_channels, out_channels, block=ResNetBlock,\n n=3, activation='relu', *args, **kwargs):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n # if there is scaling between layers we need to account for that\n# if in_channels != out_channels:\n# resampling = 2\n# else:\n resampling = 1\n\n # not exactly what the paper describes, but it preforms well\n self.blocks = nn.Sequential(\n block(\n in_channels, mid_channels, out_channels, resampling=resampling,\n *args, **kwargs\n ),\n *[\n block(\n out_channels, mid_channels, out_channels,\n *args, **kwargs, resampling=1\n ) for _ in range(n-1)\n ],\n nn.BatchNorm2d(out_channels),\n activation_func[activation]\n )\n\n def forward(self, x):\n return self.blocks(x)\n\n\nclass ResNetEncoder(nn.Module):\n '''\n The encoder expands the feature size by stacking layers with\n increasing feature sizes\n I tried to base this on table 1 of the paper\n '''\n\n def __init__(self, in_channels=3, blocks_sizes=[2**i for i in [6, 7, 8 ,9]],\n blocks_layers=[3, 4, 6, 3], activation='relu', block=ResNetBlock,\n *args, **kwargs):\n super().__init__()\n self.blocks_sizes = blocks_sizes\n self.out_blocks_sizes = [i*4 for i in self.blocks_sizes]\n\n # this first block will bring the data in to be processed\n # this is based on table 1 of the paper\n self.first_block = nn.Sequential(\n nn.Conv2d(\n in_channels, self.blocks_sizes[0],\n kernel_size=7, stride=2, padding=3, bias=False\n ),\n nn.BatchNorm2d(self.blocks_sizes[0]),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n activation_func[activation]\n )\n\n # organize the in and out sizes in the adjoining pairs\n in_out_sizes = list(zip(self.out_blocks_sizes[:-1], self.blocks_sizes[1:], \n self.out_blocks_sizes[1:]))\n\n # the first layer maintains the size from the first block\n # but the subsequent layers begin to scale\n self.blocks = nn.ModuleList(\n [\n ResNetLayer(\n self.blocks_sizes[0], self.blocks_sizes[0], self.out_blocks_sizes[0],\n n=blocks_layers[0], activation=activation, block=block,\n *args, **kwargs\n ),\n *[ResNetLayer(\n in_channels, mid_channels, out_channels,\n n=n, activation=activation, block=block,\n *args, **kwargs\n ) for (in_channels, mid_channels, out_channels), n in zip(in_out_sizes, blocks_layers[1:])]\n ]\n )\n\n self.AAP = nn.AdaptiveAvgPool2d((1, 1))\n\n def forward(self, x):\n x = self.first_block(x)\n for block in self.blocks:\n x = block(x)\n x = self.AAP(x)\n x = x.view(x.shape[0], -1)\n return x\n\n\nclass ResNetDecoder(nn.Module):\n '''\n The decoder takes the output from the encoder and creates a\n final output\n This uses a linear layer\n '''\n\n def __init__(self, in_features, n_classes):\n super().__init__()\n self.linear = nn.Linear(in_features, n_classes)\n\n def forward(self, x):\n x = self.linear(x)\n return x\n","sub_path":"SimCLR/Layers.py","file_name":"Layers.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"477851817","text":"import socket\nimport sys\n\n\nserver_address = (\"0.0.0.0\",2100)\n\nsock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nsock.bind(server_address)\n\nprint(\"Start to Listen at %s,%d\" %(server_address[0],server_address[1]))\nsock.listen(1)\n\nwhile True:\n connection,client_address = sock.accept()\n\n try:\n while True:\n data = connection.recv(16)\n if data:\n print(\"received \" + data)\n print(\"sending data back to the client: \" , client_address)\n connection.sendall(data)\n else:\n print(\"no more data from \" , client_address)\n break\n finally:\n connection.close()\n\n","sub_path":"server_echo.py","file_name":"server_echo.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"486627549","text":"import json\nimport matplotlib.pyplot as plt\nimport os, glob\nimport pandas as pd\nimport seaborn as sns\n\nfrom libraries.lib_drought import *\nfrom libraries.lib_gather_data import *\nfrom libraries.plot_hist import plot_simple_hist\nfrom libraries.pandas_helper import categorize_strings\n#\n# country-specific\nfrom libraries.lib_RO_housing_plots import housing_plots\n\npd.set_option('display.width', 220)\nsns.set_style('whitegrid')\nsns_pal = sns.color_palette('Set1', n_colors=8, desat=.5)\n\nglobal model\nmodel = os.getcwd()\n\n# People/hh will be affected or not_affected, and helped or not_helped\naffected_cats = pd.Index(['a', 'na'], name='affected_cat') # categories for social protection\nhelped_cats = pd.Index(['helped','not_helped'], name='helped_cat')\n\n# These parameters could vary by country\nreconstruction_time = 3.00 # time needed for reconstruction\nreduction_vul = 0.20 # how much early warning reduces vulnerability\ninc_elast = 1.50 # income elasticity (of capital?)\nmax_support = 0.05 # max expenditure on PDS as fraction of GDP (set to 5% based on PHL)\nnominal_asset_loss_covered_by_PDS = 0.80 # also, elsewhere, called 'shareable'\n\n# Dictionary for standardizing HIES column names that is SPECIFIC TO ROMANIA\nhbs_dict = {'regiune':'Region','judet':'County','codla':'hhcode','coefj':'hhwgt','cpers':'hhsize','R44':'hhinc',\n 'REGIUNE':'Region','JUDET':'County','CODLA':'hhcode','COEFJ':'hhwgt','CPERS':'hhsize',\n 'NRGL':'nrgl','MEDIU':'mediu','CENTRA':'centra'}\n\n# Define directories\ndef set_directories(myCountry): # get current directory\n \"\"\"Sets global directories for all functions in the library, so set_directories\n needs to be run before other functions here\n\n Parameters\n ----------\n myCountry : ISO code for country\n\n Returns\n -------\n str\n intermediates relative folder path\n \"\"\"\n global inputs, intermediate\n inputs = '~/Desktop/BANK/hh_resilience_model/inputs/'+myCountry+'/' # get inputs data directory\n intermediate = '~/Desktop/BANK/hh_resilience_model/intermediate/'+myCountry+'/' # get outputs data directory\n\n return True\n\ndef get_economic_unit(myC):\n eca_countries = ['AM','BG','TR','HR','GR','GE','RO','AL']\n if myC in eca_countries: return 'Region'\n if myC == 'PH': return 'region'#'province'\n if myC == 'FJ': return 'Division'#'tikina'\n if myC == 'SL': return 'district'\n if myC == 'MW': return 'district'\n if myC == 'BO': return 'departamento'\n assert(False)\n\ndef get_currency(myC):\n \"\"\"Dictionary lookup of currency, multiplier, and exchange rate from ISO key\"\"\"\n d = {'PH': ['b. PhP',1.E9,1./50.],\n 'FJ': ['k. F\\$',1.E3,1./2.],\n 'SL': ['LKR',1.E9,1./150.],\n 'MW': ['MWK',1.E9,1./724.64],\n 'BO': ['BoB',1.E9,1./6.9],\n 'AM': ['PPP',1E0,1.],\n 'AL': ['PPP',1E0,1.],\n 'HR': ['PPP',1E0,1.],\n 'BG': ['PPP',1E0,1.],\n 'RO': ['PPP',1.E9,1/4.166667],\n 'TR': ['PPP',1E0,1.],\n 'GE': ['PPP',1E0,1.],\n 'GR': ['PPP',1E0,1.]\n }\n try:\n return d[myC]\n except KeyError:\n return ['XXX',1E0,1]\n\ndef get_hhid_elements(myC):\n if myC == 'RO': return ['Region','County','centra','hhcode','nrgl','mediu']\n return None\n\ndef get_places(myC):\n \"\"\"Returns a df with economic unit as key and the population per economic unit.\n\n Country Notes\n -------------\n For SL, in addition to calculating population by district which is\n sum of psus in household * household weights, this function also saves\n the household weights, sizes, and # children, religion, ethnicity to a\n csv for future use.\n\n Parameters\n ----------\n myC : str\n ISO Country reference\n\n Returns\n -------\n df : DataFrame\n economic unit is the index\n population is the first and only column.\n\n \"\"\"\n economy = get_economic_unit(myC)\n\n if myC == 'PH':\n df_prov = pd.read_excel('~/Desktop/BANK/hh_resilience_model/inputs/PH/population_2015.xlsx',sheet_name='population').set_index('province').rename(columns={'population':'psa_pop'})\n df_reg = pd.read_csv(inputs+'prov_to_reg_dict.csv').set_index('region')\n df_reg = pd.merge(df_prov,df_reg,left_index=True,right_on='province').sum(level='region')[['psa_pop']]\n\n return df_reg\n\n\n\n else: return None\n\ndef get_places_dict(myC,reverse=False):\n \"\"\"Get economy-level names of provinces or districts (p-code)\n and larger regions if available (r-code)\n Parameters\n ----------\n myC : str\n ISO\n\n Returns\n -------\n p_code : Series\n Series, province/district code as index, province/district names as values.\n r_code : Series\n region code as index, region names as values.\n \"\"\"\n\n p_code,r_code = None,None\n\n if myC == 'PH':\n p_code = pd.read_excel('in/FIES_provinces.xlsx')[['province_code','province_AIR']].set_index('province_code').squeeze()\n #p_code[97] = 'Zamboanga del Norte'\n #p_code[98] = 'Zamboanga Sibugay'\n if reverse: p_code = p_code.reset_index().set_index('province_AIR')\n\n r_code = pd.read_excel('in/FIES_regions.xlsx')[['region_code','region_name']].set_index('region_code').squeeze()\n if reverse: r_code = r_code.reset_index().set_index('region_name')\n\n try: p_code = p_code.to_dict()\n except: pass\n try: r_code = r_code.to_dict()\n except: pass\n\n return p_code,r_code\n\ndef load_survey_data(myC):\n df = None\n #Each survey/country should have the following:\n # -> hhid household id\n # -> hhinc household income? but seems to be expenditure (SL)\n # -> pcinc household income per person\n # -> hhwgt number of households this line is 'representative' of\n # -> pcwgt total population this line is representative of\n # -> hhsize household size\n # -> hhsize_ae household size2\n # -> hhsoc social payments (government and remittances)\n # -> pcsoc per person social payments\n # -> ispoor\n # -> has_ew\n\n if myC == 'PH':\n #path = '2015FIES/fies2015_complete.csv'\n path = '~/Desktop/BANK/hh_resilience_model/inputs/PH/FIES2015.csv'\n df = pd.read_csv(path)[['w_regn','w_prov','w_mun','w_bgy','w_ea','w_shsn','w_hcn',\n 'walls','roof',\n 'totex','cash_abroad','regft',\n 'hhwgt','poorhh','totdis','tothrec','pcinc_s','pcinc_ppp11','pcwgt',\n 'agri_sal','nonagri_sal','cash_abroad','cash_domestic','othin',\n 't930220',# total public receipts\n 't930221',# cct incl 4Ps transfers\n #'eacfggrs',# Crop Farming and Gardening gross receipts \n #'ealprgrs',# Livestock and Poultry Raising gross receipts \n #'eafisgrs',# Fishing gross receipts \n #'eaforgrs',# Forestry and Hunting gross receipts \n 'eatrdgrs',# Wholesale and Retail Trade gross receipts \n 'eamfggrs',# Manufacturing gross receipts \n 'eacpsgrs',# Community,Social,Rec'l,Personal Services gross receipts \n 'eatcsgrs',# Transportation,Storage and Comcn Services gross receipts \n 'eamnggrs',# Mining and Quarrying gross receipts \n 'net_cons',\n 'eacongrs',# Construction gross receipts \n 'eanecgrs',# Entrepreneurial Activities NEC gross receipts \n 'eainc', # Total Income from Entrepreneurial Activites\n 'job',# Household Head Job or Business Indicator (2nd visit only)\n 'occup_fin',# Household Head Occupation (2nd visit only)\n 'employed_pay',# Total number of family members employed for pay (2nd visit only)\n 'employed_prof',# Total number of family members employed for profit (2nd visit only)\n 'job',# Household Head Job or Business Indicator (2nd visit only)\n 'cw', # Household Head Class of Worker (2nd visit only)\n 'spouse_emp', #Spouse has job/business (2nd visit only)\n 'majsr',# Major Grouping of Main Source of Income \n 'minsr',# Detailed Grouping of Main Source of income \n 'radio_qty','tv_qty','cellphone_qty','pc_qty',\n 'savings','invest',\n ]]\n\n df = df.rename(columns={'tothrec':'hhsoc','poorhh':'ispoor','totex':'hhexp',\n 't930220':'total_public','t930221':'cct4P'})\n\n df['hhsize'] = df['pcwgt']/df['hhwgt']\n #df['hhsize_ae'] = df['pcwgt']/df['hhwgt']\n #df['aewgt'] = df['pcwgt'].copy()\n\n # Per capita expenditures\n df['pcexp'] = df['hhexp']/df['hhsize']\n\n # These lines use income as income\n df = df.rename(columns={'pcinc_s':'pcinc'})\n df['hhinc'] = df[['pcinc','hhsize']].prod(axis=1)\n\n # These lines use disbursements as proxy for income\n #df = df.rename(columns={'totdis':'hhinc'})\n #df['pcinc'] = df['hhinc']/df['hhsize']\n #df['aeinc'] = df['pcinc'].copy()\n\n df['ppp_factor'] = df.eval('(365*pcinc_ppp11*hhsize)/hhinc')# <-- annual PPP/LCU\n\n df['pcsoc'] = df['hhsoc']/df['hhsize']\n\n #df['tot_savings'] = df[['savings','invest']].sum(axis=1,skipna=False)\n df['savings'] = df['savings'].fillna(-1)\n df['invest'] = df['invest'].fillna(-1)\n\n df['axfin'] = 0\n df.loc[(df.savings>0)|(df.invest>0),'axfin'] = 1\n\n df['est_sav'] = df[['axfin','pcinc']].prod(axis=1)/2.\n\n #df['has_ew'] = df[['radio_qty','tv_qty','cellphone_qty','pc_qty']].sum(axis=1).clip(upper=1)\n #df = df.drop(['radio_qty','tv_qty','cellphone_qty','pc_qty'],axis=1)\n\n _mc_lo,_mc_hi = get_middleclass_range('PH')\n df['ismiddleclass'] = (df.pcinc>=_mc_lo)#&(df.pcinc<=_mc_hi)\n\n _lo,_hi = get_secure_range('PH')\n df['issecure'] = (df.pcinc>=_lo)&(df.pcinc<=_hi)\n\n _lo,_hi = get_vulnerable_range('PH')\n df['isvulnerable'] = (df.pcinc>=_lo)&(df.pcinc<=_hi)\n\n # Run savings script\n df['country'] = 'PH'\n listofquintiles=np.arange(0.10, 1.01, 0.10)\n df = df.reset_index().groupby('country',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.pcinc),reshape_data(x.pcwgt),listofquintiles),\n 'decile_nat',sort_val='pcinc')).drop(['index'],axis=1)\n df = df.reset_index().groupby('w_regn',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.pcinc),reshape_data(x.pcwgt),listofquintiles),\n 'decile_reg',sort_val='pcinc')).drop(['index'],axis=1)\n df = df.reset_index().set_index(['w_regn','decile_nat','decile_reg']).drop('index',axis=1)\n\n df['precautionary_savings'] = df['pcinc']-df['pcexp']\n\n # Savings rate by national decile\n _ = pd.DataFrame(index=df.sum(level='decile_nat').index)\n _['income'] = df[['pcinc','pcwgt']].prod(axis=1).sum(level='decile_nat')/df['pcwgt'].sum(level='decile_nat')\n _['expenditures'] = df[['pcexp','pcwgt']].prod(axis=1).sum(level='decile_nat')/df['pcwgt'].sum(level='decile_nat')\n _['precautionary_savings'] = _['income']-_['expenditures']\n _.sort_index().to_csv('csv/hh_savings_by_decile.csv')\n\n # Savings rate by decile (regionally-defined) & region\n _ = pd.DataFrame(index=df.sum(level=['w_regn','decile_reg']).index)\n _['income'] = df[['pcinc','pcwgt']].prod(axis=1).sum(level=['w_regn','decile_reg'])/df['pcwgt'].sum(level=['w_regn','decile_reg'])\n _['expenditures'] = df[['pcexp','pcwgt']].prod(axis=1).sum(level=['w_regn','decile_reg'])/df['pcwgt'].sum(level=['w_regn','decile_reg'])\n _['precautionary_savings'] = _['income']-_['expenditures']\n _.sort_index().to_csv('csv/hh_savings_by_decile_and_region.csv')\n\n # Savings rate for hh in subsistence (natl average)\n listofquartiles=np.arange(0.25, 1.01, 0.25)\n df = df.reset_index().groupby('country',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.precautionary_savings),reshape_data(x.pcwgt),listofquartiles),\n 'nat_sav_quartile',sort_val='precautionary_savings'))\n df = df.reset_index().groupby('w_regn',sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.precautionary_savings),reshape_data(x.pcwgt),listofquartiles),\n 'reg_sav_quartile',sort_val='precautionary_savings')).drop(['index'],axis=1)\n df = df.reset_index().set_index(['w_regn','decile_nat','decile_reg']).drop('index',axis=1).sort_index()\n\n _ = pd.DataFrame()\n _.loc['subsistence_savings_rate','hh_avg'] = (df.loc[df.pcinc consumption\n print('Using per cap income as c')\n df['c'] = df['pcinc'].copy()\n\n # Drop unused columns\n df = df.reset_index().set_index(['w_regn','w_prov','w_mun','w_bgy','w_ea','w_shsn','w_hcn'])\n df = df.drop([_c for _c in ['country','decile_nat','decile_reg','est_sav','tot_savings','savings','invest',\n 'precautionary_savings','index','level_0','cash_domestic'] if _c in df.columns],axis=1)\n\n # Standardize province info\n prov_code,region_code = get_places_dict(myC)\n\n df = df.reset_index()\n get_hhid_FIES(df)\n df = df.rename(columns={'w_prov':'province','w_regn':'region'}).reset_index()\n df['province'].replace(prov_code,inplace=True) \n df['region'].replace(region_code,inplace=True)\n df = df.reset_index().set_index(get_economic_unit(myC)).drop(['index','level_0'],axis=1)\n #\n #print(df.head())\n #assert(False)\n #\n #get_hhid_FIES(cat_info)\n #cat_info = cat_info.rename(columns={'w_prov':'province','w_regn':'region'}).reset_index()\n #cat_info['province'].replace(prov_code,inplace=True) \n #cat_info['region'].replace(region_code,inplace=True)\n #cat_info = cat_info.reset_index().set_index(economy).drop(['index','level_0'],axis=1)\n \n # There's no region info in df--put that in...\n #df = df.reset_index().set_index('province')\n #cat_info = cat_info.reset_index().set_index('province')\n #df['region'] = cat_info[~cat_info.index.duplicated(keep='first')].region\n \n #try: df.reset_index()[['province','region']].to_csv('../inputs/PH/prov_to_reg_dict.csv',header=True)\n #except: print('Could not update regional-provincial dict')\n \n # Manipulate PSA (non-FIES) dataframe\n #df = df.reset_index().set_index(economy)\n #df['psa_pop'] = df.sum(level=economy)\n #df = df.mean(level=economy)\n\n\n # Assing weighted household consumption to quintiles within each province\n print('Finding quintiles')\n economy = df.index.names[0]\n listofquintiles=np.arange(0.20, 1.01, 0.20)\n # groupby apply takes each economy and then applies the function separately to each economy.\n # https://pandas.pydata.org/pandas-docs/stable/generated/pandas.core.groupby.GroupBy.apply.html\n # Finds quintiles by district\n df = df.reset_index().groupby(economy,sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.c),reshape_data(x.pcwgt),listofquintiles),'quintile'))\n\n print('Finding deciles')\n # finds deciles by district\n listofdeciles = np.arange(0.10, 1.01, 0.10)\n df = df.reset_index().groupby(economy,sort=True).apply(lambda x:match_percentiles(x,perc_with_spline(reshape_data(x.c),reshape_data(x.pcwgt),listofdeciles),'decile'))\n # drop extraneous columns\n df.drop([icol for icol in ['level_0','index','pctle_05','pctle_05_nat'] if icol in df.columns],axis=1,inplace=True)\n\n # Last thing: however 'c' was set (income or consumption), pcsoc can't be higher than 0.99*that!\n df['pcsoc'] = df['pcsoc'].clip(upper=0.99*df['c'])\n \n return df\n\ndef get_df2(myC):\n if myC == 'PH':\n df2 = pd.read_excel('~/Desktop/BANK/hh_resilience_model/inputs/PH/PSA_compiled.xlsx',skiprows=1)[['province','gdp_pc_pp','pop','shewp','shewr']].set_index('province')\n df2['gdp_pp'] = df2['gdp_pc_pp']*df2['pop']\n return df2\n else: return None\n\ndef get_vul_curve(myC,struct):\n \"\"\"Get vulnerability of materials used in construction.\n\n Parameters\n ----------\n myC : str\n ISO2 of country\n struct : str\n which building structure? Sheet names in vulnerrability curves in xlsx.\n\n Returns\n -------\n df\n df with two columns -\n desc: key used in census for housing material\n\n \"\"\"\n\n df = None\n\n if myC == 'PH':\n df = pd.read_excel('~/Desktop/BANK/hh_resilience_model/inputs/PH/vulnerability_curves_FIES.xlsx',sheet_name=struct)[['desc','v']]\n\n return df\n\ndef get_infra_stocks_data(myC):\n if myC == 'FJ':\n infra_stocks = pd.read_csv(inputs+'infra_stocks.csv',index_col='sector')\n return infra_stocks\n else:return None\n\ndef get_wb_or_penn_data(myC):\n #iso2 to iso3 table\n names_to_iso2 = pd.read_csv(inputs+'names_to_iso.csv')[['iso2','country']].drop_duplicates().set_index('country').squeeze()\n K = pd.read_csv(inputs+'avg_prod_k_with_gar_for_sids.csv',index_col='Unnamed: 0')\n wb = pd.read_csv(inputs+'wb_data.csv',index_col='country')\n wb['Ktot'] = wb.gdp_pc_pp*wb['pop']/K.avg_prod_k\n wb['GDP'] = wb.gdp_pc_pp*wb['pop']\n wb['avg_prod_k'] = K.avg_prod_k\n\n wb['iso2'] = names_to_iso2\n return wb.set_index('iso2').loc[myC,['Ktot','GDP','avg_prod_k']]\n\ndef get_rp_dict(myC):\n return pd.read_csv(inputs+\"rp_dict.csv\").set_index(\"old_rp\").new_rp\n\ndef get_infra_destroyed(myC,df_haz):\n\n #print(get_infra_stocks_data(myC))\n\n infra_stocks = get_infra_stocks_data(myC).loc[['transport','energy','water'],:]\n infra_stocks['infra_share'] = infra_stocks.value_k/infra_stocks.value_k.sum()\n\n print(infra_stocks)\n\n hazard_ratios_infra = broadcast_simple(df_haz[['frac_inf','frac_destroyed_inf']],infra_stocks.index)\n hazard_ratios_infra = pd.merge(hazard_ratios_infra.reset_index(),infra_stocks.infra_share.reset_index(),on='sector',how='outer').set_index(['Division','hazard','rp','sector'])\n hazard_ratios_infra['share'] = hazard_ratios_infra['infra_share']*hazard_ratios_infra['frac_inf']\n\n transport_losses = pd.read_csv(inputs+\"frac_destroyed_transport.csv\").rename(columns={\"ti_name\":\"Tikina\"})\n transport_losses['Division'] = (transport_losses['tid']/100).astype('int')\n prov_code,_ = get_places_dict(myC)\n rp_dict = get_rp_dict(myC)\n transport_losses['Division'] = transport_losses.Division.replace(prov_code)\n #sums at Division level to be like df_haz\n transport_losses = transport_losses.set_index(['Division','hazard','rp']).sum(level=['Division','hazard','rp'])\n transport_losses[\"frac_destroyed\"] = transport_losses.damaged_value/transport_losses.value\n #if there is no result in transport_losses, use the PCRAFI data (from df_haz):\n transport_losses = pd.merge(transport_losses.reset_index(),hazard_ratios_infra.frac_destroyed_inf.unstack('sector')['transport'].to_frame(name=\"frac_destroyed_inf\").reset_index(),on=['Division','hazard','rp'],how='outer')\n transport_losses['frac_destroyed'] = transport_losses.frac_destroyed.fillna(transport_losses.frac_destroyed_inf)\n transport_losses = transport_losses.set_index(['Division','hazard','rp'])\n\n hazard_ratios_infra = hazard_ratios_infra.reset_index('sector')\n hazard_ratios_infra.ix[hazard_ratios_infra.sector=='transport','frac_destroyed_inf'] = transport_losses[\"frac_destroyed\"]\n hazard_ratios_infra = hazard_ratios_infra.reset_index().set_index(['Division','hazard','rp','sector'])\n\n return hazard_ratios_infra.rename(columns={'frac_destroyed_inf':'frac_destroyed'})\n\ndef get_service_loss(myC):\n if myC == 'FJ':\n service_loss = pd.read_csv(inputs+'service_loss.csv').set_index(['hazard','rp'])[['transport','energy','water']]\n service_loss.columns.name='sector'\n a = service_loss.stack()\n a.name = 'cost_increase'\n infra_stocks = get_infra_stocks_data(myC).loc[['transport','energy','water'],:]\n service_loss = pd.merge(pd.DataFrame(a).reset_index(),infra_stocks.e.reset_index(),on=['sector'],how='outer').set_index(['sector','hazard','rp'])\n return service_loss\n else:return None\n\n\ndef get_poverty_line(myC,by_district=True,sec=None):\n \"\"\"Get poverty line either as a Series (if by_district is True)\n or as a float (if by_district is False).\n\n Parameters\n ----------\n myC : str\n ISO of country\n by_district : bool\n use a district poverty line, else, use a national level poverty line\n sec : str\n data may have urban or rural poverty lines, instead of by district.\n for the countries that need this, it should return a float.\n\n Returns\n -------\n Series/float\n poverty lines either by district (if series), or float (if not)\n\n \"\"\"\n\n pov_line = 0.0\n\n if myC == 'PH': pov_line = 22302.6775#21240.2924\n \n return pov_line\n\ndef get_middleclass_range(myC):\n if myC in ['AL','AM','BG','HR','GE','GR','RO','TR']:\n _pl = get_poverty_line(myC)\n _lower = _pl*(15/5.5)\n _upper = _pl*(45/5.5)\n #_upper = \n\n elif myC == 'PH':\n _pl = get_poverty_line(myC)\n _lower = _pl*(15/3.2)\n _upper = 0#_pl*(50/1.90) \n\n else: assert(False)\n return(_lower,_upper)\n\ndef get_secure_range(myC):\n if myC == 'PH':\n _pl = get_poverty_line(myC)\n _lower = _pl*(5.5/3.2)\n _upper = _pl*(15/3.2) \n\n else: assert(False)\n return(_lower,_upper)\n\ndef get_vulnerable_range(myC):\n if myC == 'PH':\n _pl = get_poverty_line(myC)\n _lower = _pl*(3.2/3.2)\n _upper = _pl*(5.5/3.2) \n\n else: assert(False)\n return(_lower,_upper)\n\n\n\ndef get_subsistence_line(myC):\n\n if myC == 'PH': return 14832.0962*(22302.6775/21240.2924)\n else:\n print('No subsistence info. Returning False')\n return False\n\ndef get_to_USD(myC):\n eca_countries = ['AM','BG','TR','HR','GR','GE','RO','AL']\n if myC in eca_countries: return 1.0\n\n if myC == 'PH': return 50.70\n if myC == 'FJ': return 2.01\n if myC == 'SL': return 153.76\n if myC == 'MW': return 720.0\n if myC == 'RO': return 4.0\n if myC == 'BO': return 6.93\n if myC == 'AM': return 477\n assert(False)\n\ndef get_pop_scale_fac(myC):\n #if myC == 'PH' or myC == 'FJ' or myC == 'MW' or myC == 'SL':\n return [1.E3,' (,000)']\n\ndef get_avg_prod(myC):\n \"\"\"Returns values from the global resilience model for the average productivity of capital\"\"\"\n if myC == 'PH': return 0.273657188280276\n elif myC == 'FJ': return 0.336139019412\n elif myC == 'SL': return 0.337960802589002\n elif myC == 'MW': return 0.253076569219416\n elif myC == 'RO': return (277174.8438/1035207.75)\n elif myC == 'BO': return 0.4218342\n assert(False)\n\ndef get_demonym(myC):\n\n if myC == 'PH': return 'Filipinos'\n if myC == 'FJ': return 'Fijians'\n if myC == 'SL': return 'Sri Lankans'\n if myC == 'MW': return 'Malawians'\n if myC == 'RO': return 'Romanians'\n if myC == 'BO': return 'Bolivians'\n return 'individuals'\n\n\n\ndef get_all_hazards(myC,df):\n temp = (df.reset_index().set_index(['hazard'])).copy()\n temp = temp[~temp.index.duplicated(keep='first')]\n return [i for i in temp.index.values if i != 'x']\n\ndef get_all_rps(myC,df):\n temp = (df.reset_index().set_index(['rp'])).copy()\n temp = temp[~temp.index.duplicated(keep='first')]\n return [int(i) for i in temp.index.values]\n\ndef int_w_commas(in_int):\n in_str = str(in_int)\n in_list = list(in_str)\n out_str = ''\n\n if in_int < 1E3: return in_str\n if in_int < 1E6: return in_str[:-3]+','+in_str[-3:]\n if in_int < 1E9: return in_str[:-6]+','+in_str[-6:-3]+','+in_str[-3:]\n if in_int < 1E12: return in_str[:-9]+','+in_str[-9:-6]+','+in_str[-6:-3]+','+in_str[-3:]\n","sub_path":"libraries/.ipynb_checkpoints/lib_country_dir-ORIGINAL-checkpoint.py","file_name":"lib_country_dir-ORIGINAL-checkpoint.py","file_ext":"py","file_size_in_byte":26659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"614518470","text":"from __future__ import division\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\nfrom selenium import webdriver\r\nfrom lxml import html\r\nfrom selenium.webdriver import DesiredCapabilities\r\nfrom urllib.parse import urlparse\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport pandas as pd\r\nfrom time import time\r\n\r\nDOMAIN = 'vk.com'\r\nHOST = 'http://' + DOMAIN\r\nFORBIDDEN_PREFIXES = ['#', 'tel:', 'mailto:']\r\nlinks = set() # множество всех ссылок\r\nheaders = {\r\n'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\nresponse = requests.get(HOST, headers=headers)\r\n\r\nchrome_options = webdriver.ChromeOptions()\r\n# директория сохранения профиля\r\nchrome_options.add_argument(\"--user-data-dir=C:/Users/galya/PycharmProjects/pythonProject11\")\r\n#dcap = dict(DesiredCapabilities.CHROME)\r\nchrome = webdriver.Chrome('D:\\\\chromedriver\\\\chromedriver.exe')\r\n\r\nchrome.get(HOST)\r\ntimes = []\r\n\r\ndef add_all_links_recursive(url, maxdepth=1):\r\n\r\n #глубина рекурсии не более `maxdepth`\r\n\r\n # список ссылок, от которых в конце мы рекурсивно запустимся\r\n links_to_handle_recursive = []\r\n #получаем html код страницы\r\n request = requests.get(url, headers=headers)\r\n # парсим его с помощью BeautifulSoup\r\n soup = BeautifulSoup(request.content, 'lxml')\r\n # рассматриваем все теги , при том, что href - не пустые\r\n for tag_a in soup.find_all('a', href=lambda v: v is not None):\r\n link = tag_a['href']\r\n\r\n # если ссылка не начинается с одного из запрещённых префиксов\r\n if all(not link.startswith(prefix) for prefix in FORBIDDEN_PREFIXES):\r\n # проверяем, является ли ссылка относительной\r\n if link.startswith('/') and not link.startswith('//'):\r\n # преобразуем относительную ссылку в абсолютную\r\n link = HOST + link\r\n # проверяем, что ссылка ведёт на нужный домен\r\n # и что мы ещё не обрабатывали такую ссылку\r\n if urlparse(link).netloc == DOMAIN and link not in links:\r\n links.add(link)\r\n links_to_handle_recursive.append(link)\r\n\r\n if maxdepth > 0:\r\n for link in links_to_handle_recursive:\r\n add_all_links_recursive(link, maxdepth=maxdepth - 1)\r\n\r\ndef main():\r\n print(\"\\nSite: \" + DOMAIN)\r\n urls = []\r\n add_all_links_recursive(HOST + '/')\r\n for link in links:\r\n start = time()\r\n result = chrome.get(link)\r\n end = time()\r\n urls.append(link)\r\n times.append(end - start)\r\n s = end - start;\r\n\r\n print(\"\\n\")\r\n # Ограничение вывода строк - максимум 10 000 (чтобы не выводило многоточия)\r\n pd.options.display.max_rows = 10000\r\n df = pd.DataFrame({'URL': urls, 'Time of open, sec': times})\r\n print(df)\r\n writer = pd.ExcelWriter('C:/Users/galya/Desktop/Data parsing/testing.xlsx')\r\n df.to_excel(writer, 'Лист1')\r\n writer.save()\r\n print(\" Домены с временем их открытия выгружены в таблицу Excel в папке Data parsing.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"521063683","text":"import pandas as pd\nfrom datetime import datetime\n\n\nclass AnalyseLogs:\n Key_words = ['STARTED - TIME', 'ENDED - TIME', 'ENDED BY CC 0010 - TIME', 'USERID', '$HASP373', 'CLASS']\n Liblist = ['STEPLIB', 'DBRMLIB', 'LOADLIB']\n abend_list = ['ABEND=']\n key_result = list()\n Utility_result = list()\n condcode_result = list()\n final_result = list()\n abend_result = list()\n reason_list = list()\n total_steps = list()\n Lib_result = list()\n Temp_result = list()\n res = list()\n cpu = list()\n temp_word = temp_string = file_name = ''\n step_block = list()\n temp_lib_res = list()\n more_abend_info = list()\n temp_ab_res = list()\n temp_ab_res1 = list()\n ab_data = list()\n ab_list_data = list()\n job_info = list()\n temp_job_info = list()\n jes_df = pd.read_excel(\"Solution Catalog.xlsx\", dtype=str)\n jescodes = jes_df[\"Token Identifier\"].tolist()\n\n def extract_keyword(self, data_list):\n\n # print('EXTRACT KEYWORD FUNTION')\n # print(self.Key_words)\n for line in data_list:\n line = line.rstrip('\\n')\n\n for i in self.Key_words:\n if line.__contains__(i):\n index = line.find(i)\n if index != -1:\n temp_string = line[index + len(i) + 1:]\n for char in temp_string:\n if char != ' ':\n self.temp_word += char\n else:\n break\n self.key_result.append(self.temp_word)\n self.temp_word = ''\n if line.__contains__('$HASP373'):\n temp_word = line[10:20]\n self.key_result.append(temp_word)\n self.temp_word = ' '\n if line.strip() == '0------ JES2 JOB STATISTICS ------':\n break\n\n start_time = self.key_result[4]\n end_time = self.key_result[5]\n # print( start_time )\n # print( end_time )\n fmt = ' %H.%M.%S'\n runtime = datetime.strptime(end_time, fmt) - datetime.strptime(start_time, fmt)\n # print( tdelta )\n self.key_result.append(runtime)\n for line in data_list:\n line = line.rstrip('\\n')\n if line.__contains__('CPU:'):\n start = line.find(\"CPU:\") + len(\"CPU:\")\n end = line.find(\"SRB:\")\n self.cpu = line[start:end]\n\n self.cpu = self.cpu.replace(\" HR \", \":\")\n self.cpu = self.cpu.replace(\" MIN \", \":\")\n self.cpu = self.cpu.replace(\" SEC\", \" \")\n print(self.cpu)\n self.key_result.append(self.cpu)\n # print(self.key_result)\n\n def extarct_steps(self, data_list):\n\n # print('extarct_steps function')\n d = list()\n for line in data_list:\n line = line.rstrip('\\n')\n if line.__contains__('EXEC PGM'):\n d.append(line)\n self.total_steps = self.total_steps + d\n # print(self.total_steps)\n\n def extract_utility(self, data_list):\n\n # print('extract_utility')\n for line in data_list:\n line = line.rstrip('\\n')\n if line.__contains__('EXEC PGM'):\n index = line.find('EXEC PGM')\n # print(index)\n # print(len('EXEC PGM'))\n if index != -1:\n\n temp_string = line[index + len('EXEC PGM') + 1:]\n for char in temp_string:\n if char != ',':\n self.temp_word += char\n else:\n break\n self.Utility_result.append(self.temp_word)\n self.temp_word = ''\n # print(self.Utility_result)\n\n def extract_condcode(self, data_list):\n\n # print('Extract condcode function')\n k = []\n for line in data_list:\n line = line.rstrip('\\n')\n if line.__contains__('COND CODE') or line.__contains__('COMPLETION CODE - SYSTEM'):\n k.append(line[-4:])\n self.condcode_result = self.condcode_result + k\n\n # print(self.condcode_result)\n\n def extract_lib(self, data_list):\n\n temp_lib_flag = False\n\n # print('extract LIB function')\n for line in data_list:\n line = line.rstrip('\\n')\n for i in self.Liblist:\n index = line.find(i)\n if index != -1:\n temp_string = line[index + len(i) + 1:]\n for char in temp_string:\n if char != ',':\n self.temp_word += char\n # print(self.temp_word)\n else:\n break\n\n self.temp_lib_res.append(self.temp_word)\n self.temp_word = ''\n temp_lib_flag = True\n if temp_lib_flag:\n\n self.Lib_result.append(self.temp_lib_res)\n else:\n\n self.Lib_result.append('No system Libraries')\n\n # print(self.Lib_result)\n\n def step_division(self, data_list):\n\n # print('step_division function')\n data = list()\n flag = False\n for line in data_list:\n if line.__contains__('STEP WAS EXECUTED') or line.__contains__('COMPLETION CODE - SYSTEM'):\n flag = True\n if flag:\n data.append(line)\n if line.strip().__contains__('CPU:'):\n flag = False\n # break\n if not data:\n break\n self.step_block.append(data)\n data = []\n self.extract_data_sets()\n\n def extract_data_sets(self):\n\n # print('extract data sets function')\n d = c = final = ''\n b = []\n for k in self.step_block:\n for line in k:\n\n line = line.rstrip('\\n')\n if line.__contains__('IEF285I') and 'LOAD' not in line and line.__contains__('KEPT') \\\n or line.__contains__('CATALOGED') or line.__contains__('RETAINED'):\n d = d + line\n\n for i in range(len(d)):\n if d[i] == ' ':\n b.append(c)\n c = ''\n else:\n c = c + d[i]\n b.append(c)\n for j in range(len(b)):\n # print(b[j])\n if b[j] != 'IEF285I' and b[j] != 'KEPT' and b[j] != 'CATALOGED' and b[j] != '' and b[j] != 'RETAINED' \\\n and b[j] != 'IGD104I' and b[j] != 'DDNAME=DISK':\n final = final + b[j] + ','\n\n self.final_result.append(final)\n final = c = d = ' '\n b = []\n self.step_block = []\n if not self.final_result:\n self.final_result.append('NO DATA SETS FOUND')\n # print(self.final_result)\n\n def extract_abend(self, data_list):\n\n # print('extract abend function')\n\n flag = False\n\n temp_word = ''\n # print('fun5')\n for line in data_list:\n\n line = line.rstrip('\\n')\n\n for i in self.abend_list:\n\n if line.__contains__(i):\n\n index = line.find(i)\n\n if index != -1:\n\n temp_string = line[index + len(i):]\n\n for char in temp_string:\n\n if char != ' ':\n temp_word += char\n else:\n break\n\n self.res.append(temp_word)\n temp_word = ''\n flag = True\n\n if flag:\n\n self.abend_result.append(self.res)\n else:\n self.abend_result.append('RUN SUCCESSFULL')\n\n # print(self.abend_result)\n self.res = []\n\n def extract_reason(self, data_list):\n\n # print('extract reason function')\n # print(self.jescodes)\n k = list()\n # a = ' '\n # cnt = 0\n flag = False\n for line in data_list:\n # cnt += 1\n # DSNU095I : DB2LOAD ERROR.\n # IEB311I : CONFLICTION IN DCB PARAMETERS, ABEND =S822 U0000\n for jes in self.jescodes:\n if line.__contains__(jes):\n print('first if')\n # print( line )\n k.append(line)\n flag = True\n\n if flag:\n self.reason_list.append(k)\n else:\n self.reason_list.append('NOT ABLE TO FIND REASON')\n # print(self.reason_list)\n\n def extract_abend_info(self, data_list):\n\n # print('more abend info')\n ab_flag = False\n for f in self.abend_result:\n\n for g in f:\n\n if g == 'S0C7':\n\n # print('inside soc7 loop')\n\n for line in data_list:\n\n if line.__contains__('Invalid data'):\n # print(line)\n self.temp_ab_res.append(line)\n ab_flag = True\n\n if g == 'S0CB':\n\n print('inside SOCB loop')\n\n for line in data_list:\n\n line = line.rstrip('\\n')\n\n if line.__contains__('Local Variables:'):\n ab_flag = True\n if ab_flag:\n self.ab_data.append(line)\n\n if line.strip().__contains__('Run-Time Options Report:'):\n\n ab_flag = False\n\n if not self.ab_data:\n break\n\n self.temp_ab_res.append(self.ab_data)\n\n ab_flag = True\n\n self.ab_data = []\n if g == 'S04E':\n for line in data_list:\n\n if line.startswith(\" DSNU\"):\n self.temp_ab_res.append(line)\n\n ab_flag = True\n # print(self.temp_ab_res)\n\n if ab_flag:\n\n self.more_abend_info.append(self.temp_ab_res)\n\n else:\n\n self.more_abend_info.append('No More info')\n\n # print(self.more_abend_info)\n\n def extract_job_info(self, data_list):\n\n df_flag1 = False\n df_flag = False\n job_flag = False\n data_list.reverse()\n for line in data_list:\n self.temp_job_info.append(line)\n job_flag = True\n if line.find('PROCESSING COMPLETE') != -1 or line.find('CPU:') != -1:\n break\n else:\n job_flag = False\n\n self.temp_job_info.reverse()\n\n # print(self.job_info)\n\n if job_flag:\n self.job_info.append(self.temp_job_info)\n\n for a in range(len(self.abend_result)):\n # checking for abend\n if self.abend_result[a] == 'RUN SUCCESSFULL':\n df_flag = True\n\n for c in range(len(self.condcode_result)):\n\n # print(self.condcode_result[c])\n\n # checking for condcode (to list out in separate sheet)\n if self.condcode_result[c] == '0000':\n\n df_flag1 = True\n else:\n break\n\n if df_flag and df_flag1:\n\n # print('run success')\n\n df1 = pd.DataFrame(self.key_result)\n df1 = df1.transpose()\n df1.columns = [\"OWNER\", \"JOB NAME\", \"JOB CLASS\", \"JOB ID\", \"STARTED_TIME\", \"ENDED_TIME\", \"RUN TIME\", \"CPU\"]\n # print(df1)\n df2 = pd.DataFrame(self.total_steps)\n df2.columns = [\"STEPS IN JCL\"]\n df2['UTILITY/STEP NAME'] = self.Utility_result\n # print(df2)\n df3 = pd.DataFrame(self.condcode_result)\n df3.columns = ['COND CODES']\n # print(df3)\n df4 = pd.DataFrame(self.final_result)\n df4.columns = ['STEP WISE DATA SETS']\n # print(df4)\n df1['STEP AND DBRM LIBS'] = self.Lib_result\n df1['RUN STATUS'] = self.abend_result\n df1[\"JOB INFO\"] = self.job_info\n # print(df1)\n df = pd.concat([df1, df2, df3, df4], axis=1)\n # print(df)\n print(df)\n df = df[['JOB NAME', 'OWNER', 'JOB CLASS', 'JOB ID', 'RUN TIME', 'CPU', 'STEPS IN JCL', 'UTILITY/STEP NAME',\n 'COND CODES', 'RUN STATUS', 'STEP WISE DATA SETS', 'STARTED_TIME', 'ENDED_TIME',\n 'STEP AND DBRM LIBS', 'JOB INFO']]\n\n df.to_csv(r'C:\\Users\\m1055990\\Desktop\\Lagacy.csv', index=False, mode='a', header=False)\n\n else:\n\n # print('Abended jobs')\n df5 = pd.DataFrame(self.key_result)\n df5 = df5.transpose()\n df5.columns = [\"OWNER\", \"JOB NAME\", \"JOB CLASS\", \"JOB ID\", \"STARTED_TIME\", \"ENDED_TIME\", \"RUN TIME\", \"CPU\"]\n df6 = pd.DataFrame(self.total_steps)\n df6.columns = [\"STEPS IN JCL\"]\n df6['UTILITY/STEP NAME'] = self.Utility_result\n df7 = pd.DataFrame(self.condcode_result)\n df7.columns = ['COND CODES']\n df8 = pd.DataFrame(self.final_result)\n df8.columns = ['STEP WISE DATA SETS']\n df5['STEP AND DBRM LIBS'] = self.Lib_result\n df5['RUN STATUS'] = self.abend_result\n df5['REASON FOR ABEND'] = self.reason_list\n df5['MORE ABEND INFO'] = self.more_abend_info\n df5['JOB INFO'] = self.job_info\n dataf = pd.concat([df5, df6, df7, df8], axis=1)\n # print(dataf)\n print(dataf)\n dataf = dataf[['JOB NAME', 'OWNER', 'JOB CLASS', 'JOB ID', 'RUN TIME', 'CPU', 'STEPS IN JCL',\n 'UTILITY/STEP NAME', 'COND CODES', 'RUN STATUS', 'REASON FOR ABEND', 'MORE ABEND INFO',\n 'STEP WISE DATA SETS', 'STARTED_TIME', 'ENDED_TIME', 'STEP AND DBRM LIBS', 'JOB INFO']]\n dataf.to_csv(r'C:\\Users\\m1055990\\Desktop\\ABEND.csv', index=False, mode='a', header=False)\n\n self.key_result.clear()\n self.total_steps.clear()\n self.Utility_result.clear()\n self.condcode_result.clear()\n self.final_result.clear()\n self.abend_result.clear()\n self.reason_list.clear()\n self.Lib_result.clear()\n self.temp_lib_res.clear()\n self.more_abend_info.clear()\n self.temp_ab_res.clear()\n self.job_info.clear()\n self.temp_job_info.clear()\n","sub_path":"Analyse_Logs.py","file_name":"Analyse_Logs.py","file_ext":"py","file_size_in_byte":14615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"440530851","text":"from fastapi import FastAPI, Response, status\n\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom .models import AZRequest, AZResponse\nfrom .azgen import get_bounds, get_cutoff_alt, get_az\nfrom .verison import __version__\n\n\napp = FastAPI()\n\norigins = [\"http://activation.zone\", \"http://www.activation.zone\",\n\"https://activation.zone\", \"https://www.activation.zone\", \"http://localhost:8082\", \"https://localhost:8082\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@app.get(\"/\")\ndef home():\n return f'AZAPI v{__version__}'\n\n\n@app.post(\"/\", status_code=status.HTTP_200_OK, response_model=AZResponse)\ndef azgen(item: AZRequest, response: Response):\n bounds = get_bounds(item)\n cutoff_alt = get_cutoff_alt(item)\n az_geo = get_az(item, bounds)\n\n print(f'Bounds: {bounds}')\n print(f'Cutoff Alt: {cutoff_alt}')\n print(f'AZ Data: {az_geo}')\n\n # Convert to string and remove altitude (0m)\n az_geo_string = str(az_geo)\n az_geo_string = az_geo_string.replace(\" 0\", \"\")\n\n print(\"Polygon String: \", az_geo_string)\n \n return { \"az\": az_geo_string }\n","sub_path":"azapi/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"66196177","text":"from .base import *\n\nfrom pathlib import Path\nimport environ\nimport cloudinary\n\nenv = environ.Env()\nenv.read_env()\n\nSECRET_KEY = env.str(\"SECRET_KEY\", default='')\n\nDEBUG = env.bool(\"DEBUG\", default=True)\n\nALLOWED_HOSTS = [\"*\"]\n\nEMAIL_HOST = env.str(\"EMAIL_HOST\")\nEMAIL_HOST_USER = env.str(\"EMAIL_HOST_USER\")\nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER\nEMAIL_HOST_PASSWORD = env.str(\"EMAIL_HOST_PASSWORD\")\nEMAIL_PORT = env.int(\"EMAIL_PORT\")\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\")\n\n\ncloudinary.config(\n cloud_name = env.str(\"CLOUDINARY_NAME\", ''), \n api_key = env.str(\"CLOUDINARY_API_KEY\", ''), \n api_secret = env.str(\"CLOUDINARY_API_SECRET\", '') \n)\n\nINTERNAL_IPS = (\"127.0.0.1\", \"172.17.0.1\", 'localhost')\n\n# Extra app\n\nINSTALLED_APPS += [\n 'debug_toolbar',\n 'django.contrib.admindocs',\n]\n\n# Extra middleware\n\nMIDDLEWARE += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\n\nSTATIC_URL = '/static/'\n\nMEDIA_URL = '/media/'","sub_path":"kfupm/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"309727291","text":"#!/usr/bin/env python\nfrom flask import Flask, render_template, send_from_directory, request, g\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom facebook import get_user_from_cookie, GraphAPI\nfrom utils import save_img, dir2zip\nfrom aiohttp import request as aioreq\nfrom celery import Celery\n\nimport asyncio\n\n\"\"\"\nhttps://gist.github.com/omz/5048592 -- evernote demo\n\"\"\"\n\n#app.debug = True\ntmpl_path = '/home/nero/flask/FBPocket/templates/'\napp = Flask(__name__, template_folder=tmpl_path)\napp.config.from_pyfile('config.py')\n\nFB_APP_ID = '1544719435778456'\nFB_APP_NAME = 'JustToy'\nFB_APP_SECRET = '1b29c573b9f96cda4413b6f83fce1804'\nPOST_ID = ''\nCNT = 0\nMEDIA = ''\n\n\"\"\"\nRecommend Use WebServer.\n@app.route('/media/')\ndef send_js(path):\n return send_from_directory('media', path)\n\"\"\"\n\ncelery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])\ncelery.conf.update(app.config)\n\n\ndb = SQLAlchemy(app)\n\n\n## models ##\nfrom datetime import datetime\n\nColumn = db.Column\nDateTime = db.DateTime\nString = db.String\nInteger = db.Integer\nModel = db.Model\n\n\nusers_contents = db.Table('users_contens',\n Column(\"user_id\", Integer,\n ForeignKey(\"User.id\")),\n Column(\"contents_id\", Integer,\n ForeignKey(\"Contents.id\"))\n )\n\n\nclass ContentsType(Model):\n \"\"\"\n video, album, ETC\n \"\"\"\n tag = Column(int, nullable=False, primary_key=True)\n\n\nclass Contents(Model):\n id = Column(String, nullable=False, primary_key=True)\n\n\nclass User(Model):\n __tablename__ = 'users'\n\n id = Column(String, nullable=False, primary_key=True)\n created = Column(DateTime, default=datetime.utcnow, nullable=False)\n updated = Column(DateTime, default=datetime.utcnow, nullable=False,\n onupdate=datetime.utcnow)\n name = Column(String, nullable=False)\n profile_url = Column(String, nullable=False)\n access_token = Column(String, nullable=False)\n## models ##\n\n\n@celery.task(bind=True)\ndef get_contents(self, urls):\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen\n\n for i, url in enumerate(urls):\n filename = str(i)+url[-4:]\n with open(filename) as f:\n f.write(urlopen(url).read())\n\n self.update_state(state='PROGRESS',\n meta={'current': i})\n\n return {'current': 100, 'total': 100, 'status': 'Task completed!'}\n\n\n@app.route('/get-content/', methods=[\"GET\", \"POST\"])\ndef getcontents():\n from re import search\n result = get_user_from_cookie(cookies=request.cookies, app_id=FB_APP_ID,\n app_secret=FB_APP_SECRET)\n graph = GraphAPI(ACCESS_TOKEN)\n url = request.form['url']\n info = search(r\"\"\"\n ((?<=facebook.com/)(?P\\w*))/posts/(?P\\d*$)\n \"\"\", url).groupdict()\n\n page_id = graph.get_object(id=info['page_name'], fields=['id'])['id']\n\n full_id = \"%s_%s\" % (page_id, info['post_id'])\n\n print(full_id)\n post = graph.get_object(id=full_id,\n fields=['attachments'])\n #print(post['message'])\n data = post['attachments']['data'][0]\n\n if data['type'] != 'album':\n return False\n\n img_urls = []\n\n for subdata in data['subattachments']['data']:\n if subdata['type'] == 'photo':\n img_urls.append(subdata['media']['image']['src'])\n else:\n return \"Invalid type\"\n print(img_urls)\n get_contents(img_urls).apply_async()\n\n\n@app.before_request\ndef get_current_user():\n if session.get('user'):\n g.user = session.get('user')\n return\n\n # Attempt to get the short term access token for the current user.\n result = get_user_from_cookie(cookies=request.cookies, app_id=FB_APP_ID,\n app_secret=FB_APP_SECRET)\n\n if result:\n user = User.query.filter(User.id == result['uid']).first()\n\n if not user:\n graph = GraphAPI(result['access_token'])\n profile = graph.get_object('me')\n\n # Create the user and insert it into the database\n user = User(id=str(profile['id']), name=profile['name'],\n profile_url=profile['link'],\n access_token=result['access_token'])\n db.session.add(user)\n\n elif user.access_token != result['access_token']:\n user.access_token = result['access_token']\n\n session['user'] = dict(name=user.name, profile_url=user.profile_url,\n id=user.id, access_token=user.access_token)\n\n db.session.commit()\n g.user = session.get('user', None)\n\n\n@app.route('/result/')\ndef result():\n zipfilepath = ''\n evernote = ''\n return render_template('result.html')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', app_id=FB_APP_ID)\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"_flask_backup/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"542696287","text":"#!/usr/bin/env python\nimport logging\nlogging.basicConfig()\nrootLogger = logging.getLogger('')\nrootLogger.setLevel(logging.INFO)\n\nfrom icecube.simprod.modules import Corsika5ComponentGenerator\n\nif __name__ == '__main__':\n stats = {}\n cors = Corsika5ComponentGenerator()\n cors.ExecuteOpts(stats)\n","sub_path":"simprod-scripts/resources/scripts/corsika5comp.py","file_name":"corsika5comp.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"23356406","text":"\"\"\"\nRequirements:\n 如需运行本模块, 请先安装 Qt 5.0+ (推荐 5.15) 完整版.\n 本模块所用到的离线文件读取自:\n \"{YourQtProgram}/Docs/Qt-{version}/qtdoc/qmltypes.html\".\n\"\"\"\nimport re\nfrom collections import defaultdict\n\nfrom bs4 import BeautifulSoup\nfrom lk_logger import lk\nfrom lk_utils import read_and_write\n\n\ndef main(file_i, file_o):\n \"\"\"\n \n Args:\n file_i: '~/blueprint/resources/no2_all_qml_types.html'. 该文件被我事先从\n \"{YourQtProgram}/Docs/Qt-{version}/qtdoc/qmltypes.html\" 拷贝过来.\n file_o: 生成文件. \"~/blueprint/resources/no3_all_qml_types.json\"\n {module_group: {module: {type_name: path}, ...}, ...}\n # {模组: {模块: {类型: 路径}}}\n e.g. {\n 'qtquick': {\n 'qtquick': {\n 'Rectangle': 'qtquick/qml-qtquick-rectangle.html',\n 'Text': 'qtquick/qml-qtquick-text.html',\n ...\n },\n 'qtquick-window': {\n 'Window': 'qtquick/qml-qtquick-window-window.html',\n ...\n },\n ...\n },\n ...\n }\n \n 思路:\n 1. 我们安装了 Qt 主程序以后, 在软件安装目录下的 'Docs/Qt-{version}' 中有\n 它的 API 文档\n 2. 其中 \"~/Docs/Qt-{version}/qtdoc/qmltypes.html\" 列出了全部的 qml types\n 3. 我们对 \"qmltypes.html\" 用 BeautifulSoup 解析, 从中获取每个 qml types\n 和它的链接, 最终我们将得到这些信息: 模组, 模块, 类型, 路径等\n 4. 将这些信息保存到本项目下的 \"~/resources/qmltypes.json\" 文件中\n \"\"\"\n soup = BeautifulSoup(read_and_write.read_file(file_i), 'html.parser')\n \n # https://www.itranslater.com/qa/details/2325827141935563776\n data = defaultdict(lambda: defaultdict(dict))\n # {module_group: {module: {type_name: filename, ...}, ...}, ...}\n \n container = soup.find('div', 'flowListDiv')\n for e in container.find_all('dd'):\n link = e.a['href'] # type: str\n # e.g. \"../qtdatavisualization/qml-qtdatavisualization-\n # abstract3dseries.html\"\n \n match = re.search(r'\\.\\./(\\w+)/([-\\w]+)\\.html', link)\n # | ^-1-^ ^--2---^ |\n # ^-------- group(0) -------^\n # match.group(0): '../qtdatavisualization/qml-qtdatavisualization\n # -abstract3dseries.html'\n # match.group(1): 'qtdatavisualization'\n # match.group(2): 'qml-qtdatavisualization-abstract3dseries'\n assert match, e\n \n module_group = match.group(1)\n module = match.group(2)\n # see `blueprint/qml_modules_indexing/no1_all_qml_modules.py:comments\n # :针对 QtQuick Controls 的处理`\n if module_group == 'qtquickcontrols1':\n continue\n if 'qtquick-controls2' in module:\n # e.g. 'qml-qtquick-controls2-label'\n module = module.replace('controls2', 'controls')\n \n path = match.group(0).lstrip('../')\n # -> 'qtdatavisualization/qml-qtdatavisualization-abstract3dseries\n # .html'\n module_group = _correct_module_lettercase(module_group)\n # 'qtdatavisualization' -> 'QtDataVisualization'\n module = _correct_module_lettercase('-'.join(module.split('-')[1:-1]))\n # eg1: 'qml-qtdatavisualization-abstract3dseries' -> ['qml',\n # 'qtdatavisualization', 'abstract3dseries'] -> [\n # 'qtdatavisualization'] -> 'qtdatavisualization'\n # -> 'QtDataVisualization'\n # eg2: 'qml-qt3d-input-abstractactioninput' -> ['qml', 'qt3d',\n # 'input', 'abstractactioninput'] -> ['qt3d', 'input',\n # 'abstractactioninput'] -> 'qt3d-input' -> 'Qt3D.Input'\n # 注: 为什么要舍去末尾的元素? 因为末尾的那个是 `type_name`, 不是\n # `module`. 接下来我们会抽取 `type_name`.\n type_name = e.text.split(':', 1)[0]\n # 注意我们不使用 `correct_module_lettercase(match.group(2).split('-')\n # [-1])`, 是因为 `correct_module_lettercase` 的词库范围比较小, 仅对\n # `module_group` 和 `module` 做了覆盖, 不能保证对 `type_name` 的处理正\n # 确; 而 `soup` 是可以比较轻松地通过 tag 提取到它的, 所以通过 html 元\n # 素获取.\n # e.g. 'RadioButton: QtQuickControls' -> 'RadioButton'\n \n lk.loga(module_group, module, type_name)\n data[module_group][module][type_name] = path\n \n read_and_write.dumps(data, file_o)\n\n\n# ------------------------------------------------------------------------------\n\nqml_modules = read_and_write.loads('../resources/no2_all_qml_modules.json')\nqml_modules = qml_modules['module_group'] | qml_modules['module'] # type: dict\nqml_modules.update({ # 扩充\n '' : '',\n 'qtquick-controls-private': 'QtQuick.Controls.Private',\n 'mediaplayer-qml' : 'QtMediaPlayer',\n # 注: 这个其实是不存在的, 只是为了不报错所以加上去\n})\n\n\ndef _correct_module_lettercase(module: str):\n \"\"\" 修正模块的大小写.\n \n 示例:\n 'qtquick-window' -> 'QtQuick.Window'\n 'qtgraphicaleffects' -> 'QtGraphicalEffects\n \n 注意: 存在一些特殊情况:\n 'qt-labs-animation' -> 'Qt.labs.animation'\n \n 思路:\n 1. 我们需要把模块的名字按照词来拆分:\n 'qtgraphicaleffects' -> ['qt', 'graphical', 'effects']\n 2. 然后将每个词的首字母大写:\n ['Qt', 'Graphical', 'Effects']\n 3. 再拼接回去:\n 'QtGraphicalEffects'\n \n (对于一些特殊情况, 比如 Qt.labs 要求全小写, 则需要进一步判断和调整.)\n \n 单词拆分该怎么实现?\n 方案 1: 引入一个第三方库来切词. 缺点是词库体积大, 有些 Qt 自定义词不在里\n 面 (自己也不一定找全), 甚至可能会切分存在歧义导致不准确. 成本高且效果差.\n 方案 2: 从 \"{YourQtProgram}/Docs/Qt-{version}/qtdoc/modules-qml.html\" 页\n 面, 把里面提到的所有单词都提取出来, 然后组成一个列表. 这里的单词应该完整\n 覆盖了模块的名字中的所有情况. 然后我们把列表转换成一个前缀树, 就可以以一\n 种简单且准确的方式去分词了.\n 目前采用的是方案 2. 方案 2 需要提前准备这样一个单词列表, 见:\n `blueprint/qml_indexing/no1_all_qml_modules.py`.\n \"\"\"\n global qml_modules\n return qml_modules[module]\n\n\nif __name__ == '__main__':\n main('../resources/no3_all_qml_types.html',\n '../resources/no4_all_qml_types.json')\n","sub_path":"blueprint/qml_modules_indexing/no2_all_qml_types.py","file_name":"no2_all_qml_types.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"376144024","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nimport uuid\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CustomUser',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('password', models.CharField(verbose_name='password', max_length=128)),\n ('last_login', models.DateTimeField(blank=True, verbose_name='last login', null=True)),\n ('is_superuser', models.BooleanField(verbose_name='superuser status', help_text='Designates that this user has all permissions without explicitly assigning them.', default=False)),\n ('email', models.EmailField(verbose_name='email address', max_length=254, unique=True)),\n ('full_name', models.CharField(blank=True, verbose_name='full name', max_length=30)),\n ('is_staff', models.BooleanField(verbose_name='staff status', help_text='Designates whether the user can log into this admin site.', default=False)),\n ('is_active', models.BooleanField(verbose_name='active', help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', default=True)),\n ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\n ('cidade', models.CharField(max_length=200, blank=True, null=True)),\n ('estado', models.CharField(max_length=100, blank=True, null=True)),\n ('endereco', models.CharField(max_length=256, blank=True, null=True)),\n ('telefone', models.CharField(max_length=40, blank=True, null=True)),\n ],\n options={\n 'verbose_name_plural': 'users',\n 'verbose_name': 'user',\n },\n ),\n migrations.CreateModel(\n name='Company',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('razao_social', models.CharField(max_length=150)),\n ('cnpj_cpf', models.CharField(max_length=40, blank=True, verbose_name='CPF/CNPJ', null=True)),\n ('rg_ie', models.CharField(max_length=40, blank=True, verbose_name='RG/IE', null=True)),\n ('country', models.CharField(max_length=100, blank=True, verbose_name='País', null=True)),\n ('state', models.CharField(max_length=100, blank=True, verbose_name='UF', null=True)),\n ('city', models.CharField(max_length=100, blank=True, verbose_name='Cidade', null=True)),\n ('address', models.CharField(max_length=100, blank=True, verbose_name='Endereço', null=True)),\n ('address_number', models.CharField(max_length=100, blank=True, verbose_name='Número', null=True)),\n ('address_complement', models.CharField(max_length=100, blank=True, verbose_name='Complemento', null=True)),\n ('address_postcode', models.CharField(max_length=100, blank=True, verbose_name='Cep', null=True)),\n ('address_neighborhood', models.CharField(max_length=100, blank=True, verbose_name='Bairro', null=True)),\n ('phone1', models.CharField(max_length=100, blank=True, verbose_name='Telefone 1', null=True)),\n ('phone2', models.CharField(max_length=100, blank=True, verbose_name='Telefone 2', null=True)),\n ('site', models.CharField(max_length=100, blank=True, verbose_name='Website', null=True)),\n ('contact', models.CharField(max_length=100, blank=True, verbose_name='Contato', null=True)),\n ('slug', models.SlugField(max_length=150)),\n ('token', models.UUIDField(default=uuid.uuid4, editable=False)),\n ('usuario_rel', models.ForeignKey(default=None, to=settings.AUTH_USER_MODEL, related_name='usuarios')),\n ],\n options={\n 'verbose_name': 'Empresa',\n },\n ),\n migrations.AddField(\n model_name='customuser',\n name='empresa',\n field=models.ForeignKey(default=1, verbose_name='Empresa', to='main.Company', related_name='empresa'),\n ),\n migrations.AddField(\n model_name='customuser',\n name='groups',\n field=models.ManyToManyField(blank=True, verbose_name='groups', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_query_name='user', related_name='user_set', to='auth.Group'),\n ),\n migrations.AddField(\n model_name='customuser',\n name='user_permissions',\n field=models.ManyToManyField(blank=True, verbose_name='user permissions', help_text='Specific permissions for this user.', related_query_name='user', related_name='user_set', to='auth.Permission'),\n ),\n ]\n","sub_path":"djangobusiness/apps/main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"162500701","text":"#!/usr/bin/python3\nimport sys\nimport argparse\nimport os.path\nimport sqlite3\nfrom datetime import datetime\n\nt0 = datetime.now()\n\nparser = argparse.ArgumentParser(description='Extract VCF file to SQLite3 database')\nparser.add_argument('db', nargs=1,\n help='SQLite3 database file')\nparser.add_argument('vcf', nargs=1,\n help='VCF file')\nargs = parser.parse_args()\ndbfile = args.db[0]\nvcfile = args.vcf[0]\n\nif not os.path.isfile(args.db[0]):\n print(args.db[0], \"not found\")\n sys.exit()\nif not os.path.isfile(args.vcf[0]):\n print(args.vcf[0], \"not found\")\n sys.exit()\n\nconnection = sqlite3.connect(dbfile)\nprint(connection)\nconnection.close()\n","sub_path":"args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"557132457","text":"\nimport rowan\nimport numpy as np\nfrom numpy import testing as npt\nfrom hypothesis import assume, given, settings\nfrom hypothesis import strategies as hs\nfrom hypothesis.extra import numpy as hnp\n\nDIM = 8\n\nINVARIANT_MODES = ['full', 'partial', 'single']\n\nMERGE_MODES = ['mean', 'concat']\n\nFLOAT_SCALES = np.logspace(2.5, -2.5, 5)\n\nfinite_dtype = hnp.from_dtype(\n np.dtype('float32'), min_value=-4, max_value=4,\n allow_nan=False, allow_infinity=False)\n\nweight_dtype = hnp.from_dtype(\n np.dtype('float32'), min_value=0, max_value=10,\n allow_nan=False, allow_infinity=False)\n\n@hs.composite\ndef bond_weights(draw, shape):\n w = draw(hnp.arrays(np.float32, shape, elements=weight_dtype))\n w /= np.sum(w)\n return w\n\n@hs.composite\ndef point_cloud(draw, weights=False):\n N = draw(hs.integers(min_value=4, max_value=12))\n r = draw(hnp.arrays(np.float32, (N, 3), elements=finite_dtype))\n v = draw(hnp.arrays(np.float32, (N, DIM), elements=finite_dtype))\n\n assume(np.sum(np.square(r)) > 1e-5)\n\n result = [r, v]\n\n if weights:\n w = draw(bond_weights((N,)))\n result.append(w)\n\n return tuple(result)\n\n@hs.composite\ndef unit_quaternions(draw):\n result = draw(hnp.arrays(np.float32, (4,), elements=finite_dtype))\n assume(np.sum(np.square(result)) > 1e-5)\n return result/np.linalg.norm(result)\n\nclass AllTests:\n DIM = DIM\n\n @settings(deadline=None)\n @given(\n unit_quaternions(),\n point_cloud(),\n hs.integers(1, 3),\n hs.sampled_from(MERGE_MODES),\n hs.sampled_from(MERGE_MODES),\n hs.sampled_from(INVARIANT_MODES))\n def test_rotation_invariance_value(self, q, rv, rank, merge_fun, join_fun, invar_mode):\n errs = []\n for scale in FLOAT_SCALES:\n r, v = rv; r = r*scale; v = v*scale\n rprime = rowan.rotate(q[None], r).astype(np.float32)\n\n key = 'rotation_invariance'\n prediction1 = self.value_prediction(r, v, key, rank, merge_fun, join_fun, invar_mode)\n prediction2 = self.value_prediction(rprime, v, key, rank, merge_fun, join_fun, invar_mode)\n\n self.assertEqual(v[0].shape, prediction1.shape)\n errs.append(np.mean(np.square(prediction1 - prediction2)))\n\n with np.errstate(divide='ignore'):\n x, y = np.log(FLOAT_SCALES), np.log(errs)\n filt = np.isfinite(y)\n x, y = x[filt], y[filt]\n if len(x) > 2:\n corrcoef = np.corrcoef(x, y)[0, 1]\n if np.isfinite(corrcoef):\n self.assertGreater(corrcoef, .7)\n\n @settings(deadline=None)\n @given(\n hs.integers(0, 128),\n hs.integers(0, 128),\n hs.integers(1, 3),\n hs.sampled_from(MERGE_MODES),\n hs.sampled_from(MERGE_MODES),\n hs.sampled_from(INVARIANT_MODES))\n def test_permutation_equivariance_value(self, swap_i, swap_j, rank, merge_fun, join_fun, invar_mode):\n np.random.seed(13)\n r = np.random.normal(size=(7, 3)).astype(np.float32)\n r /= np.linalg.norm(r, axis=-1, keepdims=True)\n v = np.zeros((r.shape[0], self.DIM), dtype=np.float32)\n v[:, 0] = np.arange(len(r))\n rv = r, v\n\n errs = []\n for scale in FLOAT_SCALES:\n r, v = rv; r = r*scale; v = v*scale\n swap_i = swap_i%len(r)\n swap_j = swap_j%len(r)\n rprime, vprime = r.copy(), v.copy()\n rprime[swap_i], rprime[swap_j] = r[swap_j], r[swap_i]\n vprime[swap_i], vprime[swap_j] = v[swap_j], v[swap_i]\n\n key = 'permutation_equivariance'\n prediction1 = self.value_prediction(r, v, key, rank, merge_fun, join_fun, invar_mode, reduce=False)\n prediction2 = self.value_prediction(rprime, vprime, key, rank, merge_fun, join_fun, invar_mode, reduce=False)\n\n self.assertEqual(v.shape, prediction1.shape)\n temp = prediction2[swap_i].copy()\n prediction2[swap_i] = prediction2[swap_j]\n prediction2[swap_j] = temp\n errs.append(np.mean(np.square(prediction1 - prediction2)))\n\n with np.errstate(divide='ignore'):\n x, y = np.log(FLOAT_SCALES), np.log(errs)\n filt = np.isfinite(y)\n x, y = x[filt], y[filt]\n if len(x) > 2:\n corrcoef = np.corrcoef(x, y)[0, 1]\n if np.isfinite(corrcoef):\n self.assertGreater(corrcoef, .7)\n\n @settings(deadline=None)\n @given(\n unit_quaternions(),\n point_cloud(),\n hs.integers(1, 3),\n hs.sampled_from(MERGE_MODES),\n hs.sampled_from(MERGE_MODES),\n hs.sampled_from(INVARIANT_MODES),\n hs.sampled_from(INVARIANT_MODES))\n def test_rotation_covariance_vector(self, q, rv, rank, merge_fun, join_fun,\n invar_mode, covar_mode):\n errs = []\n for scale in FLOAT_SCALES:\n r, v = rv; r = r*scale; v = v*scale\n rprime = rowan.rotate(q[None], r).astype(np.float32)\n\n key = 'rotation_covariance'\n prediction1 = self.vector_prediction(\n r, v, key, rank, merge_fun, join_fun, invar_mode, covar_mode)\n prediction1_prime = rowan.rotate(q, prediction1)\n prediction2 = self.vector_prediction(\n rprime, v, key, rank, merge_fun, join_fun, invar_mode, covar_mode)\n\n errs.append(np.mean(np.square(prediction1_prime - prediction2)))\n\n with np.errstate(divide='ignore'):\n x, y = np.log(FLOAT_SCALES), np.log(errs)\n filt = np.isfinite(y)\n x, y = x[filt], y[filt]\n if len(x) > 2:\n corrcoef = np.corrcoef(x, y)[0, 1]\n if np.isfinite(corrcoef):\n self.assertGreater(corrcoef, .7)\n\n @settings(deadline=None)\n @given(\n unit_quaternions(),\n point_cloud(),\n point_cloud())\n def test_rotation_covariance_label_vector(self, q, rv, rv2):\n errs = []\n for scale in FLOAT_SCALES:\n r, v = rv; r = r*scale; v = v*scale\n v2 = rv2[1]\n rprime = rowan.rotate(q[None], r).astype(np.float32)\n\n key = 'rotation_covariance_label'\n prediction1 = self.label_vector_prediction(r, v, v2, key)\n prediction1_prime = rowan.rotate(q[None], prediction1)\n prediction2 = self.label_vector_prediction(rprime, v, v2, key)\n\n errs.append(np.mean(np.square(prediction1_prime - prediction2)))\n\n with np.errstate(divide='ignore'):\n x, y = np.log(FLOAT_SCALES), np.log(errs)\n filt = np.isfinite(y)\n x, y = x[filt], y[filt]\n if len(x) > 2:\n corrcoef = np.corrcoef(x, y)[0, 1]\n if np.isfinite(corrcoef):\n self.assertGreater(corrcoef, .7)\n","sub_path":"tests/test_internals.py","file_name":"test_internals.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"635777358","text":"# SIM-CITY client\n#\n# Copyright 2015 Joris Borgdorff \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom picas.util import merge_dicts\nfrom picas import Job\nimport simcity\nimport json\n\nfrom numbers import Number\ntry:\n from httplib import HTTPConnection\nexcept ImportError:\n from http.client import HTTPConnection\n\nimport subprocess\nfrom uuid import uuid4\n\n\ndef submit_if_needed(hostname, max_jobs, submitter=None):\n \"\"\"\n Submit a new job if not enough jobs are already running or queued.\n\n Host configuration is extracted from an entry in the global config file.\n \"\"\"\n if not isinstance(max_jobs, Number):\n raise ValueError(\"Max jobs must be a number\")\n\n num = simcity.overview_total()\n\n num_jobs = num['active_jobs'] + num['pending_jobs']\n if ((num_jobs < num['todo'] and num_jobs < max_jobs) or\n # active jobs should be scrubbed, some of them are not running\n # anymore\n (num['pending_jobs'] == 0 and num['todo'] > 0 and\n num['locked'] == 0)):\n return submit(hostname, submitter)\n else:\n return None\n\n\ndef submit(hostname, submitter=None):\n \"\"\" Submit a new job to given host. \"\"\"\n host = hostname + '-host'\n try:\n host_cfg = simcity.get_config().section(host)\n except KeyError:\n raise ValueError('%s not configured under %s section' %\n (hostname, host))\n\n try:\n if submitter is None:\n if host_cfg['method'] == 'ssh':\n submitter = SSHSubmitter(\n database=simcity.get_job_database(),\n host=host_cfg['host'],\n jobdir=host_cfg['path'],\n prefix=hostname + '-')\n elif host_cfg['method'] == 'osmium':\n try:\n osmium_cfg = simcity.get_config().section('osmium')\n except KeyError:\n osmium_cfg = {'host': 'localhost:9998'}\n\n submitter = OsmiumSubmitter(\n database=simcity.get_job_database(),\n launcher=host_cfg.get('launcher'),\n url=host_cfg.get('host', osmium_cfg['host']),\n jobdir=host_cfg['path'],\n prefix=hostname + '-',\n max_time=host_cfg.get('max_time'))\n else:\n raise EnvironmentError('Connection method for %s unknown' %\n hostname)\n\n script = [host_cfg['script']] + host_cfg.get('arguments', '').split()\n except KeyError:\n raise EnvironmentError(\n \"Connection method for %s not well configured\" % hostname)\n\n return submitter.submit(script)\n\n\nclass Submitter(object):\n\n \"\"\" Submits a job \"\"\"\n\n def __init__(self, database, host, prefix, jobdir, method):\n self.database = database\n self.host = host\n self.jobdir = jobdir\n self.prefix = prefix\n self.method = method\n\n def submit(self, command):\n job_id = 'job_' + self.prefix + uuid4().hex\n job = simcity.queue_job(Job({'_id': job_id}), self.method,\n host=self.host, database=self.database)\n try:\n job['batch_id'] = self._do_submit(job, command)\n except:\n simcity.archive_job(job, database=self.database)\n raise\n else:\n self.database.save(job)\n return job\n\n def _do_submit(self, job, command):\n raise NotImplementedError\n\n\nclass OsmiumSubmitter(Submitter):\n\n \"\"\" Submits a job to Osmium. \"\"\"\n __BASE = {\n \"prestaged\": [],\n \"poststaged\": [],\n \"environment\": {}\n }\n\n def __init__(self, database, url, prefix, launcher, jobdir=\"~\",\n max_time=None):\n super(OsmiumSubmitter, self).__init__(\n database, url, prefix, jobdir, method=\"osmium\")\n self.launcher = launcher\n self.max_time = max_time\n\n def _request(self, location=\"/\", method=\"GET\", data=None):\n conn = HTTPConnection(self.host)\n url = 'http://%s%s' % (self.host, location)\n conn.request(method, url, data)\n response = conn.getresponse()\n conn.close()\n if response.status < 200 or response.status >= 300:\n raise IOError(\"Request failed \" + response.reason +\n \"(HTTP status \" + response.status + \")\")\n return response\n\n def _do_submit(self, job, command):\n request = merge_dicts(OsmiumSubmitter.__BASE, {\n 'executable': command[0],\n 'arguments': command[1:],\n 'jobdir': self.jobdir,\n 'environment': {'SIMCITY_JOBID': job.id},\n })\n if self.launcher is not None:\n request['launcher'] = self.launcher\n if self.max_time is not None:\n request['max_time'] = int(self.max_time)\n\n response = self._request(method=\"POST\", data=request)\n return response.location.split('/')[-1]\n\n def status(self, job_id):\n response = self._request('/job/%s' % (self.host, job_id))\n return json.loads(response.data)\n\n def jobs(self):\n response = self._request('/job')\n return json.loads(response.data)\n\n\nclass SSHSubmitter(Submitter):\n\n \"\"\" Submits a job over SSH. \"\"\"\n\n def __init__(self, database, host, prefix, jobdir=\"~\"):\n super(SSHSubmitter, self).__init__(\n database, host, prefix, jobdir, method=\"ssh\")\n\n def _do_submit(self, job, command):\n command_str = ('cd \"%s\";'\n 'export SIMCITY_JOBID=\"%s\";'\n 'qsub -v SIMCITY_JOBID %s') % (self.jobdir, job.id,\n ' '.join(command))\n process = subprocess.Popen(['ssh', self.host, command_str],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (stdout, stderr) = process.communicate()\n lines = stdout.decode('utf-8').split('\\n')\n try:\n # get the (before)last line\n return lines[-2]\n except IndexError:\n raise IOError(\"Cannot parse job ID from stdout: '%s'\\n\"\n \"==== stderr ====\\n'%s'\"\n % (stdout, stderr))\n","sub_path":"simcity/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":6836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"353616356","text":"#!/usr/bin/env python\n\nimport unittest, sys, os, os.path as path, logging\ntry:\n import json\nexcept:\n try:\n import simplejson as json\n except:\n print >>sys.stderr, \"No json module, skipping\"\n sys.exit(0)\nfrom copy import deepcopy\n\ntry:\n import cStringIO as StringIO\nexcept ImportError:\n import StringIO\n\ntry:\n import xcp\nexcept ImportError:\n print >>sys.stderr, \"Must run with run-test.sh\"\n sys.exit(1)\n\n\nfrom xcp.net.ifrename.dynamic import DynamicRules\nfrom xcp.net.ifrename.macpci import MACPCI\nfrom xcp.logger import LOG, openLog, closeLogs\n\n\nclass TestLoadAndParse(unittest.TestCase):\n\n def setUp(self):\n self.logbuf = StringIO.StringIO()\n openLog(self.logbuf, logging.NOTSET)\n\n def tearDown(self):\n\n closeLogs()\n self.logbuf.close()\n\n def test_null(self):\n\n fd = StringIO.StringIO(\"\")\n dr = DynamicRules(fd=fd)\n\n self.assertTrue(dr.load_and_parse())\n\n self.assertEqual(dr.lastboot, [])\n self.assertEqual(dr.old, [])\n\n def test_empty(self):\n\n fd = StringIO.StringIO(\n '{\"lastboot\":[],\"old\":[]}'\n )\n dr = DynamicRules(fd=fd)\n\n self.assertTrue(dr.load_and_parse())\n\n self.assertEqual(dr.lastboot, [])\n self.assertEqual(dr.old, [])\n\n def test_one_invalid(self):\n\n fd = StringIO.StringIO(\n '{\"lastboot\":[[\"\",\"\",\"\"]],\"old\":[]}'\n )\n dr = DynamicRules(fd=fd)\n\n self.assertTrue(dr.load_and_parse())\n\n self.assertEqual(dr.lastboot, [])\n self.assertEqual(dr.old, [])\n\n def test_one_valid_lastboot(self):\n\n fd = StringIO.StringIO(\n '{\"lastboot\":[[\"01:23:45:67:89:0a\",\"00:10.2\",\"eth2\"]],\"old\":[]}'\n )\n dr = DynamicRules(fd=fd)\n\n self.assertTrue(dr.load_and_parse())\n\n self.assertEqual(dr.lastboot,\n [MACPCI(\"01:23:45:67:89:0a\",\"00:10.2\", tname=\"eth2\")])\n self.assertEqual(dr.old, [])\n\n\n def test_one_valid_lastboot(self):\n\n fd = StringIO.StringIO(\n '{\"lastboot\":[],\"old\":[[\"01:23:45:67:89:0a\",\"00:10.2\",\"eth2\"]]}'\n )\n dr = DynamicRules(fd=fd)\n\n self.assertTrue(dr.load_and_parse())\n\n self.assertEqual(dr.lastboot, [])\n self.assertEqual(dr.old,\n [MACPCI(\"01:23:45:67:89:0a\",\"00:10.2\", tname=\"eth2\")])\n\nclass TestGenerate(unittest.TestCase):\n\n def setUp(self):\n self.logbuf = StringIO.StringIO()\n openLog(self.logbuf, logging.NOTSET)\n\n def tearDown(self):\n\n closeLogs()\n self.logbuf.close()\n\n def test_ppn_quirks(self):\n # Test case taken from example on CA-75599\n\n dr = DynamicRules()\n dr.formulae = { \"eth0\" : (\"ppn\", \"em1\"),\n \"eth1\" : (\"ppn\", \"em2\")\n }\n\n dr.generate([\n MACPCI(\"00:1E:67:31:59:89\", \"0000:00:19.0\", kname=\"eth0\",\n ppn=\"em1\", label=\"Intel 82579LM VPRO\"),\n MACPCI(\"00:1E:67:31:59:88\", \"0000:02:00.0\", kname=\"eth1\",\n ppn=\"em1\", label=\"Intel 82574L\")\n ])\n\n # The quirks test should kick in and prevent any ppn rules from\n # being generated\n self.assertEqual(dr.rules, [])\n\n\n\nclass TestSave(unittest.TestCase):\n\n def setUp(self):\n self.logbuf = StringIO.StringIO()\n openLog(self.logbuf, logging.NOTSET)\n\n def tearDown(self):\n\n closeLogs()\n self.logbuf.close()\n\n def test_one_invalid_lastboot(self):\n\n dr = DynamicRules()\n dr.lastboot = [[\"foo\", \"bar\", \"baz\"]]\n\n try:\n json.loads(dr.write(False))\n except Exception:\n self.fail()\n\n def test_one_ibft_lastboot(self):\n\n dr = DynamicRules()\n dr.lastboot = [[\"00:1E:67:31:59:89\", \"0000:00:19.0\", None]]\n\n self.assertEqual(json.loads(dr.write(False)), {'lastboot': [],\n 'old': []})\n\n\nif __name__ == \"__main__\":\n sys.exit(unittest.main())\n","sub_path":"tests/test_ifrename_dynamic.py","file_name":"test_ifrename_dynamic.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"195169633","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport sys\nimport scipy\nimport scipy.signal\nimport glob\n\n\n# In[2]:\n\n\nimport UNet\n\nsys.path.append('../Preprocess')\nimport CTPPreprocess as preprocess\nimport CalcParaMaps as paramaps\n\nsys.path.append('../../')\nimport helper\n\n\n# In[3]:\n\n\nimport argparse\nparser = argparse.ArgumentParser(description = 'ctp noise2noise netwok')\nparser.add_argument('--imgFile', type=str, default='/home/dwu/trainData/Noise2Noise/train/ctp/simul/data/imgs_1e+06.npy')\nparser.add_argument('--refFile', type=str, default='/home/dwu/trainData/Noise2Noise/train/ctp/simul/data/imgs_-1.npy')\nparser.add_argument('--paraFile', type=str, default='/home/dwu/trainData/Noise2Noise/train/ctp/simul/data/paras_tikh_0.3.npz')\nparser.add_argument('--aifFile', type=str, default='/home/dwu/trainData/Noise2Noise/train/ctp/simul/data/aif0.npy')\nparser.add_argument('--nTrain', type=int, default=50)\nparser.add_argument('--nTest', type=int, default=15)\n\n# paths\nparser.add_argument('--outDir', type=str, default=None)\n\n# general network training\nparser.add_argument('--device', type=int, default=0)\nparser.add_argument('--nEpochs', type=int, default=100)\nparser.add_argument('--lr', type=float, default=1e-4)\nparser.add_argument('--batchSize', type=int, default=20)\nparser.add_argument('--outputInterval', type=int, default=50)\nparser.add_argument('--testInterval', type=int, default=25)\n\n# noise2noise params fo ctp\nparser.add_argument('--peakSampleHalfWidth', type=int, default=2)\n\n# data augmentation\nparser.add_argument('--aug', type=int, default=1)\nparser.add_argument('--imgNormIn', type=float, nargs=2, default=[0.15,0.15])\nparser.add_argument('--imgOffsetIn', type=float, nargs=2, default=[-1, -1])\n\nparser.add_argument('--imgNormOut', type=float, default=0.025)\nparser.add_argument('--imgOffsetOut', type=float, default=0)\n\n\n# In[4]:\n\n\ntf.reset_default_graph()\nnet = UNet.UNet()\nparser = net.AddArgsToArgParser(parser)\n\n\n# In[5]:\n\n\nif sys.argv[0] != 'TrainFrameToAvg.py':\n from IPython import display\n import matplotlib.pyplot as plt\n get_ipython().run_line_magic('matplotlib', 'inline')\n args = parser.parse_args(['--device', '0',\n '--nEpochs', '100',\n '--beta', '50',\n '--testInterval', '10',\n '--outputInterval', '10',\n '--outDir', '/home/dwu/trainData/Noise2Noise/train/ctp/simul/test_train'])\nelse:\n args = parser.parse_args(sys.argv[1:])\n\nfor k in args.__dict__:\n print (k, args.__dict__[k], sep=': ', flush=True)\n\n\n# In[6]:\n\n\ntf.reset_default_graph()\nnet = UNet.UNet()\nnet.FromParser(args)\nnet.imgshapeIn[-1] = net.imgshapeIn[-1] + 1\nnet.BuildModel()\nnet.BuildBiasControl()\n\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(update_ops):\n train = tf.train.AdamOptimizer(args.lr).minimize(net.loss)\n\nsaver1 = tf.train.Saver(max_to_keep=5)\nsaver2 = tf.train.Saver(max_to_keep=100)\nif not os.path.exists(args.outDir):\n os.makedirs(args.outDir)\n\n\n# In[7]:\n\n\n# build bias control kernel\nbiasControlKernel = scipy.signal.gaussian(net.biasKernelSz, net.biasKernelStd)[..., np.newaxis]\nbiasControlKernel = biasControlKernel @ biasControlKernel.T\nbiasControlKernel = (biasControlKernel / np.sum(biasControlKernel))[..., np.newaxis, np.newaxis]\n\n\n# In[8]:\n\n\n# load the image data\nimgs = np.load(args.imgFile) - 1\nrefs = np.load(args.refFile) - 1\n\n\n# In[9]:\n\n\n# load param files\nwith np.load(args.paraFile) as f:\n cbf0 = f['cbf']\n cbv0 = f['cbv']\n mtt0 = f['mtt']\n masks = f['mask'][..., np.newaxis]\n cbfFac = f['cbfFac']\naif0 = np.load(args.aifFile) / 1000\n\nmaskVessels = np.where(np.max(imgs, -1) > 0.1, 1, 0)[...,np.newaxis]\nmaskVessels *= masks\nfor i in range(maskVessels.shape[0]):\n maskVessels[i,...,0] = scipy.ndimage.morphology.binary_dilation(maskVessels[i,...,0])\nmasks *= (1-maskVessels)\n\nimgs *= np.tile(masks, (1,1,1,imgs.shape[-1]))\nrefs *= np.tile(masks, (1,1,1,imgs.shape[-1]))\n\n\n# In[10]:\n\n\n# pre-process to get target frames\navgs = np.copy(imgs)\nfor i in range(1, avgs.shape[-1] - 1):\n avgs[...,i] = (imgs[...,i-1] + imgs[...,i+1]) / 2\n\n\n# In[11]:\n\n\nx = np.sum(avgs * masks, (1,2))\ny = np.sum(imgs * masks, (1,2))\np = y / x\navgs *= p[:, np.newaxis, np.newaxis, :]\n\n\n# In[15]:\n\n\ndef ExtractTrainingBatches(imgs, avgs, masks, args, batchSize = None):\n if batchSize is None:\n batchSize = args.batchSize\n \n inputImgs = []\n inputAuxs = []\n inputRefs = []\n inputMasks = []\n \n for i in range(batchSize): \n # first select slice\n # then sample randomly across framew or sample near the peak\n iSlice = np.random.randint(imgs.shape[0])\n if i < batchSize / 2:\n iFrame = np.random.randint(1, imgs.shape[-1] - 1)\n else:\n # find the peak of tac\n tac = np.mean(imgs[iSlice, ...], (0,1))\n iPeak = np.argmax(tac)\n iFrame = np.random.randint(iPeak - args.peakSampleHalfWidth, iPeak + args.peakSampleHalfWidth + 1)\n \n # extract foreground image\n imgSlice = imgs[[iSlice], ...]\n inputImg = np.copy(imgSlice[..., [iFrame]])\n \n # extract reference image\n avgSlice = avgs[[iSlice], ...]\n inputRef = np.copy(avgSlice[..., [iFrame]])\n \n # extract background image\n indBk = np.random.permutation(2)\n inputBk = np.copy(imgSlice[..., [indBk[0]]])\n refBk = np.copy(imgSlice[..., [indBk[1]]])\n \n # get input and output of network\n inputRef = inputRef - refBk\n inputImg = np.concatenate((inputImg, inputBk), -1)\n \n # get aux for bias correction\n inputAux = inputImg[...,[0]] - inputImg[...,[1]]\n\n inputImgs.append(inputImg)\n inputRefs.append(inputRef)\n inputAuxs.append(inputAux)\n inputMasks.append(masks[[iSlice], ...])\n \n inputImgs = np.concatenate(inputImgs)\n inputRefs = np.concatenate(inputRefs)\n inputAuxs = np.concatenate(inputAuxs)\n inputMasks = np.concatenate(inputMasks)\n \n return inputImgs, inputRefs, inputAuxs, inputMasks\n\n\n# In[16]:\n\n\ndef TestSequence(sess, net, imgs, args, iSlices = None):\n if iSlices is None:\n iSlices = [np.random.randint(imgs.shape[0])]\n elif iSlices == -1:\n iSlices = list(range(imgs.shape[0]))\n print (iSlices)\n \n imgNormIn = (args.imgNormIn[0] + args.imgNormIn[1]) / 2\n imgOffsetIn = (args.imgOffsetIn[0] + args.imgOffsetIn[1]) / 2\n \n imgs = imgs[iSlices, ...]\n recons = []\n for i in range(imgs.shape[-1]):\n inputImg1 = np.concatenate((imgs[..., [i]], imgs[..., [0]]), -1)\n inputImg2 = np.concatenate((imgs[..., [i]], imgs[..., [1]]), -1)\n \n recon1 = sess.run(net.recon, {net.x: inputImg1 / imgNormIn + imgOffsetIn})\n recon2 = sess.run(net.recon, {net.x: inputImg2 / imgNormIn + imgOffsetIn})\n \n recon = (recon1 + recon2) / 2 - args.imgOffsetOut\n recons.append(recon)\n \n recons = np.concatenate(recons, -1)\n\n return recons, iSlices\n\n\n# In[17]:\n\n\nsess = tf.Session(config = tf.ConfigProto(gpu_options = tf.GPUOptions(visible_device_list='%s'%args.device, \n allow_growth=True)))\nsess.run(tf.global_variables_initializer())\n\n\n# In[ ]:\n\n\nnp.random.seed(0)\n\nfor epoch in range(args.nEpochs):\n nBatches = args.nTrain * imgs.shape[-1]\n \n for iBatch in range(0, nBatches, args.batchSize):\n inputImg, inputRef, inputAux, inputMask = ExtractTrainingBatches(imgs[:args.nTrain, ...], \n avgs[:args.nTrain, ...], \n masks[:args.nTrain, ...], args)\n \n # augmentation\n if args.aug:\n argOption = np.random.randint(4)\n inputImg = helper.Augmentation(inputImg, argOption)\n inputRef = helper.Augmentation(inputRef, argOption)\n inputAux = helper.Augmentation(inputAux, argOption)\n inputMask = helper.Augmentation(inputMask, argOption)\n \n # training\n imgOffsetIn = np.random.uniform(args.imgOffsetIn[0], args.imgOffsetIn[1], [inputImg.shape[0], 1, 1, 1])\n imgNormIn = np.random.uniform(args.imgNormIn[0], args.imgNormIn[1], [inputImg.shape[0], 1, 1, 1])\n imgOffsetOut = args.imgOffsetOut\n imgNormOut = args.imgNormOut\n _, loss2, auxLoss, recon = sess.run(\n [train, net.loss2, net.auxLoss, net.recon], \n {net.x: inputImg / imgNormIn + imgOffsetIn, \n net.ref: inputRef / imgNormOut + imgOffsetOut,\n net.aux: inputAux / imgNormOut + imgOffsetOut,\n net.biasKernel: biasControlKernel,\n net.mask: inputMask,\n net.training: True})\n recon -= imgOffsetOut\n \n # display\n k = int(iBatch / args.batchSize)\n if (k+1) % args.outputInterval == 0:\n print ('(%d, %d): loss2 = %g, auxLoss = %g' %(epoch, k, loss2, net.beta * auxLoss), flush=True)\n\n if ((k+1) % (args.outputInterval * 5) == 0 or k == int(nBatches / args.batchSize)) and sys.argv[0] != 'TrainFrameToAvg.py' and epoch >= 0: \n\n # get network reconstruction\n testingImgs = imgs[-args.nTest:, ...]\n testingMasks = masks[-args.nTest:, ...]\n reconTest, iSlices = TestSequence(sess, net, testingImgs, args)\n\n # testing mask\n maskTest = testingMasks[iSlices, ...]\n maskFrame = np.tile(maskTest, (1,1,1,reconTest.shape[-1]))\n\n # get gaussian reconstruction\n ctp = testingImgs[iSlices, ...] / args.imgNormOut\n ctp = ctp - ctp[...,[0]]\n ctp *= maskFrame\n x = scipy.ndimage.gaussian_filter(ctp, (0,2,2,0), mode='nearest')\n\n cbf, _, _ = paramaps.CalcParaMaps(reconTest, maskTest, aif=aif0 / args.imgNormOut, rho=1, kappa=1) * cbfFac\n cbfGaussian, _, _ = paramaps.CalcParaMaps(x, maskTest, aif=aif0 / args.imgNormOut, rho=1, kappa=1) * cbfFac\n\n # get TAC\n reconTac = np.sum(reconTest * maskFrame, (0,1,2)) / np.sum(maskFrame, (0,1,2))\n imgTac = np.sum(ctp * maskFrame, (0,1,2)) / np.sum(maskFrame, (0,1,2))\n\n iFrame = np.argmax(imgTac)\n\n display.clear_output()\n plt.figure(figsize=[18,12])\n plt.subplot(231); plt.imshow(reconTest[0, ..., iFrame] * maskTest[0,...,0], \n cmap='gray', vmin=0, vmax=0.9)\n plt.subplot(232); plt.imshow(x[0,...,iFrame] * maskTest[0,...,0], cmap='gray', vmin=0, vmax=0.9)\n plt.subplot(234); plt.imshow(cbf[0,...] * maskTest[0,...,0], cmap='jet', vmin=0, vmax=50)\n plt.subplot(235); plt.imshow(cbfGaussian[0,...] * maskTest[0,...,0], cmap='jet', vmin=0, vmax=50)\n plt.subplot(233); plt.plot(reconTac); plt.plot(imgTac);\n plt.show()\n \n if (epoch + 1) % args.testInterval != 0 and epoch != args.nEpochs - 1:\n saver1.save(sess, os.path.join(args.outDir, '%d'%epoch))\n else:\n saver2.save(sess, os.path.join(args.outDir, '%d'%epoch))\n \n # save intermediate results\n \n print ('Generating intermediate results')\n \n tmpDir = os.path.join(args.outDir, 'tmp')\n if not os.path.exists(tmpDir):\n os.makedirs(tmpDir)\n\n reconTest, _ = TestSequence(sess, net, imgs, args, -1)\n maskFrame = np.tile(masks, (1,1,1,reconTest.shape[-1]))\n\n np.save(os.path.join(tmpDir, 'iodines'), \n np.copy(np.transpose((reconTest * maskFrame).astype(np.float32), (0,3,1,2)), 'C'))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"CTP/simul/TrainFrameToAvg.py","file_name":"TrainFrameToAvg.py","file_ext":"py","file_size_in_byte":11936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"635336584","text":"from django.shortcuts import render,HttpResponse\r\n# Create your views here.\r\nimport threading\r\nimport os\r\nimport time\r\nfrom wsgiref.util import FileWrapper\r\nfrom django.http import StreamingHttpResponse\r\nfrom dwebsocket.decorators import accept_websocket,require_websocket\r\n'''\r\nframe = []\r\ndef f():\r\n while True:\r\n time.sleep(2)\r\n print(\"啦啦啦\")\r\n frame.append(1111)\r\nclass Sington(object):\r\n _instance=[]\r\n def __new__(cls, *args, **kwargs):\r\n if not hasattr(cls,'_b'):\r\n cls._b = super(cls,Sington).__new__(cls,*args,**kwargs)\r\n return cls._b\r\ndef index(request):\r\n t1 = threading.Thread(target=f)\r\n t1.start()\r\n return render(request,\"index.html\")\r\ndef index2(request):\r\n return render(request,'index2.html')\r\n'''\r\n'''\r\n@accept_websocket\r\ndef echo(request):\r\n clients = Sington()._instance\r\n if not request.is_websocket():#判断是不是websocket连接\r\n try:#如果是普通的http方法\r\n message = request.GET['message']\r\n return HttpResponse(message) #\r\n except:\r\n return render(request,'index2.html')\r\n else:\r\n if request.websocket not in clients:\r\n clients.append(request.websocket)\r\n for message in request.websocket:\r\n #print(len(clients))\r\n for client in clients:\r\n if not message:\r\n clients.remove(request.websocket)\r\n break\r\n else:\r\n client.send(message)#发送消息到客户端\r\n'''\r\n'''\r\n@accept_websocket\r\ndef echo(request):\r\n if request.is_websocket:#如果是webvsocket\r\n lock = threading.RLock() #rlock线程锁\r\n try:\r\n lock.acquire()#抢占资源\r\n clients.append(request.websocket)#把websocket加入到clients\r\n print(clients)\r\n for message in request.websocket:\r\n if not message:\r\n break\r\n for client in clients:\r\n client.send(message)\r\n finally:\r\n clients.remove(request.websocket)\r\n lock.release()#释放锁\r\n else:\r\n try: # 如果是普通的http方法\r\n message = request.GET['message']\r\n return HttpResponse(message) #\r\n except:进入当前页面的时候执行代码\r\n return render(request, 'index2.html')\r\n'''\r\n'''\r\n@require_websocket\r\ndef echo_once(request):\r\n message = request.websocket.wait()\r\n while True:\r\n time.sleep(2)\r\n request.websocket.send(bytes(str(frame.pop()).encode('utf-8')))\r\n@accept_websocket\r\ndef echo_next(request):\r\n clients = Sington()._instance\r\n if not request.is_websocket():#判断是不是websocket连接\r\n t1 = threading.Thread(target=f)\r\n t1.start()\r\n try:#如果是普通的http方法\r\n message = request.GET['message']\r\n return HttpResponse(message) #\r\n except:\r\n return render(request,'index3.html')\r\n else:\r\n if request.websocket not in clients:\r\n clients.append(request.websocket)\r\n for message in request.websocket:\r\n print(len(clients))\r\n for client in clients:\r\n if not frame:\r\n clients.remove(request.websocket)\r\n break\r\n else:\r\n #client.send(message)\r\n client.send(str(frame.pop()).encode('utf-8'))#发送消息到客户端\r\n \r\n'''\r\ndef file_iterator(chunk_size=8192, offset=0, length=None):\r\n with open(\"static\\\\11.mp4\", \"rb\",decoding='utf-8') as f:\r\n f.seek(offset, os.SEEK_SET)\r\n remaining = int(length)\r\n while True:\r\n bytes_length = chunk_size if remaining is None else min(remaining, int(chunk_size))\r\n data = f.read(bytes_length)\r\n print(11111111)\r\n if not data:\r\n break\r\n if remaining:\r\n remaining -= len(data)\r\n yield data\r\nimport re\r\nimport mimetypes\r\ndef stream_video(request):\r\n\r\n path = \"static\\\\11.mp4\"\r\n \"\"\"将视频文件以流媒体的方式响应\"\"\"\r\n\r\n range_header = request.META.get('HTTP_RANGE', '').strip()\r\n range_re = re.compile(r'bytes\\s*=\\s*(\\d+)\\s*-\\s*(\\d*)', re.I)\r\n range_match = range_re.match(range_header)\r\n size = os.path.getsize(path)\r\n content_type, encoding = mimetypes.guess_type(path)\r\n content_type = content_type or 'application/octet-stream'\r\n '''\r\n #if range_match:\r\n first_byte, last_byte = range_match.groups()\r\n first_byte = int(first_byte) if first_byte else 0\r\n last_byte = first_byte + 1024 * 1024 * 8 # 8M 每片,响应体最大体积\r\n if last_byte >= size:\r\n last_byte = size - 1\r\n length = last_byte - first_byte + 1\r\n resp = StreamingHttpResponse(file_iterator(path, offset=first_byte, length=length), status=206, content_type=content_type)\r\n resp['Content-Length'] = str(length)\r\n resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)\r\n #else:\r\n '''\r\n # 不是以视频流方式的获取时,以生成器方式返回整个文件,节省内存\r\n resp = StreamingHttpResponse(FileWrapper(open(path, 'rb')), content_type=content_type)\r\n resp['Content-Length'] = str(size)\r\n\r\n resp['Accept-Ranges'] = 'bytes'\r\n return resp\r\ndef index4(request):\r\n return render(request,\"index4.html\")\r\ndef index5(request):\r\n return render(request,\"index5.html\")","sub_path":"Websocket/vchat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"337880995","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Philipp Temminghoff\n\"\"\"\n\nfrom qtpy import QtWidgets\n\n\nclass RadioButton(QtWidgets.QRadioButton):\n\n def set_enabled(self):\n self.setEnabled(True)\n\n def set_disabled(self):\n self.setEnabled(False)\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n widget = RadioButton(\"This is a test\")\n widget.show()\n app.exec_()\n","sub_path":"prettyqt/widgets/radiobutton.py","file_name":"radiobutton.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"510407212","text":"'''\nClass to visualize features and compare them between instances\nof the same person and diferent persons.\n'''\n\n# built-in\nimport pickle\nimport sys\nsys.path.append(\"..\")\n\n# libs\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\n# own libs\n\n# globals\n\n\nclass FeaturesStudy:\n\n def __init__(self, pickle_path, target, legend, colors):\n # target info\n self.__target = target\n self.__legend = legend\n self.__colors = colors\n\n # read pickle\n with open(pickle_path, \"rb\") as file:\n person_storage = pickle.load(file)\n\n # get persons\n self.__entries = person_storage.entries\n self.__exits = person_storage.exits\n all_persons = self.__entries + self.__exits\n\n # features\n self.__areas = []\n self.__hue_hists = []\n self.__sat_hists = []\n self.__hs_hists = []\n self.__edge_hists = []\n self.__cs_hists = []\n\n for person in all_persons:\n self.__areas.append(person.area)\n self.__hue_hists.append(person.hue_hist)\n self.__sat_hists.append(person.saturation_hist)\n self.__hs_hists.append(person.hs_2dhist)\n self.__edge_hists.append(person.edge_hist)\n self.__cs_hists.append(person.cs_hist)\n\n @staticmethod\n def plot_nvectors(title, xlabel, ylabel, data, legend=None, colors=None):\n plt.figure()\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n for ind in range(len(data)):\n vector = data[ind]\n\n if colors is not None:\n plt.plot(vector, color=colors[ind])\n\n else:\n plt.plot(vector)\n\n if legend is not None:\n plt.legend(legend)\n\n @staticmethod\n def plot_matrix(title, xlabel, ylabel, matrix):\n plt.figure()\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.imshow(matrix)\n\n def plot_stem(self, correct_distances, wrong_distances, title_suffix):\n plt.figure()\n plt.title(\"Match distances \" + title_suffix)\n plt.xlabel(\"Entry person\")\n plt.ylabel(\"Distance\")\n plt.stem(correct_distances, linefmt='C2-')\n plt.stem(np.arange(len(correct_distances), len(correct_distances) + len(wrong_distances)),\n wrong_distances, linefmt='C3-')\n\n def plots_same_person(self):\n features = [self.__areas, self.__hue_hists, self.__sat_hists, self.__hs_hists,\n self.__edge_hists, self.__cs_hists]\n titles = [\"Areas \", \"Hue Hists \", \"Saturation Hists \", \"H-S Histogram \",\n \"Edge Hists \", \"Color Structured Hists \"]\n xlabels = [\"Instance\", \"Hue\", \"Saturation\", \"Saturation\", \"Edge\", \"Color\"]\n ylabels = [\"Area\", \"Probability\", \"Probability\", \"Hue\", \"Decision Ind\", \"Decision Ind\"]\n\n for target_id in np.unique(self.__target):\n for feature_ind in range(len(features)):\n target_vectors = np.array(features[feature_ind])[self.__target == target_id]\n target_vectors = target_vectors if feature_ind != 0 else [target_vectors] # area 2D vector\n\n # if it is not h-s histograms\n if feature_ind != 3:\n FeaturesStudy.plot_nvectors(titles[feature_ind] + self.__legend[target_id],\n xlabels[feature_ind], ylabels[feature_ind], target_vectors)\n\n else:\n for hs_ind in range(len(target_vectors)):\n matrix = target_vectors[hs_ind]\n FeaturesStudy.plot_matrix(titles[feature_ind] + self.__legend[target_id] + \" \" + str(hs_ind),\n xlabels[feature_ind], ylabels[feature_ind], matrix)\n\n plt.show()\n\n def comparation_plots(self):\n features = [self.__areas, self.__hue_hists, self.__sat_hists, self.__cs_hists]\n titles = [\"Areas\", \"Hue Hists\", \"Saturation Hists\", \"Color Structured Hists\"]\n xlabels = [\"Instance\", \"Hue\", \"Saturation\", \"Color\"]\n ylabels = [\"Area\", \"Probability\", \"Probability\", \"Decision ind\"]\n\n for ind in range(len(features)):\n feature = features[ind]\n nvectors = []\n\n for target_id in np.unique(self.__target):\n target_vectors = np.array(feature)[self.__target == target_id]\n\n # if it is not area feature\n if ind != 0:\n mean_vector = np.mean(target_vectors, axis=0)\n nvectors.append(mean_vector)\n\n else:\n nvectors.append(target_vectors)\n\n FeaturesStudy.plot_nvectors(titles[ind], xlabels[ind], ylabels[ind], nvectors, self.__legend, self.__colors)\n\n plt.show()\n\n def plot_distances(self, k, ids_dict):\n # get features vectors\n entry_samples = np.array([person.get_features_vector() for person in self.__entries])\n exit_samples = np.array([person.get_features_vector() for person in self.__exits])\n\n # calculate distances\n bfm = cv2.BFMatcher()\n matches = bfm.knnMatch(exit_samples, entry_samples, k=k)\n dmatches_list = np.array(matches).ravel()\n\n # data structures to store distances\n correct_distances = []\n wrong_distances = []\n\n current_cdistances = []\n current_wdistances = []\n last_person = None\n\n # for each dmatch\n for dmatch in dmatches_list:\n # get information of the match\n exit_person = self.__exits[dmatch.queryIdx]\n entry_person = self.__entries[dmatch.trainIdx]\n distance = dmatch.distance\n\n # plot distances if the exit person changes\n if last_person is not None and last_person != exit_person.id:\n self.plot_stem(current_cdistances, current_wdistances, \"exit person \" + str(exit_person.id))\n current_cdistances = []\n current_wdistances = []\n last_person = exit_person.id\n\n # same persons\n if ids_dict[exit_person.id] == ids_dict[entry_person.id]:\n current_cdistances.append(distance)\n correct_distances.append(distance)\n\n # different persons\n else:\n current_wdistances.append(distance)\n wrong_distances.append(distance)\n\n # calculate threshold\n all_distances = np.array(correct_distances + wrong_distances)\n\n max_dist = np.max(all_distances)\n min_dist = np.min(all_distances)\n normalized_distances = (all_distances - min_dist) / (max_dist - min_dist)\n\n threshold = cv2.threshold((normalized_distances * 255.0).astype(np.uint8),\n 0, 255.0, cv2.THRESH_OTSU)[0]\n threshold = (threshold * (max_dist - min_dist) + min_dist) / 255.0\n\n # plot all distances\n plt.figure()\n plt.title(\"All matching distances\")\n plt.xlabel(\"X\")\n plt.ylabel(\"Distance\")\n plt.plot(correct_distances, '.', color='g')\n plt.plot(np.arange(len(correct_distances), len(correct_distances) + len(wrong_distances)),\n wrong_distances, '.', color='r')\n plt.plot(np.arange(len(all_distances)), [threshold] * len(all_distances), 'b')\n plt.show()\n\n\nif __name__ == \"__main__\":\n pickle_path = \"../../../Dataset/Pickles/person_storage.p\"\n\n target = np.array([0, 3, 2, 1, 2, 0, 3, 1, 2, 3, 0, 2, 0, 3, 1, 0, 2, 3])\n legend = [\"Frederico\", \"Goncalo F\", \"Goncalo A\", \"Engenheiro\"]\n colors = [\"red\", \"green\", \"blue\", \"orange\"]\n\n # FeaturesStudy(pickle_path, target, legend, colors).plots_same_person()\n## FeaturesStudy(pickle_path, target, legend, colors).comparation_plots()\n\n ids_dict = {7: 'GA', 20: 'GA', 32: 'GA', 51: 'GA', 55: 'GA',\n 25: 'GF', 37: 'GF', 44: 'GF',\n 1: 'F', 29: 'F', 34: 'F', 48: 'F', 63: 'F',\n 9: 'E', 30: 'E', 41: 'E', 53: 'E', 57: 'E'}\n k = 30\n FeaturesStudy(pickle_path, target, legend, colors).plot_distances(k, ids_dict)\n\n\n\n\n\n\n\n","sub_path":"03_Implementacao/PythonAlgs/src/matching_system/features_study.py","file_name":"features_study.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"626141476","text":"from page.models import StaticInfo, Page\n\n\ndef static_info_processor(request):\n page_list = Page.objects.filter(in_menu_top=True, parent=None)\n menu_top = page_list.get_descendants(include_self=True)\n menu_bottom = Page.objects.filter(in_menu_bottom=True)\n if StaticInfo.objects.first():\n obj = StaticInfo.objects.first()\n phone = obj.phone\n phone_mobile = obj.phone_mobile\n address = obj.address\n site_title = obj.site_title\n logo = obj.logo\n logo_bottom = obj.logo_bottom\n email = obj.email\n footer_company_info = obj.footer_company_info\n return {'phone': phone,\n 'phone_mobile': phone_mobile,\n 'site_title':site_title,\n 'address': address,\n 'site_title':site_title,\n 'logo':logo,\n 'logo_bottom':logo_bottom,\n 'email':email,\n 'footer_company_info': footer_company_info,\n 'menu_top':menu_top,\n 'menu_bottom':menu_bottom,\n }\n else:\n return {\n 'menu_top':menu_top,\n 'menu_bottom':menu_bottom,\n }\n\ndef footer_menu(request):\n menu_list = Page.objects.filter(in_menu_bottom=True)\n return {'menu_list':menu_list}\n","sub_path":"page/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"54738469","text":"from socket import *\nimport os\nimport sys\nimport argparse\n\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\n\npr_key = RSA.import_key(open('private_pem.pem', 'r').read())\npu_key = RSA.import_key(open('public_pem.pem', 'r').read())\nprint(type(pr_key), type(pu_key))\ndecrypt = PKCS1_OAEP.new(key=pr_key)\n\ndef my_parser():\n commandList = [\"get\", \"post\"]\n if sys.argv[1]==\"-h\":\n with open('help.txt') as f:\n read_data = f.read()\n print(read_data)\n f.close()\n return 0,0,0\n if sys.argv[1] in commandList:\n if sys.argv[1]==\"get\":\n if len(sys.argv)==4:\n option = sys.argv[3]\n else: \n option = \"\"\n command = \"get\"\n # fileName = sys.argv[2]\n serverName = sys.argv[2].split(\":\")[0]\n fileName = sys.argv[2].split(\":\")[1]\n return command, serverName, fileName, option\n\n# serverName = \"localhost\"\ncommand, serverName, fileName, option = my_parser()\nserverPort = 12008\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((serverName,serverPort))\n\nif command==0:\n exit(0)\nelse:\n print(command, fileName, option)\nsentence = \" \".join([command, fileName, option])\nclientSocket.send(sentence.encode())\n\nif option == \"-r\":\n try:\n with open(fileName+\".zip\", \"wb\") as fw:\n while True:\n print('Receiving...')\n # data = f.decrypt(clientSocket.recv(1024))\n data = decrypt.decrypt(clientSocket.recv(1024))\n if not data:\n break\n fw.write(data)\n fw.close()\n print(\"Received\\n\")\n os.system(\"mkdir \"+fileName)\n print(\"Unzipping...\", \"unzip ./\"+fileName+\".zip\"+\" -d \"+fileName)\n os.system(\"unzip ./\"+fileName+\".zip\"+\" -d \"+fileName)\n os.system(\"rm -rf \"+fileName+\".zip\")\n print(\"\")\n except:\n pass\nelse:\n try:\n with open(fileName, \"wb\") as fw:\n while True:\n print('Receiving...')\n data = decrypt.decrypt(clientSocket.recv(1024))\n # data = clientSocket.recv(1024)\n if not data:\n break\n fw.write(data)\n fw.close()\n print(\"Received\\n\")\n except:\n pass\nclientSocket.close()","sub_path":"TCPclient.py","file_name":"TCPclient.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"342022407","text":"class Solution(object):\n\tdef isValid(self, s):\n\t\t\"\"\"\n\t\t:type s: str\n\t\t:rtype: bool\n\t\t\"\"\"\n\t\tstack = []\n\t\tdic = {']':'[', '}':'{', ')':'('}\n\t\tfor c in s:\n\t\t\tif c in dic.values():\n\t\t\t\tstack.append(c)\n\t\t\telif c in dic.keys():\n\t\t\t\tif stack == [] or stack.pop() != dic[c]:\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn False\n\t\treturn stack == []","sub_path":"001-100/020-valid-parentheses.py","file_name":"020-valid-parentheses.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"406326150","text":"vowel = \"aeiou\" #元音\ndef PigLatin():\n array = input(\"请输入一系列单词,单词之间用空格分隔\\n\")\n array = array.lower().split() \n \n array04 = \"\"\n array04END=\"\"\n for i in range(len(array)):\n if(array[i][:1] in vowel):\n array[i] +=\"hay\"\n elif(array[i][:2]==\"qu\"):\n array = array[i][len(array[i]):]+\"quay\"\n else:\n array04 = condition04(array[i])\n array[i] = array[i][array04:]+array[i][0:array04]+\"ay\"\n return array;\ndef condition04(array04):\n for i in range(len(array04)):#下方判断,如果y不在第一个字母,当成元音\n if((array04[i] in vowel) or (i!=0 and array04[i] ==\"y\")):\n return i;\n return len(array04);\n\n#print(PigLatin())\n\nimport string\nimport keyword\n\ndef que02(s):\n if s not in keyword.kwlist: #字符串不能是关键字\n if (s[0].isalpha()) or (s[0]==\"_\"):#首字符必须是字母或者下划线\n for i in s[1:]:\n if (i.isalpha()!=True) and (i.isdigit()!=True) and (i!=\"_\"): #由数字字母下划线组成\n return False\n return True \n return False \n#print(que02(input(\"请输入字符串:\")))\n\ndef que03(s):\n s = s.lower() #转换成小写\n result = 0\n for i in s:\n result += ord(i)-96 #通过ascii码计算\n return result\n \nprint(que03(input(\"请输入要计算字母值的字符串:\\n\")))\n\n","sub_path":"week05_2.py","file_name":"week05_2.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"51262583","text":"\"\"\"\nClasses, subscribers and functions for dealing with index management\n\"\"\"\nfrom . import event\nfrom .archiveutil import ArchiveUtil\nfrom .desc import template\nfrom .desc import updict\nfrom .jenv import EnvFactory\nfrom .utils import benchmark\nfrom .utils import path\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom more_itertools import chunked\nfrom pyramid import threadlocal\nfrom pyramid.events import ApplicationCreated\nfrom pyramid.events import subscriber\nfrom pyramid.path import DottedNameResolver as dnr\nfrom pyramid.settings import asbool\nfrom threading import Thread\nimport json\nimport logging\nimport operator\nimport pkg_resources\nimport re\nimport threading\nimport time\n\nlogger = logging.getLogger(__name__)\n\n\nclass IndexManager(object):\n \"\"\"\n Manages the static file index\n \"\"\"\n\n root_index_file = 'index.html'\n EXTS = re.compile(r'^.*(?P\\.egg|-any\\.whl|\\.gz|\\.bz2|\\.tgz|\\.zip)$')\n SDIST_EXT = re.compile(r'^.*(?P\\.gz|\\.bz2|\\.tgz|\\.zip)$')\n\n leaf_name = 'leaf.html'\n home_name = 'index.html'\n def_index_title = 'CheesePrism'\n leaf_data = updict()\n index_data = updict(title=def_index_title,\n index_title=def_index_title,\n description=\"Welcome to the CheesePrism\")\n datafile_name = \"index.json\"\n index_data_lock = threading.Lock()\n\n leaf_template = template('leaf.html')\n home_template = template('home.html')\n\n at = archive_tool = ArchiveUtil()\n _move_on_error = at.move_on_error\n\n pkginfo_to_pkgdata = at.pkginfo_to_pkgdata\n pkginfo_from_file = at.pkginfo_from_file\n extension_of = at.extension_of\n leaf_locks = {}\n\n def __init__(self, index_path, template_env=None,\n arch_baseurl='/index/', urlbase='', index_data={},\n leaf_data={}, error_folder='_errors', executor=None,\n logger=None, write_html=True):\n\n if logger is None:\n self.log = logging.getLogger('.'.join((__name__, self.__class__.__name__)))\n\n self.urlbase = urlbase\n self.write_html = write_html\n self.arch_baseurl = arch_baseurl\n self.template_env = template_env\n\n if not self.template_env:\n self.template_env = self.default_env_factory('')\n self.index_data = index_data.copy()\n self.leaf_data = leaf_data.copy()\n self.path = path(index_path).makedirs_p()\n self.home_file = self.path / self.root_index_file\n self.datafile_path = self.path / self.datafile_name\n\n self.error_folder = self.path / error_folder\n\n if not (self.error_folder.exists() and self.error_folder.isdir()):\n if self.error_folder.exists() and not self.error_folder.isdir():\n self.error_folder.move(self.error_folder + '.bak')\n\n self.error_folder.makedirs()\n\n self.move_on_error = partial(self._move_on_error, self.error_folder)\n self.arch_to_add_map = partial(self.at.arch_to_add_map,\n error_handler=self.move_on_error)\n self.executor = executor\n\n @classmethod\n def from_registry(cls, registry):\n settings = registry.settings\n executor = registry['cp.executor']\n env = registry['cp.index_templates']\n return cls.from_settings(settings, executor, env)\n\n @classmethod\n def from_settings(cls, settings, executor=None, env=None):\n file_root = path(settings['cheeseprism.file_root'])\n if not file_root.exists():\n file_root.makedirs()\n\n urlbase = settings.get('cheeseprism.urlbase', '')\n write_html = asbool(settings.get('cheeseprism.write_html', 'true'))\n abu = settings.get('cheeseprism.archive.urlbase', '..')\n idx_tmplt = settings.get('cheeseprism.index_templates', '')\n env = EnvFactory.from_str(idx_tmplt)\n\n return cls(settings['cheeseprism.file_root'],\n urlbase=urlbase,\n arch_baseurl=abu,\n template_env=env,\n executor=executor,\n write_html=write_html)\n\n @property\n def default_env_factory(self):\n return EnvFactory.from_str\n\n @property\n def files(self):\n return (x for x in self.path.files() if self.archive_tool.EXTS.match(x))\n\n def projects_from_archives(self):\n with benchmark('-- collected projects'):\n projects = {}\n paths = (self.path / item for item in self.files)\n arch_info = partial(pki_ff, handle_error=self.move_on_error)\n results = [info for info in self.executor.map(arch_info, paths)]\n for itempath, info in results:\n projects.setdefault(info.name, []).append((info, itempath))\n\n with benchmark('-- sorted projects'):\n return sorted(projects.items())\n\n def regenerate_leaf(self, leafname):\n files = self.path.files('%s-*.*' %leafname)\n\n pki_ff = partial(self.at.pkginfo_from_file, handle_error=self.move_on_error)\n versions = [(pki_ff(self.path / item), item) for item in files]\n versions.sort(key=lambda x: pkg_resources.parse_version(x[0].version))\n\n return self.write_leaf(self.path / leafname, versions)\n\n def regenerate_all(self):\n items = self.projects_from_archives()\n if self.write_html is False:\n yield None\n else:\n with benchmark('-- wrote index.html'):\n yield self.write_index_home(items)\n\n with benchmark('-- regenerated index'):\n yield [self.write_leaf(self.path / key, value) for key, value in items]\n\n def write_index_home(self, items):\n self.log.info('Write index home: %s', self.home_file)\n data = self.index_data.copy()\n data['packages'] = [dict(name=key, url=str(path(self.urlbase) / key )) \\\n for key, value in items]\n self.home_file.write_text(self.home_template.render(**data))\n return self.home_file\n\n def _leaf_html(self, leafdir, tversions, indexhtml=\"index.html\"):\n title = \"%s:%s\" %(self.index_data['title'], leafdir.name)\n leafhome = leafdir / indexhtml\n text = self.leaf_template.render(\n package_title=leafdir.name,\n title=title,\n versions=tversions)\n leafhome.write_text(text)\n return leafhome\n\n def _leaf_html_free(self, leafdir, versions, indexhtml=\"index.html\"):\n leafhome = leafdir / indexhtml\n if not leafdir.exists():\n self.log.error(\"leafdir %s has disappeared\" %leafdir)\n raise StopIteration('Leaf dir missing %s' %leafdir)\n\n if leafhome.exists():\n self.log.info(\"HTML FREE: Removing %s\", str(leafhome))\n leafhome.remove_p()\n\n for filepath in versions:\n target = leafdir / filepath.name\n if filepath.exists():\n if not target.exists():\n yield filepath.symlink(target.abspath())\n else:\n self.log.error(\"file %s has disappeared, removing link\" %filepath)\n #@@ figure out the coverage issue here\n if target.exists():\n self.log.debug(\"-- removing link: %s\" %target)\n target.unlink()\n\n def leaf_values_from_archive(self, leafname, archive):\n url = str(path(self.arch_baseurl) / archive.name)\n return dict(url=url, name=archive.name)\n\n def leaf_values_from_map(self, leafname, leafdata):\n name = leafdata['name']\n url = str(path(self.arch_baseurl) / name)\n return dict(url=url, name=name)\n\n def leafdata(self, fpath, dist):\n return dict(filename=str(fpath.name),\n md5=fpath.md5hex,\n size=fpath.size,\n name=dist.name,\n version=dist.version,\n mtime=fpath.mtime,\n ctime=fpath.ctime,\n atime=fpath.ctime)\n\n getmd5 = staticmethod(operator.itemgetter('md5'))\n\n def add_version_to_leafjson(self, fpath, dist, leafname,\n indexjson=\"index.json\"):\n \"\"\"\n Append a single version to leafjson\n\n :returns: leafjson as dict\n \"\"\"\n leafdir = self.path / leafname\n leafjson = leafdir / indexjson\n with self.lock_leaf_json(leafname, leafjson) as leafdata:\n if fpath.name in set([x['filename'] for x in leafdata]):\n self.log.warning(\"%s - Attempt to add duplicate to %s\", fpath, leafjson)\n return leafdata\n leafdata.append(self.leafdata(fpath, dist))\n return leafdata\n\n @contextmanager\n def lock_leaf_json(self, name, leafjson):\n with self.leaf_locks.setdefault(name, threading.Lock()):\n with open(leafjson) as fd:\n leafdata = json.load(fd)\n\n try:\n yield leafdata\n finally:\n with open(leafjson, 'w') as jsonout:\n json.dump(leafdata, jsonout)\n\n def add_version_to_leaf(self, fpath, leafname,\n indexjson=\"index.json\",\n indexhtml=\"index.html\"):\n \"\"\"\n Most minimally, add a package version to a leaf node\n\n :returns: leafjson as dict\n \"\"\"\n leafdir = self.path / leafname\n assert leafdir.exists(), \"Leafdir missing: %s\" %leafdir\n dist = self.at.pkginfo_from_file(fpath, handle_error=self.move_on_error)\n leafdata = self.add_version_to_leafjson(fpath, dist, leafname, indexjson=\"index.json\")\n\n if self.write_html is True:\n self._leaf_html(leafdir,\n (self.leaf_values_from_map(leafname, datum) for datum in leafdata),\n indexhtml=\"index.html\")\n else:\n versions = (self.path / x['filename'] for x in leafdata) #@@ maybe on link what needs to be?\n linked = [x for x in \\\n self._leaf_html_free(leafdir, versions, indexhtml=\"index.html\") if x]\n\n self.log.debug(\"Linked: %s\", linked)\n\n active_archives = (self.path / x['filename'] for x in leafdata)\n self.log.debug(\"Removed: %s\",\n self.cleanup_links(leafdir,\n leafdir / indexjson,\n active_archives))\n leafdata = self.cleanup_leafdata(leafdir, leafdir / indexjson)\n return leafdata\n\n def cleanup_leafdata(self, leafdir, leafjson):\n with self.lock_leaf_json(str(leafdir.name), leafjson) as leafdata:\n missing = [idx for idx, fn in enumerate(x['filename'] for x in leafdata) if not (leafdir / fn).exists()]\n [leafdata.pop(idx) for idx in missing]\n return leafdata\n\n def cleanup_links(self, leafdir, leafjson, active_archives):\n targets = dict((str(version.name), version.exists()) for version in active_archives)\n archive_missing, removed = set(), []\n\n for link in (x for x in leafdir.files() if x.islink()):\n target = targets.get(str(link.name), None)\n if target is None:\n link.unlink()\n removed.append(link)\n\n if target is False:\n link.unlink()\n archive_missing.add(link.name)\n\n if archive_missing:\n logging.warn(\"Archives missing: %s\", archive_missing)\n with self.lock_leaf_json(leafdir.name, leafjson) as leafdata:\n dels = [i - 1 for i, data in enumerate(leafdata) if data['filename'] in archive_missing]\n archive_missing = [leafdata.pop(i)['filename'] for i in dels]\n\n return removed, archive_missing\n\n #@@ combine with regenerate_leaf\n def write_leaf(self, leafdir, versions, indexjson=\"index.json\", indexhtml=\"index.html\"):\n leafjson = leafdir / indexjson\n versions = list(versions)\n\n if not leafdir.exists(): leafdir.makedirs()\n\n if self.write_html is True:\n tversions = (self.leaf_values_from_archive(leafdir.name, archive)\\\n for info, archive in versions)\n self._leaf_html(leafdir, tversions, indexhtml=\"index.html\")\n else:\n [x for x in self._leaf_html_free(leafdir, (y for x, y in versions), indexhtml=\"index.html\")]\n\n with self.leaf_locks.setdefault(leafdir.name, threading.Lock()):\n with open(leafjson, 'w') as jsonout:\n leafdata = [self.leafdata(fpath, dist) for dist, fpath in versions]\n json.dump(leafdata, jsonout)\n\n leafjson.utime((time.time(), time.time()))\n return leafjson\n\n @staticmethod\n def data_from_path(datafile):\n datafile = path(datafile)\n if datafile.exists():\n with open(datafile) as stream:\n return json.load(stream)\n else:\n logging.error(\"No datafile found for %s\", datafile)\n datafile.write_text(\"{}\")\n return {}\n\n def _write_datafile(self, **data):\n if self.datafile_path.exists():\n newdata = data\n with open(self.datafile_path) as root:\n data = json.load(root)\n data.update(newdata)\n\n with open(self.datafile_path, 'w') as root:\n json.dump(data, root)\n return data\n\n def write_datafile(self, with_lock=True, **data):\n #@@ consider removing\n if with_lock is True:\n with self.index_data_lock:\n return self._write_datafile(**data)\n return self._write_datafile(**data)\n\n def reg_data(self, arch):\n pkgdata = self.arch_to_add_map(arch)\n return arch.md5hex, pkgdata,\n\n def register_archive(self, arch, registry=None):\n \"\"\"\n Adds an archive to the master data store (index.json)\n \"\"\"\n md5, pkgdata = self.reg_data(arch)\n self.write_datafile(**{md5:pkgdata})\n return pkgdata, md5\n\n @staticmethod\n def group_by_magnitude(collection):\n alen = len(collection)\n if alen > 1000:\n return chunked(collection, 100)\n if alen > 100:\n return chunked(collection, 10)\n return [collection]\n\n def update_data(self, datafile=None, pkgdatas=None):\n if datafile is None:\n datafile = self.datafile_path\n\n archs = self.files if pkgdatas is None else pkgdatas.keys()\n new = []\n\n archs_g = self.group_by_magnitude([x for x in archs])\n with benchmark(\"Rebuilt root index.json\"):\n for archs in archs_g:\n with self.index_data_lock:\n new.extend(self._update_data(archs, datafile))\n\n pkgs = len(set(x['name'] for x in new))\n self.log.info(\"Inspected %s versions for %s packages\" %(len(new), pkgs))\n return new\n\n def _update_data(self, archs, datafile):\n data = self.data_from_path(datafile)\n new = []\n exe = self.executor\n\n read = self.archive_tool\n archdata = [(arch, data) for arch in archs]\n for md5, pkgdata in exe.map(read, archdata):\n if pkgdata is not None:\n data[md5] = pkgdata\n new.append(pkgdata)\n\n self.write_datafile(with_lock=False, **data)\n return new\n\n\ndef pki_ff(path, handle_error=None, func=IndexManager.at.pkginfo_from_file):\n return path, func(path, handle_error=handle_error)\n\n\n@subscriber(event.IPackageAdded)\ndef rebuild_leaf(event):\n #@@ consider index as a registry bound utility\n reg = threadlocal.get_current_registry()\n fpath, name, index = event.path, event.name, event.im\n\n index.register_archive(event.path, registry=reg)\n ppath = index.path / event.name\n\n logger.debug(\"Adding %s\" %(event.path))\n if ppath.exists():\n with benchmark(\"%s - rebuilt\" %(event.name)):\n return index.add_version_to_leaf(fpath, name)\n\n with benchmark(\"%s - new leaf\" %name):\n return index.regenerate_leaf(name)\n\n\n\n@subscriber(event.IIndexUpdate)\ndef bulk_update_index(event):\n new_pkgs = event.index.update_data(event.datafile, pkgdatas=event.pkgdatas)\n return bulk_add_pkgs(event.index, new_pkgs)\n\n\ndef bulk_add_pkgs(index, new_pkgs):\n \"\"\"\n Sidestep the event system for efficiency\n\n @@ requires a prior update\n \"\"\"\n with benchmark('') as bm:\n leaves = set()\n archs = []\n\n for data in new_pkgs:\n leaves.add(data['name'])\n archs.append(index.path / data['filename'])\n\n bm.name = \"Added & registered %s archives and rebuilt %s leaves\"\\\n %(len(archs), len(leaves))\n\n for leaf in leaves:\n try:\n index.regenerate_leaf(leaf)\n except Exception:\n logger.exception('Issue building leaf for %s', leaf)\n\n return leaves, archs\n\n\ndef bulk_update_index_at_start(event):\n with benchmark('Bulk update complete'):\n logger.info(\"--> Checking and updating index\")\n reg = event.app.registry\n\n index = IndexManager.from_registry(reg)\n logger.info(\"-- %s pkg in %s\", len([x for x in index.files]), index.path.abspath())\n\n new_pkgs = index.update_data()\n\n logger.info(\"-- Bulk add %d packages\", len(new_pkgs))\n leaves, archs = bulk_add_pkgs(index, new_pkgs)\n\n home_file = index.path / index.root_index_file\n if index.write_html is True and (not home_file.exists() or len(leaves)):\n items = index.projects_from_archives()\n index.write_index_home(items)\n\n return leaves, archs\n\n\ndef async_bulk_update_at_start(event, thread=Thread):\n logger.info(\"Spawning thread to handle bulk update on start\")\n thread(target=bulk_update_index_at_start,\n args=(event,),\n name='bulk-update-on-start').start()\n\n\nresolve = dnr(None).maybe_resolve\npreup_key = 'cheeseprism.preupdate'\n\ndef noop(*args, **kw):\n return\n\ndef includeme(config):\n config.scan(__name__)\n preup = resolve(config.registry.settings.get(preup_key, 'cheeseprism.index.noop'))\n if preup:\n config.registry[preup_key] = preup\n if asbool(config.registry.settings.get('cheeseprism.async_restart', False)):\n config.add_subscriber(async_bulk_update_at_start, ApplicationCreated)\n else:\n config.add_subscriber(bulk_update_index_at_start, ApplicationCreated)\n","sub_path":"cheeseprism/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":18435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"35440633","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport datetime\n\nfrom django.core.cache import caches\nfrom django.utils.encoding import force_text\n\nfrom rest_framework import status\n\nfrom rest_framework_extensions.key_constructor.constructors import (\n DefaultKeyConstructor\n)\nfrom rest_framework_extensions.key_constructor.bits import (\n KeyBitBase,\n RetrieveSqlQueryKeyBit,\n ListSqlQueryKeyBit,\n PaginationKeyBit,\n UserKeyBit,\n)\nfrom rest_framework_extensions.cache.decorators import CacheResponse\n\nfrom .throttling import CustomThrottleException\n\n\nclass CustomCheckThrottleMixin(object):\n \"\"\"\n The mixin to customize the throttling behavior of APIView.\n 1) get more throttle info\n 2) use customized exception\n \"\"\"\n\n def check_throttles(self, request):\n \"\"\"\n Check if request should be throttled.\n Raises an appropriate exception if the request is throttled.\n \"\"\"\n for throttle in self.get_throttles():\n result, self.rate_limits_info = throttle.allow_request(request, self)\n ident = self.rate_limits_info['ident']\n if not result:\n self.throttled(request, ident)\n\n def throttled(self, request, ident):\n \"\"\"\n If request is throttled, determine what kind of exception to raise.\n \"\"\"\n raise CustomThrottleException(ident=ident)\n\n\nclass CustomCacheResponse(CacheResponse):\n \"\"\"\n Add headers after reading from cache\n \"\"\"\n\n def process_cache_response(self,\n view_instance,\n view_method,\n request,\n args,\n kwargs):\n response = super(CustomCacheResponse, self).process_cache_response(\n view_instance,\n view_method,\n request,\n args,\n kwargs\n )\n response = self._add_rate_limit_headers(response, view_instance)\n return response\n\n def _add_rate_limit_headers(self, response, view_instance):\n if not status.is_server_error(response.status_code):\n response['X-RateLimit-Limit'] = view_instance.rate_limits_info['limitation']\n response['X-RateLimit-Remaining'] = view_instance.rate_limits_info['remain']\n response['X-RateLimit-Reset'] = view_instance.rate_limits_info['reset']\n return response\n\n\ncustom_cache_response = CustomCacheResponse\n\n\nclass UpdatedAtKeyBit(KeyBitBase):\n def get_data(self, **kwargs):\n key = 'links:updated'\n value = caches['links'].get(key, None)\n if not value:\n value = datetime.datetime.utcnow().isoformat()\n caches['links'].set(key, value=value)\n return force_text(value)\n\n\nclass CustomObjectKeyConstructor(DefaultKeyConstructor):\n retrieve_sql = RetrieveSqlQueryKeyBit()\n updated_at = UpdatedAtKeyBit()\n\n\nclass CustomListKeyConstructor(DefaultKeyConstructor):\n list_sql = ListSqlQueryKeyBit()\n pagination = PaginationKeyBit()\n updated_at = UpdatedAtKeyBit()\n\n\nclass CustomListPaginationKeyConstructor(DefaultKeyConstructor):\n list_sql = ListSqlQueryKeyBit()\n pagination = PaginationKeyBit()\n updated_at = UpdatedAtKeyBit()\n\n\nclass CustomListPaginationPerUserKeyConstructor(DefaultKeyConstructor):\n list_sql = ListSqlQueryKeyBit()\n pagination = PaginationKeyBit()\n updated_at = UpdatedAtKeyBit()\n user = UserKeyBit()\n","sub_path":"uplinks/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"339687732","text":"class Solution:\n def reverseStr(self, s: str, k: int) -> str:\n def helper(arr, left, right):\n while left < right:\n temp = arr[left]\n arr[left] = arr[right]\n arr[right] = temp\n left += 1\n right -= 1\n n = len(s)\n s_arr = [i for i in s]\n for i in range(0, n, 2*k):\n if n - i < k:\n helper(s_arr, i, n-1)\n elif k <= n - i < 2*k:\n helper(s_arr, i, i + k - 1)\n else:\n helper(s_arr, i, i + k - 1)\n return \"\".join(s_arr)\n\n def licenseKeyFormatting(self, s: str, k: int) -> str:\n s = \"\".join(s.split(\"-\"))\n n = len(s)\n if k >= n:\n return s.upper()\n j = n - k\n ans = []\n while j >= 0:\n ans.append(s[j: j+k].upper())\n j -= k\n if j > -k:\n ans.append(s[:j+k].upper())\n return \"-\".join(ans[::-1])\n\n\nif __name__ == '__main__':\n solution = Solution()\n S = \"5F3Z-2e-9-w\"\n K = 4\n print(solution.licenseKeyFormatting(S,K))\n","sub_path":"leetcode/daily.py","file_name":"daily.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"59053162","text":"from flask import Blueprint, render_template, request, redirect, url_for, flash\nfrom flask_login import login_required, current_user\nfrom models.image import Image\nfrom models.user import User\nfrom models.follow import Follow\nfrom werkzeug.security import generate_password_hash \nfrom werkzeug.utils import secure_filename\nfrom instagram_web.util.helpers import upload_file_to_s3, allowed_file\nfrom config import Config\n\n\nusers_blueprint = Blueprint('users',\n __name__,\n template_folder='templates')\n\n\n@users_blueprint.route('/new', methods=['GET'])\ndef new():\n return render_template('users/new.html')\n\n# Sign up new user\n@users_blueprint.route('/', methods=['POST'])\ndef create():\n\n username = request.form.get('name')\n email = request.form.get('email')\n password = request.form.get('password')\n\n user = User(username=username, email=email, password=password) \n\n if user.save():\n print(user.username)\n flash('New user has been added!', 'success')\n return redirect(url_for('users.new')) \n else: \n print(user.errors)\n for error in user.errors:\n flash(error, 'danger')\n return render_template ('users/new.html', username=username, email=email, password=password)\n\n\n# user profile page\n@users_blueprint.route('/', methods=[\"GET\"])\ndef show(username):\n user = User.select().where(User.username == username).get()\n # code to check if user is an idol who has approved me (fan)\n return render_template('users/profile.html', user=user)\n\n\n# homepage / all users feed\n@users_blueprint.route('/', methods=[\"GET\"])\n@login_required\ndef index():\n users = User.select()\n images = Image.select().where(Image.user << users)\n return render_template('home.html', users=users, images=images)\n\n\n@users_blueprint.route('//edit', methods=['GET'])\n@login_required\ndef edit(id):\n user = User[id]\n if current_user.is_authenticated and current_user.id == user.id:\n return render_template('users/edit.html', user=user)\n else: \n flash(f\"Not allowed to update {user.username}'s profile\", 'danger')\n return render_template('users/edit.html', user=current_user)\n\n# upload profile image\n@users_blueprint.route('/profile/', methods=['POST'])\n@login_required\ndef update_profile_img(id):\n\t# get a file from request\n file = request.files['user_file']\n\t# if no file in request (user submit on empty form)\n if not file:\n flash(\"Please select a file\", 'danger')\n return render_template('users/edit.html')\n\t# if there is a file in request & is allowed type\n elif file and allowed_file(file.filename):\n file.filename = secure_filename(file.filename)\n output = upload_file_to_s3(file)\n if not output:\n flash(f'Unable to upload {file.filename}', 'danger')\n return render_template('users/edit.html')\n else:\n # get current user\n # save profile image link \n user = User.update(profile_image = output).where(User.id == current_user.id)\n user.execute()\n flash(f'Successfully uploaded {file.filename}', 'success')\n return redirect(url_for('users.show', username=current_user.username))\n \n\n# update user details\n@users_blueprint.route('/update/', methods=['POST'])\n@login_required\ndef update(id):\n user = User[id]\n if current_user == user:\n user.username = request.form.get('username')\n user.email = request.form.get('email')\n user.password = request.form.get('password')\n if user.save():\n flash('Successfully updated!', 'success')\n return redirect (url_for('users.edit', id=id))\n else: \n flash('Unable to update profile', 'danger')\n return render_template('users/edit.html', user=user)\n else:\n flash(f\"Not allowed to update {user.username}'s profile\", 'danger')\n return render_template('users/edit.html', user=user)\n\n\n# make account private\n@users_blueprint.route('/private/', methods=['POST'])\n@login_required\ndef make_private(id):\n user = User[id]\n if current_user == user:\n user.is_private = True\n if user.save():\n flash ('Account setting updated', 'success')\n return redirect (url_for('users.edit', id=id))\n else:\n flash('Unable to update account setting', 'danger')\n return render_template('users/edit.html', user=user)\n else:\n flash(f\"Not allowed to change {user.username}'s account setting\", 'danger')\n return render_template('users/edit.html', user=user)\n\n\n# follow user\n@users_blueprint.route('/follow/', methods=[\"POST\"])\n@login_required\ndef follow(username):\n idol_id = request.form.get('idol_id')\n f = Follow(fan=current_user.id, idol=idol_id)\n idol = User.get_by_id(idol_id)\n f.save()\n flash(f'You are now following {idol.username}', 'success')\n return redirect (url_for('users.show', username=idol.username))\n\n\n","sub_path":"instagram_web/blueprints/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"35903948","text":"import CorpusManager\nfrom threading import Thread\nimport OutputManager\n\n\n\ndef analyzeCorpus():\n CorpusManager.loadFiles(\"dictionaryw_pos.txt\", \"ttl.txt\")\n CorpusManager.getSentenceStructures()\n \n\ndef analyzeSentencePatterns():\n while True:\n currProgress = int(CorpusManager.getProgress())\n \n if(currProgress % 10 is 0 and currProgress is not 0):\n print(\"Progress: \", currProgress)\n OutputManager.printOutput(0,10)\n \n\ncorpusManager = Thread(target=analyzeCorpus, args=())\npatternManager = Thread(target=analyzeSentencePatterns, args=())\npatternManager.start()\ncorpusManager.start()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"378139463","text":"\n# coding: utf-8\n\n# \n# # StudyQuant\n# 作者 : Rudy\n\n# # 基本数据类型\n\n# Python 是一种动态类型语言,这意味着. Python 解择程序在运行时推知对象的类也C\n# 等编译语言通常是静态类型语言,在这类语言,对象类型必须在编译之前与对象\n# 绑定\n\n# # Python有五个标准的数据类型:\n# - Numbers(数字)\n# - String(字符串)\n# - List(列表)\n# - Tuple(元组)\n# - Dictionary(字典)\n# \n\n# 四种不同的数类型:\n# - int(整型) 0,1 等\n# - long(长整型[也可以代表八进制和十六进制])\n# - float(浮点型) 3.6 等\n# - complex(复数) 如 4+2j\n# - Bool : True, False\n# - String : “hello, world”\n# \n\n# # 一切都是对象\n\n# ## Basic Data Types\n\n# # int 整数\n\n# 按F9,它会运行当前的代码行\n# ctrl + enter\n# shift + shift\n# In[61]:\n\n\na = 10\ntype(a)\n\n\n# In[62]:\n\n\na.bit_length() #获得表现int 对象所需的位数\n\n\n# In[63]:\n\n\na = 100000\n\n\n# In[64]:\n\n\nprint(a)\n\n\n# In[65]:\n\n\ngoogol = 10 ** 100\ngoogol\n\n\n# In[66]:\n\n\n1 + 4\n\n\n# In[67]:\n\n\n1 / 4\n\n\n# In[68]:\n\n\ntype(1 / 4)\n\n\n# ## 数字的基本操作\n# python中的数字都支持下面的操作:\n# \n# 1、x + y:x加y;\n# 2、x - y:x减y;\n# 3、x * y:x和y的积;\n# 4、x / y:x和y的商;\n# 5、x // y:x和y的商的下限,即取整;\n# 6、x % y:x/y的余;\n# 7、abs(x):x为整型和浮点型,返回x的绝对值;x为复数型,返回x的magnitude(注);\n# 8、int(x):将x转换到整型;\n# 9、float(x):将x转换到浮点型;\n# 10、complex(re, im):得到实部为re,虚部为im的复数;\n# 11、c.conjugate():返回复数c的共轭复数;\n# 12、divmod(x, y):返回对(x // y, x % y);\n# 13、pow(x, y):x的y次方;\n# 14、x ** y:同pow(x, y),x的y次方。\n# \n\n# ### Floats\n\n# In[59]:\n\n\n1 / 4\n\n\n# In[12]:\n\n\ntype (1. / 4)\n\n\n# In[13]:\n\n\nb = 0.35\ntype(b)\n\n\n# In[14]:\n\n\nb + 0.1\n\n\n# In[15]:\n\n\nc = 0.5\nc.as_integer_ratio()\n\n\n# In[16]:\n\n\nb.as_integer_ratio()\n\n\n# ### Strings\n\n# 而'Hello, World!' 是一个字符串,之所以这\n# 么称呼是因为它包含一“串”字母。因为被引号包围,读者(以及解释器)可以将它们识\n# 别为字符串。\n\n# In[48]:\n\n\nt = 'StudyQuant offers Python Courses'\n\n\n# In[49]:\n\n\nt.capitalize()\n\n\n# In[50]:\n\n\nt.split()\n\n\n# In[70]:\n\n\nt.upper()\n\n\n# In[72]:\n\n\nt.find('studyquant')\n\n\n# In[52]:\n\n\nt.find('Python')\n\n\n# In[53]:\n\n\nt.replace(' ', '|')\n\n\n# In[54]:\n\n\n'http://www.python.org'.strip('htp:/')\n\n\n# ## Basic Data Structures\n\n# ### Tuples\n\n# - Python的元组与列表类似,不同之处在于元组的元素不能修改。\n# \n# - 元组使用小括号,列表使用方括号。\n# \n# - 元组创建很简单,只需要在括号中添加元素,并使用逗号隔开即可。\n\n# In[95]:\n\n\nt = (1, 2.5, 'data')\ntype(t)\n\n\n# In[96]:\n\n\nt = 1, 2.5, 'data'\ntype(t)\n\n\n# In[82]:\n\n\nt[2]\n\n\n# In[97]:\n\n\ntype(t[2])\n\n\n# In[98]:\n\n\nt.count('data') # 对象位置\n\n\n# In[99]:\n\n\nt.index(1) #对象的位置索引值\n\n\n# In[91]:\n\n\n# 以下修改元组元素操作是非法的。\n# t[0] = 100\n\n\n# In[92]:\n\n\nt2 = (2,3)\n\n\n# In[100]:\n\n\n# 创建一个新的元组\ntup3 = t + t2\nprint (tup3)\n\n\n# ### Lists\n\n# 序列是Python中最基本的数据结构。序列中的每个元素都分配一个数字 - 它的位置,或索引,第一个索引是0,第二个索引是1,依此类推。\n# \n# Python有6个序列的内置类型,但最常见的是列表和元组。\n# \n# 序列都可以进行的操作包括索引,切片,加,乘,检查成员。\n# \n# 此外,Python已经内置确定序列的长度以及确定最大和最小的元素的方法。\n# \n# 列表是最常用的Python数据类型,它可以作为一个方括号内的逗号分隔值出现。\n# \n# 列表的数据项不需要具有相同的类型\n# \n# 创建一个列表,只要把逗号分隔的不同的数据项使用方括号括起来即可\n\n# In[177]:\n\n\nl = [1, 2.5, 'data']\nl[2]\n\n\n# In[179]:\n\n\nt\n\n\n# In[180]:\n\n\nl = list(t)\nl\n\n\n# In[181]:\n\n\ntype(l)\n\n\n# In[182]:\n\n\nl.append([4, 3]) # append list at the end\nl\n\n\n# In[183]:\n\n\nl.extend([1.0, 1.5, 2.0]) # append elements of list\nl\n\n\n# In[184]:\n\n\nl.insert(1, 'insert') # insert object before index position\nl\n\n\n# In[185]:\n\n\nl.remove('data') # remove first occurence of object\nl\n\n\n# In[186]:\n\n\np = l.pop(3) # removes and returns object at index\nprint (l, p)\n\n\n# In[187]:\n\n\nl.pop(0)\n\n\n# In[188]:\n\n\nl\n\n\n# In[189]:\n\n\nl[2:4] # 3rd to 4th element\n\n\n# In[190]:\n\n\nl.append('Google') ## 使用 append() 添加元素\n\n\n# 删除列表元素\n\n# In[191]:\n\n\ndel l[2]\n\n\n# #列表元素个数\n\n# In[192]:\n\n\nlen(l) #\n\n\n# In[204]:\n\n\nprice = []\n\n\n# In[205]:\n\n\nfor i in range(10):\n price.append(i)\n\n\n# In[206]:\n\n\nprice \n\n\n# # 布尔表达式\n\n# 布尔表达式的结果要么是真(true),要么为假(false)。下面的例子是使用== 运算符,比\n# 较两个操作数,如果相等则结果为True, 否则为False:\n\n# In[10]:\n\n\nTrue\n\n\n# In[11]:\n\n\nFalse\n\n\n# In[12]:\n\n\n5 == 5\n\n\n# In[13]:\n\n\n5 == 10\n\n\n# In[14]:\n\n\na = 5 \n\n\n# In[15]:\n\n\nb = 10 \n\n\n# In[16]:\n\n\na > b\n\n\n# In[17]:\n\n\na < b\n\n\n# In[18]:\n\n\na = 10\n\n\n# In[21]:\n\n\na >= b\n\n\n# In[22]:\n\n\na <= b\n\n\n# In[23]:\n\n\nif a == b:\n print ('相等')\n\n\n# ### Dicts 字典\n\n# 字典是另一种可变容器模型,且可存储任意类型对象。\n# \n# 字典的每个键值 key=>value 对用冒号 : 分割,每个键值对之间用逗号 , 分割,整个字典包括在花括号 {} 中 \n\n# In[1]:\n\n\nd = {\n 'Name' : 'Rudy',\n 'Country' : 'China',\n 'Profession' : 'Quant',\n 'Age' : 29\n }\ntype(d)\n\n\n# In[2]:\n\n\nprint (d['Name'], d['Age'])\n\n\n# In[3]:\n\n\nd.keys()\n\n\n# In[4]:\n\n\nd.values()\n\n\n# In[5]:\n\n\nd.items()\n\n\n# In[6]:\n\n\nbirthday = True\nif birthday is True:\n d['Age'] += 1\nprint (d['Age'])\n\n\n# In[7]:\n\n\nd\n\n\n# In[8]:\n\n\nfor key in d.keys():\n print (key)\n\n\n# In[9]:\n\n\nfor key in d.values():\n print (key)\n\n\n# ### Sets\n\n# \n# \n\n# In[3]:\n\n\ns = set(['u', 'd', 'ud', 'du', 'd', 'du'])\ns\n\n\n# In[4]:\n\n\nt = set(['d', 'dd', 'uu', 'u'])\n\n\n# In[5]:\n\n\ns.union(t) # all of s and t\n\n\n# In[6]:\n\n\ns.intersection(t) # both in s and t\n\n\n# In[7]:\n\n\ns.difference(t) # in s but not t\n\n\n# In[8]:\n\n\nt.difference(s) # in t but not s\n\n\n# In[10]:\n\n\ns.symmetric_difference(t) # in either one but not both\n\n\n# In[12]:\n\n\ns.add(\"set\")\n\n\n# In[13]:\n\n\ns\n\n\n# In[15]:\n\n\ns.remove('du')\n\n\n# In[16]:\n\n\ns\n\n\n# In[17]:\n\n\nt & s # 交集\n\n\n# In[18]:\n\n\nt|s # 合集\n\n\n# In[19]:\n\n\nt -s #相对补几,差集\n\n#\n# ## 更多量化学习资源\n#\n# \n#\n# 扫上方二维码,关注公众账号 量化投资学院 ,获取下列免费资源\n# - 回复**“热点研报”**,获取近年热点券商金融工程研究报告\n# - 回复**“Python3”**,获取Python免费学习教程\n# - 回复**“quant教材与面试经验”**, 获取 quant教材与面试经验 资料\n# * [更多福利请点击此链接](https://www.jianshu.com/p/2ffb29f1a1aa)\n#\n#\n# ## 关注StudyQuant\n# * [课程](https://appcop3i2898823.h5.xiaoeknow.com/homepage)\n# - [量化投资与数据分析实战](http://study.163.com/course/introduction/1004855008.htm?share=2&shareId=400000000342001)\n# - [量化投资与数字货币实战](https://appcop3i2898823.h5.xiaoeknow.com/homepage)\n# * [知乎](https://zhuanlan.zhihu.com/studyquant)\n# * [简书](https://www.jianshu.com/u/495eda774816)\n# * [公众号](https://mp.weixin.qq.com/s__biz=MzU5NzU5NjIwMQ==&mid=100000028&idx=1&sn=2f8c053849f296455ec85406e80b2a2d&chksm=7e50405a4927c94c18ba438e0c309a7d13883621ddf02904266026556e9994ad1c3f8558327d&mpshare=1&scene=1&srcid=0810AEevB9zID4Ywzl1icPfA#rd)\n#\n","sub_path":"Tutorial/1.0 数据类型介绍/1.0 数据基本类型- 集合 (set).py","file_name":"1.0 数据基本类型- 集合 (set).py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"455401971","text":"#file: makefootprint.py \n#purpose:helper function script for kicad footprint generation \n#author: Patrick Menschel (C)2018\n\n#import re\n#import math\n\nimport matplotlib.pyplot as plt\ndef plot_points(points,figname):\n pts = points.copy()\n pts.append(points[0])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n line, = ax.plot([pt[0] for pt in pts],[pt[1] for pt in pts],lw=2,marker='o')\n for i in range(len(pts)-1):\n ax.annotate(\"PT{0} ({1},{2})\".format(i,pts[i][0],pts[i][1]),xy=pts[i])\n ax.set_title(figname) \n plt.show()\n return\n\ndef format_pad(padnum,padtype,padshape,posx,posy,sizex,sizey,layers):\n ret = \"(pad {0} {1} {2} (at {3} {4}) (size {5} {6}) (layers {7}))\".format(padnum, padtype, padshape,posx,posy,sizex,sizey,\" \".join(layers) )\n return ret\n\ndef get_posxy_for_span(pinnum,spanx,spany):\n#dual in line method, first col downwards, second col upwards\n xpts = [-((((pinnum/2)-1) * spanx) / 2) + pin*spanx for pin in range(int(pinnum/2))]\n ypts = [(-0.5*spany) + spany*pin for pin in range(2)]\n pts = [(posx,posy) for posy in ypts for posx in xpts]#reversed dim loops\n return pts\n\n\ndef format_pads(pinnum,spanx,spany,padtype,padshape,sizex,sizey,layers):\n ret = \"\"\n for padnum,(posx,posy) in enumerate(get_posxy_for_span(pinnum,spanx,spany)):\n ret += \"{0}\\n\".format(format_pad(padnum+1,padtype,padshape,posx,posy,sizex,sizey,layers))\n return ret\n\ndef get_outer_dimensions_of_pads(pads):\n minxy = [0,0]\n maxxy = [0,0]\n for pad in pads:\n padminxy,padmaxxy = pad.get_outer_dimensions()\n# print(padminxy,padmaxxy)\n minxy = list(min(minxy[i],padminxy[i]) for i in range(2))\n maxxy = list(max(maxxy[i],padmaxxy[i]) for i in range(2))\n return minxy,maxxy\n\ndef get_points_from_dimensions(dims):\n minxy,maxxy = dims\n pts = []\n pts.append((minxy[0],minxy[1]))\n pts.append((minxy[0],maxxy[1]))\n pts.append((maxxy[0],maxxy[1]))\n pts.append((maxxy[0],minxy[1]))\n return pts\n\ndef get_center_dimensions_of_pads(pads):\n minxy = [0,0]\n maxxy = [0,0]\n for pad in pads:\n padxypos = pad.get_xypos()\n minxy = list(min(minxy[i],padxypos[i]) for i in range(2))\n maxxy = list(max(maxxy[i],padxypos[i]) for i in range(2))\n return minxy,maxxy\n\ndef format_courtyard_lines(pads,package_dimensions,package_offset=0.25,linewidth=0.05,grid=0.01):\n points = []\n courtyard_lines = []\n grid_decimals = 2\n outer_pad_points = get_points_from_dimensions(get_outer_dimensions_of_pads(pads))\n# plot_points(outer_pad_points,\"outer_pad_points\")\n package_points = get_package_points(package_dimensions)\n \n outerpoints = []\n for i in range(4):\n thispt = [0,0]\n for j in range(2):\n if abs(outer_pad_points[i][j]) > abs(package_points[i][j]):#we know the points are aranged in the same manner/sector, so we check abs()\n thispt[j] = outer_pad_points[i][j]\n# print(\"Pads win at Point {0} dim {1}\".format(i,j))\n else:\n thispt[j] = package_points[i][j]\n# print(\"Package wins at Point {0} dim {1}\".format(i,j))\n outerpoints.append(thispt)\n \n for point in outerpoints:\n x,y = point\n if x < 0:\n thisx = round(x-package_offset-(grid/2),grid_decimals)\n else:\n thisx = round(x+package_offset+(grid/2),grid_decimals)\n if y < 0:\n thisy = round(y-package_offset-(grid/2),grid_decimals)\n else:\n thisy = round(y+package_offset+(grid/2),grid_decimals)\n \n points.append((thisx,thisy))\n \n# plot_points(package_points,\"package_points\")\n# \n# plot_points(points,\"courtard after rounding\")\n# \n for idx in range(len(points)-1):\n startpoint,endpoint = points[idx:idx+2]\n courtyard_lines.append(format_fpline(startpoint,endpoint,\"F.CrtYd\",linewidth))\n startpoint = points[-1] \n endpoint = points[0]\n courtyard_lines.append(format_fpline(startpoint,endpoint,\"F.CrtYd\",linewidth))\n return courtyard_lines\n\n\ndef get_package_points(package_dimensions):\n points = []\n for x in [-0.5,0.5]:\n for y in [-0.5,0.5]:\n points.append((x*package_dimensions[0],y*package_dimensions[1]))\n rearrangedpoints = points.copy()\n rearrangedpoints[3] = points[2]\n rearrangedpoints[2] = points[3]\n return rearrangedpoints\n\ndef format_fab_lines(package_dimensions,linewidth=0.1):\n \"\"\" return the lines (as in newline ) for the fabrication layer (geometrical) lines\n @param package_dimensions: size of the component (x,y)\n @return: lines for the fabrication layer rectangle with cutted edge as pin 1 indicator\n \"\"\"\n fab_lines = []\n # 4 points package rectangle\n \n points = get_package_points(package_dimensions) \n \n bevel = max(0.1,min(package_dimensions)*0.25)\n \n #alter first and last point to bevel\n points.append((points[0][0]+bevel,points[0][1]))\n points[0] = (points[0][0],points[0][1]+bevel)\n \n# plot_points(points,\"fab lines\")\n for idx in range(len(points)-1):\n startpoint,endpoint = points[idx:idx+2]\n fab_lines.append(format_fpline(startpoint,endpoint,\"F.Fab\",linewidth))\n startpoint = points[-1] \n endpoint = points[0]\n fab_lines.append(format_fpline(startpoint,endpoint,\"F.Fab\",linewidth))\n return fab_lines\n\n\ndef format_silks_lines(pads,package_dimensions,distance_to_fablines=0.12,distance_to_pads=0.2,linewidth=0.12):\n silks_lines = []\n minxy,maxxy = get_outer_dimensions_of_pads(pads)\n cminxy,cmaxxy = get_center_dimensions_of_pads(pads)\n \n faby = [p[1] for p in get_package_points(package_dimensions)]\n fabminy = min(faby) \n fabmaxy = max(faby)\n# print(minxy,fabminy,fabmaxy)\n \n startpoint = minxy[0]+(linewidth/2),min(minxy[1]-distance_to_pads,fabminy-distance_to_fablines)\n endpoint = cmaxxy[0],min(minxy[1]-distance_to_pads,fabminy-distance_to_fablines)\n# print(startpoint,endpoint)\n silks_lines.append(format_fpline(startpoint,endpoint,\"F.SilkS\",linewidth))\n \n startpoint = cminxy[0],max(maxxy[1]+distance_to_pads,fabmaxy+distance_to_fablines)\n endpoint = cmaxxy[0],max(maxxy[1]+distance_to_pads,fabmaxy+distance_to_fablines)\n# print(startpoint,endpoint)\n silks_lines.append(format_fpline(startpoint,endpoint,\"F.SilkS\",linewidth))\n return silks_lines\n\ndef format_3dmodel_lines(model3dname):\n lines = []\n sublines = []\n sublines.append(\"(at (xyz 0 0 0))\")\n sublines.append(\"(scale (xyz 1 1 1))\")\n sublines.append(\"(rotate (xyz 0 0 0))\")\n lines.append(r\"(model ${{KISYS3DMOD}}/{0}\".format(model3dname))\n lines.extend(\" {0}\".format(subline) for subline in sublines)\n lines.append(\")\")\n return lines\n \n \ndef format_fpline(startpoint,endpoint,layer,width):\n ret = \"(fp_line (start {0}) (end {1}) (layer {2}) (width {3}))\".format(\" \".join(\"{:.2f}\".format(dim) for dim in startpoint),\n \" \".join(\"{:.2f}\".format(dim) for dim in endpoint),\n layer,\n width)\n return ret \n\n\ndef format_fp_text(text,posxy,layer,fontsize=(1,1),thickness=0.15,texttype=\"reference\",angle=0):\n lines = []\n sublines = []\n poscontents = list(posxy)\n if angle:\n poscontents.append(angle)\n sublines.append(\"(effects (font (size {0}) (thickness {1:.2f})))\".format(\" \".join([\"{:.2f}\".format(I) for I in fontsize]),thickness))\n lines.append(\"(fp_text {0} {1} (at {2:}) (layer {3:})\".format(texttype,text,\" \".join([\"{:.2f}\".format(I) for I in poscontents]),layer))\n lines.extend(\" {0}\".format(subline) for subline in sublines)\n lines.append(\")\")\n return lines\n\n\ndef calc_fab_ref_text_scaling(text,sizexy,charsize=(1,1)):\n textdim = (charsize[0]*len(text),charsize[1])\n scalingxy = [min(abs(sizexy[I]/textdim[I]),1) for I in range(len(sizexy))] \n return scalingxy \n \n\nclass footprint_pad():\n\n \n def __init__(self, padnum, xypos, sizexy, padtype, padshape,layers,drill=None):\n \"\"\" A class to represent a footprint and implement helper functions \"\"\"\n self.padnum = padnum\n self.xypos = xypos\n self.sizexy = sizexy\n self.padtype = padtype\n self.padshape = padshape\n self.layers = layers \n self.drill = drill\n \n def get_outer_dimensions(self):\n \"\"\" return the outer dimensions of a pad \"\"\"\n minxy = list(self.xypos[I]-(self.sizexy[I]/2) for I in range(2))\n maxxy = list(self.xypos[I]+(self.sizexy[I]/2) for I in range(2))\n return minxy,maxxy\n \n def get_xypos(self):\n return self.xypos\n \n def format(self):\n #TODO make this nice later\n if self.padnum == None:\n padnum_str = \"\\\"\\\"\"#escaped \"\"\n else:\n padnum_str = self.padnum\n if self.drill:\n ret = \"(pad {0} {1} {2} (at {3}) (size {4}) (drill {5:.2f}) (layers {6}))\".format(padnum_str,\n self.padtype,\n self.padshape,\n \" \".join(\"{0:.2f}\".format(dim) for dim in self.xypos),\n \" \".join(\"{0:.2f}\".format(dim) for dim in self.sizexy),\n self.drill,\n \" \".join(self.layers) )\n else:\n ret = \"(pad {0} {1} {2} (at {3}) (size {4}) (layers {5}))\".format(padnum_str,\n self.padtype,\n self.padshape,\n \" \".join(\"{0:.2f}\".format(dim) for dim in self.xypos),\n \" \".join(\"{0:.2f}\".format(dim) for dim in self.sizexy),\n \" \".join(self.layers) )\n return ret\n \n def get_area(self):\n if self.padshape == \"rect\":\n ret = self.sizexy[0]*self.sizexy[1]\n else:\n raise NotImplementedError(\"Shape not handled yet {0}\".format(self.padshape))\n return ret \n \n \n\n\n\nclass kicad_footprint:\n \n \n def __init__(self,name,desc,datasheet,pads,tedit=\"5AA01C76\",layers=[\"F.Cu\",],tags=[\"TDFN\",],attr=[\"smd\",],model3dname=\"Package_DFN_QFN.3dshapes/DFN-14-1EP_3x4.5mm_P0.65mm.wrl\",package_dimensions=(1,1)):\n self.name = name\n self.desc = desc\n self.datasheet = datasheet\n self.pads = pads\n self.layers = layers\n self.tedit = tedit\n self.tags = tags\n self.attr = attr\n self.model3dname=model3dname\n self.package_dimensions = package_dimensions\n #self.pitch = re.compile(\"_P.*mm\").findall(self.name)[0]#TODO: should we noc generate the name instead of RE the pitch out of it?!\n \n def format(self):\n fabrication_layer_value_distance_to_pads = 1#TODO: STUB - automate / calculate this value\n items = []#list of items to be placed in the format string\n items.append(\"module {0} (layer {1}) (tedit {2})\".format(self.name,\" \".join(self.layers),self.tedit))#module definition line\n subitems = []\n subitems.append(\"(descr \\\"{0} ({1})\\\")\".format(self.desc,self.datasheet))\n subitems.append(\"(tags \\\"{0}\\\")\".format(\" \".join(self.tags)))\n subitems.append(\"(attr {0})\".format(\" \".join(self.attr)))\n \n #Fab layer\n subitems.extend(format_fab_lines(package_dimensions=self.package_dimensions)) \n maxxy = get_outer_dimensions_of_pads(self.pads)[1]\n refpos = (0,maxxy[1]+fabrication_layer_value_distance_to_pads)\n subitems.extend(format_fp_text(text=self.name,posxy=refpos,layer=\"F.Fab\",texttype=\"value\"))\n #scaling = calc_text_scaling(text=\"REF**\",sizexy=get_center_dimensions_of_pads(self.pads))#actually we're calculating %R but it translates to REF**\n scaling = calc_fab_ref_text_scaling(text=\"REF**\",sizexy=self.package_dimensions)\n# print(\"Scaling {0}\".format(scaling))\n angle=0\n if scaling[1] < scaling[0]:\n angle=90#use 90deg angle if the y dim is bigger than x, so we have more space\n fontsize=(min(scaling),)*2#keep aspect ratio\n# print(\"Fontsize {0}\".format(fontsize))\n thickness = 0.15*fontsize[0]\n# thickness = 0.15*(self.package_dimensions[1]/self.package_dimensions[0])\n# print(\"package_dims {0} ratio {1}\".format(self.package_dimensions,self.package_dimensions[1]/self.package_dimensions[0]))\n# print(\"Thickness {0}\".format(thickness))\n subitems.extend(format_fp_text(text=\"%R\",posxy=(0,0),layer=\"F.Fab\",texttype=\"user\",fontsize=fontsize,angle=angle,thickness=thickness))\n \n #SilkS layer\n minxy,maxxy = get_outer_dimensions_of_pads(self.pads)\n distance_to_pads = 1#TODO: write a function that returns the required distance to the pads, this is nasty\n refpos = (0,minxy[1]-distance_to_pads)\n subitems.extend(format_fp_text(text=\"REF**\",posxy=refpos,layer=\"F.SilkS\"))\n subitems.extend(format_silks_lines(self.pads,package_dimensions=self.package_dimensions))\n \n subitems.extend(format_courtyard_lines(self.pads,package_dimensions=self.package_dimensions))\n# subitems.extend(format_courtyard_lines(self.pads))#testing \n subitems.extend([pad.format() for pad in self.pads])\n subitems.extend(format_3dmodel_lines(self.model3dname))\n [items.append(\" {0}\".format(subitem)) for subitem in subitems]\n ret = \"({0}\\n)\".format(\"\\n\".join(items))\n return ret\n \n \n \ndef make_footprint_stmicro(N,E,X2,Y2,C,X,Y,V,EV,modulename,description,datasheet,centerpad,numthermalvias,package_dimensions,generate_thermalvias=True):\n \"\"\" make a kicad footprint from a st micro footprint description\n @param N: number of terminals(pads) in this footprint\n @param E: contact pitch\n @param X2: center pad width\n @param Y2: center pad length\n @param C: contact pad spacing\n @param X: contact pad width \n @param Y: contact pad length\n @param V: thermal via diameter\n @param EV: thermal via pitch\n @param modulename: reference name that kicad uses\n @param description: description in datasheet\n @param datasheet: href to datasheet \n @return: a concated string that can be written to a footprint file\n \n \"\"\"\n pads = []\n thermalvia_pads = []\n paste_pads = [] \n for idx,xypos in enumerate(get_posxy_for_span(pinnum=N,spanx=E,spany=C)):\n if idx >= N/2:\n padnum = N - idx + int(N/2) \n else:\n padnum=idx+1 \n pads.append(footprint_pad(padnum,\n xypos=(xypos[1],xypos[0]),\n sizexy=(Y,X),\n padtype=\"smd\", padshape=\"oval\",layers = [\"F.Cu\",\"F.Paste\",\"F.Mask\"]))\n if centerpad and X2 and Y2:\n ep = footprint_pad(N+1,\n xypos=(0,0),\n sizexy=(Y2,X2),\n padtype=\"smd\", padshape=\"rect\",layers = [\"F.Cu\",\"F.Mask\"])\n pads.append(ep)\n if numthermalvias and V and EV:\n #TODO: calculate the number of thermal vias\n for padnum,xypos in enumerate(get_posxy_for_span(pinnum=numthermalvias,spanx=EV,spany=EV)):#TODO: how do we know the Number of thermalvias from spec sheet variables? \n \n #add the thermal via\n diameter = min(Y2-EV,X2-EV)\n thermalvia_pads.append(footprint_pad(N+1,\n xypos=(xypos[1],xypos[0]),\n sizexy=(diameter,)*2,\n padtype=\"thru_hole\", padshape=\"circle\",layers=[\"*.Cu\",],drill=V))\n \n thermalvia_pads.append(footprint_pad(N+1,#FIXUP: one big thermal pad on the back.\n xypos=(0,0),\n sizexy=(Y2,X2),\n padtype=\"smd\", padshape=\"rect\",layers = [\"B.Cu\",]))\n \n \n #shape the paste fields on the center pad around the thermalvias\n #we're forming a cross\n #1.calc a rect between all vias\n # Y2 is the longest dim\n # use the space X2 - 2V as shortest dim\n paste_pads.append(footprint_pad(\n None,#No pad number\n xypos=(0,0),\n sizexy=(Y2,EV-V),#FIXME:not the diameter but the drill+margin, what is the margin?\n padtype=\"smd\",\n padshape=\"rect\",\n layers = [\"F.Paste\",]))\n #add the other pads\n for padnum,xypos in enumerate(get_posxy_for_span(pinnum=numthermalvias/2,spanx=EV,spany=EV)):#TODO Check with higher pin count!\n paste_pads.append(footprint_pad(\n None,#No pad number\n xypos=xypos,#position is in between the vias\n sizexy=(EV-V,X2-EV),\n padtype=\"smd\",\n padshape=\"rect\",\n layers = [\"F.Paste\",]))\n \n# paste_coverage = sum([paste_pad.get_area() for paste_pad in paste_pads])/ep.get_area()\n# print(\"paste coverage {0:%}\".format(paste_coverage))\n else:\n raise NotImplementedError(\"Not handling paste pad generation if there is no information about thermal vias\")\n #shape the paste fields on the center pad without thermal vias\n \n \n if generate_thermalvias: \n pads.extend(thermalvia_pads)\n pads.extend(paste_pads) \n \n fp_obj = kicad_footprint(name=modulename,desc=description,datasheet=datasheet,pads=pads,tags=[\"TDFN\",\"DFN\",\"{0}mm\".format(E)],\n model3dname=\"Package_DFN_QFN.3dshapes/{0}.wrl\".format(modulename.replace(\"_ThermalVias\",\"\")),\n package_dimensions=package_dimensions) \n return fp_obj.format()\n \n\nif __name__ == \"__main__\":\n modulename = \"TDFN-8-1EP_3x2mm_P0.5mm_EP1.80x1.65mm_ThermalVias\"\n footprint = make_footprint_stmicro(N=8,\n E=0.5,\n X2=1.65,\n Y2=1.8,\n C=2.9,\n X=0.25,\n Y=0.85,\n V=0.3,\n EV=1.0,\n modulename=modulename,\n description=\"8-lead plastic dual flat, 2x3x0.75mm size, 0.5mm pitch\",\n datasheet=\"http://ww1.microchip.com/downloads/en/DeviceDoc/8L_TDFN_2x3_MN_C04-0129E-MN.pdf\",\n centerpad=True,\n numthermalvias=4,\n package_dimensions=(3,2),#swapped again portrait vs landscape\n )\n with open(\"{0}.kicad_mod\".format(modulename),\"w\") as f:\n f.write(footprint)\n\n modulename = \"TDFN-8-1EP_3x2mm_P0.5mm_EP1.80x1.65mm\"\n footprint = make_footprint_stmicro(N=8,\n E=0.5,\n X2=1.65,\n Y2=1.8,\n C=2.9,\n X=0.25,\n Y=0.85,\n V=0.3,\n EV=1.0,\n modulename=modulename,\n description=\"8-lead plastic dual flat, 2x3x0.75mm size, 0.5mm pitch\",\n datasheet=\"http://ww1.microchip.com/downloads/en/DeviceDoc/8L_TDFN_2x3_MN_C04-0129E-MN.pdf\",\n centerpad=True,\n numthermalvias=4,\n package_dimensions=(3,2),\n generate_thermalvias=False,\n )\n with open(\"{0}.kicad_mod\".format(modulename),\"w\") as f:\n f.write(footprint)\n\n\n\n","sub_path":"makefootprint.py","file_name":"makefootprint.py","file_ext":"py","file_size_in_byte":21136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"137363296","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport pytimber\n\n# should be done before importing pytimber\nlogging.basicConfig(level=logging.INFO)\n\n\nldb = pytimber.LoggingDB(source=\"ldb\")\n\n\ndef test_search():\n variables = ldb.search(\"HX:BETA%\")\n assert \"HX:BETASTAR_IP1\" in variables\n assert len(variables) == 4\n\n\ndef test_get_simple():\n t1 = \"2015-05-13 12:00:00.000\"\n t2 = \"2015-05-15 00:00:00.000\"\n data = ldb.get(\"HX:FILLN\", t1, t2)\n\n t, v = data[\"HX:FILLN\"]\n assert len(t) == 6\n assert len(v) == 6\n\n assert t[0] == 1431523684.764\n assert v[0] == 3715.0\n\n\ndef test_getVariable():\n t1 = \"2015-05-13 12:00:00.000\"\n t2 = \"2015-05-15 00:00:00.000\"\n t, v = ldb.getVariable(\"HX:FILLN\", t1, t2)\n\n assert len(t) == 6\n assert len(v) == 6\n\n assert t[0] == 1431523684.764\n assert v[0] == 3715.0\n\n\ndef test_get_unixtime():\n t1 = \"2015-05-13 12:00:00.000\"\n t2 = \"2015-05-15 00:00:00.000\"\n data = ldb.get(\"HX:FILLN\", t1, t2, unixtime=False)\n t, v = data[\"HX:FILLN\"]\n import datetime\n\n assert t[0] == datetime.datetime(2015, 5, 13, 15, 28, 4, 764000)\n\n\ndef test_get_vectornumeric():\n t1 = \"2015-05-13 12:00:00.000\"\n t2 = \"2015-05-13 12:00:01.000\"\n data = ldb.get(\"LHC.BQBBQ.CONTINUOUS_HS.B1:ACQ_DATA_H\", t1, t2)\n\n t, v = data[\"LHC.BQBBQ.CONTINUOUS_HS.B1:ACQ_DATA_H\"]\n\n for vv in v:\n assert len(vv) == 4096\n\n\ndef test_get_vectorstring():\n t1 = \"2016-03-28 00:00:00.000\"\n t2 = \"2016-03-28 23:59:59.999\"\n\n t, v = ldb.getVariable(\"LHC.BOFSU:BPM_NAMES_H\", t1, t2)\n assert v[0][123] == \"BPM.16L3.B1\"\n\n\ndef test_getScaled():\n t1 = \"2015-05-15 12:00:00.000\"\n t2 = \"2015-05-15 15:00:00.000\"\n data = ldb.getScaled(\n \"MSC01.ZT8.107:COUNTS\",\n t1,\n t2,\n scaleInterval=\"HOUR\",\n scaleAlgorithm=\"SUM\",\n scaleSize=\"1\",\n )\n\n t, v = data[\"MSC01.ZT8.107:COUNTS\"]\n\n import numpy as np\n\n assert (v[:4] - np.array([1174144.0, 1172213.0, 1152831.0])).sum() == 0\n\n\ndef test_Timestamp():\n import time\n\n now = time.time()\n ts_now = ldb.toTimestamp(now)\n now2 = ldb.fromTimestamp(ts_now, unixtime=True)\n dt_now2 = ldb.fromTimestamp(ts_now, unixtime=False)\n assert now == now2\n assert str(ts_now)[:24] == dt_now2.strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:24]\n\n time_str = \"2015-10-12 12:12:32.453255123\"\n ta = ldb.toTimestamp(time_str)\n assert ta.toLocaleString() == \"Oct 12, 2015 12:12:32 PM\"\n unix = ldb.fromTimestamp(ta, unixtime=True)\n assert unix == 1444644752.4532552\n assert (\n time.strftime(\"%b %-d, %Y %-I:%M:%S %p\", time.localtime(unix))\n == \"Oct 12, 2015 12:12:32 PM\"\n )\n\n\ndef test_hierarchy():\n assert hasattr(ldb.tree.LHC, \"Beam_Instrumentation\")\n\n\ndef test_getUnit():\n units = ldb.getUnit(\"%:LUMI_TOT_INST\")\n assert units[\"ATLAS:LUMI_TOT_INST\"] == \"Hz/ub\"\n\n\ndef test_getDescription():\n units = ldb.getDescription(\"%:LUMI_TOT_INST\")\n assert (\n units[\"ATLAS:LUMI_TOT_INST\"]\n == \"ATLAS: Total instantaneous luminosity summed over all bunches\"\n )\n\n\ndef test_fundamentals():\n fundamental = \"CPS:%:SFTPRO%\"\n t1 = \"2015-05-15 12:00:00.000\"\n t2 = \"2015-05-15 12:01:00.000\"\n t, v = ldb.getVariable(\"CPS.TGM:USER\", t1, t2, fundamental=fundamental)\n assert v[0] == \"SFTPRO2\"\n\n\ndef test_getStats():\n t1 = \"2016-03-01 00:00:00.000\"\n t2 = \"2016-04-03 00:00:00.000\"\n\n vn = \"LHC.BOFSU:EIGEN_FREQ_2_B1\"\n stat = ldb.getStats(vn, t1, t2)[vn]\n\n assert stat.MinTstamp == 1457962796.972\n assert stat.StandardDeviationValue == 0.00401594\n\n\ndef test_getMetaData():\n var = \"LHC.BLMI:LOSS_RS09\"\n out = ldb.getMetaData(var)\n assert len(out[var][0]) >= 24\n assert len(out[var][1]) >= 24\n","sub_path":"tests/test_loggingdb.py","file_name":"test_loggingdb.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"236796451","text":"\nimport ode\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.integrate import odeint\nimport astropy.units as u\nimport astropy.constants as const\n\n\ndef stiff_ode(L):\n\t'''\n\tsets up the stiff ode given in problem three\n\ttakes in the value for lambda, which is L here\n\treturns a new function nfunc(t , y) such that y' = nfunc(t , y)\n\t'''\n\t\n\tdef nfunc(t , y):\n\t\treturn [-1 * L * (y[0] - np.cos(t))]\n\treturn nfunc\n\t\ndef stiff_sltn(t , L):\n\n\t##given a time t and a value for lambda L, returns the solution of our stiff ode\n\t\n\tA = (-1 * L**2 / (1 + L ** 2)) * np.exp(-L * t) + (L / (1 + L ** 2)) * np.sin(t)\n\tA += (L ** 2 / (1 + L ** 2)) * np.cos(t)\n\treturn A\n\ndef pend(y, t, b, c):\n\n\t# From the scipy documentation\n\t# sets uo the pendulum ode for problem 2\n\t theta, omega = y\n\t dydt = [omega, -b*omega - c*np.sin(theta)]\n\t return dydt\n\t \ndef my_pend(b , c):\n\n\t'''\n\tsimilar to above, but this sets up the pendulum ode to work with my sovlers\n\treturns another function, npend(t,y) such that y' = npend(t , y)\n\t'''\n\t\n\tdef npend(t , y):\n\t\ttheta , omega = y\n\t\tdydt = [omega, -b*omega - c*np.sin(theta)]\n\t\treturn dydt\n\treturn npend\n\ndef WDPressure(rho):\n\n\t'''\n\tEquation of State for a White Dwarf\n\tTakes in a density rho (with astropy units)\n\tReturns a Pressure\n\t'''\n\t\n\t###mu_e = 2\n\tC = (1 / 20.0) * (3 / np.pi) ** (2.0 / 3)\n\tC *= const.h ** 2 / (const.m_e * const.u ** (5.0 / 3))\n\tP = C * ((rho / 2.0) ** (5.0 / 3))\n\treturn P.to(u.Ba)\n\t\ndef WDrho(P):\t\n\t'''\n\tEquation of State for a White Dwarf\n\tTakes in a Pressure P (with astropy units)\n\tReturns a density\n\t'''\n\tC = (1 / 20.0) * (3 / np.pi) ** (2.0 / 3)\n\tC *= const.h ** 2 / (const.m_e * const.u ** (5.0 / 3))\n\trho = (P / C) ** (3.0 / 5)\n\trho *= 2\n\treturn rho\n\t\ndef NSPressure(rho):\n\n\t'''\n\tEquation of State for a Neutron Star\n\tTakes in a density rho (with astropy units)\n\tReturns a Pressure\n\t'''\n\n\t###mu_e = 2\n\tC = (1 / 20.0) * (3 / np.pi) ** (2.0 / 3)\n\tC *= const.h ** 2 / (const.m_n ** (8.0 / 3))\n\tP = C * ((rho) ** (5.0 / 3))\n\treturn P.to(u.Ba)\n\ndef NSrho(P):\n\n\t'''\n\tEquation of State for a Neutron Star\n\tTakes in a density rho (with astropy units)\n\tReturns a Pressure\n\t'''\n\n\tC = (1 / 20.0) * (3 / np.pi) ** (2.0 / 3)\n\tC *= const.h ** 2 / (const.m_n ** (8.0 / 3))\n\trho = (P / C) ** (3.0 / 5)\n\treturn rho\n\t\n\n\n\t\ndef WD_func(r , y):\n\n\t'''\n\tsets up the system of odes for the white dwarf case\n\ty[0] is the pressure\n\ty[1] is the mass\n\treturns a list y' such that y'[0] = dp/dr and y'[1] = dm/dr\n\t'''\n\t\n\t###r is a radius in cm\n\t\n\tPressure = y[0]\n\tMass = y[1]\n\tif Pressure < 0:\n\t\treturn [0 , 0]\n\tG = (const.G.to(u.cm ** 3 / (u.g * u.s ** 2))).value\n\trho = WDrho((Pressure * u.Ba).to(u.GPa)).to(u.g / (u.cm ** 3)).value\n\tif Mass == 0:\n\t\tdp_dr = 0\n\telse:\n\t\tdp_dr = (-1 * G * Mass * rho / (r ** 2))\n\tdm_dr = 4 * np.pi * r ** 2 * rho\n\t\n\treturn [dp_dr , dm_dr]\n\t\n\ndef NS_func(r , y):\n\n\t'''\n\tsets up the system of odes for the neutron star case\n\ty[0] is the pressure\n\ty[1] is the mass\n\treturns a list y' such that y'[0] = dp/dr and y'[1] = dm/dr\n\t'''\n\t###r is a radius in cm\n\n\tPressure = y[0]\n\tMass = y[1]\n\tif Pressure < 0:\n\t\treturn [0 , 0]\n\tG = (const.G.to(u.cm ** 3 / (u.g * u.s ** 2))).value\n\trho = NSrho((Pressure * u.Ba).to(u.GPa)).to(u.g / (u.cm ** 3)).value\n\tc = const.c.to(u.cm / (u.s)).value\n\t\n\tif Mass == 0:\n\t\tdp_dr = 0\n\telse:\n\t\tdp_dr = (-1 * G * Mass * rho / (r ** 2))\n\t\tdp_dr *= (1 + Pressure / (rho * c * c))\n\t\tdp_dr *= (1 + 4 * np.pi * r ** 3 * Pressure / (Mass * c * c))\n\t\tdp_dr *= (1 - 2 * G * Mass / (r * c * c)) ** -1\n\tdm_dr = 4 * np.pi * r ** 2 * rho\n\n\treturn [dp_dr , dm_dr]\n\t\ndef problem_2():\n\n\t'''\n\tsolves problem 2 from the HW.\n\twill show a plot comparing my solvers to scipy's odeint\n\t'''\n\t\n\tb = 0.25\n\tc = 5.0\n\ty0 = [np.pi - 0.1, 0.0]\n\tt = np.linspace(0, 10, 101)\n\tprint (pend(y0 , t[0] , b , c))\n\tsol = odeint(pend, y0, t, args=(b, c))\n\t\n\t###Now we use my codes\n\tA = ode.solve_ode(my_pend(b , c) , 0 , y0 , (t[1] - t[0]))\n\tt_heun , y_heun = A.Heun(t[-1])\n\tt_fe , y_fe = A.Forward_Euler(t[-1])\n\tt_rk4 , y_rk4 = A.RK4(t[-1])\n\tprint (sol[0])\n\tpt = []\n\tptheta = []\n\tfor i in range(len(t)):\n\t\tpt.append(t[i])\n\t\tptheta.append(sol[i][0])\n\t\t\n\tfe = []\n\tfor i in y_fe:\n\t\tfe.append(i[0])\n\t\t\n\theun = []\n\tfor i in y_heun:\n\t\theun.append(i[0])\n\t\n\trk4 = []\n\tfor i in y_rk4:\n\t\trk4.append(i[0])\n\tf , ax = plt.subplots(3 , 1 , sharex = True)\n\tax[0].plot(pt , ptheta , label = \"scipy\")\n\tax[0].plot(t_fe , fe , label = \"Forward Euler\")\n\t#ax[0].set_xlabel(\"time\")\n\tax[0].set_ylabel(\"theta\")\n\tax[0].legend()\n\t\n\tax[1].plot(pt , ptheta , label = \"scipy\")\n\tax[1].plot(t_heun , heun , label = \"Heun\")\n\t#ax[1].set_xlabel(\"time\")\n\tax[1].set_ylabel(\"theta\")\n\tax[1].legend()\n\t\n\tax[2].plot(pt , ptheta , label = \"scipy\")\n\tax[2].plot(t_rk4, rk4 , label = \"RK4\")\n\tax[2].set_xlabel(\"time\")\n\tax[2].set_ylabel(\"theta\")\n\tax[2].legend()\n\n\tplt.show()\n\t\ndef stiff(L):\n\t\n\t'''\n\tsolves our stiff ode given a value of lambda, L\n\treturns the maximum error of all three of our methors\n\truns from 0 to 5 with aa step size of 1e-2\n\t'''\n\t\n\th = 1e-2\n\tstiff_solve = ode.solve_ode(stiff_ode(L) , 0 , [0] , h)\n\ttend = 5\n\tt_h , y_h = stiff_solve.Heun(tend)\n\tt_fe , y_fe = stiff_solve.Forward_Euler(tend)\n\tt_rk4 , y_rk4 = stiff_solve.RK4(tend)\n\trt = []\n\try = []\n\tfor i in range(len(t_h)):\n\t\trt.append(t_h[i])\n\t\try.append(stiff_sltn(t_h[i] , L))\n\t\t\n\theun = []\n\trk4 = []\n\tfe = []\n\t\n\tt = 0\n\t\n\tfor i in y_h:\n\t\theun.append(abs(i - stiff_sltn(t , L)))\n\t\tt += h\n\t\n\tt = 0\n\tfor i in y_fe:\n\t\tfe.append(abs(i - stiff_sltn(t , L)))\n\t\tt += h\n\tt = 0\n\tfor i in y_rk4:\n\t\trk4.append(abs(i - stiff_sltn(t , L)))\n\t\tt += h\n\t\t\n\treturn max(fe) , max(heun) , max(rk4)\n\t\n\t\ndef problem_3():\n\n\t'''\n\tsolves problem 3 from the hw\n\tthis function is mostly plot code\n\twill show a plot\n\t'''\n\t\n\tL = 1\n\tfe = []\n\th = []\n\tLs = []\n\trk = []\n\twhile L < 100:\n\t\ta , b ,c = stiff(L)\n\t\tfe.append(a)\n\t\th.append(b)\n\t\trk.append(c)\n\t\tLs.append(L)\n\t\tL += 1\n\t\n\n\tf , ax = plt.subplots( 3 , 1 , sharex = True)\n\t\n\tax[0].plot(Ls , fe)\n\tax[0].set_title(\"Forward Euler Method\")\n\t\n\tax[1].plot(Ls , h)\n\tax[1].set_title(\"Heun's Method\")\n\t\n\tax[2].plot(Ls , rk)\n\tax[2].set_title(\"RK4 Method\")\n\tplt.show()\n\t\n\t\t\n\t\t\n\t\n\t\ndef problem_4():\n\t\n\t'''\n\tproduces a mass vs. radius plot for our white dwarf\n\truns with central densities from 1e4 to 1e6 in g / cm ^ 3\n\t'''\n\t\n\n\t\n\trc = 1e4\n\tM_total = []\n\tR = []\n\th = 5e6\n\twhile rc <= 1e6:\n\t\n\t\n\t\trho_c = rc * (u.g / (u.cm ** 3))\n\t\t\n\t\t## we will solve in terms of P and M\n\t\t\t\n\t\tP_c = WDPressure(rho_c).value ###Central Pressure in Ba\n\t\t\n\t\tWhite_Dwarf = ode.solve_ode(WD_func , 1 , [P_c , 0] , h)\n\t\t\n\t\trearth = 6.378e+8 ##cm\n\t\tr_end = 5 * rearth\n\t\t\n\t\twdr , wdy = White_Dwarf.RK4(r_end)\n\t\t\n\n\t\tfor i in range(len(wdr)):\n\t\t\tif wdy[i][0] < 0:\n\t\t\t\tR.append((wdr[i] * u.cm).to(u.R_sun).value)\n\t\t\t\tM_total.append(((wdy[i][1] * u.g).to(u.M_sun).value))\n\t\t\t\tbreak\n\t\trc *= 1.5\n\tplt.plot(M_total , R)\n\tplt.xlabel(\"Mass (M_sun)\")\n\tplt.ylabel(\"Radius (Solar Radii)\")\n\tplt.show()\n\t\ndef problem_5():\n\n\t\n\t'''\n\tproduces a mass vs. radius plot for our neutron star\n\truns with central densities from 1e4 to 1e6 in g / cm ^ 3\n\t'''\n\t###neutron stars\n\th = 1e4\n\t\n\trc = 1e14 * u.g / (u.cm ** 3)\n\tPc = NSPressure(rc)\n\tRad = []\n\tM_total = []\n\twhile rc.value < 1e17:\n\t\tP_c = NSPressure(rc).value ###Central Pressure in Ba\n\t\t\n\t\tNeutron_Star = ode.solve_ode(NS_func , 1 , [P_c , 0] , h)\n\t\t\n\t\trend = 5e9\n\t\tnsr , nsy = Neutron_Star.Forward_Euler(rend)\n\n\t\tfor i in range(len(nsr)):\n\t\t\tif nsy[i][0] < 0:\n\t\t\t\tR = nsr[i]\n\t\t\t\tM = nsy[i][1]\n\t\t\t\tRad.append((R * u.cm).to(u.km).value)\n\t\t\t\tM_total.append( (M * u.g).to(u.M_sun).value)\n\t\t\t\tbreak\n\t\trc *= 1.2\n\tplt.plot(M_total , Rad)\n\tplt.xlabel(\"Mass (M_sun)\")\n\tplt.ylabel(\"Radius (km)\")\n\tplt.show()\n#problem_2()\n#problem_3()\n#problem_4()\nproblem_5()\n","sub_path":"HW3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"433586443","text":"#!/usr/bin/env python\n\nimport time\nimport datetime\nimport csv\nimport os, shutil, configparser\nimport subprocess\nimport serial\n\nfrom time import sleep\nfrom datetime import datetime\nfrom Bird_Scale_Setup import Setup\n\nfrom Bird_Scale_Weights import scale_weights as Get_Weight\nfrom OPC_Client import OPC_Sync\nfrom OPC_Client_CAL import OPC_CAL_Sync\nfrom OPC_Units import OPC_Unit_Get\nimport Bird_Scale_Functions as Custom_FB \n\n# Setup Config and Directories\nconfig_files = Setup()\n\n# Initialize Config File\nconfig = configparser.ConfigParser()\nconfig_2 = configparser.ConfigParser()\n\nSerial_Data = Custom_FB.read_config_ser(config_files)\n\n# Read Out Serial Array\nserial_usb = Serial_Data[0]\nserial_baud = Serial_Data[1]\n\n# Serial Setup\nser = serial.Serial(serial_usb, baudrate=serial_baud, timeout=1)\n\n# Variables\ncount = 0\nday_change = 0\nday_change_done = 0\nday_rese_t = [2,1]\nStable_Counter = 0\nSeconds = 0\n\n\ntry:\n Units = 0\n Units = OPC_Unit_Get(config_files)\n while True:\n Start_time = time.time()\n # Day Reset Triggering\n now = datetime.now()\n if now.hour == day_rese_t[0] and now.minute == day_rese_t[1] and day_change_done == 0:\n day_change = 1\n day_change_done = 1\n else:\n day_change = 0\n if now.hour == day_rese_t[0] and now.minute > day_rese_t[1]:\n day_change_done = 0\n\n # Get Bird Scale Data\n Bird_Data = Get_Weight(day_change,config_files,ser,Stable_Counter,Units)\n\n # HMI OPC Connection every 50 Cycles\n if count > 100:\n count = 0\n STD_Weight_Return = OPC_Sync(Bird_Avg_Weight=Bird_Data[1],\n Bird_Last_Weight=Bird_Data[2],\n Bird_Num_Weights=Bird_Data[3],\n Bird_Scale_Readout=Bird_Data[0],\n Bird_Uniformity=Bird_Data[9],\n Bird_CV_Value=Bird_Data[12],\n user_config_file=config_files)\n \n # Store Posiible New STD\n Custom_FB.write_config_STDscale(STD_Weight_Return,config_files)\n \n \n # Display Data Stream\n End_time = time.time()\n duration = End_time - Start_time\n\n '''\n # Clear Terminal\n os.system('printf \"\\033c\"')\n print(\"Cycle Time: \"+str(duration))\n print('----------------')\n print('Filtered Weight: ', Bird_Data[0],'Kg')\n print('AVG Weight: ', Bird_Data[1],'Kg')\n print('STD Weight: ', Bird_Data[4],'Kg')\n print('Last Weight: ', Bird_Data[2],'Kg')\n print('Number of Weights: ',Bird_Data[3])\n print('Scale: ', Bird_Data[0],'Kg')\n print('Last Difference: ', Bird_Data[7],'Kg')\n print('Last AVG: ', Bird_Data[8],'Kg')\n print('Uniformity', Bird_Data[9],'Kg')\n print('CV: ', Bird_Data[12],'Kg')\n print('Min Cutoff: ', Bird_Data[5],'Kg')\n print('Max Cutoff: ', Bird_Data[6],'Kg')\n print('% Low: ', Bird_Data[15],'Kg')\n print('% High: ', Bird_Data[16],'Kg')\n print('Counts: ', Bird_Data[17])\n print('----------------')\n '''\n Stable_Counter = Bird_Data[14]\n #print('Stable Counts: ',Stable_Counter)\n \n count = count+1\n\n if (now.second != Seconds):\n SN = int(Bird_Data[20][0]-1)\n Cal_Data_HMI = [0.0] *8\n Cal_Data_HMI[1] = Bird_Data[17]\n Cal_Data_HMI[2] = Bird_Data[18][0]\n Cal_Data_HMI[3] = Bird_Data[19][0]\n \n CAL_Tags_HMI = OPC_CAL_Sync(Cal_Data_HMI,config_files,SN)\n\n if (Units == 0):\n Unit_Label = 'Kg'\n elif (Units == 1):\n Unit_Label = 'Lbs'\n \n CAL_Data = [0,0]\n if (CAL_Tags_HMI[0] == 1):\n CAL_Data[1] = Bird_Data[17]\n CAL_Data[0] = Bird_Data[18][0]\n \n print('Scale: ', Bird_Data[0][0],Unit_Label)\n print('Counts: ', Bird_Data[17])\n print('Span Counts: ', CAL_Data[0])\n print('Zero Counts: ', CAL_Data[1])\n Custom_FB.write_config_CALscale(CAL_Data,config_files)\n \n if (CAL_Tags_HMI[1] == 1):\n CAL_Data[1] = Bird_Data[19][0]\n CAL_Data[0] = (Bird_Data[17]-CAL_Data[1])/CAL_Tags_HMI[2]\n \n print('Scale: ', Bird_Data[0][0],Unit_Label)\n print('Counts: ', Bird_Data[17])\n print('Span Counts: ', CAL_Data[0])\n print('Zero Counts: ', CAL_Data[1])\n Custom_FB.write_config_CALscale(CAL_Data,config_files)\n \n\n Seconds = now.second\n \n \nexcept Exception as e:\n print('Main Program Failure...')\n print(e)\n\n\n\n\n","sub_path":"Programs/Bird_Scale/West_Heritage_Barn_1/Wireless_Bird_Scale_OpenScale/Programs_V1-0-2/Bird_Scale.py","file_name":"Bird_Scale.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"224161189","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2016 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"NDK packaging APIs.\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nfrom typing import Iterable, List, Optional, Set, Tuple\n\nimport ndk.abis\nfrom ndk.hosts import Host, host_to_tag\n\n\nPACKAGE_VARIANTS = (\n 'abi',\n 'arch',\n 'host',\n 'toolchain',\n 'triple',\n)\n\n\ndef expand_paths(package: str, host: Host,\n arches: Optional[Iterable[ndk.abis.Arch]]) -> List[str]:\n \"\"\"Expands package definition tuple into list of full package names.\n\n >>> expand_paths('gcc-{toolchain}-{host}', Host.Linux, ['arm', 'x86_64'])\n ['gcc-arm-linux-androideabi-linux-x86_64', 'gcc-x86_64-linux-x86_64']\n\n >>> expand_paths('gdbserver-{arch}', Host.Linux, ['arm64', 'x86_64'])\n ['gdbserver-arm64', 'gdbserver-x86_64']\n\n >>> expand_paths('llvm-{host}', Host.Linux, None)\n ['llvm-linux-x86_64']\n\n >>> expand_paths('platforms', Host.Linux, ['arm'])\n ['platforms']\n\n >>> expand_paths('libc++-{abi}', Host.Linux, ['arm'])\n ['libc++-armeabi-v7a']\n\n >>> expand_paths('binutils/{triple}', Host.Linux, ['arm', 'x86_64'])\n ['binutils/arm-linux-androideabi', 'binutils/x86_64-linux-android']\n\n >> expand_paths('toolchains/{toolchain}-4.9', Host.Linux, ['arm', 'x86'])\n ['toolchains/arm-linux-androideabi-4.9', 'toolchains/x86-4.9']\n \"\"\"\n host_tag = host_to_tag(host)\n if arches is None:\n return [package.format(host=host_tag)]\n\n seen_packages: Set[str] = set()\n packages = []\n for arch in arches:\n triple = ndk.abis.arch_to_triple(arch)\n toolchain = ndk.abis.arch_to_toolchain(arch)\n for abi in ndk.abis.arch_to_abis(arch):\n expanded = package.format(\n abi=abi, arch=arch, host=host_tag, triple=triple,\n toolchain=toolchain)\n if expanded not in seen_packages:\n packages.append(expanded)\n seen_packages.add(expanded)\n return packages\n\n\ndef package_varies_by(install_path: str, variant: str) -> bool:\n \"\"\"Determines if a package varies by a given input.\n\n >>> package_varies_by('foo-{host}', 'host')\n True\n\n >>> package_varies_by('foo', 'host')\n False\n\n >>> package_varies_by('foo-{arch}', 'host')\n False\n \"\"\"\n\n if variant not in PACKAGE_VARIANTS:\n raise ValueError\n\n variant_replacement_str = '{' + variant + '}'\n return variant_replacement_str in install_path\n\n\ndef expand_packages(package: str, install_path: str, host: Host,\n arches: List[ndk.abis.Arch]) -> Iterable[Tuple[str, str]]:\n \"\"\"Returns a list of tuples of `(package, install_path)`.\"\"\"\n package_template = package\n for variant in PACKAGE_VARIANTS:\n if package_varies_by(install_path, variant):\n package_template += '-{' + variant + '}'\n\n expanded_packages = expand_paths(package_template, host, arches)\n expanded_installs = expand_paths(install_path, host, arches)\n return zip(expanded_packages, expanded_installs)\n\n\ndef extract_zip(package_path: str, install_path: str) -> None:\n \"\"\"Extracts the contents of a zipfile to a directory.\n\n This behaves similar to the following shell commands (using tar instead of\n zip because `unzip` doesn't support `--strip-components`):\n\n mkdir -p $install_path\n tar xf $package_path -C $install_path --strip-components=1\n\n That is, the first directory in the package is stripped and the contents\n are placed in the install path.\n\n Args:\n package_path: Path to the zip file to extract.\n install_path: Directory in which to extract zip contents.\n\n Raises:\n RuntimeError: The zip file was not in the allowed format. i.e. the zip\n had more than one top level directory or was empty.\n \"\"\"\n package_name = os.path.basename(package_path)\n extract_dir = tempfile.mkdtemp()\n try:\n subprocess.check_call(\n ['unzip', '-q', package_path, '-d', extract_dir])\n dirs = os.listdir(extract_dir)\n if len(dirs) > 1:\n msg = 'Package has more than one root directory: ' + package_name\n raise RuntimeError(msg)\n if not dirs:\n raise RuntimeError('Package was empty: ' + package_name)\n parent_dir = os.path.dirname(install_path)\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)\n shutil.move(os.path.join(extract_dir, dirs[0]), install_path)\n finally:\n shutil.rmtree(extract_dir)\n","sub_path":"ndk/packaging.py","file_name":"packaging.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"2414829","text":"import sys\n\ndef main(file):\n\tf = open(file, 'r')\n\tn = f.readline()\n\tA = [int(i) for i in f.readline().split()]\n\tstring = ''\n\tA = mergeSort(A)\n\tfor v in A:\n\t\tstring = string + \" %d\" %v\n\tprint(string)\n\n\ndef mergeSort(A):\n result = []\n if len(A) < 20:\n return sorted(A)\n mid = int(len(A)/2)\n y = mergeSort(A[:mid])\n z = mergeSort(A[mid:])\n i = 0\n j = 0\n while i < len(y) and j < len(z):\n if y[i] > z[j]:\n result.append(z[j])\n j += 1\n else:\n result.append(y[i])\n i += 1\n result += y[i:]\n result += z[j:]\n return result\n\n\nif __name__ == '__main__':\n\tmain('rosalind_ms.txt')\n","sub_path":"p12.py","file_name":"p12.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"17445487","text":"from include import *\nfrom util import *\n\nargs = ArgParser()\nmodelPath = args.model\ninputPath = args.input\n\nscalerModelName = \"classifiers_dump/robustscaler.sav\"\nsvmModelName = \"classifiers_dump/svm_clf.sav\"\nnbModelName = \"classifiers_dump/nb_clf.sav\"\nknnModelName = \"classifiers_dump/knn_clf.sav\"\n\nannModelName = \"saved_model/ann_clf\"\n\n# Load essential classes and trained classifiers from model\n\nwith open(modelPath + scalerModelName, 'rb') as modelFile:\n robust_scaler_fitted = pickle.load(modelFile)\n\nwith open(modelPath + svmModelName, 'rb') as modelFile:\n best_svm_clf = pickle.load(modelFile)\n\nwith open(modelPath + nbModelName, 'rb') as modelFile:\n best_nb_clf = pickle.load(modelFile)\n\nwith open(modelPath + knnModelName, 'rb') as modelFile:\n best_knn_clf = pickle.load(modelFile)\n\nbest_ann_clf = tf.keras.models.load_model(modelPath + annModelName)\n\ndf = getTracksFeatures(inputPath)\ndf = df.drop(columns=['track_href', 'analysis_url', 'uri', 'id', 'type'])\n\n# Scale\ndf[df.columns] = robust_scaler_fitted.transform(df[df.columns])\n\n# column check\ncolumns_list = ['danceability', 'energy', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness',\n 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature']\ndf = df[columns_list]\n\ndf = df.drop(columns=['tempo', 'mode'])\n\n# SVM Prediction\ny_pred_svm = best_svm_clf.predict(df)\nprint(\"SVM Prediction\")\nprint(y_pred_svm)\n\n# Naive Bayes Prediction\ny_pred_nb = best_nb_clf.predict(df)\nprint(\"Naive Bayes Prediction\")\nprint(y_pred_nb)\n\n# KNN Prediction\ny_pred_knn = best_knn_clf.predict(df)\nprint(\"K-Nearest Neighbor Prediction\")\nprint(y_pred_knn)\n\n# ANN Prediction\ny_pred_ann = best_ann_clf.predict(df)\ny_pred_ann = y_pred_ann.reshape(y_pred_ann.shape[0], ).round().astype(int)\nprint(\"Neural Network Prediction\")\nprint(y_pred_ann)\n","sub_path":"predict/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"263218239","text":"from struct import pack\nfrom math import pi\n\nfrom game.world.managers import UnitManager\nfrom utils.constants.ObjectCodes import ObjectTypes\nfrom utils.ConfigManager import config\nfrom game.world.managers.abstractions.Vector import Vector\nfrom network.packet.PacketWriter import PacketWriter, OpCode\nfrom utils.constants.UpdateFields \\\n import ContainerFields, ItemFields, PlayerFields, UnitFields, ObjectFields, GameObjectFields\n\n\nclass ObjectManager(object):\n def __init__(self,\n guid=0,\n entry=0,\n object_type=None,\n walk_speed=2.5,\n running_speed=7.0,\n swim_speed=4.72222223,\n turn_rate=pi,\n movement_flags=0,\n unit_flags=0,\n dynamic_flags=0,\n shapeshift_form=0,\n display_id=0,\n scale=1,\n bounding_radius=config.Unit.Defaults.bounding_radius,\n location=Vector(),\n transport_id=0,\n transport=Vector(),\n transport_orientation=0,\n orientation=0,\n pitch=0,\n zone=0,\n map_=0):\n self.guid = guid\n self.entry = entry\n self.object_type = [ObjectTypes.TYPE_OBJECT]\n self.walk_speed = walk_speed\n self.running_speed = running_speed\n self.swim_speed = swim_speed\n self.turn_rate = turn_rate\n self.movement_flags = movement_flags\n self.unit_flags = unit_flags\n self.dynamic_flags = dynamic_flags\n self.shapeshift_form = shapeshift_form\n self.display_id = display_id\n self.scale = scale\n self.bounding_radius = bounding_radius\n self.location = location\n self.transport_id = transport_id\n self.transport = transport\n self.transport_orientation = transport_orientation\n self.orientation = orientation\n self.pitch = pitch\n self.zone = zone\n self.map_ = map_\n\n def get_object_type_value(self):\n type_value = 0\n for type_ in self.object_type:\n type_value |= type_.value\n return type_value\n\n def get_update_mask(self):\n mask = 0\n if ObjectTypes.TYPE_CONTAINER in self.object_type:\n mask += ContainerFields.CONTAINER_END.value\n if ObjectTypes.TYPE_ITEM in self.object_type:\n mask += ItemFields.ITEM_END.value\n if ObjectTypes.TYPE_PLAYER in self.object_type:\n mask += PlayerFields.PLAYER_END.value\n if ObjectTypes.TYPE_UNIT in self.object_type:\n mask += UnitFields.UNIT_END.value\n if ObjectTypes.TYPE_OBJECT in self.object_type:\n mask += ObjectFields.OBJECT_END.value\n if ObjectTypes.TYPE_GAMEOBJECT in self.object_type:\n mask += GameObjectFields.GAMEOBJECT_END.value\n return (mask + 31) / 32\n\n def create_update_packet(self, update_type):\n update_mask = int(self.get_update_mask())\n data = pack(\n '上传成功
')\n else:\n upfileform=UpFileForm()\n return render(request,'process/upfile.html',{'upfileform':upfileform})\n\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n new_user = user_form.save(commit=False)\n new_user.set_password(user_form.cleaned_data['password'])\n new_user.save()\n profile = Profile.objects.create(user=new_user)\n return render(request,'account/register_done.html',{'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n user_form.fields['first_name'].label = '名字'\n return render(request,'account/register.html',{'user_form': user_form})\n\n# def user_login(request):\n# if request.method=='POST':\n# form=LoginForm(request.POST)\n# if form.is_valid():\n# cd=form.cleaned_data\n# user=authenticate(username=cd['username'],password=cd['password'])\n# if user is not None:\n# if user.is_active:\n# login(request,user)\n# return HttpResponse('Authenticated successfully')\n# else:\n# return HttpResponse('disabled account')\n# else:\n# return HttpResponse('invalid login')\n# else:\n# form=LoginForm()\n# return render(request,'account/login.html',{'form':form})\n\n\n","sub_path":"ftm/ftmapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"139532689","text":"from flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.script import Manager\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\nfrom sqlalchemy import Column, Integer, String, Float, Text, ForeignKey, DateTime\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext.associationproxy import association_proxy\n\nimport datetime\nimport keys\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = keys.get_mysql_uri()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\ndb = SQLAlchemy(app)\n\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('njtransit', MigrateCommand)\n\nclass Stations(db.Model):\n\n\t__tablename__ = \"stations\"\n\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.String(255))\n\tabbr = db.Column(db.String(255))\n\tschedules = relationship(\"Schedule\", backref=\"stations\", single_parent=True, cascade=\"all, delete-orphan\", primaryjoin=(\"Stations.id==Schedule.station_id\"))\n\n\tdef __init__(self, name, abbr):\n\t\tself.name = name\n\t\tself.abbr = abbr\n\nclass Schedule(db.Model):\n\n\t__tablename__ = \"schedule\"\n\n\tid = db.Column(db.Integer, primary_key=True)\n\tdeparture = db.Column(db.String(255))\n\tto = db.Column(db.String(255))\n\ttrack = db.Column(db.String(255))\n\tline = db.Column(db.String(255))\n\ttrain = db.Column(db.String(255))\n\tstatus = db.Column(db.String(255))\n\tstation_id = db.Column(db.Integer, ForeignKey(\"stations.id\"))\n\ttimestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n\tdef __init__(self, departure, to, track, line, train, status):\n\t\tself.departure = departure\n\t\tself.to = to\n\t\tself.track = track\n\t\tself.line = line\n\t\tself.train = train\n\t\tself.status = status\n\nclass Requests(db.Model):\n\n\t__tablename__ = \"requests\"\n\n\tid = db.Column(db.Integer, primary_key=True)\n\turl = db.Column(db.String(255))\n\ttime = db.Column(db.Float)\n\ttimestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\tversion = db.Column(db.Integer)\n\n\tdef __init__(self, url, time, version):\n\t\tself.url = url\n\t\tself.time = time\n\t\tself.version = version\n\nif __name__ == \"__main__\":\n\tmanager.run()\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"526106627","text":"#This \"all.py\" file contains most of what I`ve written in python while Im in my learning phase..\n\n\n#My first lines of code which are ofc quite uninteresting.\nprint('Hello, Lets start off with you')\n\nprint('What is your name?') #This asks for name..\nnameVariable = input() #set variable for name that you type\nprint('It is good to meet you, ' + nameVariable)\nprint('The length of your name is:')\nprint(len(nameVariable)) #len is a function that counts letters\n\nprint('What is your age?') #Ask for age..\nageVariable = input() #set variable for age that you tyoe\nprint('You will be ' + str(int(ageVariable) + 1) + ' in a year.')\n\n\n#break and continue statement examples..\nnameVariable = ''\nwhile True:\n print('Type your name, pls. Only Emil is granted?')\n nameVariable = input()\n if nameVariable == 'Emil':\n break #break statement causes the execution to immediatly leave the loop.\nprint('Thanks!')\n\nrandomVariable = 0\nwhile randomVariable < 5:\n randomVariable = randomVariable +1\n if randomVariable == 3:\n continue # continue statement makes the computer jump back\n # to the start of the while statement ignoring lines\n # of code that would be after.\n print('randomVariable is '+str(randomVariable))\n\n\n#What is 2+2 my simple math question the sophisticated version.\nprint('Hello, can you type you`r name in for me please?!')\nnameVar = ('')\nwhile nameVar != bool(0):\n\t#while nameVar != str(''):\n\tnameVar = str(input())\n\t#print('Thanks, ' + nameVar + '!')\n\tbreak\nprint('Good to meet you, ' + nameVar + '!')\ntheCorrectAnswer = 2 + 2\nprint('what is 2+2?')\nmyAnswer = int(input())\nif theCorrectAnswer == myAnswer:\n\tprint('Correct!')\nelif theCorrectAnswer == myAnswer + 1 or theCorrectAnswer == myAnswer - 1:\n print('You are close, try again!')\n while theCorrectAnswer == myAnswer + 1 or theCorrectAnswer == myAnswer - 1:\n my2ndAnswer = int(input())\n if theCorrectAnswer == my2ndAnswer + 1 or theCorrectAnswer == my2ndAnswer - 1:\n print('Close again, try once more!')\n elif theCorrectAnswer == my2ndAnswer:\n print('Good job!')\n break\n elif theCorrectAnswer != my2ndAnswer or theCorrectAnswer != my2ndAnswer \\\n + 1 or the2ndCorrectAnswer != my2ndAnswer - 1:\n print('You are to far off, my friend!')\n break\nelse:\n\tprint('Wrong answer!')\nprint('Good bye ' + nameVar + '!')\n\n\n#Super simple and insecure login\nprint('Please type in your username!')\nprint('Hint: its \"emil\"!')\nusernameVariable1 = str(input())\nwhile usernameVariable1 != 'emil':\n print('Wrong user! Please re-type your username!')\n usernameVariable1 = str(input())\n if usernameVariable1 == 'emil':\n break\nprint('Alrighty!')\nprint('Now, type in your password!')\nprint('Hint: it`s a quite common one..!')\nuserPassword1 = str(input())\nwhile userPassword1 != '123':\n print('Wrong, dude..!')\n print('Try again!')\n userPassword1 = str(input())\n if userPassword1 == '123':\n break\nprint('You are now signed in, my friend!')\n\n\n#Looking at the \"for x in range\" function.\nprint('I want apples!')\nfor aWeirdVariable1 in range(5): #This will run the next code 5 times\n# and each time its ran it will be adding an incremental values ranging\n# from 0-4 in the aWeirdVariable1.\n if aWeirdVariable1 == 0: #We skip the first value as this is 0 and\n # it does not make sense to the context.\n # Notice the continue statement under.\n continue # Here it is. Since the aWeirdVariable1 is valued\n # at 0 at first, we chose to jump back to the start of the \"for\" statement.\n print('I want ' + str(aWeirdVariable1))\nprint('I want all apples!')\n\n\n#Just looking at the for function and a usecase for it.\n#It basically just start with the number 0 and adds the sum\n# of the 0 (aTotal1) + 0 and then, 1 and then 2... to 13\n#(counting from 0 to 13 = 14) (addedToTotal1).\naTotal1 = 0\nfor addedToTotal1 in range (14):\n aTotal1 = aTotal1 + addedToTotal1\nprint(int(aTotal1))\n\n\n#Taking a look at functions including while loop.\ndef firstFunction():\n if randomVariable2 == 1:\n print('You typed \"1\"')\n elif randomVariable2 == 2:\n print('You typed \"2\"')\n else:\n print('You typed \"3\"')\n\nprint('Type the number 1, 2 or 3!')\n\nrandomVariable2 = int(input())\nif randomVariable2 == int(1) or randomVariable2 == int(2) or\\\nrandomVariable2 == int(3):\n firstFunction()\nelse:\n while randomVariable2 != int(1) or randomVariable2 != int(2) or \\\n randomVariable2 != int(3):\n print('You did not type 1, 2 or 3!')\n print('Please type 1, 2 or 3!')\n randomVariable2 = int(input())\n if randomVariable2 == int(1) or randomVariable2 == int(2) or \\\n randomVariable2 == int(3):\n firstFunction()\n break\n\n\n# Using a function in a simple math question to qoute the answer\n# and using an if statement to decide which output to run.\ndef secondFunction():\n if randomVariable3 == int(4):\n print('That is correct, ' + str(int(randomVariable3)) + ' is the answer!')\n else:\n print('No, ' + str(int(randomVariable3)) + ' is not the correct answer!')\n\n\nprint('What is 2 + 2')\nrandomVariable3 = int(input())\nif randomVariable3 == int(4): # You can see that both lines will be run,\n# but the difference is the if statement inside \"secondFunction\"`s if statement.`\n secondFunction()\nelse:\n secondFunction()\n\n\n\n#Calling a function with different argumetns.\nprint('Type 1, 2 or any other number!')\nrandomVariable4 = int(input())\n\n# The word inside the parentheses is called a paramater.\n# A parameter is a variable that an argument is stored in.\ndef thirdFunction(randomVariable4):\n print('This ' + randomVariable4)\n\nif randomVariable4 == int(1):\n thirdFunction('parameter has a value of 1!')\nelif randomVariable4 == int(2):\n thirdFunction('parameter has a value of 2!')\nelse:\n thirdFunction('parameter has a value of any other number\\\n you typed which in this case was ' + str(randomVariable4) + '!')\n\n\n# Error handeling and how to work with it.\n# Handeling errors and examples of treating a function as a \"black box\"\n# when all you want is the output of a function.\nprint('Now I`m gonna divide the number 42 in 0, 1, 2, 3 ,5 and 21..!')\ndef randomVariable5(divideBy):\n try:\n return 42 / divideBy\n except ZeroDivisionError:\n print('No, dont divide by zero my friend!')\n# Since dividing by zero prints an error message, notice the\n# first print statement (0); the program will output an error\n# saying ZeroDivisionError: division by zero.\n# To overcome this so that the program can continue,\n# you can add in \"try and \"except\"\" statements.\nprint(randomVariable5(0))\nprint(randomVariable5(1))\nprint(randomVariable5(2))\nprint(int(randomVariable5(3)))\nprint(float(randomVariable5(5)))\nprint(int(randomVariable5(21)))\n\n\n# Another example..\n# This program lets you chose a number, but what if you type a\n# string of characters instead?\nprint('Type a number, please!')\nrandomVariable6 = input()\nif int(randomVariable6) > 5:\n print('Thats a high number!')\nelse:\n print('Thats a low number!')\n\n# If you type anything other than integers (number) it will crash with\n# the error message \"ValueError: invalid literal for int() with base 10: 'as'\".\n# This program fixes this by using the try and except statements.\n# This is a good example of the use of an input validation.\nprint('Type a number, please!')\nrandomVariable7 = input()\n\ntry:\n if int(randomVariable7) > 5:\n print('Thats a high number!')\n else:\n print('Thats a low number!')\nexcept ValueError: # You can see that we put the first line in the\n# error output in the except statement making the program run\n# the lines under if this error appears.\n print('You did not type in a number!')\n\n\n\n # Learning about the random function in python.\n# A simple example usage would be a guessing game..\n# I will also implement the \"in range\" function to limit\n# the amount of tries the user have to guess the correct answer.\n# Using the random module to make a guessing game would look something like this..\nimport random #We import the random module..\nprint('This is a guessing game!')\n# We set the variable \"secretVariable1\" to a random number between 1 and 20..\nsecretVariable1 = random.randint(1, 20)\nprint('Guess a number between 1 and 20!')\n\n# We set a range of 0 to 6 (not counting 7 as you remember)\n# giving us 1 to 6 -> giving us 6 tries.\n# This will run one line at a time in the for loop up\n# to 6 times unless you guess correct.\nfor guessRemainingVariable in range(1, 7):\n print('Now, guess the number!')\n# We store the number that we guess as an integer in the \"guessVariable1\"..\n guessVariable1 = int(input())\n if guessVariable1 < secretVariable1 and guessRemainingVariable <= 5: #If number we guess is lower than the number that is random and we have counted less than 0-5 (6 tries)..\n print('You have ' + str(int(6 - (guessRemainingVariable))) + ' tries left..!')\n print('To low, my friend, try again..!')\n elif guessVariable1 > secretVariable1 and guessRemainingVariable <= 5: #If number that we guess is higher than the number that is random and we have counted less than 0-5 (6 tries)..\n print('You have ' + str(int(6 - (guessRemainingVariable))) + ' tries left..!')\n print('To high, my friend, try again..!')\n else:\n break #This condition would be met if your guess is correct. This takes us out of the \"for\" loop and move us on to the next block.\n\nif guessVariable1 == secretVariable1:\n print('Your guess is correct, my friend. You used ' + str(int(guessVariable1)) + ' attempts. Congratulations!')\nelse:\n print('You ran out of tries. The number was ' + str(secretVariable1) + '.. Good-bye!')\n\n\n\n# Learning the list function..\n# Lists can hold multiple values. It ca neveb hold lists inside of a list.\nlistVariableTwo = [['norway','sweden','denmark'],['oslo','stockholm','copenhagen']] #The syntax is basically [[','],[',']]\n# listVariableTwo #This will output: [['norway', 'sweden', 'denmark'], ['oslo', 'stockholm', 'copenhagen']]\n# listVariableTwo [0] #While this will output: ['norway', 'sweden', 'denmark']\n# listVariableTwo [0][2] #And this will output: 'denmark'\n\nprint('We have two lists. One containing countries and another containing cities..!')\nprint('Chose wether you want to select the country-list or the city-list')\nprint('0 = ' + str(listVariableTwo [0])) #This prints out the first list inside the listVariableTwo.\nprint('1 = ' + str(listVariableTwo [1])) #This prints out the second list inside the listVariableTwo.\nprint('Type 0 or 1') #Lets ask user to type 0 or 1.\nuserInput1 = int(input()) #We store the value in this variable.\n\n# First function is chosing which list to view.\nif userInput1 == 0: # If 0 was typed, we run this block.\n print(listVariableTwo[0]) # We print out the first list of the two.\n listVariableTwo = listVariableTwo[0] #Now we also set the variable to only hold the first list before we move on to the next function.\nelif userInput1 == 1:# If 1 was typed, we run this block.\n print(listVariableTwo[1]) # We print out the second list of the two.\n listVariableTwo = listVariableTwo[1] # Now we also set the variable to only hold the second list before we move on to the next function.\nelse:\n print('You did not type 0 or 1..') # If the user is imbecile and dont understand the instruction to type 0 or 1, well then.....\n\n#The next function is chosing one item/value inside the selected list.\nprint('0 = ' + str(listVariableTwo [0])) #This prints out the first item in the previously selected list previous\nprint('1 = ' + str(listVariableTwo [1])) #This prints out the second item in the previously selected list previous\nprint('2 = ' + str(listVariableTwo [2])) #This prints out the third item in the previously selected list previous\n\nprint('Type 0, 1 or 2 to select item')\nuserInput2 = int(input())\nif userInput2 == 0: #If 0 was typed, we run this block.\n print(listVariableTwo[0]) #We print out the first item in the list.\nelif userInput2 == 1: #If 1 was typed, we run this block.\n print(listVariableTwo[1]) #We print out the second item in the list.\nelif userInput2 == 2: #If 2 was typed, we run this block.\n print(listVariableTwo[2]) #We print out the third item in the list.\nelse:\n print('You did not type 0,1 or 2..') #If the user is imbecile and dont understand the instruction to type 0, 1 or 2; well then.....\n\n\n\n\n#Update the list through user input can also be done.\n#Lets look at changing an index (item) inside a list.\nlistVariableThree = ['first_record','second_record','third_record']\nprint('We have ' + str(listVariableThree) + ' in this list!')\nprint('Write down the name of the item that you want stored in the first_record!')\nlistVariableThree[0] = str(input())\n\nprint('Thanks, I have now stored ' + str(listVariableThree[0]) + ' in your first_record')\nprint('Your list now contains ' + str(listVariableThree))\nprint('Now tell me what the other two items would be in the second_record and third_record')\nprint('Type the first item and then press enter, then the second item followed by another enter!')\nlistVariableThree[1:3] = str(input()) + str(input())\nprint('Your list now contains ' + str(listVariableThree) + '. Thanks for stopping by!')\n\n\n\n\n\n#Lets see how we can find the length of a list.\n#The len() function counts letter, indexes etc and can be used in this case.\nlistVariableFour = ['monitor','speakers','display','keyboard','mouse','microphone','gamepad']\nprint(len(listVariableFour)) #We put the len() function inside a print() function so that it prints out the length of the list -> in this case the total amount of values (indexes).\n\n\n\n\n\n#Almost forgot... we should also be able to delete records!\n#For deleting we use the del statement.\n#I`ll also add some other functions that I`ve learned so far (try and while loop) to make this more sophisticated.\nlistVariableSeven = ['emil','jan','thomas'] #We prelist 3 indexes (items).\nprint('Type 1, 2 ,3 or 4 and then press enter to delete a name!')\nprint('1 = ' + listVariableSeven[0])\nprint('2 = ' + listVariableSeven[1])\nprint('3 = ' + listVariableSeven[2])\nprint('4 = cancel') #We add a cancel option so that the user can cancel out and not be forced to delete a record.\ndelListSeven = 4 #We just set the variable that we will input to a value that will trigger the while loop.\nwhile delListSeven != 4 or delListSeven != 3 or delListSeven != 2 or delListSeven != 1: #Since our variable is not 1, 2, 3 or 4 (since it is preset to 5) the while loop triggers.\n try: #try statement lets the program try. This means essentially that if user input throws an error which in this case if we type characters since we spesifically ask for integer (notice the int(input()) we can set an action for that specific error.\n delListSeven = int(input()) #Remember that we need an int before the input() statement so that the if function works when equal input number.\n if delListSeven == 1 or delListSeven == 2 or delListSeven == 3 or delListSeven == 4: #We break out only if the value we type in is 1, 2, 3 or 4.\n break\n else:\n print('Wrong number selection!')\n print('Try again..!')\n print('Select 1, 2, 3 or 4 then hit enter..')\n continue\n except ValueError: #This is the error exception. We run the lines under if the line under the \"try\" statement will give us the ValueError which it will if we type in characters.\n print('Dont type characters!')\n print('Try again..!')\n print('Select 1, 2, 3 or 4 then hit enter..')\n continue #We jump back to the \"try\" statement using this continue statement.\n#The lines under are ran depending on which input made it through the while loop.\nif delListSeven == 1:\n del listVariableSeven[0]\nelif delListSeven == 2:\n del listVariableSeven[1]\nelif delListSeven == 3:\n del listVariableSeven[2]\nelse:\n print('Your list was unchanged..!')\nprint('Your list now looks like this ' + '\"' + str(listVariableSeven) + '\".')\n\n\n\n#Write records to list.\n#Giving the list a name and putting multiple values into the list:\nmyEmployees = [] #This is an empty list called myEmployees.\nwhile True:\n print('Name of employee ' + str(len(myEmployees) +1) + ' (enter nothing to stop!):')\n employeeName = input()\n if employeeName == '':\n break\n myEmployees = myEmployees + [employeeName] #We concatenate the list.\nprint('The employees names are:')\nfor employeeName in myEmployees: #The line under will run for any item/value/index in the list myEmployees.\n print(' ' + employeeName)\nwhile True: #We dont really need this while loop, but in this case it does not seem to introduce any errors or bugs, so why not for learning sake..\n if 'emil' in myEmployees: #Runs line under if emil is in the list.\n print('Yes, emil is listed here!')\n break\n elif 'emil' not in myEmployees: #Runs line under if emil is not listed.\n print('emil is not listed here!')\n break\nprint('Done!')\n\n\n#We can also use the list() function to list all numbers in the range() function.\n#Say for example that we have range (0,10,2) 0 - 10 and 2 increments.\nlist(range(-10,12,3)) #This will print out numbers from -10 to 11 (not counting 12) with an increment of 3. -> [-10, -7, -4, -1, 2, 5, 8, 11]\n#Lets put it in a variable..\nlistVariableEight = list(range(-10,12,3))\nprint(listVariableEight)\n\n\n#Lets organize the stuff in a list using the for in range function.\nlistVariableNine = ['phone','laptop','wallet','glasses'] #A list of my stuff..\nfor rangeVariable1 in range(len(listVariableNine)): #Notice that we put a len() function inside the for in range() statement. This will print out the number/order of all items in list.\n print('Item ' + str(rangeVariable1) + ' in my list off stuff is: ' + listVariableNine[rangeVariable1]) #Notice that since we concatenate a string in the print statement, we have to put str(rangeVariable1) because the in range variable contains only integers.\n\n\n#We can do a \"database\" like scheme for our list by giving a name-variable to each index.\ncarVariable = ['audi','yellow','2004'] #First we assign the list to a a variable..\nbrand, colour, year = carVariable #Then we give names to the items in relation to where they are stored; audi is in index 0 so we put brand first, then yellow which corresponds to the 2nd value = index 1 and so forth.. )\n#We can also do this in one line..\nbrand, colour, year = 'honda','red','2009'\n#To see if something is in the list we can use in or not in.\n'honda' in carVariable #If honda is found in any of the indexes it will return \"True\", if not; it will return \"False\".\n\n# List Methods...\n#Find the index position and or return a ValueError with the index() method.\n#Syntax = thelistvariable.index('indexname')\nfoodVariable = ['pizza','taco','salomon','chicken','spaghetti']\nfoodVariable.index('pizza') #This will return 0 becuase pizza is the first index.\nfoodVariable.index('pancake') #This will return:\n# Traceback (most recent call last):\n# File \"\", line 1, in \n# ValueError: 'pancake' is not in list\n\n#Adding (appending) an item to the list and/or inserting an item. Syntax = variable.append('item').\nfoodVariable.append('pancake') #This will add 'pancale' as in the last index.\nfoodVariable.index('pancake') #This will now return the number for the last item in the list which is 5.\nfoodVariable.insert(2,'lamb') #This will add 'lamb' in the third [2] 0,1,2 record and bump the rest of the items up 1 step.\n#Removing with the remove() list methond. Syntax = variable.remove('item').\n#Just like using \"del variable[0]\"\" for deleting index item 0, we can use the remove() function.\nfoodVariable.remove('pizza') #This will delete the record by typing in the name instead of index number. Keep in mind that if you have pizza stored twice or more, it will only remove the first item in the list.\n\n\n#For a list of numbers we can use the sort() method to sort them.\nlistVariableTen = [15,-35,75,47,12,-12,-63,-1,-2,73,12,-35]\nlistVariableTen.sort() #If we do a print(listVariableTen) we will see that this list is sorted from lowest to highest number.\n#This also works for alphabetical order (or ASCII-betical order since upper case characters will come before lower case characters).\nlistVariableEleven = ['plane','boat','car','train','submarine']\nlistVariableEleven.sort() #When printing out the list it will output -> ['boat', 'car', 'plane', 'submarine', 'train']\nlistVariableEleven.sort(reverse=True) #This sorts it the opposite way. -> ['train', 'submarine', 'plane', 'car', 'boat']\n#Remember that lists with numbers and characters can`t be sorted. It will throw out an unordable types error.\n#Taking a look at the upper and lower characters in the list.\nlistVariableEleven = ['Plane','boat','Car','Train','submarine','motorbike','thanks','Spaceshuttle'] #It will output -> ['Car', 'Plane', 'Spaceshuttle', 'Train', 'boat', 'motorbike', 'submarine', 'thanks']\n#For true alphabetical order we have to convert the upper case characters to lowercase.\nlistVariableEleven.sort(key=str.lower) #Now we have -> ['boat', 'Car', 'motorbike', 'Plane', 'Spaceshuttle', 'submarine', 'thanks', 'Train']\n\n\n#Get a random value from the list. A magic 8 ball program.\nimport random #We need to import the random module.\n#We store the list in randomVariable9.\nrandomVariable8 = [\n'1',\n'2',\n'3',\n'4',\n'5',\n'6',\n'7',\n'8',\n]\nrandomVariable9 = int(randomVariable8[random.randint(0,len(randomVariable8)-1)]) #We store one random item from the list in randomVariable9.\nif randomVariable9 == int(1):\n print('Today I`m having ' + str(int(randomVariable9)) + ' slice of pizza for dinner')\nelse:\n print('Today I`m having ' + str(int(randomVariable9)) + ' slices of pizza for dinner')\n\n\n#A list and a string is basically the same if you consider the fact that every letter in a string has its own index. We can see this by passing a string inside a list() function.\nlist('Hello!') #This will print out: ['H', 'e', 'l', 'l', 'o', '!'] in a python shell.\n#What can be done with lists can often also be done with strings.\nlistVariableTen = 'Hello!'\nlistVariableTen[0] #This will print out: 'H' (the first character in the variable).\nlistVariableTen[1:4] #This will print out: 'ell' ([1:4] means index 1 to 4 not counting 4 or the first one since it starts counting from 0).\nlistVariableTen[-2] #This will print out: 'o' ([-2] means counting backwards 2 steps from the last index).\n#The one difference is that a string cannot be changed. They are immutable..\nlistVariableTen[3] = 'p' #This will throw out a TypeError: 'str' object does not support item assignment\n#The right way to modify a string is to create a new string by using 'slices' then store it as a new variable. -> a slice looks like this: variablename[0:10].\nlistVariableEleven = 'Emil is a nerd!'\nmodifiedlistVariableEleven = listVariableEleven[0:10] + 'proffessional' + listVariableEleven[9:15] #listVariableEleven[0:10] = \"Emil is a \" listVariableEleven[9:15] = \" nerd!\". [0:10] is a slice..\n\n\n\n#Taking a look at the copy.deepcopy() function.\n#Until now we`ve been looking at ways to reference lists and not really copying them to have a complete seperate list.\n#We can import the copy module to do this.\nimport copy #Import the copy module so that we can run the copy.deepcopy() function.\nlistVariableTwelve = [\n'rice',\n'ham',\n'tomato',\n'potato',\n'spaghetti',\n'steak',\n'cod',\n'lettuce'\n]\ncopiedlistVariableTwelve = copy.deepcopy(listVariableTwelve) #Do a copy of the listVariableTwelve and reference the copy to the new variable copiedlistVariableTwelve.\n\ncopiedlistVariableTwelve.append('cucumber') #Now we modify the copiedlistVariableTwelve to store one more value.\nlistVariableTwelve[3] = ('apple') #Lets also modify the first list swapping out potato with apple.\n\nprint(listVariableTwelve) #Will print out: ['rice', 'ham', 'tomato', 'potato', 'spaghetti', 'steak', 'cod', 'lettuce']\nprint(copiedlistVariableTwelve)#Will print out: ['rice', 'ham', 'tomato', 'potato', 'spaghetti', 'steak', 'cod', 'lettuce', 'cucumber']\n","sub_path":"all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":24145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"572555110","text":"# %% -*- coding: utf-8 -*-\n'''\nAuthor: Shreyas Padhy\nDriver file for Standard UNet Implementation\n'''\nfrom __future__ import print_function\n\nimport argparse\nfrom timeit import default_timer as timer\nfrom tqdm import tqdm\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('Agg')\n\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision.utils import save_image\nimport torchvision.transforms as tr\n\nfrom data import BraTSDatasetUnet, TestDataset, BraTSDatasetLSTM\nfrom losses import DICELossMultiClass\nfrom models import UNet\n\n\ndef train(model, epoch, loss_list, train_loader, optimizer, criterion, args):\n model.train()\n for batch_idx, (image, mask) in enumerate(train_loader):\n if args.cuda:\n image, mask = image.cuda(), mask.cuda()\n\n image, mask = Variable(image), Variable(mask)\n\n optimizer.zero_grad()\n\n output = model(image)\n loss = criterion(output, mask)\n loss_list.append(loss.item())\n\n loss.backward()\n optimizer.step()\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tAverage DICE Loss: {:.6f}'.format(\n epoch, batch_idx * len(image), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\ndef generate_colorimg(output):\n colors = [[0, 0, 0], [0, 0, 255], [255, 0, 0], [0, 255, 0], [255, 255, 0], [0, 255, 255], [255, 0, 255], [255, 255, 255]]\n _, height, width = output.shape\n colorimg = np.zeros((height, width, 3), dtype=np.uint8)\n #colorimg = np.full((height, width, 3), 255, dtype=np.uint8)\n #colorimg = np.ones((masks.shape[1], masks.shape[2], 3), dtype=np.float32) * 255\n for y in range(height):\n for x in range(width):\n selected_color = colors[output[0,y,x]]\n colorimg[y,x,:] = selected_color\n return tr.ToTensor()(colorimg.astype(np.uint8))\n\ndef test(model, loader, criterion, args, validation=False, save_output=False):\n test_loss = 0\n model.eval()\n for batch_idx, (image, mask) in tqdm(enumerate(loader)):\n if args.cuda:\n image, mask = image.cuda(), mask.cuda()\n\n with torch.no_grad():\n image, mask = Variable(image), Variable(mask)\n pred = model(image)\n #pred = torch.sigmoid(pred)\n maxes, out = torch.max(pred, 1, keepdim=True)\n \n\n if save_output:\n save_image(image, './output/images/images-batch-{}.png'.format(batch_idx))\n save_image(mask, './output/masks/masks-batch-{}.png'.format(batch_idx))\n save_image(out, './output/predictions/outputs-batch-{}.png'.format(batch_idx))\n test_loss += criterion(pred, mask).item()\n # Average Dice Coefficient\n test_loss /= len(loader)\n if validation:\n print('\\nValidation Set: Average DICE Coefficient: {:.4f})\\n'.format(test_loss))\n else:\n print('\\nTest Set: Average DICE Coefficient: {:.4f})\\n'.format(test_loss))\n\ndef test_only(model, loader, criterion, args):\n model.eval()\n for batch_idx, image in tqdm(enumerate(loader)):\n if args.cuda:\n image = image.cuda()\n\n with torch.no_grad():\n image = Variable(image)\n pred = model(image)\n #pred = torch.sigmoid(pred)\n maxes, out = torch.max(pred, 1, keepdim=True)\n \n save_image(image, './test-output/images/images-batch-{}.png'.format(batch_idx))\n save_image(out, './test-output/predictions/outputs-batch-{}.png'.format(batch_idx))\n\ndef start():\n parser = argparse.ArgumentParser(description='UNet + BDCLSTM for BraTS Dataset')\n parser.add_argument('--batch-size', type=int, default=4, metavar='N', help='input batch size for training (default: 4)')\n parser.add_argument('--test-batch-size', type=int, default=4, metavar='N', help='input batch size for testing (default: 4)')\n parser.add_argument('--train', action='store_true', default=False, help='Argument to train model (default: False)')\n parser.add_argument('--epochs', type=int, default=2, metavar='N', help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.01)')\n parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training (default: False)')\n parser.add_argument('--log-interval', type=int, default=1, metavar='N', help='batches to wait before logging training status')\n parser.add_argument('--size', type=int, default=128, metavar='N', help='imsize')\n parser.add_argument('--load', type=str, default=None, metavar='str', help='weight file to load (default: None)')\n parser.add_argument('--data', type=str, default='./Data/', metavar='str', help='folder that contains data')\n parser.add_argument('--save', type=str, default='OutMasks', metavar='str', help='Identifier to save npy arrays with')\n parser.add_argument('--modality', type=str, default='flair', metavar='str', help='Modality to use for training (default: flair)')\n parser.add_argument('--optimizer', type=str, default='SGD', metavar='str', help='Optimizer (default: SGD)')\n\n args = parser.parse_args()\n args.cuda = args.cuda and torch.cuda.is_available()\n\n DATA_FOLDER = args.data\n\n # %% Loading in the model\n # Binary\n model = UNet(num_channels=1, num_classes=2)\n # Multiclass\n # model = UNet(num_channels=1, num_classes=3)\n\n if args.cuda:\n model.cuda()\n\n if args.optimizer == 'SGD':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.99)\n if args.optimizer == 'ADAM':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))\n\n # Defining Loss Function\n criterion = DICELossMultiClass()\n\n if args.train:\n # %% Loading in the Dataset\n full_dataset = BraTSDatasetUnet(DATA_FOLDER, im_size=[args.size, args.size], transform=tr.ToTensor())\n #dset_test = BraTSDatasetUnet(DATA_FOLDER, train=False, \n # keywords=[args.modality], im_size=[args.size,args.size], transform=tr.ToTensor())\n \n train_size = int(0.9 * len(full_dataset))\n test_size = len(full_dataset) - train_size\n train_dataset, validation_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])\n \n train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\n validation_loader = DataLoader(validation_dataset, batch_size=args.test_batch_size, shuffle=False, num_workers=1)\n #test_loader = DataLoader(full_dataset, batch_size=args.test_batch_size, shuffle=False, num_workers=1)\n\n print(\"Training Data : \", len(train_loader.dataset))\n print(\"Validaion Data : \", len(validation_loader.dataset))\n #print(\"Test Data : \", len(test_loader.dataset))\n \n loss_list = []\n start = timer()\n for i in tqdm(range(args.epochs)):\n train(model, i, loss_list, train_loader, optimizer, criterion, args)\n test(model, validation_loader, criterion, args, validation=True)\n end = timer()\n print(\"Training completed in {:0.2f}s\".format(end - start))\n\n plt.plot(loss_list)\n plt.title(\"UNet bs={}, ep={}, lr={}\".format(args.batch_size, args.epochs, args.lr))\n plt.xlabel(\"Number of iterations\")\n plt.ylabel(\"Average DICE loss per batch\")\n plt.savefig(\"./plots/{}-UNet_Loss_bs={}_ep={}_lr={}.png\".format(args.save, args.batch_size, args.epochs, args.lr))\n\n np.save('./npy-files/loss-files/{}-UNet_Loss_bs={}_ep={}_lr={}.npy'.format(args.save, args.batch_size, args.epochs, args.lr),\n np.asarray(loss_list))\n print(\"Testing Validation\")\n test(model, validation_loader, criterion, args, save_output=True)\n torch.save(model.state_dict(), 'unet-binary-{}-{}-{}'.format(args.batch_size, args.epochs, args.lr))\n \n print(\"Testing PDF images\")\n test_dataset = TestDataset('./pdf_data/', im_size=[args.size, args.size], transform=tr.ToTensor())\n test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False, num_workers=1)\n print(\"Test Data : \", len(test_loader.dataset))\n test_only(model, test_loader, criterion, args)\n \n elif args.load is not None:\n test_dataset = TestDataset(DATA_FOLDER, im_size=[args.size, args.size], transform=tr.ToTensor())\n test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False, num_workers=1)\n print(\"Test Data : \", len(test_loader.dataset))\n model.load_state_dict(torch.load(args.load))\n test_only(model, test_loader, criterion, args)\n #test(model, train_loader, test_loader, criterion, args, save_output=True, train_accuracy=True)\n\nif __name__ == '__main__':\n start()\n","sub_path":"binary-implementation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"251275663","text":"\"\"\"pytest tests for mytoyota.status.Energy\"\"\"\n\nfrom mytoyota.status import Energy\n\n# pylint: disable=no-self-use\n\n\nclass TestEnergy:\n \"\"\"pytest functions to test Energy\"\"\"\n\n @staticmethod\n def _create_example_data():\n \"\"\"Create list with predefined data\"\"\"\n return [\n {\n \"timestamp\": \"2021-09-19T14:02:37Z\",\n \"type\": \"HYDROGEN\",\n \"level\": 58.8,\n \"remainingRange\": 295,\n }\n ]\n\n @staticmethod\n def _create_example_legacy_data():\n \"\"\"Create dict with predefined data\"\"\"\n return {\"Fuel\": 69}\n\n @staticmethod\n def _create_example_battery_data():\n \"\"\"Create dict with predefined data\"\"\"\n return {\n \"BatteryPowerSupplyPossibleTime\": 16383,\n \"ChargeEndTime\": \"00:00\",\n \"ChargeRemainingAmount\": 100,\n \"ChargeStartTime\": \"22:10\",\n \"ChargeType\": 1,\n \"ChargeWeek\": 5,\n \"ChargingStatus\": \"chargeComplete\",\n \"ConnectorStatus\": 5,\n \"EvDistanceInKm\": 79.9,\n \"EvDistanceWithAirCoInKm\": 73.51,\n \"EvTravelableDistance\": 79.9,\n \"EvTravelableDistanceSubtractionRate\": 8,\n \"PlugInHistory\": 33,\n \"PlugStatus\": 45,\n \"RemainingChargeTime\": 65535,\n \"SettingChangeAcceptanceStatus\": 0,\n }\n\n def test_energy_km(self):\n \"\"\"Test energy in unit km\"\"\"\n energy = Energy(self._create_example_data())\n\n assert energy.level == 58.8\n assert energy.range == 295\n assert energy.type == \"Hydrogen\"\n assert energy.last_updated == \"2021-09-19T14:02:37Z\"\n\n def test_energy_mi(self):\n \"\"\"Test energy in unit mi\"\"\"\n energy = Energy(self._create_example_data(), unit=\"mi\")\n\n assert energy.legacy is False\n\n assert energy.level == 58.8\n assert energy.range == 183.3045\n assert energy.type == \"Hydrogen\"\n assert energy.last_updated == \"2021-09-19T14:02:37Z\"\n\n def test_energy_no_data(self):\n \"\"\"Test energy with no initialization data\"\"\"\n energy = Energy([{}])\n\n assert energy.legacy is False\n\n assert energy.level is None\n assert energy.range is None\n assert energy.type == \"Unknown\"\n assert energy.last_updated is None\n\n def test_energy_str(self):\n \"\"\"Test energy converted to a string\"\"\"\n energy = Energy(self._create_example_data())\n\n string = str(energy)\n assert isinstance(string, str)\n assert (\n string == \"{'level': 58.8, 'range': 295, 'type': 'Hydrogen', \"\n \"'last_updated': '2021-09-19T14:02:37Z'}\"\n )\n\n def test_energy_dict(self):\n \"\"\"Test energy converted to a dict\"\"\"\n energy = Energy(self._create_example_data())\n\n dictionary = energy.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\n \"level\": 58.8,\n \"range\": 295,\n \"type\": \"Hydrogen\",\n \"last_updated\": \"2021-09-19T14:02:37Z\",\n }\n\n def test_energy_legacy_km(self):\n \"\"\"Test legacy energy in unit km\"\"\"\n energy = Energy(self._create_example_legacy_data(), legacy=True)\n\n assert energy.legacy is True\n assert energy.level == 69\n\n energy.type = \"Petrol\"\n\n assert energy.type == \"Petrol\"\n assert energy.chargeinfo is None\n\n energy.set_battery_attributes(self._create_example_battery_data())\n\n assert energy.range == 79.9\n assert energy.range_with_aircon == 73.51\n assert isinstance(energy.chargeinfo, dict)\n assert energy.chargeinfo == {\n \"status\": \"chargeComplete\",\n \"remaining_time\": 65535,\n \"remaining_amount\": 100,\n \"start_time\": \"22:10\",\n \"end_time\": \"00:00\",\n }\n\n def test_energy_legacy_mi(self):\n \"\"\"Test legacy energy in unit mi\"\"\"\n energy = Energy(self._create_example_legacy_data(), unit=\"mi\", legacy=True)\n\n assert energy.legacy is True\n assert energy.level == 69\n\n energy.type = \"Petrol\"\n\n assert energy.type == \"Petrol\"\n assert energy.chargeinfo is None\n\n energy.set_battery_attributes(self._create_example_battery_data())\n\n assert energy.range == 49.6476\n assert energy.range_with_aircon == 45.677\n assert isinstance(energy.chargeinfo, dict)\n assert energy.chargeinfo == {\n \"status\": \"chargeComplete\",\n \"remaining_time\": 65535,\n \"remaining_amount\": 100,\n \"start_time\": \"22:10\",\n \"end_time\": \"00:00\",\n }\n\n def test_energy_legacy_no_data(self):\n \"\"\"Test energy with no initialization data\"\"\"\n energy = Energy({}, legacy=True)\n\n assert energy.legacy is True\n assert energy.level is None\n\n assert energy.type is None\n assert energy.chargeinfo is None\n\n energy.set_battery_attributes({})\n\n assert energy.range is None\n assert energy.range_with_aircon is None\n assert isinstance(energy.chargeinfo, dict)\n assert energy.chargeinfo == {\n \"status\": None,\n \"remaining_time\": None,\n \"remaining_amount\": None,\n \"start_time\": None,\n \"end_time\": None,\n }\n\n def test_energy_legacy_str(self):\n \"\"\"Test energy converted to a string\"\"\"\n energy = Energy(self._create_example_legacy_data(), legacy=True)\n\n string = str(energy)\n assert isinstance(string, str)\n assert string == \"{'legacy': True, 'level': 69}\"\n\n energy.type = \"Petrol\"\n\n energy.set_battery_attributes(self._create_example_battery_data())\n\n string_with_battery = str(energy)\n assert isinstance(string_with_battery, str)\n assert (\n string_with_battery\n == \"{'legacy': True, 'level': 69, 'type': 'Petrol', 'range': 79.9, \"\n \"'range_with_aircon': 73.51, \"\n \"'chargeinfo': {'status': 'chargeComplete', 'remaining_time': 65535, \"\n \"'remaining_amount': 100, \"\n \"'start_time': '22:10', 'end_time': '00:00'}}\"\n )\n\n def test_energy_legacy_dict(self):\n \"\"\"Test energy converted to a dict\"\"\"\n energy = Energy(self._create_example_legacy_data(), legacy=True)\n\n dictionary = energy.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\n \"level\": 69,\n \"legacy\": True,\n }\n\n energy.type = \"Petrol\"\n\n energy.set_battery_attributes(self._create_example_battery_data())\n\n dict_with_battery = energy.as_dict()\n assert isinstance(dict_with_battery, dict)\n assert dict_with_battery == {\n \"level\": 69,\n \"legacy\": True,\n \"type\": \"Petrol\",\n \"range\": 79.9,\n \"range_with_aircon\": 73.51,\n \"chargeinfo\": {\n \"status\": \"chargeComplete\",\n \"remaining_time\": 65535,\n \"remaining_amount\": 100,\n \"start_time\": \"22:10\",\n \"end_time\": \"00:00\",\n },\n }\n","sub_path":"tests/test_energy.py","file_name":"test_energy.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"356367998","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 07:21:43 2017\n\n@author: Wu\n\"\"\"\n\nimport os\nimport re\nimport pandas as pd\n\n#read file\nos.chdir('c:/coding/228')\nrawCompen=pd.read_csv('compensate.txt',sep='\\t')\n\n\n# fill na by column type\nfor col in ['age']:\n rawCompen[col]=rawCompen[col].fillna(0)\nfor col in ['name', 'town', 'profession1', 'profession2', 'compenDate', 'regDate', 'type']:\n rawCompen[col]=rawCompen[col].fillna('')\n\n\n# formating death date \nfor col in ['compenDate','regDate']:\n rawCompen[col]=rawCompen[col].apply(lambda x:'0'+x if re.match('^[0-9]{3}$',x) else x)\n rawCompen[col]=rawCompen[col].str.replace('?','0')\n rawCompen[col]=rawCompen[col].apply(lambda x:'1947'+x if re.match('^[0-9]{4}$',x) else x)\n\n\n# choose a valid death date \ndef compareDate(row):\n if all([row['compenDate'],row['regDate']]) and row['compenDate']==row['regDate']:\n return 'agree'\n elif all([row['compenDate'],row['regDate']]) and row['compenDate']!=row['regDate']:\n return 'agreeMonth' if row['compenDate'][4:6]==row['regDate'][4:6] else 'disagree'\n \n elif not any([row['compenDate'],row['regDate']]):\n return 'na'\n else:\n return 'compenDate' if row['compenDate']!='' else 'regDate'\n \ndef cleanDate(row):\n if row['compareDate']=='agree':\n return row['compenDate']\n elif row['compareDate']=='disagree':\n # manully check rawCompen to comfrim that compenDate were more reasonable in most cases\n return row['compenDate'] \n elif row['compareDate'] in ['compenDate','regDate']:\n return row[row['compareDate']]\n elif row['compareDate']=='na':\n return ''\n \nrawCompen['compareDate']=rawCompen.apply(compareDate,axis=1)\nrawCompen['deathDate']=rawCompen.apply(cleanDate,axis=1)\n\n\n# formating rawCompen output\noutput=rawCompen.drop(['compenDate','regDate','compareDate'],axis=1)\noutput.to_csv('cleanCompenrawCompen.txt',encoding='utf8',sep='\\t',index=False)","sub_path":"cleanCompenData.py","file_name":"cleanCompenData.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"74023679","text":"\"\"\"MobileNet model trained on ImageNet dataset.\"\"\"\n\nfrom typing import List\n\nfrom lit_nlp.api import model\nfrom lit_nlp.api import types as lit_types\nfrom lit_nlp.examples.models import imagenet_labels\nfrom lit_nlp.lib import image_utils\nfrom lit_nlp.lib import utils as lit_utils\nimport numpy as np\nimport tensorflow as tf\n\n# Internal shape of the model input (h, w, c).\nIMAGE_SHAPE = (224, 224, 3)\n\n\nclass MobileNet(model.Model):\n \"\"\"MobileNet model trained on ImageNet dataset.\"\"\"\n\n class MobileNetSpec(model.ModelSpec):\n\n def is_compatible_with_dataset(self, dataset_spec: lit_types.Spec) -> bool:\n image_field_names = lit_utils.find_spec_keys(dataset_spec,\n lit_types.ImageBytes)\n return bool(image_field_names)\n\n def __init__(self) -> None:\n # Initialize imagenet labels.\n self.labels = [''] * len(imagenet_labels.IMAGENET_2012_LABELS)\n self.label_to_idx = {}\n for i, l in imagenet_labels.IMAGENET_2012_LABELS.items():\n l = l.split(',', 1)[0]\n self.labels[i] = l\n self.label_to_idx[l] = i\n\n self.model = tf.keras.applications.mobilenet_v2.MobileNetV2()\n\n def predict_minibatch(\n self, input_batch: List[lit_types.JsonDict]) -> List[lit_types.JsonDict]:\n output = []\n for example in input_batch:\n # Convert input to the model acceptable format.\n img_data = example['image']\n if isinstance(img_data, str):\n img_data = image_utils.convert_image_str_to_array(img_data, IMAGE_SHAPE)\n # Get predictions.\n x = img_data[np.newaxis, ...]\n x = tf.convert_to_tensor(x)\n preds = self.model(x).numpy()[0]\n # Determine the gradient target.\n grad_target = example.get('grad_target')\n if grad_target is None:\n grad_target_idx = np.argmax(preds)\n else:\n grad_target_idx = self.label_to_idx[grad_target]\n # Calculate gradients.\n with tf.GradientTape() as tape:\n tape.watch(x)\n y = self.model(x)[0, grad_target_idx]\n grads = tape.gradient(y, x).numpy()[0]\n # Add results to the output.\n output.append({\n 'preds': preds,\n 'grads': grads,\n 'grad_target': imagenet_labels.IMAGENET_2012_LABELS[grad_target_idx]\n })\n\n return output\n\n def input_spec(self):\n return {\n 'image':\n lit_types.ImageBytes(),\n # If `grad_target` is not specified then the label with the highest\n # predicted score is used as the gradient target.\n 'grad_target':\n lit_types.CategoryLabel(vocab=self.labels, required=False)\n }\n\n def output_spec(self):\n return {\n 'preds':\n lit_types.MulticlassPreds(\n vocab=self.labels,\n autosort=True),\n 'grads':\n lit_types.ImageGradients(\n align='image', grad_target_field_key='grad_target'),\n 'grad_target':\n lit_types.CategoryLabel(vocab=self.labels)\n }\n\n def spec(self) -> model.ModelSpec:\n return self.MobileNetSpec(\n input=self.input_spec(), output=self.output_spec())\n","sub_path":"lit_nlp/examples/models/mobilenet.py","file_name":"mobilenet.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"299509327","text":"\"\"\"\nTest layers\n\"\"\"\n\nfrom . import _setup_test_env # noqa\nfrom returnn_common.models.layers import *\nfrom returnn_common.models.base import get_extern_data, NameCtx, Layer, LayerRef\nfrom pprint import pprint\nfrom nose.tools import assert_equal\n\n\ndef test_simple_net():\n class _Net(Module):\n def __init__(self):\n super().__init__()\n self.lstm = Lstm(n_out=13)\n\n def forward(self) -> LayerRef:\n \"\"\"\n Forward\n \"\"\"\n x = get_extern_data(\"data\")\n x = self.lstm(x)\n return x\n\n net = _Net()\n net_dict = net.make_root_net_dict()\n pprint(net_dict)\n assert \"lstm\" in net_dict\n\n\ndef test_simple_net_share_params():\n class _Net(Module):\n def __init__(self):\n super().__init__()\n self.lstm = Lstm(n_out=13)\n\n def forward(self) -> LayerRef:\n \"\"\"\n Forward\n \"\"\"\n x = get_extern_data(\"data\")\n x = self.lstm(x)\n x = self.lstm(x)\n return x\n\n net = _Net()\n net_dict = net.make_root_net_dict()\n pprint(net_dict)\n assert \"lstm\" in net_dict\n assert \"lstm_0\" in net_dict\n assert_equal(net_dict[\"lstm_0\"][\"reuse_params\"], \"lstm\")\n\n\ndef test_explicit_root_ctx():\n class Net(Module):\n \"\"\"\n Net\n \"\"\"\n def __init__(self, l2=1e-07, dropout=0.1, n_out=13):\n super().__init__()\n self.linear = Linear(n_out=n_out, l2=l2, dropout=dropout, with_bias=False, activation=None)\n\n def forward(self, x: LayerRef) -> LayerRef:\n \"\"\"\n forward\n \"\"\"\n x = self.linear(x)\n return x\n\n with NameCtx.new_root() as name_ctx:\n net = Net()\n out = net(get_extern_data(\"data\"))\n assert isinstance(out, Layer)\n assert_equal(out.get_name(), \"Net\")\n\n net_dict = name_ctx.make_net_dict()\n pprint(net_dict)\n\n assert \"Net\" in net_dict\n sub_net_dict = net_dict[\"Net\"][\"subnetwork\"]\n assert \"linear\" in sub_net_dict\n lin_layer_dict = sub_net_dict[\"linear\"]\n assert_equal(lin_layer_dict[\"class\"], \"linear\")\n assert_equal(lin_layer_dict[\"from\"], \"base:data:data\")\n\n\ndef test_root_mod_call_twice():\n class TestBlock(Module):\n \"\"\"\n Test block\n \"\"\"\n def __init__(self, l2=1e-07, dropout=0.1, n_out=13):\n super().__init__()\n self.linear = Linear(n_out=n_out, l2=l2, dropout=dropout, with_bias=False, activation=None)\n\n def forward(self, x: LayerRef) -> LayerRef:\n \"\"\"\n forward\n \"\"\"\n x = self.linear(x)\n return x\n\n with NameCtx.new_root() as name_ctx:\n test_block = TestBlock()\n y = test_block(get_extern_data(\"input1\"))\n z = test_block(get_extern_data(\"input2\"))\n\n print(y)\n assert isinstance(y, LayerRef)\n assert_equal(y.get_name(), \"TestBlock\")\n print(z)\n assert isinstance(z, LayerRef)\n assert_equal(z.get_name(), \"TestBlock_0\")\n\n net_dict = name_ctx.make_net_dict()\n pprint(net_dict)\n\n assert \"TestBlock\" in net_dict and \"TestBlock_0\" in net_dict\n assert_equal(net_dict[\"TestBlock_0\"][\"reuse_params\"], \"TestBlock\")\n","sub_path":"tests/test_models_layers.py","file_name":"test_models_layers.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"267911746","text":"class User:\r\n def __init__(self, name, email):\r\n self.name = name # String\r\n self.email = email # Email Format\r\n self.phone = '' # String\r\n self.gender = '' # Female : F, Male: M\r\n self.date_of_birth = '01-01-2019' # DD-MM-YYYY\r\n self.address = '' # String\r\n self.avatar = '' # String URL\r\n self.jobs = [] # String List\r\n self.topic_share = [] # String List\r\n self.reach = [0, 0, 0] # Like, Share, Comment\r\n self.pages_manager = [] # String List\r\n self.cmnd_id = '' # String\r\n self.bank_acct_id = '' # String\r\n self.facebook = [] # String List[fb_id, uname_fb, url_profile_fb]\r\n self.google = [] # String List [gg_id, uname_gg, url_profile_gg]\r\n self.instagram = [] # String List [ins_id, uname_ins, url_profile_ins] \r\n\r\n def update_cmnd(self, cmnd_id):\r\n self.cmnd_id = cmnd_id\r\n \r\n def update_bank_acct(self, bank_acct_id):\r\n self.bank_acct_id = bank_acct_id\r\n \r\n","sub_path":"webapp/objClass/user_class.py","file_name":"user_class.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"358686921","text":"import discord\nfrom discord import Colour\nfrom discord.ext import commands\nfrom .utils import EmbedPages, PageTypes\n\nclass Warnings(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n async def _warnlist_member(self, ctx, member, page_num = 1):\n \"\"\"Handle gettings the warns for a specific member\"\"\"\n warns = []\n async with self.bot.pool.acquire() as connection:\n warns = await connection.fetch('SELECT * FROM warn WHERE member_id = ($1) AND guild_id = $2 ORDER BY id;', member.id, ctx.guild.id)\n\n if len(warns) > 0:\n embed = EmbedPages(PageTypes.WARN, warns, \"Warnings\", Colour.from_rgb(177,252,129), self.bot, ctx.author, ctx.channel)\n await embed.set_page(int(page_num))\n await embed.send()\n else:\n await ctx.send(\"No warnings recorded!\")\n\n @commands.command(pass_context=True)\n @commands.guild_only()\n async def warn(self, ctx, member: discord.Member, *, reason):\n \"\"\"Gives a member a warning, a reason is optional but recommended.\"\"\"\n if not await self.bot.is_staff(ctx):\n await ctx.send(\"You do not have permissions to warn.\")\n return\n\n parsed_args = self.bot.flag_handler.separate_args(reason, fetch=[\"reason\"], blank_as_flag=\"reason\")\n reason = parsed_args[\"reason\"]\n if len(reason) > 255:\n await ctx.send('The reason must be below 256 characters. Please shorten it before trying again.')\n return\n \n warns = 0\n async with self.bot.pool.acquire() as connection:\n await connection.execute('INSERT INTO warn (member_id, staff_id, guild_id, reason) values ($1, $2, $3, $4)', member.id, ctx.author.id, ctx.guild.id, reason)\n warns = await connection.fetchval('SELECT COUNT(*) FROM warn WHERE member_id = ($1) AND guild_id = $2', member.id, ctx.guild.id)\n\n await ctx.send(f':ok_hand: {member.mention} has been warned. They now have {warns} warns')\n try:\n await member.send(f'You have been warned by a member of the staff team ({ctx.author.mention}). The reason for your warn is: {reason}. You now have {warns} warns.')\n except Exception as e:\n print(e)\n\n @commands.command(pass_context = True)\n @commands.guild_only()\n async def warns(self, ctx, member: discord.Member = None):\n \"\"\"Shows a user their warnings, or shows staff members all/a single persons warnings\"\"\"\n is_staff = await self.bot.is_staff(ctx)\n if is_staff:\n if not member:\n # Show all warns\n warns = []\n async with self.bot.pool.acquire() as connection:\n warns = await connection.fetch('SELECT * FROM warn WHERE guild_id = $1 ORDER BY id;', ctx.guild.id)\n\n if len(warns) > 0:\n embed = EmbedPages(PageTypes.WARN, warns, \"Warnings\", Colour.from_rgb(177,252,129), self.bot, ctx.author, ctx.channel)\n await embed.set_page(1)\n await embed.send()\n else:\n await ctx.send(\"No warnings recorded!\")\n else:\n # Show member's warns\n await self._warnlist_member(ctx, member, 1) # Last parameter is the page number to start on\n else:\n if not member or member.id == ctx.author.id:\n # Show ctx.author warns\n await self._warnlist_member(ctx, ctx.author, 1)\n else:\n await ctx.send(\"You don't have permission to view other people's warns.\")\n\n @commands.command(pass_context=True, aliases=['warndelete'])\n @commands.guild_only()\n async def warnremove(self, ctx, *warnings):\n \"\"\"Remove warnings with this command, can do `warnremove ` or `warnremove ... `.\"\"\"\n if not await self.bot.is_staff(ctx):\n await ctx.send(\"You do not have permissions to remove a warning.\")\n return\n\n # if warnings[0].lower() == 'all':\n # async with self.bot.pool.acquire() as connection:\n # await connection.execute('DELETE FROM warn WHERE guild_id = $1', ctx.guild.id)\n # await ctx.send(\"All warnings on this guild have been removed.\")\n\n if len(warnings) > 1:\n await ctx.send('One moment...')\n\n async with self.bot.pool.acquire() as connection:\n for warning in warnings:\n try:\n warning = int(warning)\n existing_warnings = await connection.fetch(\"SELECT * FROM warn WHERE id = $1 AND guild_id = $2\", warning, ctx.guild.id)\n if len(existing_warnings) == 0:\n await ctx.send(\"You cannot remove warnings originating from another guild, or those that do not exist.\")\n continue # Try next warning instead\n\n await connection.execute('DELETE FROM warn WHERE id = ($1) AND guild_id = $2', warning, ctx.guild.id)\n if len(warnings) == 1:\n await ctx.send(f'Warning with ID {warning} has been deleted.')\n\n except ValueError:\n await ctx.send(f'Error whilst deleting ID {warning}: give me a warning ID, not words!')\n\n if len(warnings) > 1:\n await ctx.send(f\"The warning's have been deleted.\")\n\ndef setup(bot):\n bot.add_cog(Warnings(bot))\n","sub_path":"cogs/warnings.py","file_name":"warnings.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"192440333","text":"import pygame\n\nfrom view_manager import ViewManager\nfrom vertical_list_view import VerticalListView\nfrom root_menu import RootMenu\n\n\nsize = (128, 64)\nscale_factor = 4\nviewport_size = tuple(i * scale_factor for i in size)\nscreen = pygame.display.set_mode(viewport_size)\npygame.display.flip()\n\nrunning = True\n\nroot_view = RootMenu()\n\nrenderer = ViewManager(size, root_view, \"RGB\")\nwhile running:\n pil_frame = renderer.get_frame()\n frame_bytes = pil_frame.tobytes()\n pygame_frame = pygame.image.frombuffer(frame_bytes, pil_frame.size, pil_frame.mode)\n\n pygame_scaled_frame = pygame.transform.scale(pygame_frame, viewport_size)\n\n screen.blit(pygame_scaled_frame, (0, 0) + viewport_size)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n renderer.handle_key('up')\n if event.key == pygame.K_DOWN:\n renderer.handle_key('down')\n if event.key == pygame.K_LEFT:\n renderer.handle_key('left')\n if event.key == pygame.K_RIGHT:\n renderer.handle_key('right')\n if event.key == pygame.K_z or event.key == pygame.K_a:\n renderer.handle_key('a')\n if event.key == pygame.K_x or event.key == pygame.K_b:\n renderer.handle_key('b')\n if event.key == pygame.K_c:\n renderer.handle_key('middle')\n","sub_path":"pygame_harness.py","file_name":"pygame_harness.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"169770166","text":"#!/Users/benjaminculver/anaconda3/bin/python\nimport json, requests, datetime, sys\nfrom colorama import Fore, Back, Style\n\n\n# Getting city code given city and state\ndef get_city_id(city:str, state:str):\n with open(\"/Users/benjaminculver/PythonProjects/Deployed/Weather/city_list.json\", \"r\") as f:\n c = city\n s = state\n cities = json.load(f)\n for city in cities:\n if city[\"name\"].upper() == c.upper() and city[\"state\"].upper() == s.upper():\n return city[\"id\"]\n\n\ndef get_weather(city_id):\n if not city_id:\n return (f\"That city name was not found\\n\"\n f\"Pleas try again and enter another city near by\"\n )\n\n url = f\"http://api.openweathermap.org/data/2.5/weather?id={city_id}&units=imperial&APPID=581919d5c3cf448cd889299a469fe4ac\"\n response = requests.get(url).text\n info = json.loads(response)\n \n name = info[\"name\"]\n description = info['weather'][0]['description']\n current_temp = str(round(info[\"main\"][\"temp\"])) + \"°\" \n feels_like = str(round(info[\"main\"][\"feels_like\"])) + \"°\"\n temp_min = str(round(info[\"main\"][\"temp_min\"])) + \"°\"\n temp_max = str(round(info[\"main\"][\"temp_max\"])) + \"°\"\n humidity = str(round(info[\"main\"][\"humidity\"])) + \"%\"\n cloud_percentage = str(info[\"clouds\"][\"all\"]) + \"%\"\n sunset = datetime.datetime.fromtimestamp(info[\"sys\"][\"sunset\"]).strftime(\"%I:%M %p\")\n\n result = (\n Fore.YELLOW + f\"{name} - \" + \n Fore.GREEN + Style.BRIGHT + f\"Current Temp\" + Fore.LIGHTMAGENTA_EX + \": \" + Fore.YELLOW + f\"{current_temp}F - \" +\n Fore.GREEN + Style.BRIGHT + f\"Feels Like\" + Fore.LIGHTMAGENTA_EX + \": \" + Fore.YELLOW + f\"{feels_like}F - \" +\n Fore.GREEN + Style.BRIGHT + f\"Min/Max\" + Fore.LIGHTMAGENTA_EX + f\": \" + Fore.YELLOW + f\"{temp_min}/{temp_max} - \" +\n Fore.GREEN + Style.BRIGHT + f\"Humidity\" + Fore.LIGHTMAGENTA_EX + f\": \" + Fore.YELLOW + f\"{humidity}\"\n )\n\n return result\n\ndef main(args):\n if len(args) == 1:\n print(\"More arguments needed\")\n elif len(args) > 1:\n city = str(args[0])\n state = str(args[1])\n\n city_id = get_city_id(city, state)\n result = get_weather(city_id)\n print(result)\n else:\n result = get_weather(4574324)\n print(result)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n","sub_path":"Weather/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"154318232","text":"from django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('',\n url(r'^$', 'home.views.home'),\n url(r'^add$', 'home.views.add'),\n url(r'^map$', 'home.views.map'),\n url(r'^page$', 'home.views.page'),\n url(r'^add_contest$', 'home.views.add_contest'),\n url(r'^tweetEvening$', 'home.views.tweetEvening'),\n url(r'^tweetMorning$', 'home.views.tweetMorning'),\n url(r'^tweetAlways$', 'home.views.tweetAlways'),\n)\n","sub_path":"ContestTweets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"30492349","text":"import os\nwhile os.path.isfile('Temp.txt')==False: pass\nfile = open('Temp.txt', 'r', encoding='utf-8')\nS1=''\nS2=[]\nfor i in file:\n if S1=='':\n \tS1=i\n else:\n \tS2+=i.capitalize()\nfile.close()\ns1=''\ns2=''\nfor i in S1:\n if i.isalpha() or i==' ':\n s1+=i\nfor i in ''.join(S2):\n if i.isnumeric()==False:\n s2+=i\nfile = open('Finally.txt', 'w', encoding='utf-8')\nfile.writelines(s1.capitalize()+'\\n')\nfile.writelines(s2)\nfile.close()\n#os.remove('Temp.txt')","sub_path":"BTL_Dictionary/SourceData/SourcePython/ConvertToTitleEV.py","file_name":"ConvertToTitleEV.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"141926353","text":"\"\"\"Test the config module.\"\"\"\n\nfrom argparse import Namespace\nimport pytest\n\nfrom ncbi_genome_download.config import NgdConfig, SUPPORTED_TAXONOMIC_GROUPS, _create_list\n\n\ndef test_init():\n \"\"\"Test NgdConfig initialises with the correct default values.\"\"\"\n config = NgdConfig()\n\n for key in NgdConfig._DEFAULTS:\n expected = NgdConfig._DEFAULTS[key]\n if key in NgdConfig._LIST_TYPES:\n if expected:\n if expected[0] == 'all':\n expected = expected[1:]\n else:\n expected = expected[:1]\n if 'all' in expected:\n expected = expected[::].remove('all')\n elif isinstance(expected, list):\n expected = expected[0]\n\n assert getattr(config, key) == expected\n\n\ndef test_from_kwargs():\n \"\"\"Test NgdConfig initialises correctly from kwargs.\"\"\"\n config = NgdConfig.from_kwargs(parallel=2)\n assert config.parallel == 2\n\n with pytest.raises(ValueError):\n NgdConfig.from_kwargs(garbage=\"wow\")\n\n\ndef test_from_namespace():\n \"\"\"Test NgdConfig initialises correctly from a Namespace object.\"\"\"\n args = Namespace(parallel=2)\n config = NgdConfig.from_namespace(args)\n assert config.parallel == 2\n\n\ndef test_section():\n \"\"\"Test NgdConfig.section getters/setters.\"\"\"\n config = NgdConfig()\n\n with pytest.raises(ValueError):\n config.section = 'garbage'\n\n\ndef test_group():\n \"\"\"Test NgdConfig.group getters/setters.\"\"\"\n config = NgdConfig()\n\n assert config.group == SUPPORTED_TAXONOMIC_GROUPS\n\n config.group = ['bacteria', 'fungi']\n assert config.group == ['bacteria', 'fungi']\n\n config.group = \"all\"\n assert config.group == SUPPORTED_TAXONOMIC_GROUPS\n\n with pytest.raises(ValueError):\n config.group = \"garbage\"\n\n\ndef test_file_format():\n \"\"\"Test NgdConfig.file_format getters/setters.\"\"\"\n config = NgdConfig()\n\n assert config.file_format == ['genbank']\n\n config.file_format = ['genbank', 'fasta']\n assert config.file_format == ['genbank', 'fasta']\n\n config.file_format = \"all\"\n assert config.file_format == list(NgdConfig._FORMATS)\n\n with pytest.raises(ValueError):\n config.file_format = \"garbage\"\n\n\ndef test_assembly_level():\n \"\"\"Test NgdConfig.assembly_level getters/setters.\"\"\"\n config = NgdConfig()\n\n with pytest.raises(ValueError):\n config.assembly_level = 'garbage'\n\n\ndef test_is_compatible_assembly_level():\n \"\"\"Test NgdConfig.is_compatible_assembly_level.\"\"\"\n config = NgdConfig()\n ncbi_string = \"Complete Genome\"\n\n assert config.is_compatible_assembly_level(ncbi_string)\n\n config.assembly_level = \"complete\"\n assert config.is_compatible_assembly_level(ncbi_string)\n\n config.assembly_level = \"chromosome,complete\"\n assert config.is_compatible_assembly_level(ncbi_string)\n\n config.assembly_level = \"chromosome\"\n assert not config.is_compatible_assembly_level(ncbi_string)\n\n\ndef test_assembly_accessions():\n \"\"\"Test NgdConfig.assembly_accessions getters/setters.\"\"\"\n config = NgdConfig()\n\n assert config.assembly_accessions == []\n\n config.assembly_accessions = \"GCF_000203835.1\"\n assert config.assembly_accessions == ['GCF_000203835.1']\n\n config.assembly_accessions = \"GCF_000203835.1,GCF_000444875.1\"\n assert config.assembly_accessions == ['GCF_000203835.1', 'GCF_000444875.1']\n\n\ndef test_is_compatible_assembly_accession():\n \"\"\"Test NgdConfig.is_compatible_assembly_accession.\"\"\"\n config = NgdConfig()\n\n assert config.is_compatible_assembly_accession(\"GCF_000444875.1\")\n\n config.assembly_accessions = \"GCF_000203835.1,GCF_000444875.1\"\n assert config.is_compatible_assembly_accession(\"GCF_000444875.1\")\n\n config.assembly_accessions = \"GCF_000203835.1\"\n assert not config.is_compatible_assembly_accession(\"GCF_000444875.1\")\n\n\ndef test_refseq_category():\n \"\"\"Test NgdConfig.refseq_category getters/setters.\"\"\"\n config = NgdConfig()\n\n with pytest.raises(ValueError):\n config.refseq_category = 'garbage'\n\n\ndef test_get_choices():\n \"\"\"Test NgdConfig.get_choices works.\"\"\"\n assert NgdConfig.get_choices('refseq_category') == ['all', 'reference', 'representative']\n\n with pytest.raises(KeyError):\n NgdConfig.get_choices('garbage')\n\n with pytest.raises(ValueError):\n NgdConfig.get_choices('uri')\n\n\ndef test_create_list(tmpdir):\n \"\"\"Test creating lists from different inputs works as expected.\"\"\"\n expected = [\"foo\", \"bar\", \"baz\"]\n\n ret = _create_list([\"foo\", \"bar\", \"baz\"])\n assert ret == expected\n\n ret = _create_list(\"foo,bar,baz\")\n assert ret == expected\n\n listfile = tmpdir.join('listfile.txt')\n listfile.write(\"foo\\nbar\\nbaz\")\n ret = _create_list(str(listfile), allow_filename=True)\n assert ret == expected\n\n with pytest.raises(ValueError):\n _create_list(123)\n","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"246147402","text":"\n\n#calss header\nclass _CURSE():\n\tdef __init__(self,): \n\t\tself.name = \"CURSE\"\n\t\tself.definitions = [u'magic words that are intended to bring bad luck to someone: ', u'a cause of trouble and unhappiness: ', u\"a woman's period (= flow of blood each month)\", u'a rude word or phrase']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_curse.py","file_name":"_curse.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"363439024","text":"import requests\nr = requests.get('http://example.com') #простой get запрос\nprint(r.text) #вывод ответа от сервера в виде текста\n######################\nurl = 'http://example.com'\npar = {'key1': 'value1', 'key2': 'value2'}\nr = requests.get(url, params= par)#передача параметров в запрос\nprint(r.url) #сформированный url адрес с учтом параметров GET запроса\n#####################\nurl = 'http://httpbin.org/cookies'\ncookies = {'cookies_are': 'working'}\nr = requests.get(url, cookies= cookies) #отправка сформированных cookies на сервер\nprint(r.text)\n####################\nprint(r.cookies['example_cookie_name'])","sub_path":"stepik_task_week3_6_1.py","file_name":"stepik_task_week3_6_1.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"378665155","text":"# -*- coding: utf-8 -*-\n# @Author: danielcarvalho\n# @Date: 2016-12-20 05:46:19\n# @Last Modified by: danielcarvalho\n# @Last Modified time: 2016-12-20 06:16:38\n\n# ----------- EX. 3 --------------\ndef kmPerDay(steps):\n\tkm = (steps * 1.2)* 0.001\n\n\treturn km\n\ndef caloriesPerDay(km):\n\tcalories = km * 32\n\n\treturn calories\n\ndef kmPerWeek(steps):\n\tweek_km = (steps * 1.2) * 0.001\n\n\treturn week_km\n\ndef averageCalories(km):\n\taverage = (km * 32) / 7\n\n\treturn average\n\nfile = open('steps.txt', 'r') #Open steps.txt file and give 'read' permission\n\ndaySteps = 0\nweekSteps = 0\n\nfor i in range(0,7):\n\tdaySteps = int(file.readline()) #Convert to integer the file line with number of steps\n\n\tprint(\"Day [{}]\".format(i+1))\n\tprint(\"Km: {:.2f}\".format(kmPerDay(daySteps)))\n\tprint(\"Calories: {:.2f}\".format(caloriesPerDay(kmPerDay(daySteps))))\n\tprint(\"\")\n\n\tweekSteps += daySteps\n\nprint(\"\\n--- This week ---\")\nprint(\"Total Km: {:.2f}\".format(kmPerWeek(weekSteps)))\nprint(\"Average calories: {:.2f}\".format(averageCalories(kmPerWeek(weekSteps))))\n","sub_path":"Solved Exams/A1/ex3_A1_14_15.py","file_name":"ex3_A1_14_15.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"243080746","text":"#\r\n# Author: Alvin Thai\r\n# Description:\r\n# Forms a guessing game between two players using inputs, if statements, lists, for and while loops and append. Player one submits an integer into an input and the player two guesses until the number is correct. Every guess adds into a list and the results are printed out after the other player guesses the right number.\r\n#\r\nmaximum = 100\r\n# A maximum limit to the guessing game is established by this variable. It can be adjusted.\r\ndef guessing():\r\n print(\"Welcome to the guessing game!\")\r\n print()\r\n count = 0\r\n guesses = []\r\n number = int(input(\"Player 1: Enter number for Player 2 to guess between 0 and \" \r\n + str(maximum) + \": \"))\r\n while number < 0 or number > maximum:\r\n if number < 0:\r\n print(\" That number is less than zero!\")\r\n number = int(input(\"Player 1: Enter number for Player 2 to guess between 0 and 100: \"))\r\n# Another input function is used to continue the while loop until an integer within the determined range is used. This allows another number to be used for the while loop.\r\n if number > maximum:\r\n print(\" That number is greater than \" + str(maximum)+ \"!\")\r\n number = int(input(\"Player 1: Enter number for Player 2 to guess between 0 and \" \r\n + str(maximum) + \": \"))\r\n print()\r\n guess = int(input(\"Player 2, guess a number: \"))\r\n# The input guess is run through an if-else statement which adds to the variable count and appends to the list guesses\r\n# The input guess is compared with the number Player 1 chose and this determines what prints out.\r\n if guess == number:\r\n count += 1\r\n guesses.append(guess)\r\n print(\"Correct!\")\r\n print(\"You got it right in \" + str(count) + \" guess! Wow, you are AMAZING.\") \r\n print() \r\n else:\r\n while guess != number:\r\n if guess < number:\r\n print(\"Too low...\")\r\n count += 1\r\n guesses.append(guess)\r\n guess = int(input(\"Player 2, guess a number: \"))\r\n #The next guess continues the while loop.\r\n elif guess > number:\r\n print(\"Too high...\")\r\n count += 1\r\n guesses.append(guess)\r\n guess = int(input(\"Player 2, guess a number: \"))\r\n elif guess < 0:\r\n print(\"The number is less than 0!\")\r\n count += 1\r\n guesses.append(guess)\r\n guess = int(input(\"Player 2, guess a number: \"))\r\n elif guess > 100:\r\n print(\"The number is greater than 100!\")\r\n count += 1\r\n guesses.append(guess)\r\n guess = int(input(\"Player 2, guess a number: \"))\r\n if guess == number:\r\n print(\"Correct!\")\r\n count += 1\r\n guesses.append(guess)\r\n print()\r\n return count, guesses\r\n# The next function uses returned variables count and guesses locally with generic variables a and b.\r\ndef statistics(a, b):\r\n print(\"It took you \" + str(a) + \" tries to guess correctly!\")\r\n print(\"The numbers you guessed were: \", end=\"\") # end=\"\" continues the statement on the same line.\r\n for guess in b:\r\n print(str(guess) + \" \", end=\"\")\r\n# The for loop prints each element of the list assigned to variable b and is followed by , end=\"\" to continue through each one on the same line. It is followed by an empty print statement to go onto the next line.\r\n print()\r\n print(\"Goodbye!\") \r\n# The values stored in the first function under main are assigned different variables again. The variables used will not change the output of the main function, because it runs at a different scope. \r\ndef main():\r\n x, y = guessing()\r\n statistics(x, y)\r\n \r\nmain()\r\n \r\n ","sub_path":"guessing.py","file_name":"guessing.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"172508667","text":"# Author(s): Djordje Vukcevic\n\nimport numpy as np\nimport sympy as sp\nfrom sympy import *\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.patches as mpatches\nfrom matplotlib import cm\nimport pandas as pd\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\ninput_data = np.loadtxt(\"plot_data.txt\", dtype='double', delimiter=' ')\nrows = input_data.shape[0]\n\n# Make data.\nX = input_data[:-1,0]\nxlen = len(X)\nY = input_data[:-1,1]\nylen = len(Y)\nZ = input_data[:-1,2]\n\ndf = pd.DataFrame({'x': X, 'y': Y, 'z': Z})\n\nfig = plt.figure()\nax = Axes3D(fig)\n\nsurf = ax.plot_trisurf(df.x, df.y, df.z, cmap = cm.jet, linewidth = 0.1)\nfig.colorbar(surf, shrink=0.5, aspect=5)\n\nax.set_xlabel(r'Friction torque 1 - [$Nm$]', fontsize=12)\nax.set_ylabel(r'Friction torque 2 - [$Nm$]', fontsize=12)\nax.set_zlabel(r'Acceleration energy - [$\\frac{Nm}{s^2}$]', fontsize=12)\n\nclass Arrow3D(FancyArrowPatch):\n\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\na = Arrow3D([input_data[-1,0], input_data[-1,0]], [input_data[-1,1], input_data[-1,1]], [input_data[-1,2], input_data[-1,2]+input_data[-1,2]/10], mutation_scale=20, lw=1, arrowstyle=\"-|>\", color=\"k\")\nax.add_artist(a)\n\nax.text(input_data[-1,0],input_data[-1,1], (input_data[-1,2] +input_data[-1,2]/10), 'Max: %d [%d, %d]' % (input_data[-1,2], input_data[-1,0], input_data[-1,1]), size=15, zorder=1, color='blue')\n\nplt.savefig('acc_energy_surface.pdf')\nplt.show()\n","sub_path":"src/Simulation/plotting_data.py","file_name":"plotting_data.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"640767905","text":"import json\n\nwith open('reaktor2.txt', 'r') as f:\n mystr = f.read()\n\nbinvals = mystr.split(' ')\nascvals = []\n\nfor b in binvals:\n ascv = 0\n for i in range(8):\n ascv += int(b[i]) * 2 ** (7-i)\n ascvals.append(ascv)\n\nprint(''.join([chr(x) for x in ascvals]))\n\njdict = json.loads(''.join([chr(x) for x in ascvals]))\n\n\n\nwith open('reaktor2_out.csv', 'w') as f:\n\n contaminents = {'SUM': 0}\n contaminenti = 1\n datetimes = {}\n datadict = {}\n\n for j in jdict:\n date = j['date']\n for reading in j['readings']:\n time = reading['time']\n datetimes[(date, time)] = reading['id']\n if (date, time) not in datadict:\n datadict[(date, time)] = [0]\n \n\n for (c, v) in reading['contaminants'].items():\n if c not in contaminents:\n contaminents[c] = contaminenti\n contaminenti += 1\n\n while len(datadict[(date, time)]) < contaminents[c]+1:\n datadict[(date, time)].append(None)\n\n print(contaminents[c])\n print(datadict[(date, time)])\n datadict[(date, time)][contaminents[c]] = int(v)\n datadict[(date, time)][contaminents['SUM']] += int(v)\n\n\n dts = sorted([x for x in datetimes])\n conts = sorted([x for x in contaminents])\n f.write('date,time,id,{}\\n'.format(','.join([c for c in conts])))\n for (d, t) in dts:\n outline = '{},{},{}'.format(d, t, datetimes[(d, t)])\n for c in conts:\n if contaminents[c] >= len(datadict[(d, t)]):\n outline += ',None'\n else:\n print(c)\n print(contaminents[c])\n print(datadict[(d, t)])\n outline += ',{}'.format(datadict[(d, t)][contaminents[c]])\n f.write(outline + '\\n')\n\n\n","sub_path":"misc/reaktor2.py","file_name":"reaktor2.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"36825820","text":"import sys,argparse\nimport os,glob\nimport numpy as np\nimport pandas as pd\nimport re,bisect\nfrom scipy import stats\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nmatplotlib.rcParams['font.size']=18\nsns.set(font_scale=1.2)\nsns.set_style(\"whitegrid\", {'axes.grid' : False})\nsns.set_style(\"ticks\",{'ytick.color': 'k','axes.edgecolor': 'k'})\nmatplotlib.rcParams[\"font.sans-serif\"] = [\"Arial\"]\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams[\"mathtext.rm\"] = \"Arial\"\n\ndef mark_pvalue(compr_pos,positions,box_vals):\n s,p = stats.ttest_ind(box_vals[compr_pos[0]],box_vals[compr_pos[1]],nan_policy='omit')\n y, h, col = np.percentile(np.append(box_vals[compr_pos[0]],box_vals[compr_pos[1]]),95)*.98 ,1.05, 'k'\n y2 = np.percentile(np.append(box_vals[compr_pos[0]],box_vals[compr_pos[1]]),0)*0.99\n x1,x2 = positions[compr_pos[0]],positions[compr_pos[1]]\n p_label='{:.0e}'.format(p)\n if p_label[-2]=='0':\n p_label = p_label[:-2]+p_label[-1]\n p_label = '{}\\n{}'.format(p_label,'up' if s<0 else 'down')\n if p<0.05:\n if compr_pos[2] == 't':\n plt.plot([x1*1.05, x1*1.05, x2*0.95, x2*0.95], [y, y*h, y*h, y], lw=1, c=col)\n plt.text((x1+x2)*.5, y*h, p_label, ha='center', va='bottom', color=col,fontsize=10)\n else:\n plt.plot([x1*1.05, x1*1.05, x2*0.95, x2*0.95], [y2, y2*.91, y2*.91, y2], lw=1, c=col)\n plt.text((x1+x2)*.5, y2*.95, p_label, ha='center', va='top', color=col,fontsize=10)\n\n\n\n\n\n## ==== main \n\ncellType_labels= {'Vector':'Vector',\\\n 'WT':'WT',\\\n 'DEL':'$\\Delta$cIDR',\\\n 'EIF':'UTX-eIF$_{IDR}$',\\\n 'TPR':'$\\Delta$TPR',\\\n 'MT2':'MT2',\\\n 'FUS':'UTX-FUS$_{IDR}$'}\n\n\n# qc from MAPS\nnorm_file = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang/f0_data_process/hichip/data_202008_sicer2_merged_islands_with_new_k27/qc_summary_rename.xlsx'\nnorm_df = pd.read_excel(norm_file,index_col=1)\nnorm_cols = ['number_of_pairs_after_duplicate_removal',\n 'number_of_intrachromosomal_pairs',\n 'number_of_short-range_intrachromosomal_pairs',\n 'number_of_short-range_vip_pairs',\n 'number_of_long-range_intrachromosomal_pairs']\n\n\nindir = '../../f0_looping_data_v2/data_202008_reindex'\noutdir = 'f2_data_202008_all_loop'\nos.makedirs(outdir,exist_ok=True)\n\ncellTypes = ['Vector','WT','DEL','EIF','TPR']\nfactors = ['H3K4me3','H3K27ac'] \n \n# prepare the box values\npositions = [1,2,3,4,5]\nfor factor in factors[:]:\n for norm_col in norm_cols[:1]:\n for kept_col in ['count','expected'][:1]: \n # plot the figs\n box_vals=[]\n for celltype in cellTypes:\n infile = '{}/{}_{}.bedpe'.format(indir,factor,celltype)\n with open(infile) as inf:\n df = pd.read_csv(inf,sep='\\t',index_col=0)\n norm_factor = norm_df.loc[norm_col,'{}_{}'.format(factor,celltype)]/100000000\n normalized_vals = np.log10(df[kept_col].values/norm_factor)\n box_vals.append(normalized_vals)\n \n # ==== plot figs\n fig = plt.figure(figsize=(3.6,3))\n # g = plt.violinplot(box_vals)\n g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\\\n boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\\\n medianprops=dict(color='grey'),showfliers=False) \n \n for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t'],[3,4,'t']]:\n mark_pvalue(compr_pos,positions,box_vals)\n plt.axes().set_xticklabels([cellType_labels[i] for i in cellTypes],rotation=45,ha='right',fontsize=15)\n plt.ylabel('log$_{{10}}$ (reads {} in loop)'.format(kept_col),fontsize=15)\n # plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc=\"upper right\",frameon=False)\n plt.title('By {}\\n {}'.format(norm_col, factor, ),fontsize=15,ha='center')\n plt.savefig(outdir+os.sep+'all_loops_{}_{}_NormBy_{}.pdf'.format(factor,kept_col,norm_col),bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)\n plt.show()\n plt.close()\n \n \n\n","sub_path":"f4_hichip_SICER_looping/f0_looping_genomic_distribution_v2/f1_normalized_reads_per_loop/py2_data_202008_all_loop_compr.py","file_name":"py2_data_202008_all_loop_compr.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"135292043","text":"import numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nword_embedding_size = 200\n\n# TP, FP, FN, TN\ncategories = {\n 'RESTAURANT#GENERAL': [0, 0, 0, 0],\n 'SERVICE#GENERAL': [0, 0, 0, 0],\n 'FOOD#QUALITY': [0, 0, 0, 0],\n 'FOOD#STYLE_OPTIONS': [0, 0, 0, 0],\n 'DRINKS#STYLE_OPTIONS': [0, 0, 0, 0],\n 'DRINKS#PRICES': [0, 0, 0, 0],\n 'RESTAURANT#PRICES': [0, 0, 0, 0],\n 'RESTAURANT#MISCELLANEOUS': [0, 0, 0, 0],\n 'AMBIENCE#GENERAL': [0, 0, 0, 0],\n 'FOOD#PRICES': [0, 0, 0, 0],\n 'LOCATION#GENERAL': [0, 0, 0, 0],\n 'DRINKS#QUALITY': [0, 0, 0, 0],\n 'NULL': [0, 0, 0, 0]\n}\n\nexamined_category = 'DRINKS#QUALITY'\ncategory_to_label_num = {\n 'RESTAURANT#GENERAL': 0,\n 'SERVICE#GENERAL': 1,\n 'FOOD#QUALITY': 2,\n 'FOOD#STYLE_OPTIONS': 3,\n 'DRINKS#STYLE_OPTIONS': 4,\n 'DRINKS#PRICES': 5,\n 'RESTAURANT#PRICES': 6,\n 'RESTAURANT#MISCELLANEOUS': 7,\n 'AMBIENCE#GENERAL': 8,\n 'FOOD#PRICES': 9,\n 'LOCATION#GENERAL': 10,\n 'DRINKS#QUALITY': 11,\n 'NULL': 12\n}\n\nlabel_num_to_category = {\n 0: 'RESTAURANT#GENERAL',\n 1: 'SERVICE#GENERAL',\n 2: 'FOOD#QUALITY',\n 3: 'FOOD#STYLE_OPTIONS',\n 4: 'DRINKS#STYLE_OPTIONS',\n 5: 'DRINKS#PRICES',\n 6: 'RESTAURANT#PRICES',\n 7: 'RESTAURANT#MISCELLANEOUS',\n 8: 'AMBIENCE#GENERAL',\n 9: 'FOOD#PRICES',\n 10: 'LOCATION#GENERAL',\n 11: 'DRINKS#QUALITY',\n 12: 'NULL'\n}\n\ncategory_sentence = {\n 'TP': [],\n 'FP': [],\n 'FN': []\n}\n\n\ndef test_model(net,\n threshold,\n dataset):\n TP = 0\n FP = 0\n TN = 0\n FN = 0\n net.eval()\n for i in range(dataset.get_test_data_size()):\n data = []\n next_sentence, sentence_labels = dataset.get_next_test_sentence(i)\n data.append(next_sentence)\n if torch.cuda.is_available():\n data = torch.from_numpy(np.array(data)).float().cuda()\n else:\n data = torch.from_numpy(np.array(data)).float()\n data = Variable(data).float()\n label_pred = F.softmax(net(data))\n for j in range(len(label_pred[0])):\n is_positive = float(label_pred[0][j] >= threshold)\n category = label_num_to_category[j]\n if is_positive:\n if j in sentence_labels:\n TP += 1\n categories[category][0] = categories[category][0] + 1\n if j == category_to_label_num[examined_category]:\n category_sentence['TP'].append(' '.join(dataset.get_test_sentence(i)[0]))\n else:\n FP += 1\n categories[category][1] = categories[category][1] + 1\n if j == category_to_label_num[examined_category]:\n category_sentence['FP'].append(' '.join(dataset.get_test_sentence(i)[0]))\n else:\n if j in sentence_labels:\n FN += 1\n categories[category][2] = categories[category][2] + 1\n if j == category_to_label_num[examined_category]:\n category_sentence['FN'].append(' '.join(dataset.get_test_sentence(i)[0]))\n else:\n TN += 1\n categories[category][3] = categories[category][3] + 1\n try:\n Recall = TP / (TP + FN)\n Precision = TP / (TP + FP)\n F1 = 2 * ((Precision * Recall) / (Precision + Recall))\n except ZeroDivisionError:\n F1 = 0.0\n print('F1 of the network is: ' + str(F1))\n print('Accuracy of the network is: ' + str((TP + TN)/(TP + TN + FP + FN)))\n for key in categories.keys():\n try:\n category_recall = categories[key][0] / (categories[key][0] + categories[key][2])\n category_precision = categories[key][0] / (categories[key][0] + categories[key][1])\n categories_accuracy = (categories[key][0] + categories[key][3]) / \\\n (categories[key][0] + categories[key][1] + categories[key][2] + categories[key][3])\n categories_F1 = 2 * ((category_precision * category_recall) / (category_precision + category_recall))\n except ZeroDivisionError:\n categories_F1 = 0.0\n print(categories[key])\n print('F1 of the network on category ' + key + ' is: ' + str(categories_F1))\n print('Accuracy of the network on category ' + key + ' is: ' + str(categories_accuracy))\n print('Precission of the network on category ' + key + ' is: ' + str(category_precision))\n print('Recall of the network on category ' + key + ' is: ' + str(category_recall))\n # print('Precission_Error of the network on category ' + key + ' is: ' + str(1.0 - category_precision))\n # print('Recall_Error of the network on category ' + key + ' is: ' + str(1.0 - category_recall))\n print('Result on ' + examined_category)\n f = open('Result' + examined_category + '.txt','w')\n for key in category_sentence.keys():\n #print(key + ':')\n f.write(key + ': '+'\\n')\n #print(category_sentence[key])\n for i in category_sentence[key]:\n f.write(i+'\\n')\n None\n return F1\n","sub_path":"codes/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"126834512","text":"#USAGE: python rmsd.py hotloop.xvg star.xvg clash.xvg sequence\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\nimport math\nimport os\ndef chooseBinSize(length, std):\n return 10 * int(math.log2(length) + 1 + math.log2(1 + 1/std))\n\ndef calculateProbability(hist, cutoff, total):\n probability = 0\n for index, ele in enumerate(hist[1][1:]):\n if ele < cutoff:\n probability += hist[0][index] \n else:\n \tbreak\n return int(probability / total * 100)\n\ndef findMid(arr):\n\tmidTicks = []\n\tfor index in range(len(arr) - 1):\n\t\tmidTicks.append(0.5 * (arr[index] + arr[index + 1]))\n\treturn midTicks\n\ndef drawAllAndNoClash(ax, rmsd_mtx, clash_mtx, sequence, color, flag):\n\tcutoff = 0.5\n\tvf = np.vectorize((lambda x : 10 * x))\n\t#convert nm to A\n\trmsd_mtx = vf(rmsd_mtx)\n\n\tbinSize = chooseBinSize(len(rmsd_mtx), np.std(rmsd_mtx))\n\tXTicks = np.linspace(0, np.amax(rmsd_mtx), binSize)\n\n\thistAllFrames = np.histogram(rmsd_mtx, bins=XTicks)\n\ttotalSum = np.sum(histAllFrames[0])\n\tticks = histAllFrames[1]\n\tticks_real = np.array([])\n\tfor i in range(len(ticks) - 1):\n\t\tticks_real = np.append(ticks_real, 0.5*(ticks[i] + ticks[i+1]))\n\tbase = ticks_real[1] - ticks_real[0]\n\tmyfun = np.vectorize(lambda x : x / totalSum / base)\n\n\thistNoClash = np.histogram(rmsd_mtx[clash_mtx == \"No\"], bins=XTicks)\n\tscatterNoClash = histNoClash[0]\n\tscatterAllFrames = histAllFrames[0]\n\n\tmyfun = np.vectorize(lambda x : x / totalSum / base)\n\tscatterNoClashNormalized = myfun(scatterNoClash)\n\tscatterAllFramesNormalized = myfun(scatterAllFrames)\n\n\tlabel1 = sequence + \": all\"\n\tlabel2 = sequence + \": no clash\"\n\n\tax.plot(ticks_real, scatterAllFramesNormalized, linestyle='--', linewidth=1, label=label1, color=color)\n\tax.plot(ticks_real, scatterNoClashNormalized, linestyle='-', linewidth=1, label=label2, color=color)\n\n\n\tax.set_xlabel(\"RMSD ($\\AA$)\", fontsize=\"x-large\")\n\tax.set_ylabel(\"Probability Density\", fontsize=\"x-large\")\n\n\tprobabilityAllFrame = (\"P(RMSD < \" + \n\t\tstr(cutoff) + \") = \" + \n\t\tstr(calculateProbability(histAllFrames, cutoff, totalSum)) + \n\t\t\"%\")\n\n\tprobabilityNoClashFrame = (\"P(RMSD < \" + \n\t\tstr(cutoff) + \n\t\t\" | no clash) = \" + \n\t\tstr(calculateProbability(histNoClash, cutoff, totalSum)) + \n\t\t\"%\")\n\n\twith open(\"probability.txt\", \"a\") as probabilityFile:\n\t\tif flag == 1:\n\t\t\tprobabilityFile.write(sequence + \":\\nto hotloop\\n\")\n\t\t\tprobabilityFile.write(probabilityAllFrame) \n\t\t\tprobabilityFile.write(\"\\n\")\n\t\t\tprobabilityFile.write(probabilityNoClashFrame)\n\t\t\tprobabilityFile.write(\"\\n\")\n\t\telse:\n\t\t\tprobabilityFile.write(\"to SESEGGGG\\n\")\n\t\t\tprobabilityFile.write(probabilityAllFrame) \n\t\t\tprobabilityFile.write(\"\\n\")\n\t\t\tprobabilityFile.write(probabilityNoClashFrame)\n\t\t\tprobabilityFile.write(\"\\n\")\n\t\t\tprobabilityFile.write(\"\\n\")\n\n\ndef main():\n\t#color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']\n\tcolor = ['red', 'orange', 'green', 'blue', 'indigo', 'violet', 'black']\n\tfig, axs = plt.subplots(nrows=2, ncols=1, sharex=False, sharey=False, figsize=(14,14))\n\tfilesHotLoop = []\n\tfilesCluster = []\n\tfileClash = []\n\tsequence = []\n\tfor argsIndex in range(1, len(sys.argv) - 1, 4):\n\t\tfilesHotLoop.append(sys.argv[argsIndex])\n\t\tfilesCluster.append(sys.argv[argsIndex + 1])\n\t\tfileClash.append(sys.argv[argsIndex + 2])\n\t\tsequence.append(sys.argv[argsIndex + 3])\n\n\tif float(len(sequence)) > 0.5 * len(color):\n\t\tsys.exit(\"Color Choices Is Not Enough!\")\n\n\tfor index in range(len(sequence)):\n\n\t\trmsdHotLoop_mtx = np.loadtxt(filesHotLoop[index], comments=[\"#\", \"@\"], usecols=1)\n\t\trmsdCluster_mtx = np.loadtxt(filesCluster[index], comments=[\"#\", \"@\"], usecols=1)\n\t\tclash_mtx = np.loadtxt(fileClash[index], usecols=0, dtype=str)\n\t\trmsdHotLoop_mtx = rmsdHotLoop_mtx[0:len(clash_mtx)]\n\t\trmsdCluster_mtx = rmsdCluster_mtx[0:len(clash_mtx)]\n\n\t\tdrawAllAndNoClash(axs[0], rmsdHotLoop_mtx, clash_mtx, sequence[index], color[index], 1)\n\t\tdrawAllAndNoClash(axs[1], rmsdCluster_mtx, clash_mtx, sequence[index], color[index], 2)\n\n\n\taxs[0].legend(loc=4)\n\taxs[1].legend(loc=4)\n\taxs[0].set_title(\"To Hotloop\")\n\taxs[1].set_title(\"To SESEGGGG\") \n\tfig.savefig(fname=\"histogram.png\", dpi=300)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"9ClashAnalysis/rmsd.py","file_name":"rmsd.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"331360505","text":"from rh.classes.Colaborador import Colaborador\nfrom datetime import date\n\n\nclass Departamento:\n def __init__(\n self,\n nome_setor,\n nome_responsavel=None,\n dia=None,\n mes=None,\n ano=None\n ):\n self._nome_setor = nome_setor\n self._responsavel = nome_responsavel\n self._colaboradores = []\n self._hoje = date.today()\n\n try:\n self._aniversario_resp = date(ano, mes, dia)\n except TypeError:\n raise TypeError('Informe dia, mês e ano')\n @property\n def aniversario_resp(self):\n return self._aniversario_resp.isoformat()\n @property\n def nome(self):\n return self._nome_setor\n\n @nome.setter\n def nome(self, value):\n self._nome_setor = value\n\n @property\n def responsavel(self):\n if self._responsavel is None:\n return None\n return str(self._responsavel)\n\n @property\n def colaboradores(self):\n return self._colaboradores\n\n def informar_responsavel(self, nome, dia, mes, ano):\n self._responsavel = Colaborador(nome, dia, mes, ano)\n\n def add_colaborador(self, nome, dia, mes, ano):\n self._colaboradores.append(Colaborador(nome, dia, mes, ano))\n\n def aniversario_hoje(self):\n if Colaborador._aniversario.day == self._hoje.day:\n if Colaborador._aniversario.month == self._hoje.month:\n return True\n return False\n\n def verificar_aniversariantes(self):\n lista = []\n for colaborador in self.colaboradores:\n if colaborador.aniversario_hoje():\n lista.append((\n colaborador.nome,\n colaborador.aniversario,\n self.nome))\n if Departamento.responsavel:\n if self._aniversario_resp.day == self._hoje.day:\n if self._aniversario_resp.month == self._hoje.month:\n lista.append((\n self.responsavel,\n self.aniversario_resp,\n self.nome))\n return lista\n\n def __str__(self):\n return self._nome_setor\n\n def __repr__(self):\n return 'Departamento = ' + self._nome_setor ","sub_path":"semana 08/rh/classes/Departamento.py","file_name":"Departamento.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"127698237","text":"#! /usr/bin/env python3\n\ndef warshall(matrix, print_intermediary = False):\n\n size = len(matrix)\n for k in range(size):\n print_matrix(matrix, k)\n for i in range(size):\n for j in range(size):\n matrix[i][j] = matrix[i][j] or matrix[i][k] * matrix[k][j]\n\n return matrix\n\ndef print_matrix(matrix, index):\n print('R({})'.format(index))\n for row in matrix:\n print(row)\n print()\n\nif __name__ == '__main__':\n matrix = [\n [0, 0, 1, 0],\n [1, 0, 0, 1],\n [0, 0, 0, 0],\n [0, 1, 0, 0]\n ]\n matrix = warshall(matrix, True)\n print_matrix(matrix, len(matrix))\n","sub_path":"warshall.py","file_name":"warshall.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"478374485","text":"import asyncio\nfrom typing import Iterable\n\n\ndef raise_exceptions(tasks: Iterable[asyncio.Task], logger) -> None:\n exception_raised = False\n for task in tasks:\n if not task.cancelled():\n exception = task.exception()\n if exception:\n logger.error(\"Exception in task %s:\", task.get_name(), exc_info=exception)\n exception_raised = True\n if exception_raised:\n raise Exception(\"Error while running tasks\")\n","sub_path":"spinpid/util/asyncio.py","file_name":"asyncio.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"432775888","text":"from django.urls import path\n\nfrom report import views\n\nurlpatterns = [\n path('upload/', views.ReportUpload.as_view(), name='upload_report'),\n path('scheduler-report-upload/', views.SchedulerReportUpload.as_view(), name='scheduler_report_upload'),\n path('sales/', views.SalesReport.as_view(), name='sales_report'),\n path('sales-by/', views.SalesByReport.as_view(), name='sales_by_report'),\n path('allsales/', views.AllSalesReport.as_view(), name='all_sales_report'),\n path('sales-comparison/', views.SalesComparisonReport.as_view(), name='sales_comparison_report'),\n path('year-year-sales/', views.YearToYearSalesReport.as_view(), name='year_to_year_sales_report'),\n path('commission', views.CommissionReport.as_view(), name='commission_report'),\n path('top-agent', views.TopAgentReport.as_view(), name='top_agent_report'),\n path('summary/', views.SalesSummaryReport.as_view(), name='sales_summary'),\n path('monthly-yoy/', views.MonthlyYOYReport.as_view(), name='monthly_yoy'),\n path('airline-agency/', views.AirlineAgencyReport.as_view(), name='airline_agency'),\n path('adm/', views.ADMReport.as_view(), name='adm_report'),\n path('disbursement-summary/', views.DisbursementSummary.as_view(), name='disbursement_summary_report'),\n path('getadm/', views.GetADMReport.as_view(), name='adm_report_download'),\n path('getairlineagency/', views.GetAirlineAgencyReport.as_view(), name='airline_agency_download'),\n path('getmonthlyyoy/', views.GetMonthlyYOYReport.as_view(), name='monthly_yoy_download'),\n path('getsummary/', views.GetSalesSummaryReport.as_view(), name='summary_report_download'),\n path('getcomparison/', views.GetSalesComparisonReport.as_view(), name='sales_comparison_report_download'),\n path('getdetails/', views.GetSalesReport.as_view(), name='sales_deatails_report_download'),\n path('get-sales-by/', views.GetSalesByReport.as_view(), name='sales_by_report_download'),\n path('get-all-sales/', views.GetAllSalesReport.as_view(), name='all_sales_report_download'),\n path('get-yeartoyear-sales/', views.GetYearToYearSalesReport.as_view(), name='year_to_year_sales_report_download'),\n path('get-commission/', views.GetCommissionReport.as_view(), name='commission_report_download'),\n path('get-top-agent/', views.GetTopAgentReport.as_view(), name='top_agent_report_download'),\n path('taxes-partial//', views.TaxesPartial.as_view(), name='taxes_partial'),\n path('re-process/', views.ReProcessReports.as_view(), name='re_process'),\n path('check-process/', views.CheckTasks.as_view(), name='check_process'),\n path('get-disbursement-summary/', views.GetDisbursementSummary.as_view(), name='disbursement_summary_report_download'),\n # path('upload-calendar/', views.CalendarUpload.as_view(), name='upload_calendar'),\n # path('calendar/', views.CalendarList.as_view(), name='calendar'),\n\n]\n","sub_path":"report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"240864482","text":"class Player:\r\n\r\n def __init__(self):\r\n self.max_health = 15\r\n self.health = self.max_health\r\n self.attack_power = 2\r\n self.defense = 1\r\n self.health_potion_ammount = 3\r\n self.health_potion_power = 5\r\n self.x = 0\r\n self.y = 0\r\n\r\n # increases the players stats\r\n def level_up(self):\r\n self.max_health += 15\r\n self.health = self.max_health\r\n self.attack_power += 3\r\n self.defense += 1\r\n self.health_potion_ammount += 2\r\n self.health_potion_power += 4\r\n\r\n def attack(self, boss):\r\n boss.health -= self.attack_power\r\n\r\n def drink_potion(self):\r\n \r\n if self.health_potion_ammount > 0:\r\n self.health_potion_ammount -= 1\r\n\r\n missing_health = self.max_health - self.health\r\n recovered_health = self.health_potion_power\r\n\r\n # makes sure that the players health doesn't go above his max health\r\n if self.health + self.health_potion_power > self.max_health:\r\n self.health += missing_health\r\n recovered_health = missing_health\r\n else:\r\n self.health += self.health_potion_power\r\n\r\n print(\"You recovered \", recovered_health, \" HP.\")\r\n print(self.health_potion_ammount, \" health potions remaining.\")\r\n else:\r\n print(\"You ran out of healing potions.\")\r\n \r\n def isAlive(self):\r\n if self.health > 0:\r\n return True\r\n else:\r\n return False\r\n\r\n # returns a list that will act as health bar\r\n def getHealth(self):\r\n health_color = (255, 0, 0) # RED\r\n black = (0, 0, 0)\r\n\r\n health = [health_color if i < self.health else black for i in range(64)]\r\n\r\n return health\r\n \r\n # player chooses what to do on the terminal\r\n def playerTurn(self):\r\n print(\"[1] Attack \\n[2] Heal\")\r\n option = int(input(\"What will you do?\"))\r\n\r\n return option\r\n\r\n\r\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"289920440","text":"\r\n#백준 1495번 기타리스트\r\n\r\nn, s, m = map(int, input().split())\r\nv = [int(i) for i in input().split()]\r\n\r\ncheck = [[False]*(m+1) for _ in range(n + 1)]\r\ncheck[0][s] = True\r\n\r\nfor i in range(1, n+1):\r\n for j in range(m+1):\r\n if check[i-1][j] is True:\r\n temp = j - v[i-1]\r\n if 0 <= temp <= m:\r\n check[i][temp] = True\r\n temp = j + v[i - 1]\r\n if 0 <= temp <= m:\r\n check[i][temp] = True\r\n\r\nresult = -1\r\nfor i in range(m, -1, -1):\r\n if check[-1][i]:\r\n result = i\r\n break\r\nprint(result)","sub_path":"20200412_BJ1495.py","file_name":"20200412_BJ1495.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"8037158","text":"#!/bin/env python\n\n# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract\n# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government\n# retains certain rights in this software.\n\n# External dependencies\nimport PIL.Image\n\n# Python standard library\nimport argparse\n\ntry:\n import io as StringIO\nexcept ImportError:\n import io\n\nimport datetime\nimport errno\nimport json\nimport slycat.mime_type\nimport os\nimport re\nimport stat\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport traceback\nimport uuid\nimport abc\nimport logging\nimport configparser\nimport glob\nimport base64\nimport numpy\n\nsession_cache = {}\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, numpy.integer):\n return int(obj)\n elif isinstance(obj, numpy.floating):\n return float(obj)\n elif isinstance(obj, numpy.ndarray):\n return obj.tolist()\n elif type(obj) is bytes:\n return str(obj.decode())\n else:\n return super(MyEncoder, self).default(obj)\n\nclass Agent(object, metaclass=abc.ABCMeta):\n \"\"\"\n This class is an interface for agent functionality on a cluster server\n \"\"\"\n _log_lock = threading.Lock()\n _loggers = {}\n\n def __init__(self):\n self.scripts = []\n self.hpc = {}\n self.json_paths = []\n self._status_list = [\"[STARTED]\", \"[RUNNING]\", \"[FINISHED]\", \"[FAILED]\", \"[UNKNOWN]\"]\n\n @abc.abstractmethod\n def run_remote_command(self, command):\n \"\"\"\n run a predefine script on hpc. could potentially be\n an hpc batch job such as slurm or pbs.\n :param command: json command\n :return:\n \"\"\"\n pass\n\n @abc.abstractmethod\n def run_shell_command(self, command, jid=0, log_to_file=False):\n \"\"\"\n command to be run on the remote machine\n :param log_to_file: bool for logging\n :param jid: job id\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def launch(self, command):\n \"\"\"\n launch a job on the remote machine\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def submit_batch(self, command):\n \"\"\"\n submit a batch job on the remote machine\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def checkjob(self, command):\n \"\"\"\n check a job's status on a remote machine\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def check_agent_job(self, command):\n \"\"\"\n check an agents job status on a remote machine\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def cancel_job(self, command):\n \"\"\"\n cancels a remote job\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_job_output(self, command):\n \"\"\"\n get a detailed version of the jobs output\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def generate_batch(self, module_name, wckey, nnodes, partition, ntasks_per_node, time_hours, time_minutes,\n time_seconds,\n fn,\n tmp_file):\n \"\"\"\n generate a remote batch file that can be used\n by the remote system's mpi queue\n :param module_name: \n :param wckey: \n :param nnodes: \n :param partition: \n :param ntasks_per_node: \n :param time_hours: \n :param time_minutes: \n :param time_seconds: \n :param fn: \n :param tmp_file: \n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def run_function(self, command):\n \"\"\"\n function used to run a job\n :param command: json command\n :return: \n \"\"\"\n pass\n\n @abc.abstractmethod\n def check_hpc_params(self, command):\n \"\"\"\n takes a command json and creates a string that can be\n run hpc jobs\n :param command: json command\n :return: \n \"\"\"\n pass\n\n def get_script_run_string(self, command_script):\n \"\"\"\n takes a command json and creates a string that can be\n run\n :param command_script: json representation of a command_script\n :return: \n \"\"\"\n run_command = ''\n for agent_script in self.scripts:\n # we just found a match lets add it to the command that we are going to run\n if command_script[\"name\"] == agent_script[\"name\"]:\n run_command += str(agent_script[\"exec_path\"])\n run_command += \" \"\n run_command += str(agent_script[\"path\"])\n for parameter in command_script[\"parameters\"]:\n run_command += \" \"\n run_command += str(parameter[\"name\"])\n run_command += \" \"\n run_command += str(parameter[\"value\"])\n return run_command\n if not run_command:\n raise Exception(\"No Matching script name or missing params\")\n\n def get_job_logger(self, name):\n \"\"\"\n returns a logging function with the jid.log as the file name\n :param jid: job id\n :return: \n \"\"\"\n logger = logging.getLogger(str(name))\n if str(name) not in self._loggers:\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(str(name) + '.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n self._loggers[str(name)] = logger\n return lambda msg: self._loggers[str(name)].info(msg)\n\n def get_user_config(self):\n \"\"\"\n reads the users config as json \n {results:{config:{}, \"ok\":bool, errors:\"string errors message\"}}\n :return: \n \"\"\"\n results = {\n \"ok\": True\n }\n\n rc = os.path.expanduser('~') + \"/.slycatrc\"\n if os.path.isfile(rc):\n try:\n parser = configparser.RawConfigParser()\n parser.read(rc)\n configuration = {section: {key: eval(value) for key, value in parser.items(section)} for section in\n parser.sections()}\n results[\"config\"] = configuration\n results[\"errors\"] = \"\"\n except Exception as e:\n results[\"config\"] = {}\n results[\"errors\"] = \"%s\" % e\n else:\n results[\"config\"] = \"see errors\"\n results[\"errors\"] = \"the user does not have a .slycatrc file under their home directory\"\n\n sys.stdout.write(\"%s\\n\" % json.dumps(results, cls=MyEncoder))\n sys.stdout.flush()\n\n def set_user_config(self, command):\n \"\"\"\n writes config into ~/.slycatrc file of the user\n :param command: incoming json format should be \n {\n action:\"action\",\n command:{config:{section_key:{option_key:\"value\"}...}}\n }\n :return: \n \"\"\"\n results = {\n \"ok\": True,\n \"errors\": \"\"\n }\n config = command[\"command\"][\"config\"]\n rc = os.path.expanduser('~') + \"/.slycatrc\"\n\n with open(rc, \"w+\") as rc_file:\n rc_file.seek(0)\n rc_file.truncate()\n parser = configparser.RawConfigParser()\n for section_key in config:\n if not parser.has_section(section_key):\n parser.add_section(section_key)\n section = config[section_key]\n for option_key in section:\n if not str(section[option_key]) == \"\":\n parser.set(section_key, option_key, \"\\\"%s\\\"\" % section[option_key])\n parser.write(rc_file)\n sys.stdout.write(\"%s\\n\" % json.dumps(results, cls=MyEncoder))\n sys.stdout.flush()\n\n # Handle the 'browse' command.\n def browse(self, command):\n if \"path\" not in command:\n raise Exception(\"Missing path.\")\n path = command[\"path\"]\n if not os.path.isabs(path):\n raise Exception(\"Path must be absolute.\")\n if not os.path.exists(path):\n raise Exception(\"Path not found.\")\n\n file_reject = re.compile(command.get(\"file-reject\")) if \"file-reject\" in command else None\n file_allow = re.compile(command.get(\"file-allow\")) if \"file-allow\" in command else None\n directory_reject = re.compile(command.get(\"directory-reject\")) if \"directory-reject\" in command else None\n directory_allow = re.compile(command.get(\"directory-allow\")) if \"directory-allow\" in command else None\n\n if os.path.isdir(path):\n names = sorted(os.listdir(path))\n else:\n path, name = os.path.split(path)\n names = [name]\n\n listing = {\n \"ok\": True,\n \"path\": path,\n \"names\": [],\n \"sizes\": [],\n \"types\": [],\n \"mtimes\": [],\n \"mime-types\": [],\n }\n\n for name in names:\n fpath = os.path.join(path, name)\n fstat = os.stat(fpath)\n ftype = \"d\" if stat.S_ISDIR(fstat.st_mode) else \"f\"\n\n if ftype == \"d\":\n if directory_reject is not None and directory_reject.search(fpath) is not None:\n if directory_allow is None or directory_allow.search(fpath) is None:\n continue\n\n if ftype == \"f\":\n if file_reject is not None and file_reject.search(fpath) is not None:\n if file_allow is None or file_allow.search(fpath) is None:\n continue\n\n if ftype == \"d\":\n mime_type = \"application/x-directory\"\n else:\n mime_type = slycat.mime_type.guess_type(name)[0]\n\n listing[\"names\"].append(name)\n listing[\"sizes\"].append(fstat.st_size)\n listing[\"types\"].append(ftype)\n listing[\"mtimes\"].append(datetime.datetime.fromtimestamp(fstat.st_mtime).isoformat())\n listing[\"mime-types\"].append(mime_type)\n\n sys.stdout.write(\"%s\\n\" % json.dumps(listing, cls=MyEncoder))\n sys.stdout.flush()\n\n # Handle the 'get-file' command.\n def get_file(self, command):\n if \"path\" not in command:\n raise Exception(\"Missing path.\")\n path = command[\"path\"]\n if not os.path.isabs(path):\n raise Exception(\"Path must be absolute.\")\n if not os.path.exists(path):\n raise Exception(\"Path not found.\")\n if not os.access(path, os.R_OK):\n raise Exception(\"No read permission.\")\n if os.path.isdir(path):\n raise Exception(\"Directory unreadable.\")\n\n try:\n with open(path, \"rb\") as f:\n file_content = f.read()\n except IOError as e:\n if e.errno == errno.EACCES:\n raise Exception(\"Access denied.\")\n raise Exception(e.strerror)\n except Exception as e:\n raise Exception(e.message)\n\n content_type, encoding = slycat.mime_type.guess_type(path)\n self.get_job_logger(\"slycat_agent\")(str(type(file_content)))\n encoded_file_content = base64.b64encode(file_content).decode('utf-8')\n sys.stdout.write(\"%s\\n\" % (json.dumps({\"ok\": True, \"message\": \"File retrieved.\", \"path\": path, \"content-type\": content_type,\"size\": len(file_content), \"content\": encoded_file_content}, cls=MyEncoder)))\n sys.stdout.flush()\n\n # Handle the 'write-file' command.\n def write_file(self, command):\n if \"path\" not in command:\n raise Exception(\"Missing path.\")\n if \"data\" not in command:\n raise Exception(\"Missing data.\")\n path = command[\"path\"]\n data = base64.decodestring(command[\"data\"])\n if not os.path.isabs(path):\n raise Exception(\"Path must be absolute.\")\n if os.path.exists(path):\n raise Exception(\"Path exists.\")\n pdir = os.path.dirname(path)\n if not pdir: pdir = '.'\n if os.path.isdir(path):\n raise Exception(\"Directory path is unwritable.\")\n if not os.path.exists(os.path.dirname(path)):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as e: # Guard against race condition\n raise Exception(e.message)\n try:\n self.get_job_logger(\"slycat_agent\")(\"Writing file\")\n self.get_job_logger(\"slycat_agent\")(\"file \\n%s\" % data)\n with open(path, \"w\") as f:\n f.write(data)\n self.get_job_logger(\"slycat_agent\")(\"Done Writing file\")\n except IOError as e:\n if e.errno == errno.EACCES:\n raise Exception(\"Access denied.\")\n raise Exception(e.strerror)\n except Exception as e:\n raise Exception(e.message)\n self.get_job_logger(\"slycat_agent\")(\"getting \") \n content_type, encoding = slycat.mime_type.guess_type(path)\n sys.stdout.write(\"%s\\n\" % (json.dumps({\"ok\": True, \"message\": \"File written.\", \"path\": path, \"content-type\": content_type}, cls=MyEncoder)))\n sys.stdout.flush()\n\n # Handle the 'get-image' command.\n def get_image(self, command):\n if \"path\" not in command:\n raise Exception(\"Missing path.\")\n path = command[\"path\"]\n if not os.path.isabs(path):\n raise Exception(\"Path must be absolute.\")\n if not os.path.exists(path):\n raise Exception(\"Path not found.\")\n if os.path.isdir(path):\n raise Exception(\"Directory unreadable.\")\n\n file_content_type, encoding = slycat.mime_type.guess_type(path)\n requested_content_type = command.get(\"content-type\", file_content_type)\n\n # Optional fast path if the client hasn't requested anything that would alter the image contents:\n if \"max-size\" not in command and \"max-width\" not in command and \"max-height\" not in command and requested_content_type == file_content_type:\n try:\n content = open(path, \"rb\").read()\n except IOError as e:\n if e.errno == errno.EACCES:\n raise Exception(\"Access denied.\")\n raise Exception(e.strerror)\n except Exception as e:\n raise Exception(e.message)\n\n content_type, encoding = slycat.mime_type.guess_type(path)\n sys.stdout.write(\"%s\\n%s\" % (json.dumps(\n {\"ok\": True, \"message\": \"Image retrieved.\", \"path\": path, \"content-type\": content_type,\n \"size\": len(content)}, cls=MyEncoder), content))\n sys.stdout.flush()\n return\n\n if requested_content_type not in [\"image/jpeg\", \"image/png\"]:\n raise Exception(\"Unsupported image type.\")\n\n # Load the requested image.\n try:\n image = PIL.Image.open(path)\n except IOError as e:\n raise Exception(e.strerror)\n\n # Optionally downsample the image.\n size = image.size\n if \"max-size\" in command:\n size = (command[\"max-size\"], command[\"max-size\"])\n if \"max-width\" in command:\n size = (command[\"max-width\"], size[1])\n if \"max-height\" in command:\n size = (size[0], command[\"max-height\"])\n if size != image.size:\n image.thumbnail(size=size, resample=PIL.Image.ANTIALIAS)\n\n # Save the image to the requested format.\n content = io.StringIO()\n if requested_content_type == \"image/jpeg\":\n image.save(content, \"JPEG\")\n elif requested_content_type == \"image/png\":\n image.save(content, \"PNG\")\n\n # Send the results back to the caller.\n sys.stdout.write(\"%s\\n%s\" % (json.dumps(\n {\"ok\": True, \"message\": \"Image retrieved.\", \"path\": path, \"content-type\": requested_content_type,\n \"size\": len(content.getvalue())}, cls=MyEncoder), content.getvalue()))\n sys.stdout.flush()\n\n def run(self):\n \"\"\"\n format {action:action, command: command}\n :return: \n \"\"\"\n debug = False\n self.get_job_logger(\"slycat_agent\")(\"\\n\")\n self.get_job_logger(\"slycat_agent\")(\"*agent started*\")\n # Parse and sanity-check command-line arguments.\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--fail-startup\", default=False, action=\"store_true\",\n help=\"Fail immediately on startup. Obviously, this is for testing.\")\n parser.add_argument(\"--fail-exit\", default=False, action=\"store_true\",\n help=\"Fail during exit. Obviously, this is for testing.\")\n parser.add_argument('--json', action='append', default=[], help='path to json files dirs')\n arguments = parser.parse_args()\n\n if arguments.fail_startup:\n exit(-1)\n # look for json configuration for hpc system and load it\n if len(arguments.json) > 0:\n self.json_paths = arguments.json\n for path in self.json_paths:\n # load all .js files in the directory given\n for _ in glob.glob(path + \"/*.js\"):\n with open(_) as in_file:\n self.scripts.append(json.load(in_file))\n # Let the caller know we're ready to handle commands.\n sys.stdout.write(\"%s\\n\" % json.dumps({\"ok\": True, \"message\": \"Ready.\"}, cls=MyEncoder))\n sys.stdout.flush()\n\n while True:\n # format: {\"action\":\"action\"}\n # Read the next command from caller.\n command = sys.stdin.readline()\n if command == \"\": # EOF means the caller went away and it's time to shut-down.\n break\n\n try:\n # Parse the command, which must be a JSON object containing an action.\n try:\n command = json.loads(command)\n except:\n self.get_job_logger(\"slycat_agent\")(\"Not a JSON object.\")\n raise Exception(\"Not a JSON object.\")\n if not isinstance(command, dict):\n self.get_job_logger(\"slycat_agent\")(\"Not a JSON object.\")\n raise Exception(\"Not a JSON object.\")\n if \"action\" not in command:\n self.get_job_logger(\"slycat_agent\")(\"Missing action for command: %s\" % command)\n raise Exception(\"Missing action.\")\n if \"debug\" in command:\n if command[\"debug\"] is True:\n debug = True\n\n action = command[\"action\"]\n if debug:\n self.get_job_logger(\"slycat_agent\")(\"command: %s\" % command)\n if action == \"exit\":\n self.get_job_logger(\"slycat_agent\")(\"*agent stopping*\\n\")\n if not arguments.fail_exit:\n break\n elif action == \"browse\":\n self.browse(command)\n elif action == \"get-file\":\n self.get_file(command)\n elif action == \"write-file\":\n self.write_file(command)\n elif action == \"get-image\":\n self.get_image(command)\n elif action == \"create-video\":\n sys.stdout.write(\"%s\\n\" % json.dumps({\"ok\": False, \"message\": \"this command is depricated and has \"\n \"been removed\"}, cls=MyEncoder))\n sys.stdout.flush()\n elif action == \"launch\":\n self.launch(command)\n elif action == \"check-agent-job\":\n self.check_agent_job(command)\n elif action == \"submit-batch\":\n self.submit_batch(command)\n elif action == \"checkjob\":\n self.checkjob(command)\n elif action == \"get-job-output\":\n self.get_job_output(command)\n elif action == \"run-function\":\n self.run_function(command)\n elif action == \"run-remote-command\":\n self.run_remote_command(command)\n elif action == \"cancel-job\":\n self.cancel_job(command)\n elif action == \"get-user-config\":\n self.get_user_config()\n elif action == \"set-user-config\":\n self.set_user_config(command)\n else:\n self.get_job_logger(\"slycat_agent\")(\"Unknown command.\")\n raise Exception(\"Unknown command.\")\n except Exception as e:\n if debug:\n self.get_job_logger(\"slycat_agent\")(\"%s\\n\" % json.dumps({\"ok\": False, \"message\": traceback.format_exc()}))\n sys.stdout.write(\"%s\\n\" % json.dumps({\"ok\": False, \"message\": str(traceback.format_exc())}, cls=MyEncoder))\n else:\n sys.stdout.write(\"%s\\n\" % json.dumps({\"ok\": False, \"message\": str(e)}, cls=MyEncoder))\n sys.stdout.flush()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n this is how we run the agent when implemented\n \"\"\"\n some_cluster_agent = Agent()\n some_cluster_agent.run()\n","sub_path":"agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":21755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"653976517","text":"import json\nimport http.client, urllib.request, urllib.parse, urllib.error\nimport ocr_cache as cache\nimport sub_key\n\n# API vars\nheaders = {\n # Request headers\n 'Content-Type': 'application/octet-stream',\n 'Ocp-Apim-Subscription-Key': sub_key.get_key(),\n}\n\nparams = urllib.parse.urlencode({\n # Request parameters\n 'language': 'en',\n 'detectOrientation ': 'true',\n})\n\ndef get_json_data(image, base_path, zoom_level, pref):\n \n zoom_prefix = str(zoom_level) + 'x/' if zoom_level > 1 else ''\n\n url = \"/vision/v1.0/ocr?%s\" % params\n\n\n full_path = base_path + '/' + zoom_prefix + image \n with open(full_path, 'rb') as img_file:\n img_data = img_file.read()\n\n cache_prefix = 'oxford' + url\n data = cache.get(cache_prefix, img_data)\n\n if data:\n return json.loads(data.decode('utf8'))\n\n conn = None\n try:\n conn = http.client.HTTPSConnection('api.projectoxford.ai', timeout=10)\n conn.request(\"POST\", url, body=img_data, headers=headers)\n response = conn.getresponse()\n body = response.read()\n if response.status == 200:\n data = body\n cache.put(cache_prefix, img_data, data)\n else:\n raise Exception(\"Error with retrieving Oxford OCR results for %s: %s\" % (full_path, body))\n\n finally:\n if conn is not None:\n conn.close()\n conn = None\n\n return json.loads(data.decode('utf-8')) # Need to double-check if utf-8 is correct\n","sub_path":"oxford_api.py","file_name":"oxford_api.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"644477738","text":"import os\nimport io\nfrom .urls import Urls\nfrom .errors import MediaError, InstallationError\nfrom .utils.request import Session\nfrom .backend.audio.player import Player\nfrom .backend.mediasetup import Album, Mp3\nfrom .frontend.display import Print, Verbose, Show\nfrom .backend.filehandler import file_size, Tmp, Path\nfrom .backend.config import User, Datatype, Queued, Threader\nfrom .mixtapes import Mixtapes\n\n# TODO NOT finish writig baseplayer method and subclasses\n# from .backend.audio.player import BasePlayer as player\n# will cahnge import to another name\n# change name of player\n\n\nclass Media:\n \"\"\" Media player that control the songs selected from Mixtapes \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, \"__tmpfile\"):\n Tmp.removeTmpOnstart()\n cls.__tmpfile = Tmp.create()\n\n if hasattr(cls, \"player\"):\n if kwargs.get(\"player\"):\n cls.player = Player.getPlayer(**kwargs)\n\n elif not hasattr(cls, \"player\"):\n try:\n cls.player = Player.getPlayer(**kwargs)\n except Exception as e:\n cls.player = None\n\n if cls.player is None: # Incase user reinitalize Media class\n raise MediaError(7, InstallationError._extra)\n\n return super(Media, cls).__new__(cls)\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self._Mixtapes)\n\n def __repr__(self):\n return \"Media(%s)\" % (self._Mixtapes)\n\n def __len__(self):\n if hasattr(self, \"songs\"):\n return len(self.songs)\n else:\n return 0\n\n def __init__(self, mixtape=None, **kwargs):\n \"\"\" \n Initialize Media and load all mixtapes.\n\n :param: mixtape - Datpiff.Mixtapes object\n \"\"\"\n if not self.__isMixtapesObject(mixtape):\n raise MediaError(2, \"must pass a mixtape object to Media class\")\n\n Verbose(\"Media initialized\")\n if not mixtape:\n raise MediaError(1)\n\n self._session = Session()\n self._Mixtapes = mixtape\n self._artist_name = None\n self._album_name = None\n self._current_index = None\n self._selected_song = None\n self.__downloaded_song = None\n super(Media, self).__init__()\n\n def __isMixtapesObject(self, obj):\n try:\n if issubclass(obj.__class__, Mixtapes):\n return True\n except:\n pass\n return False\n\n def findSong(self, songname):\n \"\"\"\n Search through all mixtapes songs and return all songs \n with songname\n\n :param: songname - name of the song to search for\n \"\"\"\n\n # TODO:look this video with James Powell\n # https://www.youtube.com/watch?v=R2ipPgrWypI&t=1748s at 55:00.\n # Implement a generator function , so user dont have to wait on all the results at once\n # Also thread this main function, to unblock user from still using program while\n # it wait for result to be finished.\n songname = Datatype.strip_lowered(songname)\n Print(\"\\nSearching for song: %s ...\" % songname)\n links = self._Mixtapes.links\n links = list(enumerate(links, start=1))\n results = Queued(self.__searchAlbumFor, links).run(songname)\n if not results:\n Print(\"No song was found with the name: %s \" % songname)\n results = Datatype.removeNone(results)\n return results\n\n def __searchAlbumFor(self, links, song, *args, **kwargs):\n \"\"\"\n Search through all Albums and return all Albums\n that contains similiar songs' title.\n\n :param: song - title of the song to search for\n :param: links - all mixtapes links\n \"\"\"\n index, link = links\n album = Album(link)\n name = album.name\n tracks = Mp3(album.datpiff_player_response).songs\n for track in tracks:\n if song in Datatype.strip_lowered(track):\n return {\"ablumNo\": index, \"album\": name, \"song\": track}\n\n def setMedia(self, selection):\n \"\"\"\n Initialize and set the an Album to Media Player.\n A pydatpiff.mixtapes.Mixtape's ablum will be load to the media player.\n\n :param: selection - pydatpiff.Mixtapes album's name or artist's name.\n int - will return the Datpiff.Mixtape artist at that index.\n str - will search for an artist from Mixtapes.artists (default)\n or album from Mixtapes.ablum. \n\n note: see pydatpiff.mixtape.Mixtapes for album or artist selection \n \"\"\"\n\n result = self._Mixtapes._select(selection)\n if result is None:\n Verbose(\"SELECTION:\", selection)\n e_msg = '\\n--> Mixtape \"%s\" was not found' % selection\n raise MediaError(1)\n\n # set up all Album's Info\n max_result = len(self._Mixtapes) - 1\n result = result if result <= max_result else max_result\n self._artist_name = self._Mixtapes.artists[result]\n self._album_name = self._Mixtapes.mixtapes[result]\n self._album_cover = self._Mixtapes.album_covers[result]\n\n link = self._Mixtapes._links\n\n self.__setupMedia(link[result])\n Verbose(\"Setting Media to %s - %s\" % (self.artist, self.album))\n\n def __setupMedia(self, link):\n \"\"\"\n Initial an Album and sets all Mp3 songs'tags\n \n param: link - Album's mixtape link\n \"\"\"\n album = Album(link)\n self._Mp3 = Mp3(album.datpiff_player_response)\n # get the ablum's uploader\n self.uploader = album.uploader\n # get ablum bio\n self.bio = album.bio\n self.__cache_storage = {}\n\n def __index_of_song(self, select):\n \"\"\"\n Parse all user input and return the correct song index.\n :param select: - Media.songs name or index of Media.songs \n datatype: int: must be numerical\n str: artist,mixtape, or song name\n \"\"\"\n try:\n return User.selection(select, self.songs, [x.lower() for x in self.songs])\n except MediaError as e:\n raise MediaError(5)\n\n @property\n def artist(self):\n \"\"\"Return the current artist name.\"\"\"\n return self._artist_name\n\n @artist.setter\n def artist(self, name):\n self.setMedia(name)\n\n @property\n def album(self):\n \"\"\"Return the current album name.\"\"\"\n return self._album_name\n\n @album.setter\n def album(self, name):\n self.setMedia(name)\n\n @property\n def album_cover(self):\n if hasattr(self, \"_album_cover\"):\n return self._album_cover\n return \"\"\n\n @album_cover.setter\n def album_cover(self, url):\n self._album_cover = url\n\n @property\n def songs(self):\n \"\"\" Return all songs from album.\"\"\"\n if not hasattr(self, \"_Mp3\"):\n e_msg = '\\nSet media by calling --> Media.setMedia(\"Album name\")'\n raise MediaError(3, e_msg)\n return self._Mp3.songs\n\n @property\n def __mp3urls(self):\n \"\"\"Returns all parsed mp3 url\"\"\"\n return list(self._Mp3.mp3Urls)\n\n def show_songs(self):\n \"\"\"Pretty way to Print all song names\"\"\"\n try:\n songs = self.songs\n [Print(\"%s: %s\" % (a + 1, b)) for a, b in enumerate(songs)]\n except TypeError:\n Print(\"Please set Media first\\nNo Artist name\")\n\n @property\n def song(self):\n \"\"\"Returns the current song set by user.\"\"\"\n return self._selected_song\n\n @song.setter\n def song(self, name):\n \"\"\" \n Set current song\n :param: name - name of song or song's index\n \"\"\"\n songs = self.songs\n index = self.__index_of_song(name)\n if index is not None:\n self._selected_song = songs[index]\n self._current_index = index\n else:\n Print(\"\\n\\t song was not found\")\n\n def _cacheSong(self, song, content):\n \"\"\"\n Preserve the data from song and store it for future calls.\n This prevents calling the requests function again for the same song. \n Each data from a song will be stored in __cache_storage for future access.\n\n :param: song - name of the song\n :param: content - song content \n \"\"\"\n name = \"-\".join((self.artist, song))\n try:\n self.__cache_storage[name] = content\n except MemoryError:\n self.__cache_storage = {}\n\n def _checkCache(self, songname):\n \"\"\"\n Check whether song has been download already.\n\n :param: \n \n \"\"\"\n requested_song = \"-\".join((self.artist, songname))\n if hasattr(self, \"__cache_storage\"):\n if requested_song in self.__cache_storage:\n response = self.__cache_storage.get(requested_song)\n if not response:\n extended_msg = \"%s not in cache_storage\" % songname\n raise MediaError(8, extended_msg)\n return response\n\n def _getMp3Content(self, track):\n \"\"\"\n Return content of the song in IO Bytes object\n\n :param: track - name of song or song index\n \"\"\"\n\n selection = self.__index_of_song(track)\n if selection is None:\n return\n\n self.__song_index = selection\n link = self.__mp3urls[selection]\n songname = self.songs[selection]\n self.song = selection + 1\n\n # Write songname to file\n # check if song has been already downloaded\n # if so then get the response from cache\n response = self._checkCache(songname)\n if not response:\n response = self._session.method(\"GET\", link)\n self._cacheSong(songname, response)\n\n return io.BytesIO(response.content)\n\n @property\n def autoplay(self):\n \"\"\"Continuously play song from current album.\"\"\"\n if hasattr(self, \"_auto_play\"):\n self.player._media_autoplay = self._auto_play\n return self._auto_play\n\n @autoplay.setter\n def autoplay(self, auto=False):\n \"\"\" \n Sets the autoplay function.\n \n :param: auto - disable or enable autoplay\n datatype: boolean\n default: False\n \"\"\"\n self._auto_play = auto\n self._continousPlay()\n if auto:\n Verbose(\"\\t----- AUTO PLAY ON -----\")\n else:\n Verbose(\"\\t----- AUTO PLAY OFF -----\")\n\n @Threader\n def _continousPlay(self):\n \"\"\" \n Automatically play each song from Album when autoplay is enable.\n \"\"\"\n if self.autoplay:\n total_songs = len(self)\n if not self.song:\n Verbose(\"Must play a song before setting autoplay\")\n return\n\n trackno = self.__index_of_song(self.song) + 2\n if trackno > total_songs:\n Print(\"AutoPlayError: Current track is the last track\")\n self.autoplay = False\n return\n\n while self.autoplay:\n current_track = self.__index_of_song(self.song) + 1\n stopped = self.player._state.get(\"stop\")\n if stopped:\n next_track = current_track + 1\n\n if next_track > total_songs:\n Verbose(\"No more songs to play\")\n self.autoplay = False\n break\n\n Verbose(\"Loading next track\")\n Verbose(\"AUTO PLAY ON\")\n self.play(next_track)\n while self.player._state[\"stop\"]:\n pass\n\n def play(self, track=None, demo=False):\n \"\"\" \n Play song (uses vlc media player) \n\n :param: track - name or index of song type(str or int)\n :param: demo - True: demo sample of song\n False: play full song \n *default: False\n \"\"\"\n if self.player is None:\n extented_msg = \"Audio player is incompatible with device\"\n raise MediaError(6, extented_msg)\n\n if track is None:\n Print(\"\\n\\t -- No song was entered --\")\n return\n\n if isinstance(track, int):\n if track > len(self):\n raise MediaError(4)\n\n try:\n content = self._getMp3Content(track).read()\n except Exception:\n Print(\"\\n\\t-- No song was found --\")\n return\n\n songname = self.songs[self.__song_index]\n track_size = len(content)\n # play demo or full song\n if not demo: # demo whole song\n chunk = content\n samp = int(track_size)\n else: # demo partial song\n samp = int(track_size / 5)\n start = int(samp / 5)\n chunk = content[start : samp + start]\n size = file_size(samp)\n\n # write song to file\n Path.writeFile(self.__tmpfile.name, chunk, mode=\"wb\")\n\n # display message to user\n Show.mediaPlayMsg(self.artist, songname, size, demo)\n\n song = \" - \".join((self.artist, songname))\n self.player.setTrack(song, self.__tmpfile.name)\n self.player.play\n\n def download(self, track=False, output=\"\", rename=None):\n \"\"\"\n Download song from Datpiff\n\n :param: track - name or index of song type(str or int)\n :param: output - location to save the song (optional)\n :param: rename - rename the song (optional)\n default will be song's name \n \"\"\"\n selection = self.__index_of_song(track)\n if selection is None:\n return\n\n # Handles paths\n output = output or os.getcwd()\n if not Path.is_dir(output):\n Print(\"Invalid directory: %s\" % output)\n return\n link = self.__mp3urls[selection]\n song = self.songs[selection]\n\n # Handles song's naming\n if rename:\n title = rename.strip() + \".mp3\"\n else:\n title = \" - \".join((self.artist, song.strip() + \".mp3\"))\n title = Path.standardizeName(title)\n songname = Path.join(output, title)\n\n try:\n response = self._checkCache(song)\n if response:\n content = response.content\n else:\n response = self._session.method(\"GET\", link)\n response.raise_for_status()\n\n size = file_size(len(response.content))\n Path.writeFile(songname, response.content, mode=\"wb\")\n Show.mediaDownloadMsg(title, size)\n self._cacheSong(songname, response)\n except:\n Print(\"Cannot download song %s\" % songname)\n\n def downloadAlbum(self, output=None):\n \"\"\"\n Download the full ablum.\n\n :param: output - directory to save album \n :default - current directory\n \"\"\"\n if not output:\n output = os.getcwd()\n elif not os.path.isdir(output):\n Print(\"Invalid directory: %s\" % output)\n return\n\n title = \"-\".join((self.artist, self.album))\n title = Path.standardizeName(title)\n fname = Path.join(output, title)\n\n # make a directory to store all the ablum's songs\n if not os.path.isdir(fname):\n os.mkdir(fname)\n Queued(self.download, self.songs, fname).run()\n Print(\"\\n%s %s saved\" % (self.artist, self.album))\n","sub_path":"pydatpiff/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":15516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"1724758","text":"import typing as ty\nimport PyQt5.QtGui as qtg\nimport PyQt5.QtCore as qtc\n\nimport uuid\nimport os\n\nfrom mate.ui.panels.map.map_painter import Painter\nfrom mate.ui.panels.map.layer._layer_main import _Layer\nfrom mate.net.nao import Nao\nimport mate.ui.utils as ui_utils\n\n\nclass Main(_Layer):\n name = \"motionPlanner\"\n\n def __init__(self, layer_model: ty.Dict, nao: Nao):\n merged_model = ui_utils.load_model(os.path.dirname(__file__) +\n \"/model.json\", layer_model)\n super(Main, self).__init__(merged_model, nao, str(uuid.uuid4()))\n\n self.targetPosition = None\n self.displacementVector = None\n self.transformation = [[0, 0], 0]\n self.transformation_identifier = uuid.uuid4()\n if self.nao.is_connected():\n self.connect(self.nao)\n\n def connect(self, nao: Nao):\n self.nao = nao\n self.subscribe()\n\n def update_transformation(self, data):\n scope = {\"input\": data.data, \"output\": None}\n exec(self.layer_model[\"config\"][\"transformation\"][\"key_lambda\"], scope)\n self.transformation = scope[\"output\"]\n\n def update_targetPosition(self, data):\n scope = {\"input\": data.data, \"output\": None}\n exec(self.layer_model[\"config\"][\"targetPosition\"][\"key_lambda\"], scope)\n self.targetPosition = scope[\"output\"]\n\n def update_displacementVector(self, data):\n scope = {\"input\": data.data, \"output\": None}\n exec(self.layer_model[\"config\"]\n [\"displacementVector\"][\"key_lambda\"], scope)\n self.displacementVector = scope[\"output\"]\n\n def subscribe(self):\n self.nao.debug_protocol.subscribe(\n self.layer_model[\"config\"][\"transformation\"][\"key\"],\n self.transformation_identifier,\n lambda i: self.update_transformation(i)\n )\n self.nao.debug_protocol.subscribe(\n self.layer_model[\"config\"][\"targetPosition\"][\"key\"],\n self.identifier,\n lambda i: self.update_targetPosition(i)\n )\n self.nao.debug_protocol.subscribe(\n self.layer_model[\"config\"][\"displacementVector\"][\"key\"],\n self.identifier,\n lambda i: self.update_displacementVector(i)\n )\n\n def paint(self, painter: Painter):\n painter.transformByPose(self.transformation)\n painter.setBrush(qtc.Qt.NoBrush)\n if self.targetPosition is not None:\n painter.setPen(qtg.QColor(\n self.layer_model[\"config\"][\"targetPosition\"][\"targetColor\"]), 0)\n painter.drawTarget([self.targetPosition, 0], self.layer_model[\"config\"]\n [\"targetPosition\"][\"targetCircleDiameter\"])\n if self.displacementVector is not None and self.displacementVector[0] != 0 and self.displacementVector[1] != 0:\n painter.setPen(qtg.QPen(qtg.QColor(self.layer_model[\"config\"][\"displacementVector\"][\"lineColor\"]),\n self.layer_model[\"config\"][\"displacementVector\"][\"lineWidth\"], qtc.Qt.DashDotLine))\n painter.drawLineF(0.0,\n 0.0,\n self.displacementVector[0],\n self.displacementVector[1])\n if self.targetPosition is not None and self.displacementVector is not None:\n painter.setPen(qtc.Qt.yellow, 0)\n painter.drawTarget([[self.targetPosition[0] - self.displacementVector[0],\n self.targetPosition[1] - self.displacementVector[1]], 0], self.layer_model[\"config\"][\"targetPosition\"][\"targetCircleDiameter\"])\n\n def destroy(self):\n if self.nao.is_connected():\n self.nao.debug_protocol.unsubscribe(\n self.layer_model[\"config\"][\"transformation\"][\"key\"],\n self.transformation_identifier)\n self.nao.debug_protocol.unsubscribe(\n self.layer_model[\"config\"][\"targetPosition\"][\"key\"],\n self.identifier)\n self.nao.debug_protocol.unsubscribe(\n self.layer_model[\"config\"][\"displacementVector\"][\"key\"],\n self.identifier)\n","sub_path":"tools/mate/mate/ui/panels/map/layer/motionPlanner/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"378786168","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n (r'^$', 'charitext.views.homepage'),\n (r'^about-us$', 'charitext.views.aboutus'),\n (r'^sign-up$', 'charitext.views.signup'),\n (r'^how-it-works$', 'charitext.views.howitworks'),\n (r'^contact-us$', 'charitext.views.contactus'),\n # url(r'^charitext/', include('charitext.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"charitext/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"229216104","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef showimg(img,figsize=(10,10)):\n plt.figure(figsize=figsize)\n plt.axis('off')\n plt.imshow(img)\n plt.show()\n\ndef read_rgb(impath):\n img = cv2.imread(impath)\n if img is None:\n return None\n return img[:,:,::-1]\n\ndef resize_ar(image,maxdim):\n shape = image.shape\n h = shape[0]\n w = shape[1]\n ar = h/w\n if w > h:\n scalefactor = maxdim/w\n h*=scalefactor\n h= int(h)\n w = maxdim\n else:\n scalefactor = maxdim/h\n w*=scalefactor\n w = int(w)\n h = maxdim\n if h*w > shape[0]*shape[1]:\n interpolation = cv2.INTER_CUBIC\n else:\n interpolation = cv2.INTER_AREA\n return cv2.resize(image,(w,h),interpolation)\n\ndef drawLandmarks(frame, landmarks):\n for x,y in landmarks:\n cv2.circle(frame,(x,y),2,(0,0,255),thickness=-1)\n\ndef drawboxes(frame,boxes,thickness=1):\n img = frame.copy()\n for x,y,xm,ym in boxes:\n cv2.rectangle(img,(x,y),(xm,ym),(0,255,0),thickness)\n return img\n\ndef boundBoxes(boxes,imw,imh,xywh=False):\n if xywh:\n boxes[:,2:] += boxes[:,:2]\n boxes[:,0] = np.maximum(boxes[:,0],0)\n boxes[:,1] = np.maximum(boxes[:,1],0)\n boxes[:,2] = np.minimum(boxes[:,2],imw-1)\n boxes[:,3] = np.minimum(boxes[:,3],imh-1)\n if xywh:\n boxes[:,2:] -= boxes[:,:2]\n return boxes\n\ndef getFaceOnly(image,facemap):\n mask = np.isin(facemap,FACE)\n mask = np.repeat(mask[...,None],3,axis=-1)\n return image * mask\n\ndef getminbbox(pts):\n xs = pts[:,0]\n ys = pts[:,1]\n return xs.min(), ys.min(), xs.max(), ys.max()\n\ndef pointsToNumpy(points):\n n = points.num_parts\n ret = np.zeros((n,2),dtype=np.int32)\n for i in range(n):\n temp = points.part(i)\n ret[i] = [temp.x,temp.y]\n return ret","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"596742651","text":"import ast\n# This is a boolean parser. It handles the following expressions\n\n# True and True X\n# True or True X\n# True and False X\n# True or False X\n# False and False X\n# False or True X\n# False and True X\n# False or False X\n\nuserTyped = input('Enter a boolean experession.').strip()\nwords = userTyped.split(' ')\n\na = words[1]\n\nt1 = ast.literal_eval(words[0].title())\nt2 = ast.literal_eval(words[2].title())\n\nif a ==('or'):\n print('or')\n if t1 or t2:\n print('True')\n else:\n print('False')\nelse:\n print('and')\n if t1 and t2:\n print('True')\n else:\n print('False')\n\n \n#if t1 and t2:\n# print(\"true1\")\n#elif t1 or t2:\n# print(\"true2\")\n \n","sub_path":"boolean.py","file_name":"boolean.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"244346051","text":"# Escriba un programa que lea un numero entero introduciendo pot el usuario.Su programa debe desplegar un mensaje indicando si su numero entero es par o inpar.\r\n\r\nentero = float(int(input('Inserte un numero entero:')))\r\n\r\na = (entero / 2)\r\nb = (entero % 2)\r\n\r\nif b <= 0.0:\r\n print('Es un numero par')\r\nelif b >= 1:\r\n print('Es un numero impar')\r\n","sub_path":"ejercicio34.py","file_name":"ejercicio34.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"181673229","text":"import services.controlers.filesControler\nimport json\nfrom json import JSONEncoder\nfrom json import JSONDecoder\nfrom services.exceptions import *\n\nfrom google.appengine.ext import blobstore\nfrom google.appengine.ext.webapp import blobstore_handlers\n\nclass RegisterFileView:\n\tdef get (self):\n\t\tresponse_data = {}\n\t\terror = ErrorNoAdmitePeticionesGet()\n\t\tresponse_data['status']=error.code\n\t\tresponse_data['message']=error.message\n\t\tresponse_data['data']=''\n\t\tjsonStringResponse = JSONEncoder().encode(response_data)\n\t\treturn jsonStringResponse\n\n\tdef post(self, upload_files, idPerformsAction):\n\t\tresponse_data = {}\n\t\ttry:\n\t\t\tresponse_data['status']=300\n\t\t\tresponse_data['message']=\"ERROR AL CARGAR ARCHIVO\"\n\t\t\tresponse_data['data']=''\n\n\t\t\tblob_info = upload_files[0]\n\n\t\t\tresponse_data['data']=str(blob_info.key())\n\t\t\tresponse_data['status']=OK\n\t\t\tresponse_data['message']=\"EL ARCHIVO SE HA CARGADO EXITOSAMENTE\"\n\n\t\t\tuserId =int(idPerformsAction)\n\t\t\tif response_data['status'] == OK:\n\t\t\t\taction = \"Carga de archivo EXITOSA\"\n\t\t\telse:\n\t\t\t\taction = \"Carga de archivo FALLIDA\"\n\t\t\tregisterLogTransactionView = services.views.registerLogTransactionView.RegisterLogTransactionView()\n\t\t\tregisterLogTransactionView.post(action, userId)\n\t\texcept Exception as e:\n\t\t\tresponse_data['status']=ERROR_NO_DEFINIDO\n\t\t\tresponse_data['message']=e.message\n\t\t\tresponse_data['data']=''\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\tjsonStringResponse = JSONEncoder().encode(response_data)\n\t\treturn jsonStringResponse","sub_path":"services/views/registerFileView.py","file_name":"registerFileView.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"576774623","text":"from Trader import *\r\n\r\n#Custom settings\r\nLOOP_DURATION = 15 * 60 # Time period (in seconds)\r\nMAX_LOOP_TIME = 10 * 24 * 60 * 60 # Max duration to run (in seconds)\r\nQUOTE_CURRENCY = \"LTC\" # Cryptocurrency of choice\r\nBASE_CURRENCY = \"USD\" # Fiat currency of choice\r\nCSV_PRICE = \"LTC_price.csv\" # Price CSV name\r\nCSV_TRANSACTIONS = \"LTC_transactions.csv\" # Transaction CSV name\r\nMODE = 2 # Mode of the Bot\r\n\r\n#Start thread\r\nstopFlag = Event()\r\nthread = Trader(stopFlag, LOOP_DURATION, QUOTE_CURRENCY, BASE_CURRENCY, CSV_PRICE, CSV_TRANSACTIONS, MODE)\r\nthread.daemon = True\r\nthread.start()\r\n\r\n#Set max time to run\r\ntime.sleep(MAX_LOOP_TIME)\r\nstopFlag.set()","sub_path":"LTC.py","file_name":"LTC.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"99350689","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.\n#\n# Written by Claudio Fahey \n#\n\n\"\"\"\nThis uses TensorFlow to read a set of TFRecord files and show the throughput.\nIt can read files synchronously (all processes stay at the same step) or\nasynchronously (all processes are independent).\n\"\"\"\n\nimport argparse\nimport time\nimport socket\nimport horovod.tensorflow as hvd\nimport tensorflow as tf\nimport numpy as np\nimport datetime\nimport glob\nimport json\nfrom tensorflow.contrib.data.python.ops import batching\nfrom tensorflow.contrib.data.python.ops import interleave_ops\nfrom tensorflow.contrib.data.python.ops import threadpool\nfrom token_bucket import TokenBucket\n\n\ndef process_record(example_serialized):\n # example_serialized = tf.Print(example_serialized, [example_serialized], 'example_serialized: ', first_n=10, summarize=10)\n # print('example_serialized=%s' % str(example_serialized))\n example_bytes = tf.io.decode_raw(example_serialized, tf.uint8, name='example_bytes')\n # example_bytes = tf.Print(example_bytes, [example_bytes], 'example_bytes: ', first_n=10, summarize=10)\n num_bytes = tf.size(example_bytes, out_type=tf.int64)\n # print('num_bytes=%s' % str(num_bytes))\n return num_bytes\n\n\ndef create_iterator(\n batch_size, num_threads, parallel_interleave_cycle_length=0, input_file_spec=None, input_filenames=None,\n dataset_buffer_size=None, prefetch_records=None):\n if input_filenames:\n ds = tf.data.Dataset.from_tensor_slices(tf.convert_to_tensor(input_filenames))\n elif input_file_spec:\n ds = tf.data.TFRecordDataset.list_files(input_file_spec)\n else:\n raise ValueError('You must specify input_file_spec or input_filenames')\n\n if parallel_interleave_cycle_length:\n ds = ds.apply(\n interleave_ops.parallel_interleave(\n lambda f: tf.data.TFRecordDataset(f, buffer_size=dataset_buffer_size),\n cycle_length=parallel_interleave_cycle_length))\n else:\n ds = ds.apply(tf.data.TFRecordDataset)\n\n ds = ds.prefetch(buffer_size=prefetch_records)\n ds = ds.repeat()\n num_splits = 1\n ds = ds.apply(\n batching.map_and_batch(\n map_func=process_record,\n batch_size=batch_size,\n num_parallel_batches=num_splits))\n ds = ds.prefetch(buffer_size=num_splits)\n\n if num_threads:\n ds = threadpool.override_threadpool(\n ds,\n threadpool.PrivateThreadPool(\n num_threads, display_name='input_pipeline_thread_pool'))\n ds_iterator = ds.make_initializable_iterator()\n tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,\n ds_iterator.initializer)\n else:\n ds_iterator = ds.make_one_shot_iterator()\n return ds_iterator\n\n\ndef worker(\n rank, size, input_file_specs, batch_size=256, warmup_sec=10.0, run_sec=60*60*4, num_threads=0, sync=False,\n warn_latency_sec=4.0, report_period_sec=2.0, round_robin_files=True,\n throttle_sleep_sec=0.01, throttle_total_rate_bytes_per_sec=0):\n\n if rank == 0:\n print('storage_benchmark_tensorflow: BEGIN')\n print(datetime.datetime.utcnow())\n\n metrics_file_name = '/imagenet-scratch/logs/storage_benchmark_tensorflow_metrics-%d.log' % rank\n with open(metrics_file_name, 'a') as metrics_file:\n\n hostname = socket.gethostname()\n\n # Set random seed to have deterministic behavior.\n tf.set_random_seed(rank + 1)\n\n # Round robin the input file spec. This allows multiple mount points to be used.\n input_file_spec = input_file_specs[hvd.local_rank() % len(input_file_specs)]\n print('rank=%3d: %s: input_file_spec=%s' % (rank, hostname, input_file_spec))\n\n if round_robin_files:\n # Distribute sets of file names evenly over all processes and without overlap.\n all_input_filenames = sorted(glob.glob(input_file_spec))\n num_files = len(all_input_filenames)\n i = rank\n input_filenames = []\n while i < num_files:\n input_filenames.append(all_input_filenames[i])\n i += size\n print('rank=%3d: Found %d total files. %d files assigned to this process.' % (rank, len(all_input_filenames), len(input_filenames)))\n if len(input_filenames) == 0:\n raise ValueError('Not enough matching files.')\n input_file_spec = None\n else:\n # This will use tf.data.TFRecordDataset.list_files to randomly distribute files.\n input_filenames = None\n\n #\n # Build execution graph.\n #\n\n ds_iterator = create_iterator(batch_size, num_threads, input_file_spec=input_file_spec, input_filenames=input_filenames)\n\n # num_bytes_tensor is an int64 tensor of shape (batch_size).\n num_bytes_tensor = ds_iterator.get_next()\n\n # When num_bytes_for_step_tensor is evaluated, it reads the TFRecord files.\n num_bytes_for_step_tensor = tf.reduce_sum(num_bytes_tensor)\n\n # The following operations are used to synchronize the processes when running in sync mode.\n if sync:\n stop_flag_placeholder = tf.placeholder(tf.bool, shape=())\n stop_flag_broadcast_tensor = hvd.broadcast(stop_flag_placeholder, 0, 'stop_flag_broadcast')\n num_bytes_for_step_placeholder = tf.placeholder(tf.int64, shape=())\n total_bytes_for_step_tensor = hvd.allreduce(num_bytes_for_step_placeholder, average=False)\n\n #\n # Start the TensorFlow session and execute the graph.\n #\n\n config = tf.ConfigProto()\n config.device_count['GPU'] = 0\n config.intra_op_parallelism_threads = 1\n config.inter_op_parallelism_threads = 1\n print('rank=%3d: Creating session' % rank)\n with tf.Session(config=config) as session:\n print('rank=%3d: Session created' % rank)\n session.run([tf.initializers.global_variables(), tf.tables_initializer()])\n print('rank=%3d: Initialized variables' % rank)\n\n # Run first step. This can take 30 seconds for 100,000 files.\n print('rank=%3d: Running first step' % rank)\n _ = session.run(num_bytes_for_step_tensor)\n print('rank=%3d: First step complete' % rank)\n\n # Wait for barrier so we know when all processes have finished the first step.\n print('rank=%3d: Waiting for barrier' % rank)\n session.run(hvd.allreduce(tf.constant(0)))\n if rank == 0:\n print('rank=%3d: Completed waiting for barrier' % rank)\n\n # To ensure that all processes finish warmup and stop at exactly the same time,\n # the rank 0 node broadcasts its time to all other ranks.\n # This also serves as a synchronization barrier.\n local_t0 = time.time()\n t0_tensor = tf.constant(local_t0, tf.float64)\n t0_tensor = hvd.broadcast(t0_tensor, 0, 't0')\n t0 = session.run(t0_tensor)\n\n start_time = t0 + warmup_sec\n stop_time = start_time + run_sec\n step = 0\n warmed_up = False\n num_records = 0\n num_bytes = 0\n total_bytes = 0\n next_report_time = time.time() + report_period_sec\n\n if throttle_total_rate_bytes_per_sec:\n throttle_rate_bytes_per_sec = throttle_total_rate_bytes_per_sec / size\n burst_sec = 1.0\n throttle = TokenBucket(tokens=throttle_rate_bytes_per_sec*burst_sec, fill_rate=throttle_rate_bytes_per_sec)\n else:\n throttle = None\n\n while True:\n # Reset all counters when warmup completes.\n t = time.time()\n if not warmed_up and t >= start_time:\n print('rank=%3d: warmup complete at step %d' % (rank, step))\n warmed_up = True\n t0 = start_time\n step = 0\n num_records = 0\n num_bytes = 0\n total_bytes = 0\n\n # Run a single step of batch_size records per process.\n run_options = tf.RunOptions()\n # run_options.timeout_in_ms = 10000\n num_bytes_for_step = np.int64(0)\n try:\n num_bytes_for_step = session.run(num_bytes_for_step_tensor, options=run_options)\n except Exception as e:\n print('rank=%3d: %s: ERROR: %s' % (rank, hostname, e))\n\n step_dt = time.time() - t\n\n if (warmed_up or step >= 1) and step_dt > warn_latency_sec:\n print('rank=%3d: %s: WARNING: step %d took %0.3f seconds' %\n (rank, hostname, step, step_dt))\n next_report_time = 0.0\n\n # Calculate local stop flag. In sync mode, this is broadcast from rank 0.\n stop_flag = time.time() >= stop_time\n\n # Use Horovod to aggregate the byte counter across all processes.\n # This also acts as a synchronization barrier, much like gradient descent when\n # it shares gradients.\n # Also coordinate the stop flag so all processes stop at the same step.\n sync_dt = 0.0\n if sync:\n t = time.time()\n total_bytes_for_step, stop_flag = session.run(\n [total_bytes_for_step_tensor, stop_flag_broadcast_tensor],\n feed_dict={\n num_bytes_for_step_placeholder: num_bytes_for_step,\n stop_flag_placeholder: stop_flag,\n },\n )\n\n total_bytes += total_bytes_for_step\n\n sync_dt = time.time() - t\n if warmed_up and sync_dt > 30.0:\n print('rank=%3d: %s: WARNING: sync after step %d took %0.3f seconds' %\n (rank, hostname, step, sync_dt))\n next_report_time = 0.0\n\n num_records += batch_size\n num_bytes += num_bytes_for_step\n t = time.time()\n\n metrics = {\n '@timestamp': datetime.datetime.utcnow().isoformat() + 'Z',\n 'batch_size': batch_size,\n 'rank': rank,\n 'hostname': hostname,\n 'step': step,\n 'num_bytes': int(num_bytes_for_step),\n 'latency_sec': step_dt,\n 'sync_latency_sec': sync_dt,\n }\n json.dump(metrics, metrics_file)\n metrics_file.write(\"\\n\")\n metrics_file.flush()\n\n if t >= next_report_time:\n dt = t - t0\n if not sync:\n records_per_sec = num_records / dt\n bytes_per_sec = num_bytes / dt\n MB_per_sec = bytes_per_sec / 1e6\n print('rank=%3d: warmed_up=%d, step=%6d, records/sec=%8.0f, MB/sec=%11.3f, records=%10d, bytes=%15d, dt=%9.3f' %\n (rank, warmed_up, step, records_per_sec, MB_per_sec, num_records, num_bytes, dt))\n if sync:\n if rank == 0:\n total_records = num_records * size\n records_per_sec = total_records / dt\n bytes_per_sec = total_bytes / dt\n MB_per_sec = bytes_per_sec / 1e6\n print('TOTAL: warmed up=%d, step=%6d, records/sec=%8.0f, MB/sec=%11.3f, records=%10d, bytes=%15d, dt=%9.3f' %\n (warmed_up, step, records_per_sec, MB_per_sec, total_records, total_bytes, dt))\n next_report_time = t + report_period_sec\n\n # Throttle byte rate.\n if throttle:\n while not throttle.consume(num_bytes_for_step):\n # print('sleeping')\n time.sleep(throttle_sleep_sec)\n\n if stop_flag:\n print('rank=%3d: %s: complete at step %d' % (rank, hostname, step))\n break\n\n step += 1\n\n # Use Horovod to aggregate the final counters across all processes.\n num_steps_tensor = tf.constant(step)\n num_bytes_tensor = tf.constant(num_bytes)\n total_steps_tensor = hvd.allreduce(num_steps_tensor, average=False)\n total_bytes_tensor = hvd.allreduce(num_bytes_tensor, average=False)\n total_steps, total_bytes = session.run([total_steps_tensor, total_bytes_tensor])\n if rank == 0:\n dt = stop_time - start_time\n num_records = total_steps * batch_size\n records_per_sec = num_records / dt\n total_GB = total_bytes / 1e9\n bytes_per_sec = total_bytes / dt\n MB_per_sec = bytes_per_sec / 1e6\n print('FINAL: number of processes: %12d' % size)\n print('FINAL: batch size: %12d' % batch_size)\n print('FINAL: sync: %12s' % sync)\n print('FINAL: round robin files: %12s' % round_robin_files)\n print('FINAL: number of records: %12d' % num_records)\n print('FINAL: GB: %12.3f' % total_GB)\n print('FINAL: elapsed sec: %12.3f' % dt)\n print('FINAL: records/sec: %12.0f' % records_per_sec)\n print('FINAL: MB/sec: %12.3f' % MB_per_sec)\n\n if rank == 0:\n print('storage_benchmark_tensorflow: END')\n\n\ndef main():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-i', '--input_file_specs', action='append', help='Input file spec', required=True)\n args = parser.parse_args()\n hvd.init()\n worker(hvd.rank(), hvd.size(), args.input_file_specs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"storage_benchmark_tensorflow.py","file_name":"storage_benchmark_tensorflow.py","file_ext":"py","file_size_in_byte":14256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"305776509","text":"from drf_spectacular.utils import extend_schema, OpenApiExample, OpenApiResponse\nfrom rest_framework import status\nfrom rest_framework.generics import ListCreateAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom api.models import TestRunRequest\nfrom api.serializers import TestRunRequestSerializer, TestRunRequestItemSerializer\nfrom api.tasks import execute_test_run_request\nfrom api.usecases import get_assets\n\n\n@extend_schema(description='Create a test run job to queue for Celery', methods=[\"POST\"])\nclass TestRunRequestAPIView(ListCreateAPIView):\n \"\"\"\n Returns a list of all test runs and their corresponding status information\n \"\"\"\n serializer_class = TestRunRequestSerializer\n queryset = TestRunRequest.objects.all().order_by('-created_at')\n\n def perform_create(self, serializer):\n instance = serializer.save()\n execute_test_run_request.delay(instance.id)\n\n\nclass TestRunRequestItemAPIView(RetrieveAPIView):\n \"\"\"\n Returns a test run object including all logs for the run\n \"\"\"\n serializer_class = TestRunRequestItemSerializer\n queryset = TestRunRequest.objects.all()\n lookup_field = 'pk'\n\n\nclass AssetsAPIView(APIView):\n \"\"\"\n Returns an object with 2 lists, one for all the available test paths and one for all the available environments\n \"\"\"\n example = {\n \"available_paths\": [\n {\n \"id\": 5,\n \"path\": \"api/tests/test_models.py\"\n },\n {\n \"id\": 6,\n \"path\": \"api/tests/test_tasks.py\"\n },\n ],\n \"test_envs\": [\n {\n \"id\": 1,\n \"name\": \"env1\"\n },\n {\n \"id\": 10,\n \"name\": \"env10\"\n },\n {\n \"id\": 100,\n \"name\": \"env100\"\n },\n ]\n }\n\n @extend_schema(\n responses={200: OpenApiResponse(\n response=200,\n examples=[OpenApiExample(\n name='Simple Example',\n value=example\n )]\n )}\n )\n def get(self, request):\n return Response(status=status.HTTP_200_OK, data=get_assets())\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"480290762","text":"import DeepFried2 as df\n\nimport theano.tensor as _T\n\n\nclass SpatialMaxPooling3D(df.Module):\n def __init__(self, k_w, k_h, k_d, d_w=None, d_h=None, d_d=None, ignore_border=False):\n df.Module.__init__(self)\n self.k_w = k_w\n self.k_h = k_h\n self.k_d = k_d\n self.ignore_border = ignore_border\n\n if d_w is None:\n self.d_w = self.k_w\n else:\n self.d_w = d_w\n\n if d_h is None:\n self.d_h = self.k_h\n else:\n self.d_h = d_h\n\n if d_d is None:\n self.d_d = self.k_d\n else:\n self.d_d = d_d\n\n def symb_forward(self, symb_input):\n \"\"\" 3d max pooling taken from github.com/lpigou/Theano-3D-ConvNet/\n (with modified shuffeling) \"\"\"\n if symb_input.ndim < 5:\n raise NotImplementedError('max pooling 3D requires a dimension >= 5')\n\n height_width_shape = symb_input.shape[-2:]\n\n batch_size = _T.prod(symb_input.shape[:-2])\n batch_size = _T.shape_padright(batch_size, 1)\n\n new_shape = _T.cast(_T.join(0, batch_size, _T.as_tensor([1,]), height_width_shape), 'int32')\n\n input_4d = _T.reshape(symb_input, new_shape, ndim=4)\n\n # downsample height and width first\n # other dimensions contribute to batch_size\n op = _T.signal.downsample.DownsampleFactorMax((self.k_h, self.k_w), self.ignore_border, st=(self.d_h, self.d_w))\n output = op(input_4d)\n\n outshape = _T.join(0, symb_input.shape[:-2], output.shape[-2:])\n out = _T.reshape(output, outshape, ndim=symb_input.ndim)\n\n vol_dim = symb_input.ndim\n\n shufl = (list(range(vol_dim-4)) + [vol_dim-2]+[vol_dim-1]+[vol_dim-3]+[vol_dim-4])\n input_depth = out.dimshuffle(shufl)\n vol_shape = input_depth.shape[-2:]\n\n batch_size = _T.prod(input_depth.shape[:-2])\n batch_size = _T.shape_padright(batch_size,1)\n\n new_shape = _T.cast(_T.join(0, batch_size, _T.as_tensor([1,]), vol_shape), 'int32')\n input_4D_depth = _T.reshape(input_depth, new_shape, ndim=4)\n\n # downsample depth\n # other dimensions contribute to batch_size\n op = _T.signal.downsample.DownsampleFactorMax((1,self.k_d), self.ignore_border, st=(1,self.d_d))\n outdepth = op(input_4D_depth)\n\n outshape = _T.join(0, input_depth.shape[:-2], outdepth.shape[-2:])\n shufl = (list(range(vol_dim-4)) + [vol_dim-1]+[vol_dim-2]+[vol_dim-4]+[vol_dim-3])\n\n return _T.reshape(outdepth, outshape, ndim=symb_input.ndim).dimshuffle(shufl)\n","sub_path":"DeepFried2/layers/SpatialMaxPooling3D.py","file_name":"SpatialMaxPooling3D.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"3227727","text":"import numpy as np\nimport scipy.integrate\nimport matplotlib.pyplot as plt\nimport IPython\n\n# L = 1.0\n# L2 = L * 0.5\n#\n#\n# def dist(y, x):\n# return np.sqrt(x**2 + y**2)\n#\n#\n# # numerical answer\n# v1, _ = scipy.integrate.dblquad(dist, -L2, L2, -L2, L2)\n#\n# # (approximate) analytic answer\n# v2 = L**3 * 1.14779 / 3\n#\n# print(f'Numerical answer = {v1}\\n Analytic answer = {v2}')\n\nclass Circle:\n def __init__(self, c, r):\n self.c = np.array(c)\n self.r = r\n\n\n\n# object\n# ko = 100 # spring constant\n#\n# fn = 10 # normal force\n# mu = 1 # coefficient of friction\n# wo = mu * fn\n\n\ndef main():\n N = 1000\n t = np.linspace(0, 3, N)\n dt = t[1] - t[0]\n\n f = np.zeros(N) # measured force\n\n # object is a circle\n r_o = 0.5\n p_o = np.zeros((N, 2))\n th_o = np.zeros(N)\n\n k_o = 1000 # spring constant\n fn = 100\n mu = 1\n w_o = fn * mu\n tau_o = 2 * r_o * w_o / 3\n\n # EE position and velocity\n p_ee = np.zeros((N, 2))\n p_ee[0, :] = np.array([-0.1, -0.5])\n v_ee = 1 * np.array([0, 1])\n\n for i in range(N-1):\n d_ee_o = np.linalg.norm(p_ee[i, :] - p_o[i, :])\n\n if d_ee_o < r_o:\n depth = r_o - d_ee_o\n # normal force is due to elasticity\n f_elastic_n = k_o * depth\n\n # tangential force is from friction\n f_elastic_t = mu * f_elastic_n\n\n # tangential force results in a moment\n tau_elastic = f_elastic_t * d_ee_o\n\n if f_elastic_n > w_o:\n direction = (p_o[i, :] - p_ee[i, :]) / d_ee_o\n p_o[i, :] = p_o[i, :] + (depth - w_o / k_o) * direction\n\n if tau_elastic > tau_o:\n # need to rotate in response to tangential force, when it\n # overcomes friction\n pass\n\n # recalculate now that object has moved\n d_ee_o = np.linalg.norm(p_ee[i, :] - p_o[i, :])\n f[i] = k_o * (r_o - d_ee_o)\n else:\n f[i] = 0\n\n p_o[i+1, :] = p_o[i, :]\n p_ee[i+1, :] = p_ee[i, :] + v_ee * dt\n\n plt.figure(1)\n plt.plot(t, f)\n plt.grid()\n\n plt.figure(2)\n plt.plot(p_ee[:, 0], p_ee[:, 1], label='EE')\n plt.plot(p_o[:, 0], p_o[:, 1], label='Object')\n plt.legend()\n plt.grid()\n plt.show()\n\n IPython.embed()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/planar_force.py","file_name":"planar_force.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"28022146","text":"import click\nimport os\nimport json\nimport re\nimport psutil\n\nfrom bentoml.utils.lazy_loader import LazyLoader\nfrom bentoml.utils.s3 import is_s3_url\nfrom bentoml.server.api_server import BentoAPIServer\nfrom bentoml.exceptions import BentoMLException, CLIException\nfrom bentoml.server import start_dev_server, start_prod_server\nfrom bentoml.server.open_api import get_open_api_spec_json\nfrom bentoml.utils import (\n ProtoMessageToDict,\n status_pb_to_error_code_and_message,\n)\nfrom bentoml.cli.click_utils import (\n CLI_COLOR_WARNING,\n CLI_COLOR_SUCCESS,\n _echo,\n BentoMLCommandGroup,\n conditional_argument,\n)\nfrom bentoml.cli.utils import (\n echo_docker_api_result,\n Spinner,\n get_default_yatai_client,\n)\nfrom bentoml.saved_bundle import (\n load,\n load_bento_service_api,\n load_bento_service_metadata,\n)\n\ntry:\n import click_completion\n\n click_completion.init()\n shell_types = click_completion.DocumentedChoice(click_completion.core.shells)\nexcept ImportError:\n # click_completion package is optional to use BentoML cli,\n click_completion = None\n shell_types = click.Choice(['bash', 'zsh', 'fish', 'powershell'])\n\n\nyatai_proto = LazyLoader('yatai_proto', globals(), 'bentoml.yatai.proto')\n\n\ndef escape_shell_params(param):\n k, v = param.split('=')\n v = re.sub(r'([^a-zA-Z0-9])', r'\\\\\\1', v)\n return '{}={}'.format(k, v)\n\n\ndef to_valid_docker_image_name(name):\n # https://docs.docker.com/engine/reference/commandline/tag/#extended-description\n return name.lower().strip(\"._-\")\n\n\ndef to_valid_docker_image_version(version):\n # https://docs.docker.com/engine/reference/commandline/tag/#extended-description\n return version.encode(\"ascii\", errors=\"ignore\").decode().lstrip(\".-\")[:128]\n\n\ndef validate_tag(ctx, param, tag): # pylint: disable=unused-argument\n if \":\" in tag:\n name, version = tag.split(\":\")[:2]\n else:\n name, version = tag, None\n\n valid_name_pattern = re.compile(\n r\"\"\"\n ^(\n [a-z0-9]+ # alphanumeric\n (.|_{1,2}|-+)? # seperators\n )*$\n \"\"\",\n re.VERBOSE,\n )\n valid_version_pattern = re.compile(\n r\"\"\"\n ^\n [a-zA-Z0-9] # cant start with .-\n [ -~]{,127} # ascii match rest, cap at 128\n $\n \"\"\",\n re.VERBOSE,\n )\n\n if not valid_name_pattern.match(name):\n raise click.BadParameter(\n f\"Provided Docker Image tag {tag} is invalid. \"\n \"Name components may contain lowercase letters, digits \"\n \"and separators. A separator is defined as a period, \"\n \"one or two underscores, or one or more dashes.\",\n ctx=ctx,\n param=param,\n )\n if version and not valid_version_pattern.match(version):\n raise click.BadParameter(\n f\"Provided Docker Image tag {tag} is invalid. \"\n \"A tag name must be valid ASCII and may contain \"\n \"lowercase and uppercase letters, digits, underscores, \"\n \"periods and dashes. A tag name may not start with a period \"\n \"or a dash and may contain a maximum of 128 characters.\",\n ctx=ctx,\n param=param,\n )\n return tag\n\n\ndef resolve_bundle_path(bento, pip_installed_bundle_path):\n if pip_installed_bundle_path:\n assert (\n bento is None\n ), \"pip installed BentoService commands should not have Bento argument\"\n return pip_installed_bundle_path\n\n if os.path.isdir(bento) or is_s3_url(bento):\n # saved_bundle already support loading local and s3 path\n return bento\n\n elif \":\" in bento:\n # assuming passing in BentoService in the form of Name:Version tag\n yatai_client = get_default_yatai_client()\n name, version = bento.split(':')\n get_bento_result = yatai_client.repository.get(name, version)\n if get_bento_result.status.status_code != yatai_proto.status_pb2.Status.OK:\n error_code, error_message = status_pb_to_error_code_and_message(\n get_bento_result.status\n )\n raise BentoMLException(\n f'BentoService {name}:{version} not found - '\n f'{error_code}:{error_message}'\n )\n if get_bento_result.bento.uri.s3_presigned_url:\n # Use s3 presigned URL for downloading the repository if it is presented\n return get_bento_result.bento.uri.s3_presigned_url\n else:\n return get_bento_result.bento.uri.uri\n else:\n raise BentoMLException(\n f'BentoService \"{bento}\" not found - either specify the file path of '\n f'the BentoService saved bundle, or the BentoService id in the form of '\n f'\"name:version\"'\n )\n\n\ndef create_bento_service_cli(pip_installed_bundle_path=None):\n # pylint: disable=unused-variable\n\n @click.group(cls=BentoMLCommandGroup)\n @click.version_option()\n def bentoml_cli():\n \"\"\"\n BentoML CLI tool\n \"\"\"\n\n # Example Usage: bentoml run {API_NAME} {BUNDLE_PATH} --input=...\n @bentoml_cli.command(\n help=\"Run a API defined in saved BentoService bundle from command line\",\n short_help=\"Run API function\",\n context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),\n )\n @conditional_argument(pip_installed_bundle_path is None, \"bento\", type=click.STRING)\n @click.argument(\"api_name\", type=click.STRING)\n @click.argument('run_args', nargs=-1, type=click.UNPROCESSED)\n def run(api_name, run_args, bento=None):\n saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)\n\n api = load_bento_service_api(saved_bundle_path, api_name)\n api.handle_cli(run_args)\n\n # Example Usage: bentoml info {BUNDLE_PATH}\n @bentoml_cli.command(\n help=\"List all APIs defined in the BentoService loaded from saved bundle\",\n short_help=\"List APIs\",\n )\n @conditional_argument(pip_installed_bundle_path is None, \"bento\", type=click.STRING)\n def info(bento=None):\n \"\"\"\n List all APIs defined in the BentoService loaded from saved bundle\n \"\"\"\n saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)\n\n bento_service_metadata_pb = load_bento_service_metadata(saved_bundle_path)\n output = json.dumps(ProtoMessageToDict(bento_service_metadata_pb), indent=2)\n _echo(output)\n\n # Example usage: bentoml open-api-spec {BUNDLE_PATH}\n @bentoml_cli.command(\n name=\"open-api-spec\",\n help=\"Display API specification JSON in Open-API format\",\n short_help=\"Display OpenAPI/Swagger JSON specs\",\n )\n @conditional_argument(pip_installed_bundle_path is None, \"bento\", type=click.STRING)\n def open_api_spec(bento=None):\n saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)\n\n bento_service = load(saved_bundle_path)\n\n _echo(json.dumps(get_open_api_spec_json(bento_service), indent=2))\n\n # Example Usage: bentoml serve {BUNDLE_PATH} --port={PORT}\n @bentoml_cli.command(\n help=\"Start a dev API server serving specified BentoService\",\n short_help=\"Start local dev API server\",\n )\n @conditional_argument(pip_installed_bundle_path is None, \"bento\", type=click.STRING)\n @click.option(\n \"--port\",\n type=click.INT,\n default=BentoAPIServer._DEFAULT_PORT,\n help=f\"The port to listen on for the REST api server, \"\n f\"default is ${BentoAPIServer._DEFAULT_PORT}\",\n envvar='BENTOML_PORT',\n )\n @click.option(\n '--enable-microbatch/--disable-microbatch',\n default=False,\n help=\"Run API server with micro-batch enabled\",\n envvar='BENTOML_ENABLE_MICROBATCH',\n )\n @click.option(\n '--run-with-ngrok',\n is_flag=True,\n default=False,\n help=\"Use ngrok to relay traffic on a public endpoint to this\"\n \"API server on localhost\",\n envvar='BENTOML_ENABLE_NGROK',\n )\n def serve(port, bento=None, enable_microbatch=False, run_with_ngrok=False):\n saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)\n start_dev_server(saved_bundle_path, port, enable_microbatch, run_with_ngrok)\n\n # Example Usage:\n # bentoml serve-gunicorn {BUNDLE_PATH} --port={PORT} --workers={WORKERS}\n @bentoml_cli.command(\n help=\"Start a production API server serving specified BentoService\",\n short_help=\"Start production API server\",\n )\n @conditional_argument(pip_installed_bundle_path is None, \"bento\", type=click.STRING)\n @click.option(\n \"-p\",\n \"--port\",\n type=click.INT,\n default=BentoAPIServer._DEFAULT_PORT,\n help=f\"The port to listen on for the REST api server, \"\n f\"default is ${BentoAPIServer._DEFAULT_PORT}\",\n envvar='BENTOML_PORT',\n )\n @click.option(\n \"-w\",\n \"--workers\",\n type=click.INT,\n default=None,\n help=\"Number of workers will start for the gunicorn server\",\n envvar='BENTOML_GUNICORN_WORKERS',\n )\n @click.option(\"--timeout\", type=click.INT, default=None)\n @click.option(\n '--enable-microbatch/--disable-microbatch',\n default=False,\n help=\"Run API server with micro batch enabled\",\n envvar='BENTOML_ENABLE_MICROBATCH',\n )\n @click.option(\n '--microbatch-workers',\n type=click.INT,\n default=1,\n help=\"Number of micro-batch request dispatcher workers\",\n envvar='BENTOML_MICROBATCH_WORKERS',\n )\n def serve_gunicorn(\n port,\n workers,\n timeout,\n bento=None,\n enable_microbatch=False,\n microbatch_workers=1,\n ):\n if not psutil.POSIX:\n _echo(\n \"The `bentoml server-gunicon` command is only supported on POSIX. \"\n \"On windows platform, use `bentoml serve` for local API testing and \"\n \"docker for running production API endpoint: \"\n \"https://docs.docker.com/docker-for-windows/ \"\n )\n return\n saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)\n start_prod_server(\n saved_bundle_path,\n port,\n timeout,\n workers,\n enable_microbatch,\n microbatch_workers,\n )\n\n @bentoml_cli.command(\n help=\"Install shell command completion\",\n short_help=\"Install shell command completion\",\n )\n @click.option(\n '--append/--overwrite',\n help=\"Append the completion code to the file\",\n default=None,\n )\n @click.argument('shell', required=False, type=shell_types)\n @click.argument('path', required=False)\n def install_completion(append, shell, path):\n if click_completion:\n # click_completion package is imported\n shell, path = click_completion.core.install(\n shell=shell, path=path, append=append\n )\n click.echo('%s completion installed in %s' % (shell, path))\n else:\n click.echo(\n \"'click_completion' is required for BentoML auto-completion, \"\n \"install it with `pip install click_completion`\"\n )\n\n @bentoml_cli.command(\n help='Containerizes given Bento into a ready-to-use Docker image.',\n short_help=\"Containerizes given Bento into a ready-to-use Docker image\",\n )\n @click.argument(\"bento\", type=click.STRING)\n @click.option('-p', '--push', is_flag=True)\n @click.option(\n '-t',\n '--tag',\n help=\"Optional image tag. If not specified, Bento will generate one from \"\n \"the name of the Bento.\",\n required=False,\n callback=validate_tag,\n )\n @click.option(\n '-u', '--username', type=click.STRING, required=False,\n )\n @click.option(\n '-p', '--password', type=click.STRING, required=False,\n )\n def containerize(bento, push, tag, username, password):\n \"\"\"Containerize specified BentoService.\n\n BENTO is the target BentoService to be containerized, referenced by its name\n and version in format of name:version. For example: \"iris_classifier:v1.2.0\"\n\n `bentoml containerize` command also supports the use of the `latest` tag\n which will automatically use the last built version of your Bento.\n\n You can provide a tag for the image built by Bento using the\n `--docker-image-tag` flag. Additionally, you can provide a `--push` flag,\n which will push the built image to the Docker repository specified by the\n image tag.\n\n You can also prefixing the tag with a hostname for the repository you wish\n to push to.\n e.g. `bentoml containerize IrisClassifier:latest --push --tag username/iris`\n would build a Docker image called `username/iris:latest` and push that to\n Docker Hub.\n\n By default, the `containerize` command will use the credentials provided by\n Docker. You may provide your own through `--username` and `--password`.\n \"\"\"\n saved_bundle_path = resolve_bundle_path(bento, pip_installed_bundle_path)\n\n _echo(f\"Found Bento: {saved_bundle_path}\")\n\n bento_metadata = load_bento_service_metadata(saved_bundle_path)\n name = to_valid_docker_image_name(bento_metadata.name)\n version = to_valid_docker_image_version(bento_metadata.version)\n\n if not tag:\n _echo(\n \"Tag not specified, using tag parsed from \"\n f\"BentoService: '{name}:{version}'\"\n )\n tag = f\"{name}:{version}\"\n if \":\" not in tag:\n _echo(\n \"Image version not specified, using version parsed \"\n f\"from BentoService: '{version}'\",\n CLI_COLOR_WARNING,\n )\n tag = f\"{tag}:{version}\"\n\n import docker\n\n docker_api = docker.APIClient()\n try:\n with Spinner(f\"Building Docker image {tag} from {bento} \\n\"):\n for line in echo_docker_api_result(\n docker_api.build(path=saved_bundle_path, tag=tag, decode=True,)\n ):\n _echo(line)\n except docker.errors.APIError as error:\n raise CLIException(f'Could not build Docker image: {error}')\n\n _echo(\n f'Finished building {tag} from {bento}', CLI_COLOR_SUCCESS,\n )\n\n if push:\n auth_config_payload = (\n {\"username\": username, \"password\": password}\n if username or password\n else None\n )\n\n try:\n with Spinner(f\"Pushing docker image to {tag}\\n\"):\n for line in echo_docker_api_result(\n docker_api.push(\n repository=tag,\n stream=True,\n decode=True,\n auth_config=auth_config_payload,\n )\n ):\n _echo(line)\n _echo(\n f'Pushed {tag} to {name}', CLI_COLOR_SUCCESS,\n )\n except (docker.errors.APIError, BentoMLException) as error:\n raise CLIException(f'Could not push Docker image: {error}')\n\n # pylint: enable=unused-variable\n return bentoml_cli\n","sub_path":"bentoml/cli/bento_service.py","file_name":"bento_service.py","file_ext":"py","file_size_in_byte":15455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"125177875","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#\n# alien_invasion.py -\n#\n# Author: Bao Hexing \n# Created: 29 March 2018\n#\n# Copyright © 2018, Bao Hexing. All Rights Reserved.\n\nimport sys\nimport pygame_sdl2 as pygame\n\nfrom settings import Settings\nfrom ship import Ship\n\ndef run_game():\n pygame.init()\n config = Settings()\n screen = pygame.display.set_mode((config.screen_width, config.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n\n ship = Ship(screen)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n ship.blitme()\n screen.fill(config.bgcolor)\n pygame.display.flip()\n\nif __name__ == \"__main__\":\n run_game()\n","sub_path":"pygame/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"209601399","text":"# -*- coding:utf-8 -*-\n# @Time: 2020/9/26 17:49\n# @Author:XuMengting\n# @Email:1871489154@qq.com\nimport os\nimport random\nimport wave\nimport numpy as np\nfrom picklable_itertools import xrange\nfrom tqdm import tqdm\nfrom hlp.tts.wavenet.model.model import model\nfrom hlp.tts.wavenet.model.audio import process_wav, one_hot\n\npredict_seconds = 1\nsample_argmax = False\nsample_temperature = 1.0 # Temperature for sampling. > 1.0 for more exploring, < 1.0 for conservative samples.\npredict_use_softmax_as_input = False # Uses the softmax rather than the argmax as in input for the next step.\npredict_initial_input = None\n\n\n# 创建一个后缀为.wav文件名用于写入声音,返回一个生成的sample的名称\ndef make_sample_name(epoch, predict_seconds, predict_use_softmax_as_input, sample_argmax, sample_temperature, seed):\n sample_str = ''\n if predict_use_softmax_as_input:\n sample_str += '_soft-in'\n if sample_argmax:\n sample_str += '_argmax'\n else:\n sample_str += '_sample'\n if sample_temperature:\n sample_str += '-temp-%s' % sample_temperature\n sample_name = 'sample_epoch-%05d_%02ds_%s_seed-%d.wav' % (epoch, int(predict_seconds), sample_str, seed)\n return sample_name\n\n\n# 打开sample_filename.wav文件,配置声道数,量化位数,采样频率头部\ndef make_sample_stream(desired_sample_rate, sample_filename):\n sample_file = wave.open(sample_filename, mode='w')\n sample_file.setnchannels(1)\n sample_file.setframerate(desired_sample_rate)\n sample_file.setsampwidth(1)\n return sample_file\n\n\n# write_samples(sample_stream, [output_val])在配置好头部的wav文件中写入声音流的值\ndef write_samples(sample_file, out_val, use_ulaw):\n s = np.argmax(out_val, axis=-1).astype('uint8')\n if use_ulaw:\n s = ulaw2lin(s)\n s = bytearray(list(s))\n sample_file.writeframes(s)\n\n\n# 将输出的分布distribution变为每一个采样点的value\ndef draw_sample(output_dist, sample_temperature, sample_argmax, _rnd):\n if sample_argmax:\n output_dist = np.eye(256)[np.argmax(output_dist, axis=-1)]\n else:\n if sample_temperature is not None:\n output_dist = softmax(output_dist, sample_temperature)\n output_dist = output_dist / np.sum(output_dist + 1e-7)\n output_dist = random.multinomial(1, output_dist)\n return output_dist\n\n\ndef softmax(x, temp, mod=np):\n x = mod.log(x) / temp\n e_x = mod.exp(x - mod.max(x, axis=-1))\n return e_x / mod.sum(e_x, axis=-1)\n\n\ndef ulaw2lin(x, u=255.):\n max_value = np.iinfo('uint8').max\n # max_value = 255\n min_value = np.iinfo('uint8').min\n # min_value = 0\n x = x.astype('float64', casting='safe')\n # 将x转化为float类型\n x -= min_value\n x /= ((max_value - min_value) / 2.)\n x -= 1.\n # 2(x - 0)\n # (255 - 0) - 1\n x = np.sign(x) * (1 / u) * (((1 + u) ** np.abs(x)) - 1)\n # np.sign取数字前面的符号,正数取1,0取0,负数取-1。\n # np.abs返回数字的绝对值\n x = float_to_uint8(x)\n return x\n\n\n# 浮点数float(0~1)转为(0~255)uint8数据\ndef float_to_uint8(x):\n x += 1.\n x /= 2.\n uint8_max_value = np.iinfo('uint8').max\n x *= uint8_max_value\n x = x.astype('uint8')\n return x\n\n\ndef predict(desired_sample_rate, predict_initial_input, use_ulaw, fragment_length, predict_seconds):\n run_dir = './'\n # 检查点的目录\n # checkpoint_dir = os.path.join(run_dir, 'checkpoint')\n # 最近一个检查点\n # last_checkpoint = sorted(os.listdir(checkpoint_dir))[-1]\n # 存放生成音频的目录项\n sample_dir = os.path.join(run_dir, 'samples')\n if not os.path.exists(sample_dir):\n os.mkdir(sample_dir)\n # 利用命名函数给一个生成音频命名\n sample_name = make_sample_name(epoch=2,\n predict_use_softmax_as_input=False,\n predict_seconds=1,\n seed=10,\n sample_temperature=None,\n sample_argmax=False)\n # 音频存放的名称和路径指定好\n sample_filename = os.path.join(sample_dir, sample_name)\n # 打印把文件存进这个文件目录\n print('Saving to \"%s\"' % sample_filename)\n # 打开sample_filename.wav文件,配置声道数,量化位数,采样频率\n sample_stream = make_sample_stream(desired_sample_rate, sample_filename)\n # 建立模型,加载之前的检查点,打印模型\n net = model((6000,256))\n # checkpoint_save_path = \"./checkpoint/wavenet.ckpt\"\n # if os.path.exists(checkpoint_save_path + '.index'):\n # print('-------------load the model-----------------')\n # net.load_weights(checkpoint_save_path)\n net.summary()\n wav = process_wav(desired_sample_rate, predict_initial_input, use_ulaw)\n # wav 是一个序列采样点序列\n outputs = list(one_hot(wav[:]))\n # write_samples(sample_stream, outputs)这里的sample_stream是一个已经配置好头部的wav后缀的文件\n # warned_repetition = False\n for i in tqdm(xrange(int(desired_sample_rate * predict_seconds))):\n # if not warned_repetition:\n # if np.argmax(outputs[-1]) == np.argmax(outputs[-2]) and np.argmax(outputs[-2]) == np.argmax(outputs[-3]):\n # warned_repetition = True\n # print('Last three predicted outputs where %d' % np.argmax(outputs[-1]))\n # else:\n # warned_repetition = False\n prediction_seed = np.expand_dims(np.array(outputs[i:i+fragment_length]), 0)\n # np.expand_dims在相应的轴上扩展维度,axis=0,在第一维上扩展维度\n output = net.predict(prediction_seed)\n # 得到的output是一个(1,fragment_length,256)维的softmax值\n output_dist = output[0][-1]\n # output_dist取得是最后一行output的值\n output_val = draw_sample(output_dist, sample_temperature=None, sample_argmax=True, _rnd=random)\n write_samples(sample_stream, [output_val], True)\n sample_stream.close()\n print(\"Done!\")\n\n\n# 测试\npredict(16000, r'../data/test/Taylor_Swift_-_Welcome_To_New_York.wav', True, 6000, 1)\n\n\n# if __name__ == '__main__':\n # main()\n","sub_path":"hlp/tts/wavenet/model/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":6253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"475190846","text":"import boto3\nimport uuid\nimport json\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom decimal import Decimal\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('models-index')\n\ndef get_model(event, context):\n response = table.query(\n KeyConditionExpression=Key('id').eq(event['id'])\n )\n return response['Items']\n\n\ndef create_model(event, context):\n event['accuracy'] = Decimal(event['accuracy'])\n table.put_item(Item=event)\n return event\n\ndef delete_model(event, context):\n response = table.delete_item(Key={\n 'id': event['id']\n })\n return event\n\ndef most_recent(event, context):\n response = table.query(\n IndexName='owner-train_start_time-index',\n KeyConditionExpression=Key('owner').eq(event['owner']),\n ScanIndexForward=False,\n Limit=1\n )\n return response['Items']\n\ndef most_accurate(event, context):\n response = table.query(\n IndexName='name-accuracy-index',\n KeyConditionExpression=Key('name').eq(event['name']),\n ScanIndexForward=False,\n Limit=1\n )\n return response['Items']\n","sub_path":"api/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"153829395","text":"from flask import jsonify\nfrom dao.medical_devices import Medical_DevicesDAO\n\n\nclass MedicalDeviceHandler:\n\n def build_md_dict(self, row):\n result = {}\n result['mdev_id'] = row[0]\n result['mdev_name'] = row[1]\n result['mdev_description'] = row[2]\n return result\n\n def build_md_attr(self, mdev_id, mdev_name, mdev_description):\n result = {}\n result['mdev_id'] = mdev_id\n result['mdev_name'] = mdev_name\n result['mdev_description'] = mdev_description\n return result\n\n def getAllMD(self):\n dao = Medical_DevicesDAO()\n med_list = dao.getAllMedicalDevices()\n result_list = []\n for row in med_list:\n result = self.build_md_dict(row)\n result_list.append(result)\n return jsonify(MedicalDevices=result_list)\n\n def getMDById(self, mdev_id):\n dao = Medical_DevicesDAO()\n row = dao.getMedicalDevicesById(mdev_id)\n if not row:\n return jsonify(Error=\"Medical Devices not found\"), 404\n else:\n md = self.build_md_dict(row)\n return jsonify(MedicalDevice=md)\n\n def searchMD(self, args):\n if len(args) > 1:\n return jsonify(Error=\"Malformed search string\"), 400\n else:\n md = args.get(\"medical\")\n if md:\n dao = Medical_DevicesDAO()\n med_list = dao.getMedicalDeviceByName(md)\n result_list = []\n for row in med_list:\n result = self.build_md_dict(row)\n result_list.append(result)\n return jsonify(MedicalDevice=result_list)\n else:\n return jsonify(Error=\"Malformed search string\"), 400\n\n def insertMD(self, form):\n if form and len(form) == 3:\n mdev_name = form['mdev_name']\n mdev_description = form['mdev_description']\n resr_id = form['resr_id']\n if mdev_name and mdev_description:\n dao = Medical_DevicesDAO()\n mdev_id = dao.insert(mdev_name, mdev_description, resr_id)\n result = self.build_md_attr(mdev_id, mdev_name, mdev_description)\n return jsonify(MedicalDevice=result), 201\n else:\n return jsonify(Error=\"Malformed post request\"), 400\n else:\n return jsonify(Error=\"Malformed post request\"), 400\n\n def deleteMD(self, mdev_id):\n dao = Medical_DevicesDAO()\n if not dao.getMedicalDevicesById(mdev_id):\n return jsonify(Error=\"Medical Device not found\"), 404\n else:\n dao.delete(mdev_id)\n return jsonify(DeleteStatus=\"OK\"), 200\n\n def updateMD(self, mdev_id, form):\n dao = Medical_DevicesDAO()\n if not dao.getMedicalDevicesById(mdev_id):\n return jsonify(Error=\"Medical Device not found\"), 404\n else:\n if len(form) != 2:\n return jsonify(Error=\"Malformed update request\"), 400\n else:\n mdev_name = form['mdev_name']\n mdev_description = form['mdev_description']\n if mdev_name and mdev_description:\n dao.update(mdev_id, mdev_name, mdev_description)\n result = self.build_md_attr(mdev_id, mdev_name, mdev_description)\n return jsonify(MedicalDevice=result), 200\n else:\n return jsonify(Error=\"Unexpected attributes in update request\"), 400\n","sub_path":"handler/medical_devices.py","file_name":"medical_devices.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"476797281","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.http import JsonResponse\nfrom django.views import View\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom .forms import LoginForm, UserRegistrationForm, ProfileRegistrationForm\nfrom .models import Profile\nfrom driver.models import Department\nfrom complaints.views import get_pages\n\n# Create your views here.\n\n\nclass UserLoginView(View):\n \"\"\"\n 用户登录界面\n \"\"\"\n\n def get(self, request):\n forms = LoginForm()\n return render(request, 'login.html', locals())\n\n def post(self, request):\n forms = LoginForm(request.POST)\n if forms.is_valid():\n username = request.POST.get('username')\n pwd = request.POST.get('password')\n user = authenticate(username=username, password=pwd)\n if user:\n login(request, user)\n data = {'result': 'OK'}\n return JsonResponse(data)\n else:\n return JsonResponse({'result': '用户或密码错误'})\n else:\n return JsonResponse({'result': '验证码错误'})\n\n\ndef user_logout(request):\n logout(request)\n return redirect(settings.LOGIN_REDIRECT_URL)\n\n\ndef account_list(request):\n objects_list = Profile.objects.all()\n posts = get_pages(request, objects_list=objects_list)\n return render(request, 'account_list.html', locals())\n\n\n@login_required\ndef account_add(request):\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n user = request.POST.get('username')\n department = request.POST.get('department')\n if form.is_valid():\n try:\n u = form.save(commit=False)\n u.set_password(form.cleaned_data['password'])\n u.save()\n p = Profile()\n p.department = Department.objects.get(id=int(department))\n p.user = User.objects.get(username=user)\n # 编辑权限\n p.cpt_add = True if request.POST.get('cpt_add') == 'on' else False\n p.cpt_edit = True if request.POST.get('cpt_edit') == 'on' else False\n p.cpt_delete = True if request.POST.get('cpt_delete') == 'on' else False\n p.driver_add = True if request.POST.get('driver_add') == 'on' else False\n p.driver_edit = True if request.POST.get('driver_edit') == 'on' else False\n p.driver_delete = True if request.POST.get('driver_delete') == 'on' else False\n p.save()\n return redirect(reverse('account_list'))\n except Exception as e:\n return messages.error(request, e)\n\n title = '新增用户'\n form = UserRegistrationForm()\n profile_form = ProfileRegistrationForm()\n return render(request, 'profile_add.html', locals())\n\n\n@login_required\ndef account_delete(request):\n rowid = request.GET.get('id', '')\n user_id = Profile.objects.filter(id=rowid).first().user_id\n User.objects.filter(id=user_id).delete()\n return redirect(reverse('account_list'))\n\n@login_required\ndef account_detail(request, rowid):\n posts = get_object_or_404(Profile, id=rowid)\n if request.method == 'POST':\n form = ProfileRegistrationForm(instance=posts, data=request.POST)\n if form.is_valid():\n form.save()\n return JsonResponse({'result':'OK'})\n\n form = ProfileRegistrationForm(instance=posts)\n return render(request, 'profile_edit.html', locals())\n\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"469671115","text":"import random\nfrom io import BytesIO\nfrom pathlib import Path\nimport discord\nfrom discord import Game\nfrom discord.ext.commands import Bot\nimport asyncio\nimport imageio\nimageio.plugins.ffmpeg.download()\nimport moviepy.editor as mp\nfrom overlay import overlay_image, get_gif_url, gif_url_to_image_list, url_to_image, get_image_url, get_image_url_args, draw_text, paste_text_top_bottom, marius_origin, barr_origin, tim_origin, lan_origin, shel_origin, landrew_origin, hand_origin\nfrom filters import intensify_image, highlight_image, custom_edge_highlight_image, mirror_x, mirror_y, scramble_pixels, pixelate_image, saturate_image, make_okay_clip, make_draw_gif\nfrom stem_roles import stem_add_role, stem_remove_role, list_roles\nfrom face_detection import paste_on_face, open_image_cv, barr_scale, sp_scale, mar_scale, tim_scale, c_scale\nimport os\nimport random\nimport time\n\nBOT_PREFIX = \"$\"\nBOT_TOKEN = os.environ.get('BOT_TOKEN')\nBOT_ROLE = \"bots\"\n\nbot_last_command = {} #Key = User ID, Value = Bot's most recent message tied to the command\n\nclient = Bot(command_prefix=BOT_PREFIX)\nclient.remove_command('help')\n\n@client.event\nasync def on_ready():\n \"\"\"This function runs when the bot is started\"\"\"\n await client.change_presence(game = discord.Game(name = '#rules | $help'))\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n@client.event\nasync def on_member_join(member):\n if member.server.id == '387465995176116224':\n welcome_channel = client.get_channel('387465995176116226') # introductions\n\n num_members = len(set(client.get_all_members()))\n\n # used to randomly pick one of the available drawing professors\n professor_chosen = random.randint(0, 6)\n\n welcome_message = 'Welcome ' + member.display_name + '!|You are member ' + str(num_members) + '!|To see all the channels set your major|and housing roles in #role-assignment!'\n if professor_chosen == 0:\n output = draw_text(welcome_message, Path('memes/barrington/bdraw.png'), barr_origin)\n elif professor_chosen == 1:\n output = draw_text(welcome_message, Path('memes/marius/draw.png'), marius_origin)\n elif professor_chosen == 2:\n output = draw_text(welcome_message, Path('memes/tim/tdraw.png'), tim_origin)\n elif professor_chosen == 3:\n output = draw_text(welcome_message, Path('memes/lan/lan-draw.png'), lan_origin)\n elif professor_chosen == 4:\n output = draw_text(welcome_message, Path('memes/lan/landrew.png'), landrew_origin)\n else:\n output = draw_text(welcome_message, Path('memes/sheldraw.png'), shel_origin)\n name = 'welcome-' + member.display_name + '.png'\n output.save(name)\n await client.send_file(welcome_channel, name, content=member.mention)\n os.remove(name)\n\n@client.event\nasync def on_message_delete(message):\n \"\"\"This function runs whenever a message is deleted\n\n Args:\n - message: context of the deleted message (used to get the message contents)\n \"\"\"\n author = message.author\n if message.server.id == '387465995176116224':\n if (BOT_ROLE not in [role.name.lower() for role in author.roles]) and author.id != '98138045173227520':\n content = message.content\n await client.send_message(client.get_channel('557002016782680076'), '_Deleted Message_\\n**Message sent by:** ' + author.mention + '\\n**Channel:** ' + message.channel.mention + '\\n**Contents:** *' + content + '*\\n--------------')\n\n@client.event\nasync def on_message_edit(before, after):\n \"\"\"This function runs whenever a message is edited\n\n Args:\n - before: context before the edit (use to get the message contents before the message was edited)\n - after: context after the edit (use to get the message contents after the message was edited)\n \"\"\"\n author = before.author\n if before.server.id == '387465995176116224': # UMass STEM Discord server ID\n if (BOT_ROLE not in [role.name.lower() for role in author.roles]) and author.id != '98138045173227520':\n before_content = before.content\n after_content = after.content\n await client.send_message(client.get_channel('557002016782680076'), '_Edited Message_\\n**Message sent by:** ' + author.mention + '\\n**Channel:** ' + before.channel.mention + '\\n**Pre-edit contents:** *' + before_content + '*\\n**Post-edit contents:** *'+ after_content + '*\\n--------------')\n\n@client.command(name='help')\nasync def help():\n \"\"\"help command\n\n Output: list of accessible commands and their descriptions\n \"\"\"\n embed = discord.Embed(\n color = discord.Color.orange()\n )\n # role command dictionary\n ROLE_COMMANDS = {\n '*$getlist*': 'Sends a list of all the available roles',\n '*$get [role]*': 'Gives you the specified role',\n '*$remove [role]*': 'Removes the specified role from you',\n '*$members*': 'Prints out the number of people on the server'\n }\n MEME_COMMANDS = {\n '*$mdraw [image/url/text]*': 'Sends a photo of marius drawing the specified image or text or gif, keep in mind that discord\\'s gif size restrictions are a bit harsh',\n '*$tdraw [image/url/text]*': 'Sends a photo of tim drawing the specified image or text or gif, keep in mind that discord\\'s gif size restrictions are a bit harsh',\n '*$bdraw [image/url/text]*': 'Sends a photo of barrington drawing the specified image or text or gif, keep in mind that discord\\'s gif size restrictions are a bit harsh',\n '*$ldraw [image/url/text]*': 'Sends a photo of lan drawing the specified image or text or gif, keep in mind that discord\\'s gif size restrictions are a bit harsh',\n '*$landrew [image/url/text]*': 'Sends a photo of a different occasion of lan drawing the specified image or text, keep in mind that discord\\'s gif size restrictions are a bit harsh',\n '*$shelpoint [image/url/text]*': 'Sends a photo of dan sheldon pointing to the specified image or text or gif, keep in mind that discord\\'s gif size restrictions are a bit harsh',\n '*barrify [image]*': 'The bot uses computer vision through the OpenCV library to put barrington on identified faces in the inputed image',\n '*surprisedpikachu [image]*': 'The bot uses computer vision through the OpenCV library to put surprised pikachu on identified faces in the inputed image',\n '*marify [image]*': 'The bot uses computer vision through the OpenCV library to put marius on identified faces in the inputed image',\n '*timify [image]*': 'The bot uses computer vision through the OpenCV library to put tim on identified faces in the inputed image',\n '*calebify [image]*': 'The bot uses computer vision through the OpenCV library to put caleb on identified faces in the inputed image',\n '*$meme [\"top\" \"bottom\" image]*': 'The bot outputs the inputed image with the specified text in the old meme format',\n '*$intensify [factor image]*': 'The bot outputs the inputed image intensified to the specified factor',\n '*$highlightEdge [image]*':'The bot outputs the inputed image with an edge highlighting algorithm applied to it',\n '*$customEdgeHighlight [Red Green Blue image]*':'The bot takes in RGB values (between 0 to 255) and applies an edge highlighting algorithm where the edges are the specified color',\n '*$noise [image]*':'The bot outputs the inputed image with a noise filter applied to it',\n '*$pixelate [factor image]*':'The bot outputs the inputed image after pixelating it by a given factor, remember to use a larger factor to see results on high-res images',\n '*$mirror [axis image]*':'The bot mirrors the image on the given axis (X or Y), and outputs the result',\n '*$saturate [factor image]*':'The bot saturates the given image by the given factor',\n '*$okay* [image]':'The bot turns the given image into a video with marius saying okay as the background noise',\n '*$erase*': 'Deletes the most recent m/bdraw or barrify generated by the bot',\n }\n embed.set_author(name='Help', icon_url='https://cdn.discordapp.com/attachments/501594682820788224/558396074868342785/UMass_Stem_discord_logo.png')\n for command in ROLE_COMMANDS:\n embed.add_field(\n name = command,\n value = ROLE_COMMANDS[command]\n )\n embed.add_field(\n name = '-------------------------------------------------------------------',\n value = '------------------------------MEMES-------------------------------'\n )\n for command in MEME_COMMANDS:\n embed.add_field(\n name = command,\n value = MEME_COMMANDS[command]\n )\n await client.say(embed=embed)\n\n@client.command(name = 'members')\nasync def server_members():\n num_members = len(set(client.get_all_members()))\n await client.say('There are ' + str(num_members) + ' server members')\n\n@client.command(name='get', pass_context = True)\nasync def get_role(requested_role):\n \"\"\"Command to get the requested role\n\n Args:\n - requested_role: context that the command occured use this to access the message and other attributes\n \"\"\"\n member = requested_role.message.author\n if requested_role.message.server.id == '387465995176116224':\n await stem_add_role(requested_role, member, client)\n else:\n await client.send_message(requested_role.message.channel, embed=discord.Embed(description=\"Roles are not yet supported on this server\", color=discord.Color.dark_red()))\n\n@client.command(name='remove', pass_context = True)\nasync def remove_role(requested_role):\n \"\"\"Command to remove the requested role\n\n Args:\n - requested_role: context that the command occured use this to access the message and other attributes\n \"\"\"\n member = requested_role.message.author\n if requested_role.message.server.id == '387465995176116224':\n await stem_remove_role(requested_role, member, client)\n else:\n await client.send_message(requested_role.message.channel, embed=discord.Embed(description=\"Roles are not yet supported on this server\", color=discord.Color.dark_red()))\n\n@client.command(name='getlist', pass_context = True)\nasync def get_list(ctx):\n \"\"\"Command to generate list of roles accessable with the get command\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n await list_roles(ctx, client) # found in stem_roles.py\n\n@client.command(name='mdraw', pass_context = True)\nasync def mdraw(ctx):\n \"\"\"Command to generate a meme of marius drawing on the image or text or gif\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 7)\n if url != 0:\n #get list of modified frames (has the prof drawing the image)\n imgList = gif_url_to_image_list(url, 0)\n if imgList == 0:\n #if invalid list, return\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get a list of imageClips for each frame\n gifClip = make_draw_gif(imgList, 1)\n gifClip.write_gif(\"mdraw.gif\", 24, program='imageio')\n try:\n #try sending, if gif is above 8mb then an error will be thrown\n message = await client.send_file(ctx.message.channel, \"mdraw.gif\")\n except:\n #random color because why not\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"mdraw.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"mdraw.gif\")\n return\n await draw_universal(ctx, 'memes/marius/draw.png', 7, marius_origin, 'marius-drawing.png')\n\n@client.command(name='bdraw', pass_context = True)\nasync def bdraw(ctx):\n \"\"\"Command to generate a meme of barr drawing on the image or text or gif\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 7)\n if url != 0:\n #get list of frames\n imgList = gif_url_to_image_list(url, 1)\n if imgList == 0:\n #if invalid list return\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get list of image clips\n gifClip = make_draw_gif(imgList, 0)\n gifClip.write_gif(\"bdraw.gif\", 24, program='imageio')\n try:\n #check if message is <8 mb\n message = await client.send_file(ctx.message.channel, \"bdraw.gif\")\n except:\n #random color cause why not\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"bdraw.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"bdraw.gif\")\n return\n await draw_universal(ctx, 'memes/barrington/bdraw.png', 7, barr_origin, 'barrington-drawing.png')\n\n@client.command(name='tdraw', pass_context = True)\nasync def tdraw(ctx):\n \"\"\"Command to generate a meme of tim drawing on the image or text or gif\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 7)\n if url != 0:\n #get list of frames\n imgList = gif_url_to_image_list(url, 3)\n if imgList == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get list of imageClips\n gifClip = make_draw_gif(imgList, 2)\n gifClip.write_gif(\"tdraw.gif\", 24, program='imageio')\n try:\n #check for appropriate size\n message = await client.send_file(ctx.message.channel, \"tdraw.gif\")\n except:\n #random color cause ¯\\_(ツ)_/¯\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"tdraw.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"tdraw.gif\")\n return\n await draw_universal(ctx, 'memes/tim/tdraw.png', 7, tim_origin, 'tim-drawing.png')\n\n@client.command(name='ldraw', pass_context = True)\nasync def ldraw(ctx):\n \"\"\"Command to generate a meme of lan drawing on the image or text or gif\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 7)\n if url != 0:\n #get list of frames\n imgList = gif_url_to_image_list(url, 3)\n if imgList == 0:\n #check for valid list\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get list of image clips\n gifClip = make_draw_gif(imgList, 4)\n gifClip.write_gif(\"ldraw.gif\", 24, program='imageio')\n try:\n #check for appropriate size\n message = await client.send_file(ctx.message.channel, \"ldraw.gif\")\n except:\n #random colors are fun, plus this doesn't need consistency\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"ldraw.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"ldraw.gif\")\n return\n await draw_universal(ctx, 'memes/lan/lan-draw.png', 7, lan_origin, 'lan-drawing.png')\n\n@client.command(name='landrew', pass_context = True)\nasync def landrew(ctx):\n \"\"\"Command to generate a meme of lan drawing on the image or text\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 9)\n if url != 0:\n #get list of frames\n imgList = gif_url_to_image_list(url, 3)\n if imgList == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get list of imageClips\n gifClip = make_draw_gif(imgList, 6)\n gifClip.write_gif(\"landraws.gif\", 24, program='imageio')\n try:\n #check whether size is appropriate\n message = await client.send_file(ctx.message.channel, \"landraws.gif\")\n except:\n #¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"landraws.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"landraws.gif\")\n return\n await draw_universal(ctx, 'memes/lan/landrew.png', 9, landrew_origin, 'landrew-drawing.png')\n\n@client.command(name='shelpoint', pass_context = True)\nasync def shelpoint(ctx):\n \"\"\"Command to generate a meme of Dan Sheldon drawing on the image or text or gif\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 11)\n if url != 0:\n #get list of frames\n imgList = gif_url_to_image_list(url, 3)\n if imgList == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get list of imageClips\n gifClip = make_draw_gif(imgList, 3)\n gifClip.write_gif(\"shelpoint.gif\", 24, program='imageio')\n try:\n #check whether size is appropriate\n message = await client.send_file(ctx.message.channel, \"shelpoint.gif\")\n except:\n #¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯¯\\_(ツ)_/¯\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"shelpoint.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"shelpoint.gif\")\n return\n await draw_universal(ctx, 'memes/sheldraw.png', 11, shel_origin, 'sheldon-pointing.png')\n\n@client.command(name='handdraw', pass_context = True)\nasync def handdraw(ctx):\n \"\"\"Command to generate a meme of a hand drawing on the image or text\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n #in case of gif\n url = get_gif_url(ctx, 10)\n if url != 0:\n #get list of frames\n imgList = gif_url_to_image_list(url, 3)\n if imgList == 0:\n #if invalid list return\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"invalid image\", color=discord.Color.red()))\n return\n #get list of imageClips\n gifClip = make_draw_gif(imgList, 5)\n gifClip.write_gif(\"handdraw.gif\", 24, program='imageio')\n try:\n #check if message is <8 mb\n message = await client.send_file(ctx.message.channel, \"bdraw.gif\")\n except:\n #random color cause why not\n randRGB = lambda: random.randint(0, 255)\n randColor=int('%02X%02X%02X' % (randRGB(), randRGB(), randRGB()), 16)\n os.remove(\"handdraw.gif\")\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"GIF + image becomes too large to send, sorry :(\", color=randColor))\n return\n track_command(ctx.message.author.id, message)\n os.remove(\"handdraw.gif\")\n return\n await draw_universal(ctx, 'memes/hand.png', 10, hand_origin, 'handdraw.png')\n\nasync def draw_universal(ctx, path, command_end_index, origin, name):\n \"\"\"Universal function which is called by draw command with the following arguments\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - path: path to the drawing image (ie memes/lan/landrew.png)\n - command_end_index: index the end of the command (ie. for bdraw its 7 for '$' 'b' 'd' 'r' 'a' 'w' ' ')\n - origin: pixel origin imported from overlay.py\n - name: output file name\n \"\"\"\n url = get_image_url(ctx, command_end_index)\n if url == 0: # no url, hand should write the inputed text\n output = draw_text(ctx.message.content[command_end_index:], Path(path), origin)\n else: # url inputed, hand should draw on the image\n output = overlay_image(url_to_image(url), Path(path), origin)\n output.save(name)\n try:\n message = await client.send_file(ctx.message.channel, name)\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message) # tracks the most recent command of a user\n os.remove(name)\n\n#Deletes image based messages, such as bdraw, that the user requesting just sent.\n@client.command(name='erase', pass_context = True)\nasync def erase(ctx):\n \"\"\"Command to erase the most recent m/bdraw or barrify generated by the bot\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n if bot_last_command[ctx.message.author.id] is not None:\n await client.delete_message(bot_last_command[ctx.message.author.id])\n bot_last_command[ctx.message.author.id] = None #Clears this back up to avoid errors\n\n@client.command(name='barrify', pass_context = True, aliases = ['barify'])\nasync def barrify(ctx, *args):\n \"\"\"Command to paste barr's face on top of faces in an inputed image using facial recognition\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n await ify(ctx, barr_scale, 'memes/barrington/barr-face.png', 'barrify.png', args)\n\n@client.command(name='marify', pass_context = True, aliases=['marrify'])\nasync def marify(ctx, *args):\n \"\"\"Command to paste marius' face on top of faces in an inputed image using facial recognition\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n await ify(ctx, mar_scale, 'memes/marius/marius-face.png', 'marify.png', args)\n\n@client.command(name='calebify', pass_context = True)\nasync def calebify(ctx, *args):\n \"\"\"Command to paste caleb's face on top of faces in an inputed image using facial recognition\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n await ify(ctx, c_scale, 'memes/caleb/caleb-face.png', 'calebify.png', args)\n\n@client.command(name='timify', pass_context = True)\nasync def timify(ctx, *args):\n \"\"\"Command to paste tim's face on top of faces in an inputed image using facial recognition\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n await ify(ctx, tim_scale, 'memes/tim/tim-face.png', 'timify.png', args)\n\n@client.command(name='surprisedpikachu', pass_context=True)\nasync def surprisedpikachu_overlay(ctx, *args):\n \"\"\"Command to paste suprised pikachu on top of faces in an inputed image using facial recognition\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n await ify(ctx, sp_scale, 'memes/surprised-pikachu.png', 'surprisedpikachu.png', args)\n\nasync def ify(ctx, scale, path, file_name, *args):\n \"\"\"Command to paste a face on top of faces in an inputed image using facial recognition\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - args: arguments of the message\n - scale: specified scale for the faces\n - path: face image path\n - file_name: output file name\n \"\"\"\n url = get_image_url_args(ctx, args, 1, 0)\n if url == 0: # invalid image\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid image\", color=discord.Color.red()))\n return\n else:\n output = paste_on_face(Path(path), url, scale)\n # if there were no faces found then send error\n if output == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description='No faces found, please input another image', color=discord.Color.red()))\n return\n\n output.save(file_name)\n message = await client.send_file(ctx.message.channel, file_name)\n track_command(ctx.message.author.id, message)\n os.remove(file_name)\n\n@client.command(name='meme', pass_context=True)\nasync def meme_generator(ctx, *args):\n \"\"\"Command to generate memes with top and bottom text\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - *args: arguments passed in with the command\n \"\"\"\n url = get_image_url_args(ctx, args, 3, 2)\n if url == 0: # invalid image\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid image\", color=discord.Color.red()))\n return\n else:\n output = paste_text_top_bottom(args[0], args[1], url_to_image(url))\n output.save('meme.png')\n try:\n message = await client.send_file(ctx.message.channel, 'meme.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('meme.png')\n\n@client.command(name='intensify', pass_context = True)\nasync def intensify(ctx, *args):\n \"\"\"Command to intensify inputed image by the inputed factor\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - *args: arguments passed in with the command\n \"\"\"\n try:\n factor = float(args[0])\n except:\n factor = 2 # default if no factor specified\n url = get_image_url_args(ctx, args, 2, 1)\n if url == 0: # invalid image\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid image\", color=discord.Color.red()))\n return\n output = intensify_image(url_to_image(url), factor)\n if output == 0: # if factor < 0\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid factor\", color=discord.Color.red()))\n return\n # save and send image\n output.save('intensify.png')\n try:\n message = await client.send_file(ctx.message.channel, 'intensify.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('intensify.png')\n\n@client.command(name='mirror', pass_context = True)\nasync def mirror(ctx, *args):\n \"\"\"Command to mirror given image on the inputted axis\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - *args: arguments passed in to the command (in this case either the X axis or Y axis)\n \"\"\"\n try:\n url = get_image_url_args(ctx, args, 2, 2)\n axis = args[0]\n except:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid input\", color=discord.Color.red()))\n if axis != \"x\" and axis != \"y\" and axis != \"X\" and axis != \"Y\":\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid axis, please use x or y\", color=discord.Color.red()))\n return\n if axis == \"x\" or axis == \"X\":\n output = mirror_x(url_to_image(url))\n output.save(\"mirror_x.png\")\n try:\n message = await client.send_file(ctx.message.channel, \"mirror_x.png\")\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove(\"mirror_x.png\")\n return\n if axis == \"y\" or axis == \"Y\":\n output = mirror_y(url_to_image(url))\n output.save(\"mirror_y.png\")\n try:\n message = await client.send_file(ctx.message.channel, \"mirror_y.png\")\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove(\"mirror_y.png\")\n\n@client.command(name='highlightEdge', pass_context = True, aliases=['highlight', 'edge'])\nasync def highlight_edge(ctx, *args):\n \"\"\"Command to apply an edge highlighting algorithm to a given image\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n url = get_image_url_args(ctx, args, 1, 0)\n if url == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Image\"), color=discord.Color.red())\n return\n output = highlight_image(url_to_image(url))\n output.save('highlighted.png')\n try:\n message = await client.send_file(ctx.message.channel, 'highlighted.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('highlighted.png')\n\n@client.command(name='customEdgeHighlight', pass_context=True, aliases=['customhighlight', 'customedge'])\nasync def custom_edge_highlight(ctx, *args):\n \"\"\"Command to highlight an image's edges and turn them into a given color\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - *args: arguments passed in with the command (in this case the RGB values for the edge color)\n \"\"\"\n try:\n red = int(args[0])\n green = int(args[1])\n blue = int(args[2])\n except:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Parameters\", color=discord.Color.red()))\n return\n url = get_image_url_args(ctx, args, 4, 3)\n if url == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Image\", color=discord.Color.red()))\n return\n output = custom_edge_highlight_image(url_to_image(url), red, green, blue)\n if output == 0: #if the RGB values are invalid\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid RGB Values, please input numbers between 0-255\", color=discord.Color.red()))\n return\n output.save('custom_highlight.png')\n try:\n message = await client.send_file(ctx.message.channel, 'custom_highlight.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('custom_highlight.png')\n\n@client.command(name='noise', pass_context=True)\nasync def noise_filter(ctx):\n \"\"\"Command to apply a noise filter on the inputted image\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n url = get_image_url(ctx, 7)\n if url == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Image\", color=discord.Color.red()))\n return\n output = scramble_pixels(url_to_image(url))\n output.save('noise.png')\n try:\n message = await client.send_file(ctx.message.channel, 'noise.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('noise.png')\n\n@client.command(name='pixelate', pass_context=True, aliases=['pixel'])\nasync def pixelate(ctx, *args):\n \"\"\"Command to pixelate a given image by a given factor\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - *args: arguments passed into the command (in this case the pixelation factor)\n \"\"\"\n url = get_image_url_args(ctx, args, 2, 1)\n try:\n factor = float(args[0])\n except:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Parameters\", color=discord.Color.red()))\n return\n if url == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Image\", color=discord.Color.red()))\n return\n output = pixelate_image(url_to_image(url), factor)\n output.save('pixelate.png')\n try:\n message = await client.send_file(ctx.message.channel, 'pixelate.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('pixelate.png')\n\n@client.command(name='saturate', pass_context=True)\nasync def saturate(ctx, *args):\n \"\"\"Command to saturate a given image by a given factor\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n - *args: arguments passed into the command (in this case the pixelation factor)\n \"\"\"\n url = get_image_url_args(ctx, args, 2, 1)\n try:\n factor = float(args[0])\n except:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Parameters\", color=discord.Color.red()))\n return\n if url == 0:\n await client.send_message(ctx.message.channel, embedd=discord.Embed(description=\"Invalid Image\", color=discord.Color.red()))\n return\n output = saturate_image(url_to_image(url), factor)\n output.save('saturate.png')\n try:\n message = await client.send_file(ctx.message.channel, 'saturate.png')\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove('saturate.png')\n\n@client.command(name='okay', pass_context=True)\nasync def make_okay(ctx):\n \"\"\"Command to turn a given image into a video where marius says 'okay' in the background\n\n Args:\n - ctx: context that the command occured use this to access the message and other attributes\n \"\"\"\n url = get_image_url(ctx, 6)\n if url == 0:\n await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Invalid Image\", color=discord.Color.red()))\n return\n clip = make_okay_clip(url_to_image(url))\n clip.write_videofile(\"okay.mp4\", audio=\"sfx/okayturnedupto8.mp3\", fps=24)\n try:\n message = await client.send_file(ctx.message.channel, \"okay.mp4\")\n except:\n message = await client.send_message(ctx.message.channel, embed=discord.Embed(description=\"Image too large\", color=discord.Color.red()))\n track_command(ctx.message.author.id, message)\n os.remove(\"okay.mp4\")\n\n\n\ndef track_command(author, bot_message):\n \"\"\"tracks the authors most recent command\n\n Args:\n - author: author of the message\n - bot_message: most recent message sent by the bot corresponding to the author\n \"\"\"\n bot_last_command[author] = bot_message\n\nclient.run(BOT_TOKEN)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":37373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"516993437","text":"## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.\n## Use of this source code is governed by a BSD-style license that can be\n## found in the COPYING file.\n\nimport os\nimport tempfile\nimport unittest\n\nimport qidoc.core\nimport qibuild\n\n\ndef check_tools():\n \"\"\" Check if sphinx-build and doxygen are installed.\n If not, we will skip the test_build test\n\n \"\"\"\n executables = dict()\n for name in [\"sphinx-build\", \"sphinx-build2\", \"doxygen\"]:\n executables[name] = qibuild.command.find_program(name)\n\n res = executables[\"sphinx-build\"] or executables[\"sphinx-build2\"]\n res = res and executables[\"doxygen\"]\n return res\n\nclass TestQiDoc(unittest.TestCase):\n def setUp(self):\n self.tmp = tempfile.mkdtemp(prefix=\"tmp-qidoc\")\n self.in_dir = os.path.join(self.tmp, \"in\")\n self.out_dir = os.path.join(self.tmp, \"out\")\n this_dir = os.path.dirname(__file__)\n qibuild.sh.install(os.path.join(this_dir, \"in\"),\n self.in_dir, quiet=True)\n self.qidoc_builder = qidoc.core.QiDocBuilder(self.in_dir, self.out_dir)\n\n def tearDown(self):\n qibuild.sh.rm(self.tmp)\n\n @unittest.skipUnless(check_tools(), \"Some required tools are not installed\")\n def test_build(self):\n opts = dict()\n opts[\"version\"] = 1.42\n self.qidoc_builder.build(opts)\n submodule_zip = os.path.join(self.out_dir,\n \"qibuild\", \"_downloads\", \"submodule.zip\")\n self.assertTrue(os.path.exists(submodule_zip))\n\n def test_cfg_parse(self):\n qibuild_sphinx = self.qidoc_builder.sphinxdocs[\"qibuild\"]\n self.assertEqual(qibuild_sphinx.name, \"qibuild\")\n self.assertEqual(qibuild_sphinx.src ,\n os.path.join(self.in_dir, \"qibuild\", \"doc\"))\n\n doc_sphinx = self.qidoc_builder.sphinxdocs[\"doc\"]\n self.assertEqual(doc_sphinx.depends, [\"qibuild\"])\n\n\n libalcommon = self.qidoc_builder.doxydocs[\"libalcommon\"]\n libalvision = self.qidoc_builder.doxydocs[\"libalvision\"]\n self.assertEqual(libalcommon.name, \"libalcommon\")\n self.assertEqual(libalvision.name, \"libalvision\")\n self.assertEqual(libalcommon.src ,\n os.path.join(self.in_dir, \"libnaoqi\", \"libalcommon\"))\n self.assertEqual(libalvision.src ,\n os.path.join(self.in_dir, \"libnaoqi\", \"libalvision\"))\n self.assertEqual(libalcommon.dest,\n os.path.join(self.out_dir, \"ref\", \"libalcommon\"))\n self.assertEqual(libalvision.dest,\n os.path.join(self.out_dir, \"ref\", \"libalvision\"))\n\n\n\n def test_sorting(self):\n docs = self.qidoc_builder.sort_sphinx()\n names = [d.name for d in docs]\n self.assertEqual(names, ['qibuild', 'doc'])\n\n docs = self.qidoc_builder.sort_doxygen()\n names = [d.name for d in docs]\n self.assertEqual(names, ['libqi', 'libalcommon', 'libalvision'])\n\n def test_intersphinx_mapping(self):\n mapping = self.qidoc_builder.get_intersphinx_mapping(\"doc\")\n self.assertEqual(mapping,\n {\"qibuild\": (os.path.join(self.out_dir, \"qibuild\"),\n None)}\n )\n\n def test_doxygen_mapping(self):\n mapping = self.qidoc_builder.get_doxygen_mapping(\"libalvision\")\n expected = {\n os.path.join(self.out_dir, \"doxytags\", \"libalcommon.tag\"):\n \"../libalcommon\",\n os.path.join(self.out_dir, \"doxytags\", \"libqi.tag\"):\n \"../libqi\",\n }\n self.assertEqual(mapping, expected)\n\n\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"nao/qibuild-1.14.3/python/qidoc/test/test_qidoc.py","file_name":"test_qidoc.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"543640539","text":"# Implementation of Sparse Matrix ADT using python List\n\nclass sparseMatrix:\n\n # Creating an Instance of the Matrix value\n def __init__(self, numRows, numCols):\n self.numRows = numRows\n self.numCols = numCols\n self.elements = list()\n\n # Returns the number of Rows\n def numRow(self):\n return self.numRows\n\n # Returns the number of col\n def numCol(self):\n return self.numCols\n\n # Returns the value of the row and col using x[i, j]\n def __getitem__(self, ndxTuple):\n ndx = self.findPosition(ndxTuple[0], ndxTuple[1])\n if ndx is not None:\n return self.elements[ndx].value\n else:\n return 0\n\n # Sets the value of the row and col using x[i, j] = value\n def __setitem__(self, ndxTuple, value):\n ndx = self.findPosition(ndxTuple[0], ndxTuple[1])\n if ndx is not None:\n if value != 0.0 :\n self.elements[ndx].value = value\n else:\n self.elements.pop(ndx)\n else:\n if value != 0.0:\n element = MatrixElement(ndxTuple[0], ndxTuple[1], value)\n self.elements.append(element)\n\n # Scale the Matrix by a value\n def scaleBy(self, scalar):\n for element in self.elements:\n element.value *= scalar\n\n # Returns the Addition two Matrix using A + B\n def __add__(self, MatB):\n assert self.numRows == MatB.numRows and \\\n self.numCols == MatB.numCols,\\\n \" Row and Columns for both Matrix must be equal\"\n # Creates a Matrix for the result of Addition\n MatC = sparseMatrix(self.numRows, self.numCols)\n\n # Duplicates the values of Matrix A into the result Matrix\n for element in self.elements:\n dup = MatrixElement(element.row, element.col, element.value)\n MatC.elements.append(dup)\n\n # Itrates over Matrix B values\n for element in MatB.elements:\n value = MatC[element.row, element.col]\n value += element.value\n\n # Sets the value of the row and column into the result matrix\n MatC[element.row, element.col] = value\n\n return MatC\n\n # Returns the Subtraction two Matrix using A - B\n def __sub__(self, matB):\n assert self.numRows == MatB.numRows and \\\n self.numCols == MatB.numCols,\\\n \" Row and Columns for both Matrix must be equal\"\n\n # Creates a Matrix for the result of Subtraction\n MatC = sparseMatrix(self.numRows, self.numCols)\n\n # Duplicates the values of Matrix A into the result Matrix\n for element in self.elements:\n dup = MatrixElement(element.row, element.col, element.value)\n MatC.elements.append(dup)\n\n # Itrates over Matrix B values\n for element in MatB.elements:\n value = MatC[element.row, element.col]\n value -= element.value\n\n # Sets the value of the row and column into the result matrix\n MatC[element.row, element.col] = value\n\n return MatC\n\n # Returns the Multiplication of Two Matrices using A*B\n def __mul__(self, matB):\n\n # Creates a Matrix for the result of the Multiplication\n matC = sparseMatrix(self.numRow(), matB.numCol())\n \n # Iterates Over Non Zero Elments in Matrix A\n for element_A in self.elements:\n # Obtain the Value\n value = element_A.value\n # Iterates Over B\n for element_B in matB.elements:\n # Determine if the value on the row as same column of Mat A element\n if element_A.col == element_B.row:\n # Multiplies this value by value in Mat A\n value_now = value * element_B.value\n # Puts this Value into New Matrix with Row of A as ite row\n # And Column of B as its Column\n matC[element_A.row, element_B.col] += value_now\n\n return matC\n\n\n # Returns the transpose of the Matrix\n def transpose(self):\n\n # Creates a Trasnpose Matrix\n mat_transpose = sparseMatrix(self.numRows, self.numCols)\n\n # Iterates over the Matrix elements and change row and column\n for i in range(len(self.elements)):\n element = MatrixElement(self.elements[i].col, self.elements[i].row, self.elements[i].value)\n mat_transpose.elements.append(element)\n\n\n return mat_transpose\n\n\n # Returns the position of an element in the list of element of the Matrix\n def findPosition(self, row, col):\n\n for i in range(len(self.elements)):\n if row == self.elements[i].row and \\\n col == self.elements[i].col:\n return i\n return None\n\n# Create a Class Storage for storing the row, col and value\nclass MatrixElement:\n\n def __init__(self, row, col, value):\n self.row = row\n self.col = col\n self.value = value\n\n\n#TEST\n\nMatA = sparseMatrix(4, 4)\nMatB = sparseMatrix(4, 4)\nMatA[1, 2] = 10\nMatA[1, 3] = 12\nMatA[2, 1] = 1\nMatA[2, 3] = 2\n\nMatB[1, 1] = 2\nMatB[1, 2] = 5\nMatB[2, 2] = 1\nMatB[3, 1] = 8\ntranspose = MatB.transpose()\n\n\nMatC = MatA*MatB\n\nprint(MatC[1, 1])\nprint(MatC[1, 2])\nprint(MatC[2, 1])\nprint(MatC[2, 2])\n","sub_path":"ch5/SparseMatrix Using Sorted List/sparseMatrix.py","file_name":"sparseMatrix.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"64962727","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2014 Instituto de Desarrollo Urbano (). \n# All Rights Reserved\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nimport flickrapi\nfrom time import sleep\nfrom PIL import Image\nimport os\n\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass flickr_uploader():\n\n def __init__(self, key, secret, login):\n self._api_key = key\n self._api_secret = secret\n self._api_login = login\n self.flickr = flickrapi.FlickrAPI(self._api_key, self._api_secret)\n self._token = self.get_token() #Conectar con Flickr\n\n def get_token(self):\n (self.token, self.frob) = self.flickr.get_token_part_one(perms='write')\n if not self.token: raw_input(\"Press ENTER after you have authorized the app in flickr page...\")\n self.flickr.get_token_part_two((self.token, self.frob))\n _logger.info(\"token: {0}\".format(self.token))\n _logger.info(\"frob: {0}\".format(self.frob))\n return self.token\n\n def upload(self, photo, title, tag):\n im_1 = Image.open(photo)\n width, height = im_1.size\n if (width > 500):\n relacion = float(width)/float(height)\n new_width = 500\n new_height = new_width/relacion\n im_2 = im_1.resize((int(new_width),int(new_height)), Image.ANTIALIAS)\n im_2.save(photo)\n self.flickr.upload(filename=photo, title=title, tags=tag)\n #upload(photo,title, tag)\n id_fs = ''\n attempts = 0\n while(id_fs == '' and attempts < 8):\n sleep(0.5)\n _logger.info(\"Flickr timeout. Retrying...\")\n attempts += 1\n fotosubida = self.flickr.walk(\n tag_mode='any',\n tags=tag,\n user_id = self._api_login\n )\n for fs in fotosubida:\n id_fs = fs.get(\"id\")\n _logger.info(\"Flicker ID: {0}\".format(id_fs))\n\n photos = []\n for photo in self.flickr.walk(\n tag_mode='any',\n tags=tag,\n user_id = self._api_login\n ):\n #Tamano mediano c : 800,800 en el lado mas largo\n photo_url = \"http://farm{0}.static.flickr.com/{1}/{2}_{3}_b.jpg\".format(\n photo.get(\"farm\"),\n photo.get(\"server\"),\n photo.get(\"id\"),\n photo.get(\"secret\"),\n )\n photo_title = photo.get(\"title\")\n photo_id = photo.get(\"id\")\n\n # Por si llega a encontrar mas de una photo con el mismo tag: \n photos.append({'url':photo_url,'id':photo_id,'title':photo_title})\n\n url_photo = ''\n for p in photos:\n url_photo = p['url']\n\n _logger.info(\"Photo uploaded Flickr\")\n os.remove(photo)\n return url_photo\n","sub_path":"src/photo_gallery/flickr_uploader/flickr_uploader.py","file_name":"flickr_uploader.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"266873401","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport csv\n\ndef getData(FILENAME):\n x = []\n y = []\n\n with open(FILENAME, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n for i, row in enumerate(reader):\n if i == 0: continue\n x.append(float(row[0]))\n y.append(float(row[4]))\n \n return np.array(x), np.array(y)\n\n\ndef plot_IvsV1(v_x, i_y, label):\n \n plt.plot(v_x, i_y, '.', label=label)\n\n\ndef makeFit(v_x, i_y, start, end, label):\n \n fit = np.polyfit(v_x[start:end], i_y[start:end], 1)\n fit_func = lambda x: fit[0]*x + fit[1]\n #plt.plot(v_x, [fit_func(i) for i in v_x], label=label)\n plt.plot(v_x[start:end], [fit_func(i)*1e6 for i in v_x[start:end] ] )\n plt.text(0.1, -25, \"Fit: ax + b\\na=%e$\\mho$\\nb=%e$A$\" % (fit[0], fit[1]))\n\n\nif __name__ == \"__main__\":\n v_x, i_y = getData(\"../data/adaptive-biasing-V3_Gm.txt\")\n \n plot_IvsV1(v_x, i_y*1e6, \"Experimental Data\")\n \n start, end = (120, 350)\n #start, end = (0, len(i_y)-1)\n makeFit(v_x, i_y, start, end, \"Theoretical Fit\")\n \n plt.xlabel(\"$V_{dm}$\", fontsize=16)\n plt.ylabel(\"$I_{out}$ ($\\mu$A)\", fontsize=16)\n plt.title(\"$G_{m}$\", fontsize=20)\n plt.show()\n","sub_path":"FinalProject/plottingScripts/transconductanceGain.py","file_name":"transconductanceGain.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"477304337","text":"import os\r\nimport os.path\r\nimport io\r\nimport exifread\r\n\r\nclass jImage:\r\n\r\n\tdef __init__(self, image_path=None):\r\n\t\tself.tag_serialized = dict()\r\n\t\tself.image_path = image_path\r\n\t\tself.tags = self._extract_data(self.image_path)\r\n\t\tif self.tags is not None:\r\n\t\t\tself.tag_serialized['EXIF']=self._extract_cat(\"EXIF\")\r\n\t\t\tself.tag_serialized['Image']=self._extract_cat(\"Image\")\r\n\t\t\tself.tag_serialized['Thumbnail']=self._extract_cat(\"Thumbnail\")\r\n\r\n\tdef _extract_data(self, image_path=None):\r\n\t\ttags = None\r\n\t\tif os.path.isfile( image_path ):\r\n\t\t\ttry:\r\n\t\t\t\timage_file = open(image_path,'rb')\r\n\t\t\t\ttags = exifread.process_file(image_file, details = False)\r\n\t\t\tfinally:\r\n\t\t\t\timage_file.close()\r\n\t\treturn tags\r\n\r\n\tdef _extract_cat(self, cat_name=None):\r\n\t\treturn {k.replace(cat_name+\" \", ''): v for k, v in self.tags.items() if k.startswith(cat_name)}\r\n\r\n\tdef _cleanup(self, string=None):\r\n\t\tstring = string.replace(' ', '_').lower()\r\n\t\tstring = string.replace(':', '-').lower()\r\n\t\treturn string\r\n\r\n\tdef get_tags_of(self, category=\"EXIF\"):\r\n\t\treturn self.tag_serialized[category]\r\n\r\n\tdef get_tag(self, tag=\"DateTimeOriginal\",category=\"EXIF\" ,cleanup=False):\r\n\t\tif cleanup is False:\r\n\t\t\treturn self.tag_serialized[category][tag]\r\n\t\telse:\r\n\t\t\treturn self._cleanup(str(self.tag_serialized[category][tag]))\r\n\r\n\tdef get_tags_serialized(self):\r\n\t\treturn self.tag_serialized\r\n\r\n\tdef get_categories(self):\r\n\t\tresult=[]\r\n\t\tfor cat in self.get_tags_serialized():\r\n\t\t\tresult.append(cat)\r\n\t\treturn result","sub_path":"lib/jtools/exif.py","file_name":"exif.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"188098757","text":"import copy\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nclass LinfPGDAttack(object):\n def __init__(self, model=None, device=None, epsilon=0.05, k=10, a=0.01, feat = None):\n #k: iterations, a: step size\n self.model = model\n self.epsilon = epsilon\n self.k = k\n self.a = a\n self.loss_fn = nn.MSELoss().to(device)\n self.device = device\n\n # Feature-level attack? Which layer?\n self.feat = feat\n\n # PGD or I-FGSM?\n self.rand = True\n\n def perturb(self, X_nat, y, c_trg):\n if self.rand:\n X = X_nat.clone().detach_() + torch.tensor(np.random.uniform(-self.epsilon, self.epsilon, X_nat.shape).astype('float32')).to(self.device)\n else:\n X = X_nat.clone().detach_()\n\n\n for i in range(self.k):\n X.requires_grad = True\n X.retain_grad = True\n output, feats = self.model(X, c_trg)\n\n if self.feat:\n output = feats[self.feat]\n\n self.model.zero_grad()\n loss = self.loss_fn(output, y)\n loss.backward()\n grad = X.grad\n\n X_adv = X + self.a * grad.sign()\n\n eta = torch.clamp(X_adv - X_nat, min=-self.epsilon, max=self.epsilon)\n X = torch.clamp(X_nat + eta, min=-1, max=1).detach_()\n\n self.model.zero_grad()\n return X, X - X_nat\n\nclass FGSM(object):\n def __init__(self, model=None, device=None, feat=None, epsilon=0.05):\n self.model = model\n self.epsilon = epsilon\n self.device = device\n self.feat = feat\n self.loss_fn = nn.MSELoss().to(device)\n\n def perturb(self, X_nat, y, c_trg):\n X = X_nat.clone().detach()\n X.requires_grad = True\n X.retain_grad = True\n output, feats = self.model(X, c_trg)\n if self.feat:\n output = feats[self.feat]\n\n self.model.zero_grad()\n loss = self.loss_fn(output, y)\n loss.backward()\n grad = X.grad\n\n X_adv = X + self.epsilon*grad.sign()\n\n X = torch.clamp(X_adv, min=-1, max=1).detach()\n self.model.zero_grad()\n return X, X-X_nat\n\nclass iFGSM(object):\n def __init__(self, model=None, device=None, epsilon=0.05, k=10, a=0.01, feat = None):\n #k: iterations, a: step size\n self.model = model\n self.epsilon = epsilon\n self.k = k\n self.a = a\n self.loss_fn = nn.MSELoss().to(device)\n self.device = device\n\n # Feature-level attack? Which layer?\n self.feat = feat\n\n def perturb(self, X_nat, y, c_trg):\n X = X_nat.clone().detach_()\n\n for i in range(self.k):\n X.requires_grad = True\n X.retain_grad = True\n output, feats = self.model(X, c_trg)\n if self.feat:\n output = feats[self.feat]\n\n self.model.zero_grad()\n loss = self.loss_fn(output, y)\n loss.backward()\n grad = X.grad\n\n X_adv = X + self.epsilon*grad.sign()\n\n X = torch.clamp(X_adv, min=-1, max=1).detach()\n\n return X, X - X_nat\n\ndef clip_tensor(X, Y, Z):\n # Clip X with Y min and Z max\n X_np = X.data.cpu().numpy()\n Y_np = Y.data.cpu().numpy()\n Z_np = Z.data.cpu().numpy()\n X_clipped = np.clip(X_np, Y_np, Z_np)\n X_res = torch.FloatTensor(X_clipped)\n return X_res\n\ndef perturb_batch(X, y, c_trg, model, adversary):\n # Perturb batch function for adversarial training\n model_cp = copy.deepcopy(model)\n for p in model_cp.parameters():\n p.requires_grad = False\n model_cp.eval()\n \n adversary.model = model_cp\n\n X_adv, _ = adversary.perturb(X, y, c_trg)\n\n return X_adv","sub_path":"Stargan-attack/attacks.py","file_name":"attacks.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"179918557","text":"# -*- coding: utf-8 -*-\nfrom setuptools.command.install import install\n\n\nclass CustomInstall(install):\n \"\"\"Customized setuptools install command.\n\n Installs git submodules and packages that need to be installed first.\"\"\"\n files = {\n 'install_first': 'requirements/install_first.txt',\n 'submodules': 'requirements/submodules.txt'\n }\n\n @staticmethod\n def _read_requirements(filename):\n with open(filename, 'r') as f:\n return [l.strip() for l in f.readlines()]\n\n def run(self):\n import os\n import pip\n from distutils.sysconfig import get_python_lib\n\n self.requirements = {k: self._read_requirements(v)\n for k, v in self.files.items()}\n\n for package in self.requirements['install_first']:\n pip.main(['install', package])\n\n install.do_egg_install(self)\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n for submodule in self.requirements['submodules']:\n pth_path = os.path.join(get_python_lib(), submodule + \".pth\")\n with open(pth_path, 'w') as pth:\n pth.write(os.path.join(current_dir, submodule) + os.linesep)\n\n\ndef read_requirements(filename):\n from pip.req import parse_requirements\n from pip.download import PipSession\n install_reqs = parse_requirements(filename, session=PipSession())\n return [str(ir.req) for ir in install_reqs]\n","sub_path":"{{cookiecutter.project_slug}}/setup_utils.py","file_name":"setup_utils.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"363397670","text":"\"\"\"\nImplementation from:\nNASA Ames Robotics \"The Cheesy Poofs\"\nTeam 254\n\"\"\"\nimport itertools\n\nimport numpy as np\n\nfrom utils.geometry import Pose, PoseWithCurvature, boundRadians\n\nMIN_SAMPLE_SIZE = 1\n\n\ndef parameterizeSpline(\n spline,\n max_dx: float,\n max_dy: float,\n max_dtheta: float,\n t0: float = 0,\n t1: float = 1,\n) -> np.array:\n dt = (t1 - t0) / MIN_SAMPLE_SIZE\n ret = np.empty(0, dtype=PoseWithCurvature)\n t = 0\n while t < t1:\n next_time = t + dt\n ret = np.append(\n ret, getSegmentArc(spline, t, next_time, max_dx, max_dy, max_dtheta)\n )\n t = next_time\n return ret\n\n\ndef parameterizeSplines(\n splines: np.array, max_dx: float, max_dy: float, max_dtheta: float\n) -> np.array:\n ret = np.empty(1, dtype=PoseWithCurvature)\n ret[0] = splines[0].getPoseWithCurvature(0)\n for i, spline in enumerate(splines):\n samples = parameterizeSpline(spline, max_dx, max_dy, max_dtheta)\n ret = np.append(ret, samples)\n return ret\n\n\ndef getSegmentArc(\n spline: np.array,\n t0: float,\n t1: float,\n max_dx: float,\n max_dy: float,\n max_dtheta: float,\n) -> np.array:\n p0 = spline.getPoint(t0)\n p1 = spline.getPoint(t1)\n r0 = spline.getHeading(t0)\n r1 = spline.getHeading(t1)\n transform_point = (p1 - p0).rotateBy(-r0)\n transformation = Pose(transform_point.x, transform_point.y, boundRadians(r1 - r0))\n twist = transformation.getTwist()\n if twist.dx > max_dx or twist.dy > max_dy or twist.dtheta > max_dtheta:\n return np.concatenate(\n (\n getSegmentArc(spline, t0, (t0 + t1) / 2, max_dx, max_dy, max_dtheta),\n getSegmentArc(spline, (t0 + t1) / 2, t1, max_dx, max_dy, max_dtheta),\n )\n )\n else:\n return np.array([spline.getPoseWithCurvature(t1)])\n","sub_path":"src/trajectory/splinegenerator.py","file_name":"splinegenerator.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"28314033","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport requests\nimport lxml.html\nfrom mailsend import *\n\n\ndef TEB_readfromjson(mode=0):\n '''\n read information from json files \n mode=0: RELEASE \n mode=1: DEBUG\n '''\n if mode == 0:\n print('[INFO] try to Read TEB_config.json ...')\n try:\n with open(\"TEB_config.json\", \"r\") as f1:\n text = f1.read()\n TEB_jsonData = json.loads(text)\n except Exception:\n print(\n '[INFO] Read failed! Please check if the file [TEB_config.json] is existed...')\n exit()\n else:\n print('[INFO] __DEBUG MODE__')\n print('[INFO] try to Read TEB_config_test.json ...')\n try:\n with open(\"TEB_config_test.json\", \"r\") as f1:\n text = f1.read()\n TEB_jsonData = json.loads(text)\n except Exception:\n print(\n '[INFO] Read failed! Please check if the file [TEB_config_test.json] is existed...')\n exit()\n print('[INFO] Read succesful ...')\n\n return TEB_jsonData\n\n\ndef TEB_getFeeFromPage(url, headers, form_data):\n '''\n return (float)fee \n (slow method)\n '''\n # post 模板\n form_data_post = {\n \"__VIEWSTATE\": \"\",\n \"__VIEWSTATEGENERATOR\": \"\",\n \"drlouming\": \"\",\n \"drceng\": \"\",\n \"dr_ceng\": \"\",\n \"drfangjian\": \"\",\n \"radio\": \"usedR\",\n \"ImageButton1.x\": \"\",\n \"ImageButton1.y\": \"\"\n }\n # 创建 requests 会话\n with requests.Session() as TEBsess:\n r = TEBsess.get(url, headers=headers, timeout=5) # 第1次 post\n page = lxml.html.document_fromstring(r.text)\n VIEWSTATE = page.xpath(\"//input[@name='__VIEWSTATE']/@value\")\n VIEWSTATEGENERATOR = page.xpath(\n \"//input[@name='__VIEWSTATEGENERATOR']/@value\")\n form_data_post['__VIEWSTATE'] = VIEWSTATE\n form_data_post['__VIEWSTATEGENERATOR'] = VIEWSTATEGENERATOR\n form_data_post['drlouming'] = form_data['drlouming']\n\n r = TEBsess.post(url, headers=headers,\n data=form_data_post, timeout=5) # 第2次 post\n page = lxml.html.document_fromstring(r.text)\n VIEWSTATE = page.xpath(\"//input[@name='__VIEWSTATE']/@value\")\n VIEWSTATEGENERATOR = page.xpath(\n \"//input[@name='__VIEWSTATEGENERATOR']/@value\")\n form_data_post['__VIEWSTATE'] = VIEWSTATE\n form_data_post['__VIEWSTATEGENERATOR'] = VIEWSTATEGENERATOR\n form_data_post['drceng'] = form_data['drceng']\n\n r = TEBsess.post(url, headers=headers,\n data=form_data_post, timeout=5) # 第3次 post\n page = lxml.html.document_fromstring(r.text)\n VIEWSTATE = page.xpath(\"//input[@name='__VIEWSTATE']/@value\")\n VIEWSTATEGENERATOR = page.xpath(\n \"//input[@name='__VIEWSTATEGENERATOR']/@value\")\n form_data_post['__VIEWSTATE'] = VIEWSTATE\n form_data_post['__VIEWSTATEGENERATOR'] = VIEWSTATEGENERATOR\n form_data_post['dr_ceng'] = form_data['dr_ceng']\n\n r = TEBsess.post(url, headers=headers,\n data=form_data_post, timeout=5) # 第4次 post\n page = lxml.html.document_fromstring(r.text)\n VIEWSTATE = page.xpath(\"//input[@name='__VIEWSTATE']/@value\")\n VIEWSTATEGENERATOR = page.xpath(\n \"//input[@name='__VIEWSTATEGENERATOR']/@value\")\n form_data_post['__VIEWSTATE'] = VIEWSTATE\n form_data_post['__VIEWSTATEGENERATOR'] = VIEWSTATEGENERATOR\n form_data_post['drfangjian'] = form_data['drfangjian']\n form_data_post['radio'] = form_data['radio']\n form_data_post['ImageButton1.x'] = form_data['ImageButton1.x']\n form_data_post['ImageButton1.y'] = form_data['ImageButton1.y']\n\n r = TEBsess.post(url, headers=headers,\n data=form_data_post, timeout=5) # 第5次 post\n page = lxml.html.document_fromstring(r.text)\n # xpath 方法筛选\n fee = float(page.xpath(\"//span[@class='number orange']/text()\")[0])\n\n return fee\n\n\ndef TEB_getInfo(TEB_jsonData):\n '''\n return: \n >>> `TEB_Info`\n {'1_8_307': {\n 'mailclient': {\n '******@qq.com': 0,\n '******@qq.com': 1\n },\n 'Fee': 3.39\n }\n }\n >>> `admin_MailInfo`\n mailserver_username = TEB_jsonData['mailserver_username'] # 发送提醒邮件的邮箱\n mailserver_password = TEB_jsonData['mailserver_password'] # 发送提醒邮件邮箱的密码\n mailserver_smtp = TEB_jsonData['mailserver_smtp'] # 发送邮箱的SMTP服务器地址\n admin_mail = TEB_jsonData['admin_mailaddr'] # 管理员邮箱\n\n >>> `admin_MailMsg`\n [嘉定校区 8 号楼 321 房间 电费还剩 50.2, 嘉定校区 9 号楼 322 房间 电费还剩 50.2]\n '''\n TEB_Info = {}\n admin_MailMsg = []\n url = TEB_jsonData['target_url']\n headers = TEB_jsonData['headers']\n\n subkey = ['mailserver_username', 'mailserver_password',\n 'mailserver_smtp', 'admin_mailaddr']\n admin_MailInfo = {key: TEB_jsonData[key] for key in subkey}\n\n for room in TEB_jsonData['clientroom']:\n alarm_threshold = TEB_jsonData['clientroom'][room]['alarm_threshold']\n form_data = TEB_jsonData['clientroom'][room]['form_data']\n mailclient = TEB_jsonData['clientroom'][room]['client_mailaddr']\n\n fee = TEB_getFeeFromPage(url, headers, form_data)\n\n TEB_Info[room] = {'Fee': fee, 'mailclient': mailclient,\n 'alarm_threshold': alarm_threshold}\n roominfo = room.split('_')\n\n msg = \" 嘉定\" + \" 校区 \" + \\\n roominfo[1] + \" 号楼 \" + roominfo[2] + \" 房间 \" + \" 电费还剩:\" + str(fee)\n admin_MailMsg.append(msg)\n\n return TEB_Info, admin_MailInfo, admin_MailMsg\n\n\ndef main():\n '''\n 主函数\n '''\n TEB_jsonData = TEB_readfromjson(mode=0) # 从json文件中读取条目\n TEB_Info, admin_MailInfo, admin_MailMsg = TEB_getInfo(TEB_jsonData) # 重新组合数据\n\n # print(TEB_Info)\n # print(admin_MailInfo)\n # print(admin_MailMsg)\n\n # regular_info, lowfee_info = TEB_Info_to_mailsend_Info(TEB_Info)\n # print(regular_info)\n # print(lowfee_info)\n\n mailSend(TEB_Info, admin_MailInfo)\n\n # adminMailSend(admin_MailInfo, admin_MailMsg)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fee_get.py","file_name":"fee_get.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"151223344","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 29 14:37:47 2017\r\n\r\n@author: ebay\r\n\"\"\"\r\n\r\nimport csv\r\nimport pandas as pd\r\n\r\nlonglist = []\r\n\r\nwith open('fedex_charges.csv', 'r') as csvfile:\r\n reader = csv.reader(csvfile)\r\n for lines in reader:\r\n for i in range(1,len(lines),2):\r\n shortlist = []\r\n if lines[i] != '':\r\n shortlist.append(lines[0])\r\n shortlist.append(lines[i])\r\n shortlist.append(lines[i+1])\r\n longlist.append(shortlist)\r\n \r\n \r\ndf = pd.DataFrame(longlist,columns=['id','chargefor','price'])\r\n\r\ndfwide = pd.pivot_table(df, index='id', columns=['chargefor'], aggfunc=max)\r\n\r\ndfwide.fillna(0, inplace=True)\r\n\r\ndfwide.to_csv('charge_wide.csv',encoding='utf-8')","sub_path":"charges_test.py","file_name":"charges_test.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"223179458","text":"from __future__ import division\nfrom __future__ import print_function\n\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport flask\nimport json\nimport logging\nimport os\nimport re\nimport sys\nfrom django import forms\n\n# Appengine imports.\nfrom framework import ramcache\n# TODO(jrobbins): phase out gae_users\nfrom google.appengine.api import users as gae_users\nfrom framework import users\n\nfrom framework import basehandlers\nfrom framework import permissions\nfrom framework import utils\nfrom pages import guideforms\nfrom internals import models\nfrom internals import processes\nimport settings\n\n\n# Forms to be used for each stage of each process.\n# { feature_type_id: { stage_id: stage_specific_form} }\nSTAGE_FORMS = {\n models.FEATURE_TYPE_INCUBATE_ID: {\n models.INTENT_INCUBATE: guideforms.NewFeature_Incubate,\n models.INTENT_IMPLEMENT: guideforms.NewFeature_Prototype,\n models.INTENT_EXPERIMENT: guideforms.Any_DevTrial,\n models.INTENT_IMPLEMENT_SHIP: guideforms.NewFeature_EvalReadinessToShip,\n models.INTENT_EXTEND_TRIAL: guideforms.NewFeature_OriginTrial,\n models.INTENT_SHIP: guideforms.Most_PrepareToShip,\n models.INTENT_SHIPPED: guideforms.Any_Ship,\n },\n\n models.FEATURE_TYPE_EXISTING_ID: {\n models.INTENT_IMPLEMENT: guideforms.Existing_Prototype,\n models.INTENT_EXPERIMENT: guideforms.Any_DevTrial,\n models.INTENT_EXTEND_TRIAL: guideforms.Existing_OriginTrial,\n models.INTENT_SHIP: guideforms.Most_PrepareToShip,\n models.INTENT_SHIPPED: guideforms.Any_Ship,\n },\n\n models.FEATURE_TYPE_CODE_CHANGE_ID: {\n models.INTENT_IMPLEMENT: guideforms.PSA_Implement,\n models.INTENT_EXPERIMENT: guideforms.Any_DevTrial,\n models.INTENT_SHIP: guideforms.PSA_PrepareToShip,\n models.INTENT_SHIPPED: guideforms.Any_Ship,\n },\n\n models.FEATURE_TYPE_DEPRECATION_ID: {\n models.INTENT_IMPLEMENT: guideforms.Deprecation_Implement,\n models.INTENT_EXPERIMENT: guideforms.Any_DevTrial,\n models.INTENT_EXTEND_TRIAL: guideforms.Deprecation_DeprecationTrial,\n models.INTENT_SHIP: guideforms.Deprecation_PrepareToShip,\n models.INTENT_REMOVED: guideforms.Deprecation_Removed,\n },\n}\n\n\nIMPL_STATUS_FORMS = {\n models.INTENT_EXPERIMENT:\n (models.BEHIND_A_FLAG, guideforms.ImplStatus_DevTrial),\n models.INTENT_EXTEND_TRIAL:\n (models.ORIGIN_TRIAL, guideforms.ImplStatus_OriginTrial),\n models.INTENT_IMPLEMENT_SHIP:\n (None, guideforms.ImplStatus_EvalReadinessToShip),\n models.INTENT_SHIP:\n (models.ENABLED_BY_DEFAULT, guideforms.ImplStatus_AllMilestones),\n models.INTENT_SHIPPED:\n (models.ENABLED_BY_DEFAULT, guideforms.ImplStatus_AllMilestones),\n models.INTENT_REMOVED:\n (models.REMOVED, guideforms.ImplStatus_AllMilestones),\n }\n\n# Forms to be used on the \"Edit all\" page that shows a flat list of fields.\n# [('Section name': form_class)].\nFLAT_FORMS = [\n ('Feature metadata', guideforms.Flat_Metadata),\n ('Indentify the need', guideforms.Flat_Identify),\n ('Prototype a solution', guideforms.Flat_Implement),\n ('Dev trial', guideforms.Flat_DevTrial),\n ('Origin trial', guideforms.Flat_OriginTrial),\n ('Prepare to ship', guideforms.Flat_PrepareToShip),\n ('Ship', guideforms.Flat_Ship),\n]\n\n\nclass FeatureNew(basehandlers.FlaskHandler):\n\n TEMPLATE_PATH = 'guide/new.html'\n\n @permissions.require_edit_feature\n def get_template_data(self):\n user = self.get_current_user()\n\n new_feature_form = guideforms.NewFeatureForm(\n initial={'owner': user.email()})\n template_data = {\n 'overview_form': new_feature_form,\n }\n return template_data\n\n @permissions.require_edit_feature\n def process_post_data(self):\n owners = self.split_emails('owner')\n\n blink_components = (\n self.split_input('blink_components', delim=',') or\n [models.BlinkComponent.DEFAULT_COMPONENT])\n\n # TODO(jrobbins): Validate input, even though it is done on client.\n\n feature_type = int(self.form.get('feature_type', 0))\n gae_user = gae_users.User(email=self.get_current_user().email())\n feature = models.Feature(\n category=int(self.form.get('category')),\n name=self.form.get('name'),\n feature_type=feature_type,\n intent_stage=models.INTENT_NONE,\n summary=self.form.get('summary'),\n owner=owners,\n impl_status_chrome=models.NO_ACTIVE_DEV,\n standardization=models.EDITORS_DRAFT,\n unlisted=self.form.get('unlisted') == 'on',\n web_dev_views=models.DEV_NO_SIGNALS,\n blink_components=blink_components,\n tag_review_status=processes.initial_tag_review_status(feature_type),\n created_by=gae_user,\n updated_by=gae_user)\n key = feature.put()\n\n # TODO(jrobbins): enumerate and remove only the relevant keys.\n ramcache.flush_all()\n\n redirect_url = '/guide/edit/' + str(key.integer_id())\n return self.redirect(redirect_url)\n\n\nclass ProcessOverview(basehandlers.FlaskHandler):\n\n TEMPLATE_PATH = 'guide/edit.html'\n\n def detect_progress(self, f):\n progress_so_far = {}\n for progress_item, detector in processes.PROGRESS_DETECTORS.items():\n detected = detector(f)\n if detected:\n progress_so_far[progress_item] = str(detected)\n return progress_so_far\n\n @permissions.require_edit_feature\n def get_template_data(self, feature_id):\n f = models.Feature.get_by_id(long(feature_id))\n if f is None:\n self.abort(404, msg='Feature not found')\n\n feature_process = processes.ALL_PROCESSES.get(\n f.feature_type, processes.BLINK_LAUNCH_PROCESS)\n template_data = {\n 'overview_form': guideforms.MetadataForm(f.format_for_edit()),\n 'process_json': json.dumps(processes.process_to_dict(feature_process)),\n }\n\n progress_so_far = self.detect_progress(f)\n\n # Provide new or populated form to template.\n template_data.update({\n 'feature': f.format_for_template(),\n 'feature_id': f.key.integer_id(),\n 'feature_json': json.dumps(f.format_for_template()),\n 'progress_so_far': json.dumps(progress_so_far),\n })\n return template_data\n\n\nclass FeatureEditStage(basehandlers.FlaskHandler):\n\n TEMPLATE_PATH = 'guide/stage.html'\n\n def touched(self, param_name):\n \"\"\"Return True if the user edited the specified field.\"\"\"\n # TODO(jrobbins): for now we just consider everything on the current form\n # to have been touched. Later we will add javascript to populate a\n # hidden form field named \"touched\" that lists the names of all fields\n # actually touched by the user.\n\n # For now, checkboxes are always considered \"touched\", if they are\n # present on the form.\n # TODO(jrobbins): Simplify this after next deployment.\n checkboxes = ('unlisted', 'all_platforms', 'wpt', 'prefixed', 'api_spec')\n if param_name in checkboxes:\n form_fields_str = self.form.get('form_fields')\n if form_fields_str:\n form_fields = [field_name.strip()\n for field_name in form_fields_str.split(',')]\n return param_name in form_fields\n else:\n return True\n return param_name in self.form\n\n def get_blink_component_from_bug(self, blink_components, bug_url):\n # TODO(jrobbins): Use monorail API instead of scrapping.\n return []\n\n def get_feature_and_process(self, feature_id):\n \"\"\"Look up the feature that the user wants to edit, and its process.\"\"\"\n f = models.Feature.get_by_id(feature_id)\n if f is None:\n self.abort(404, msg='Feature not found')\n\n feature_process = processes.ALL_PROCESSES.get(\n f.feature_type, processes.BLINK_LAUNCH_PROCESS)\n\n return f, feature_process\n\n @permissions.require_edit_feature\n def get_template_data(self, feature_id, stage_id):\n f, feature_process = self.get_feature_and_process(feature_id)\n\n stage_name = ''\n for stage in feature_process.stages:\n if stage.outgoing_stage == stage_id:\n stage_name = stage.name\n\n template_data = {\n 'stage_name': stage_name,\n 'stage_id': stage_id,\n }\n\n # TODO(jrobbins): show useful error if stage not found.\n detail_form_class = STAGE_FORMS[f.feature_type][stage_id]\n\n impl_status_offered, impl_status_form_class = IMPL_STATUS_FORMS.get(\n stage_id, (None, None))\n\n feature_edit_dict = f.format_for_edit()\n detail_form = None\n if detail_form_class:\n detail_form = detail_form_class(feature_edit_dict)\n impl_status_form = None\n if impl_status_form_class:\n impl_status_form = impl_status_form_class(feature_edit_dict)\n\n # Provide new or populated form to template.\n template_data.update({\n 'feature': f,\n 'feature_id': f.key.integer_id(),\n 'feature_form': detail_form,\n 'already_on_this_stage': stage_id == f.intent_stage,\n 'already_on_this_impl_status':\n impl_status_offered == f.impl_status_chrome,\n 'impl_status_form': impl_status_form,\n 'impl_status_name': models.IMPLEMENTATION_STATUS.get(\n impl_status_offered, None),\n 'impl_status_offered': impl_status_offered,\n })\n return template_data\n\n @permissions.require_edit_feature\n def process_post_data(self, feature_id, stage_id=0):\n if feature_id:\n feature = models.Feature.get_by_id(feature_id)\n if feature is None:\n self.abort(404, msg='Feature not found')\n\n logging.info('POST is %r', self.form)\n\n if self.touched('spec_link'):\n feature.spec_link = self.parse_link('spec_link')\n\n if self.touched('api_spec'):\n feature.api_spec = self.form.get('api_spec') == 'on'\n\n if self.touched('spec_mentors'):\n feature.spec_mentors = self.split_emails('spec_mentors')\n\n if self.touched('security_review_status'):\n feature.security_review_status = self.parse_int('security_review_status')\n\n if self.touched('privacy_review_status'):\n feature.privacy_review_status = self.parse_int('privacy_review_status')\n\n if self.touched('initial_public_proposal_url'):\n feature.initial_public_proposal_url = self.parse_link(\n 'initial_public_proposal_url')\n\n if self.touched('explainer_links'):\n feature.explainer_links = self.split_input('explainer_links')\n\n if self.touched('bug_url'):\n feature.bug_url = self.parse_link('bug_url')\n if self.touched('launch_bug_url'):\n feature.launch_bug_url = self.parse_link('launch_bug_url')\n\n if self.touched('intent_to_implement_url'):\n feature.intent_to_implement_url = self.parse_link(\n 'intent_to_implement_url')\n\n if self.touched('intent_to_ship_url'):\n feature.intent_to_ship_url = self.parse_link(\n 'intent_to_ship_url')\n\n if self.touched('ready_for_trial_url'):\n feature.ready_for_trial_url = self.parse_link(\n 'ready_for_trial_url')\n\n if self.touched('intent_to_experiment_url'):\n feature.intent_to_experiment_url = self.parse_link(\n 'intent_to_experiment_url')\n\n if self.touched('origin_trial_feedback_url'):\n feature.origin_trial_feedback_url = self.parse_link(\n 'origin_trial_feedback_url')\n\n if self.touched('finch_url'):\n feature.finch_url = self.parse_link('finch_url')\n\n if self.touched('i2e_lgtms'):\n feature.i2e_lgtms = self.split_emails('i2e_lgtms')\n\n if self.touched('i2s_lgtms'):\n feature.i2s_lgtms = self.split_emails('i2s_lgtms')\n\n # Cast incoming milestones to ints.\n # TODO(jrobbins): Consider supporting milestones that are not ints.\n if self.touched('shipped_milestone'):\n feature.shipped_milestone = self.parse_int('shipped_milestone')\n\n if self.touched('shipped_android_milestone'):\n feature.shipped_android_milestone = self.parse_int(\n 'shipped_android_milestone')\n\n if self.touched('shipped_ios_milestone'):\n feature.shipped_ios_milestone = self.parse_int('shipped_ios_milestone')\n\n if self.touched('shipped_webview_milestone'):\n feature.shipped_webview_milestone = self.parse_int(\n 'shipped_webview_milestone')\n\n if self.touched('shipped_opera_milestone'):\n feature.shipped_opera_milestone = self.parse_int('shipped_opera_milestone')\n\n if self.touched('shipped_opera_android'):\n feature.shipped_opera_android_milestone = self.parse_int(\n 'shipped_opera_android_milestone')\n\n if self.touched('ot_milestone_desktop_start'):\n feature.ot_milestone_desktop_start = self.parse_int(\n 'ot_milestone_desktop_start')\n if self.touched('ot_milestone_desktop_end'):\n feature.ot_milestone_desktop_end = self.parse_int(\n 'ot_milestone_desktop_end')\n\n if self.touched('ot_milestone_android_start'):\n feature.ot_milestone_android_start = self.parse_int(\n 'ot_milestone_android_start')\n if self.touched('ot_milestone_android_end'):\n feature.ot_milestone_android_end = self.parse_int(\n 'ot_milestone_android_end')\n\n if self.touched('devtrial_instructions'):\n feature.devtrial_instructions = self.parse_link('devtrial_instructions')\n\n if self.touched('flag_name'):\n feature.flag_name = self.form.get('flag_name')\n\n if self.touched('owner'):\n feature.owner = self.split_emails('owner')\n\n if self.touched('doc_links'):\n feature.doc_links = self.split_input('doc_links')\n\n if self.touched('measurement'):\n feature.measurement = self.form.get('measurement')\n\n if self.touched('sample_links'):\n feature.sample_links = self.split_input('sample_links')\n\n if self.touched('search_tags'):\n feature.search_tags = self.split_input('search_tags', delim=',')\n\n if self.touched('blink_components'):\n feature.blink_components = (\n self.split_input('blink_components', delim=',') or\n [models.BlinkComponent.DEFAULT_COMPONENT])\n\n if self.touched('devrel'):\n feature.devrel = self.split_emails('devrel')\n\n if self.touched('feature_type'):\n feature.feature_type = int(self.form.get('feature_type'))\n\n # intent_stage can be be set either by or a checkbox\n if self.touched('intent_stage'):\n feature.intent_stage = int(self.form.get('intent_stage'))\n elif self.form.get('set_stage') == 'on':\n feature.intent_stage = stage_id\n\n if self.touched('category'):\n feature.category = int(self.form.get('category'))\n if self.touched('name'):\n feature.name = self.form.get('name')\n if self.touched('summary'):\n feature.summary = self.form.get('summary')\n if self.touched('motivation'):\n feature.motivation = self.form.get('motivation')\n\n # impl_status_chrome can be be set either by or a checkbox\n if self.touched('impl_status_chrome'):\n feature.impl_status_chrome = int(self.form.get('impl_status_chrome'))\n elif self.form.get('set_impl_status') == 'on':\n feature.impl_status_chrome = self.parse_int('impl_status_offered')\n\n if self.touched('interop_compat_risks'):\n feature.interop_compat_risks = self.form.get('interop_compat_risks')\n if self.touched('ergonomics_risks'):\n feature.ergonomics_risks = self.form.get('ergonomics_risks')\n if self.touched('activation_risks'):\n feature.activation_risks = self.form.get('activation_risks')\n if self.touched('security_risks'):\n feature.security_risks = self.form.get('security_risks')\n if self.touched('debuggability'):\n feature.debuggability = self.form.get('debuggability')\n if self.touched('all_platforms'):\n feature.all_platforms = self.form.get('all_platforms') == 'on'\n if self.touched('all_platforms_descr'):\n feature.all_platforms_descr = self.form.get('all_platforms_descr')\n if self.touched('wpt'):\n feature.wpt = self.form.get('wpt') == 'on'\n if self.touched('wpt_descr'):\n feature.wpt_descr = self.form.get('wpt_descr')\n if self.touched('ff_views'):\n feature.ff_views = int(self.form.get('ff_views'))\n if self.touched('ff_views_link'):\n feature.ff_views_link = self.parse_link('ff_views_link')\n if self.touched('ff_views_notes'):\n feature.ff_views_notes = self.form.get('ff_views_notes')\n if self.touched('ie_views'):\n feature.ie_views = int(self.form.get('ie_views'))\n if self.touched('ie_views_link'):\n feature.ie_views_link = self.parse_link('ie_views_link')\n if self.touched('ie_views_notes'):\n feature.ie_views_notes = self.form.get('ie_views_notes')\n if self.touched('safari_views'):\n feature.safari_views = int(self.form.get('safari_views'))\n if self.touched('safari_views_link'):\n feature.safari_views_link = self.parse_link('safari_views_link')\n if self.touched('safari_views_notes'):\n feature.safari_views_notes = self.form.get('safari_views_notes')\n if self.touched('web_dev_views'):\n feature.web_dev_views = int(self.form.get('web_dev_views'))\n if self.touched('web_dev_views'):\n feature.web_dev_views_link = self.parse_link('web_dev_views_link')\n if self.touched('web_dev_views_notes'):\n feature.web_dev_views_notes = self.form.get('web_dev_views_notes')\n if self.touched('prefixed'):\n feature.prefixed = self.form.get('prefixed') == 'on'\n\n if self.touched('tag_review'):\n feature.tag_review = self.form.get('tag_review')\n if self.touched('tag_review_status'):\n feature.tag_review_status = self.parse_int('tag_review_status')\n\n if self.touched('standardization'):\n feature.standardization = int(self.form.get('standardization'))\n if self.touched('unlisted'):\n feature.unlisted = self.form.get('unlisted') == 'on'\n if self.touched('comments'):\n feature.comments = self.form.get('comments')\n if self.touched('experiment_goals'):\n feature.experiment_goals = self.form.get('experiment_goals')\n if self.touched('experiment_timeline'):\n feature.experiment_timeline = self.form.get('experiment_timeline')\n if self.touched('experiment_risks'):\n feature.experiment_risks = self.form.get('experiment_risks')\n if self.touched('experiment_extension_reason'):\n feature.experiment_extension_reason = self.form.get(\n 'experiment_extension_reason')\n if self.touched('ongoing_constraints'):\n feature.ongoing_constraints = self.form.get('ongoing_constraints')\n\n feature.updated_by = gae_users.User(email=self.get_current_user().email())\n key = feature.put()\n\n # TODO(jrobbins): enumerate and remove only the relevant keys.\n ramcache.flush_all()\n\n redirect_url = '/guide/edit/' + str(key.integer_id())\n return self.redirect(redirect_url)\n\n\nclass FeatureEditAllFields(FeatureEditStage):\n \"\"\"Flat form page that lists all fields in seprate sections.\"\"\"\n\n TEMPLATE_PATH = 'guide/editall.html'\n\n @permissions.require_edit_feature\n def get_template_data(self, feature_id):\n f, feature_process = self.get_feature_and_process(feature_id)\n\n feature_edit_dict = f.format_for_edit()\n # TODO(jrobbins): make flat forms process specific?\n flat_form_section_list = FLAT_FORMS\n flat_forms = [\n (section_name, form_class(feature_edit_dict))\n for section_name, form_class in flat_form_section_list]\n template_data = {\n 'feature': f,\n 'feature_id': f.key.integer_id(),\n 'flat_forms': flat_forms,\n }\n return template_data\n\n\napp = basehandlers.FlaskApplication([\n ('/guide/new', FeatureNew),\n ('/guide/edit/', ProcessOverview),\n ('/guide/stage//', FeatureEditStage),\n ('/guide/editall/', FeatureEditAllFields),\n], debug=settings.DEBUG)\n","sub_path":"pages/guide.py","file_name":"guide.py","file_ext":"py","file_size_in_byte":20132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"443743358","text":"\nimport random\nimport numpy as np\n\nsize = 13800 # 1820, 2150, 13800\nfile_size = 46 # 8, 8, 46\ndata_size = 32 # 256 / 128\n\n\nlink = ['../dataset/twopiece/raw/gray_%d_%02d.data', '../dataset/twopiece/raw/mono_%d_%02d.data', '../dataset/twopiece/raw/thre_%d_%02d.data']\noutp = ['../dataset/twopiece/gray_%s_%d_%02d.data' , '../dataset/twopiece/mono_%s_%d_%02d.data' , '../dataset/twopiece/thre_%s_%d_%02d.data' ]\n\ndivisions, train = 5, 3 # 5, 40 | 3, 24\ndata_batch_size, val = 300, 4 # 230, 276, 300 | 4, 32\nreal_size = int(np.ceil(size / divisions))\nreal_real_size = real_size * 1 # 1, 8\n\nthe_list = np.arange(size)\nnp.random.shuffle(the_list)\n\n\ncolor = np.zeros((real_size, data_size, data_size, 4), dtype=np.uint8)\nlabel = np.zeros((2, real_size), dtype=np.int32)\n\n\nfor the_pos in range(len(link)):\n\n\tmatrices = []\n\tfor i in range(1, file_size + 1):\n\t\twith open(link[the_pos] % (data_size, i), 'rb') as f:\n\t\t\ta = np.load(f, encoding='bytes').item()\n\t\tmatrices.append(a)\n\t\n\tcount, bcount = 0, 1\n\tfor i in the_list[:real_size * train]:\n\t\tbatch_pos = i // data_batch_size\n\t\tdatas_pos = i % data_batch_size\n\t\ta, coun = matrices[batch_pos], count % real_size\n\t\tcolor[coun, :, :, :] = a[b'data'][datas_pos, :, :, :]\n\t\tlabel[:, coun] = a[b'chapter'][:, datas_pos]\n\t\tcount += 1\n\t\tif count % real_size == 0:\n\t\t\tprint(str(bcount))\n\t\t\td = {b'data': color, b'chapter': label}\n\t\t\twith open(outp[the_pos] % ('train', data_size, bcount), 'wb') as f:\n\t\t\t\tnp.save(f, d)\n\t\t\tbcount += 1\n\tprint(str(count))\n\n\tcolos = np.zeros((real_real_size, data_size, data_size, 4), dtype=np.uint8)\n\tlabes = np.zeros((2, real_real_size), dtype=np.int32)\n\n\tcount = 0\n\tfor i in the_list[real_size * train : real_size * val]:\n\t\tbatch_pos = i // data_batch_size\n\t\tdatas_pos = i % data_batch_size\n\t\ta, coun = matrices[batch_pos], count % real_real_size\n\t\tcolos[coun, :, :, :] = a[b'data'][datas_pos, :, :, :]\n\t\tlabes[:, coun] = a[b'chapter'][:, datas_pos]\n\t\tcount += 1\n\t\tif count % real_real_size == 0:\n\t\t\td = {b'data': colos, b'chapter': labes}\n\t\t\twith open(outp[the_pos] % ('val', data_size, 1), 'wb') as f:\n\t\t\t\tnp.save(f, d)\n\t\t\tbcount += 1\n\tprint(str(count))\n\n\tcount = 0\n\tfor i in the_list[real_size * val :]:\n\t\tbatch_pos = i // data_batch_size\n\t\tdatas_pos = i % data_batch_size\n\t\ta, coun = matrices[batch_pos], count % real_real_size\n\t\tcolos[coun, :, :, :] = a[b'data'][datas_pos, :, :, :]\n\t\tlabes[:, coun] = a[b'chapter'][:, datas_pos]\n\t\tcount += 1\n\t\tif (count+1) % real_real_size == 0: #\n\t\t\td = {b'data': colos, b'chapter': labes}\n\t\t\twith open(outp[the_pos] % ('test', data_size, 1), 'wb') as f:\n\t\t\t\tnp.save(f, d)\n\t\t\tbcount += 1\n\tprint(str(count) + '\\n')","sub_path":"valtest.py","file_name":"valtest.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"329595674","text":"#!/usr/bin/env python3\n# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab\n\"\"\" commands for dev oppo\n\nMost commands send a string (fixed for reading, attached data for writing)\nwhile parsing the response works by extracting the needed string part by\nregex. Some commands translate the device data into readable values via\nlookups.\n\"\"\"\n\nmodels = {\n 'ALL': ['info', 'general', 'control', 'menu'],\n 'UDP-203': []\n}\n\ncommands = {\n 'info': {\n 'time': {\n 'totalelapsed': {'read': True, 'write': False, 'read_cmd': '#QEL', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QEL OK (.*)'},\n 'totalremaining': {'read': True, 'write': False, 'read_cmd': '#QRE', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QRE OK (.*)'},\n 'chapterelapsed': {'read': True, 'write': False, 'read_cmd': '#QCE', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QCE OK (.*)'},\n 'chapterremaining': {'read': True, 'write': False, 'read_cmd': '#QCR', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QCE OK (.*)'},\n 'titleelapsed': {'read': True, 'write': False, 'read_cmd': '#QTE', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QTE OK (.*)'},\n 'titleremaining': {'read': True, 'write': False, 'read_cmd': '#QTR', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QTR OK (.*)'}\n },\n 'audio': {\n 'current': {'read': True, 'write': False, 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': [r'@QAT OK (?:[A-Z]*) (\\d{1,2})/(?:\\d{1,2}) (?:[A-Za-z]*)', r'@UAT (?:[A-Z]{2}) (\\d{2})/(?:\\d{2}) (?:[A-Z]{3}) (?:[0-7.]*)']},\n 'available': {'read': True, 'write': False, 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': [r'@QAT OK (?:[A-Z]*) (?:\\d{1,2})/(\\d{1,2}) (?:[A-Za-z]*)', r'@UAT (?:[A-Z]{2}) (?:\\d{2})/(\\d{2}) (?:[A-Z]{3}) (?:[0-7.]*)']},\n 'language': {'read': True, 'write': False, 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': [r'@QAT OK (?:[A-Z]*) (?:\\d{1,2})/(?:\\d{1,2}) ([A-Za-z]*)', r'@UAT (?:[A-Z]{2}) (?:\\d{2})/(?:\\d{2}) ([A-Z]{3}) (?:[0-7.]*)']},\n },\n 'subtitle': {\n 'current': {'read': True, 'write': False, 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': r'@UST (\\d{2})/(?:\\d{2}) ([A-Z]{3})'},\n 'available': {'read': True, 'write': False, 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': r'@UST (?:\\d{2})/(\\d{2}) (?:[A-Z]{3})'},\n 'language': {'read': True, 'write': False, 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': r'@UST (?:\\d{2})/(?:\\d{2}) ([A-Z]{3})'},\n },\n 'firmware': {'read': True, 'write': False, 'read_cmd': '#QVR', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@QVR OK (.*)', 'item_attrs': {'initial': True}},\n 'status': {'read': True, 'write': False, 'read_cmd': '#QPL', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': ['@QPL OK {LOOKUP}$', '@UPL {LOOKUP}$', '@OK {LOOKUP}$'], 'item_attrs': {'initial': True}, 'lookup': 'STATUS'},\n 'disctype': {'read': True, 'write': False, 'read_cmd': '#QDT', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': ['@QDT (?:OK\\s)?{LOOKUP}$', '@UDT {LOOKUP}$', '@OK {LOOKUP}$'], 'item_attrs': {'initial': True}, 'lookup': 'DISCTYPE'},\n 'totaltracks': {'read': True, 'write': False, 'read_cmd': '#QTK', 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': [r'@QTK OK (?:\\d{2})/(\\d{2})', r'@OK (?:\\d{2})/(\\d{2})', r'@UAT (?:[A-Z]{2}) (?:\\d{2})/(\\d{2}) (?:[A-Z]{3}) (?:[0-7.]*)'], 'item_attrs': {'initial': True}},\n 'displaytype': {'read': True, 'write': True, 'write_cmd': '#STC {RAW_VALUE_UPPER}', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@STC OK (.*)'},\n 'audiotype': {'read': True, 'write': False, 'read_cmd': '#QAT', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': ['@QAT OK {LOOKUP}', r'@UAT {LOOKUP} (?:\\d{2})/(?:\\d{2}) (?:[A-Z]{3}) (?:[0-7.]*)'], 'item_attrs': {'initial': True}, 'lookup': 'AUDIOTYPE'},\n 'channels': {'read': True, 'write': False, 'read_cmd': '#QAT', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': r'@UAT (?:[A-Z]{2}) (?:\\d{2})/(?:\\d{2}) (?:[A-Z]{3}) ([0-7.]{3})'},\n 'trackinfo': {'read': True, 'write': False, 'read_cmd': '#QTK', 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': '@UTC (.*)'},\n 'inputresolution': {'read': True, 'write': False, 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@UVO (?:_?)([a-zA-Z0-9]*)(?:_?)(?:\\s_?)(?:[a-zA-Z0-9]*)(?:_?)'},\n 'outputresolution': {'read': True, 'write': False, 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@UVO (?:_?)(?:[a-zA-Z0-9]*)(?:_?)(?:\\s_?)([a-zA-Z0-9]*)(?:_?)'},\n 'aspectratio': {'read': True, 'write': False, 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@UAR {LOOKUP}$', 'lookup': 'ASPECT'},\n 'U3D': {'read': True, 'write': False, 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': '@U3D (2D|3D)'},\n },\n 'general': {\n 'verbose': {'read': True, 'write': True, 'read_cmd': '#QVM', 'write_cmd': '#SVM {VALUE}', 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': ['@QVM OK (0|1|2|3)', '@SVM OK (0|1|2|3)'], 'cmd_settings': {'force_min': 1, 'force_max': 3}, 'item_attrs': {'attributes': {'cache': True, 'initial_value': 2}}},\n 'hdmiresolution': {'read': True, 'write': True, 'read_cmd': '#QHD', 'write_cmd': '#SHD {RAW_VALUE_UPPER}', 'item_type': 'str', 'dev_datatype': 'raw', 'reply_pattern': ['@SHD OK (.*)', '@QHD OK (.*)'], 'cmd_settings': {'valid_list': ['480I', '480P', '576I', '576P', '720P50', '720P60', '1080I50', '1080I60', '1080P24', '1080P50', '1080P60', '1080PAUTO', 'UHD24', 'UHD50', 'UHD60', 'UHD_AUTO', 'AUTO', 'Source Direct']}, 'item_attrs': {'initial': True}},\n },\n 'control': {\n 'power': {'read': True, 'write': True, 'read_cmd': '#QPW', 'write_cmd': '#P{VALUE}', 'item_type': 'bool', 'dev_datatype': 'onoff', 'reply_pattern': ['@POFF OK (OFF)', '@PON OK (ON)', '@QPW OK (ON|OFF)', '@UPW (0|1)'], 'item_attrs': {'initial': True}},\n 'pureaudio': {'read': True, 'write': True, 'write_cmd': '#PUR', 'item_type': 'bool', 'dev_datatype': 'onoff', 'reply_pattern': '@PUR OK (ON|OFF)', 'item_attrs': {'initial': True}},\n 'playpause': {'read': True, 'write': True, 'read_cmd': '#QPL', 'write_cmd': '{VALUE}', 'item_type': 'bool', 'dev_datatype': 'playpause', 'reply_pattern': ['@PLA OK {LOOKUP}$', '@PAU OK {LOOKUP}$'], 'lookup': 'PLAY'},\n 'stop': {'read': True, 'write': True, 'read_cmd': '#QPL', 'write_cmd': '#STP', 'item_type': 'bool', 'dev_datatype': 'raw', 'reply_pattern': ['@STP OK (?:(FULL\\s)?){LOOKUP}$'], 'lookup': 'STOP'},\n 'eject': {'read': True, 'write': True, 'write_cmd': '#EJT', 'item_type': 'bool', 'dev_datatype': 'openclose', 'reply_pattern': ['@UPL (OPEN|CLOS)', '@EJT OK (OPEN|CLOSE)'], 'item_attrs': {'initial': True, 'enforce': True}},\n 'chapter': {'read': True, 'write': True, 'read_cmd': '#QCH', 'write_cmd': '#SRH C{RAW_VALUE:03}', 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': ['@SRH (OK|ER INVALID)', r'@QCH OK (\\d{2})/(?:\\d{2})']},\n 'title': {'read': True, 'write': True, 'read_cmd': '#QTK', 'write_cmd': '#SRH T{RAW_VALUE:03}', 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': [r'@QTK OK (\\d{2})/(?:\\d{2})', '@SRH (OK|ER INVALID)', r'@UAT (?:[A-Z]{2}) (\\d{2})/(?:\\d{2}) (?:[A-Z]{3}) (?:[0-7.]{3})']},\n 'next': {'read': True, 'write': True, 'write_cmd': '#NXT', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@NXT (.*)'], 'item_attrs': {'enforce': True}},\n 'previous': {'read': True, 'write': True, 'write_cmd': '#PRE', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@PRE (.*)'], 'item_attrs': {'enforce': True}},\n 'forward': {'read': True, 'write': True, 'write_cmd': '#FWD', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@FWD (.*) 1x'], 'item_attrs': {'enforce': True}},\n 'reverse': {'read': True, 'write': True, 'write_cmd': '#REV', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@REV (.*) 1x'], 'item_attrs': {'enforce': True}},\n 'audio': {'read': True, 'write': True, 'write_cmd': '#AUD', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': '@AUD (.*)', 'item_attrs': {'enforce': True}},\n 'subtitle': {'read': True, 'write': True, 'write_cmd': '#SUB', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': '@SUB (.*)', 'item_attrs': {'enforce': True}},\n 'repeat': {'read': True, 'write': True, 'write_cmd': '#RPT', 'item_type': 'num', 'dev_datatype': 'raw', 'reply_pattern': ['@RPT OK {LOOKUP}$'], 'lookup': 'REPEAT'},\n 'input': {'read': True, 'write': True, 'write_cmd': '#SRC\\r#NU{VALUE}', 'item_type': 'num', 'dev_datatype': 'ok', 'reply_pattern': ['@SRC (.*)']},\n },\n 'menu': {\n 'home': {'read': True, 'write': True, 'write_cmd': '#HOM', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': '@HOM (.*)', 'item_attrs': {'enforce': True}},\n 'setup': {'read': True, 'write': True, 'write_cmd': '#SET', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@SET (.*)'], 'item_attrs': {'enforce': True}},\n 'option': {'read': True, 'write': True, 'write_cmd': '#OPT', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@OPT (.*)'], 'item_attrs': {'enforce': True}},\n 'info': {'read': True, 'write': True, 'write_cmd': '#INH', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@INH (.*)'], 'item_attrs': {'enforce': True}},\n 'popup': {'read': True, 'write': True, 'write_cmd': '#MNU', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@MNU (.*)'], 'item_attrs': {'enforce': True}},\n 'top': {'read': True, 'write': True, 'write_cmd': '#TTL', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@TTL (.*)'], 'item_attrs': {'enforce': True}},\n 'osd': {'read': True, 'write': True, 'write_cmd': '#OSD', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@OSD (.*)'], 'item_attrs': {'enforce': True}},\n 'pageup': {'read': True, 'write': True, 'write_cmd': '#PUP', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@PUP (.*)'], 'item_attrs': {'enforce': True}},\n 'pagedown': {'read': True, 'write': True, 'write_cmd': '#PDN', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@PDN (.*)'], 'item_attrs': {'enforce': True}},\n 'up': {'read': True, 'write': True, 'write_cmd': '#NUP', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@NUP (.*)'], 'item_attrs': {'enforce': True}},\n 'down': {'read': True, 'write': True, 'write_cmd': '#NDN', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@NDN (.*)'], 'item_attrs': {'enforce': True}},\n 'left': {'read': True, 'write': True, 'write_cmd': '#NLT', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@NLT (.*)'], 'item_attrs': {'enforce': True}},\n 'right': {'read': True, 'write': True, 'write_cmd': '#NRT', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@NRT (.*)'], 'item_attrs': {'enforce': True}},\n 'select': {'read': True, 'write': True, 'write_cmd': '#SEL', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@SEL (.*)'], 'item_attrs': {'enforce': True}},\n 'back': {'read': True, 'write': True, 'write_cmd': '#RET', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@RET (.*)'], 'item_attrs': {'enforce': True}},\n 'red': {'read': True, 'write': True, 'write_cmd': '#RED', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@RED (.*)'], 'item_attrs': {'enforce': True}},\n 'green': {'read': True, 'write': True, 'write_cmd': '#GRN', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@GRN (.*)'], 'item_attrs': {'enforce': True}},\n 'blue': {'read': True, 'write': True, 'write_cmd': '#BLU', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@BLU (.*)'], 'item_attrs': {'enforce': True}},\n 'yellow': {'read': True, 'write': True, 'write_cmd': '#YLW', 'item_type': 'bool', 'dev_datatype': 'ok', 'reply_pattern': ['@YLW (.*)'], 'item_attrs': {'enforce': True}},\n }\n}\n\nlookups = {\n 'ALL': {\n 'STATUS': {\n 'PLAY': 'PLAY',\n 'PAUS': 'PAUSE',\n 'PAUSE': 'PAUSE',\n 'STOP': 'STOP',\n 'DISC': 'No Disc',\n 'LOAD': 'Loading Disc',\n 'OPEN': 'Tray Open',\n 'CLOS': 'Tray Close',\n 'STPF': 'Forward Frame-by-Frame',\n 'STPR': 'Reverse Frame-by-Frame',\n 'HOME': 'Home Menu',\n 'MCTR': 'Media Center',\n 'MEDIA CENTER': 'Media Center',\n 'SCSV': 'Screen Saver',\n 'SCREEN SAVER': 'Screen Saver',\n 'DISC MENU': 'Disc Menu',\n 'MENU': 'Disc Menu'\n },\n 'PLAY': {\n 'PLAY': True,\n 'PAUS': False,\n 'PAUSE': False,\n 'STOP': False\n },\n 'STOP': {\n 'PLAY': False,\n 'PAUS': False,\n 'PAUSE': False,\n 'STOP': True\n },\n 'REPEAT': {\n '0': 'OFF',\n '1': 'Repeat Chapter',\n '2': 'Repeat Title'\n },\n 'ASPECT': {\n '16WW': '16:9 Wide',\n '16AW': '16:9 Auto Wide',\n '16A4': '16:9 Auto Wide, currently 4:3',\n '21M0': '21:9 Movable, currently full screen mode',\n '21M1': '21:9 Movable, currently playing 16:9 or 4:3 content',\n '21M2': '21:9 Movable, currently playing 21:9 content',\n '21F0': '21:9 Fixed, currently full screen mode',\n '21F1': '21:9 Fixed, currently playing 16:9 or 4:3 content',\n '21F2': '21:9 Fixed, currently playing 21:9 content',\n '21C0': '21:9 Cropped, currently full screen mode',\n '21C1': '21:9 Cropped, currently playing 16:9 or 4:3 content',\n '21C2': '21:9 Cropped, currently playing 21:9 content'\n },\n 'AUDIOTYPE': {\n 'PCM 44.1/16': 'PCM 44.1/16',\n 'PCM': 'PCM',\n 'DD': 'Dolby Digital',\n 'DP': 'Dolby Digital Plus',\n 'DT': 'Dolby TrueHD',\n 'TS': 'DTS',\n 'TH': 'DTS-HD High Resolution',\n 'TM': 'DTS-HD Master Audio',\n 'PC': 'LPCM',\n 'MP': 'MPEG Audio',\n 'CD': 'CD Audio',\n 'UN': 'Unknown',\n },\n 'DISCTYPE': {\n 'UHBD': 'Ultra HD Blu-ray Disc',\n 'BDMV': 'Blu-ray Disc',\n 'BD-MV': 'Blu-ray Disc',\n 'DVDV': 'DVD-Video',\n 'DVDA': 'DVD-Audio',\n 'SACD': 'SACD',\n 'CDDA': 'Audio-CD',\n 'DATA': 'Data Disc',\n 'VCD2': 'VCD 2.0',\n 'SVCD': 'SVCD',\n 'NO-DISC': 'No Disc',\n 'UNKW': 'Unknown',\n },\n }\n}\n","sub_path":"oppo/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":15029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"49144733","text":"from datetime import datetime\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport json,numpy\nimport pandas as pd\n#sns.set(rc={'axes.facecolor':'lightgray', 'figure.facecolor':'lightgray'})\ndef read_data(filename):\n df = pd.read_csv(filename)\n groups = df.groupID.unique()\n df['time']=df['time'].apply(lambda x: datetime.strptime(x.split('T')[0].replace('-',' '),\"%Y %m %d\"))\n t0 = min(df['time'])\n df['time']=df['time'].apply(lambda x: (x-t0).days)\n cols = ['user', 'group','day']\n #cols.extend(topics)\n df2 = pd.DataFrame(columns = cols)\n df = df[df['parent'].isnull()]\n users = list(set(df['user']))\n row = 0\n for user in users:\n smaller_df = df[df['user'] == user]\n groups = smaller_df.groupID.unique()\n for group in groups:\n entry = [user]\n entry.append(group)\n smallest_df = smaller_df[smaller_df['groupID'] == group]\n entry.append(min(smallest_df['time']))\n df2.loc[row] = entry\n row += 1\n df2.group = df2.group.astype(float)\n df2.day = df2.day.astype(float)\n return df2\n\ndef plot(df2):\n g = sns.catplot(x=\"group\", y=\"day\", kind=\"violin\", inner=None, data=df2)\n sns.swarmplot(x=\"group\", y=\"day\", color=\"k\", size=3, data=df2, ax=g.ax);\n plt.title(\"Violin Plot showing distribution of First Posts among Groups\")\n plt.show()\n\n\n","sub_path":"violin_group.py","file_name":"violin_group.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"58357963","text":"import h5py\nimport numpy as np\nimport time\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.utils import to_categorical\nfrom keras.optimizers import SGD\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\n\n# dynamically grow the memory used on the GPU\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nset_session(sess)\n\nparams = {\n\t'batch_size': \t\t32,\n\t'epochs': \t\t10,\n\t'shuffle':\t\tFalse,\n\t'validation_split':\t0.1\n\t }\n\n# read the data\n#f = h5py.File('data.h5', 'r')\n#X = f['X']\n#y = to_categorical(f['y'])\n#nx, nc = tuple(f['n'])\nnx = 1000\t#$VAR03$ #totaly layer size (height x width)\nnc = 3\t\t#$VAR04$ #$number_of_channels\nns = 10000\t#$VAR05$ #number of samples\ninput_shape = (nx, nx, nc)\n\n# reproducibility - uncomment the seeds\nnp.random.seed(1)\n#np.random.seed(None)\n\n# compute X\nX = np.random.rand(ns, nx, nx, nc)\n\n# compute y\nn = ns//2 #floor division (int returned)\ny = np.array([1]*n + [0]*n)\nnp.random.shuffle(y)\ny = to_categorical(y)\n\nprint('X:', X.shape)\nprint('y:', y.shape)\nprint(\"ns, nx, nc:\", ns, nx, nc)\nprint(\"input_shape:\", input_shape)\n\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=params['test_frac'])\n\n# define the model\nmodel = Sequential()\n\nmodel.add(Conv2D(nc, (3, 3), strides=(1, 1), activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))\nmodel.add(Dropout(0.1))\n\nmodel.add(Conv2D(nc, (3, 3), strides=(1, 1), activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))\nmodel.add(Dropout(0.1))\n\nmodel.add(Conv2D(nc, (3, 3), strides=(1, 1), activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))\nmodel.add(Dropout(0.1))\n\nmodel.add(Flatten())\nmodel.add(Dense(2, activation='softmax'))\n\nprint(model.summary())\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# train model\nmodel.fit(x=X, y=y, batch_size=params['batch_size'], epochs=params['epochs'],\n\t shuffle=params['shuffle'], validation_split=params['validation_split'])\n\n#model.predict()\n","sub_path":"conv2d.py","file_name":"conv2d.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"217391325","text":"'''\nCreated on 13.07.2015\n\n@author: Aaron Klein\n'''\n\nimport DIRECT\n\nimport numpy as np\n\nfrom robo.maximizers.base_maximizer import BaseMaximizer\n\n\nclass Direct(BaseMaximizer):\n\n def __init__(self, objective_function, X_lower, X_upper,\n n_func_evals=400, n_iters=200):\n \"\"\"\n Interface for the DIRECT algorithm by D. R. Jones, C. D. Perttunen\n and B. E. Stuckmann\n\n Parameters\n ----------\n objective_function: acquisition function\n The acquisition function which will be maximized\n X_lower: np.ndarray (D)\n Lower bounds of the input space\n X_upper: np.ndarray (D)\n Upper bounds of the input space\n n_func_evals: int\n The maximum number of function evaluations\n n_iters: int\n The maximum number of iterations\n \"\"\"\n self.n_func_evals = n_func_evals\n self.n_iters = n_iters\n super(Direct, self).__init__(objective_function, X_lower, X_upper)\n\n def _direct_acquisition_fkt_wrapper(self, acq_f):\n def _l(x, user_data):\n return -acq_f(np.array([x])), 0\n return _l\n\n def maximize(self):\n \"\"\"\n Maximizes the given acquisition function.\n\n Returns\n -------\n np.ndarray(N,D)\n Point with highest acquisition value.\n \"\"\"\n x, _, _ = DIRECT.solve(\n self._direct_acquisition_fkt_wrapper(self.objective_func),\n l=[self.X_lower],\n u=[self.X_upper],\n maxT=self.n_iters,\n maxf=self.n_func_evals)\n return np.array([x])\n","sub_path":"RoBO/build/lib.linux-x86_64-2.7/robo/maximizers/direct.py","file_name":"direct.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"253238487","text":"month = {1:\"January\",2:\"February\",3:\"March\",4:\"April\",5:\"May\",6:\"June\",\n 7:\"July\",8:\"August\",9:\"September\",10:\"October\",11:\"November\",12:\"December\"}\n\nclass Date:\n\n def __init__(self, date):\n date_components = date.split('/')\n self.month = int(date_components[0])\n self.day = int(date_components[1])\n self.year = int(date_components[2])\n\n def __str__(self):\n date_string = month[self.month] + ' ' + str(self.day) + ', ' + str(self.year)\n return date_string\n","sub_path":"date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"394219448","text":"from __future__ import division\nimport matplotlib.pyplot as plt\n# from newsavetxt import *\nimport numpy as np\nimport os\nimport pandas as pd\nfrom math import floor, ceil\nimport itertools as itertools\nfrom itertools import chain\n# from itertools import izip_longest #Python2\nfrom itertools import zip_longest #Python3\n\n\ndef Projector(M, x1, x2, y1, y2, z1, z2, Nx, Ny, Nz, dx, dy, dz):\n\n d_12 = np.sqrt(pow((x2 - x1),2) + pow((y2 - y1),2) + pow((z2 - z1), 2))\n\n def px(alpha):\n return x1 + alpha*(x2 - x1) #Parametric way of representing the ray\n def py(alpha):\n return y1 + alpha*(y2 - y1)\n def pz(alpha):\n return z1 + alpha*(z2 - z1)\n\n def phi_x(Alpha):\n return ( (px(Alpha) )/dx )\n def phi_y(Alpha):\n return ( (py(Alpha) )/dy )\n def phi_z(Alpha):\n return ( (pz(Alpha) )/dz )\n\n alpha_x = [( ((i*dx) - x1)/(x2 - x1) ) for i in range(Nx)]\n alpha_y = [( ((j*dy) - y1)/(y2 - y1) ) for j in range(Ny)]\n alpha_z = [( ((k*dz) - z1)/(z2 - z1) ) for k in range(Nz)]\n\n alphax_min = min(alpha_x[0],alpha_x[Nx-1])\n alphax_max = max(alpha_x[0],alpha_x[Nx-1])\n alphay_min = min(alpha_y[0],alpha_y[Ny-1])\n alphay_max = max(alpha_y[0],alpha_y[Ny-1])\n alphaz_min = min(alpha_z[0],alpha_z[Nz-1])\n alphaz_max = max(alpha_z[0],alpha_z[Nz-1])\n\n alpha_min = max(0, min(alpha_x[0],alpha_x[Nx-1]), min(alpha_y[0],alpha_y[Ny-1]), alphaz_min) #first entry point and exit point\n alpha_max = min(1, max(alpha_x[0],alpha_x[Nx-1]), max(alpha_y[0],alpha_y[Ny-1]), alphaz_max)\n\n if (alpha_min < alpha_max): #ray does enter the grid\n if (x1 < x2):\n if alpha_min == alphax_min:\n i_min = 1\n elif alpha_min != alphax_min:\n i_min = ceil(phi_x(alpha_min))\n if alpha_max == alphax_max:\n i_max = Nx - 1\n elif alpha_max != alphax_max:\n i_max = floor(phi_x(alpha_max))\n else:\n if alpha_min == alphax_min:\n i_max = Nx - 2\n elif alpha_min != alphax_min:\n i_max = floor(phi_x(alpha_min))\n if alpha_max == alphax_max:\n i_min = 0\n elif alpha_max != alphax_max:\n i_min = ceil(phi_x(alpha_max))\n\n if (y1 < y2):\n if alpha_min == alphay_min:\n j_min = 1\n elif alpha_min != alphay_min:\n j_min = ceil(phi_y(alpha_min))\n if alpha_max == alphay_max:\n j_max = Ny - 1\n elif alpha_max != alphay_max:\n j_max = floor(phi_y(alpha_max))\n else:\n if alpha_min == alphay_min:\n j_max = Ny - 2\n elif alpha_min != alphay_min:\n j_max = floor(phi_y(alpha_min))\n if alpha_max == alphay_max:\n j_min = 0\n elif alpha_max != alphay_max:\n j_min = ceil(phi_y(alpha_max))\n\n if (z1 < z2):\n if alpha_min == alphaz_min:\n k_min = 1\n elif alpha_min != alphaz_min:\n k_min = ceil(phi_z(alpha_min))\n if alpha_max == alphaz_max:\n k_max = Nz - 1\n elif alpha_max != alphaz_max:\n k_max = floor(phi_z(alpha_max))\n else:\n if alpha_min == alphaz_min:\n k_max = Nz - 2\n elif alpha_min != alphaz_min:\n k_max = floor(phi_z(alpha_min))\n if alpha_max == alphaz_max:\n k_min = 0\n elif alpha_max != alphaz_max:\n k_min = ceil(phi_z(alpha_max))\n\n\n if x1 < x2:\n a_x = [alpha_x[ii] for ii in range(int(i_min), int(i_max)+1)]\n else:\n a_x = [alpha_x[ii] for ii in range(int(i_max), int(i_min)-1,-1)]\n\n\n if y1 < y2:\n a_y = [alpha_y[jj] for jj in range(int(j_min), int(j_max)+1)]\n else:\n a_y = [alpha_y[jj] for jj in range(int(j_max), int(j_min)-1,-1)]\n\n if z1 < z2:\n a_z = [alpha_z[kk] for kk in range(int(k_min), int(k_max)+1)]\n else:\n a_z = [alpha_z[kk] for kk in range(int(k_max), int(k_min)-1,-1)]\n\n# print(\"---------------------------imin, imax, jmin, jmax, kmin, kmax---------------------------\")\n# print(a_x)\n## print(i_max)\n# print(\"----------------------------------------------------------------------------------------\")\n# print(a_y)\n## print(j_max)\n# print(\"----------------------------------------------------------------------------------------\")\n# print(a_z)\n## print(k_max)\n\n A = [alpha_min] + list(itertools.chain(*(filter(None, pair) for pair in zip_longest(a_x,a_y,a_z)))) + [alpha_max] #MERGING\n# print(\"--------------------------------------------------------------------\")\n# print(A)\n AA = list(set(A)) #REPALCE DUPLICATES WITH ONE COPY\n AA.sort() #SORT ARRAY\n# print(\"--------------------------------------------------------------------\")\n# print(AA)\n Nv = len(AA)\n\n im = [floor((phi_x((AA[m] + AA[m - 1])/2))) for m in range(1,Nv)] #X PIXEL INDEX\n jm = [floor((phi_y((AA[m] + AA[m - 1])/2))) for m in range(1,Nv)] #Y PIXEL INDEX\n km = [floor((phi_z((AA[m] + AA[m - 1])/2))) for m in range(1,Nv)]\n l = [(AA[m] - AA[m - 1])*d_12 for m in range(1,Nv)] #PATH LENGTH\n Sum = 0\n for s in range(len(l)):\n Sum += (M[int(km[s])][int(jm[s])][int(im[s])]*l[s])\n# print(\"---------------------------------------------------------------------------\")\n# print(\"---------------------------------------------------------------------------\")\n# print(im)\n# print(jm)\n# print(km)\n\n return im, jm, km, l, Sum\n","sub_path":"Projector3D.py","file_name":"Projector3D.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"197986961","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport pandas as pd\r\nfrom sklearn import svm\r\nimport numpy as np\r\nfrom sklearn.metrics import roc_curve, auc \r\nfrom scipy import interp \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import tree\r\n\r\nfrom keras import models\r\nfrom keras import layers\r\nfrom tensorflow import keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nimport numpy as np\r\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\nfrom keras.utils import np_utils\r\nfrom sklearn.metrics import roc_curve,auc,roc_auc_score\r\nimport tensorflow as tf\r\nfrom keras import backend as K\r\nimport os\r\nfrom numpy.random import seed \r\nseed(1) \r\n\r\ndef neural_network(x_train,y_train):\r\n #scaler = StandardScaler()\r\n #scaler.fit(x_train)\r\n #x_train = scaler.transform(x_train)\r\n #x_test = scaler.transform(x_test)\r\n mlp = MLPClassifier(hidden_layer_sizes=(13,13,13),max_iter=5000)\r\n mlp.fit(x_train,y_train)\r\n return mlp \r\n\r\ndef dealWithSVM(x_train,y_train):\r\n svc = svm.SVC(probability = True)\r\n svc.fit(x_train,y_train)\r\n return svc\r\n \r\ndef dealWithLR(X, y):\r\n lr = LogisticRegression()\r\n lr.fit(X, y)\r\n return lr\r\n \r\ndef dealWithNB(X, y):\r\n nb = GaussianNB()\r\n nb.fit(X, y)\r\n return nb\r\n \r\n \r\ndata_train=pd.read_csv(\"train.csv\")\r\ndata_test=pd.read_csv('test.csv')\r\n\r\nfeature=[] \r\nfor i in data_train.columns:\r\n if (i!='label5') & (i!='sample') & (i!='OS'):\r\n feature.append(i)\r\nsample=['sample']\r\n#sample2=['sample']\r\nsample_train=data_train[sample]\r\nsample_test=data_test[sample]\r\n\r\nlabel=['label5']\r\nx_train=data_train[feature] \r\ny_train=data_train[label] \r\nx_test=data_test[feature] \r\ny_test=data_test[label] \r\nx_train=x_train.as_matrix() \r\ny_train=y_train.as_matrix() \r\nx_test=x_test.as_matrix() \r\ny_test=y_test.as_matrix() \r\nsample_train=sample_train.as_matrix()\r\n#sample_test=sample_test.as_matrix()\r\nsample_test_OS=data_test['OS']\r\n\r\n###(1) neural_network \r\nmlp=neural_network(x_train,y_train) \r\n#print mlp.coefs_\r\n#print mlp.intercepts_\r\npredictions_neural_network = mlp.predict(x_test)\r\nprobablity_neural_network= mlp.predict_proba(x_test)\r\nfpr_neural_network,tpr_neural_network, threshold_neural_network = roc_curve(y_test,probablity_neural_network[:, 1])\r\n\r\n\r\n###(2)LR\r\nLR=dealWithLR(x_train,y_train) \r\nLR.fit(x_train,y_train)\r\npredictions_LR = LR.predict(x_test)\r\nprobablity_LR= LR.predict_proba(x_test)\r\nfpr_LR,tpr_LR, threshold_LR = roc_curve(y_test,probablity_LR[:, 1])\r\n\r\n\r\n###(3)NB\r\nNB=dealWithNB(x_train,y_train) \r\nNB.fit(x_train,y_train)\r\npredictions_NB = NB.predict(x_test)\r\nprobablity_NB= NB.predict_proba(x_test)\r\nfpr_NB,tpr_NB, threshold_NB = roc_curve(y_test,probablity_NB[:, 1])\r\n\r\n\r\n###(4)SVM\r\nSVM=dealWithSVM(x_train,y_train) \r\nSVM.fit(x_train,y_train)\r\npredictions_SVM= SVM.predict(x_test)\r\nprobablity_SVM= SVM.predict_proba(x_test)\r\nfpr_SVM, tpr_SVM, threshold_SVM = roc_curve(y_test,probablity_SVM[:, 1]) \r\n\r\n###(5)Random Forest\r\nRF=RandomForestClassifier(n_estimators=1000, max_depth=50, min_samples_split=5,random_state=1) \r\nRF.fit(x_train,y_train)\r\npredictions_RF= RF.predict(x_test)\r\nprobablity_RF= RF.predict_proba(x_test)\r\nfpr_RF, tpr_RF, threshold_RF = roc_curve(y_test,probablity_RF[:, 1]) \r\n\r\n##(6) Linear Regression\r\nLiR=LinearRegression()\r\nLiR.fit(x_train,y_train)\r\npredictions_LiR = LiR.predict(x_test)\r\n#probablity_LiR= LiR.predict_proba(x_test)\r\nfpr_LiR,tpr_LiR,threshold_LiR = roc_curve(y_test,predictions_LiR[:, 0])\r\n\r\n###(7)Decision Tree\r\nmode = tree.DecisionTreeClassifier(criterion='gini')\r\nmode.fit(x_train,y_train)\r\npredictions_tree = mode.predict(x_test)\r\nprobablity_tree= mode.predict_proba(x_test)\r\nfpr_tree,tpr_tree,threshold_tree= roc_curve(y_test,probablity_tree[:, 1])\r\n\r\n###(8)Deeplearn\r\nmodel=models.Sequential()\r\nmodel.add(layers.Dense(16,activation='relu',input_shape=(105,)))\r\nmodel.add(layers.Dense(16,activation='relu'))\r\nmodel.add(layers.Dense(1,activation='sigmoid'))\r\nmodel.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])\r\nmodel.fit(x_train,y_train,epochs=4,batch_size=512)\r\ny_pred_label = model.predict_classes(x_test)\r\ny_pred_prob = model.predict_proba(x_test)\r\nfpr_Keras,tpr_Keras,threshold_Keras=roc_curve(y_test,y_pred_prob,pos_label=1)\r\n\r\n\r\nfpr=(fpr_neural_network,fpr_LR,fpr_NB,fpr_SVM,fpr_RF,fpr_LiR,fpr_tree,fpr_Keras)\r\ntpr=(tpr_neural_network,tpr_LR,tpr_NB,tpr_SVM,tpr_RF,tpr_LiR,tpr_tree,tpr_Keras)\r\nlabels=['NN','LR','NB','SVM',\"RF\",\"Linear\",\"Tree\",\"Keras\"] \r\n\r\n##ROC curve \r\ndef roc_curve_(fpr,tpr,labels): \r\n colorTable = ['blue','red','yellow','black',\"green\",\"orange\",\"gray\",\"purple\"] \r\n plt.figure() \r\n lw = 2 \r\n plt.figure(figsize=(10,8)) \r\n for i in range(len(fpr)): \r\n roc_auc = auc(fpr[i],tpr[i]) \r\n plt.plot(fpr[i],tpr[i],color=colorTable[i],linewidth=3,label='%s (%0.4f)' %(labels[i],roc_auc)) \r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') \r\n plt.xlim([0.0, 1.0]) \r\n plt.ylim([0.0, 1.05]) \r\n plt.xlabel('False Positive Rate', fontsize=16) \r\n plt.ylabel('True Positive Rate', fontsize=16) \r\n plt.title('ROC Curve', fontsize=20) \r\n plt.legend(loc=\"lower right\") \r\n plt.savefig(\"ROC curve.jpg\")\r\n plt.show() \r\nroc_curve_(fpr,tpr,labels)","sub_path":"MPA model.py","file_name":"MPA model.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"372283946","text":"from flask import render_template, flash, redirect\nfrom app import app, db\nfrom .forms import LoginForm, CommentForm\nfrom .models import Comment\nfrom datetime import datetime\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\tcontent = \"This is some content.\"\n\treturn (render_template('index.html', stuff = content))\n\n@app.route('/page')\ndef page():\n\treturn(render_template('page.html'))\n\n@app.route('/comments', methods = ['GET', 'POST'])\ndef comments():\n\tform = CommentForm()\n\tif form.validate():\n\t\ttime = datetime.now()\n\t\tcomment = Comment(username = form.username.data, body = form.comment.data,\n\t\t timestamp =time)\n\t\tdb.session.add(comment)\n\t\tdb.session.commit()\n\t\tflash('Thanks for commenting')\n\t\treturn(redirect('/comments')) \n\tcomments = Comment.query.order_by(Comment.timestamp).limit(10).all()\n\treturn(render_template('comments.html', form = form, comments = comments))\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"277490950","text":"#!/usr/bin/env python\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\n#from PyQt4 import uic\n#Ui_correctorDialog, QDialog = uic.loadUiType('./correctordialog.ui')\n#from ui_configuredialog import Ui_configureDialog\n\nimport os\n\nfrom setup import *\n\n__version__ = WIDGET_VERSION\n\n#----------------------------------------------------------------------\nclass LPickConfigurationFileDialog(QFileDialog):\n def __init__(self, parent=None):\n super(LPickConfigurationFileDialog,self).__init__(parent)\n self.debug = (DEBUG & DIALOGS)\n self.setNameFilter(\"Configuration File (*.pvr);;Configuration File(*.conf);;All File (*)\")\n self.setFileMode(QFileDialog.AnyFile)\n self.setViewMode(QFileDialog.Detail)\n self.setDirectory(os.getcwd())\n self.setViewMode(QFileDialog.Detail)\n\n#----------------------------------------------------------------------\n# MAIN \n#----------------------------------------------------------------------\ndef main():\n import sys\n\n qApp = QApplication(sys.argv)\n d = LPickConfigurationFileDialog()\n d.show()\n sys.exit(qApp.exec_())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cls1/eggs/src/lti/widgets.20120401/pvramper/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"537245000","text":"from spynnaker.pyNN.utilities import conf\nfrom spynnaker.pyNN.utilities import constants\nfrom spynnaker.pyNN.utilities import utility_calls\nfrom spynnaker.pyNN import exceptions\nfrom spynnaker.pyNN.models.neural_properties import master_pop_table_generators\nfrom spynnaker.pyNN.models.neural_properties.synaptic_list import SynapticList\nfrom spynnaker.pyNN.models.neural_projections.projection_partitioned_edge \\\n import ProjectionPartitionedEdge\nfrom spynnaker.pyNN.models.neural_projections.projection_partitionable_edge \\\n import ProjectionPartitionableEdge\n\nfrom pacman.model.partitionable_graph.abstract_partitionable_vertex \\\n import AbstractPartitionableVertex\n\nfrom spinn_front_end_common.utilities import helpful_functions\n\nfrom data_specification.enums.data_type import DataType\nimport data_specification.utility_calls as dsg_utilities\n\nfrom scipy import special\nimport math\nimport sys\nimport numpy\n\n# TODO: Make sure these values are correct (particularly CPU cycles)\n_SYNAPSES_BASE_DTCM_USAGE_IN_BYTES = 28\n_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES = 0\n_SYNAPSES_BASE_N_CPU_CYCLES_PER_NEURON = 10\n_SYNAPSES_BASE_N_CPU_CYCLES = 8\n\n\nclass SynapticManager(object):\n \"\"\" Deals with synapses\n \"\"\"\n\n def __init__(self, synapse_type, machine_time_step, ring_buffer_sigma,\n spikes_per_second, population_table_type=None):\n self._synapse_type = synapse_type\n self._ring_buffer_sigma = ring_buffer_sigma\n self._spikes_per_second = spikes_per_second\n self._machine_time_step = machine_time_step\n\n # Get the type of population table\n self._population_table_type = population_table_type\n if population_table_type is None:\n population_table_type = (\"MasterPopTableAs\" + conf.config.get(\n \"MasterPopTable\", \"generator\"))\n algorithms = helpful_functions.get_valid_components(\n master_pop_table_generators, \"master_pop_table_as\")\n self._population_table_type = algorithms[population_table_type]()\n\n if self._ring_buffer_sigma is None:\n self._ring_buffer_sigma = conf.config.getfloat(\n \"Simulation\", \"ring_buffer_sigma\")\n\n if self._spikes_per_second is None:\n self._spikes_per_second = conf.config.getfloat(\n \"Simulation\", \"spikes_per_second\")\n\n # Prepare for dealing with STDP\n self._stdp_checked = False\n self._stdp_mechanism = None\n\n @property\n def synapse_type(self):\n return self._synapse_type\n\n @property\n def ring_buffer_sigma(self):\n return self._ring_buffer_sigma\n\n @ring_buffer_sigma.setter\n def ring_buffer_sigma(self, ring_buffer_sigma):\n self._ring_buffer_sigma = ring_buffer_sigma\n\n @property\n def spikes_per_second(self):\n return self._spikes_per_second\n\n @spikes_per_second.setter\n def spikes_per_second(self, spikes_per_second):\n self._spikes_per_second = spikes_per_second\n\n @property\n def vertex_executable_suffix(self):\n if self._stdp_mechanism is None:\n return \"\"\n return \"_\" + self._stdp_mechanism.get_vertex_executable_suffix()\n\n def get_n_cpu_cycles(self, vertex_slice, graph):\n\n # TODO: Calculate this correctly\n return 0\n\n def get_dtcm_usage_in_bytes(self, vertex_slice, graph):\n\n # TODO: Calculate this correctly\n return 0\n\n def _get_synapse_params_size(self, vertex_slice):\n per_neuron_usage = (\n self._synapse_type.get_sdram_usage_per_neuron_in_bytes())\n return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES +\n (per_neuron_usage * vertex_slice.n_atoms) +\n (4 * self._synapse_type.get_n_synapse_types()))\n\n def _check_synapse_dynamics(self, in_edges):\n \"\"\" Checks the synapse dynamics for all edges is the same\n \"\"\"\n if self._stdp_checked:\n return True\n self._stdp_checked = True\n for in_edge in in_edges:\n if (isinstance(in_edge, ProjectionPartitionableEdge) and\n in_edge.synapse_dynamics is not None):\n if in_edge.synapse_dynamics.fast is not None:\n raise exceptions.SynapticConfigurationException(\n \"Fast synapse dynamics are not supported\")\n elif in_edge.synapse_dynamics.slow is not None:\n if self._stdp_mechanism is None:\n self._stdp_mechanism = in_edge.synapse_dynamics.slow\n else:\n if not (self._stdp_mechanism ==\n in_edge.synapse_dynamics.slow):\n raise exceptions.SynapticConfigurationException(\n \"Different STDP mechanisms on the same\"\n \" vertex are not supported\")\n\n def _get_synaptic_block_size(self, synaptic_sub_list, max_n_words):\n \"\"\" Get the size of a single block\n \"\"\"\n # Gets smallest possible (i.e. supported by row length\n # Table structure) that can contain max_row_length\n row_length = self._population_table_type.get_allowed_row_length(\n max_n_words)\n num_rows = synaptic_sub_list.get_n_rows()\n syn_row_sz = 4 * (constants.SYNAPTIC_ROW_HEADER_WORDS + row_length)\n return syn_row_sz * num_rows\n\n def _get_exact_synaptic_blocks_size(\n self, graph_mapper, subvertex_in_edges):\n \"\"\" Get the exact size all of the synaptic blocks\n \"\"\"\n memory_size = 0\n\n # Go through the subedges and add up the memory\n for subedge in subvertex_in_edges:\n\n # pad the memory size to meet 1 k offsets\n if (memory_size & 0x3FF) != 0:\n memory_size = (memory_size & 0xFFFFFC00) + 0x400\n\n sublist = subedge.get_synapse_sublist(graph_mapper)\n max_n_words = max([\n graph_mapper.get_partitionable_edge_from_partitioned_edge(\n subedge).get_synapse_row_io().get_n_words(synapse_row)\n for synapse_row in sublist.get_rows()])\n\n all_syn_block_sz = self._get_synaptic_block_size(\n sublist, max_n_words)\n memory_size += all_syn_block_sz\n return memory_size\n\n def _get_estimate_synaptic_blocks_size(self, vertex_slice, in_edges):\n \"\"\" Get an estimate of the synaptic blocks memory size\n \"\"\"\n self._check_synapse_dynamics(in_edges)\n memory_size = 0\n\n for in_edge in in_edges:\n if isinstance(in_edge, ProjectionPartitionableEdge):\n\n # Get maximum row length in this edge\n max_n_words = in_edge.get_max_n_words(vertex_slice)\n all_syn_block_sz = self._get_synaptic_block_size(\n in_edge, max_n_words)\n\n # TODO: Fix this to be more accurate!\n # May require modification to the master population table\n n_atoms = sys.maxint\n edge_pre_vertex = in_edge.pre_vertex\n if isinstance(edge_pre_vertex, AbstractPartitionableVertex):\n n_atoms = in_edge.pre_vertex.get_max_atoms_per_core()\n if in_edge.pre_vertex.n_atoms < n_atoms:\n n_atoms = in_edge.pre_vertex.n_atoms\n\n num_rows = in_edge.get_n_rows()\n extra_mem = math.ceil(float(num_rows) / float(n_atoms)) * 1024\n if extra_mem == 0:\n extra_mem = 1024\n all_syn_block_sz += extra_mem\n memory_size += all_syn_block_sz\n\n return memory_size\n\n def _get_synapse_dynamics_parameter_size(self, in_edges):\n \"\"\" Get the size of the synapse dynamics region\n \"\"\"\n self._check_synapse_dynamics(in_edges)\n if self._stdp_mechanism is not None:\n return self._stdp_mechanism.get_params_size(\n len(self._synapse_type.get_synapse_targets()))\n return 0\n\n def get_sdram_usage_in_bytes(self, vertex_slice, in_edges):\n return (\n self._get_synapse_params_size(vertex_slice) +\n self._get_synapse_dynamics_parameter_size(in_edges) +\n self._get_estimate_synaptic_blocks_size(vertex_slice, in_edges) +\n self._population_table_type.get_master_population_table_size(\n vertex_slice, in_edges))\n\n def _reserve_memory_regions(\n self, spec, vertex, vertex_slice, graph, all_syn_block_sz):\n\n spec.reserve_memory_region(\n region=constants.POPULATION_BASED_REGIONS.SYNAPSE_PARAMS.value,\n size=self._get_synapse_params_size(vertex_slice),\n label='SynapseParams')\n\n in_edges = graph.incoming_edges_to_vertex(vertex)\n master_pop_table_sz = \\\n self._population_table_type.get_master_population_table_size(\n vertex_slice, in_edges)\n if master_pop_table_sz > 0:\n spec.reserve_memory_region(\n region=constants.POPULATION_BASED_REGIONS.POPULATION_TABLE\n .value,\n size=master_pop_table_sz, label='PopTable')\n if all_syn_block_sz > 0:\n spec.reserve_memory_region(\n region=constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX\n .value,\n size=all_syn_block_sz, label='SynBlocks')\n\n synapse_dynamics_sz = self._get_synapse_dynamics_parameter_size(\n in_edges)\n if synapse_dynamics_sz != 0:\n spec.reserve_memory_region(\n region=constants.POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS\n .value,\n size=synapse_dynamics_sz, label='synapseDynamicsParams')\n\n @staticmethod\n def _ring_buffer_expected_upper_bound(\n weight_mean, weight_std_dev, spikes_per_second,\n machine_timestep, n_synapses_in, sigma):\n \"\"\"\n Provides expected upper bound on accumulated values in a ring buffer\n element.\n\n Requires an assessment of maximum Poisson input rate.\n\n Assumes knowledge of mean and SD of weight distribution, fan-in\n & timestep.\n\n All arguments should be assumed real values except n_synapses_in\n which will be an integer.\n\n weight_mean - Mean of weight distribution (in either nA or\n microSiemens as required)\n weight_std_dev - SD of weight distribution\n spikes_per_second - Maximum expected Poisson rate in Hz\n machine_timestep - in us\n n_synapses_in - No of connected synapses\n sigma - How many SD above the mean to go for upper bound;\n a good starting choice is 5.0. Given length of simulation we\n can set this for approximate number of saturation events\n\n \"\"\"\n\n # E[ number of spikes ] in a timestep\n # x /1000000.0 = conversion between microsecond to second\n average_spikes_per_timestep = (\n float(n_synapses_in * spikes_per_second) *\n (float(machine_timestep) / 1000000.0))\n\n # Exact variance contribution from inherent Poisson variation\n poisson_variance = average_spikes_per_timestep * (weight_mean ** 2)\n\n # Upper end of range for Poisson summation required below\n # upper_bound needs to be an integer\n upper_bound = int(round(average_spikes_per_timestep +\n constants.POSSION_SIGMA_SUMMATION_LIMIT *\n math.sqrt(average_spikes_per_timestep)))\n\n # Closed-form exact solution for summation that gives the variance\n # contributed by weight distribution variation when modulated by\n # Poisson PDF. Requires scipy.special for gamma and incomplete gamma\n # functions. Beware: incomplete gamma doesn't work the same as\n # Mathematica because (1) it's regularised and needs a further\n # multiplication and (2) it's actually the complement that is needed\n # i.e. 'gammaincc']\n\n weight_variance = 0.0\n\n if weight_std_dev > 0:\n\n lngamma = special.gammaln(1 + upper_bound)\n\n gammai = special.gammaincc(1 + upper_bound,\n average_spikes_per_timestep)\n\n big_ratio = (math.log(average_spikes_per_timestep) * upper_bound -\n lngamma)\n\n if -701.0 < big_ratio < 701.0 and big_ratio != 0.0:\n\n log_weight_variance = (\n -average_spikes_per_timestep +\n math.log(average_spikes_per_timestep) +\n 2.0 * math.log(weight_std_dev) +\n math.log(math.exp(average_spikes_per_timestep) * gammai -\n math.exp(big_ratio)))\n\n weight_variance = math.exp(log_weight_variance)\n\n # upper bound calculation -> mean + n * SD\n return ((average_spikes_per_timestep * weight_mean) +\n (sigma * math.sqrt(poisson_variance + weight_variance)))\n\n def _get_ring_buffer_totals(self, subvertex, sub_graph, graph_mapper):\n in_sub_edges = sub_graph.incoming_subedges_from_subvertex(subvertex)\n vertex_slice = graph_mapper.get_subvertex_slice(subvertex)\n n_synapse_types = len(self._synapse_type.get_synapse_targets())\n absolute_max_weights = numpy.zeros(n_synapse_types)\n\n # If we have an STDP mechanism, get the maximum plastic weight\n stdp_max_weight = None\n if self._stdp_mechanism is not None:\n stdp_max_weight = self._stdp_mechanism.get_max_weight()\n absolute_max_weights.fill(stdp_max_weight)\n\n total_weights = numpy.zeros((n_synapse_types, vertex_slice.n_atoms))\n total_square_weights = numpy.zeros(\n (n_synapse_types, vertex_slice.n_atoms))\n total_items = numpy.zeros((n_synapse_types, vertex_slice.n_atoms))\n for subedge in in_sub_edges:\n sublist = subedge.get_synapse_sublist(graph_mapper)\n sublist.sum_n_connections(total_items)\n edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(\n subedge)\n\n if edge.synapse_dynamics is None:\n\n # If there's no STDP maximum weight, sum the initial weights\n sublist.max_weights(absolute_max_weights)\n sublist.sum_weights(total_weights)\n sublist.sum_square_weights(total_square_weights)\n\n else:\n\n # Otherwise, sum the pathalogical case of all columns being\n # at stdp_max_weight\n sublist.sum_fixed_weight(total_weights, stdp_max_weight)\n sublist.sum_fixed_weight(total_square_weights,\n stdp_max_weight * stdp_max_weight)\n\n return (total_weights, total_square_weights, total_items,\n absolute_max_weights)\n\n def _get_ring_buffer_to_input_left_shifts(\n self, subvertex, sub_graph, graph_mapper, machine_timestep):\n \"\"\" Get the scaling of the ring buffer to provide as much accuracy as\\\n possible without too much overflow\n \"\"\"\n\n total_weights, total_square_weights, total_items, abs_max_weights =\\\n self._get_ring_buffer_totals(subvertex, sub_graph, graph_mapper)\n\n # Get maximum weight that can go into each post-synaptic neuron per\n # synapse-type\n max_weights = [max(t) for t in total_weights]\n\n # Clip the total items to avoid problems finding the mean of nothing(!)\n total_items = numpy.clip(total_items, a_min=1,\n a_max=numpy.iinfo(int).max)\n weight_means = total_weights / total_items\n\n # Calculate the standard deviation, clipping to avoid numerical errors\n weight_std_devs = numpy.sqrt(\n numpy.clip(numpy.divide(\n total_square_weights -\n numpy.divide(numpy.power(total_weights, 2),\n total_items),\n total_items), a_min=0.0, a_max=numpy.finfo(float).max))\n\n vertex_slice = graph_mapper.get_subvertex_slice(subvertex)\n n_synapse_types = len(self._synapse_type.get_synapse_targets())\n expected_weights = numpy.fromfunction(\n numpy.vectorize(\n lambda i, j: self._ring_buffer_expected_upper_bound(\n weight_means[i][j], weight_std_devs[i][j],\n self._spikes_per_second, machine_timestep,\n total_items[i][j], self._ring_buffer_sigma)),\n (n_synapse_types, vertex_slice.n_atoms))\n expected_max_weights = [max(t) for t in expected_weights]\n max_weights = [min((w, e))\n for w, e in zip(max_weights, expected_max_weights)]\n max_weights = [max((w, a))\n for w, a in zip(max_weights, abs_max_weights)]\n\n # Convert these to powers\n max_weight_powers = [0 if w <= 0\n else int(math.ceil(max(0, math.log(w, 2))))\n for w in max_weights]\n\n # If 2^max_weight_power equals the max weight, we have to add another\n # power, as range is 0 - (just under 2^max_weight_power)!\n max_weight_powers = [w + 1 if (2 ** w) >= a else w\n for w, a in zip(max_weight_powers, max_weights)]\n\n # If we have an STDP mechanism that uses signed weights,\n # Add another bit of shift to prevent overflows\n if self._stdp_mechanism is not None\\\n and self._stdp_mechanism.are_weights_signed():\n max_weight_powers = [m + 1 for m in max_weight_powers]\n\n return max_weight_powers\n\n @staticmethod\n def _get_weight_scale(ring_buffer_to_input_left_shift):\n \"\"\" Return the amount to scale the weights by to convert them from \\\n floating point values to 16-bit fixed point numbers which can be \\\n shifted left by ring_buffer_to_input_left_shift to produce an\\\n s1615 fixed point number\n \"\"\"\n return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1)))\n\n def _write_synapse_parameters(\n self, spec, subvertex, subgraph, graph_mapper, vertex_slice):\n\n # Get the ring buffer shifts and scaling factors\n ring_buffer_shifts = self._get_ring_buffer_to_input_left_shifts(\n subvertex, subgraph, graph_mapper, self._machine_time_step)\n weight_scales = [self._get_weight_scale(r) for r in ring_buffer_shifts]\n\n # update projections for future use\n in_partitioned_edges = subgraph.incoming_subedges_from_subvertex(\n subvertex)\n for partitioned_edge in in_partitioned_edges:\n partitioned_edge.weight_scales_setter(weight_scales)\n\n spec.switch_write_focus(\n region=constants.POPULATION_BASED_REGIONS.SYNAPSE_PARAMS.value)\n utility_calls.write_parameters_per_neuron(\n spec, vertex_slice,\n self._synapse_type.get_synapse_type_parameters())\n\n spec.write_array(ring_buffer_shifts)\n\n return weight_scales\n\n @staticmethod\n def _write_synapse_row_info(\n sublist, row_io, spec, current_write_ptr, fixed_row_length, region,\n weight_scales, n_synapse_type_bits):\n \"\"\" Write this synaptic block to the designated synaptic matrix region\\\n at its current write pointer.\n \"\"\"\n\n # Switch focus to the synaptic matrix memory region:\n spec.switch_write_focus(region)\n\n # Align the write pointer to the next 1Kbyte boundary using padding:\n write_ptr = current_write_ptr\n\n # Remember this aligned address, it's where this block will start:\n block_start_addr = write_ptr\n\n # Write the synaptic block, tracking the word count:\n synaptic_rows = sublist.get_rows()\n data = numpy.zeros(\n (fixed_row_length +\n constants.SYNAPTIC_ROW_HEADER_WORDS) *\n sublist.get_n_rows(), dtype=\"uint32\")\n data.fill(0xBBCCDDEE)\n\n for row_no, row in enumerate(synaptic_rows):\n data_pos = ((fixed_row_length +\n constants.SYNAPTIC_ROW_HEADER_WORDS) *\n row_no)\n\n plastic_region = row_io.get_packed_plastic_region(\n row, weight_scales, n_synapse_type_bits)\n\n # Write the size of the plastic region\n data[data_pos] = plastic_region.size\n data_pos += 1\n\n # Write the plastic region\n data[data_pos:(data_pos + plastic_region.size)] = plastic_region\n data_pos += plastic_region.size\n\n fixed_fixed_region = row_io.get_packed_fixed_fixed_region(\n row, weight_scales, n_synapse_type_bits)\n fixed_plastic_region = row_io.get_packed_fixed_plastic_region(\n row, weight_scales, n_synapse_type_bits)\n\n # Write the size of the fixed parts\n data[data_pos] = fixed_fixed_region.size\n data[data_pos + 1] = fixed_plastic_region.size\n data_pos += 2\n\n # Write the fixed fixed region\n data[data_pos:(data_pos + fixed_fixed_region.size)] = \\\n fixed_fixed_region\n data_pos += fixed_fixed_region.size\n\n # As everything needs to be word aligned, add extra zero to\n # fixed_plastic Region if it has an odd number of entries and build\n # uint32 view of it\n if (fixed_plastic_region.size % 2) != 0:\n fixed_plastic_region = numpy.asarray(numpy.append(\n fixed_plastic_region, 0), dtype='uint16')\n # does indeed return something (due to c fancy stuff in numpi) ABS\n\n # noinspection PyNoneFunctionAssignment\n fixed_plastic_region_words = fixed_plastic_region.view(\n dtype=\"uint32\")\n data[data_pos:(data_pos + fixed_plastic_region_words.size)] = \\\n fixed_plastic_region_words\n\n spec.write_array(data)\n write_ptr += data.size * 4\n\n # The current write pointer is where the next block could start:\n next_block_start_addr = write_ptr\n return block_start_addr, next_block_start_addr\n\n def _write_synaptic_matrix_and_master_population_table(\n self, spec, subvertex, all_syn_block_sz, weight_scales,\n master_pop_table_region, synaptic_matrix_region, routing_info,\n graph_mapper, subgraph):\n \"\"\" Simultaneously generates both the master population table and\n the synatic matrix.\n \"\"\"\n spec.comment(\n \"\\nWriting Synaptic Matrix and Master Population Table:\\n\")\n\n # Track writes inside the synaptic matrix region:\n next_block_start_addr = 0\n n_synapse_type_bits = self._synapse_type.get_n_synapse_type_bits()\n\n # Filtering incoming subedges\n in_subedges = subgraph.incoming_subedges_from_subvertex(subvertex)\n in_proj_subedges = [e for e in in_subedges\n if isinstance(e, ProjectionPartitionedEdge)]\n\n # Set up the master population table\n self._population_table_type.initialise_table(\n spec, master_pop_table_region)\n\n # For each entry in subedge into the subvertex, create a\n # sub-synaptic list\n for subedge in in_proj_subedges:\n keys_and_masks = routing_info.get_keys_and_masks_from_subedge(\n subedge)\n spec.comment(\n \"\\nWriting matrix for subedge:{}\\n\".format(subedge.label))\n sublist = subedge.get_synapse_sublist(graph_mapper)\n associated_edge = \\\n graph_mapper.get_partitionable_edge_from_partitioned_edge(\n subedge)\n row_io = associated_edge.get_synapse_row_io()\n\n # Get the maximum row length in words, excluding headers\n max_row_length = max([row_io.get_n_words(row)\n for row in sublist.get_rows()])\n\n # Get an entry in the row length table for this length\n row_length = self._population_table_type.get_allowed_row_length(\n max_row_length)\n block_start_addr = 0\n if max_row_length > 0:\n\n # Determine where the next block will actually start\n # and generate any required padding\n next_block_allowed_addr = \\\n self._population_table_type.get_next_allowed_address(\n next_block_start_addr)\n if next_block_allowed_addr != next_block_start_addr:\n\n # Pad out data file with the added alignment bytes:\n spec.switch_write_focus(synaptic_matrix_region)\n spec.set_register_value(\n register_id=15,\n data=next_block_allowed_addr - next_block_start_addr)\n spec.write_value(data=0xDD, repeats_register=15,\n data_type=DataType.UINT8)\n\n # Write the synaptic block for the sublist\n (block_start_addr, next_block_start_addr) = \\\n self._write_synapse_row_info(\n sublist, row_io, spec, next_block_allowed_addr,\n row_length, synaptic_matrix_region, weight_scales,\n n_synapse_type_bits)\n\n self._population_table_type.update_master_population_table(\n spec, block_start_addr, row_length, keys_and_masks,\n master_pop_table_region)\n\n self._population_table_type.finish_master_pop_table(\n spec, master_pop_table_region)\n\n def write_data_spec(\n self, spec, vertex, vertex_slice, subvertex, placement, subgraph,\n graph, routing_info, hostname, graph_mapper):\n\n # Reserve the memory\n subvert_in_edges = subgraph.incoming_subedges_from_subvertex(subvertex)\n all_syn_block_sz = self._get_exact_synaptic_blocks_size(\n graph_mapper, subvert_in_edges)\n self._reserve_memory_regions(\n spec, vertex, vertex_slice, graph, all_syn_block_sz)\n\n weight_scales = self._write_synapse_parameters(\n spec, subvertex, subgraph, graph_mapper, vertex_slice)\n\n self._write_synaptic_matrix_and_master_population_table(\n spec, subvertex, all_syn_block_sz, weight_scales,\n constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value,\n constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,\n routing_info, graph_mapper, subgraph)\n\n if self._stdp_mechanism is not None:\n self._stdp_mechanism.write_plastic_params(\n spec,\n constants.POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS.value,\n self._machine_time_step, weight_scales)\n\n # Free any additional memory\n for subedge in subvert_in_edges:\n subedge.free_sublist()\n\n def get_synaptic_list_from_machine(\n self, placements, transceiver, pre_subvertex, pre_n_atoms,\n post_subvertex, synapse_io, subgraph, routing_infos,\n weight_scales):\n \"\"\"\n\n :param placements:\n :param transceiver:\n :param pre_subvertex:\n :param pre_n_atoms:\n :param post_subvertex:\n :param synapse_io:\n :param subgraph:\n :param routing_infos:\n :param weight_scales:\n :return:\n \"\"\"\n\n synaptic_block, max_row_length = self._retrieve_synaptic_block(\n placements, transceiver, pre_subvertex, pre_n_atoms,\n post_subvertex, routing_infos, subgraph)\n\n # translate the synaptic block into a sublist of synapse_row_infos\n synapse_list = None\n if max_row_length > 0:\n synapse_list = self._translate_synaptic_block_from_memory(\n synaptic_block, pre_n_atoms, max_row_length, synapse_io,\n weight_scales)\n else:\n synapse_list = SynapticList([])\n return synapse_list\n\n def _translate_synaptic_block_from_memory(\n self, synaptic_block, n_atoms, max_row_length, synapse_io,\n weight_scales):\n \"\"\"\n translates a collection of memory into synaptic rows\n \"\"\"\n synaptic_list = list()\n numpy_block = numpy.frombuffer(dtype='uint8',\n buffer=synaptic_block).view(dtype=' 0:\n\n # calculate the synaptic block size in words\n synaptic_block_size = (pre_n_atoms * 4 *\n (constants.SYNAPTIC_ROW_HEADER_WORDS +\n maxed_row_length))\n\n # read in the base address of the synaptic matrix in the app region\n # table\n synapse_region_base_address_location = \\\n dsg_utilities.get_region_base_address_offset(\n app_data_base_address,\n constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value)\n\n # read in the memory address of the synaptic_region base address\n synapse_region_base_address = helpful_functions.read_data(\n post_x, post_y, synapse_region_base_address_location, 4,\n \"\".format(officer.person.common_name, officer.person.email) for officer in officers]\n ccs = [\"David Mills \"]\n if self.group.kind == self.group.KIND.quartet:\n members = self.group.members.filter(\n status__gt=0,\n person__email__isnull=False,\n ).exclude(\n person__officers__in=officers,\n ).distinct()\n for member in members:\n ccs.append(\n \"{0} <{1}>\".format(member.person.common_name, member.person.email)\n )\n context = {'competitor': self}\n rendered = render_to_string('csa.txt', context)\n subject = \"[Barberscore] {0} {1} {2} Session CSA\".format(\n self.group.name,\n self.session.convention.name,\n self.session.get_kind_display(),\n )\n # email = EmailMessage(\n # subject=subject,\n # body=rendered,\n # from_email='Barberscore ',\n # to=tos,\n # cc=ccs,\n # )\n email = EmailMessage(\n subject=subject,\n body=rendered,\n from_email='Barberscore ',\n to=[\n 'dbinetti@gmail.com',\n 'proclamation56@gmail.com',\n 'chris.buechler@verizon.net',\n ],\n )\n queue = django_rq.get_queue('high')\n result = queue.enqueue(\n email.send\n )\n return result\n\n # Competitor Transition Conditions\n\n # Competitor Transitions\n @fsm_log_by\n @transition(\n field=status,\n source=[STATUS.new, STATUS.started, STATUS.finished],\n target=STATUS.started,\n )\n def start(self, *args, **kwargs):\n # Notification?\n return\n\n @fsm_log_by\n @transition(\n field=status,\n source=[STATUS.new, STATUS.started, STATUS.finished],\n target=STATUS.finished,\n )\n def finish(self, *args, **kwargs):\n self.queue_notification()\n return\n\n @fsm_log_by\n @transition(\n field=status,\n source='*',\n target=STATUS.scratched,\n )\n def scratch(self, *args, **kwargs):\n self.tot_rank = None\n self.mus_rank = None\n self.per_rank = None\n self.sng_rank = None\n self.tot_points = None\n self.mus_points = None\n self.per_points = None\n self.sng_points = None\n self.tot_score = None\n self.mus_score = None\n self.per_score = None\n self.sng_score = None\n appearances = self.appearances.all()\n appearances.delete()\n return\n\n @fsm_log_by\n @transition(\n field=status,\n source='*',\n target=STATUS.disqualified,\n )\n def disqualify(self, *args, **kwargs):\n return\n","sub_path":"project/api/models/competitor.py","file_name":"competitor.py","file_ext":"py","file_size_in_byte":11885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"266999236","text":"from abc import ABCMeta, abstractmethod\n\nfrom datetime import timedelta\n\nfrom airflow import DAG\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.operators.python_operator import ShortCircuitOperator\n\nfrom presidio.utils.airflow.operators.sensor.task_sensor_service import TaskSensorService\nfrom presidio.utils.airflow.operators.group_connector.single_point_group_connector import SinglePointGroupConnector\nfrom presidio.utils.airflow.operators.group_connector.multi_point_group_connector import MultiPointGroupConnector\nfrom presidio.utils.airflow.operators.trigger.expended_trigger_dag_run_operator import ExpandedTriggerDagRunOperator\nfrom presidio.utils.configuration.config_server_configuration_reader_singleton import \\\n ConfigServerConfigurationReaderSingleton\n\nRETRY_ARGS_CONF_KEY = \"subdag_retry_args\"\nDAGS_CONF_KEY = \"dags\"\n\n\nclass PresidioDagBuilder(LoggingMixin):\n conf_reader = ConfigServerConfigurationReaderSingleton().config_reader\n\n presidio_command = 'run'\n\n \"\"\"\n The Presidio DAG Builder has 1 method that receives a DAG. The \"build\" method creates the DAG's operators, links\n them to the DAG and configures the dependencies between them. The inheritors are builders of different types of DAGs\n (Input DAG, ADE DAG, Output DAG, etc.), but they all have a common functional interface that receives and populates\n an Airflow DAG. Each implementation does this according to the specific DAG type.\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def build(self, dag):\n \"\"\"\n Receives a DAG, creates its operators, links them to the DAG and configures the dependencies between them.\n :param dag: The DAG to populate\n :type dag: airflow.models.DAG\n :return: The given DAG, after it has been populated\n :rtype: airflow.models.DAG\n \"\"\"\n\n pass\n\n def _create_infinite_retry_short_circuit_operator(self, task_id, dag, python_callable):\n return ShortCircuitOperator(\n task_id=task_id,\n dag=dag,\n python_callable=python_callable,\n retries=99999,\n retry_exponential_backoff=True,\n max_retry_delay=timedelta(seconds=3600),\n retry_delay=timedelta(seconds=600),\n provide_context=True\n )\n\n def _create_expanded_trigger_dag_run_operator(self, task_id, trigger_dag_id, dag, python_callable):\n retry_args = self._calc_retry_args(None)\n return ExpandedTriggerDagRunOperator(\n task_id=task_id,\n trigger_dag_id=trigger_dag_id,\n dag=dag,\n python_callable=python_callable,\n retries=retry_args['retries'],\n retry_delay=timedelta(seconds=int(retry_args['retry_delay'])),\n retry_exponential_backoff=retry_args['retry_exponential_backoff'],\n max_retry_delay=timedelta(\n seconds=int(retry_args['max_retry_delay'])))\n\n def _create_multi_point_group_connector(self, builder, dag, multi_point_group_connector_id, short_circuit_operator,\n add_sequential_sensor):\n \"\"\"\n create multi_point_group_connector with first and last tasks\n and wire short_circuit_operator and add_sequential_sensor.\n :param builder: builder\n :param dag: dag\n :param multi_point_group_connector_id: multi_point_group_connector_id\n :param short_circuit_operator: short_circuit_operator\n :param add_sequential_sensor: boolean\n :return: WireOperator\n \"\"\"\n retry_args = self._calc_retry_args(multi_point_group_connector_id)\n return MultiPointGroupConnector(builder=builder,\n dag=dag,\n add_sequential_sensor=add_sequential_sensor,\n short_circuit_operator=short_circuit_operator,\n task_id='{}.{}'.format(\"multi_point\", multi_point_group_connector_id),\n retries=retry_args['retries'],\n retry_delay=timedelta(seconds=int(retry_args['retry_delay'])),\n retry_exponential_backoff=retry_args['retry_exponential_backoff'],\n max_retry_delay=timedelta(\n seconds=int(retry_args['max_retry_delay'])))\n\n def _create_single_point_group_connector(self, builder, single_point_group_connector_id, dag,\n short_circuit_operator, add_sequential_sensor):\n \"\"\"\n create a single_point_group_connector with start and end dummy operators\n and wire short_circuit_operator and add_sequential_sensor.\n :param builder: builder\n :param single_point_group_connector_id: single_point_group_connector_id\n :param dag: dag\n :param short_circuit_operator: short_circuit_operator\n :param add_sequential_sensor: add_sequential_sensor\n :return: ContainerOperator\n \"\"\"\n retry_args = self._calc_retry_args(single_point_group_connector_id)\n return SinglePointGroupConnector(\n builder=builder,\n dag=dag,\n single_point_group_connector_id=single_point_group_connector_id,\n retry_args=retry_args,\n add_sequential_sensor=add_sequential_sensor,\n short_circuit_operator=short_circuit_operator,\n task_id='{}.{}'.format(\"single_point\", single_point_group_connector_id),\n retries=retry_args['retries'],\n retry_delay=timedelta(seconds=int(retry_args['retry_delay'])),\n retry_exponential_backoff=retry_args['retry_exponential_backoff'],\n max_retry_delay=timedelta(\n seconds=int(retry_args['max_retry_delay'])))\n\n def _create_sub_dag_operator(self, sub_dag_builder, sub_dag_id, dag, short_circuit_operator, add_sequential_sensor):\n \"\"\"\n create a sub dag of the received \"dag\" fill it with a flow using the sub_dag_builder\n and wrap it with a sub dag operator.\n wire short_circuit_operator and add_sequential_sensor.\n :param sub_dag_builder: sub_dag_builder\n :param sub_dag_id: sub_dag_id\n :param dag: dag\n :return: SubDagOperator\n \"\"\"\n\n sub_dag = DAG(\n dag_id='{}.{}'.format(dag.dag_id, sub_dag_id),\n schedule_interval=dag.schedule_interval,\n start_date=dag.start_date,\n default_args=dag.default_args)\n\n retry_args = self._calc_retry_args(sub_dag_id)\n\n sub_dag = SubDagOperator(\n subdag=sub_dag_builder.build(sub_dag),\n task_id=sub_dag_id,\n dag=dag,\n retries=retry_args['retries'],\n retry_delay=timedelta(seconds=int(retry_args['retry_delay'])),\n retry_exponential_backoff=retry_args['retry_exponential_backoff'],\n max_retry_delay=timedelta(\n seconds=int(retry_args['max_retry_delay']))\n )\n\n task_sensor_service = TaskSensorService()\n if add_sequential_sensor:\n task_sensor_service.add_task_sequential_sensor(sub_dag)\n if short_circuit_operator:\n task_sensor_service.add_task_short_circuit(sub_dag, short_circuit_operator)\n\n return sub_dag;\n\n @staticmethod\n def validate_the_gap_between_dag_start_date_and_current_execution_date(dag, gap, execution_date, schedule_interval):\n return (dag.start_date + gap) <= execution_date + schedule_interval\n\n def _calc_retry_args(self, task_id):\n retry_args = {}\n if task_id:\n # read task subdag retry args\n retry_args = PresidioDagBuilder.conf_reader.read(\n conf_key=self.get_retry_args_task_instance_conf_key_prefix(task_id))\n if not retry_args:\n # read default subdag retry args\n self.log.debug((\n \"did not found task retry configuration for operator=%s. settling for default configuration\" % (\n self.__class__.__name__)))\n retry_args = PresidioDagBuilder.conf_reader.read(\n conf_key=self.get_default_retry_args_conf_key())\n return retry_args\n\n def get_retry_args_task_instance_conf_key_prefix(self, task_id):\n return \"%s.%s.%s\" % (self.get_task_instance_conf_key_prefix(), task_id, RETRY_ARGS_CONF_KEY)\n\n def get_task_instance_conf_key_prefix(self):\n return \"%s.tasks_instances\" % (DAGS_CONF_KEY)\n\n def get_default_retry_args_conf_key(self):\n return \"%s.operators.default_jar_values.%s\" % (DAGS_CONF_KEY, RETRY_ARGS_CONF_KEY)\n\n @staticmethod\n def remove_multi_point_group_container(dag):\n PresidioDagBuilder._remove_relatives_of_multi_point_group_container(dag)\n PresidioDagBuilder._remove_multi_point_group_container_tasks(dag)\n\n @staticmethod\n def _remove_relatives_of_multi_point_group_container(dag):\n \"\"\"\n Remove MultiPointGroupConnector from downstream and upstream lists of other tasks\n :param full_flow_dag:\n :return:\n \"\"\"\n tasks = dag.tasks\n for task in tasks:\n if not isinstance(task, MultiPointGroupConnector):\n for t in task.downstream_list:\n if isinstance(t, MultiPointGroupConnector):\n task.downstream_task_ids.remove(t.task_id)\n for t in task.upstream_list:\n if isinstance(t, MultiPointGroupConnector):\n task.upstream_task_ids.remove(t.task_id)\n\n @staticmethod\n def _remove_multi_point_group_container_tasks(dag):\n \"\"\"\n Remove MultiPointGroupConnector tasks\n :param full_flow_dag:\n :return:\n \"\"\"\n dicts = dag.task_dict\n for task_id, task in dicts.items():\n if isinstance(task, MultiPointGroupConnector):\n dicts.pop(task_id)\n dag.task_count = len(dag.tasks)\n","sub_path":"presidio-core/presidio-workflows/presidio/builders/presidio_dag_builder.py","file_name":"presidio_dag_builder.py","file_ext":"py","file_size_in_byte":10136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"603361322","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.display import clear_output\nimport pandas as pd\n\nimport torch\n\ndef smoothen_metric(metric, window_size=40):\n return pd.Series(metric).rolling(window_size, min_periods=1).mean()\n\n\ndef evaluate_metric_on_dataset(model, loader, evaluate_metric):\n ground_truth, predictions = map(\n torch.cat, \n zip(*[\n [ground_truth, model(images)] \n for images, ground_truth in loader]))\n \n return evaluate_metric(ground_truth, predictions)\n\n\ndef eval_accuracy(ground_truth, predictions):\n return float(\n (ground_truth == predictions.argmax(dim=1))\n .type(torch.FloatTensor)\n .mean())\n\n\ndef plot_metric(steps, metric_values):\n plt.plot(\n np.linspace(0, steps, len(metric_values)), \n metric_values)\n \n\ndef plot_metrics(metrics, steps, model, xlim=None):\n training_loss = smoothen_metric([m[\"training_loss\"] for m in metrics])\n training_acc = smoothen_metric([m[\"training_accuracy\"] for m in metrics])\n testing_acc = smoothen_metric([m[\"testing_accuracy\"] for m in metrics], window_size=80)\n \n plt.title(\"Loss\")\n plt.xlim(xlim)\n plot_metric(steps, training_loss)\n plt.legend([\"Training Loss\"])\n plt.show()\n \n plt.title(\"Accuracy\")\n plt.xlim(xlim)\n plot_metric(steps, training_acc)\n plt.title(\"Training Loss\")\n plot_metric(steps, testing_acc)\n plt.legend([\"Training Accuracy\", \"Testing Accuracy\"])\n plt.show()\n \n print(f\"Training loss: {training_loss.iloc[-1]}\")\n print(f\"Training Accuracy: {training_acc.iloc[-1]}\")\n print(f\"Testing Accuracy: {testing_acc.iloc[-1]}\")\n\ndef train_model(model, criterion, optimizer, train_loader, test_loader, epochs=3, print_every=40):\n steps = 0\n metrics = []\n\n total_steps = epochs * len(iter(train_loader))\n \n for e in range(epochs):\n for images, labels in iter(train_loader):\n optimizer.zero_grad()\n\n output = model(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n \n metrics.append({\n \"training_loss\": loss.item(),\n \"training_accuracy\": eval_accuracy(labels, output),\n \"testing_accuracy\": (\n evaluate_metric_on_dataset(model, test_loader, eval_accuracy)\n if steps % print_every == 0\n else metrics[-1][\"testing_accuracy\"])\n })\n\n if steps % print_every == 0:\n clear_output(wait=True)\n\n plot_metrics(metrics, steps, model, xlim=(0, total_steps))\n\n steps += 1\n","sub_path":"introduction-to-computer-vision/model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"141933514","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator, EmptyPage,\\\nPageNotAnInteger\nfrom .models import Post, Comment, Category, Subscriber\nfrom django.urls import reverse_lazy\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import (TemplateView,ListView,\n DetailView,CreateView,\n UpdateView,DeleteView)\nfrom .forms import CommentForm,PostForm, Emailshare,Contact, SubscriberForm\nfrom django.core.mail import send_mail, BadHeaderError\nfrom taggit.models import Tag\nfrom django.db.models import Count\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\nimport random\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\n\n# Create your views here.\ndef post_list(request, **kwargs):\n object_list = Post.objects.all()\n tag = None\n tag_slug = kwargs.get(\"tag_slug\")\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag])\n paginator = Paginator(object_list, 3) # 3 posts in each page\n page = request.GET.get('page')\n print(page)\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer deliver the first page\n posts = paginator.page(1)\n except EmptyPage:\n # If page is out of range deliver last page of results\n posts = paginator.page(paginator.num_pages)\n if tag:\n return render(request, 'blog/post/list.html', {'posts': posts,'tag': tag})\n else:\n return render(request, 'blog/post/list.html', {'posts': posts})\ndef create_post(request):\n sent = False\n if request.method == 'POST':\n forms = PostForm(data =request.POST, files=request.FILES)\n if forms.is_valid():\n forms.save()\n return redirect('/blog')\n sent = True\n else:\n forms = PostForm()\n return render(request,'blog/post/create.html',{'forms':forms })\ndef post_detail(request,pk):\n post = get_object_or_404(Post, pk=pk,status='published')\n # List of active comments for this post\n comments = post.comments.filter(active=True)\n new_comment = None\n if request.method == 'POST':\n # A comment was posted\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n # Create Comment object but don't save to database yet\n new_comment = comment_form.save(commit=False)\n # Assign the current post to the comment\n new_comment.post = post\n # Save the comment to the database\n new_comment.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER','/') )\n else:\n comment_form = CommentForm()\n\n post_tags_ids = post.tags.values_list('id', flat=True)\n similar_posts = Post.objects.filter(tags__in=post_tags_ids).exclude(id=post.id)\n similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]\n return render(request,'blog/post/detail.html',{'post': post,'comments': comments,\n 'new_comment': new_comment,\n 'comment_form': comment_form,'similar_posts':similar_posts})\n\ndef contact(request):\n sent = False\n if request.method == 'POST':\n contact = Contact(data=request.POST)\n if contact.is_valid():\n name = contact.cleaned_data['name']\n sender = contact.cleaned_data['email']\n subject = contact.cleaned_data['subject']\n message = contact.cleaned_data['message']\n recipients = ['agrictime@gmail.com']\n try:\n send_mail(subject, message, sender, recipients)\n except BadHeaderError:\n return HttpResponse('Invalid header found')\n sent = True\n else: \n contact = Contact()\n return render(request, 'blog/post/contact.html', {'contact': contact, 'sent':sent})\ndef share(request, pk):\n post = get_object_or_404(Post, pk=pk,status='published')\n sent = False\n if request.method == 'POST':\n form = Emailshare(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n post_url = request.build_absolute_uri(post.get_absolute_url())\n subject = f\"{cd['name']} recommends you read \" \\\n f\"{post.title}\"\n message = f\"Read {post.title} at {post_url}\\n\\n\" \\\n f\"{cd['name']}\\'s comments: {cd['comments']}\"\n send_mail(subject, message, '',[cd['to']])\n sent = True\n return redirect('blog:post_detail',pk=pk)\n else:\n form = Emailshare()\n return render(request, 'blog/post/share.html', {'post': post,\n 'form': form, 'sent':sent})\n\ndef show_category(request,hierarchy= None):\n category_slug = hierarchy.split('/')\n category_queryset = list(Category.objects.all())\n all_slugs = [ x.slug for x in category_queryset ]\n parent = None\n for slug in category_slug:\n if slug in all_slugs:\n parent = get_object_or_404(Category,slug=slug,parent=parent)\n else:\n instance = get_object_or_404(Post, slug=slug)\n breadcrumbs_link = instance.get_cat_list()\n category_name = [' '.join(i.split('/')[-1].split('-')) for i in breadcrumbs_link]\n breadcrumbs = zip(breadcrumbs_link, category_name)\n return render(request, \"blog/post/detail.html\", {'instance':instance,'breadcrumbs':breadcrumbs})\n\n return render(request,\"blog/post/categories.html\",{'post_set':parent.post_set.all(),'sub_categories':parent.children.all()})\n\ndef post_random(request):\n poster = Post.objects.order_by(\"?\")[:4]\n return render(request, \"blog/post/random_post.html\", {'poster':poster})\n\ndef random_digits():\n return \"%0.12d\" % random.randint(0, 999999999999)\n\n@csrf_exempt\ndef news(request):\n if request.method == 'POST':\n sub = Subscriber(email=request.POST['email'], conf_num=random_digits())\n sub.save()\n message = Mail(\n from_email=settings.FROM_EMAIL,\n to_emails=sub.email,\n subject='Newsletter Confirmation',\n html_content='Thank you for signing up for my email newsletter! \\\n Please complete the process by \\\n clicking here to \\\n confirm your registration .'.format(request.build_absolute_uri('/confirm/'),\n sub.email,\n sub.conf_num))\n sg = SendGridAPIClient(settings.SENDGRID_API_KEY)\n response = sg.send(message)\n return render(request, 'blog/base.html', {'email': sub.email, 'action': 'added', 'form': SubscriberForm()})\n else:\n return render(request, 'blog/base.html', {'form': SubscriberForm()})\ndef confirm(request):\n sub = Subscriber.objects.get(email=request.GET['email'])\n if sub.conf_num == request.GET['conf_num']:\n sub.confirmed = True\n sub.save()\n return render(request, 'blog/base.html', {'email': sub.email, 'action': 'confirmed'})\n else:\n return render(request, 'blog/base.html', {'email': sub.email, 'action': 'denied'})\ndef delete(request):\n sub = Subscriber.objects.get(email=request.GET['email'])\n if sub.conf_num == request.GET['conf_num']:\n sub.delete()\n return render(request, 'blog/base.html', {'email': sub.email, 'action': 'unsubscribed'})\n else:\n return render(request, 'blog/base.html', {'email': sub.email, 'action': 'denied'})","sub_path":"social/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"616634547","text":"import torch\n\nfrom . import horcorr_cuda\nfrom .horcorr_cuda import *\n\nclass HorCorrFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, left, right):\n assert left.shape[1] == horcorr_cuda.channels()\n assert (left.shape[0] * left.shape[2]) % horcorr_cuda.block_size() == 0\n assert left.shape[3] - right.shape[3] == horcorr_cuda.n_windows() - 1\n\n left = left.permute(0, 2, 3, 1).contiguous()\n right = right.permute(0, 2, 3, 1).contiguous()\n ctx.save_for_backward(left, right)\n out = torch.empty(right.shape[:3] + (horcorr_cuda.n_windows(),),\n dtype=right.dtype,\n device=right.device).contiguous()\n horcorr_cuda.forward(left, right, out)\n return out.permute(0, 3, 1, 2)\n\n @staticmethod\n def backward(ctx, grad):\n left, right = ctx.saved_tensors\n grad = grad.permute(0, 2, 3, 1).contiguous()\n grad_left = grad_right = None\n if ctx.needs_input_grad[0]:\n grad_left = torch.empty(left.shape,\n dtype=left.dtype,\n device=left.device).contiguous()\n horcorr_cuda.backward_left(right, grad, grad_left)\n grad_left = grad_left.permute(0, 3, 1, 2)\n if ctx.needs_input_grad[1]:\n grad_right = torch.empty(right.shape,\n dtype=right.dtype,\n device=right.device).contiguous()\n horcorr_cuda.backward_right(left, grad, grad_right)\n grad_right = grad_right.permute(0, 3, 1, 2)\n return grad_left, grad_right","sub_path":"py/horcorr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"643539193","text":"from pynput import keyboard as kb\nfrom pynput.keyboard import Key, Controller\n\nfrom copy import getselectedtext\nfrom combinations import COMBINATIONS\nfrom paste import paste\nfrom window import popup\n\n\nkeyboard = Controller()\n\n# The currently active modifierstr1s\ncurrent = set()\n\nll=['','','','','']\nllen=5\n\ndef execute():\n if current==COMBINATIONS[0]:\n cot=getselectedtext()\n if cot in ll:\n pass\n elif len(ll)==llen:\n ll.pop()\n ll.insert(0,cot)\n print(cot)\n else:\n ll.append(cot)\n print(cot)\n\n elif current==COMBINATIONS[1]:\n print(ll)\n popup(ll)\n # paste(ll)\n # paste(xx)\n\ndef on_press(key):\n if any([key in COMBO for COMBO in COMBINATIONS]):\n current.add(key)\n if any(all(k in current for k in COMBO) for COMBO in COMBINATIONS):\n # This fun exexuted when th combination i s pressed\n # print(current)\n execute()\n\ndef on_release(key):\n if any([key in COMBO for COMBO in COMBINATIONS]):\n current.remove(key)\n\nwith kb.Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"160036715","text":"from rest_framework import generics\nfrom rest_framework.decorators import api_view\nfrom rest_framework.reverse import reverse\nfrom rest_framework.response import Response\nfrom openbudget.api import serializers\nfrom openbudget.apps.entities.models import Entity, Domain, DomainDivision\nfrom openbudget.apps.budgets.models import BudgetTemplate, BudgetTemplateNode, Budget, BudgetItem, Actual, ActualItem\n\n\n@api_view(['GET'])\ndef api_root(request, format=None):\n \"\"\"The entry endpoint of our API\"\"\"\n\n return Response({\n 'entities': reverse('entity-list', request=request),\n 'budgets': reverse('budget-list', request=request),\n 'actuals': reverse('actual-list', request=request),\n })\n\n\nclass EntityList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of geopols\"\"\"\n\n model = Entity\n serializer_class = serializers.EntityListLinked\n\n\nclass EntityDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single geopol\"\"\"\n\n model = Entity\n serializer_class = serializers.EntityDetailLinked\n\n\nclass DomainList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of domains\"\"\"\n\n model = Domain\n serializer_class = serializers.DomainLinked\n\n\nclass DomainDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single domain\"\"\"\n\n model = Domain\n serializer_class = serializers.DomainLinked\n\n\nclass DomainDivisionList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of domain divisions\"\"\"\n\n model = DomainDivision\n serializer_class = serializers.DomainDivisionLinked\n\n\nclass DomainDivisionDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single domain division\"\"\"\n\n model = DomainDivision\n serializer_class = serializers.DomainDivisionLinked\n\n\nclass BudgetTemplateDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single budget template\"\"\"\n\n model = BudgetTemplate\n serializer_class = serializers.BudgetTemplateLinked\n\n\nclass BudgetTemplateNodeDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single budget template node\"\"\"\n\n model = BudgetTemplateNode\n serializer_class = serializers.BudgetTemplateNodeLinked\n\n\nclass BudgetList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of budgets\"\"\"\n\n model = Budget\n serializer_class = serializers.BudgetLinked\n\n\nclass BudgetDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single budget\"\"\"\n\n model = Budget\n serializer_class = serializers.BudgetLinked\n\n\nclass BudgetItemList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of bitems\"\"\"\n\n model = BudgetItem\n serializer_class = serializers.BudgetItemLinked\n\n\nclass BudgetItemDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single bitem\"\"\"\n\n model = BudgetItem\n serializer_class = serializers.BudgetItemLinked\n\n\nclass ActualList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of actuals\"\"\"\n\n model = Actual\n serializer_class = serializers.ActualLinked\n\n\nclass ActualDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single actual\"\"\"\n\n model = Actual\n serializer_class = serializers.ActualLinked\n\n\nclass ActualItemList(generics.ListAPIView):\n \"\"\"API endpoint that represents a list of actual items\"\"\"\n\n model = ActualItem\n serializer_class = serializers.ActualItemLinked\n\n\nclass ActualItemDetail(generics.RetrieveAPIView):\n \"\"\"API endpoint that represents a single actual item\"\"\"\n\n model = ActualItem\n serializer_class = serializers.ActualItemLinked\n\n\nclass NodeBudgetTimeline(generics.ListAPIView):\n \"\"\"\n API endpoint that retrieves a timeline of budget items\n according to a given node, entity and optionally a period\n \"\"\"\n\n def get(self, request, entity_pk, node_pk, *args, **kwargs):\n \"\"\"GET handler for retrieving all budget items of the node's timeline, filtered by entity\"\"\"\n\n budget_items = BudgetItem.objects.timeline(node_pk, entity_pk)\n serializer = serializers.BudgetItemLinked(budget_items, many=True)\n return Response(serializer.data)","sub_path":"openbudget/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"123807087","text":"# Databricks notebook source\n# MAGIC %md # Flight departure delay predictions\n# MAGIC ## Main Notebook\n# MAGIC \n# MAGIC Team members:\n# MAGIC - Isabel Garcia Pietri\n# MAGIC - Madhu Hegde\n# MAGIC - Amit Karandikar\n# MAGIC - Piotr Parkitny\n\n# COMMAND ----------\n\n# MAGIC %md ## Problem Statement\n# MAGIC \n# MAGIC Flight delays represent a big issue in the air travel industry. Delays cause huge losses to airlines, impact passenger satisfaction and cause complex logistic problems. Because of this, it is crucial for airlines to better understand flight delays, in order to minimize the impact in their business.\n# MAGIC \n# MAGIC There are many things that can cause a flight to be delayed, going from aircraft maintenance, extreme weather, airport operations, security and screening activities, a previous flight delay causing the next flight to depart late, among many others. Because there are many moving parts in the air travel operations, solving the delay problem is quite complex.\n# MAGIC \n# MAGIC Many people have worked in understanding the problem of flights delays. For instance, in 2019 Navoneel Chakrabarty (1) developed a model to predict arrival delay for American Airlines flights in US, that achieved an accuracy of 85.73%, which is considered a very good accuracy for this type of problems.\n# MAGIC \n# MAGIC The implementation of a model to predict flight delays would have a great impact on airline operations. Airlines would be able to minimize the impact of such delays by making changes on passenger itineraries, flight routes, crew assignments, aircraft schedules and maintenance, etc.\n# MAGIC \n# MAGIC The main purpose of this study is to create a model to predict departure delay for flights in the US, where a delay is defined as 15-minute delay or more. We use a subset of the of the flight's on-time performance data provided by the United States Bureau of Transportation Statistics. The data comprises data of flights departing from all major US airports for the 2015-2019 timeframe. Additionally, we use weather information from the National Oceanic and Atmospheric Administration repository.\n# MAGIC \n# MAGIC The **output variable** in our model is a binary variable, where 1 represent flights that experienced departure delay and 0 represent flights with on-time departure.\n# MAGIC \n# MAGIC #### About the performance metrics\n# MAGIC \n# MAGIC Precision is the ratio between true positives and all the predicted positives. For our problem statement, is the measure of flights that we correctly identify as delayed out of all flights that are predicted as delayed. \n# MAGIC \n# MAGIC $$Precision = \\frac{TP}{TP + FP}$$\n# MAGIC \n# MAGIC Recall measures if our model is correctly identifying true positives. For our problem statement: of all the flights that are actually delayed, how many we correctly identified as delayed. \n# MAGIC $$Recall = \\frac{TP}{TP + FN}$$\n# MAGIC \n# MAGIC In order to minimize economic losses due to delays, airlines would like to avoid situations where the flight is going to be delayed, but the model classifies it not having a delay. This requires a model with high recall. On the other hand, a high precision is also important. \n# MAGIC The cases where the flight is not going to be delayed and the model predicts a delay, also represent an economic impact, since airlines will implement actions such as letting customers know so they don’t show up at the airport so early, changing passengers’ itineraries, etc. All these actions cause economic/reputation damage when implemented in cases where in reality there is no delay.\n# MAGIC \n# MAGIC Because all this, we aim to balance precision and recall. Hence, in this study we use f1-score as the main metric to optimize. \n# MAGIC \n# MAGIC $$f1 = 2\\times\\frac{Precision \\times Recall}{Precision + Recall} $$\n\n# COMMAND ----------\n\n# MAGIC %md ## Package imports, directories and configuration\n\n# COMMAND ----------\n\n# package imports\nfrom pyspark.sql.functions import col, isnull, broadcast, udf, count, when, isnan, lpad, to_timestamp, concat, to_utc_timestamp, expr, unix_timestamp, avg, round, lag, hour\nfrom pyspark.sql.window import Window\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import DoubleType\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.classification import RandomForestClassifier\nfrom pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.mllib.evaluation import MulticlassMetrics\nfrom pyspark.sql.types import FloatType\nimport pyspark.sql.functions as F\nfrom pyspark.ml.feature import OneHotEncoder , StringIndexer, VectorAssembler\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.classification import GBTClassifier\nfrom pyspark.ml.classification import DecisionTreeClassifier\nfrom pyspark.ml.classification import LinearSVC\nfrom pyspark.ml.classification import NaiveBayes\n\nfrom pyspark.ml.feature import MinMaxScaler\nfrom pyspark.ml.linalg import Vectors\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\npd.set_option('display.max_rows', 999)\npd.set_option('display.max_colwidth', 999)\npd.set_option('display.max_columns', 999)\n\nfrom pyspark.sql.functions import col, isnull, broadcast, udf, count, when, isnan, lpad, to_timestamp, concat, to_utc_timestamp, expr, unix_timestamp, avg, round, lag, to_date, row_number, explode, array, lit\nfrom pyspark.sql.window import Window\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import DoubleType\n\nimport numpy as np\nimport math\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n \nfrom numpy import savetxt\n \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom pyspark.ml.feature import PCA\n\n# COMMAND ----------\n\n#data path to our directory \npath = 'dbfs:/mnt/Azure/'\ndisplay(dbutils.fs.ls(path))\n\n# COMMAND ----------\n\n# spark configuration\nsc = spark.sparkContext\nsqlContext = SQLContext(sc)\nsc.getConf().getAll()\n\n# COMMAND ----------\n\n# MAGIC %md ## Exploratory data analysis (EDA)\n# MAGIC \n# MAGIC The EDA of the project is available in the notebook: https://adb-6759024569771990.10.azuredatabricks.net/?o=6759024569771990#notebook/3743280196533040/command/3743280196533041\n\n# COMMAND ----------\n\n# MAGIC %md ## Extract, transform, load (ETL)\n# MAGIC \n# MAGIC The ETL of the project is available in the notebook: https://adb-6759024569771990.10.azuredatabricks.net/?o=6759024569771990#notebook/1407650788079970/command/1407650788079986\n# MAGIC \n# MAGIC This notebook contains the ETL pipeline we use to pre-process airlines and weather data.\n\n# COMMAND ----------\n\n# MAGIC %md ## Feature engineering\n# MAGIC \n# MAGIC Based on the results of the EDA, we designed some features around the aspects that appear to affect the most the departure delay.\n# MAGIC \n# MAGIC - Operating carrier appears to be important. So, we created a feature that represents the carrier delay over a time range before prediction time.\n# MAGIC - We also created features to measure an airport status: airport delay over a time range before prediction time, number of flights scheduled, among others.\n# MAGIC - Additionally, we created a feature to follow the delay of an aircraft. If previous flights have been delayed, then chances are that the next flight is going to be delayed.\n# MAGIC - To measure the airport’s importance, we calculated the PageRank statistic. Airports with more connections will be ranked higher than airports with few connections.\n# MAGIC - We have more than 350 airports in the dataset. So, instead of using all the categories for the origin and the destination airport, we decided to use a variable that represents the size of the airport. However, for the categorical variable that represent the carriers, we kept all the categories, because we think that different carriers display different performance and we want to be able to model that. We have a total of 19 operating carriers in the dataset.\n# MAGIC - In this section we also handle null values for the variables we consider for the modeling part.\n\n# COMMAND ----------\n\n# Load airlines pre-processed data. \ndf_airlines = spark.read.parquet(\"/mnt/Azure/processed_data/airlines_processed.parquet\")\n\nprint(f'{df_airlines.count():} flight records loaded')\n\n# Load weather pre-processed data. \ndf_weather = spark.read.parquet(\"/mnt/Azure/processed_data/weather_processed.parquet\")\n\nprint(f'{df_weather.count():} weather records loaded')\n\n# COMMAND ----------\n\n# MAGIC %md ### Carrier delay\n# MAGIC \n# MAGIC This feature represents the average delay a carrier has over a 12-hours period before prediction time.\n# MAGIC \n# MAGIC The 12-hours window goes from 2 hours and 15 minutes to 14 hours and 15 minutes before the scheduled departure time. The reason why we consider 2 hours and 15 minutes instead of 2 hours is:\n# MAGIC \n# MAGIC Let's say that we are getting statistics for a flight that is schedule to depart at 3 pm. If we use a 2-hour offset, then we would include information of flights that were scheduled to depart at 1 pm and before. But if a flight is scheduled to depart at exactly 1 pm, we would not know if that flight is delayed until 1:15 pm, so it would be wrong to use the information of this flight. To solve this, we get information of flights that are scheduled to depart 2 hours and 15 minutes before.\n\n# COMMAND ----------\n\n# carrier delay 12 hours window before prediction time\n# carrier delay 14 hours and 15 minutes (-51,300 seconds) to 2 hours and 15 minutes (-8,100 seconds) before scheduled departure time\n\ncarrier_window = Window.partitionBy('OP_CARRIER')\\\n .orderBy(unix_timestamp('dep_datetime_scheduled_utc'))\\\n .rangeBetween(-51300, -8100)\n\ndf_airlines = df_airlines.withColumn('carrier_delay', round(avg(col('DEP_DEL15')).over(carrier_window),4) )\n\n# COMMAND ----------\n\n# MAGIC %md ### Origin airport delay\n# MAGIC This feature represents the average delay the origin airport has over a 12-hours period before prediction time. We use the same logic explained above for the time range.\n\n# COMMAND ----------\n\n# airport delay 12 hours window before prediction time\n# airport delay 14 hours and 15 minutes (-51,300 seconds) to 2 hours and 15 minutes (-8,100 seconds) before scheduled departure time\nairport_window = Window.partitionBy('ORIGIN')\\\n .orderBy(unix_timestamp('dep_datetime_scheduled_utc'))\\\n .rangeBetween(-51300, -8100)\n\ndf_airlines = df_airlines.withColumn('airport_delay', round(avg(col('DEP_DEL15')).over(airport_window),4) )\n\n# COMMAND ----------\n\n# MAGIC %md ### Latest known aircraft departure status\n# MAGIC This feature represents the last known status of an aircraft at departure (delayed/not delayed) before prediction time. Again, we use status of flights that were scheduled to departure at least 2 hours and 15 minutes before.\n\n# COMMAND ----------\n\n# create a column that represents the cutoff time to get previous flights information\n# 2 hours and 15 minutes (-8,100 seconds) before the scheduled departure time\ndf_airlines = df_airlines.withColumn('cutoff_time_utc',(unix_timestamp(col('dep_datetime_scheduled_utc')) - 8100).cast('timestamp'))\n\n# feature that represents the latest known departure status of an aircraft (delayed/not delayed) before the prediction time \n\naircraft_depar_window = Window.partitionBy('TAIL_NUM')\\\n .orderBy('dep_datetime_scheduled_utc') \n\n# (lag window function returns the value that is offset rows before the current row)\n# First, look the previous departure, check if departure time is before cutoff time\n# if so, get the departure delay status \n# If the previous departure is not before the cutoff time, look for the previous flight, check departure time again, and so on .. \n# We go up to 5 flights before current flight to account for short flights\n# When tail number is not available this feature is null\ndf_airlines = df_airlines.withColumn('dep_delay_aircraft', \\\n when(col('TAIL_NUM').isNull(), None) \\\n .when(unix_timestamp(lag(col('dep_datetime_scheduled_utc'),1,None).over(aircraft_depar_window))<\\\n unix_timestamp(col('cutoff_time_utc')), \\\n lag(col('DEP_DEL15'),1,None).over(aircraft_depar_window)) \n .when(unix_timestamp(lag(col('dep_datetime_scheduled_utc'),2,None).over(aircraft_depar_window))<\\\n unix_timestamp(col('cutoff_time_utc')), \\\n lag(col('DEP_DEL15'),2,None).over(aircraft_depar_window) ) .when(unix_timestamp(lag(col('dep_datetime_scheduled_utc'),3,None).over(aircraft_depar_window))<\\\n unix_timestamp(col('cutoff_time_utc')), \\\n lag(col('DEP_DEL15'),3,None).over(aircraft_depar_window) ) .when(unix_timestamp(lag(col('dep_datetime_scheduled_utc'),4,None).over(aircraft_depar_window))<\\\n unix_timestamp(col('cutoff_time_utc')), \\\n lag(col('DEP_DEL15'),4,None).over(aircraft_depar_window) ) .when(unix_timestamp(lag(col('dep_datetime_scheduled_utc'),5,None).over(aircraft_depar_window))<\\\n unix_timestamp(col('cutoff_time_utc')), \\\n lag(col('DEP_DEL15'),5,None).over(aircraft_depar_window) ) )\n\n# COMMAND ----------\n\n# MAGIC %md ### Flight number for the aircraft on a day\n# MAGIC \n# MAGIC This feature represents the number of the flight for the aircraft on a day: first flight = 1, second flight= 2 and so on. \n\n# COMMAND ----------\n\n# partition by tail number an date, order by datetime\naircraft_depar_window2 = Window.partitionBy('TAIL_NUM', to_date('dep_datetime_scheduled_utc'))\\\n .orderBy('dep_datetime_scheduled_utc') \n\n# get sequence of flights per aircraft during a day\ndf_airlines = df_airlines.withColumn('flight_aircraft', when(col('TAIL_NUM').isNull(), None)\\\n .otherwise(row_number().over(aircraft_depar_window2)))\n\n# COMMAND ----------\n\n# MAGIC %md ### Origin and destination airport usage\n# MAGIC These two features represent how busy an airport is: number of scheduled departures at origin and number of scheduled arrivals at destination in a two-hours window (1 hour before and 1 hour after the prediction time). Since we are using **scheduled** flights we can go 1 hour over the prediction time.\n\n# COMMAND ----------\n\n# number of flights departures scheduled between t-1 and t+1 (t scheduled departure time)\n# 1 hour before: -7,200 seconds. 1 hour after: 7,200 seconds\norigin_airport_window = Window.partitionBy('ORIGIN')\\\n .orderBy(unix_timestamp('dep_datetime_scheduled_utc'))\\\n .rangeBetween(-7200, 7200)\n\ndf_airlines = df_airlines.withColumn('DEP_CNT', count(col('dep_datetime_scheduled_utc')).over(origin_airport_window))\n\n\n# number of flights arrivals scheduled between t-1 and t+1 at destination airport\n# 1 hour before: -7,200 seconds. 1 hour after: 7,200 seconds\ndest_airport_window = Window.partitionBy('DEST')\\\n .orderBy(unix_timestamp('arriv_datetime_scheduled_utc'))\\\n .rangeBetween(-7200, 7200)\n\ndf_airlines = df_airlines.withColumn('DST_CNT', count(col('arriv_datetime_scheduled_utc')).over(dest_airport_window))\n\n# COMMAND ----------\n\n# MAGIC %md ### Hour of the flight\n# MAGIC Extract the hour of the flight from scheduled departure timestamp. As we saw in the EDA, the hour of departure appears to be correlated with the outcome variable delay.\n\n# COMMAND ----------\n\ndf_airlines = df_airlines.withColumn('H_DEP', hour('dep_datetime_scheduled'))\n\n# COMMAND ----------\n\n# MAGIC %md ### PageRank and proxy for airports categorical variable\n# MAGIC \n# MAGIC The PageRank feature measures the importance of each node (in this case airport) within the graph, based on the number incoming relationships (incoming routes from other airports) and the importance of the corresponding source nodes (other airports). The logic behind this feature is that if an airport with many connections is delayed, this delay will likely propagate to other airports. \n# MAGIC \n# MAGIC Additionally, using information about the degree of an airport, we calculate a variable that represent the size of an airport to serve as a proxy for origin and destination airport.\n# MAGIC \n# MAGIC PageRank notebook: https://adb-6759024569771990.10.azuredatabricks.net/?o=6759024569771990#notebook/1310479932810734/command/1407650788079615\n\n# COMMAND ----------\n\n# Load PageRank, degree, an proxy airport data\n\ndf_pagerank = spark.read.parquet(\"/mnt/Azure/processed_data/pagerank_degree.parquet\")\n\nprint(f'{df_pagerank.count():,} nodes loaded')\n\n# Join with airlines dataframe on ORIGIN\ndf_airlines1 = df_airlines.join(broadcast(df_pagerank), df_airlines.ORIGIN == df_pagerank.id, 'left')\n\n# list of columns to keep\nkeep_columns = ['YEAR', 'QUARTER', 'MONTH', 'DAY_OF_MONTH', 'DAY_OF_WEEK', 'FL_DATE', 'OP_UNIQUE_CARRIER', 'OP_CARRIER_AIRLINE_ID', 'OP_CARRIER', \\\n 'TAIL_NUM', 'OP_CARRIER_FL_NUM', 'ORIGIN_AIRPORT_ID', 'ORIGIN_AIRPORT_SEQ_ID', 'ORIGIN_CITY_MARKET_ID', 'ORIGIN', 'ORIGIN_CITY_NAME',\\\n 'ORIGIN_STATE_ABR', 'ORIGIN_STATE_FIPS', 'ORIGIN_STATE_NM', 'ORIGIN_WAC', 'DEST_AIRPORT_ID', 'DEST_AIRPORT_SEQ_ID',\\\n 'DEST_CITY_MARKET_ID', 'DEST', 'DEST_CITY_NAME', 'DEST_STATE_ABR', 'DEST_STATE_FIPS', 'DEST_STATE_NM', 'DEST_WAC', \\\n 'CRS_DEP_TIME', 'DEP_TIME', 'DEP_DELAY', 'DEP_DELAY_NEW', 'DEP_DEL15', 'DEP_DELAY_GROUP', 'DEP_TIME_BLK', 'TAXI_OUT', 'WHEELS_OFF',\\\n 'WHEELS_ON', 'TAXI_IN', 'CRS_ARR_TIME', 'ARR_TIME', 'ARR_DELAY', 'ARR_DELAY_NEW', 'ARR_DEL15', 'ARR_DELAY_GROUP', 'ARR_TIME_BLK',\\\n 'CANCELLED', 'CANCELLATION_CODE', 'DIVERTED', 'CRS_ELAPSED_TIME', 'ACTUAL_ELAPSED_TIME', 'AIR_TIME', 'FLIGHTS', 'DISTANCE', 'DISTANCE_GROUP', \\\n 'carrier_delay', 'WEATHER_DELAY', 'NAS_DELAY', 'SECURITY_DELAY', 'LATE_AIRCRAFT_DELAY', 'dep_datetime_scheduled', 'dep_datetime_actual',\\\n 'arriv_datetime_scheduled', 'arriv_datetime_actual', 'station_origin', 'airp_origin_timezone', 'station_origin_timezone',\\\n 'station_dest', 'airp_dest_timezone', 'station_dest_timezone', 'dep_datetime_scheduled_utc', 'dep_datetime_actual_utc', \\\n 'arriv_datetime_scheduled_utc', 'arriv_datetime_actual_utc', 'airport_delay', 'cutoff_time_utc', 'dep_delay_aircraft', 'flight_aircraft', \\\n 'DEP_CNT', 'DST_CNT', 'pagerank', 'degree', 'airport_size_group','H_DEP']\n\n# keep relevant columns \ndf_airlines1 = df_airlines1.select(*keep_columns)\n\n# rename columns \ndf_airlines1 = df_airlines1.withColumnRenamed('degree','degree_origin') \\\n .withColumnRenamed('airport_size_group','proxy_origin') \\\n .withColumnRenamed('pagerank','ORPageRank')\n\n\n\n# COMMAND ----------\n\n# Join with airlines dataframe on DEST to get degree and airport proxy on destination\ndf_airlines2 = df_airlines1.join(broadcast(df_pagerank), df_airlines1.DEST == df_pagerank.id, 'left')\n\n# list of columns to keep\nkeep_columns2 = ['YEAR', 'QUARTER', 'MONTH', 'DAY_OF_MONTH', 'DAY_OF_WEEK', 'FL_DATE', 'OP_UNIQUE_CARRIER', 'OP_CARRIER_AIRLINE_ID', 'OP_CARRIER', \\\n 'TAIL_NUM', 'OP_CARRIER_FL_NUM', 'ORIGIN_AIRPORT_ID', 'ORIGIN_AIRPORT_SEQ_ID', 'ORIGIN_CITY_MARKET_ID', 'ORIGIN', 'ORIGIN_CITY_NAME',\\\n 'ORIGIN_STATE_ABR', 'ORIGIN_STATE_FIPS', 'ORIGIN_STATE_NM', 'ORIGIN_WAC', 'DEST_AIRPORT_ID', 'DEST_AIRPORT_SEQ_ID',\\\n 'DEST_CITY_MARKET_ID', 'DEST', 'DEST_CITY_NAME', 'DEST_STATE_ABR', 'DEST_STATE_FIPS', 'DEST_STATE_NM', 'DEST_WAC', \\\n 'CRS_DEP_TIME', 'DEP_TIME', 'DEP_DELAY', 'DEP_DELAY_NEW', 'DEP_DEL15', 'DEP_DELAY_GROUP', 'DEP_TIME_BLK', 'TAXI_OUT', 'WHEELS_OFF',\\\n 'WHEELS_ON', 'TAXI_IN', 'CRS_ARR_TIME', 'ARR_TIME', 'ARR_DELAY', 'ARR_DELAY_NEW', 'ARR_DEL15', 'ARR_DELAY_GROUP', 'ARR_TIME_BLK',\\\n 'CANCELLED', 'CANCELLATION_CODE', 'DIVERTED', 'CRS_ELAPSED_TIME', 'ACTUAL_ELAPSED_TIME', 'AIR_TIME', 'FLIGHTS', 'DISTANCE', 'DISTANCE_GROUP', \\\n 'carrier_delay', 'WEATHER_DELAY', 'NAS_DELAY', 'SECURITY_DELAY', 'LATE_AIRCRAFT_DELAY', 'dep_datetime_scheduled', 'dep_datetime_actual',\\\n 'arriv_datetime_scheduled', 'arriv_datetime_actual', 'station_origin', 'airp_origin_timezone', 'station_origin_timezone',\\\n 'station_dest', 'airp_dest_timezone', 'station_dest_timezone', 'dep_datetime_scheduled_utc', 'dep_datetime_actual_utc', \\\n 'arriv_datetime_scheduled_utc', 'arriv_datetime_actual_utc', 'airport_delay', 'cutoff_time_utc', 'dep_delay_aircraft', 'flight_aircraft', \\\n 'DEP_CNT', 'DST_CNT', 'ORPageRank', 'degree_origin', 'proxy_origin', 'pagerank', 'degree', 'airport_size_group','H_DEP' ]\n\n# keep relevant columns \ndf_airlines2 = df_airlines2.select(*keep_columns2)\n\n# rename columns \ndf_airlines2 = df_airlines2.withColumnRenamed('degree','degree_dest') \\\n .withColumnRenamed('airport_size_group','proxy_dest') \\\n .withColumnRenamed('pagerank','DESTPageRank')\n\n\n# COMMAND ----------\n\n# MAGIC %md ### Weather features\n# MAGIC In this section we join the airlines table and the weather table. For each record in the airlines table (each flight) we include weather observations at origin and destination airports. \n\n# COMMAND ----------\n\ndf_weather.registerTempTable('df_weather')\ndf_airlines2.registerTempTable('df_airlines2')\n\ndf_final = sqlContext.sql(\"\"\" \n\n--Airlines\nSelect AL1.*, \n\n--Source weather\nAL2.STATION As Src_STATION,\nAL2.DATE_JOIN_2H As Src_DATE_JOIN_2H,\nAL2.WND_0 As Src_WND_0,\nAL2.WND_1 As Src_WND_1,\nAL2.WND_3 As Src_WND_3,\nAL2.WND_4 As Src_WND_4,\nAL2.CIG_0 As Src_CIG_0,\nAL2.CIG_1 As Src_CIG_1,\nAL2.VIS_0 As Src_VIS_0,\nAL2.VIS_1 As Src_VIS_1,\nAL2.VIS_2 As Src_VIS_2,\nAL2.VIS_3 As Src_VIS_3,\nAL2.TMP_0 As Src_TMP_0,\nAL2.TMP_1 As Src_TMP_1,\nAL2.DEW_0 As Src_DEW_0,\nAL2.DEW_1 As Src_DEW_1,\nAL2.SLP_0 As Src_SLP_0,\nAL2.SLP_1 As Src_SLP_1,\nAL2.GA1_0 As Src_GA1_0,\nAL2.GA1_1 As Src_GA1_1,\nAL2.GA1_2 As Src_GA1_2,\nAL2.GA1_3 As Src_GA1_3,\nAL2.GA1_4 As Src_GA1_4,\nAL2.GA1_5 As Src_GA1_5,\n\n--Destination weather\nAL3.STATION As Dst_STATION,\nAL3.DATE_JOIN_2H As Dst_DATE_JOIN_2H,\nAL3.WND_0 As Dst_WND_0,\nAL3.WND_1 As Dst_WND_1,\nAL3.WND_3 As Dst_WND_3,\nAL3.WND_4 As Dst_WND_4,\nAL3.CIG_0 As Dst_CIG_0,\nAL3.CIG_1 As Dst_CIG_1,\nAL3.VIS_0 As Dst_VIS_0,\nAL3.VIS_1 As Dst_VIS_1,\nAL3.VIS_2 As Dst_VIS_2,\nAL3.VIS_3 As Dst_VIS_3,\nAL3.TMP_0 As Dst_TMP_0,\nAL3.TMP_1 As Dst_TMP_1,\nAL3.DEW_0 As Dst_DEW_0,\nAL3.DEW_1 As Dst_DEW_1,\nAL3.SLP_0 As Dst_SLP_0,\nAL3.SLP_1 As Dst_SLP_1,\nAL3.GA1_0 As Dst_GA1_0,\nAL3.GA1_1 As Dst_GA1_1,\nAL3.GA1_2 As Dst_GA1_2,\nAL3.GA1_3 As Dst_GA1_3,\nAL3.GA1_4 As Dst_GA1_4,\nAL3.GA1_5 As Dst_GA1_5\n\nfrom df_airlines2 AL1\nLEFT JOIN df_weather AL2 ON \n AL1.station_origin = AL2.STATION \n AND to_timestamp(concat(YEAR(dep_datetime_scheduled),'-',MONTH(dep_datetime_scheduled),'-',DAY(dep_datetime_scheduled),'T',HOUR(dep_datetime_scheduled),':','00',':00.000+0000')) = AL2.DATE_JOIN_2H\nLEFT JOIN df_weather AL3 ON \n AL1.station_dest = AL3.STATION \n AND to_timestamp(concat(YEAR(dep_datetime_scheduled),'-',MONTH(dep_datetime_scheduled),'-',DAY(dep_datetime_scheduled),'T',HOUR(dep_datetime_scheduled),':','00',':00.000+0000')) = AL3.DATE_JOIN_2H\n\n\"\"\")\n\n# COMMAND ----------\n\n# select columns to keep\ncolumns_keep = ['DAY_OF_MONTH', 'DAY_OF_WEEK','YEAR', 'QUARTER', 'MONTH', 'H_DEP', 'ORPageRank', 'proxy_origin', 'DESTPageRank', 'proxy_dest','OP_CARRIER',\n 'carrier_delay','airport_delay', 'DISTANCE', 'flight_aircraft', 'dep_delay_aircraft', 'DEP_CNT', 'DST_CNT', 'DEP_DEL15', 'Src_WND_0', \n 'Src_WND_1', 'Src_WND_3', 'Src_WND_4', 'Src_CIG_0', 'Src_CIG_1', 'Src_VIS_0', 'Src_VIS_1', 'Src_VIS_2', 'Src_VIS_3', 'Src_TMP_0', 'Src_TMP_1', \n 'Src_DEW_0', 'Src_DEW_1', 'Src_SLP_0', 'Src_SLP_1', 'Src_GA1_0', 'Src_GA1_1', 'Src_GA1_2', 'Src_GA1_3', 'Src_GA1_4', 'Src_GA1_5', \n 'Dst_WND_0', 'Dst_WND_1', 'Dst_WND_3', 'Dst_WND_4', 'Dst_CIG_0', 'Dst_CIG_1', 'Dst_VIS_0', 'Dst_VIS_1', 'Dst_VIS_2', 'Dst_VIS_3', \n 'Dst_TMP_0', 'Dst_TMP_1', 'Dst_DEW_0', 'Dst_DEW_1', 'Dst_SLP_0', 'Dst_SLP_1', 'Dst_GA1_0', 'Dst_GA1_1', 'Dst_GA1_2', 'Dst_GA1_3', \n 'Dst_GA1_4', 'Dst_GA1_5', 'FL_DATE', 'CRS_DEP_TIME', 'DEP_TIME', 'DEP_DELAY', 'CRS_ARR_TIME', 'ORIGIN', 'DEST_AIRPORT_ID']\n\ndf_final = df_final.select(*columns_keep)\n\n# COMMAND ----------\n\n# MAGIC %md ### Check for null values\n# MAGIC In this section we handle null values in the data.\n\n# COMMAND ----------\n\n# check for null values \ndf_final.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in df_final.columns]).toPandas()\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC - The variable `carrier_delay` has null values. These null values appear when there is no information in the -14h:15m-2h:15m window to calculate the delay of the carrier. In these cases, we will assume no delay and replace the null values by zero.\n# MAGIC - The variable `airport_delay` has null values. We treat these cases in the same way as above. We will assume no delay and replace the null values by zero.\n# MAGIC - The variable `flight_aircraft` has null values. These null values are because there are records with no aircraft tail number, and this variable is based on the aircraft tail number. These records are going to be filtered out. \n# MAGIC - The variable `dep_delay_aircraft` has null values. These values also have their roots in the absence of aircraft tail number. In some cases, is because there is no information available of previous departures in the time window considered. As we are going to filter out records with no aircraft tail number information many of these null values are going to disappear. For the remaining records with null values we assume no delay, hence we replace the null by zero.\n# MAGIC - The weather variables have null values. These null values are going to be replaced by calculated average values.\n\n# COMMAND ----------\n\n# Fix null values\n\nprint(f'Number of records before fixing null values: {df_final.count()}')\n\ndf_final = df_final.na.fill(value=0,subset=['carrier_delay'])\ndf_final = df_final.na.fill(value=0,subset=['airport_delay'])\ndf_final = df_final.where(col('flight_aircraft').isNotNull())\ndf_final = df_final.na.fill(value=0,subset=['dep_delay_aircraft'])\n\ndf_final = df_final.na.fill(value=191.086034312939,subset=['Src_WND_0'])\ndf_final = df_final.na.fill(value=5.72357620777156,subset=['Src_WND_1'])\ndf_final = df_final.na.fill(value=32.4866334329149,subset=['Src_WND_3'])\ndf_final = df_final.na.fill(value=4.76868518022325,subset=['Src_WND_4'])\ndf_final = df_final.na.fill(value=14187.9922049176,subset=['Src_CIG_0'])\ndf_final = df_final.na.fill(value=5.34314229485104,subset=['Src_CIG_1'])\ndf_final = df_final.na.fill(value=14861.7568941402,subset=['Src_VIS_0'])\ndf_final = df_final.na.fill(value=5.18320388944515,subset=['Src_VIS_1'])\ndf_final = df_final.na.fill(value=9,subset=['Src_VIS_2'])\ndf_final = df_final.na.fill(value=5.83272417230157,subset=['Src_VIS_3'])\ndf_final = df_final.na.fill(value=128.150652427398,subset=['Src_TMP_0'])\ndf_final = df_final.na.fill(value=4.3669120773439,subset=['Src_TMP_1'])\ndf_final = df_final.na.fill(value=63.7693705711082,subset=['Src_DEW_0'])\ndf_final = df_final.na.fill(value=5.09696044502617,subset=['Src_DEW_1'])\ndf_final = df_final.na.fill(value=10161.8193223597,subset=['Src_SLP_0'])\ndf_final = df_final.na.fill(value=6.76090483370341,subset=['Src_SLP_1'])\ndf_final = df_final.na.fill(value=3.45184787386221,subset=['Src_GA1_0'])\ndf_final = df_final.na.fill(value=5.03447864378367,subset=['Src_GA1_1'])\ndf_final = df_final.na.fill(value=1362.80313152472,subset=['Src_GA1_2'])\ndf_final = df_final.na.fill(value=6.93450578301615,subset=['Src_GA1_3'])\ndf_final = df_final.na.fill(value=98.9195925988655,subset=['Src_GA1_4'])\ndf_final = df_final.na.fill(value=8.99639637985335,subset=['Src_GA1_5'])\n\ndf_final = df_final.na.fill(value=191.086034312939,subset=['Dst_WND_0'])\ndf_final = df_final.na.fill(value=5.72357620777156,subset=['Dst_WND_1'])\ndf_final = df_final.na.fill(value=32.4866334329149,subset=['Dst_WND_3'])\ndf_final = df_final.na.fill(value=4.76868518022325,subset=['Dst_WND_4'])\ndf_final = df_final.na.fill(value=14187.9922049176,subset=['Dst_CIG_0'])\ndf_final = df_final.na.fill(value=5.34314229485104,subset=['Dst_CIG_1'])\ndf_final = df_final.na.fill(value=14861.7568941402,subset=['Dst_VIS_0'])\ndf_final = df_final.na.fill(value=5.18320388944515,subset=['Dst_VIS_1'])\ndf_final = df_final.na.fill(value=9,subset=['Dst_VIS_2'])\ndf_final = df_final.na.fill(value=5.83272417230157,subset=['Dst_VIS_3'])\ndf_final = df_final.na.fill(value=128.150652427398,subset=['Dst_TMP_0'])\ndf_final = df_final.na.fill(value=4.3669120773439,subset=['Dst_TMP_1'])\ndf_final = df_final.na.fill(value=63.7693705711082,subset=['Dst_DEW_0'])\ndf_final = df_final.na.fill(value=5.09696044502617,subset=['Dst_DEW_1'])\ndf_final = df_final.na.fill(value=10161.8193223597,subset=['Dst_SLP_0'])\ndf_final = df_final.na.fill(value=6.76090483370341,subset=['Dst_SLP_1'])\ndf_final = df_final.na.fill(value=3.45184787386221,subset=['Dst_GA1_0'])\ndf_final = df_final.na.fill(value=5.03447864378367,subset=['Dst_GA1_1'])\ndf_final = df_final.na.fill(value=1362.80313152472,subset=['Dst_GA1_2'])\ndf_final = df_final.na.fill(value=6.93450578301615,subset=['Dst_GA1_3'])\ndf_final = df_final.na.fill(value=98.9195925988655,subset=['Dst_GA1_4'])\ndf_final = df_final.na.fill(value=8.99639637985335,subset=['Dst_GA1_5'])\n\nprint(f'Number of records after fixing null values: {df_final.count()}')\n\n# COMMAND ----------\n\n# Check again for null values to verify there are no null values\ndf_final.select([count(when(isnan(c) | col(c).isNull(), c)).alias(c) for c in df_final.columns]).toPandas()\n\n# COMMAND ----------\n\n# MAGIC %md No null values in the dataframe. We can now save to a parquet file. \n# MAGIC \n# MAGIC \n# MAGIC Note: The only variable that shows null values (`DEP_TIME`) is not used as variable in our models. It was left in the dataframe to try some encoding methods in the toy dataset.\n\n# COMMAND ----------\n\n# save flights data with all features including weather\n\ndbutils.fs.rm(path + \"/processed_data/\" + \"airlines_with_features.parquet\", recurse =True)\ndf_final.write.format(\"parquet\").save(path + \"/processed_data/\" + \"airlines_with_features.parquet\")\n\ndisplay(dbutils.fs.ls(path + \"/processed_data/\") )\n\n# COMMAND ----------\n\n# create temp view\ndf_final.createOrReplaceTempView(\"df_final\")\n\n# COMMAND ----------\n\n# MAGIC %sql DROP TABLE IF EXISTS airlines_with_features_sql; CREATE TABLE airlines_with_features_sql AS SELECT * FROM df_final\n\n# COMMAND ----------\n\n# MAGIC %md ## Modeling\n# MAGIC \n# MAGIC In this section we create our baseline model, develop more sophisticated models and experiment with methods to improve model's performance: oversampling, PCA among others.\n\n# COMMAND ----------\n\n# MAGIC %md ### Helper functions\n\n# COMMAND ----------\n\n#SOURCE https://runawayhorse001.github.io/LearningApacheSpark/classification.html\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# COMMAND ----------\n\ndef get_dtype(df,coltype):\n col_list = []\n for name,dtype in df.dtypes:\n if dtype == coltype:\n col_list.append(name)\n return col_list\n\n# COMMAND ----------\n\ndef sample_df(df,ratio,sample_flag):\n \n df_minority = df.where(col('DEP_DEL15') == 1)\n df_majority = df.where(col('DEP_DEL15') == 0)\n \n # Oversample\n if sample_flag == 1:\n # range based on calculated ratio\n y = range(ratio)\n\n # duplicate the minority class rows\n df_duplicate = df_minority.withColumn('temp', explode(array([lit(x) for x in y]))).drop('temp')\n\n # combine oversampled delayed flights with not delayed flight records\n df_oversampled = df_majority.union(df_duplicate)\n\n # check the results\n\n not_delayed = df_oversampled.where(col('DEP_DEL15') == 0).count()\n delayed = df_oversampled.where(col('DEP_DEL15') == 1).count()\n\n print('Oversampling minority class results in:')\n print(f'Number of flights delayed: {delayed}')\n print(f'Number of flights not-delayed: {not_delayed}')\n print(f'Ratio: {not_delayed / delayed}')\n \n return df_oversampled\n \n # Undersample\n if sample_flag == 2:\n # undersample the records corresponding to not delayed flights according to the ratio 1:4\n df_sampled_major = df_majority.sample(False, 1/ratio)\n\n # create new dataframe with undersampled DEP_DEL15=0 and all records DEP_DEL15=1\n df_undersampled = df_sampled_major.union(df_minority)\n\n # check the results\n\n not_delayed = df_undersampled.where(col('DEP_DEL15') == 0).count()\n delayed = df_undersampled.where(col('DEP_DEL15') == 1).count()\n\n print('Undersampling majority class results in:')\n print(f'Number of flights delayed: {delayed}')\n print(f'Number of flights not-delayed: {not_delayed}')\n print(f'Ratio: {not_delayed / delayed}')\n \n return df_undersampled\n \n return df\n\n# COMMAND ----------\n\ndef ExtractFeatureImp(featureImp, dataset, featuresCol):\n list_extract = []\n for i in dataset.schema[featuresCol].metadata[\"ml_attr\"][\"attrs\"]:\n list_extract = list_extract + dataset.schema[featuresCol].metadata[\"ml_attr\"][\"attrs\"][i]\n varlist = pd.DataFrame(list_extract)\n varlist['score'] = varlist['idx'].apply(lambda x: featureImp[x])\n return(varlist.sort_values('score', ascending = False))\n\n# COMMAND ----------\n\n# MAGIC %md ### Get the data\n\n# COMMAND ----------\n\n#SET CONTEXT\nsc = spark.sparkContext\nsqlContext = SQLContext(sc)\n\n#GET DATA\ndf = sqlContext.sql(\"\"\"SELECT * from airlines_with_features_sql\"\"\")\n\n# COMMAND ----------\n\nprint(get_dtype(df,'int'))\nprint(\"================================\")\nprint(get_dtype(df,'double'))\nprint(\"================================\")\nprint(get_dtype(df,'long'))\nprint(\"================================\")\nprint(get_dtype(df,'string'))\n\n# COMMAND ----------\n\n# MAGIC %md ### Model data pipeline\n# MAGIC Pipeline to prepare data for modeling.\n\n# COMMAND ----------\n\n#SELECT FEATURES - NUMERIC\nnumericCols = ['DAY_OF_MONTH', 'DAY_OF_WEEK','YEAR', 'QUARTER', 'MONTH', 'H_DEP', 'ORPageRank', 'proxy_origin', 'DESTPageRank', 'proxy_dest',\n #'carrier_delay','airport_delay', 'DISTANCE', 'flight_aircraft', 'dep_delay_aircraft', \n 'DISTANCE', 'DEP_CNT', 'DST_CNT', 'Src_WND_0', 'Src_WND_1', \n 'Src_WND_3', 'Src_WND_4', 'Src_CIG_0', 'Src_CIG_1', 'Src_VIS_0', 'Src_VIS_1', 'Src_VIS_2', 'Src_VIS_3', 'Src_TMP_0', 'Src_TMP_1', \n 'Src_DEW_0', 'Src_DEW_1', 'Src_SLP_0', 'Src_SLP_1', 'Src_GA1_0', 'Src_GA1_1', 'Src_GA1_2', 'Src_GA1_3', 'Src_GA1_4', 'Src_GA1_5', \n 'Dst_WND_0', 'Dst_WND_1', 'Dst_WND_3', 'Dst_WND_4', 'Dst_CIG_0', 'Dst_CIG_1', 'Dst_VIS_0', 'Dst_VIS_1', 'Dst_VIS_2', 'Dst_VIS_3', \n 'Dst_TMP_0', 'Dst_TMP_1', 'Dst_DEW_0', 'Dst_DEW_1', 'Dst_SLP_0', 'Dst_SLP_1', 'Dst_GA1_0', 'Dst_GA1_1', 'Dst_GA1_2', 'Dst_GA1_3', \n 'Dst_GA1_4', 'Dst_GA1_5']\n\n#SELECT FEATURES - CATEGORICAL\ncategoricalColumns = ['OP_CARRIER']\n\n# COMMAND ----------\n\n# MAGIC %md Below we create a pipeline to prepare the data for modeling: \n# MAGIC - Create indexer for categorical and numerical variables.\n# MAGIC - Create indexer for the label column.\n# MAGIC - One-hot encode categorical variable.\n# MAGIC - Create one column that contains a vector with all features of the model.\n\n# COMMAND ----------\n\n# List of columns in the dataframe\ncols = df.columns\n\n# Create indexer for categorical variables and encode them using one-hot-encoding\n# Add these steps to the pipeline stages\nstages = []\nfor categoricalCol in categoricalColumns:\n stringIndexer = StringIndexer(inputCol = categoricalCol, outputCol = categoricalCol + 'Index')\n encoder = OneHotEncoder (inputCols=[stringIndexer.getOutputCol()], outputCols=[categoricalCol + \"classVec\"])\n stages += [stringIndexer, encoder]\n \n# Create indexer for the label variable\n# Add this step to the pipeline stages\nlabel_stringIdx = StringIndexer(inputCol = 'DEP_DEL15', outputCol = 'label')\nstages += [label_stringIdx]\n\n# Create a transformer that merges multiple columns into a vector column that represents the features for the model\n# Add this step to the pipeline stages\nassemblerInputs = [c + \"classVec\" for c in categoricalColumns] + numericCols\nassembler = VectorAssembler(inputCols=assemblerInputs, outputCol=\"features\")\nstages += [assembler]\n\n# Create a pipeline: sequence of stages to transform the data\npipeline = Pipeline(stages = stages)\n\n# The fit() method is called to produce a transformer that contains all transformations in stages\npipelineModel = pipeline.fit(df)\n\n# Run the transformation in the dataframe\ndf = pipelineModel.transform(df)\n\n# Select columns including the labels and the features column with all the features as a vector\nselectedCols = ['label', 'features'] + cols\ndf = df.select(selectedCols)\n\n# COMMAND ----------\n\n# MAGIC %md ### Split data into training, development and test sets\n# MAGIC \n# MAGIC All the data but the last 3 months is used for training. The last 3 months of 2019 are split into test and development sets to run prediction on more recent data. This represents a rolling window where the model is always tested with the most recent data and trained with all remaining data. \n\n# COMMAND ----------\n\n# TRAIN AND ALL DATA BUT LAST 3M\ntrain = df.where(((col('YEAR') == 2015) | (col('YEAR') == 2016) | (col('YEAR') == 2017) | (col('YEAR') == 2018)) | ((col('YEAR') == 2019) & (col('MONTH') <10) ))\nprint(f'{train.count():} records in train data')\n\n# TEST/DEV ON LAST 3M OF DATA\ntest, dev = (df.where((col('YEAR') == 2019) & (col('MONTH')>=10))).randomSplit([0.5,0.5],seed=1)\nprint(f'{test.count():} records in test data') \nprint(f'{dev.count():} records in dev data')\n\n# COMMAND ----------\n\n# MAGIC %md ### Baseline: Logistic Regression\n# MAGIC \n# MAGIC For its simplicity, as a baseline algorithm we chose to use Logistic Regression, which is a very common algorithm and probably one of the most common tools for classification. \n# MAGIC \n# MAGIC In the `Algorithm Implementation` section there is a detailed description of the Logistic Regression algorithm. \n\n# COMMAND ----------\n\nlr = LogisticRegression(featuresCol = 'features', labelCol = 'label', maxIter=10)\nlrModel = lr.fit(train)\npredictions = lrModel.transform(test)\n\n# COMMAND ----------\n\ntrainingSummary = lrModel.summary\nroc = trainingSummary.roc.toPandas()\nplt.plot(roc['FPR'],roc['TPR'])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC Curve')\nplt.show()\nprint('Training set area Under ROC: ' + str(trainingSummary.areaUnderROC))\n\nevaluator = BinaryClassificationEvaluator()\nprint('Test Area Under ROC', evaluator.evaluate(predictions))\n\nevaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\")\naccuracy = evaluator.evaluate(predictions)\nprint(\"Accuracy = %s\" % (accuracy))\nprint(\"Test Error = %s\" % (1.0 - accuracy))\n\nclass_temp = predictions.select(\"label\").groupBy(\"label\").count().sort('count', ascending=False).toPandas()\nclass_temp = class_temp[\"label\"].values.tolist()\nclass_names = map(str, class_temp)\nprint(class_names)\n\n# Get Predicted VS Actual\ny_true = predictions.select(\"label\").toPandas()\ny_pred = predictions.select(\"prediction\").toPandas()\ncnf_matrix = confusion_matrix(y_true, y_pred)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nclass_names = ['no_delay','delay']\nplot_confusion_matrix(cnf_matrix, classes=class_names,title='Confusion matrix, without normalization')\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,title='Normalized confusion matrix')\nplt.show()\n\nprint(classification_report(y_true, y_pred, target_names=class_names))\n\nprint(accuracy_score(y_true, y_pred))\n\n# COMMAND ----------\n\n# Get Predicted VS Actual\ny_true = predictions.select(\"label\").toPandas()\ny_pred = predictions.select(\"prediction\").toPandas()\ncnf_matrix = confusion_matrix(y_true, y_pred)\n\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,title='Normalized confusion matrix')\nplt.show()\n\nprint(classification_report(y_true, y_pred, target_names=class_names))\n\n# COMMAND ----------\n\n# MAGIC %md The departure delay prediction is very complex and non-linear. Because of this, a linear classifier such as logistic regression does not achieve high performance. Only 11% of departure delays are detected, and the f1-score is low (0.19).\n\n# COMMAND ----------\n\n# MAGIC %md ### Sophisticated Modeling\n# MAGIC \n# MAGIC In this section we develop models that can better handle the non-linear relationship between our predictors and the outcome variable.\n\n# COMMAND ----------\n\n#SET CONTEXT\nsc = spark.sparkContext\nsqlContext = SQLContext(sc)\n\n#GET DATA\ndf = sqlContext.sql(\"\"\"SELECT * FROM airlines_with_features_sql\"\"\")\n\n# COMMAND ----------\n\n# calculate the ratio of the classes\ndf_minority = df.where(col('DEP_DEL15') == 1)\ndf_majority = df.where(col('DEP_DEL15') == 0)\nmajor = df_majority.count()\nminor = df_minority.count()\nratio = int(major/minor)\nprint(f'There are {ratio} times more flights not delayed than flights delayed')\nprint(f'Number of records of flights delayed: {minor}')\nprint(f'Number of records of flights not delayed: {major}')\n\n# COMMAND ----------\n\n#SELECT FEATURES - NUMERIC ALL\nnumericColsAll = ['ORPageRank', 'proxy_origin', 'proxy_dest','carrier_delay','airport_delay','H_DEP','Src_WND_0', 'Src_WND_1', 'Src_WND_3', 'Src_WND_4', 'Src_CIG_0', 'Src_CIG_1', 'Src_VIS_0', 'Src_VIS_1', 'Src_VIS_2', 'Src_VIS_3', 'Src_TMP_0', 'Src_TMP_1', 'Src_DEW_0', 'Src_DEW_1', 'Src_SLP_0', 'Src_SLP_1', 'Src_GA1_0', 'Src_GA1_1', 'Src_GA1_2', 'Src_GA1_3', 'Src_GA1_4', 'Src_GA1_5', 'Dst_WND_0', 'Dst_WND_1', 'Dst_WND_3', 'Dst_WND_4', 'Dst_CIG_0', 'Dst_CIG_1', 'Dst_VIS_0', 'Dst_VIS_1', 'Dst_VIS_2', 'Dst_VIS_3', 'Dst_TMP_0', 'Dst_TMP_1', 'Dst_DEW_0', 'Dst_DEW_1', 'Dst_SLP_0', 'Dst_SLP_1', 'Dst_GA1_0', 'Dst_GA1_1', 'Dst_GA1_2', 'Dst_GA1_3', 'Dst_GA1_4', 'Dst_GA1_5','DEP_CNT', 'DST_CNT', 'DESTPageRank', 'DAY_OF_MONTH', 'DAY_OF_WEEK','YEAR', 'QUARTER', 'MONTH' , 'DISTANCE','flight_aircraft','dep_delay_aircraft']\n\n#SELECT FEATURES - NOT WEATHER\nnumericCols = ['ORPageRank', 'proxy_origin', 'proxy_dest','carrier_delay','airport_delay','H_DEP','DEP_CNT', 'DST_CNT', 'DESTPageRank', 'DAY_OF_MONTH', 'DAY_OF_WEEK','YEAR', 'QUARTER', 'MONTH' , 'DISTANCE','flight_aircraft','dep_delay_aircraft']\n\n#SELECT FEATURES WEATHER \nnumericColsWH = ['Src_WND_0', 'Src_WND_1', 'Src_WND_3', 'Src_WND_4', 'Src_CIG_0', 'Src_CIG_1', 'Src_VIS_0', 'Src_VIS_1', 'Src_VIS_2', 'Src_VIS_3', 'Src_TMP_0', 'Src_TMP_1', 'Src_DEW_0', 'Src_DEW_1', 'Src_SLP_0', 'Src_SLP_1', 'Src_GA1_0', 'Src_GA1_1', 'Src_GA1_2', 'Src_GA1_3', 'Src_GA1_4', 'Src_GA1_5', 'Dst_WND_0', 'Dst_WND_1', 'Dst_WND_3', 'Dst_WND_4', 'Dst_CIG_0', 'Dst_CIG_1', 'Dst_VIS_0', 'Dst_VIS_1', 'Dst_VIS_2', 'Dst_VIS_3', 'Dst_TMP_0', 'Dst_TMP_1', 'Dst_DEW_0', 'Dst_DEW_1', 'Dst_SLP_0', 'Dst_SLP_1', 'Dst_GA1_0', 'Dst_GA1_1', 'Dst_GA1_2', 'Dst_GA1_3', 'Dst_GA1_4', 'Dst_GA1_5']\n\n#SELECT FEATURES - CATEGORICAL\ncategoricalColumns = ['OP_CARRIER']\n\n# COMMAND ----------\n\n# MAGIC %md #### Principal Component Analysis (PCA)\n# MAGIC \n# MAGIC PCA is a statistical procedure that uses an orthogonal transformation to convert a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. \n# MAGIC \n# MAGIC We have identified the weather features as an ideal feature set to use with PCA due to the highly correlated of the many measurments that make up each observation. \n# MAGIC \n# MAGIC Components Number is set at 6 based on running PCA with a larger size and using the Explained Variance graph to determine that no additional variance is added if a larger number is used\n\n# COMMAND ----------\n\n#VECTOR THE INPUT DATA\nassembler = VectorAssembler(inputCols=numericColsWH, outputCol=\"PCA_IN_Numfeatures\")\ndf = assembler.transform(df)\n\n#RUN PCA\npca = PCA(k=7 , inputCol=\"PCA_IN_Numfeatures\" , outputCol=\"PCA_OUT_Numfeatures\")\nmodel = pca.fit(df)\ndf = model.transform(df)\n\n#GRAPH EXPLAINED VARIANCE\ndata = model.explainedVariance.values\nplt.bar(range(1,8,1), data)\nplt.xticks(np.arange(1, 8, 1))\nplt.title(\"Principal Component Explained Variance VS Each Principal Component\")\nplt.xlabel(\"Principal Components\")\nplt.ylabel(\"Explained Variance\")\n\n# COMMAND ----------\n\n# MAGIC %md #### Model Data PipeLine\n\n# COMMAND ----------\n\n#ALL COLUMNS\ncols = df.columns\n\n#VECTORIZE NUMERIC COLS\nassembler = VectorAssembler(inputCols=numericCols, outputCol=\"Numfeatures\")\ndf = assembler.transform(df)\n\n#MIN-MAX-SCALER\nScaler = MinMaxScaler(inputCol=\"Numfeatures\", outputCol=\"scaledFeatures\")\ndf = Scaler.fit(df).transform(df)\n\n#CAT COLUMNS\nstages = []\nfor categoricalCol in categoricalColumns:\n stringIndexer = StringIndexer(inputCol = categoricalCol, outputCol = categoricalCol + 'Index')\n encoder = OneHotEncoder (inputCols=[stringIndexer.getOutputCol()], outputCols=[categoricalCol + \"classVec\"])\n stages += [stringIndexer, encoder]\n \n#LABELS\nlabel_stringIdx = StringIndexer(inputCol = 'DEP_DEL15', outputCol = 'label')\nstages += [label_stringIdx]\n\n#ASSEBLER\nassemblerInputs = [c + \"classVec\" for c in categoricalColumns] \nassembler = VectorAssembler(inputCols=assemblerInputs, outputCol=\"featuresCat\")\nstages += [assembler]\n\n#PIPELINE\npipeline = Pipeline(stages = stages)\npipelineModel = pipeline.fit(df)\ndf = pipelineModel.transform(df)\nselectedCols = ['label', 'featuresCat'] + cols + ['Numfeatures','scaledFeatures']\ndf = df.select(selectedCols)\n\n#ADD CAT_FEATURES & MINMAX & PCA --> VECTOR\nassembler = VectorAssembler(inputCols=[\"featuresCat\", \"scaledFeatures\",\"PCA_OUT_Numfeatures\"],outputCol=\"features\")\ndf = assembler.transform(df)\n\n# COMMAND ----------\n\n# TRAIN AND ALL DATA BUT LAST 3M\ntrain = df.where(((col('YEAR') == 2015) | (col('YEAR') == 2016) | (col('YEAR') == 2017) | (col('YEAR') == 2018)) | ((col('YEAR') == 2019) & (col('MONTH') <10) ))\nprint(f'{train.count():} records in train data')\n\n# TEST/DEV ON LAST 3M OF DATA\ntest, dev = (df.where((col('YEAR') == 2019) & (col('MONTH')>=10))).randomSplit([0.5,0.5],seed=1)\nprint(f'{test.count():} records in test data') \nprint(f'{dev.count():} records in dev data')\n\n# OVERSAMPLE MINORITY CLASS\ntrain = sample_df(train,ratio,1)\n\n# COMMAND ----------\n\n# MAGIC %md #### LogisticRegression\n# MAGIC \n# MAGIC Hyperparameters: \n# MAGIC \n# MAGIC Elastic net contains both L1 and L2 regularization as special cases. If elastic net parameter α set to 1, it is equivalent to a Lasso model. On the other hand, if α is set to 0, the trained model reduces to a ridge regression model.\n# MAGIC \n# MAGIC - elasticNetParam corresponds to α \n# MAGIC - regParam corresponds to λ.\n\n# COMMAND ----------\n\n#regParam=0.1, elasticNetParam=0.95\nlr = LogisticRegression(featuresCol = 'features', labelCol = 'label', maxIter=10, family=\"multinomial\")\nlrModel = lr.fit(train)\npredictions = lrModel.transform(test)\n\n# COMMAND ----------\n\ntrainingSummary = lrModel.summary\nroc = trainingSummary.roc.toPandas()\nplt.plot(roc['FPR'],roc['TPR'])\nplt.ylabel('False Positive Rate')\nplt.xlabel('True Positive Rate')\nplt.title('ROC Curve')\nplt.show()\nprint('Training set area Under ROC: ' + str(trainingSummary.areaUnderROC))\n\nevaluator = BinaryClassificationEvaluator()\nprint('Test Area Under ROC', evaluator.evaluate(predictions))\n\nevaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\")\naccuracy = evaluator.evaluate(predictions)\nprint(\"Accuracy = %s\" % (accuracy))\nprint(\"Test Error = %s\" % (1.0 - accuracy))\n\nclass_temp = predictions.select(\"label\").groupBy(\"label\").count().sort('count', ascending=False).toPandas()\nclass_temp = class_temp[\"label\"].values.tolist()\nclass_names = map(str, class_temp)\nprint(class_names)\n\n# Get Predicted VS Actual\ny_true = predictions.select(\"label\").toPandas()\ny_pred = predictions.select(\"prediction\").toPandas()\ncnf_matrix = confusion_matrix(y_true, y_pred)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nclass_names = ['no_delay','delay']\nplot_confusion_matrix(cnf_matrix, classes=class_names,title='Confusion matrix, without normalization')\nplt.show()\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,title='Normalized confusion matrix')\nplt.show()\n\nprint(classification_report(y_true, y_pred, target_names=class_names))\n\nprint(accuracy_score(y_true, y_pred))\n\n# COMMAND ----------\n\n# MAGIC %md #### DecisionTree\n\n# COMMAND ----------\n\n# Create initial Decision Tree Model\ndt = DecisionTreeClassifier(labelCol=\"label\", featuresCol=\"features\", maxDepth=6)\n \n# Train model with Training Data\ndtModel = dt.fit(train)\n\n# COMMAND ----------\n\nprint(\"numNodes = \", dtModel.numNodes)\nprint(\"depth = \", dtModel.depth)\ndisplay(dtModel)\n\n# Make predictions on test data using the Transformer.transform() method.\npredictions = dtModel.transform(test)\n\n# Evaluate model\nevaluator = BinaryClassificationEvaluator()\nevaluator.evaluate(predictions)\n\n# COMMAND ----------\n\n# MAGIC %md #### RandomForestClassifier\n\n# COMMAND ----------\n\nrf = RandomForestClassifier(featuresCol = 'features', labelCol = 'label', numTrees = 100)\nrfModel = rf.fit(train)\npredictions = rfModel.transform(test)\n\n# COMMAND ----------\n\nevaluator = BinaryClassificationEvaluator()\nprint(\"Test Area Under ROC: \" + str(evaluator.evaluate(predictions, {evaluator.metricName: \"areaUnderROC\"})))\n\n# COMMAND ----------\n\nExtractFeatureImp(rfModel.featureImportances, train, \"features\").head(100)\n\n# COMMAND ----------\n\n# MAGIC %md #### GBTClassifier\n\n# COMMAND ----------\n\ngbt = GBTClassifier(maxIter=100)\ngbtModel = gbt.fit(train)\npredictions = gbtModel.transform(test)\n\n# COMMAND ----------\n\nevaluator = BinaryClassificationEvaluator()\nprint(\"Test Area Under ROC: \" + str(evaluator.evaluate(predictions, {evaluator.metricName: \"areaUnderROC\"})))\n\n# COMMAND ----------\n\n# MAGIC %md ## Conclusions\n# MAGIC > **Reference from instructions and rubric**\n# MAGIC >\n# MAGIC > From project instructions: report results and learnings for both the ML as well as the scalability.\n# MAGIC >\n# MAGIC > From rubric:\n# MAGIC > - Interpret and visualize the model scores, ie: confusion matrix.\n# MAGIC > - Draw effective conclusions that justify that the problem as envisioned originally is actually solved.\n\n# COMMAND ----------\n\n# MAGIC %md ## Algorithm Implementation\n# MAGIC \n# MAGIC We implemented Logistic Regression. \n# MAGIC \n# MAGIC The algorithm implementation is available in the notebook:\n\n# COMMAND ----------\n\n# MAGIC %md ## Application of Course Concepts\n\n# COMMAND ----------\n\n# MAGIC %md ###Encoding\n# MAGIC \n# MAGIC The airline/weather dataset has multiple fields that are numerical as well categorical in nature. Most machine learning algorithms do much better with numerical data than categorical data. While MLlib does account for some these conversions in its algorithm implmentations, doing encoding on the data prior to feeding it into an algorithm is advantageous as it gives the user the ability to choose the encoding method they may think is more appropriate to the dataset. For example, while One Hot Encoding could be leveraged, high cardinality on the data would result in larger set of features getting fed into the algorithm. This naturally can increase computation time as well as overall complexity of the model. At the end of the day, whether something like Feature hashing or Target encoding is applied, will really be based on general performance of the model with that encoding scheme applied. \n\n# COMMAND ----------\n\n# MAGIC %md ###Scaling\n# MAGIC \n# MAGIC A lot of the numerical data, more specifically the weather data has a varied range of continuous values. What one needs to avoid is have an algorithm give more weightage to a feature due to its higher ranging numbers. One other reason here is for algorithms like Logistic regression, gradient descent converges faster with scaled features than without - a case of saturation on an activation function.\n# MAGIC \n# MAGIC Another factor to consider here is in regards to feature selection. For leveraging PCA on a large dataset with a lot of features, scaling becomes critical for better performance given that features are selected based on maximum variance and magnitude. Naturally, unscaled data skews PCA results as well.\n\n# COMMAND ----------\n\n# MAGIC %md ###PageRank\n# MAGIC \n# MAGIC A feature that we introduced into the dataset was in noting the importance of an airport as hub in the overall domestic airport network. An airport's importance correlates to its impact on flight operations in terms of throughput of traffic. An important, but congested airport, may become responsible for more delayed flights. This effect is important to capture, and Pagerank can be used in a meaningful way. Calculating the degree centrality of an airport, meaning the number of routes (edges) connected to an airport depict its centrality measure in the ariport network, and thus its importance.\n\n# COMMAND ----------\n\n# MAGIC %md ## References\n# MAGIC \n# MAGIC 1. A Data Mining Approach to Flight Arrival Delay Prediction for American Airlines: https://www.researchgate.net/publication/331858316_A_Data_Mining_Approach_to_Flight_Arrival_Delay_Prediction_for_American_Airlines\n# MAGIC 2. Airport’s location dataset: https://data.humdata.org/dataset/ourairports-usa\n# MAGIC 3. Dictionary for airlines dataset: https://www.transtats.bts.gov/DL_SelectFields.asp?gnoyr_VQ=FGJ\n# MAGIC 4. Dictionary for weather dataset: https://www.ncei.noaa.gov/data/global-hourly/doc/isd-format-document.pdf\n# MAGIC 5. Oversampling/Undersampling: https://medium.com/@junwan01/oversampling-and-undersampling-with-pyspark-5dbc25cdf253\n\n# COMMAND ----------\n\n\n","sub_path":"Team_01_Final_Project/Final_Project_team01_Main.py","file_name":"Final_Project_team01_Main.py","file_ext":"py","file_size_in_byte":56272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"410749970","text":"from discord.ext import commands\nimport os\nimport traceback\nfrom datetime import datetime, timedelta\n\nbot = commands.Bot(command_prefix='/')\ntoken = os.environ['DISCORD_BOT_TOKEN']\n\n@bot.event\nasync def on_voice_state_update(member, before, after): \n if member.guild.id == 311848727759421442 and (before.channel != after.channel):\n now = datetime.utcnow() + timedelta(hours=9)\n alert_channel = bot.get_channel(732484072844034109)\n if before.channel is None: \n msg = f'{now:%m/%d-%H:%M} に {member.name} が {after.channel.name} に参加しました。'\n if after.channel.id == 732484197255610368:\n await alert_channel.send(msg)\n elif after.channel is None: \n msg = f'{now:%m/%d-%H:%M} に {member.name} が {before.channel.name} から退出しました。'\n if before.channel.id == 732484197255610368:\n await alert_channel.send(msg)\n\n@bot.command()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\n\nbot.run(token)\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"520197070","text":"try:\n # noinspection PyCompatibility\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nimport numpy\nfrom sklearn.neighbors import BallTree, DistanceMetric\n\nfrom smqtk.algorithms.nn_index.hash_index import HashIndex\nfrom smqtk.representation import get_data_element_impls\nfrom smqtk.utils import merge_dict, plugin\n\n\nclass SkLearnBallTreeHashIndex (HashIndex):\n \"\"\"\n Hash index using the ball tree implementation in scikit-learn.\n\n *Note:* **When saving this object's model or pickling, we do not naively\n pickle the underlying ball tree due to issues when saving the state of a\n large ball tree. We instead get the state and split its contents up for\n separate serialization via known safe methods.**\n \"\"\"\n\n @classmethod\n def is_usable(cls):\n return BallTree is not None\n\n @classmethod\n def get_default_config(cls):\n \"\"\"\n Generate and return a default configuration dictionary for this class.\n This will be primarily used for generating what the configuration\n dictionary would look like for this class without instantiating it.\n\n By default, we observe what this class's constructor takes as arguments,\n turning those argument names into configuration dictionary keys. If any\n of those arguments have defaults, we will add those values into the\n configuration dictionary appropriately. The dictionary returned should\n only contain JSON compliant value types.\n\n It is not be guaranteed that the configuration dictionary returned\n from this method is valid for construction of an instance of this class.\n\n :return: Default configuration dictionary for the class.\n :rtype: dict\n\n \"\"\"\n c = super(SkLearnBallTreeHashIndex, cls).get_default_config()\n c['cache_element'] = plugin.make_config(get_data_element_impls())\n return c\n\n @classmethod\n def from_config(cls, config_dict, merge_default=True):\n \"\"\"\n Instantiate a new instance of this class given the configuration\n JSON-compliant dictionary encapsulating initialization arguments.\n\n This method should not be called via super unless an instance of the\n class is desired.\n\n :param config_dict: JSON compliant dictionary encapsulating\n a configuration.\n :type config_dict: dict\n\n :param merge_default: Merge the given configuration on top of the\n default provided by ``get_default_config``.\n :type merge_default: bool\n\n :return: Constructed instance from the provided config.\n :rtype: SkLearnBallTreeHashIndex\n\n \"\"\"\n if merge_default:\n config_dict = merge_dict(cls.get_default_config(), config_dict)\n\n # Parse ``cache_element`` configuration if set.\n cache_element = None\n if config_dict['cache_element'] and config_dict['cache_element']['type']:\n cache_element = \\\n plugin.from_plugin_config(config_dict['cache_element'],\n get_data_element_impls())\n config_dict['cache_element'] = cache_element\n\n return super(SkLearnBallTreeHashIndex, cls).from_config(config_dict,\n False)\n\n def __init__(self, cache_element=None, leaf_size=40, random_seed=None):\n \"\"\"\n Initialize Scikit-Learn BallTree index for hash codes.\n\n :param cache_element: Optional data element to cache our index to.\n :type cache_element: smqtk.representation.DataElement | None\n\n :param leaf_size: Number of points at which to switch to brute-force.\n :type leaf_size: int\n\n :param random_seed: Optional random number generator seed (numpy).\n :type random_seed: None | int\n\n \"\"\"\n super(SkLearnBallTreeHashIndex, self).__init__()\n self.cache_element = cache_element\n self.leaf_size = leaf_size\n self.random_seed = random_seed\n\n # the actual index\n #: :type: sklearn.neighbors.BallTree\n self.bt = None\n\n self.load_model()\n\n def get_config(self):\n c = merge_dict(self.get_default_config(), {\n 'leaf_size': self.leaf_size,\n 'random_seed': self.random_seed,\n })\n if self.cache_element:\n c['cache_element'] = merge_dict(c['cache_element'],\n plugin.to_plugin_config(\n self.cache_element))\n return c\n\n def save_model(self):\n \"\"\"\n Cache a built B-Tree index to the configured cache element. This only\n occurs if we have a non-null cache element and a btree to save.\n\n :raises ValueError: If the cache element configured is not writable.\n\n \"\"\"\n if self.cache_element and self.bt:\n if self.cache_element.is_read_only():\n raise ValueError(\"Configured cache element (%s) is read-only.\"\n % self.cache_element)\n\n self._log.debug(\"Saving model: %s\", self.cache_element)\n # Saving BT component matrices separately.\n # - Not saving distance function because its always going to be\n # hamming distance (see ``build_index``).\n s = self.bt.__getstate__()\n tail = s[4:11]\n buff = StringIO()\n numpy.savez(buff,\n data_arr=s[0],\n idx_array_arr=s[1],\n node_data_arr=s[2],\n node_bounds_arr=s[3],\n tail=tail)\n self.cache_element.set_bytes(buff.getvalue())\n self._log.debug(\"Saving model: Done\")\n\n def load_model(self):\n \"\"\"\n Load a btree index from the configured cache element. This only occurs\n if there is a cache element configured and there are bytes there to\n read.\n \"\"\"\n if self.cache_element and not self.cache_element.is_empty():\n self._log.debug(\"Loading model from cache: %s\", self.cache_element)\n buff = StringIO(self.cache_element.get_bytes())\n with numpy.load(buff) as cache:\n tail = tuple(cache['tail'])\n s = (cache['data_arr'], cache['idx_array_arr'],\n cache['node_data_arr'], cache['node_bounds_arr']) +\\\n tail + (DistanceMetric.get_metric('hamming'),)\n #: :type: sklearn.neighbors.BallTree\n self.bt = BallTree.__new__(BallTree)\n self.bt.__setstate__(s)\n self._log.debug(\"Loading mode: Done\")\n\n def count(self):\n return self.bt.data.shape[0] if self.bt else 0\n\n def build_index(self, hashes):\n \"\"\"\n Build the index with the give hash codes (bit-vectors).\n\n Subsequent calls to this method should rebuild the index, not add to\n it. If an exception is raised, the current index, if there is one, will\n not be modified.\n\n :raises ValueError: No data available in the given iterable.\n\n :param hashes: Iterable of descriptor elements to build index\n over.\n :type hashes: collections.Iterable[numpy.ndarray[bool]]\n\n \"\"\"\n self._log.debug(\"Building ball tree\")\n if self.random_seed is not None:\n numpy.random.seed(self.random_seed)\n # BallTree can't take iterables, so catching input in a set of tuples\n # first in order to cull out duplicates (BT will index duplicate values\n # happily).\n hash_tuple_set = set(map(lambda v: tuple(v), hashes))\n if not hash_tuple_set:\n raise ValueError(\"No hashes given.\")\n # Convert tuples back into numpy arrays for BallTree constructor.\n hash_vector_list = map(lambda t: numpy.array(t), hash_tuple_set)\n # If distance metric ever changes, need to update save/load model\n # functions.\n self.bt = BallTree(hash_vector_list, self.leaf_size, metric='hamming')\n self.save_model()\n\n def nn(self, h, n=1):\n \"\"\"\n Return the nearest `N` neighbors to the given hash code.\n\n Distances are in the range [0,1] and are the percent different each\n neighbor hash is from the query, based on the number of bits contained\n in the query.\n\n :param h: Hash code to compute the neighbors of. Should be the same bit\n length as indexed hash codes.\n :type h: numpy.ndarray[bool]\n\n :param n: Number of nearest neighbors to find.\n :type n: int\n\n :raises ValueError: No index to query from.\n\n :return: Tuple of nearest N hash codes and a tuple of the distance\n values to those neighbors.\n :rtype: (tuple[numpy.ndarray[bool]], tuple[float])\n\n \"\"\"\n super(SkLearnBallTreeHashIndex, self).nn(h, n)\n # Reselect N based on how many hashes are currently indexes\n n = min(n, self.count())\n # Reshaping ``h`` into an array of arrays, with just one array (ball\n # tree deprecation warns when giving it a single array).\n dists, idxs = self.bt.query([h], n, return_distance=True)\n # only indexing the first entry became we're only querying with one\n # vector\n neighbors = numpy.asarray(self.bt.data)[idxs[0]].astype(bool)\n return neighbors, dists[0]\n","sub_path":"python/smqtk/algorithms/nn_index/hash_index/sklearn_balltree.py","file_name":"sklearn_balltree.py","file_ext":"py","file_size_in_byte":9410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"173842848","text":"\"\"\"Task : Write a program that takes a number from the user and prints the result to check if it is a prime number.\nThe examples of the desired output are as follows :\ninput → 19 ⇉ output : 19 is a prime number\ninput → 10 ⇉ output : 10 is not a prime number\"\"\"\nx = int(input(\"Enter a number to check whether it is Prime number or not: \"))\ndivider = 0\nfor i in range(2,x):\n if x % i == 0:\n divider += 1\n break\nif divider == 0:\n print(f\"{x} is a prime number\")\nelse:\n print(f\"{x} is not a prime number\")\n\n\n\n","sub_path":"Exercises/4 (Is it a Prime Number?).py","file_name":"4 (Is it a Prime Number?).py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"366315728","text":"import numpy\nfrom .. import backend as D\n\n__all__ = [\n 'brentsroot',\n 'brentsrootvec'\n]\n\ndef brentsroot(f, bounds, tol=None, verbose=False):\n \"\"\"Brent's algorithm for finding root of a bracketed function.\n\n Parameters\n ----------\n f : callable\n callable that evaluates the function whose roots are to be found\n bounds : tuple of float, shape (2,)\n lower and upper bound of interval to find root in\n tol : float-type\n numerical tolerance for the precision of the root\n verbose : bool\n set to true to print useful information\n\n Returns\n -------\n tuple(float-type, bool)\n returns the location of the root if found and a bool indicating a root was found\n\n Examples\n --------\n \n >>> def ft(x):\n return x**2 - (1 - x)**5\n >>> xl, xu = 0.1, 1.0\n >>> x0, success = brentsroot(ft, xl, xu, verbose=True)\n >>> success, x0, ft(x0)\n (True, 0.34595481584824206, 6.938893903907228e-17)\n \n \"\"\"\n lower_bound, upper_bound = bounds\n if tol is None:\n tol = D.epsilon()\n if tol < D.epsilon():\n tol = D.epsilon()\n tol = D.to_float(tol)\n a,b = D.to_float(lower_bound), D.to_float(upper_bound)\n fa = f(a)\n fb = f(b)\n \n if fa*fb >= 0:\n return D.to_float(numpy.inf), False\n if D.abs(fa) < D.abs(fb):\n a,b = b,a\n fa,fb = fb,fa\n \n c = D.copy(a)\n d = D.copy(b)\n fc = f(c)\n \n mflag = True\n conv = False\n numiter = 3\n \n while not conv:\n if verbose:\n print(\"[{numiter}] a={a}, b={b}, f(a)={fa}, f(b)={fb}\".format(**locals()))\n if fa != fc and fb != fc:\n s = (a * fb * fc) / ((fa - fb)*(fa - fc))\n s = s + (b * fa * fc) / ((fb - fa)*(fb - fc))\n s = s + (c * fa * fb) / ((fc - fa)*(fc - fb))\n else:\n s = b - fb * (b - a) / (fb - fa)\n \n cond1 = not ((3 * a + b) / 4 < s < b or b < s < (3 * a + b) / 4)\n cond2 = D.abs(s - b) >= D.abs(b - c)/2\n cond3 = D.abs(s - b) >= D.abs(c - d)/2\n cond4 = D.abs(b - c) < tol\n cond5 = D.abs(c - d) < tol\n bisect_now = cond1 or (mflag and cond2) or (not mflag and cond3) or (mflag and cond4) or (not mflag and cond5)\n mflag = bisect_now\n if mflag:\n s = (a + b) / 2\n\n fs = f(s)\n numiter += 1\n d = c\n \n if fa * fs < 0:\n b = s\n fb = fs\n else:\n a = s\n fa = fs\n \n if D.abs(fa) < D.abs(fb):\n a,b = b,a\n fa,fb = fb,fa\n \n conv = (fb == 0 or fs == 0 or D.abs(b - a) < tol)\n if verbose:\n print(\"[{numiter}] a={a}, b={b}, f(a)={fa}, f(b)={fb}\".format(**locals()))\n return b, D.abs(f(b)) <= tol\n\ndef brentsrootvec(f, bounds, tol=None, verbose=False):\n \"\"\"Vectorised Brent's algorithm for finding root of bracketed functions.\n\n Parameters\n ----------\n f : list of callables\n list of callables each of which evaluates the function to find the root of\n bounds : tuple of float, shape (2,)\n lower and upper bound of interval to find root in\n tol : float-type\n numerical tolerance for the precision of the roots\n verbose : bool\n set to true to print useful information\n\n Returns\n -------\n tuple(list(float-type), list(bool))\n returns a list of the locations of roots and a list of bools indicating whether or not a root was found in the interval\n\n Examples\n --------\n \n >>> f = lambda x: lambda y: x * y - y**2 + x\n >>> xl, xu = 0.1, 1.0\n >>> funcs = [f(i*0.5) for i in range(3)]\n >>> x0, success = brentsrootvec(funcs, xl, xu, verbose=True)\n >>> success, x0, [funcs[i](x0[i]) for i in range(len(funcs))]\n (array([ True, True, True]), array([0. , 1. , 1.61803399]), [0.0, 0.0, 0.0])\n \n \"\"\"\n lower_bound, upper_bound = bounds\n if tol is None:\n tol = D.epsilon()\n if tol < D.epsilon():\n tol = D.epsilon()\n tol = D.to_float(tol)\n a,b = D.stack([lower_bound for _ in range(len(f))]), D.stack([upper_bound for _ in range(len(f))])\n \n def _f(x, msk=None):\n if msk is None:\n if verbose:\n print([f[i](x[i]) for i in range(len(f))])\n print(f[0](x[0]), f[1](x[1]))\n return D.stack([f[i](x[i]) for i in range(len(f))])\n else:\n return D.stack([f[i](x[i]) if msk[i] else D.to_float(0.0) for i in range(len(f))])\n \n if verbose:\n print(_f(a))\n \n conv = D.ones_like(a, dtype=bool)\n \n fa = _f(a)\n fb = _f(b)\n \n mask = (D.abs(fa) < D.abs(fb))\n a[mask], b[mask] = b[mask], a[mask]\n fa[mask], fb[mask] = fb[mask], fa[mask]\n \n c = D.copy(a)\n d = D.copy(b)\n s = D.copy(a)\n fc = _f(c)\n fs = D.copy(fc)\n \n mflag = D.ones_like(a, dtype=bool)\n conv[fa * fb >= 0] = False\n not_conv = D.logical_not(conv)\n numiter = D.ones_like(a, dtype=D.int64)*3\n true_conv = D.abs(_f(b)) <= tol\n \n while D.any(conv):\n if verbose:\n print(\"[{numiter}] a={a}, b={b}, f(a)={fa}, f(b)={fb}, conv={not_conv}\".format(**locals()))\n mask = D.logical_and(fa != fc, fb != fc)\n mask[not_conv] = False \n s[mask] = (a[mask] * fb[mask] * fc[mask]) / ((fa[mask] - fb[mask])*(fa[mask] - fc[mask]))\n s[mask] = s[mask] + (b[mask] * fa[mask] * fc[mask]) / ((fb[mask] - fa[mask])*(fb[mask] - fc[mask]))\n s[mask] = s[mask] + (c[mask] * fa[mask] * fb[mask]) / ((fc[mask] - fa[mask])*(fc[mask] - fb[mask]))\n mask = D.logical_not(mask)\n mask[D.logical_not(conv)] = False\n s[mask] = b[mask] - fb[mask] * (b[mask] - a[mask]) / (fb[mask] - fa[mask])\n \n cond1 = D.logical_not(D.logical_or(D.logical_and((3 * a + b) / 4 < s, s < b), D.logical_and(b < s, s < (3 * a + b) / 4)))\n mask = cond1\n cond2 = D.logical_and(mflag, D.abs(s - b) >= D.abs(b - c)/2)\n mask = D.logical_or(mask, cond2)\n cond3 = D.logical_and(D.logical_not(mflag), D.abs(s - b) >= D.abs(c - d) / 2)\n mask = D.logical_or(mask, cond3)\n cond4 = D.logical_and(mflag, D.abs(b - c) < tol)\n mask = D.logical_or(mask, cond4)\n cond5 = D.logical_and(D.logical_not(mflag), D.abs(c - d) < tol)\n mask = D.logical_or(mask, cond5)\n mask[not_conv] = False\n s[mask] = (a[mask] + b[mask]) / 2\n mflag[mask] = True\n mask = D.logical_not(mask)\n mask[not_conv] = False\n mflag[mask] = False\n \n fs = _f(s, conv)\n numiter[conv] = numiter[conv] + 1\n d = c\n \n mask = fa * fs < 0\n mask[not_conv] = False\n b[mask] = s[mask]\n fb[mask] = fs[mask]\n mask = D.logical_not(mask)\n mask[not_conv] = False\n a[mask] = s[mask]\n fa[mask] = fs[mask] \n \n mask = D.abs(fa) < D.abs(fb)\n mask[not_conv] = False\n a[mask], b[mask] = b[mask], a[mask]\n fa[mask], fb[mask] = fb[mask], fa[mask]\n \n conv = D.logical_not(D.logical_or(D.logical_or(fb == 0, fs == 0), D.abs(b - a) < tol))\n not_conv = D.logical_not(conv)\n true_conv = D.abs(_f(b)) <= tol\n \n if D.any(numiter > 1000):\n break\n if verbose:\n print(\"[{numiter}] a={a}, b={b}, f(a)={fa}, f(b)={fb}, conv={true_conv}\".format(**locals()))\n return b, true_conv","sub_path":"desolver/utilities/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"262988614","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom network import Network\nfrom standard_networks import FC\n\nvocab = dict()\n\nwith open('vocab_746.txt') as f:\n for i,word in enumerate(f):\n word = word.strip()\n vocab[i] = word\n vocab[word] = i\n\n\ndef tokenize(sentence):\n sentence = sentence.split(' ')\n tokens = []\n numbers = list()\n indices = list()\n for i,word in enumerate(sentence):\n if word.isdigit():\n numbers.append(int(word))\n tokens.append('')\n indices.append(i)\n else:\n if word in vocab:\n tokens.append(word)\n else:\n tokens.append('')\n return [vocab[token] for token in tokens],numbers,indices\n\n\nclass RNN(nn.Module):\n def __init__(self,vocab_size,hidden_size):\n super(RNN, self).__init__()\n self.lstm = nn.LSTM(vocab_size,hidden_size,1,bidirectional=True)\n\n def forward(self, x,n1,n2,n3):\n x ,_ = self.lstm(x)\n x = torch.cat((x[-1,...],x[n1,...],x[n2,...],x[n3,...]),1)\n x.view(1,-1)\n return x\n\n\n\nrnn = RNN(len(vocab),75)\nnetwork1 = FC(600,6)\nnetwork2 = FC(600,4)\nnetwork3 = FC(600,2)\nnetwork4 = FC(600,4)\n\n\ndef np1(net,sentence):\n if net.last[0] == str(sentence): #Caching\n return net.last[1]\n tokenized,numbers,indices = tokenize(str(sentence).strip('\"'))\n data = torch.zeros(len(tokenized),1,len(vocab))\n for i,t in enumerate(tokenized):\n data[i,0,t] = 1.0\n outputs = net.net(Variable(data),*indices)\n net.last = (str(sentence),outputs)\n return outputs\n\n\n\ndef np2(net, id):\n representation = np1(networks[0], id)\n outputs = net.net(representation)\n return outputs.squeeze(0)\n\n\n\nnetworks = [Network(rnn, 'nn_rnn', np1),\n Network(network1, 'nn_permute', np2),\n Network(network2, 'nn_op1', np2),\n Network(network3, 'nn_swap', np2),\n Network(network4, 'nn_op2', np2)]\nnetworks[0].last = ('',None)\n\nnetworks[0].optimizer = optim.Adam(rnn.parameters(), lr=0.02)\nnetworks[1].optimizer = optim.Adam(network1.parameters(), lr=0.02)\nnetworks[2].optimizer = optim.Adam(network2.parameters(), lr=0.02)\nnetworks[3].optimizer = optim.Adam(network3.parameters(), lr=0.02)\nnetworks[4].optimizer = optim.Adam(network4.parameters(), lr=0.02)\n","sub_path":"examples/NIPS/Forth/WAP/wap_network.py","file_name":"wap_network.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"261189202","text":"from django.utils.translation import ugettext_lazy as _\n\n\nLANGUAGE_EN = 1\nLANGUAGE_ES = 2\nLANGUAGE_FR = 3\nLANGUAGE_NL = 4\nLANGUAGE_CH = 5\nLANGUAGE_RU = 6\nLANGUAGE_UK = 7\nLANGUAGE_GE = 8\nLANGUAGE_BU = 9\nLANGUAGE_PO = 10\n\nLANGUAGES = (\n (LANGUAGE_EN, _('English')),\n (LANGUAGE_ES, _('Spainish')),\n (LANGUAGE_FR, _('Franch')),\n (LANGUAGE_NL, _('Dutch')),\n (LANGUAGE_CH, _('Chinese')),\n (LANGUAGE_RU, _('Russian')),\n (LANGUAGE_UK, _('Ukrainian')),\n (LANGUAGE_GE, _('German')),\n (LANGUAGE_BU, _('Bulgarian')),\n (LANGUAGE_PO, _('Polish')),\n)\n\n\nCURRENCY_US = 1\nCURRENCY_EU = 2\nCURRENCY_ZL = 3\nCURRENCY_RU = 4\nCURRENCY_HR = 5\n\nCURRENCIES = (\n (CURRENCY_US, _('USA Dollar')),\n (CURRENCY_EU, _('Euro')),\n (CURRENCY_ZL, _('Zloty')),\n (CURRENCY_RU, _('Russian rub')),\n (CURRENCY_HR, _('Hrivnya'))\n)\n","sub_path":"restfullsimple/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"326800112","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"pppoe断线时自动重播,并且刷新pppoe的ip地址\"\"\"\n\nimport time\nimport re\nimport psutil\n\nfrom db.mysql_db import select, update\nfrom networking.nat64 import exec_cmd\n\n\ndef keep_pppoe():\n \"\"\"pppoe重播\"\"\"\n pppoe_from_db = select('SELECT sUserName, sIP, sMask FROM m_tbdialdevice WHERE iStatus=1')\n pppoe_from_db = {i.sUserName:(i.sIP, i.sMask) for i in pppoe_from_db if i.sUserName}\n while 1:\n for username, db_addr in pppoe_from_db.iteritems():\n active_pppoe_name = get_pppoe_device_name(username)\n if active_pppoe_name:\n addr = get_pppoe_devices().get(active_pppoe_name, None)\n if addr is not None:\n if addr != db_addr: # 如果存在对应ppp拨号口,并���有ip,且ip和数据库的不同\n update(\"UPDATE m_tbdialdevice SET `sIP`=?,`sMask`=? WHERE `sUserName`=?\",\n addr[0], addr[1], username)\n pppoe_from_db[username] = addr\n continue\n\n pppoe_cmd = '/usr/sbin/pppoe-start /etc/ppp/pppoe.conf-%s' % username\n status, _ = exec_cmd(pppoe_cmd, msg='re_pppoe:') # 重新拨号\n if status == 0: # 拨号成功\n time.sleep(1)\n iface_name = get_pppoe_device_name(username)\n addr = get_pppoe_devices().get(iface_name, ('', ''))\n update(\"UPDATE m_tbdialdevice SET `sIP`=?,`sMask`=? WHERE `sUserName`=?\", addr[0], addr[1], username)\n pppoe_from_db[username] = addr\n else: # 拨号失败\n update(\"UPDATE m_tbdialdevice SET `sIP`=?,`sMask`=? WHERE `sUserName`=?\", '', '', username)\n pppoe_from_db[username] = ('', '')\n time.sleep(15)\n\n\ndef get_pppoe_device_name(config_name):\n \"\"\"获取拨号产生的ppp虚拟网口名称,如果找不到对应的ppp网口,则表示拨号不成功\"\"\"\n statuse_cmd = '/usr/sbin/pppoe-status /etc/ppp/pppoe.conf-%s' % config_name\n _, output = exec_cmd(statuse_cmd, msg='pppoe_status: ')\n template = re.compile(r'^pppoe-status: Link is up and running on interface (ppp\\d{1,2})$', flags=re.MULTILINE)\n _match = template.search(output)\n if _match:\n return _match.group(1)\n return None\n\n\ndef get_pppoe_devices():\n \"\"\"获得所有成功的拨号设备的ip、掩码信息\n return:\n 例如\n {'ppp1': (u'1.2.3.4', u'255.255.255.255'),\n 'ppp2': (u'2.2.3.5', u'255.255.255.255')\n }\n \"\"\"\n all_devices = psutil.net_if_addrs()\n template = re.compile(r'^ppp\\d{1,2}$')\n pppoe_devices = {}\n for iface, addrs in all_devices.iteritems():\n if template.search(iface):\n for i in addrs:\n if i.family == 2:\n pppoe_devices[iface] = (unicode(i.address), unicode(i.netmask))\n break\n return pppoe_devices\n\n\ndef config_pppoe_systemctl():\n status, output = exec_cmd('systemctl status keep-pppoe')\n if status and 'not-found' in output:\n exec_cmd('/usr/bin/cp /usr/local/bluedon/conf/systemctl/keep-pppoe.service /usr/lib/systemd/system')\n exec_cmd('systemctl disable keep-pppoe')\n\n\ndef start_keep_pppoe_if_need():\n config_pppoe_systemctl()\n pppoe_from_db = select('SELECT sUserName, sIP, sMask FROM m_tbdialdevice WHERE iStatus=1')\n if pppoe_from_db:\n exec_cmd('systemctl restart keep-pppoe')\n else:\n exec_cmd('systemctl stop keep-pppoe')\n\n\nif __name__ == '__main__':\n from utils.logger_init import get_logger\n get_logger('main', '/usr/local/bluedon/log/pppoe.log')\n keep_pppoe()\n","sub_path":"chuhuo_2.71/bluedon/networking/keep_pppoe.py","file_name":"keep_pppoe.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"297416201","text":"import numpy as np\nimport geopy\nfrom .map import Map\n\nclass Greengraph(object):\n def __init__(self, start, end):\n self.start=start\n self.end=end\n self.geocoder=geopy.geocoders.GoogleV3(domain=\"maps.google.co.uk\") #Chooses domain to take maps from.\n \n #Test flags.\n #First test to see if either start or finish arguments are numbers (does not prohibit postcodes).\n def is_number(s): #Function checks if string has a representation as a float.\n try:\n float(s)\n return True\n except ValueError:\n return False\n \n if is_number(self.start) == True or is_number(self.end) == True:\n raise TypeError(\"Start and finish should be a *string*, not a number.\")\n \n \n \n def geolocate(self, place):\n return self.geocoder.geocode(place, exactly_one=False)[0][1]\n \n def location_sequence(self, start,end,steps):\n \n #Test flags\n if start[0] > 90 or start[0] < -90 or end[0] > 90 or end[0] < -90:\n raise ValueError(\"Latitudes must be between -90 and 90.\")\n \n if start[1] > 180 or start[1] < -180 or end[1] > 180 or end[1] < -180:\n raise ValueError(\"Longitudes must be between -180 and 180.\")\n \n lats = np.linspace(start[0], end[0], steps) #Generates set of equally spaced latitudes.\n longs = np.linspace(start[1],end[1], steps)\n return np.vstack([lats, longs]).transpose() #Stacks as vertical arrays\n \n \n def green_between(self, steps):\n \n #Test flags.\n if float(steps) != int(float(steps)):\n raise TypeError(\"Steps must be a postive *integer*.\")\n \n if float(steps) <= 0:\n raise ValueError(\"Steps must be a *postive* integer.\")\n \n return [Map(*location).count_green()\n for location in self.location_sequence(\n self.geolocate(self.start), \n self.geolocate(self.end),\n steps)]\n \n \n ","sub_path":"build/lib/greengraph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"582717886","text":"#!/usr/bin/env python\n#_*_coding:utf-8_*_\nfrom Person import Person\nfrom Car import Car\nfrom School import School\nimport time\n\nclass Story(object):\n def __init__(self):\n pass \n \n def wait(self,seconds):\n time.sleep(seconds)\n \n def introduce(self):\n John = Person('Jonh',20,'male','Student',0,'basketball')\n Liz = Person('Liz',20,'female','Student',0,'dance')\n print(\n '''\n ======================Story Background============================\n ''')\n print('{0} and {1} are classmates of senior high school,they love each other!'.format(John.name,Liz.name))\n \n def Begining(self):\n John = Person('Jonh',20,'male','Student',0,'basketball')\n Liz = Person('Liz',20,'female','Student',0,'dance')\n print(\n '''\n ===========================Story Begin=============================\n '''\n )\n Liz.talk('I was admitted to Beijing City University')\n s.wait(2)\n John.talk('I was not,but I\\'ll go to Beijing and earn the fee of schooling for you')\n s.wait(2)\n Liz.talk('It\\'s so nice of you,thanks')\n s.wait(2)\n John.talk('I love you')\n \n def Transmission(self):\n print(\n '''\n =========================Few years later============================\n Many things happens this years,but they are still together,love each other.\n '''\n )\n \n def interview(self):\n print(\n '''\n ======================Interview after graduate======================\n '''\n )\n Peter = Person('Peter',30,'male','manager',10000,'bar')\n Liz = Person('Liz',24,'female','Student',3000,'dance')\n John = Person('Jonh',20,'male','Student',800,'basketball')\n Liz.talk('Hi,my name is Liz,here is my resume')\n s.wait(2)\n Peter.talk('Nice!You are hired, and by the way, May I drive you home?')\n Liz.talk('Thanks!')\n car = Car('Ferrari')\n print('\\n')\n print('{0} driver {1} home with {2}'.format(Peter.name,Liz.name,car.name))\n s.wait(2)\n \n print('{0} kissed {1},then {1} runs home'.format(Peter.name,Liz.name))\n print('Finnaly{0} and {1} was together and {2} was sad'.format(Peter.name,Liz.name,John.name))\n print('\\n')\n \n def Fightingyears(self):\n print(\n '''\n ===========================Fighting years=========================\n '''\n )\n Liz = Person('Liz',24,'female','Student',3000,'dance')\n John = Person('Jonh',20,'male','Student',800,'basketball')\n school = School('Python trainning Lab')\n print('{0} decide to persume {1} back, and he goes to school called {2}'.format(John.name,Liz.name,school.name))\n \n \n def endline(self):\n print(\n '''\n ===============================Ending==============================\n '''\n )\n John = Person('Jonh',24,'male','IT engineer',100000,'bar')\n Liz = Person('Liz',24,'female','Student',3000,'dance')\n print('{0} learn python,then become a {1},earns {2} a month'.format(John.name,John.work,John.salary))\n s.wait(2)\n print('{0} buys car and house'.format(John.name))\n s.wait(2)\n print('{0} and {1} meet one day'.format(John.name,Liz.name))\n s.wait(2)\n print('{0} said: {1} could you forgive the fault i have made?'.format(Liz.name,John.name))\n \ns = Story()\n","sub_path":"08day4/Virtuallife/Story.py","file_name":"Story.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"268265378","text":"import json\nimport logging\nimport logging.config\nimport os\nimport time\nimport unittest\n\nfrom docker import Client, errors\n\nfrom daemon import jenkins, stand, stand_manager\nfrom daemon.exceptions import DaemonException\n\nlog = logging.getLogger(__name__)\n\n\nclass DaemonTests(unittest.TestCase):\n def setUp(self):\n with open(os.path.join(os.path.dirname(__file__), 'daemon_config_for_tests.json')) as f:\n self.CONFIG = json.load(f)\n self.test_dir = os.path.join(self.CONFIG['work_dir'], 'test_{0}'.format(time.time()))\n os.mkdir(self.test_dir)\n self.CONFIG['work_dir'] = self.test_dir\n logging.config.fileConfig(os.path.join(os.path.dirname(__file__), 'logging_for_tests.config'))\n\n # Test constants\n self.NAME = 'unittest'\n self.DESCRIPTION = 'conatainer for test, delme'\n self.PORT = 8778\n self.PROJECT = 'product_uni'\n self.DB_IP = '192.168.201.187'\n self.LAST_BUILD_DB_NAME = 'docker_test4'\n\n self.EXISTED_STAND_DIR = '/home/okleptsova/PycharmProjects/stand-daemon/test_container'\n self.EXISTED_BUILD_DB_NAME = 'docker_test'\n self.EXISTED_STAND_DETAILS = {'image': self.CONFIG['image'],\n 'container_id': None,\n 'name': self.NAME,\n 'description': self.DESCRIPTION,\n 'port': self.PORT,\n 'docker_url': self.CONFIG['docker_url'],\n 'stand_dir': self.EXISTED_STAND_DIR,\n\n 'db_type': 'postgres',\n 'db_addr': self.DB_IP,\n 'db_port': 5432,\n 'db_name': self.EXISTED_BUILD_DB_NAME,\n 'db_user': self.CONFIG['postgres_user'],\n 'db_pass': self.CONFIG['postgres_pass'],\n\n 'jenkins_project': self.PROJECT,\n 'jenkins_version': None,\n 'jenkins_url': self.CONFIG['jenkins_url'],\n 'jenkins_user': self.CONFIG['jenkins_user'],\n 'jenkins_pass': self.CONFIG['jenkins_pass'],\n 'version': None,\n 'catalina_opt': self.CONFIG['catalina_opt'],\n\n 'active_task': None,\n }\n\n def tearDown(self):\n cli = Client(base_url=self.CONFIG['docker_url'])\n try:\n cli.remove_container(self.NAME, force=True)\n except errors.NotFound:\n pass\n\n def test_1_jenkins(self):\n \"\"\"\n Собирает и скачивает сборки определенной версии\n \"\"\"\n project = 'unipgups_branchTest'\n version = 'tandem_uni_v_2_10_5_2016_07_25'\n j = jenkins.Jenkins(self.CONFIG['jenkins_url'], self.CONFIG['jenkins_user'], self.CONFIG['jenkins_pass'])\n build = j.build_project(project, version)\n j.get_build(project, self.test_dir, build_number=build)\n self.assertTrue(os.path.isdir(os.path.join(self.test_dir, 'WEB-INF')))\n self.assertTrue(os.path.isdir(os.path.join(self.test_dir, 'META-INF')))\n\n def test_2_config(self):\n \"\"\"\n Создание hibernate.properties файла\n \"\"\"\n s = stand.Stand(**self.EXISTED_STAND_DETAILS)\n s.stand_dir = self.test_dir\n config_dir = os.path.join(self.test_dir, 'config')\n os.mkdir(config_dir)\n s._create_hibernate_properties(pattern=self.CONFIG['postgres_hibernate_config'])\n file = os.path.join(config_dir, 'hibernate.properties')\n self.assertTrue(os.path.exists(file))\n with open(file, 'r') as f:\n self.assertEqual('hibernate.dialect org.hibernate.dialect.PostgreSQLDialect\\n'\n 'hibernate.connection.driver_class org.postgresql.Driver\\n'\n 'hibernate.connection.url jdbc:postgresql://{0}:{1}/{2}\\n'\n 'hibernate.connection.username {3}\\n'\n 'hibernate.connection.password {4}\\n'.format(self.DB_IP,\n 5432,\n self.EXISTED_BUILD_DB_NAME,\n self.CONFIG['postgres_user'],\n self.CONFIG['postgres_pass']),\n f.read())\n\n def test_3_container(self):\n \"\"\"\n Создание, запуск, остановка и удаление докер-контейнера\n \"\"\"\n container = stand.Stand(**self.EXISTED_STAND_DETAILS)\n container.create_container()\n container.start()\n time.sleep(10)\n self.assertEqual(container.check_http(180), 'ok')\n container.stop()\n self.assertEqual(container.check_http(180), 'fail')\n container.remove()\n\n def test_4_free_resources(self):\n \"\"\"\n Отказ в запуске стенда при исчерпании ресурсов\n \"\"\"\n sm = stand_manager.StandManager(**self.CONFIG)\n\n self.assertTrue(sm.free_resources())\n some_stand = stand.Stand(**self.EXISTED_STAND_DETAILS)\n some_stand.create_container()\n sm.stands['something_1'] = some_stand\n sm.stands['something_2'] = stand.Stand(**self.EXISTED_STAND_DETAILS)\n sm.start('something_1')\n time.sleep(10)\n with self.assertRaises(DaemonException):\n sm.start('something_2')\n some_stand.remove()\n self.assertTrue(sm.free_resources())\n\n def test_5_stand_no_new_build(self):\n \"\"\"\n Сборка стенда из собранного билда. Стенд должен запускаться. Должна быть информация о стенде.\n Должны быть доступны логи стенда.\n \"\"\"\n sm = stand_manager.StandManager(**self.CONFIG)\n task = sm.add_new(name=self.NAME, description=self.DESCRIPTION, port=self.PORT, jenkins_project=self.PROJECT,\n db_addr=self.DB_IP, db_name=self.LAST_BUILD_DB_NAME)\n task.do_build = False\n task.run()\n\n task.stand.start()\n time.sleep(10)\n self.assertEqual(task.stand.check_http(), 'ok')\n expected = {'status': 'running',\n 'url': 'http://{0}:8778/'.format(self.CONFIG['uni_docker_url']),\n 'name': self.NAME}\n actual = sm.get_stands(full_info=True, active_only=True)\n for pair in expected.items():\n self.assertIn(pair, actual[0].items())\n\n stand_log = sm.catalina_out(self.NAME)\n print(stand_log)\n self.assertNotEqual(stand_log.find('Server startup in'), -1)\n\n def test_6_update_stand(self):\n \"\"\"\n Создание и обновление стенда. Стенд должен запускаться. Должна быть информация о стенде\n \"\"\"\n\n sm = stand_manager.StandManager(**self.CONFIG)\n task = sm.add_new(name=self.NAME, description=self.DESCRIPTION, port=self.PORT, jenkins_project=self.PROJECT,\n db_addr=self.DB_IP, db_name=self.LAST_BUILD_DB_NAME)\n\n task.do_build = False\n task.run()\n\n task.stand.start()\n time.sleep(10)\n self.assertEqual(task.stand.check_http(), 'ok')\n expected = {'status': 'running',\n 'url': 'http://{0}:8778/'.format(self.CONFIG['uni_docker_url']),\n 'name': self.NAME}\n actual = sm.get_stands(full_info=True, active_only=True)\n for pair in expected.items():\n self.assertIn(pair, actual[0].items())\n print(actual[0]['version'])\n self.assertIsNotNone(actual[0]['version'])\n\n task = sm.update(self.NAME)\n task.run()\n\n expected = {'jenkins_project': self.PROJECT,\n 'url': 'http://{0}:8778/'.format(self.CONFIG['uni_docker_url']),\n 'db_type': 'postgres',\n 'status': 'stopped',\n 'db_port': 5432,\n 'db_name': self.LAST_BUILD_DB_NAME,\n 'jenkins_version': None,\n 'name': self.NAME,\n 'db_addr': self.DB_IP,\n 'description': self.DESCRIPTION}\n actual = sm.get_stands(full_info=False, active_only=False)\n for pair in expected.items():\n self.assertIn(pair, actual[0].items())\n print(actual[0]['version'])\n self.assertIsNotNone(actual[0]['version'])\n\n task.stand.start()\n time.sleep(10)\n self.assertEqual(task.stand.check_http(), 'ok')\n\n def test_7_ports(self):\n \"\"\"\n Выдача портов из диапазона. Отказ в выдаче при исчерпании диапазона.\n \"\"\"\n sm = stand_manager.StandManager(**self.CONFIG)\n sm.add_new('8401', '8401', '8401', '8401', port=8401)\n self.assertEqual(8400, sm._get_port())\n sm.add_new('8400', '8400', '8400', '8400')\n sm.add_new('8402', '8402', '8402', '8402')\n with self.assertRaises(DaemonException):\n sm._get_port()\n\n def test_8_containers_from_json(self):\n \"\"\"\n Создание объектов-контейнеров имеющихся стендов при запуске\n \"\"\"\n sm = stand_manager.StandManager(**self.CONFIG)\n sm.add_new('name', 'db_addr', 'db_name', 'jenkins_project')\n sm2 = stand_manager.StandManager(**self.CONFIG)\n\n expected = {'name': 'name', 'db_addr': 'db_addr', 'db_name': 'db_name', 'jenkins_project': 'jenkins_project'}\n actual = sm2.get_stands(full_info=False, active_only=False)\n\n for pair in expected.items():\n self.assertIn(pair, actual[0].items())\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"271324570","text":"import argparse\nimport copy\nimport itertools\nimport numpy\nimport pylab\nimport sys\n\nfrom network import (NI, NE)\n\ndef load_masked_weights(filename):\n # **YUCK** older scripts output 3D rather than 2D weight data\n data = numpy.load(filename)\n if len(data.shape) == 3:\n data = data[0]\n\n weight = numpy.ma.array(data, fill_value=0.0)\n return numpy.ma.fix_invalid(weight)\n\ndef display_raw_weights(masked_weights, figure, axis):\n image = axis.imshow(masked_weights, cmap=\"jet\", interpolation=\"none\")\n axis.set_xlabel(\"Post neuron ID\")\n axis.set_ylabel(\"Pre neuron ID\")\n figure.colorbar(image)\n\ndef display_mean_weights(masked_weights, figure, axis, num_mcu_neurons, palette):\n # Calculate number of minicolumns\n num_minicolumns = masked_weights.shape[0] // num_mcu_neurons\n mean_weights = numpy.zeros((num_minicolumns, num_minicolumns))\n for mi, mj in itertools.product(range(num_minicolumns), repeat=2):\n slice_i = slice(mi, masked_weights.shape[0], num_minicolumns)\n slice_j = slice(mj, masked_weights.shape[0], num_minicolumns)\n sub_weights = masked_weights[slice_i, slice_j]\n\n mean_weights[mi, mj] = numpy.ma.mean(sub_weights)\n\n image = axis.imshow(mean_weights, cmap=palette, interpolation=\"none\")\n return image\n\ndef display_single_attractor(masked_weights, mi, axis, num_mcu_neurons):\n # Calculate number of minicolumns\n num_minicolumns = masked_weights.shape[0] / num_mcu_neurons\n\n slice_i = slice(mi, masked_weights.shape[0], num_minicolumns)\n\n mean_weights = [numpy.ma.mean(masked_weights[slice_i, slice(mj, masked_weights.shape[0], num_minicolumns)])\n for mj in range(num_minicolumns)]\n\n axis.plot(mean_weights, marker=\"x\")\n axis.axhline(linestyle=\"--\", color=\"gray\")\n axis.axvline(mi, linestyle=\"--\", color=\"gray\")\n axis.set_xlabel(\"Post minicolumn ID\")\n axis.set_ylabel(\"Mean weight\")\n\ndef combine_connection_weights(filenames):\n # Initialise combined weights to nan\n combined_masked_weights = numpy.ma.empty((NE, NE), fill_value=0.0)\n combined_masked_weights[:] = numpy.ma.masked\n\n # Loop through all connectors, 256\n for f in filenames:\n # Load weights\n masked_weights = load_masked_weights(f)\n combined_masked_weights = numpy.ma.array(combined_masked_weights.data + masked_weights.data,\n mask=numpy.logical_and(combined_masked_weights.mask, masked_weights.mask),\n fill_value=0.0)\n return combined_masked_weights\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Combine together weights recorded from multiple HCUs and display\")\n parser.add_argument(\"--num_hcus\", type=int, default=9, help=\"How many HCUs is data for\")\n parser.add_argument(\"--num_mcu_neurons\", type=int, default=100, help=\"How many neurons make up an MCU\")\n parser.add_argument(\"--selected_attractor\", type=int, default=7, help=\"Which attractor to plot\")\n parser.add_argument(\"folder\", nargs=1, help=\"Folder to search for weight files in\")\n parser.add_argument(\"filename\", nargs=1, help=\"Filenames of weight files are of the form connection_X_YYYY.npy where filename specified YYYY\")\n args = parser.parse_args()\n\n figure, axes = pylab.subplots(1, 2)\n\n # Build filenames from command line\n filenames = [\"%s/connection_%u_%s.npy\" % (args.folder[0], i, args.filename[0]) for i in range(args.num_hcus ** 2)]\n combined_masked_weights = combine_connection_weights(filenames)\n\n # Display weights\n display_single_attractor(combined_masked_weights, args.selected_attractor, axes[0], args.num_mcu_neurons)\n mean_image = display_mean_weights(combined_masked_weights, figure, axes[1], args.num_mcu_neurons, \"jet\")\n axes[1].set_xlabel(\"Post-synaptic attractor number\")\n axes[1].set_ylabel(\"Pre-synaptic attractor number\")\n\n figure.colorbar(mean_image, shrink=0.75)\n #display_raw_weights(combined_masked_weights, figure, axes[2])\n pylab.show()\n","sub_path":"examples/modular_attractor/analyse_weights.py","file_name":"analyse_weights.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"593633426","text":"import hashlib\nimport datetime\nimport random\nimport string\n\nclass Block(object):\n def __init__(self, timestamp, data, previous_hash, block_number):\n self.timestamp = timestamp\n self.block_number = block_number\n self.data = data\n self.previous_hash = previous_hash\n self.hash = self.calc_hash()\n self.next_block_ptr = None\n self.previous_block_ptr = None\n \n def calc_hash(self):\n sha = hashlib.sha256()\n sha.update(str(self.timestamp).encode('utf-8') + self.data.encode('utf-8'))\n return sha.hexdigest()\n\n def get_timestamp(self):\n return self.timestamp\n \n def get_block_number(self):\n return self.block_number\n \n def get_data(self):\n return self.data\n \n def get_previous_hash(self):\n return self.previous_hash\n\n def get_current_hash(self):\n return self.hash\n \n def get_next_block(self):\n return self.next_block_ptr\n \n def set_next_block(self, next_block):\n self.next_block_ptr = next_block\n\n def get_previous_block(self):\n return self.previous_block_ptr\n \n def set_previous_block(self, previous_block):\n self.previous_block_ptr = previous_block\n \n def __repr__(self):\n s = f\"\"\"Timestamp: {self.timestamp}\nCurrent Hash: {self.hash}\nPrevious Hash: {self.previous_hash}\nData: {self.data}\n \"\"\"\n return s \n \nclass Blockchain(object):\n def __init__(self):\n self.genesis_block = None\n self.last_block = None\n self.block_count = -1\n \n def get_genesis_block(self):\n return self.genesis_block\n\n def set_genesis_block(self, block):\n self.genesis_block = block\n\n def get_last_block(self):\n return self.last_block\n \n def set_last_block(self, block):\n self.last_block = block\n \n def get_block_count(self):\n return self.block_count\n \n def append(self, value):\n self.block_count += 1\n\n if self.get_genesis_block() is None:\n self.set_genesis_block(Block(datetime.datetime.now(), \"[Genesis] \" + value, \"None\", self.get_block_count()))\n self.set_last_block(self.get_genesis_block())\n return\n \n # Move to the tail (the last node)\n block = self.get_genesis_block()\n while block.get_next_block():\n block = block.get_next_block()\n \n block.set_next_block(Block(datetime.datetime.now(), \\\n \"[Block \" + str(self.block_count) + \"] \" + value, \\\n block.get_current_hash(), self.get_block_count()))\n\n self.get_last_block().get_next_block().set_previous_block(self.get_last_block())\n self.set_last_block(self.get_last_block().get_next_block())\n\n def size(self):\n return self.block_count + 1 \n\n \ndef get_randomized_data():\n data = ''\n for _ in range(64):\n data = f\"{data}{random.choice(string.ascii_lowercase + string.ascii_uppercase)}\"\n return data\n\ndef dump_blockchain_data(blockchain_name):\n info = ''\n \n info = \"Current number of blocks created: \" + str((blockchain_name.size()))\n\n block = blockchain_name.get_genesis_block()\n info += \"\\nBlock Details Report Start From Genesis Block\\n------------------------------------------\\n\"\n\n while block.get_next_block():\n info += str(block)\n block = block.get_next_block()\n info += \"\\n******************************************\\n\"\n\n info += \"------------------------------------------\\nBlock Details Report End\\n\\n\"\n\n block = blockchain_name.get_last_block()\n info += \"Block Details Report Start From Last Block in chain\\n------------------------------------------\\n\"\n while block:\n info += str(block)\n block = block.get_previous_block()\n info += \"\\n******************************************\\n\"\n \n info += \"------------------------------------------\\nBlock Details Report End\"\n print(info)\n\n\n# 10 block blockchain\nzebra_blockchain = Blockchain()\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\nzebra_blockchain.append(get_randomized_data())\ndump_blockchain_data(zebra_blockchain)\n\n# single block blockchain\namoeba_blockchain = Blockchain()\namoeba_blockchain.append(get_randomized_data())\ndump_blockchain_data(amoeba_blockchain)\n","sub_path":"Project-2/Problem5.py","file_name":"Problem5.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"568948126","text":"from fpt import four_point_transform\nfrom skimage.filters import threshold_local\nimport pytesseract\nimport numpy as np\nfrom cv2 import cv2\nimport imutils\nimport sys\n\n# image_fullpath=sys.argv[1]\n# image_name=sys.argv[2]\n\n\n# Image=cv2.imread(str(image_fullpath))\n\n# image_save_path=image_fullpath.replace(image_name,\"temp.png\")\n\n#--------------------\n\n#--------------------\nImage = cv2.imread('14.jpeg')\n\nwidth,height,_ = Image.shape\nratio =0.25\nprint(width,height)\nOriginal = Image.copy()\nimage = cv2.resize(Image,(int(height*ratio),int(width*ratio)))\nprint(image.shape)\n# image_ogratio = image.copy()\n# image = imutils.resize(image, height = 500)\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# cv2.imshow(\"gray\",gray)\ngray = cv2.GaussianBlur(gray, (5, 5), 0)\n# cv2.imshow(\"blur\",gray)\n\n# gray = cv2.GaussianBlur(gray, (5, 5), 0)\n# cv2.imshow(\"Blurred\",gray)\n\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\neqhisted = clahe.apply(gray)\n# cv2.imshow(\"eqhisted\",eqhisted)\n\n# cv2.imshow(\"eqhisted\",eqhisted)\n# cv2.imshow(\"Blurred2\",gray)\n\nedged = cv2.Canny(eqhisted, 25, 100)\n# cv2.imshow(\"edged\",edged)\n\nkernel = np.ones((5,5),np.uint8)\nedged = cv2.dilate(edged,kernel,iterations = 1)\n\n# cv2.imshow(\"dilate\",edged)\n\n\n# find the contours in the edged image, keeping only the\n# largest ones, and initialize the screen contour\ncnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n# cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n# cnts = imutils.grab_contours(cnts)\ncnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n# loop over the contours\nfor c in cnts:\n\t# approximate the contour\n\tperi = cv2.arcLength(c, True)\n\tapprox = cv2.approxPolyDP(c, 0.02 * peri, True)\n\t# if our approximated contour has four points, then we\n\t# can assume that we have found our screen\n\tif len(approx) == 4:\n\t\tscreenCnt = approx\n\t\tbreak\nelse:\n\tprint(\"Approx\",approx)\n\tscreenCnt = approx[:4]\n\t\n# apply the four point transform to obtain a top-down\n# view of the original image\nwarped = four_point_transform(Image, screenCnt.reshape(4, 2) / ratio)\n# convert the warped image to grayscale, then threshold it\n# to give it that 'black and white' paper effect\nwarped= cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n\n# cv2.drawContours(image,cnts,-1,(0,0,255),2)\n# cv2.imshow(\"Contours\",image)\n# cv2.imshow(\"Warped\",cv2.resize(warped,(int(height*ratio),int(width*ratio))))\n\n# blur = cv2.GaussianBlur(warped,(3,3),0)\n# edged = cv2.Canny(blur, 25, 100)\n# cv2.imshow(\"edged\",edged)\n\nth3 = cv2.adaptiveThreshold(warped,255,cv2.ADAPTIVE_THRESH_MEAN_C,\\\n cv2.THRESH_OTSU,11,2)\n\n# ret3,th3 = cv2.threshold(warped,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\ncv2.imshow(\"thresholded\",warped)\ncv2.waitKey(0)\n#--------------------------------\n# # rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))\n# edged = cv2.Canny(th3, 25, 100)\n# # dilation = cv2.dilate(edged, rect_kernel, iterations = 8) \n\n# # cv2.imshow(\"dialeted\",cv2.resize(dilation,(int(height*ratio),int(width*ratio))))\n\n# # contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) \n# # for cnt in contours: \n# # x, y, w, h = cv2.boundingRect(cnt) \n \n# # # Drawing a rectangle on copied image \n# # rect = cv2.rectangle(warped, (x, y), (x + w, y + h), (0, 255, 0), 2) \n# # cv2.imshow(\"dfg\",cv2.resize(warped,(int(height*ratio),int(width*ratio))))\n# #--------------------------------\n\n# def dilate(ary, N, iterations):\n# \"\"\"Dilate using an NxN '+' sign shape. ary is np.uint8.\"\"\"\n# rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))\n# # kernel = np.zeros((N, N), dtype=np.uint8)\n# # kernel[(N - 1) // 2, :] = 1\n# dilated_image = cv2.dilate(ary / 255, rect_kernel, iterations=iterations)\n\n# # kernel = np.zeros((N, N), dtype=np.uint8)\n# # kernel[:, (N - 1) // 2] = 1\n# # dilated_image = cv2.dilate(dilated_image, rect_kernel, iterations=iterations)\n# dilated_image = dilated_image.astype('uint8')\n# return dilated_image\n# \"\"\"Dilate the image until there are just a few connected components.\n# Returns contours for these components.\"\"\"\n# # Perform increasingly aggressive dilation until there are just a few\n# # connected components.\n# count = 21\n# dilation = 5\n# n = 1\n# while count > 16:\n# \tn += 1\n# \tdilated_image = dilate(edged, N=3, iterations=n)\n# \tcontours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n# \tcount = len(contours)\n# # print dilation\n# # Image.fromarray(edges).show()\n# # Image.fromarray(255 * dilated_image).show()\n# cv2.imshow(\"dialted\",dilated_image)\n# for cnt in contours: \n# x, y, w, h = cv2.boundingRect(cnt) \n \n# # Drawing a rectangle on copied image \n# rect = cv2.rectangle(warped, (x, y), (x + w, y + h), (0, 255, 0), 2) \n# cv2.imshow(\"dfg\",cv2.resize(warped,(int(height*ratio),int(width*ratio))))\n\n\n\n# #---------------------------------------------------\n\n\n# # cv2.imshow(\"Original\", imutils.resize(image_ogratio, height = 650))\n# # cv2.imshow(\"Resized\",image)\n# cv2.imshow(\"Scanned\",cv2.resize(th3,(int(height*ratio),int(width*ratio))))\n# cv2.waitKey(0)\n# pytesseract.pytesseract.tesseract_cmd = r'C:\\Users\\Pavan Bhadaja\\tesseract.exe'\n# print(pytesseract.image_to_string(th3, timeout=5,lang=\"eng\")) # Timeout after 2 seconds\n\n# def tokenizer(txt):\n# lines = txt.split(\"\\n\")\n# tokens = []\n# for line in lines:\n# line = line.strip()\n# if len(line) > 0:\n# tokens.append(line)\n\n# return tokens()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"339857490","text":"from abc import ABC\nimport torch as th\n\nfrom .constants import SCHEDULE_F\n\nclass FuncDAG(object):\n class Node(object):\n def __init__(self, name, stage, pipe_f, sched_f):\n self.name = name\n self.stage = stage\n pipe_f = th.tensor(pipe_f, dtype=th.float32)\n sched_f = th.tensor(sched_f[:SCHEDULE_F], dtype=th.float32)\n sched_f = th.log(1.0 + sched_f)\n self.features = th.cat([pipe_f, sched_f], 0)\n self.state = None\n self.sched_f = sched_f\n self.pipe_f = pipe_f\n\n @property\n def id(self):\n return \"{}_{}\".format(self.name, self.stage)\n\n def cuda(self):\n if self.state is not None:\n self.state = self.state.cuda()\n\n if self.features is not None:\n self.features = self.features.cuda()\n\n def __repr__(self):\n return \"Node {}\".format(self.id)\n\n class Edge(object):\n def __init__(self, consumer, producer, calls):\n self.src = consumer\n self.dst = producer\n self.features = th.tensor(calls, dtype=th.float32).view(1)\n self.state = None\n\n def cuda(self):\n if self.state is not None:\n self.state = self.state.cuda()\n\n if self.features is not None:\n self.features = self.features.cuda()\n\n def __repr__(self):\n return \"Edge {} -> {}\".format(\n self.src.id, self.dst.id)\n \n def __repr__(self):\n s = \"Graph with nodes:\\n\"\n for n in self.nodes:\n s += \" {}\\n\".format(n)\n s += \"and edges:\\n\"\n for e in self.edges:\n s += \" {}\\n\".format(e)\n return s\n\n @property\n def edges(self):\n edges = []\n for n in self.nodes:\n edges += self.outgoing_edges[n]\n return edges \n\n @property\n def edge_features_size(self):\n e = self.edges\n if e:\n return e[0].features.shape\n else:\n raise ValueError(\"graph has no edges\")\n\n @property\n def node_features_size(self):\n n = self.nodes\n if n:\n return n[0].features.shape\n else:\n raise ValueError(\"graph has no nodes\")\n\n def cuda(self):\n for n in self.nodes:\n n.cuda()\n\n for e in self.edges:\n e.cuda()\n\n if self.state is not None:\n self.state = self.state.cuda()\n\n def __init__(self, features=None, nodes=[], edges=[]):\n # TODO(mgharbi): store pipeline depth -> iterations of graph net\n\n self.state = None\n\n self.nodes = []\n self.outgoing_edges = {}\n self.incoming_edges = {}\n\n feature_map = {}\n stage_to_node_idx = {}\n last_stage_for_node = {}\n\n # TODO: keep only partial graph for partially scheduled states \n # or find an encoding of partially scheduled features\n\n # Build feature map\n for f in features:\n name = f[b\"name\"].decode('utf-8')\n stage = f[b\"stage_idx\"]\n feature_map[(name, stage)] = (f[b\"pipeline\"], f[b\"schedule\"])\n\n # Instantiate nodes with their features\n for n_ in nodes:\n func_id = n_[b\"id\"]\n name = n_[b\"name\"].decode('utf-8')\n n_stages = len(n_[b\"stages\"])\n last_stage_for_node[name] = n_stages - 1\n\n prev_stage = None\n for s in range(n_stages-1, -1, -1):\n if (name, s) not in feature_map.keys(): # TODO: hack\n continue\n\n pipe_f, sched_f = feature_map[(name, s)]\n n = FuncDAG.Node(name, s, pipe_f, sched_f)\n\n stage_to_node_idx[(name, s)] = n\n\n self.nodes.append(n)\n\n # Initialize adjancency lists\n self.incoming_edges[n] = []\n self.outgoing_edges[n] = []\n\n # Add edge between update stages\n if prev_stage is not None:\n e = FuncDAG.Edge(prev_stage, n, -1) # TODO(mgharbi): we don't have calls info for update defs\n\n # Fill-in bidirectional edge maps\n self.incoming_edges[n].append(e)\n self.outgoing_edges[prev_stage].append(e)\n\n prev_stage = n \n\n # Instantiate edges, with their features\n for e_ in edges:\n stage = e_[b\"consumer_stage\"]\n calls = e_[b\"calls\"]\n cons = e_[b\"consumer\"].decode('utf-8')\n prod = e_[b\"producer\"].decode('utf-8')\n\n prod_stage = last_stage_for_node[prod]\n\n # TODO: hack\n if (cons, stage) not in stage_to_node_idx.keys() or (prod, prod_stage) not in stage_to_node_idx.keys():\n continue\n\n consumer = stage_to_node_idx[(cons, stage)]\n producer = stage_to_node_idx[(prod, prod_stage)]\n\n e = FuncDAG.Edge(consumer, producer, calls)\n\n # Fill-in bidirectional edge maps\n self.incoming_edges[producer].append(e)\n self.outgoing_edges[consumer].append(e)\n\n def adjacency_repr(self):\n num_nodes = len(self.nodes)\n adj = th.zeros(num_nodes, num_nodes)\n\n id_map = {}\n features = []\n for idx, n in enumerate(self.nodes):\n adj[idx, idx] = 1 # adds self connections\n id_map[n] = idx\n features.append(n.features)\n features = th.stack(features)\n\n for e in self.edges:\n id_0 = id_map[e.src]\n id_1 = id_map[e.dst]\n adj[id_0, id_1] = 1\n adj[id_1, id_0] = 1\n return features, adj\n","sub_path":"graphscheduler/func_dag.py","file_name":"func_dag.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"487359040","text":"\"\"\"Item crud client.\"\"\"\nimport re\nfrom datetime import datetime\nfrom typing import Any, Dict, List, Optional, Type, Union\nfrom urllib.parse import urljoin\n\nimport attr\nimport orjson\nfrom asyncpg.exceptions import InvalidDatetimeFormatError\nfrom buildpg import render\nfrom fastapi import HTTPException\nfrom pydantic import ValidationError\nfrom stac_pydantic.links import Relations\nfrom stac_pydantic.shared import MimeTypes\nfrom starlette.requests import Request\n\nfrom stac_fastapi.pgstac.models.links import CollectionLinks, ItemLinks, PagingLinks\nfrom stac_fastapi.pgstac.types.search import PgstacSearch\nfrom stac_fastapi.types.core import AsyncBaseCoreClient\nfrom stac_fastapi.types.errors import InvalidQueryParameter, NotFoundError\nfrom stac_fastapi.types.stac import Collection, Collections, Item, ItemCollection\n\nNumType = Union[float, int]\n\n\n@attr.s\nclass CoreCrudClient(AsyncBaseCoreClient):\n \"\"\"Client for core endpoints defined by stac.\"\"\"\n\n search_request_model: Type[PgstacSearch] = attr.ib(init=False, default=PgstacSearch)\n\n async def all_collections(self, **kwargs) -> Collections:\n \"\"\"Read all collections from the database.\"\"\"\n request: Request = kwargs[\"request\"]\n base_url = str(request.base_url)\n pool = request.app.state.readpool\n\n async with pool.acquire() as conn:\n collections = await conn.fetchval(\n \"\"\"\n SELECT * FROM all_collections();\n \"\"\"\n )\n linked_collections: List[Collection] = []\n if collections is not None and len(collections) > 0:\n for c in collections:\n coll = Collection(**c)\n coll[\"links\"] = await CollectionLinks(\n collection_id=coll[\"id\"], request=request\n ).get_links(extra_links=coll.get(\"links\"))\n\n linked_collections.append(coll)\n\n links = [\n {\n \"rel\": Relations.root.value,\n \"type\": MimeTypes.json,\n \"href\": base_url,\n },\n {\n \"rel\": Relations.parent.value,\n \"type\": MimeTypes.json,\n \"href\": base_url,\n },\n {\n \"rel\": Relations.self.value,\n \"type\": MimeTypes.json,\n \"href\": urljoin(base_url, \"collections\"),\n },\n ]\n collection_list = Collections(collections=linked_collections or [], links=links)\n return collection_list\n\n async def get_collection(self, id: str, **kwargs) -> Collection:\n \"\"\"Get collection by id.\n\n Called with `GET /collections/{collectionId}`.\n\n Args:\n id: Id of the collection.\n\n Returns:\n Collection.\n \"\"\"\n collection: Optional[Dict[str, Any]]\n\n request: Request = kwargs[\"request\"]\n pool = request.app.state.readpool\n async with pool.acquire() as conn:\n q, p = render(\n \"\"\"\n SELECT * FROM get_collection(:id::text);\n \"\"\",\n id=id,\n )\n collection = await conn.fetchval(q, *p)\n if collection is None:\n raise NotFoundError(f\"Collection {id} does not exist.\")\n\n collection[\"links\"] = await CollectionLinks(\n collection_id=id, request=request\n ).get_links(extra_links=collection.get(\"links\"))\n\n return Collection(**collection)\n\n async def _search_base(\n self, search_request: PgstacSearch, **kwargs: Any\n ) -> ItemCollection:\n \"\"\"Cross catalog search (POST).\n\n Called with `POST /search`.\n\n Args:\n search_request: search request parameters.\n\n Returns:\n ItemCollection containing items which match the search criteria.\n \"\"\"\n items: Dict[str, Any]\n\n request: Request = kwargs[\"request\"]\n pool = request.app.state.readpool\n\n # pool = kwargs[\"request\"].app.state.readpool\n req = search_request.json(exclude_none=True)\n\n try:\n async with pool.acquire() as conn:\n q, p = render(\n \"\"\"\n SELECT * FROM search(:req::text::jsonb);\n \"\"\",\n req=req,\n )\n items = await conn.fetchval(q, *p)\n except InvalidDatetimeFormatError:\n raise InvalidQueryParameter(\n f\"Datetime parameter {search_request.datetime} is invalid.\"\n )\n\n next: Optional[str] = items.pop(\"next\", None)\n prev: Optional[str] = items.pop(\"prev\", None)\n collection = ItemCollection(**items)\n cleaned_features: List[Item] = []\n\n for feature in collection.get(\"features\") or []:\n feature = Item(**feature)\n if (\n search_request.fields.exclude is None\n or \"links\" not in search_request.fields.exclude\n ):\n # TODO: feature.collection is not always included\n # This code fails if it's left outside of the fields expression\n # I've fields extension updated test cases to always include feature.collection\n feature[\"links\"] = await ItemLinks(\n collection_id=feature[\"collection\"],\n item_id=feature[\"id\"],\n request=request,\n ).get_links(extra_links=feature.get(\"links\"))\n\n exclude = search_request.fields.exclude\n if exclude and len(exclude) == 0:\n exclude = None\n include = search_request.fields.include\n if include and len(include) == 0:\n include = None\n cleaned_features.append(feature)\n\n collection[\"features\"] = cleaned_features\n collection[\"links\"] = await PagingLinks(\n request=request,\n next=next,\n prev=prev,\n ).get_links()\n return collection\n\n async def item_collection(\n self, id: str, limit: Optional[int] = None, token: str = None, **kwargs\n ) -> ItemCollection:\n \"\"\"Get all items from a specific collection.\n\n Called with `GET /collections/{collectionId}/items`\n\n Args:\n id: id of the collection.\n limit: number of items to return.\n token: pagination token.\n\n Returns:\n An ItemCollection.\n \"\"\"\n # If collection does not exist, NotFoundError wil be raised\n await self.get_collection(id, **kwargs)\n\n req = self.search_request_model(collections=[id], limit=limit, token=token)\n item_collection = await self._search_base(req, **kwargs)\n links = await CollectionLinks(\n collection_id=id, request=kwargs[\"request\"]\n ).get_links(extra_links=item_collection[\"links\"])\n item_collection[\"links\"] = links\n return item_collection\n\n async def get_item(self, item_id: str, collection_id: str, **kwargs) -> Item:\n \"\"\"Get item by id.\n\n Called with `GET /collections/{collectionId}/items/{itemId}`.\n\n Args:\n id: Id of the item.\n\n Returns:\n Item.\n \"\"\"\n # If collection does not exist, NotFoundError wil be raised\n await self.get_collection(collection_id, **kwargs)\n\n req = self.search_request_model(\n ids=[item_id], collections=[collection_id], limit=1\n )\n item_collection = await self._search_base(req, **kwargs)\n if not item_collection[\"features\"]:\n raise NotFoundError(\n f\"Item {item_id} in Collection {collection_id} does not exist.\"\n )\n\n return Item(**item_collection[\"features\"][0])\n\n async def post_search(\n self, search_request: PgstacSearch, **kwargs\n ) -> ItemCollection:\n \"\"\"Cross catalog search (POST).\n\n Called with `POST /search`.\n\n Args:\n search_request: search request parameters.\n\n Returns:\n ItemCollection containing items which match the search criteria.\n \"\"\"\n item_collection = await self._search_base(search_request, **kwargs)\n return ItemCollection(**item_collection)\n\n async def get_search(\n self,\n collections: Optional[List[str]] = None,\n ids: Optional[List[str]] = None,\n bbox: Optional[List[NumType]] = None,\n datetime: Optional[Union[str, datetime]] = None,\n limit: Optional[int] = None,\n query: Optional[str] = None,\n token: Optional[str] = None,\n fields: Optional[List[str]] = None,\n sortby: Optional[str] = None,\n **kwargs,\n ) -> ItemCollection:\n \"\"\"Cross catalog search (GET).\n\n Called with `GET /search`.\n\n Returns:\n ItemCollection containing items which match the search criteria.\n \"\"\"\n # Parse request parameters\n base_args = {\n \"collections\": collections,\n \"ids\": ids,\n \"bbox\": bbox,\n \"limit\": limit,\n \"token\": token,\n \"query\": orjson.loads(query) if query else query,\n }\n if datetime:\n base_args[\"datetime\"] = datetime\n\n if sortby:\n # https://github.com/radiantearth/stac-spec/tree/master/api-spec/extensions/sort#http-get-or-post-form\n sort_param = []\n for sort in sortby:\n sortparts = re.match(r\"^([+-]?)(.*)$\", sort)\n if sortparts:\n sort_param.append(\n {\n \"field\": sortparts.group(2).strip(),\n \"direction\": \"desc\" if sortparts.group(1) == \"-\" else \"asc\",\n }\n )\n base_args[\"sortby\"] = sort_param\n\n if fields:\n includes = set()\n excludes = set()\n for field in fields:\n if field[0] == \"-\":\n excludes.add(field[1:])\n elif field[0] == \"+\":\n includes.add(field[1:])\n else:\n includes.add(field)\n base_args[\"fields\"] = {\"include\": includes, \"exclude\": excludes}\n\n # Do the request\n try:\n search_request = self.search_request_model(**base_args)\n except ValidationError:\n raise HTTPException(status_code=400, detail=\"Invalid parameters provided\")\n return await self.post_search(search_request, request=kwargs[\"request\"])\n","sub_path":"stac_fastapi/pgstac/stac_fastapi/pgstac/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"310448495","text":"from threading import Thread\nfrom utils.log import Logger\n\nfrom psgd.interfaces import ITransfer, ReadTimeOut\nfrom network.interfaces import ICommunication_Controller\n\n\nclass NTransfer(ITransfer):\n \"\"\"\n string identifier used in this class.\n identifier used for json key tag.\n wrote and solved only within this code scope.\n \"\"\"\n STR_LAYER_NO = 'NLayer_NO'\n STR_W_TYPE = 'NW_Type'\n INT_RETRY_LIMIT = 5\n\n def __init__(self, weights_ctrl, com: ICommunication_Controller, logger=Logger('Default Transfer')):\n \"\"\"\n build a transfer controller for transferring data between local ML process and\n remote server process.\n Each training process has exactly one transfer instance.\n :param weights_ctrl: list of controller for transferring data, identified for each layer, each type.\n :param com: Network Communication Controller\n \"\"\"\n\n # formatted as weights_initial\n self.type_weights_controller = weights_ctrl\n self.communication_process = com\n\n self.working_thread = Thread(name='Transfer thread for node {}.' \\\n .format(self.communication_process.Node_Id), target=self.__run)\n self.Node_ID = com.Node_Id\n self.__log = logger\n\n def put_weights(self, content, tag, w_type='w'):\n \"\"\"\n Put weights instantly\n No waiting\n \"\"\"\n # Copy tag\n update_packs = self.type_weights_controller[tag.Layer_No][w_type].update_weights(content, tag)\n for update_pack in update_packs:\n sender, dic = update_pack\n self.__send(sender, dic, tag.Layer_No, w_type)\n\n def get_weights(self, tag, w_type='w'):\n \"\"\"\n Acquire weights instantly\n No waiting\n \"\"\"\n try:\n return self.type_weights_controller[tag.Layer_No][w_type].require_weights(tag)\n except ReadTimeOut as e:\n for sender, dic in e.retry():\n self.__send(sender, dic, tag.Layer_No, w_type)\n self.__log.log_error('Message retry to node {}'.format(sender))\n return self.type_weights_controller[tag.Layer_No][w_type].require_weights(tag)\n\n\n def start_transfer(self):\n \"\"\"\n Start transferring data between psgd controller and communication process.\n reference call (IParallelSGD.accept_data()) without sync check, is not thread safe call.\n :return: None\n \"\"\"\n self.working_thread.start()\n\n def __send(self, target, dic, layer_no, w_type):\n \"\"\"\n Write tag and send\n \"\"\"\n # skip none\n if len(target) == 0:\n return\n # write tag\n dic[NTransfer.STR_LAYER_NO] = layer_no\n dic[NTransfer.STR_W_TYPE] = w_type\n self.communication_process.send_one(target, dic)\n\n def __run(self):\n \"\"\"\n Working thread.\n Quit if self.communication_process is not alive.\n :return: None\n \"\"\"\n try:\n while not self.communication_process.is_closed():\n sender, dic = self.communication_process.get_one()\n # blocking other format\n if not isinstance(dic, dict):\n continue\n # quit processing if the object is not sent by the class instance like NTransfer\n try:\n layer_no = dic[NTransfer.STR_LAYER_NO]\n w_type = dic[NTransfer.STR_W_TYPE]\n update_packs = self.type_weights_controller[layer_no][w_type].accept_data(dic)\n # self.Log.log_message('Message accepted.')\n if update_packs is None:\n continue\n for update_pack in update_packs:\n sender, dic = update_pack\n self.__send(sender, dic, layer_no, w_type)\n # self.Log.log_message('Message back to node {}'.format(sender))\n except KeyError as e:\n # print DEBUG message\n import sys\n import traceback\n exc_type, exc_value, exc_tb = sys.exc_info()\n exc_tb = traceback.format_exception(exc_type, exc_value, exc_tb)\n for line in exc_tb:\n self.__log.log_message(line)\n # print DEBUG message\n except OSError as e:\n self.__log.log_message('Transfer thread report an error: {}'.format(e))\n except ValueError:\n pass\n finally:\n self.__log.log_message('Transfer thread exited safely.')\n","sub_path":"psgd/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"651796652","text":"import re\nimport statistics as s\nvalues=[]\nimport numpy as np\n#numbers=[]\nwith open('results.txt') as file:\n for line in file:\n if \"7. Time taken to create s2s connectivity\" in line:\n x=re.split(\"7. Time taken to create s2s connectivity\", line)\n values.append(x[1])\n a=list(map(str.strip, values))\n #numbers = [int(float(x)) for x in a]\n x = np.array(a)\n y = x.astype(np.float)\n print (y)\n print (np.mean(y))\n","sub_path":"search_avg.py","file_name":"search_avg.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"598772318","text":"#%%\nimport numpy as np\nimport pandas as pd \nimport tqdm\nimport glob\n#%%\n# Load the raw ecocyc master list. \necocyc = pd.read_csv('../../data/ecocyc_raw_data/2020-03-04_ecocyc_master.tab',\n delimiter='\\t')\n\n#%%\n# Iterate through each gene name and, for each synonym, create a new entry. \ndfs = []\ni = 0\nfor g, d in tqdm.tqdm(ecocyc.groupby(['gene_name'])):\n # Stitch together the go ids. \n go_ids = d['go_terms'].values[0]\n if str(go_ids) != 'nan':\n go_ids = '; '.join([s.strip() for s in go_ids.split('//')])\n else:\n go_ids = 'no ontology'\n mw = d['mw_kda'].unique()[0]\n if '//' in str(mw):\n mw = float(mw.split('//')[0])\n else:\n mw = float(mw)\n # Generate the base dict. \n base_dict = {'gene_name':g.lower(),\n 'b_number':d['b_number'].unique(), \n 'gene_product':d['gene_product'].unique(),\n 'mw_fg': mw * 1.660E-6,\n 'go_terms':go_ids}\n\n # Update the dataframe\n dfs.append(pd.DataFrame(base_dict, index=[0]))\n\n # Iterate through each synonym\n syn = d['synonyms'].values[0]\n if str(syn) != 'nan':\n syn_split = syn.split('//')\n syns = [s.strip().replace('\"', '').lower() for s in syn_split]\n for s in syns:\n base_dict['gene_name'] = s\n dfs.append(pd.DataFrame(base_dict, index=[0]))\n\ndf = pd.concat(dfs, sort=False)\nprint(i, len(ecocyc))\n#%%\n# Drop duplicate rows\ndf.drop_duplicates(inplace=True)\n\n\n# %%\n# Load all of the cog lists and collate\ncogs = pd.concat([pd.read_csv(f) for f in glob.glob('../../data/cog_data/*.csv')])\ncogs.drop(columns=['Unnamed: 4'], axis=1, inplace=True)\n\n# Assign the cog information to the ecocyc df based on the b number\nfor g, d in df.groupby('b_number'):\n cog = cogs[cogs['b_number']==g]\n if len(cog) == 0:\n df.loc[df['b_number']==g, 'cog_class'] = 'Not Assigned'\n df.loc[df['b_number']==g, 'cog_letter'] = 'Not Assigned'\n df.loc[df['b_number']==g, 'cog_category'] = 'Not Assigned'\n df.loc[df['b_number']==g, 'cog_desc'] = 'Not Assigned'\n else:\n df.loc[df['b_number']==g, 'cog_class'] = cog['cog_class'].values[0]\n df.loc[df['b_number']==g, 'cog_letter'] = cog['cog_letter'].values[0]\n df.loc[df['b_number']==g, 'cog_category'] = cog['cog_category'].values[0]\n df.loc[df['b_number']==g, 'cog_desc'] = cog['cog_desc'].values[0]\n\n# %%\ndf.to_csv('../../data/ecoli_genelist_master.csv', index=False)\n\n# %%\n","sub_path":"code/processing/ecoli_proteome_master.py","file_name":"ecoli_proteome_master.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"271916540","text":"import json\nimport os\n\ndef main():\n\tgerman = []\n\thanzi = []\n\tpinyin = []\n\n\twith open('data' + os.path.sep + 'HSK2 German', 'r') as input_file:\n\t\tfor line in input_file:\n\t\t\tgerman.append(line.strip())\n\n\twith open('data' + os.path.sep + 'HSK2 Hanzi', 'r') as input_file:\n\t\tfor line in input_file:\n\t\t\thanzi.append(line.strip())\n\n\twith open('data' + os.path.sep + 'HSK2 Pinyin', 'r') as input_file:\n\t\tfor line in input_file:\n\t\t\tpinyin.append(line.strip())\n\n\tvocables = []\n\n\tfor index, question in enumerate(german):\n\t\tvocables.append({\n\t\t\t'question':german[index],\n\t\t\t'answer':hanzi[index],\n\t\t\t'info':pinyin[index]\n\t\t})\n\n\thsk = {\n\t\t'identifier': 'HSK2',\n\t\t'questions': vocables\n\t}\n\n\twith open('data' + os.path.sep + 'HSK2.json', 'w') as output_file:\n\t\tjson.dump(hsk, output_file, ensure_ascii=False, indent='\\t', sort_keys=True)\n\n\twith open('data' + os.path.sep + 'HSK2.json', 'r') as input_file:\n\t\thsk = json.load(input_file)\n\n\tfor index, voc in enumerate(hsk['questions']):\n\t\tprint('VOC #'+str(index), voc)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"src/convert_to_json.py","file_name":"convert_to_json.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"275707633","text":"import numpy as np\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom astropy.coordinates import SkyCoord\n\ndef inv(l):\n return [not i for i in l]\n\nresp=raw_input('hist, map, or both (h/m/b)?')\np=Table.read('GBNCC_pointings.fits')\nt=Table.read('GBNCC_mask_fraction.fits')\nobs=Table.read('GBNCC_observed_052019.txt',format='ascii')\ncond=p['obs']==0\n\nif resp=='h' or resp=='b':\n\tplt.figure(figsize=(10,7))\n\trfi_ra=SkyCoord(t[t['true_bw']<50]['gl'],t[t['true_bw']<50]['gb'],unit='deg',frame='galactic').transform_to('icrs').ra.value\n\tn,bins,patches=plt.hist([p[cond]['RAdeg'],rfi_ra],bins=150,align='mid',rwidth=0.9,stacked=True,label=['Unobserved','RFI - redo'])\n\tplt.xlabel('RA [deg]')\n\tplt.ylabel('Count')\n\tplt.grid(axis='y')\n\tplt.legend(loc='upper left')\n\tplt.savefig('unobserved_hist.pdf')\n\tplt.title('GBNCC Beams to Observe')\n\tplt.show()\n\nif resp=='m' or resp=='b':\n\tf = plt.figure(figsize=(10,8))\n\tax = f.add_subplot(111,projection='mollweide')\n\tpos=SkyCoord(p['RAdeg'],p['Decdeg'],unit='deg').transform_to('galactic')\n\tl,b=pos.l.value,pos.b.value\n\tinds=np.where(l>180.0)\n\tl[inds]-=360.0\n\n\trfi_pos=SkyCoord(t[t['true_bw']<50]['gl'],t[t['true_bw']<50]['gb'],unit='deg',frame='galactic')\n\trl,rb=rfi_pos.l.value,rfi_pos.b.value\n\tinds=np.where(rl>180.0)\n\trl[inds]-=360.0\n\n\tdeg2rad=np.pi/180\n\tplt.scatter(l[cond]*deg2rad,b[cond]*deg2rad,marker='.',c='blue',alpha=0.1,lw=0)\n\tplt.scatter(360,0,c='blue',label='Unobserved')\n\tplt.scatter(l[inv(cond)]*deg2rad,b[inv(cond)]*deg2rad,marker='.',c='grey',alpha=0.05,lw=0)\n\tplt.scatter(360,0,c='grey',label='Observed')\n\tplt.scatter(rl*deg2rad,rb*deg2rad,marker='.',c='red',alpha=0.5,lw=0)\n\tplt.scatter(360,0,c='red',label='RFI - redo')\n\tplt.grid()\n\tplt.subplots_adjust(left=0.08,bottom=0,right=0.98,top=1)\n\tplt.xlabel('Galactic Longitude [deg]',fontsize=15)\n\tplt.ylabel('Galactic Latitude [deg]',fontsize=15)\n\tplt.title('GBNCC Beams to Observe')\n\tplt.legend(loc='upper left')\n\tplt.savefig('unobserved_skymap.pdf')\n\tplt.show()\n\n\n","sub_path":"unobserved.py","file_name":"unobserved.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"336646157","text":"from .captchaTestSetCreate import createTestSet, cropImage, CAPTCHA_SET_PATH\nfrom .captchaRecognizeMain import longitudinalSplit, CAPTCHA_SET, captchaImageBinary\nimport math\nfrom PIL import Image\nimport os\nimport numpy as np\n\n\n__all__ = [\"recognizeCaptcha\"]\n\n\nclass VectorCompare:\n # 计算矢量大小\n def magnitude(self, concordance):\n total = 0\n for word, count in concordance.items():\n total += np.dot(count, count)\n\n return math.sqrt(total)\n\n # 计算矢量之间的cos值\n def relation(self, concordance1, concordance2):\n relevance = 0\n topvalue = 0\n for word, count in concordance1.items():\n if word in concordance2:\n topvalue += np.dot(count, concordance2[word])\n\n return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2))\n\n\n# 将图片转换为矢量\ndef buildvector(image_object):\n dict1 = dict(enumerate(image_object.getdata()))\n\n return dict1\n\n\ndef recognizeCaptcha(image_path=CAPTCHA_SET_PATH + \"/captcha.jpeg\", dir_path=CAPTCHA_SET_PATH, captcha_set=CAPTCHA_SET):\n dir_path = dir_path\n image_object = Image.open(image_path)\n binary_object = captchaImageBinary(pixel_min=0,\n pixel_max=188, image_path=image_path)\n v = VectorCompare()\n captcha_set = captcha_set\n\n # 加载训练集\n imageset = []\n for letter in captcha_set:\n for img in os.listdir(f'{dir_path}/%s/' % letter):\n temp = []\n if img != \".DS_Store\":\n temp.append(buildvector(Image.open(\n f\"{dir_path}/%s/%s\" % (letter, img))))\n\n imageset.append({letter: temp})\n\n letters = longitudinalSplit(binary_object)\n image_objects = cropImage(\n binary_object, letters, extension=\"jpeg\", dir_path=dir_path, captcha_name=\"captcha_binary\")\n\n count = 0\n result = []\n for test_object in image_objects:\n guess = []\n # 将切割得到的验证码小片段与每个训练片段进行比较\n for image in imageset:\n for x, y in image.items():\n if len(y) != 0:\n guess.append(\n (v.relation(y[0], buildvector(test_object))[0], x))\n\n guess.sort(reverse=True)\n print(\"\", guess[0])\n count += 1\n result.append(guess[0][1])\n\n captcha_code = \"\".join(result)\n print(\n f\"\\n验证码识别结果:{captcha_code}, \", end=\"\")\n return captcha_code\n","sub_path":"crawlerUtils/captcha/recognizeCaptchaMain.py","file_name":"recognizeCaptchaMain.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"167646045","text":"import os\nimport csv\nimport math\nfrom subprocess import call\nfrom shutil import copyfile\nfrom scripts.run_model.run_siamese import construct_paths\nfrom scripts.data.split_file import split_dataset\n\nvalidation_proportion = 0.3\ntest_proportion = 0\n\nconfig_file_path = '../../config/01_bcb_clone_and_fp_balanced_all.yml'\ndata_file_dir_template = '../../data/processed/bcb/clone_and_false_positives/balanced/all-balanced/{run_id}/'\nmodel_name = 'eliminate_abnormal'\n\nmain_filename = 'main.csv'\ntrain_filename = 'train.csv'\nval_filename = 'val.csv'\ntest_filename = main_filename\nbetter_main_filename = 'better.' + main_filename\n\n\ndef main(threshold=0.5):\n \"\"\"This script train the model and check the prediction against the testing set.\n It removes the training data that is too different than the prediction.\n \"\"\"\n\n init_data_file_dir = data_file_dir_template.format(run_id='00')\n abnormal_file_path = os.path.join(init_data_file_dir, 'abnormals.csv')\n\n previous_better_main_file_path = None\n step = 0\n with open(abnormal_file_path, 'w+') as afile:\n awriter = csv.writer(afile)\n\n while step < 100:\n run_id = str(step).zfill(2)\n\n print(\"Running run_id: {}\".format(run_id))\n\n # Make new dir for this run if needed\n data_file_dir = data_file_dir_template.format(run_id=run_id)\n os.makedirs(data_file_dir, exist_ok=True)\n\n # Construct the paths for all kinds of files\n paths = construct_paths(model_name, run_id, data_file_dir, test_filename=test_filename)\n predictions_file_path = paths['predictions_file_path']\n\n current_main_file_path = os.path.join(data_file_dir, main_filename)\n is_current_main_file_exist = os.path.exists(current_main_file_path)\n # Copy main.csv file from the previous run\n if previous_better_main_file_path and not is_current_main_file_exist:\n print(\"Copying main file from previous run...\")\n copyfile(previous_better_main_file_path, current_main_file_path)\n\n is_val_file_exist = os.path.exists(paths['val_file_path'])\n # print(\"Does validation file exist?: {}\".format(is_val_file_exist))\n # Split main.csv into (train, val).csv\n if not is_val_file_exist:\n print(\"Splitting main file...\")\n split_dataset(current_main_file_path, data_file_dir, test_proportion, validation_proportion)\n\n is_model_trained = os.path.exists(paths['data_manager_pickle_file_path'])\n # print(\"data manager pickle file path: {}\".format(paths['data_manager_pickle_file_path']))\n # print(\"is trained: {}\".format(is_model_trained))\n\n # If the model is not trained yet.\n if not is_model_trained:\n # Train the model\n print(\"Start training...\")\n run_script(\"train\", run_id, data_file_dir, test_filename)\n\n is_predicted = os.path.exists(predictions_file_path)\n # print(\"prediction file path: {}\".format(predictions_file_path))\n # print(\"is predicted: {}\".format(is_predicted))\n\n # If the prediction file not generated yet\n if not is_predicted:\n # Predict with the trained models\n print(\"Start predicting...\")\n run_script(\"predict\", run_id, data_file_dir, test_filename)\n\n # Compare the test data with the predict data\n # and remove those with large differences from the training set\n\n test_file_path = paths['test_file_path']\n better_main_file_path = os.path.join(data_file_dir, better_main_filename)\n\n print(\"Removing abnormal clone pairs...\")\n with open(test_file_path) as tfile:\n treader = csv.reader(tfile)\n with open(predictions_file_path) as pfile:\n preader = csv.reader(pfile)\n with open(better_main_file_path, 'w') as bfile:\n bwriter = csv.writer(bfile)\n for row_orig, row_pred in zip(treader, preader):\n ground_truth = float(row_orig[2])\n predicted = float(row_pred[0])\n new_row = row_orig + [predicted]\n if math.fabs(ground_truth - predicted) > threshold:\n # abnormal case\n awriter.writerow(new_row)\n else:\n # better train file\n bwriter.writerow(new_row)\n\n previous_better_main_file_path = better_main_file_path\n\n step += 1\n\n # Test with only N times of traverse\n if step >= 50:\n break\n\n\ndef run_script(mode, run_id, data_file_dir, test_filename):\n call(['python', 'run_siamese.py',\n mode,\n '--config_file={}'.format(config_file_path),\n '--model_name={}'.format(model_name),\n '--run_id={}'.format(run_id),\n '--data_file_dir={}'.format(data_file_dir),\n '--test_filename={}'.format(test_filename),\n ])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/run_model/eliminate_abnormals.py","file_name":"eliminate_abnormals.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"20704359","text":"import os\nimport sys\n\ncase_num = 1\nwhile True:\n os.system('python3 gen.py')\n os.system('./test1')\n os.system('./test2')\n s1 = open('out1.txt', 'r')\n s2 = open('out2.txt', 'r')\n\n c1 = s1.read()\n c2 = s2.read()\n\n length = max(len(c1), len(c2))\n try:\n for i in range(length):\n if c1[i] != c2[i]:\n print('find!')\n s = open('1.txt', 'r')\n case = s.read()\n print('case:')\n print(case)\n s1.close()\n s2.close()\n s.close()\n sys.exit()\n except:\n break\n\n print('exploring wrong case... case num:', case_num)\n case_num += 1","sub_path":"generator/cpp_ex/cpp_gen_act.py","file_name":"cpp_gen_act.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"322534970","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import FormRequest, Request\nfrom urllib.parse import quote, urlencode\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.loader import ItemLoader\nfrom lieping.items import LiepingItem\nimport copy\n\nclass LiepinSpider(scrapy.Spider):\n name = 'liepin'\n allowed_domains = ['liepin.com']\n # 主页url\n start_urls = ['https://www.liepin.com/it/',]\n # 搜索页面url\n search_url = 'https://www.liepin.com/zhaopin/?'\n\n def parse(self, response):\n job_lists = response.css('ul.sidebar li dl')[0]\n first_ctgs = job_lists.css('dt::text').getall()\n second_ctgs = job_lists.css('dd')\n for i in range(0,5):\n for a in second_ctgs[i].css('a'):\n loader = ItemLoader(item=LiepingItem(), response=response)\n # 职位一级分类\n loader.add_value('first_ctg', first_ctgs[i])\n # 职位二级分类\n second_ctg = a.css('::text').get()\n loader.add_value('second_ctg', a.css('::text').get())\n # 构造GET请求参数\n data = {}\n # 搜索岗位\n data['key'] = second_ctg\n for pageno in range(0, self.settings.get('MAX_PAGES')):\n # 当前页面\n data['curPage'] = str(pageno)\n url= self.search_url + urlencode(data)\n yield Request(url=url,\n callback=self.parse_list,\n meta={'item':copy.deepcopy(loader.load_item())},\n dont_filter=True,\n priority=10,)\n\n\n def parse_list(self, response):\n # 获取每一张卡片\n positions = response.css('ul.sojob-list li div.job-info')\n for position in positions:\n # 岗位名称\n position_name = position.css('a::text').get().strip()\n # 提取岗位详情url\n url = position.css('a::attr(href)').get()\n url = response.urljoin(url)\n # 检测urljoin是否正常作用\n # print(url)\n # 将岗位名称存入item\n loader = ItemLoader(item=response.meta['item'], response=response)\n loader.add_value('positionname', position_name)\n\n yield Request(url=url,\n callback=self.get_detail, \n meta={'item':loader.load_item()},\n priority=8)\n\n def get_detail(self,response):\n details = response.css('div.job-description div.content *::text').getall()\n details = [detail.strip() for detail in details if detail.strip()]\n loader = ItemLoader(item=response.meta['item'], response=response)\n loader.add_value('positiondetail', ''.join(details))\n return loader.load_item()","sub_path":"crawler/lieping/spiders/liepin.py","file_name":"liepin.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"209597896","text":"import logging\nimport discord\nimport datetime\n\nfrom discord.ext import commands\n\nfrom sgargabot.core.decorators import (\n callable_once,\n callable_n_times,\n callable_once_within,\n callable_once_per_day,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Gambling(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"slotmachine\", aliases=[\"slot\"])\n @callable_once_within(datetime.timedelta(hours=1))\n async def slotmachine(self, ctx):\n await ctx.send(\"You only have five calls!\")\n\n @slotmachine.error\n async def slotmachine_error(self, ctx, error):\n logger.info(error)\n await ctx.send(\n f\"Sorry bro! You can only call this command once every ten seconds. Time before you can call this function: {error.original.get_remaining_time()}\"\n )\n\n\ndef setup(bot):\n bot.add_cog(Gambling(bot))\n","sub_path":"sgargabot/cogs/testing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"536060952","text":"from flask import render_template, flash, redirect, request, url_for, send_file,session, request\nfrom flask_socketio import SocketIO, emit, disconnect\n# from flask.ext import excel\n# import pygal\n# from pygal import Config\n# from pygal.style import DarkSolarizedStyle\nfrom datetime import datetime, timedelta\nfrom app import app\nfrom .forms import LoginForm, ConfigForm, AddConfigKeyForm, DelConfigKeyForm\n\nfrom app.models import ConfigItem,db\nfrom sqlalchemy import desc\nfrom .configdb import writeconfig\nimport settings\nimport time\nimport datetime\nimport gc\nfrom app.motorctl import motorStart, motorStop, getPosition, getStartPosition, getDistancetogo, setPosition, getMode, STOPPED, STOP_NOW\n\nthread = None\nasync_mode = None\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n#socketio = SocketIO(app, async_mode=\"threading\")\n#socketio = SocketIO(app, async_mode=\"gevent\") \n#socketio = SocketIO(app, async_mode=\"gevent_uwsgi\")\nvalues = {\n 'automan_mode':'Manual',\n 'leftbtntext':'Left',\n 'modebtntext':'Start',\n 'rightbtntext':'Right',\n 'startstop': 1,\n\t'position':0,\n\t'measuredist':1,\n\t'distance':0,\n\t'set_distance':100,\n\t'distancetogo':1000,\n\t'end_position':0,\n\t'start_position':0,\n\t'count':0,\n\t'end_count':0,\n\t'mode':app.config['STOPPED'],\n 'direction':app.config['STOPPED'],\t\n }\n@app.errorhandler(400)\ndef not_found_error(error):\n return render_template('400.html'), 400\n\t \n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef internal_error(error):\n db.session.rollback()\n return render_template('500.html'), 500\t\n \ndef background_thread():\n# global position,distancetogo\n while True:\n socketio.sleep(.5)\n values['position']=getPosition()\n values['distancetogo'] =getDistancetogo()\n values['start_position'] =getStartPosition()\n newMode= getMode()\n socketio.emit('message',\n {'data': 'test', 'position': values['position'],'curmeasdist':values['distancetogo'],'startdist':values['start_position']},\n namespace='/test')\n if (newMode!=values['mode']):\n if(newMode==STOP_NOW):\n values['startstop']=1\n socketio.emit('startstop_update',{'startstop': values['startstop'], 'direction': values['direction']},namespace='/test', broadcast=True)\t\t \n values['mode']=newMode\t\t \n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html', values=values,async_mode=socketio.async_mode)\n\n@socketio.on('mode_changed', namespace='/test')\ndef mode_changed(message):\n emit('mode_update',{'data': message['data']})\n\t\n@socketio.on('my_event', namespace='/test')\ndef test_message(message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']})\n\n\n@socketio.on('my_broadcast_event', namespace='/test')\ndef test_broadcast_message(message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']},\n broadcast=True)\n\n@socketio.on('value changed', namespace='/test')\ndef value_changed(message):\n values[message['who']] = message['data']\n if (message['who']=='leftbtn'):\n values['direction']=app.config['FORWARD']\n if (message['who']=='rightbtn'):\n values['direction']=app.config['BACKWARD']\n if (message['who']=='clearposbtn'):\n values['position']=0\n setPosition(0)\n if (message['who']=='goposzerobtn'):\n if (abs(values['position'])>3):\n if(values['start_position']>0):\n values['direction']=app.config['BACKWARD']\n else:\n values['direction']=app.config['FORWARD']\n values['distance']=abs(values['start_position'])\t\t\t \n values['startstop']=2\n motorStart(values['direction'],values['distance'])\n if (message['who']=='gostartbtn'):\n if (abs(values['position'])>3):\n if(values['position']>0):\n values['direction']=app.config['BACKWARD']\n else:\n values['direction']=app.config['FORWARD']\n values['distance']=abs(values['position'])\t\t\t \n values['startstop']=2\n motorStart(values['direction'],values['distance'])\n if (message['who']=='startstopbtn'):\n if ((values['startstop']==1) and (values['direction']!=app.config['STOPPED'])):\n values['startstop']=2\n motorStart(values['direction'],values['set_distance'])\n else:\n values['startstop']=1\n values['direction']=app.config['STOPPED']\n motorStop()\n emit('direction_update', {'direction':values['direction']}, broadcast=True)\n emit('startstop_update',{'startstop': values['startstop'], 'direction': values['direction']}, broadcast=True)\n\n\n@socketio.on('measuredist', namespace='/test')\ndef measure_dist(message):\n emit('setmeasuredist', message, broadcast=True)\n values['set_distance']=int(float(message['data'])*100)\n print(\"Set Distance:\",values['set_distance'])\n\n@socketio.on('refresh_req', namespace='/test')\ndef refresh_req():\n emit('refresh_data',{'automan_mode':values['automan_mode'],'measuredist':values['measuredist'],'position':values['position']})\n\n\t\n@socketio.on('disconnect_request', namespace='/test')\ndef disconnect_request():\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': 'Disconnected!', 'count': session['receive_count']})\n disconnect()\n\n\n@socketio.on('my_ping', namespace='/test')\ndef ping_pong():\n emit('my_pong')\n\n\n@socketio.on('connect', namespace='/test')\ndef test_connect():\n global thread\n if thread is None:\n thread = socketio.start_background_task(target=background_thread)\n emit('connect', {'data': 'Connected', 'count': 0})\n\n\n@socketio.on('disconnect', namespace='/test')\ndef test_disconnect():\n emit('disconnect', {'data': 'Disconnected', 'count': 0})\n print('Client disconnected', request.sid)\n\n@socketio.on_error(namespace='/test')\ndef chat_error_handler(e):\n print('An error has occurred: ' + str(e))\t\n\t\t\t\t\t\t \n@app.route('/about')\ndef about():\n return render_template('about.html')\n \n@app.route('/home')\ndef home():\n return redirect('/index')\n# return render_template('home.html')\n\n@app.route('/config',methods=['GET', 'POST'])\ndef config():\n form=ConfigForm()\n# if form.validate_on_submit():\n if request.method == 'POST' :\n# if form.validate_on_submit(): \n if True:\n app.config['APP_NAME']=form.appname.data\n app.config['SHORT_NAME']=form.shortname.data\n app.config['ALERTEMAIL']=form.alertemail.data\n app.config['MAIL_SERVER']=form.mailserver.data\n app.config['MAIL_USERNAME']=form.mailusername.data\n app.config['MAIL_PASSWORD']=form.mailpassword.data\n app.config['MAIL_PORT']=form.mailport.data\n app.config['PASSWORD']=form.password.data\n app.logger.info(app.config['APP_NAME']+' Configuration Form Saved')\n writeconfig()\n return redirect('/index')\n else:\n app.logger.info(app.config['APP_NAME']+' Configuration Form Not Validated')\t \n # db.session.commit()\n # for item in ConfigItem.query:\n # app.config[item.key] = item.value\n # flash('Configuration was saved')\n return redirect('/ricemaster/index')\n\n elif request.method == 'GET':\n form.appname.data=app.config[\"APP_NAME\"]\n form.shortname.data=app.config[\"SHORT_NAME\"]\n form.alertemail.data=app.config[\"ALERTEMAIL\"]\n form.mailserver.data=app.config[\"MAIL_SERVER\"]\n form.mailusername.data=app.config[\"MAIL_USERNAME\"]\n form.mailpassword.data=app.config[\"MAIL_PASSWORD\"]\n form.mailport.data=app.config[\"MAIL_PORT\"]\n form.password.data=app.config[\"PASSWORD\"]\n return render_template('config.html',form=form)\n \n@app.route('/addconfigkey',methods=['GET', 'POST'])\ndef addconfigkey():\n form=AddConfigKeyForm()\n if form.validate_on_submit():\n if not db.session.query(ConfigItem).filter_by(key=form.configkey.data).first():\n ci=ConfigItem(key=form.configkey.data,value=form.configvalue.data)\n db.session.add(ci)\n db.session.commit()\n else:\n ci=ConfigItem.query.filter_by(key=form.configkey.data).update(dict(value=form.configvalue.data))\t\t\n db.session.commit()\n for item in ConfigItem.query.all():\n app.config[item.key] = item.value\n flash('Configuration was saved')\n return redirect('/addconfigkey')\n elif request.method == 'GET':\n ci=ConfigItem.query.all()\n return render_template('addconfigkey.html',form=form,configitem=ci)\n\n@app.route('/delconfigkey',methods=['GET', 'POST'])\ndef delconfigkey(): \n form=DelConfigKeyForm()\n if form.validate_on_submit():\n ciq=ConfigItem.query.filter_by(key=form.configkey.data)\n if ciq==None:\n flash('Configuration Key Not Found')\n return redirect('/delconfigkey')\n else:\n for ci in ciq:\n db.session.delete(ci)\n db.session.commit()\n app.config[form.configkey.data] = \"\"\n flash('Configuration Key was deleted')\n return redirect('/delconfigkey')\n elif request.method == 'GET':\n ci=ConfigItem.query.all()\n return render_template('delconfigkey.html',form=form,configitem=ci)\t\n \napp.logger.info(' Views Started')\n\nif __name__ == '__main__':\n socketio.run(app, debug=True)\n\nif thread is None:\n thread = socketio.start_background_task(target=background_thread)\t\t ","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"256120042","text":"students = ['jeongyoon cha', 'sancho suh', 'hyunduk choi', 'seungho shin', 'jonglok kim', 'yongmin kim',\n 'hyojoon lee', 'hanjunghyun', 'seunghyun lee', 'jungil yang', 'jarang seo'\n]\n\nattendances = {}\n\ndef attend(name, attended_at) :\n if studentsValidation(name) :\n attendances[name] = attended_at\n print(\"log(attend) : succeed in adding attendance to the dictionary\")\n else :\n print(\"log(attend) : \" + name + \" is not in the student list\")\n\n\ndef studentsValidation(name) :\n for l_name in students :\n if l_name == name :\n return True # Return True if the name is in the student list\n print(\"log(studentsValidation) : \" + name + \" is not in the student list\")\n return False # else, return False\n\n\ndef printLog() :\n print(\"Num of attendees : %d\" % len(attendances))\n print(attendances)\n\n\nprintLog()\nattend('sancho suh', '2016-02-16')\nprintLog()\nattend('Kyungmook Cha', '2016-02-16')\nprintLog()\nattend('hanjunghyun', '2016-02-16')\nprintLog()\n","sub_path":"SanchoSuh_attendance1.py","file_name":"SanchoSuh_attendance1.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"382622587","text":"#!/usr/bin/env python\nimport os\nfrom cirrus.builder_plugin import Builder\nfrom cirrus.logger import get_logger\nfrom cirrus.invoke_helpers import local\n\n\nLOGGER = get_logger()\n\n\nclass CondaEnv(Builder):\n\n def __init__(self):\n super(CondaEnv, self).__init__()\n self.conda_bin = 'conda'\n self.plugin_parser.add_argument(\n '--conda',\n help='conda binary to use if different from default conda',\n default='conda'\n )\n self.plugin_parser.add_argument(\n '--environment',\n help='conda environment file to process',\n default=None\n )\n\n def create(self, **kwargs):\n python_bin = kwargs.get(\"python\", self.python_bin)\n conda = kwargs.get('conda', self.conda_bin)\n upgrade = kwargs.get('upgrade', False)\n nosetupdevelop = kwargs.get('nosetupdevelop', False)\n environment = kwargs.get(\n 'environment',\n self.build_config.get('conda-environment', None)\n )\n if environment is None:\n msg = \"No conda environment yaml specified in cirrus.conf [build] section or via --environment option\"\n LOGGER.error(msg)\n raise RuntimeError(msg)\n clean = kwargs.get('clean', False)\n if clean:\n self.clean(**kwargs)\n\n venv_command = \"{} env create -f {} -p {} \".format(\n conda,\n environment,\n self.venv_path\n )\n if python_bin:\n # should probably check this is int or int.int format\n venv_command += \" python={}\".format(self.python_bin)\n\n if not os.path.exists(self.venv_path):\n LOGGER.info(\"Bootstrapping conda env: {0}\".format(self.venv_path))\n local(venv_command)\n\n if upgrade:\n cmd = \"{activate} && conda env update {venv} -f {env}\".format(\n activate=self.activate(),\n venv=self.venv_path,\n env=environment\n )\n try:\n local(cmd)\n except OSError as ex:\n msg = (\n \"Error running conda env update command during build\\n\"\n \"Error was {0}\\n\"\n \"Running command: {1}\\n\"\n \"Working Dir: {2}\\n\"\n \"Conda env: {3}\\n\"\n \"Requirements: {4}\\n\"\n ).format(ex, cmd, self.working_dir, self.venv_path, self.reqs_name)\n LOGGER.error(msg)\n raise\n\n # setup for development\n if nosetupdevelop:\n msg = \"skipping python setup.py develop...\"\n LOGGER.info(msg)\n else:\n self.run_setup_develop()\n\n def clean(self, **kwargs):\n conda = kwargs.get('conda', self.conda_bin)\n if os.path.exists(self.venv_path):\n cmd = \"{} remove --all -y -p {}\".format(\n conda,\n self.venv_path\n )\n LOGGER.info(\"Removing existing conda env: {0}\".format(self.venv_path))\n local(cmd)\n\n def activate(self):\n command = \"source {}/bin/activate {}\".format(self.venv_path, self.venv_path)\n return command\n","sub_path":"src/cirrus/plugins/builders/conda_env.py","file_name":"conda_env.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"116560620","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom app.modelo.models import Cliente, Cuenta, Transaccion\nfrom .forms import FormularioTransaccion\n\n@login_required\ndef index(request):\n usuario = request.user\n grupos = [ x.name for x in usuario.groups.all()]\n cliente = None\n cuentas = None\n if usuario.groups.filter(name = 'cajeros').exists():\n queryset = request.GET.get(\"cedula\")\n print(queryset)\n if queryset:\n cliente = Cliente.objects.get(cedula=queryset)\n print(cliente)\n cuentas = Cuenta.objects.filter(cliente = cliente.cliente_id)\n print(cuentas)\n return render (request,'transacciones/principal.html',{'cliente':cliente, 'cuentas': cuentas, 'grupos':grupos})\n else:\n return render (request,'login/acceso_prohibido.html',locals())\n\ndef transaccionDeposito(request, cedula, numero):\n usuario = request.user\n grupos = [ x.name for x in usuario.groups.all()]\n if usuario.groups.filter(name = 'cajeros').exists():\n formulario = FormularioTransaccion(request.POST)\n cliente = Cliente.objects.get(cedula=cedula)\n cuenta = Cuenta.objects.get(numero=numero)\n if request.method == 'POST':\n if formulario.is_valid():\n datos = formulario.cleaned_data\n transaccion = Transaccion()\n transaccion.tipo = 'deposito'\n transaccion.valor = float(datos.get('valor'))\n transaccion.descripcion = datos.get('descripcion')\n transaccion.responsable = usuario.username\n transaccion.cuenta = cuenta\n transaccion.save()\n valorDeposito = float(datos.get('valor')) + float(cuenta.saldo) #cambia el valor de cadenas a double\n cuenta.saldo = valorDeposito\n cuenta.save()\n return HttpResponse('Transacción exitosa') \n else:\n return render(request, 'transacciones/transaccionDeposito.html', locals())\n else:\n return render (request,'login/acceso_prohibido.html',locals())\n\n #return HttpResponse('soy el depósito')\n\n \"\"\"\n fecha = models.DateTimeField(auto_now_add = True, null = False)\n\n\"\"\"","sub_path":"cooperativa/app/transacciones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"61734105","text":"from src.matrix import Matrix, Vector\n\n\nclass Boilerplate:\n\tdef __init__(self):\n\t\tself.m1 = Matrix([[1, 123124142, 143, 46545],\n\t\t [5, 6, 7, 8],\n\t\t [10, 1211, 12, 1321, 146436]])\n\t\t# mag=6 c=5 r=3\n\t\tself.m2 = Matrix([[80, 8, 8, 8],\n\t\t [8, 8000, 80000, 8, 2],\n\t\t [8, 8, 8, 800000]])\n\t\t# mag=1 c=4 r=3\n\t\tself.m3 = Matrix([[8, 8, 8, 8],\n\t\t [8, 8, 8, 8],\n\t\t [8, 8, 8, 8]])\n\n\t\t# mag=5 c=4 r=3\n\t\tself.m4 = Matrix([[8, 8, 8, 8],\n\t\t [8, 8, 8, 8],\n\t\t [8, 8, 8, 8],\n\t\t [80, 800, 8000, 80000]])\n\n\t\tself.m5 = Matrix([[1, 123124142, 143, 46545, 5, 1, 2],\n\t\t [5, 6, 7, 8],\n\t\t [10, 1211, 12, 1321, 146436]])\n\n\t\tself.m = Matrix([['A', 'B', 'C', 'D', None],\n\t\t ['E', 'F', 'G', 'H', 'I'],\n\t\t ['J', 'K', 'L', 'M', None]])\n\n\tdef basic_test(self, res, expected, res_type, index_type_pairs: dict = None):\n\t\tself.is_equal(res, expected)\n\n\t\tself.is_instance(res, res_type)\n\n\t\tif res_type is list:\n\t\t\tfor v in res:\n\t\t\t\tself.is_instance(v, Vector)\n\n\t\tif index_type_pairs is not None:\n\t\t\tfor k, v in index_type_pairs.items():\n\t\t\t\tself.is_instance(res[k], v)\n\n\t@staticmethod\n\tdef is_equal(x, y):\n\t\tassert x == y\n\n\t@staticmethod\n\tdef not_is_equal(x, y):\n\t\tassert x != y\n\n\t@staticmethod\n\tdef is_instance(x, t):\n\t\tassert x is None if t is None else isinstance(x, t)\n\n\t@staticmethod\n\tdef not_is_instance(x, t):\n\t\tassert x is not None if t is None else not isinstance(x, t)\n\n\t@staticmethod\n\tdef is_a(x, y):\n\t\tassert x is y\n\n\t@staticmethod\n\tdef not_is_a(x, y):\n\t\tassert x is not y\n","sub_path":"tests/boilerplate.py","file_name":"boilerplate.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"565559034","text":"import time\n\nimport numpy as np\nimport py4j\n\n\nclass MicroscopeOperations:\n def __init__(self, event_logger):\n self.event_logger = event_logger\n\n def __getattr__(self, name):\n operation = globals()[name]\n\n def wrapper(*args, **kwargs):\n self.event_logger('OPERATION INFO: Calling %s' % operation.__name__)\n result = operation(*args, **kwargs)\n self.event_logger('OPERATION INFO: Exiting %s' % operation.__name__)\n return result\n\n return wrapper\n\n\ndef go_to_position(micromanager_interface, position_ind):\n\n mm_position_list = micromanager_interface.mm_studio.getPositionList()\n mm_position = mm_position_list.getPosition(position_ind)\n\n # move the stage to the new position\n # note that `goToPosition` moves the stages specified in the position list\n # we try twice because, for large stage movements, MicroManager will throw a timeout error\n try:\n mm_position.goToPosition(mm_position, micromanager_interface.mm_core)\n except py4j.protocol.Py4JJavaError:\n mm_position.goToPosition(mm_position, micromanager_interface.mm_core)\n\n\ndef call_afc(micromanager_interface, event_logger, afc_logger=None, position_ind=None):\n '''\n Attempt to call AFC and recover from timeout errors\n by incrementally adjusting the FocusDrive stage up and down\n\n TODO: consider switching to mm_core API,\n which has its own fullFocus method - might be faster\n '''\n # get the active AutofocusPlugin (assumed to be AFC)\n af_manager = micromanager_interface.mm_studio.getAutofocusManager()\n af_plugin = af_manager.getAutofocusMethod()\n\n # the initial AFC score and FocusDrive position\n initial_afc_score = af_plugin.getCurrentFocusScore()\n initial_focusdrive_position = micromanager_interface.mm_core.getPosition('FocusDrive')\n\n # here we attempt to call AFC at various FocusDrive positions.\n # the logic of this is that, when AFC times out, it is usually because\n # the FocusDrive stage is too low, so here, when it times out,\n # we move the stage up in 10um steps and attempt to call AFC at each step\n successful_offset = None\n afc_error_message = None\n afc_did_succeed = False\n failed_offsets = []\n\n focusdrive_offsets = [0, 10, 20, 40, 60, -20]\n for offset in focusdrive_offsets:\n if afc_did_succeed:\n continue\n\n if offset != 0:\n # if we're here, it means AFC has failed once (at offset = 0),\n # which means we need to reset the FocusDrive to its original position\n # and then move it up by the (now nonzero) offset\n # (note that when AFC times out, it lowers the FocusDrive by around 500um)\n focusdrive_position = initial_focusdrive_position + offset\n move_z_stage(\n micromanager_interface,\n stage_label='FocusDrive',\n position=focusdrive_position,\n kind='absolute',\n )\n # delay to help AFC 'adjust' to the new position (see comments below)\n time.sleep(0.5)\n\n try:\n af_plugin.fullFocus()\n afc_did_succeed = True\n successful_offset = offset\n except py4j.protocol.Py4JJavaError as error:\n event_logger(\"AUTOFOCUS INFO: AFC timed out at an offset of %sum\" % offset)\n afc_error_message = str(error)\n failed_offsets.append(offset)\n\n # add an artificial delay before retrieving the AFC score\n # because, anecdotally, the score requires some time to update\n # after the FocusDrive is moved\n # time.sleep(0.5)\n final_afc_score = af_plugin.getCurrentFocusScore()\n final_focusdrive_position = micromanager_interface.mm_core.getPosition('FocusDrive')\n\n # if AFC failed, move the FocusDrive back to where it was,\n # which is, at this point, the best we can do\n if afc_did_succeed:\n event_logger(\n 'AUTOFOCUS INFO: AFC was called successfully at an offset of %sum '\n 'and the FocusDrive position was updated from %s to %s'\n % (successful_offset, initial_focusdrive_position, final_focusdrive_position)\n )\n else:\n event_logger(\n 'AUTOFOCUS ERROR: AFC timed out at all offsets and the FocusDrive will be reset to %s'\n % initial_focusdrive_position\n )\n\n move_z_stage(\n micromanager_interface,\n stage_label='FocusDrive',\n position=initial_focusdrive_position,\n kind='absolute',\n )\n\n if afc_logger is not None:\n afc_logger(\n initial_afc_score=initial_afc_score,\n final_afc_score=final_afc_score,\n final_focusdrive_position=final_focusdrive_position,\n initial_focusdrive_position=initial_focusdrive_position,\n last_afc_error_message=afc_error_message,\n failed_offsets=failed_offsets,\n afc_did_succeed=afc_did_succeed,\n position_ind=position_ind,\n )\n\n return afc_did_succeed\n\n\ndef acquire_image(micromanager_interface, event_logger):\n '''\n This method just wraps _acquire_image and attempts to call it multiple times\n\n The motivation for this is that, on 2020-01-31, a MicroManager timeout error occurred\n during the call to mm_studio.live().snap that, while it allowed the call to return as usual,\n did not result in an image appearing in the queue, so that getLastMeta always returned None,\n and the _acquire_image method raised an uncaught TypeError that crashed the acquisition script\n '''\n\n data = None\n num_tries = 10\n wait_time = 10\n for _ in range(num_tries):\n try:\n data = _acquire_image(micromanager_interface)\n break\n except Exception as error:\n event_logger('ACQUIRE_IMAGE ERROR: %s' % str(error))\n time.sleep(wait_time)\n\n if data is None:\n event_logger('FATAL ERROR: All attempts to call _acquire_image failed')\n raise Exception('All attempts to call _acquire_image failed')\n return data\n\n\ndef _acquire_image(micromanager_interface):\n '''\n 'snap' an image using the current laser/camera/exposure settings\n and return the image data as a numpy memmap\n '''\n\n # KC: not sure if this is necessary but it seems wise\n micromanager_interface.mm_core.waitForSystem()\n\n # number of times to try calling gate.getLastMeta()\n num_tries = 10\n\n # time in seconds to wait between calls to gate.getLastMeta()\n wait_time = 0.10\n\n # clear the mm2python queue\n # this ensure that gate.getLastMeta returns either None\n # or the image generated by the call to mm_studio.live().snap() below\n micromanager_interface.gate.clearQueue()\n\n # acquire an image using the current exposure settings\n # note that this method does not exit until the exposure is complete\n micromanager_interface.mm_studio.live().snap(True)\n\n # retrieve the mm2python metadata corresponding to the image acquired above\n # (this seems to require waiting for some amount of time between 30 and 100ms)\n for _ in range(num_tries):\n time.sleep(wait_time)\n meta = micromanager_interface.gate.getLastMeta()\n if meta is not None:\n break\n\n # if meta is still None, try again with a longer wait time\n # (KC: I have no reason to believe this would ever be necessary;\n # I've included it only out of an abundance of caution)\n if meta is None:\n wait_time *= 10\n for _ in range(num_tries):\n time.sleep(wait_time)\n meta = micromanager_interface.gate.getLastMeta()\n if meta is not None:\n break\n\n # if meta is still None, we're in big trouble\n if meta is None:\n raise TypeError('The meta object returned by gate.getLastMeta() is None')\n\n data = np.memmap(\n meta.getFilepath(),\n dtype='uint16',\n mode='r+',\n offset=0,\n shape=(meta.getxRange(), meta.getyRange()),\n )\n return data\n\n\ndef acquire_stack(\n micromanager_interface, stack_settings, channel_ind, position_ind, position_name, event_logger\n):\n '''\n Acquire a z-stack using the given settings and 'put' it in the datastore object\n\n This method results in the creation (via datastore.putImage)\n of a single TIFF stack with a filename of the form\n 'MMSTack_{position_name}.ome.tif'\n\n Parameters\n ----------\n stack_settings :\n channel_ind : int\n a position-unique channel index (usually 0 for hoechst an 1 for GFP)\n position_ind : int\n the experiment-unique position index\n position_name : str\n an arbitrary but experiment-unique name for the current position,\n used to determine the filename of the TIFF stack\n\n Context\n -------\n The MicroManager API calls that acquire and 'save' an image at each z-slice\n are based on those that appear in the MicroManager v2 beanshell scripts.\n The relevant block from these scripts is copied verbatim below for reference.\n ```\n mmc.snapImage();\n tmp1 = mmc.getTaggedImage();\n Image channel0 = mm.data().convertTaggedImage(tmp1);\n channel0 = channel0.copyWith(\n channel0.getCoords().copy().channel(c).z(z).stagePosition(p).build(),\n channel0.getMetadata().copy().positionName(\"\"+p).build());\n autosavestore.putImage(channel0);\n ```\n '''\n\n def snap_and_get_image(delay=0):\n '''\n wrapper to try this block multiple times, in an attempt to catch a camera hardware error\n '''\n # acquire an image\n micromanager_interface.mm_core.waitForImageSynchro()\n micromanager_interface.mm_core.snapImage()\n\n # optional wait time between snapImage and getTaggedImage calls\n if delay > 0:\n time.sleep(delay)\n\n # convert the image\n # TODO: understand what's happening here\n tagged_image = micromanager_interface.mm_core.getTaggedImage()\n image = micromanager_interface.mm_studio.data().convertTaggedImage(tagged_image)\n return image\n\n # generate a list of the z positions to visit\n z_positions = np.arange(\n stack_settings.relative_bottom,\n stack_settings.relative_top + stack_settings.step_size,\n stack_settings.step_size,\n )\n\n for z_ind, z_position in enumerate(z_positions):\n\n # move to the new z-position\n move_z_stage(\n micromanager_interface, stack_settings.stage_label, position=z_position, kind='absolute'\n )\n\n # this is an attempt to recover from the 'camera image buffer read failed' error\n # that is randomly and rarely thrown by the `getTaggedImage` call\n image = None\n num_tries = 10\n intertry_wait_time = 3\n intratry_wait_time = 0\n for _ in range(num_tries):\n try:\n image = snap_and_get_image(intratry_wait_time)\n break\n except Exception as error:\n event_logger(\n 'ERROR: An error occurred in snap_and_get_image with a delay of %ss: %s'\n % (intratry_wait_time, str(error))\n )\n time.sleep(intertry_wait_time)\n intratry_wait_time += 1\n\n if image is None:\n message = 'All tries to call snap_and_get_image failed'\n event_logger('FATAL ERROR: %s' % message)\n raise Exception(message)\n\n # manually construct image coordinates (position, channel, z)\n # NOTE: a new TIFF stack will be created whenever a new and datastore-unique value\n # is passed to coords.stagePosition (this value must, however, be an int)\n coords = image.getCoords().copy()\n coords = coords.channel(channel_ind)\n coords = coords.z(z_ind)\n coords = coords.stagePosition(position_ind)\n coords = coords.build()\n\n # construct image metadata\n # NOTE: the filename of the TIFF stack is determined entirely\n # by the value passed to metadata.positionName (and this value can be any string)\n metadata = image.getMetadata().copy()\n metadata = metadata.positionName(position_name)\n metadata = metadata.build()\n\n image = image.copyWith(coords, metadata)\n if micromanager_interface.has_open_datastore:\n micromanager_interface.datastore.putImage(image)\n\n # cleanup: reset the piezo stage\n # TODO: decide if this is necessary\n move_z_stage(micromanager_interface, stack_settings.stage_label, position=0.0, kind='absolute')\n\n\ndef change_channel(micromanager_interface, channel_settings):\n '''\n Convenience method to set the laser power, exposure time, and camera gain\n\n (KC: pretty sure the order of these operations doesn't matter,\n but to be safe the order here is preserved from Nathan's script)\n '''\n mm_core = micromanager_interface.mm_core\n\n # hardware config (this takes some time)\n mm_core.setConfig(channel_settings.config_group, channel_settings.config_name)\n mm_core.waitForConfig(channel_settings.config_group, channel_settings.config_name)\n\n # laser power\n if channel_settings.laser_line is not None:\n mm_core.setProperty(\n channel_settings.laser_line, channel_settings.laser_name, channel_settings.laser_power\n )\n\n # exposure time\n mm_core.setExposure(float(channel_settings.exposure_time))\n\n # camera gain\n property_name = 'Gain'\n mm_core.setProperty(channel_settings.camera_name, property_name, channel_settings.camera_gain)\n\n\ndef move_z_stage(micromanager_interface, stage_label, position=None, kind=None):\n '''\n Convenience method to move a z-stage\n TODO: basic sanity checks on the value of `position`\n (e.g., if kind=='relative', `position` shouldn't be a 'big' number)\n '''\n\n if kind not in ['relative', 'absolute']:\n raise ValueError(\"`kind` must be either 'relative' or 'absolute'\")\n\n try:\n position = float(position)\n except ValueError:\n raise TypeError('`position` cannot be coerced to float')\n\n if not np.isfinite(position):\n raise TypeError('`position` cannot be nan')\n\n # move the stage\n if kind == 'absolute':\n micromanager_interface.mm_core.setPosition(stage_label, position)\n elif kind == 'relative':\n micromanager_interface.mm_core.setRelativePosition(stage_label, position)\n\n micromanager_interface.mm_core.waitForDevice(stage_label)\n\n\ndef autoexposure(\n micromanager_interface, stack_settings, autoexposure_settings, channel_settings, event_logger\n):\n '''\n\n Parameters\n ----------\n stack_settings : an instance of StackSettings\n autoexposure_settings : an instance of AutoexposureSettings\n channel_settings : the ChannelSettings instance\n corresponding to the channel on which to run the autoexposure algorithm\n NOTE: this method modifies the `laser_power` and `exposure_time` attributes\n\n Returns\n -------\n autoexposure_did_succeed : bool\n Whether the autoexposure algorithm was successful\n\n Algorithm description\n ---------------------\n slice check:\n while an over-exposed slice exists:\n step through the z-stack until an over-exposed slice is encountered,\n then lower the exposure time and/or laser power\n\n stack check:\n if no slices were over-exposed, check for under-exposure\n using the overall max intensity and lower the exposure time if necessary\n\n '''\n\n autoexposure_did_succeed = True\n\n # keep track of the maximum intensity\n stack_max_intensity = 0\n\n # keep track of whether any slices were ever over-exposed\n overexposure_did_occur = False\n\n # start at the bottom of the stack\n z_position = stack_settings.relative_bottom\n\n # step through the z-stack and check each slice for over-exposure\n while z_position <= stack_settings.relative_top:\n\n # move to the next z-position\n # (either the next slice or the bottom of the stack)\n move_z_stage(\n micromanager_interface, stack_settings.stage_label, position=z_position, kind='absolute'\n )\n\n # acquire an image and check the exposure\n image = acquire_image(micromanager_interface, event_logger)\n\n # use a percentile to calculate the 'max' intensity\n # as a defense against hot pixels, anomalous bright spots/dust, etc\n # (the 99.99th percentile corresponds to ~100 pixels in a 1024x1024 image)\n slice_max_intensity = np.percentile(image, 99.99)\n event_logger(\n 'AUTOEXPOSURE INFO: max_intensity = %d at z = %0.1f' % (slice_max_intensity, z_position)\n )\n\n # if the slice was over-exposed, lower the exposure time or the laser power,\n # reset stack_max_intensity, and go back to the bottom of the z-stack\n slice_was_overexposed = slice_max_intensity > autoexposure_settings.max_intensity\n if slice_was_overexposed:\n overexposure_did_occur = True\n\n # lower the exposure time\n channel_settings.exposure_time *= autoexposure_settings.relative_exposure_step\n event_logger(\n 'AUTOEXPOSURE INFO: The slice at z = %0.1f was overexposed (max = %d) '\n 'so the exposure time was reduced to %dms'\n % (z_position, slice_max_intensity, channel_settings.exposure_time)\n )\n\n # if the exposure time is now too low, turn down the laser instead\n if channel_settings.exposure_time < autoexposure_settings.min_exposure_time:\n channel_settings.exposure_time = autoexposure_settings.default_exposure_time\n channel_settings.laser_power *= autoexposure_settings.relative_exposure_step\n event_logger(\n 'AUTOEXPOSURE INFO: The minimum exposure time was exceeded '\n 'so the laser power was reduced to %0.1f%%' % (channel_settings.laser_power)\n )\n\n # update the laser power\n micromanager_interface.mm_core.setProperty(\n channel_settings.laser_line,\n channel_settings.laser_name,\n channel_settings.laser_power,\n )\n\n # update the exposure time\n micromanager_interface.mm_core.setExposure(float(channel_settings.exposure_time))\n\n # prepare to return to the bottom of the stack\n z_position = stack_settings.relative_bottom\n\n # reset the max intensity\n stack_max_intensity = 0\n\n # break out of the while loop if the exposure has been lowered\n # as far as it can be and the slice is still over-exposed\n # KC: in practice, I believe this should rarely/never happen\n if channel_settings.laser_power < autoexposure_settings.min_laser_power:\n autoexposure_did_succeed = False\n event_logger(\n 'AUTOEXPOSURE ERROR: The laser power was lowered to its minimum '\n 'but the stack was still over-exposed'\n )\n break\n\n # if the slice was not over-exposed,\n # update stack_max and move to the next z-slice\n else:\n stack_max_intensity = max(stack_max_intensity, slice_max_intensity)\n z_position += autoexposure_settings.z_step_size\n\n # after exiting the while-loop, either\n # 1) some slices were over-exposed and the exposure is now adjusted, or\n # 2) no slices were over-exposed and we need to check for under-exposure\n # here, we check for scenario (2) and use stack_max_intensity to increase\n # the exposure time if it is too low\n if not overexposure_did_occur:\n intensity_ratio = autoexposure_settings.min_intensity / stack_max_intensity\n if intensity_ratio > 1:\n channel_settings.exposure_time *= intensity_ratio\n event_logger(\n 'AUTOEXPOSURE INFO: The stack was under-exposed (max = %d) '\n 'so the exposure time was increased by %0.1fx to %dms'\n % (stack_max_intensity, intensity_ratio, channel_settings.exposure_time)\n )\n\n if channel_settings.exposure_time > autoexposure_settings.max_exposure_time:\n channel_settings.exposure_time = autoexposure_settings.max_exposure_time\n event_logger(\n 'AUTOEXPOSURE INFO: The stack was under-exposed '\n 'and the maximum exposure time was exceeded'\n )\n\n # reset the piezo stage\n move_z_stage(micromanager_interface, stack_settings.stage_label, position=0.0, kind='absolute')\n\n # log the final results\n event_logger(\n 'AUTOEXPOSURE INFO: The final stack max is %d, the laser power is %0.1f%%, '\n 'and the exposure time is %dms'\n % (stack_max_intensity, channel_settings.laser_power, channel_settings.exposure_time)\n )\n\n return autoexposure_did_succeed\n","sub_path":"dragonfly_automation/microscope_operations.py","file_name":"microscope_operations.py","file_ext":"py","file_size_in_byte":21019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"195600850","text":"from net.common import *\n\n\n\ndef rpn_loss(scores, deltas, inds, pos_inds, rpn_labels, rpn_targets):\n\n def modified_smooth_l1( box_preds, box_targets, sigma=3.0):\n '''\n ResultLoss = outside_weights * SmoothL1(inside_weights * (box_pred - box_targets))\n SmoothL1(x) = 0.5 * (sigma * x)^2, if |x| < 1 / sigma^2\n |x| - 0.5 / sigma^2, otherwise\n '''\n sigma2 = sigma * sigma\n diffs = tf.subtract(box_preds, box_targets)\n smooth_l1_signs = tf.cast(tf.less(tf.abs(diffs), 1.0 / sigma2), tf.float32)\n\n smooth_l1_option1 = tf.multiply(diffs, diffs) * 0.5 * sigma2\n smooth_l1_option2 = tf.abs(diffs) - 0. / sigma2\n smooth_l1_add = tf.multiply(smooth_l1_option1, smooth_l1_signs) + tf.multiply(smooth_l1_option2, 1-smooth_l1_signs)\n smooth_l1 = smooth_l1_add #tf.multiply(box_weights, smooth_l1_add) #\n\n return smooth_l1\n\n\n scores1 = tf.reshape(scores,[-1,2])\n rpn_scores = tf.gather(scores1,inds) # remove ignore label\n rpn_cls_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_scores, labels=rpn_labels))\n\n deltas1 = tf.reshape(deltas,[-1,4])\n rpn_deltas = tf.gather(deltas1, pos_inds) # remove ignore label\n rpn_smooth_l1 = modified_smooth_l1(rpn_deltas, rpn_targets, sigma=3.0)\n rpn_reg_loss = tf.reduce_mean(tf.reduce_sum(rpn_smooth_l1, axis=1))\n\n return rpn_cls_loss, rpn_reg_loss","sub_path":"baseline-00/net/rpn_loss_op.py","file_name":"rpn_loss_op.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"419220781","text":"#! /usr/bin/env python3\n\n# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nfrom argparse import ArgumentParser\n\nfrom LUPY import argumentsForScripts as argumentsForScriptsModule\n\nfrom PoPs import IDs as PoPsIDsModule\nfrom PoPs import specialNuclearParticleID as specialNuclearParticleIDModule\n\nfrom fudge.reactions import base as reactionBaseModule\nfrom fudge import outputChannel as outputChannelModule\nfrom fudge import product as productModule\nfrom fudge import sums as sumsModule\nfrom fudge.reactionData import crossSection as crossSectionModule\nfrom fudge.productData import multiplicity as multiplicityModule\nfrom fudge.productData.distributions import unspecified as unspecifiedModule\nfrom fudge.productData.distributions import uncorrelated as uncorrelatedModule\nfrom fudge.productData.distributions import energy as energyModule\nfrom fudge.outputChannelData.fissionFragmentData import fissionFragmentData as fissionFragmentDataModule\nfrom fudge.outputChannelData.fissionFragmentData import delayedNeutron as delayedNeutronModule\n\nindentIncrement = ' '\n\nsummaryDocStringFUDGE = '''Prints an outlines of the reactions, and their energy domain and products for a GNDS reactionSuite file.'''\n\ndescription = '''\nPrints each reaction and brief information about each reaction's products. Product information include its id, label, and distribution type and frame.\nIf option '--summaryGammas' is present, only a summary of each photon (gamma) for each output channel is printed.\n'''\n\nparser = ArgumentParser(description=description)\n\nsingleProtareArguments = argumentsForScriptsModule.SingleProtareArguments(parser)\n\nparser.add_argument('--summaryGammas', action='store_true', help='If present, photons (e.g., gammas) for each output channel are summerized.')\nparser.add_argument('--productPath', action='store_true', help='If present, the product path needed by other scripts (e.g., spectrum.py) are included with each product.')\nparser.add_argument('--unspecified', action='store_true', help='If present, only information for products with unspecified multiplicities or distributions are printed.')\nparser.add_argument('--doNotShowProducts', action='store_true', help='If present, no product data are displayed.')\nparser.add_argument('--skipProductions', action='store_true', help='If present, skips the \"productions\" node.')\nparser.add_argument('--skipIncompleteReactions', action='store_true', help='If present, skips the \"incompleteReactions\" node.')\nparser.add_argument('--products', action='append', default=[], help='Only show reactions with these products in their list. If empty, all reactions are shown.')\nparser.add_argument('--MT', action='append', type=int, default=[], help='Only show reactions with these MTs. If empty, all reactions are shown.')\nparser.add_argument('--crossSectionSums', action='store_true', help='If present, also show crossSectionSum information.')\n\nclass PhotonDummy:\n\n pid = PoPsIDsModule.photon\n label = 'many'\n\ndef PID_label(indent, product):\n\n return '%-50s' % ('%sid = %-10s label = %-16s' % (indent, product.pid, product.label))\n\ndef delayedNeutronsPeek(self, indent):\n\n if len(self) > 0:\n print('%s-- delayedNeutrons --' % indent)\n indent2 = indentIncrement + indent\n for delayedNeutron in self:\n delayedNeutron.product.__peek(indent2, [], delayedNeutronRate='label = %-6s rate = %s' % (delayedNeutron.label, delayedNeutron.rate[0]))\n\ndelayedNeutronModule.DelayedNeutrons.__peek = delayedNeutronsPeek\n\ndef fissionFragmentDataPeek(self, indent):\n\n self.delayedNeutrons.__peek(indent)\n\nfissionFragmentDataModule.FissionFragmentData.__peek = fissionFragmentDataPeek\n\ndef productPeek(self, indent, productPath, doPrint=True, delayedNeutronRate=None):\n\n productPath = productPath + [ self.label ]\n\n unspecifiedPresent = False\n unspecifiedMultiplicity = ''\n distributionInfo = 'unknown'\n\n multiplicity = self.multiplicity\n if len(multiplicity) == 0:\n print('%s missing multiplicity data present' % PID_label(indent, self))\n return True, distributionInfo\n multiplicityFrom = multiplicity[0]\n if isinstance(multiplicityFrom, multiplicityModule.Unspecified):\n unspecifiedPresent = True\n unspecifiedMultiplicity = ' (multiplicity-unspecified)'\n\n distribution = self.distribution\n if len(distribution) == 0:\n print('%s missing distribution data present' % PID_label(indent, self))\n return True, distributionInfo\n distributionForm = distribution[0]\n if isinstance(distributionForm, unspecifiedModule.Form):\n unspecifiedPresent = True\n distributionInfo = '%s (%s)' % (distribution[0].moniker, distribution[0].productFrame)\n if isinstance(distributionForm, uncorrelatedModule.Form) and not args.summaryGammas:\n energy = distributionForm.energySubform.data\n if isinstance(energy, energyModule.DiscreteGamma):\n distributionInfo += ': discrete gamma %g %s' % (energy.value, energy.axes[1].unit)\n elif isinstance(energy, energyModule.PrimaryGamma):\n distributionInfo += ': primary gamma %g %s' % (energy.value, energy.axes[1].unit)\n\n if args.unspecified:\n if len(unspecifiedMultiplicity) == 0 and distributionForm.moniker != unspecifiedModule.Form.moniker:\n if self.outputChannel is not None: self.outputChannel.__peek(indent + indentIncrement, productPath)\n return unspecifiedPresent, distributionInfo\n\n productPathStr = ''\n if delayedNeutronRate is None:\n if args.productPath:\n productPathStr = ' (' + ':'.join(productPath) + ')'\n if doPrint:\n print('%s distribution[0] = %s%s%s' % (PID_label(indent, self), distributionInfo, unspecifiedMultiplicity, productPathStr))\n if self.outputChannel is not None:\n self.outputChannel.__peek(indent + indentIncrement, productPath)\n else:\n print('%-50s distribution[0] = %s%s' % (indent + delayedNeutronRate, distributionInfo, unspecifiedMultiplicity))\n\n return unspecifiedPresent, distributionInfo\n\nproductModule.Product.__peek = productPeek\n\ndef outputChannelPeek(self, indent, productPath):\n\n photonsToSummarize = []\n if args.summaryGammas:\n photonsToSummarize = [ product.pid for product in self.products if product.pid == PoPsIDsModule.photon ]\n if len(photonsToSummarize) == 1:\n photonsToSummarize = []\n\n photonsInfo = {}\n photonMonikers = []\n unspecifiedPresent = False\n for product in self.products:\n if product.pid in photonsToSummarize:\n unspecifiedPresent2, distributionInfo = product.__peek(indent, productPath, False)\n unspecifiedPresent = unspecifiedPresent or unspecifiedPresent2\n if distributionInfo not in photonsInfo:\n photonsInfo[distributionInfo] = 0\n photonsInfo[distributionInfo] += 1\n else:\n product.__peek(indent, productPath)\n\n if len(photonsInfo) > 0:\n if not args.unspecified or args.unspecified and unspecifiedPresent:\n photonsInfoStr = ', '.join([ '%d %s' % (photonsInfo[form], form) for form in photonsInfo ])\n print('%s distribution[0] = %s' % (PID_label(indent + indentIncrement, PhotonDummy), photonsInfoStr))\n\n self.fissionFragmentData.__peek(indent)\n\noutputChannelModule.OutputChannel.__peek = outputChannelPeek\n\ndef reactionPeek(self, prefix, index, indent, reactionIndex):\n\n if len(onlyReactionsWithTheseProducts) > 0:\n if len(onlyReactionsWithTheseProducts.intersection(self.listOfProducts())) == 0:\n return\n if len(args.MT) > 0:\n if self.ENDF_MT not in args.MT:\n return\n crossSectionStr = ''\n crossSection = self.crossSection[0]\n if isinstance(crossSection, crossSectionModule.Reference):\n try:\n crossSectionSum = crossSection.link.findClassInAncestry(sumsModule.CrossSectionSum)\n crossSectionStr = ': %s' % crossSectionSum.label\n except:\n pass\n print('%s%-32s (%4d): domainMin = %s, domainMax = %s %s%s' % (indent, prefix % str(self), index, self.domainMin, self.domainMax, self.domainUnit, crossSectionStr))\n productPath = [str(index)]\n if not args.doNotShowProducts:\n self.outputChannel.__peek(indent + indentIncrement, [str(reactionIndex)])\n\nreactionBaseModule.Base_reaction.__peek = reactionPeek\n\ndef crossSectionSum(self, index, indent):\n\n if len(onlyReactionsWithTheseProducts) > 0:\n return\n if len(args.MT) > 0:\n if self.ENDF_MT not in args.MT:\n return\n crossSectionStr = ''\n crossSection = self.crossSection[0]\n if isinstance(crossSection, crossSectionModule.Reference):\n try:\n crossSectionSum = crossSection.link.findClassInAncestry(sumsModule.CrossSectionSum)\n crossSectionStr = ': %s' % crossSectionSum.label\n except:\n pass\n print('%s%-32s (%4d): domainMin = %s, domainMax = %s %s%s' % (indent, str(self), index, crossSection.domainMin, \n crossSection.domainMax, crossSection.domainUnit, crossSectionStr))\n\ndef showChildren(node, skip):\n\n if skip:\n return\n if len(node) > 0:\n print('%s%s:' % (indent, node.moniker))\n for reactionIndex, reaction in enumerate(node):\n reaction.__peek('%s', reactionIndex, 2 * indentIncrement, reactionIndex)\n\nsumsModule.CrossSectionSum.__peek = crossSectionSum\n\nif __name__ == '__main__':\n args = parser.parse_args()\n onlyReactionsWithTheseProducts = set(args.products)\n additionalProducts = []\n for particle in onlyReactionsWithTheseProducts:\n additionalProducts.append(specialNuclearParticleIDModule.familiarID(particle))\n additionalProducts.append(specialNuclearParticleIDModule.nuclideID(particle))\n additionalProducts = set(additionalProducts)\n additionalProducts.discard(None)\n onlyReactionsWithTheseProducts = onlyReactionsWithTheseProducts.union(additionalProducts)\n\n protare = singleProtareArguments.protare(args, verbosity=0, lazyParsing=True)\n\n indent = indentIncrement\n print(protare.sourcePath, '(GNDS-%s)' % protare.format)\n showChildren(protare.reactions, False)\n showChildren(protare.orphanProducts, False)\n showChildren(protare.productions, args.skipProductions)\n showChildren(protare.incompleteReactions, args.skipIncompleteReactions)\n if args.crossSectionSums:\n print('%sCross section sums:' % indent)\n for crossSectionSumIndex, crossSectionSum in enumerate(protare.sums.crossSectionSums):\n crossSectionSum.__peek(crossSectionSumIndex, 2 * indentIncrement)\n","sub_path":"bin/peek.py","file_name":"peek.py","file_ext":"py","file_size_in_byte":10864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"356280464","text":"from django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\n\n@csrf_exempt\ndef simulate(request):\n if request.method == 'POST':\n body = json.loads(request.body)\n print(body[\"booleanExpression\"])\n response = algorithm(body[\"booleanExpression\"])\n return JsonResponse(response)\n\n\n@csrf_exempt\ndef simulate2(request):\n if request.method == 'POST':\n body = json.loads(request.body)\n response = algorithm2(body[\"inputSymbols\"], body[\"PMOSMap\"], body[\"NMOSMap\"])\n return JsonResponse(response)\n\n\ndef algorithm(expr_string):\n # print(\n # \"Format of Input Boolean Expression :\\n\"\n # \"~(~(A+B).C+~D)\\n\"\n # \"~(SUM1 + SUM2)\\n\"\n # \"\\nSymbols : ~ (NOT), . (AND), + (OR)\\n\"\n # )\n __SYMBOLS__ = ['(', ')', '+', '.', '~', ' ']\n __OP_PRECEDENCE__ = {'~': 3, '.': 2, '+': 1, '(': 0, ')': 0}\n # Step 1 : #parsing the user input by stripping each element alone in an array token\n tokens = []\n substr_start = 0\n for char_index in range(len(expr_string)):\n if expr_string[char_index] in __SYMBOLS__:\n if substr_start < char_index:\n tokens.append(expr_string[substr_start:char_index]) # appends expression into token\n if expr_string[char_index] != ' ':\n tokens.append(expr_string[char_index])\n substr_start = char_index + 1\n if substr_start < len(expr_string):\n tokens.append(expr_string[\n substr_start:len(expr_string)]) # expression appended until the substring is equal to expression\n # Step 2 : CONVERT TOKENS TO POSTFIX NOTATION\n operator_stack = []\n tokens_postfix = []\n for token_element in tokens:\n # classify as OPERATOR\n # this is the bonus feature for the parenthesis in the circuit\n if token_element in __OP_PRECEDENCE__.keys():\n if token_element == '(':\n operator_stack.append(token_element)\n elif token_element == ')':\n while (operator_stack[-1] != '('):\n tokens_postfix.append(operator_stack.pop())\n # remove top-most '(' symbol\n operator_stack.pop()\n else:\n while (len(operator_stack) != 0) and (operator_stack[-1] != '(') and (\n __OP_PRECEDENCE__[operator_stack[-1]] >= __OP_PRECEDENCE__[token_element]):\n tokens_postfix.append(operator_stack.pop())\n operator_stack.append(token_element)\n # identify as input variables\n else:\n tokens_postfix.append(token_element)\n while len(operator_stack) != 0:\n tokens_postfix.append(operator_stack.pop())\n # Step 3 : NOT (~) ELIMINATION FROM POSTFIX NMOS & PMOS\n tokens_postfix_pmos = tokens_postfix.copy()\n # negate the nmos postfix expression\n tokens_postfix_nmos = tokens_postfix.copy()\n tokens_postfix_nmos.append('~')\n\n # --- start of helper function - INVERT --- #\n # NOTE : tokens_postfix original copy will be changed (Pass by Reference)\n\n def invert(tokens_postfix, invert_index):\n # if token is a symbol\n if tokens_postfix[invert_index] not in __OP_PRECEDENCE__.keys():\n\n if tokens_postfix[invert_index][0] == '-':\n tokens_postfix[invert_index] = tokens_postfix[invert_index][1:]\n\n else:\n tokens_postfix[invert_index] = '-' + tokens_postfix[invert_index]\n\n return invert_index\n\n # if token is an OPERATOR (. or +)\n else:\n\n if tokens_postfix[invert_index] == '.':\n tokens_postfix[invert_index] = '+'\n\n elif tokens_postfix[invert_index] == '+':\n tokens_postfix[invert_index] = '.';\n\n chain_end = invert(tokens_postfix, invert_index - 1)\n return invert(tokens_postfix, chain_end - 1)\n\n # --- end of helper function - INVERT --- #\n # Step 3 (i) : NOT (~) elimination in PMOS postix expr.\n token_index = 0\n while token_index < len(tokens_postfix_pmos):\n if tokens_postfix_pmos[token_index] == '~':\n invert(tokens_postfix_pmos, token_index - 1)\n tokens_postfix_pmos.pop(token_index)\n else:\n token_index += 1\n # Step 3 (ii) : NOT (~) elimination in NMOS postfix expr.\n token_index = 0\n while token_index < len(tokens_postfix_nmos):\n if tokens_postfix_nmos[token_index] == '~':\n invert(tokens_postfix_nmos, token_index - 1)\n tokens_postfix_nmos.pop(token_index)\n else:\n token_index += 1\n # Step 4 : Postfix to TRANSISTOR NETLIST (NMOS PDN & PMOS PUN)\n # generating NMOS (Pull Down) transistor netlist and evaluation of the PDN\n postfix_eval_stack_nmos = []\n nmos_index = 0\n # index of last intersection/junction\n nmos_jn_index = 0\n for token in tokens_postfix_nmos:\n if token not in ['.', '+']:\n postfix_eval_stack_nmos.append(\n [{\"name\": (\"NMOS\" + str(nmos_index)), \"source\": nmos_jn_index, \"drain\": nmos_jn_index + 1,\n \"gate\": token}])\n # print(postfix_eval_stack_nmos)\n nmos_jn_index += 2\n nmos_index += 1\n elif token == '.':\n old_source = postfix_eval_stack_nmos[-1][0][\"source\"]\n new_source = postfix_eval_stack_nmos[-2][-1][\"drain\"]\n for transistor in postfix_eval_stack_nmos[-1]:\n if transistor[\"source\"] == old_source:\n transistor[\"source\"] = new_source\n postfix_eval_stack_nmos[-2].extend(postfix_eval_stack_nmos.pop(-1))\n elif token == '+':\n old_source = postfix_eval_stack_nmos[-1][0][\"source\"]\n new_source = postfix_eval_stack_nmos[-2][0][\"source\"]\n old_drain = postfix_eval_stack_nmos[-1][-1][\"drain\"]\n new_drain = postfix_eval_stack_nmos[-2][-1][\"drain\"]\n for transistor in postfix_eval_stack_nmos[-1]:\n if transistor[\"source\"] == old_source:\n transistor[\"source\"] = new_source\n if transistor[\"drain\"] == old_drain:\n transistor[\"drain\"] = new_drain\n postfix_eval_stack_nmos[-2].extend(postfix_eval_stack_nmos.pop(-1))\n # Identify OUTPUT and GND pins\n ground = postfix_eval_stack_nmos[0][0][\"source\"]\n out_nmos = postfix_eval_stack_nmos[0][-1][\"drain\"]\n for transistor in postfix_eval_stack_nmos[0]:\n if transistor[\"source\"] == ground:\n transistor[\"source\"] = \"GND\"\n if transistor[\"drain\"] == out_nmos:\n transistor[\"drain\"] = \"OUTPUT\"\n\n # print(postfix_eval_stack_nmos)\n # generating PMOS (Pull Up) transistor netlist and evaluation of the PUN\n # This function inverts input symbols\n\n def invert_input(token):\n if token[0] == '-':\n return token[1:]\n\n else:\n return \"-\" + token\n\n # --- end of function\n postfix_eval_stack_pmos = []\n pmos_index = 0\n # index of last intersection/junction\n pmos_jn_index = nmos_jn_index\n for token in tokens_postfix_pmos:\n if token not in ['.', '+']:\n postfix_eval_stack_pmos.append([{\"name\": (\"PMOS\" + str(pmos_index)), \"source\": pmos_jn_index,\n \"drain\": pmos_jn_index + 1, \"gate\": invert_input(token)}])\n # print(postfix_eval_stack_pmos)\n pmos_jn_index += 2\n pmos_index += 1\n elif token == '.':\n old_source = postfix_eval_stack_pmos[-1][0][\"source\"]\n new_source = postfix_eval_stack_pmos[-2][-1][\"drain\"]\n for transistor in postfix_eval_stack_pmos[-1]:\n if transistor[\"source\"] == old_source:\n transistor[\"source\"] = new_source\n postfix_eval_stack_pmos[-2].extend(postfix_eval_stack_pmos.pop(-1))\n elif token == '+':\n old_source = postfix_eval_stack_pmos[-1][0][\"source\"]\n new_source = postfix_eval_stack_pmos[-2][0][\"source\"]\n old_drain = postfix_eval_stack_pmos[-1][-1][\"drain\"]\n new_drain = postfix_eval_stack_pmos[-2][-1][\"drain\"]\n for transistor in postfix_eval_stack_pmos[-1]:\n if transistor[\"source\"] == old_source:\n transistor[\"source\"] = new_source\n if transistor[\"drain\"] == old_drain:\n transistor[\"drain\"] = new_drain\n postfix_eval_stack_pmos[-2].extend(postfix_eval_stack_pmos.pop(-1))\n # identify VDD and OUTPUT pins\n vdd = postfix_eval_stack_pmos[0][0][\"source\"]\n out_pmos = postfix_eval_stack_pmos[0][-1][\"drain\"]\n for transistor in postfix_eval_stack_pmos[0]:\n if transistor[\"source\"] == vdd:\n transistor[\"source\"] = \"VDD\"\n if transistor[\"drain\"] == out_pmos:\n transistor[\"drain\"] = \"OUTPUT\"\n\n # print(postfix_eval_stack_pmos)\n response = {\n 'inputSymbols': list(set(tokens_postfix) - {'~', '.', '+'}),\n 'PMOSCount': pmos_index,\n 'NMOSCount': nmos_index,\n 'PMOSMap': postfix_eval_stack_pmos[0],\n 'NMOSMap': postfix_eval_stack_nmos[0]\n }\n\n return response\n\n\ndef algorithm2(input_signals_values, postfix_eval_stack_pmos, postfix_eval_stack_nmos):\n \"\"\"\n Steps :\n 1. Read input signal values\n \t|--- Generate complement of input signal values\n 2. Find all junctions/terminals short by simulating transistor behaviour\n \t]--- PMOS Transistor Netlist\n \t|--- NMOS Transistor Netlist\n 3. Find presence of path from Power/Ground to OUTPUT\n \t]--- PMOS Transistor Netlist\n \t|--- NMOS Transistor Netlist\n \"\"\"\n # Step 2: Find all junctions/terminals short by simulating transistor behaviour\n pmos_netlist_short_jns = []\n nmos_netlist_short_jns = []\n # generating terminals/junctions short in PMOS netlist\n for transistor in postfix_eval_stack_pmos:\n if input_signals_values[transistor[\"gate\"]] == False:\n pmos_netlist_short_jns.append((transistor[\"source\"], transistor[\"drain\"]))\n # generating terminals/junctions short in NMOS netlists\n for transistor in postfix_eval_stack_nmos:\n if input_signals_values[transistor[\"gate\"]] == True:\n nmos_netlist_short_jns.append((transistor[\"source\"], transistor[\"drain\"]))\n\n # Step 3: Find presence of path from Power/Ground to OUTPUT\n\n # Find path from GND to OUTPUT in NMOS netlist\n initial_gnd_short = [\"GND\"]\n transistive_gnd_short = []\n\n nmos_output = \"Z\"\n\n # loop to check for no further elements are still available which means that there is ground and circuit ends\n while len(initial_gnd_short) != 0:\n transistive_gnd_short = []\n # generating the next hops using present hops\n for source_terminal, drain_terminal in nmos_netlist_short_jns:\n if source_terminal in initial_gnd_short:\n transistive_gnd_short.append(drain_terminal)\n # path found from GROUND to OUTPUT\n if \"OUTPUT\" in transistive_gnd_short:\n nmos_output = 0\n break\n initial_gnd_short = transistive_gnd_short\n # Find path from POWER(vdd) to OUTPUT in PMOS netlist\n initial_vdd_short = [\"VDD\"]\n transistive_vdd_short = []\n pmos_output = \"Z\"\n\n # until 'initial vdd_short' is not empty, i.e., NO MORE PATH FORWARD\n while len(initial_vdd_short) != 0:\n transistive_vdd_short = []\n # generating the next hops using present hops\n for source_terminal, drain_terminal in pmos_netlist_short_jns:\n if source_terminal in initial_vdd_short:\n transistive_vdd_short.append(drain_terminal)\n # path found from VDD to OUTPUT\n if \"OUTPUT\" in transistive_vdd_short:\n pmos_output = 1\n break\n initial_vdd_short = transistive_vdd_short\n\n response = {\n 'PMOSOutput': pmos_output,\n 'NMOSOutput': nmos_output\n }\n\n return response\n","sub_path":"MicroProject/simulator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"276845108","text":"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for intermodule.py.\"\"\"\n\nimport httplib\n\nimport mock\nimport webapp2\n\nfrom google.appengine.api import modules\nfrom google.appengine.api import urlfetch\n\nfrom upvote.gae.shared.common import basetest\nfrom upvote.gae.shared.common import intermodule\nfrom upvote.gae.shared.common import settings\nfrom upvote.gae.shared.common import settings_utils\n\n_TEST_DOMAIN = 'somemodule.appspot.com'\n\n\n@mock.patch.object(\n modules, 'get_hostname', return_value=_TEST_DOMAIN)\nclass SubmitIntermoduleRequestTest(basetest.UpvoteTestCase):\n\n @mock.patch.object(\n urlfetch, 'fetch',\n return_value=mock.Mock(status_code=httplib.OK, content='{}'))\n def testSuccess_WithoutData(self, mock_fetch, _):\n response = intermodule.SubmitIntermoduleRequest(\n 'some-module', '/api/path/whatever')\n\n self.assertEqual(httplib.OK, response.status_code)\n self.assertEqual('{}', response.content)\n\n mock_fetch.assert_called_once_with(\n 'https://%s/api/path/whatever' % _TEST_DOMAIN,\n method=urlfetch.GET,\n payload=None,\n headers=mock.ANY,\n deadline=mock.ANY,\n follow_redirects=False)\n\n @mock.patch.object(\n urlfetch, 'fetch',\n return_value=mock.Mock(status_code=httplib.OK, content='{}'))\n def testSuccess_WithData(self, mock_fetch, _):\n response = intermodule.SubmitIntermoduleRequest(\n 'some-module', '/api/path/whatever', data={'foo': 'bar'})\n\n self.assertEqual(httplib.OK, response.status_code)\n self.assertEqual('{}', response.content)\n\n mock_fetch.assert_called_once_with(\n 'https://%s/api/path/whatever' % _TEST_DOMAIN,\n method=urlfetch.POST,\n payload='foo=bar',\n headers=mock.ANY,\n deadline=mock.ANY,\n follow_redirects=False)\n\n @mock.patch.object(\n urlfetch, 'fetch',\n return_value=mock.Mock(status_code=httplib.OK, content=''))\n def testSuccess_NoReturnedData(self, mock_fetch, _):\n response = intermodule.SubmitIntermoduleRequest(\n 'some-module', '/api/path/whatever', data={'foo': 'bar'})\n\n self.assertEqual(httplib.OK, response.status_code)\n self.assertEqual('', response.content)\n\n @mock.patch.object(urlfetch, 'fetch')\n def testSuccess_Redirects(self, mock_fetch, _):\n def GenRedirect(url):\n return mock.Mock(status_code=httplib.FOUND, headers={'Location': url})\n\n # Rediret 4 times and succeed on the fifth.\n success = mock.Mock(status_code=httplib.OK, content='{}')\n redirect_urls = ['https://foo%d.com' % i for i in xrange(4)]\n mock_fetch.side_effect = [\n GenRedirect(url) for url in redirect_urls] + [success]\n\n response = intermodule.SubmitIntermoduleRequest(\n 'some-module', '/api/path/whatever')\n\n self.assertEqual(httplib.OK, response.status_code)\n self.assertEqual('{}', response.content)\n\n self.assertEqual(5, mock_fetch.call_count)\n\n for expected, call in zip(redirect_urls, mock_fetch.call_args_list[1:]):\n observed = call[0][0]\n self.assertEqual(expected, observed)\n\n @mock.patch.object(urlfetch, 'fetch')\n def testError_Redirects(self, mock_fetch, _):\n redirect = mock.Mock(\n status_code=httplib.FOUND, headers={'Location': 'https://foo.com'})\n mock_fetch.side_effect = (redirect,) * 5\n with self.assertRaises(urlfetch.Error):\n intermodule.SubmitIntermoduleRequest(\n 'some-module', '/api/path/whatever')\n\n self.assertEqual(5, mock_fetch.call_count)\n\n @mock.patch.object(urlfetch, 'fetch', side_effect=urlfetch.Error)\n def testError(self, *_):\n with self.assertRaises(urlfetch.Error):\n intermodule.SubmitIntermoduleRequest('some-module', '/api/path/whatever')\n\n\nif __name__ == '__main__':\n basetest.main()\n","sub_path":"upvote/gae/shared/common/intermodule_test.py","file_name":"intermodule_test.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"524866076","text":"from django.contrib import admin\n\nfrom .models import BanquetteAttendant, BanquetTable, BanquetTicket\nfrom django.contrib.auth.models import User\nfrom exhibitors.models import Exhibitor\nfrom fair.models import Fair\nfrom lib.util import image_preview\n\nimport csv\nfrom django.http import HttpResponse\n\ndef export_banquet_attendants_as_csv(modeladmin, request, queryset):\n response = HttpResponse(content_type=\"text/csv\")\n response['Content-Disposition'] = 'attachment; filename=banquet_attendants.csv'\n\n csv_headers = [\n 'Fair ID',\n 'First name',\n 'Last name',\n 'Email',\n 'Ticket Type',\n 'Table',\n 'Gender',\n 'Phone number',\n 'Allergies',\n 'Alcohol',\n 'Lactose free',\n 'Gluten free',\n 'Vegan',\n 'Job Title',\n 'Linkedin URL',\n 'Confirmed',\n 'Table ID',\n 'Ignored from placement',\n 'Seat Number'\n\n ]\n\n writer = csv.writer(response)\n writer.writerow(csv_headers)\n print(\"QUERY SET\", queryset, modeladmin, request)\n for attendant in queryset:\n writer.writerow([\n attendant.fair_id,\n attendant.first_name,\n attendant.last_name,\n attendant.email,\n attendant.ticket,\n attendant.table,\n attendant.gender,\n attendant.phone_number,\n attendant.allergies,\n attendant.wants_alcohol,\n attendant.wants_lactose_free_food,\n attendant.wants_gluten_free_food,\n attendant.wants_vegan_food,\n attendant.job_title,\n attendant.linkedin_url,\n attendant.confirmed,\n attendant.table_id,\n attendant.ignore_from_placement,\n attendant.seat_number\n ])\n return response\n\n@admin.register(BanquetteAttendant)\nclass BanquetAdmin(admin.ModelAdmin):\n actions = [export_banquet_attendants_as_csv]\n search_fields = ('first_name', 'last_name')\n\n@admin.register(BanquetTable)\nclass BanquetAdmin(admin.ModelAdmin):\n search_fields = ('table_name',)\n\n@admin.register(BanquetTicket)\nclass BanquetAdmin(admin.ModelAdmin):\n search_fields = ('name',)\n","sub_path":"banquet/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"69897891","text":"'''\n A patch-based mechanic is composed of three elements, a feature representation of the element,\n a search algorithm to find the element in an image and a patch updater.\n The interactions with the mechanics is easy, it has three main methods:\n * findTarget(array): returns the upper x,y and the w,h of the bounding box in which the\n element is.\n * getDescriptors(): returns the description array of the last patch found.\n * restartDescriptors(): the patch is reseted to the default version. A default version is\n always madatory.\n'''\nfrom core.features import ColorHistogramExtractor\nfrom core.trackers.kcftracker import KCFTracker\nfrom collections import namedtuple\nimport numpy as np\nimport cv2\n\nPoint = namedtuple('Point', ['x', 'y'])\n\n\ndef checkPatch(func):\n def check(*args):\n if args[0].root_patch is not None:\n return func(*args)\n else:\n return\n return check\n\n\nclass HistogramDetector:\n '''\n This is a static class that does not support instantiation\n '''\n descriptor = ColorHistogramExtractor()\n\n def __init__(self):\n raise NotImplementedError\n\n @staticmethod\n def detectObject(object, source):\n pass\n\n\nclass _PatchBasedMechanics:\n STATE_UNINITIATED = 0\n STATE_INITIATED = 1\n STATE_INTERRUPTED = 2\n STATE_FINISHED = 3\n\n def __init__(self, root_patch=None):\n self.root_patch = root_patch\n self.original_patch = root_patch\n self.descriptor = None\n\n def assignRootPatch(self, patch):\n if patch.descriptions[0][0] == str(self.descriptor):\n if self.original_patch is None:\n self.original_patch = patch\n self.root_patch = patch\n\n def findTarget(self, obj):\n raise NotImplementedError\n\n def updateTarget(self):\n raise NotImplementedError\n\n def getDescriptors(self):\n raise NotImplementedError\n\n def restartTarget(self):\n raise NotImplementedError\n\n\nclass FeatureFusionUpdateMechanic(_PatchBasedMechanics):\n def __init__(self, default_patch=None):\n super(FeatureFusionUpdateMechanic, self).__init__()\n\n\nclass ColorTracker:\n @staticmethod\n def rgb2hsv(frame):\n return cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)\n\n @staticmethod\n def distance(p1, p2):\n if p1 is None or p2 is None:\n return 0\n return np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n\n def __init__(self, configs):\n # Target color is expressed in HSV color space\n self._color = [configs['default_hsi_tcolor_lower'], configs['default_hsi_tcolor_upper']]\n self._color = [np.array(c) for c in self._color]\n self._erosion_kernel = configs['default_erode_kernel_size']\n self._dilate_kernel = configs['default_dilate_kernel_size']\n self._previous_point = None\n\n def findTarget(self, frame):\n # Frame must be RGB\n # First obtain the respective HSV value\n frame_hsv = ColorTracker.rgb2hsv(frame)\n\n # Threshold the HSV image to get only red colors\n mask = cv2.inRange(frame_hsv, self._color[0], self._color[1])\n\n # Bitwise-AND mask and original image\n res = cv2.bitwise_and(frame, frame, mask=mask)\n ret, res = cv2.threshold(res, 20, 255, 0)\n\n # Reduce dimensionality of result\n res = res[:, :, 0]\n\n # If no drone was found, return None\n if len(set(res.flatten())) == 1:\n return None\n\n # Eliminate noise, and increment interesting areas\n kernel = np.ones((self._erosion_kernel, self._erosion_kernel), np.uint8)\n res = cv2.erode(res, kernel, iterations=1)\n kernel = np.ones((self._dilate_kernel, self._dilate_kernel), np.uint8)\n res = cv2.dilate(res, kernel, iterations=2)\n\n # Segmentate drone posible places via find contours. The drone will be chosen to be\n # the largest element of all. A second choice parameter is available, and that is, to\n # considerate the vecinity of the previous location only\n contours = cv2.findContours(res, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]\n if not len(contours):\n return None\n\n mins_x, mins_y = list(), list()\n maxs_x, maxs_y = list(), list()\n for contour in contours:\n cnt = [c[0] for c in contour]\n cnt_x = [c[0] for c in cnt]\n cnt_y = [c[1] for c in cnt]\n mins_x.append(min(cnt_x))\n mins_y.append(min(cnt_y))\n maxs_x.append(max(cnt_x))\n maxs_y.append(max(cnt_y))\n\n dists_x = [ma - mi for ma, mi in zip(maxs_x, mins_x)]\n dists_y = [ma - mi for ma, mi in zip(maxs_y, mins_y)]\n\n areas = [x * y for x, y in zip(dists_x, dists_y)]\n\n index_of_largest = areas.index(max(areas))\n\n top_left_pt = (mins_x[index_of_largest], mins_y[index_of_largest])\n bottom_right_pt = (maxs_x[index_of_largest], maxs_y[index_of_largest])\n\n x, y = top_left_pt[0], top_left_pt[1]\n w, h = bottom_right_pt[0] - top_left_pt[0], bottom_right_pt[1] - top_left_pt[1]\n\n self._previous_point = (x + w // 2, y + h // 2)\n\n return ((x, y), (x + w, y + h))\n\n\nclass MixedTracker(_PatchBasedMechanics):\n '''\n This is a MixedTracker, it implements both TDLTracker and ColorTracker, when initializating\n this tracker you must specify which of the two will be the main tracker and which will be\n the support tracker. The default execution will be that of the main tracker, but, at fixed\n intervals, a second hand opinion is issued to the support tracker, via the method\n (getSupportDecision), and an agreement function is executed to determine the following\n state of the algorithm.\n\n Current limitations:\n The support tracker can not be a memory algorithm, that is, it can not rely on\n previous data as it is only called sporadically. As such, any tracker that subclasses\n _PatchBasedMechanics is not suitable for observing. Well, you can do it, but it is not\n recomended.\n '''\n\n MAXIMUM_AGREE_DISTANCE = 20\n\n @staticmethod\n def distance(p1, p2):\n if p1 is None or p2 is None:\n return 0\n return np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n\n def __init__(self, main=None, support=None):\n self._main = main\n self._support = support\n\n # Generalized call functions that receive the next frame as a parameter\n self._main_find = None\n self._support_find = None\n\n # Interval to check for support decision, counted in frames\n self.interval = 50\n\n def overloadFindFunctions(self, main_overload, support_overload):\n self._main_find = main_overload\n self._support_find = support_overload\n\n def findTarget(self, frame, state):\n response = None\n if self._main_find is not None and self._support_find is not None:\n # See if support help is required\n if self.interval == 50:\n self.interval = 0\n posible_main = self._main_find(frame, state)\n posible_support = self._support_find(frame)\n\n # Reach an agreement between the two conclusions\n response = self.agreement(main=posible_main, support=posible_support)\n\n # Change the main behavior, if possible, as a result of the agreement\n self.changeOnAgreement(response, frame)\n else:\n response = self._main_find(frame, state)\n self.interval += 1\n return response\n\n def getSupportDecision(self, frame):\n self._support_find(frame)\n\n def changeOnAgreement(self, agreement, frame):\n x, y = agreement[0][0], agreement[0][1]\n w, h = abs(x - agreement[1][0]), abs(y - agreement[1][1])\n self._main.resetPatch([x, y, w, h], frame)\n\n def agreement(main=None, support=None):\n if main is None or support is None:\n return main\n else:\n # Calculate the distance between the main and support decisions points\n # Use MixedTracker own distance function for this\n distance = MixedTracker.distance(main, support)\n\n # Tracked points differ too much\n if distance > MixedTracker.MAXIMUM_AGREE_DISTANCE:\n # Support method is trusted for x, y location. Main is trusted for size\n middle = (\n support[0][0] + abs(support[0][0] - support[1][0]) // 2,\n support[0][1] + abs(support[0][1] - support[1][1]) // 2)\n\n agreement = (\n (middle[0] - abs(main[0][0] - main[1][0]) // 2,\n middle[1] - abs(main[0][1] - main[1][1]) // 2),\n (middle[0] + abs(main[0][0] - main[1][0]) // 2,\n middle[1] + abs(main[0][1] - main[1][1]) // 2)\n )\n\n return agreement\n\n else: # Tracked points are similar, trust main, unchanged\n return main\n\n\nclass TDLTracker(_PatchBasedMechanics):\n '''\n This is a Tracking, learning and detection tracker, implemented in OpenCV.\n This is the only instance that requires the OpenCV library, use other trackers if you\n do not wish to depend on it.\n '''\n\n def __init__(self, root_patch=None):\n super(TDLTracker, self).__init__(root_patch)\n self.descriptor = ColorHistogramExtractor()\n self.match_method = cv2.TM_CCOEFF\n self.current_frame = None\n self.patch_h, self.patch_w = 0, 0\n self.tracker = None\n\n self.dispatcher = {\n TDLTracker.STATE_UNINITIATED: self.findTarget,\n TDLTracker.STATE_INITIATED: self.updateTarget,\n TDLTracker.STATE_INTERRUPTED: self.restartTarget,\n TDLTracker.STATE_FINISHED: self.updateTarget,\n }\n\n def assignRootPatch(self, patch):\n super().assignRootPatch(patch)\n self.patch_h, self.patch_w, _ = self.root_patch.patch.shape\n\n @checkPatch\n def feedFrame(self, frame, external_status):\n self.current_frame = frame\n return self.dispatcher[external_status]()\n\n def restartTarget(self):\n self.assignRootPatch(self.original_patch)\n self.findTarget()\n\n def resetPatch(self, min_patch, frame):\n self.tracker.init(min_patch, frame)\n\n def findTarget(self):\n result = cv2.matchTemplate(self.current_frame, self.root_patch.patch, self.match_method)\n _, _, _, max_loc = cv2.minMaxLoc(result)\n\n # Select found target\n target_top_left = max_loc\n target_bottom_right = (\n target_top_left[0] + self.patch_w,\n target_top_left[1] + self.patch_h)\n\n # Update Patch with current info\n patch = self.root_patch.copy()\n patch.patch = self.current_frame[\n target_top_left[1]: target_bottom_right[1] + 1,\n target_top_left[0]: target_bottom_right[0] + 1, :]\n patch.p1 = Point(x=target_top_left, y=target_bottom_right)\n self.assignRootPatch(patch)\n\n self.tracker = KCFTracker(True, True, True)\n self.tracker.init(\n [target_top_left[0], target_top_left[1], self.patch_w, self.patch_h],\n self.current_frame)\n\n return (target_top_left, target_bottom_right)\n\n def updateTarget(self):\n box = self.tracker.update(self.current_frame)\n box = [int(b) for b in box]\n for b in box:\n if b < 0:\n print('ERROR')\n return\n\n # Update Patch with current info\n patch = self.root_patch.copy()\n patch.patch = self.current_frame[\n box[1]: box[1] + box[3] + 1,\n box[0]: box[0] + box[2] + 1, :]\n patch.p1 = Point(x=box[0], y=box[1])\n self.assignRootPatch(patch)\n\n return ((box[0], box[1]), (box[0] + box[2], box[1] + box[3]))\n","sub_path":"fstone/director/core/mechanics.py","file_name":"mechanics.py","file_ext":"py","file_size_in_byte":11992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"323850991","text":"import functools\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n \"\"\"\n Same thought as LC96, we can generate trees recursively.\n If the root of tree is i\n The left subtree has a sequence of [start ... i - 1]\n The right subtree has a sequence of [i + 1 ... end]\n\n Can use cache to improve performance.\n \"\"\"\n\n def generateTrees(self, n: int) -> List[TreeNode]:\n if not n:\n return []\n return self.generate_subtrees(1, n)\n\n @functools.lru_cache(None)\n def generate_subtrees(self, start, end):\n res = []\n if end < start:\n return [None]\n\n for i in range(start, end + 1):\n # More concise than declare left/right list. Have same performance.\n for left in self.generate_subtrees(start, i - 1):\n for right in self.generate_subtrees(i + 1, end):\n node = TreeNode(i)\n node.left = left\n node.right = right\n res.append(node)\n return res\n","sub_path":"0095_Unique_Binary_Search_Trees_II.py","file_name":"0095_Unique_Binary_Search_Trees_II.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"486060633","text":"import os\nimport time\nimport torch\n# import random\nfrom transformers import AdamW, AutoTokenizer, BertForSequenceClassification, \\\n RobertaForSequenceClassification, RobertaTokenizer\nimport numpy as np\nimport pandas as pd\nfrom tqdm.notebook import tqdm\nfrom sklearn.utils.extmath import softmax\nfrom sklearn import model_selection\nfrom sklearn.metrics import classification_report, f1_score\nfrom src.com.util.util_model import EarlyStopping, make_print_to_file, AverageMeter, Dataset, seed_all\n\nProj_dir = os.path.abspath(os.path.join(os.getcwd(), \"../../../\"))\nbest_score = None\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\n\nclass config:\n LR_LIST = [1e-5]\n KFOLD = 3\n SAVE_DIR = Proj_dir + '/ensemble_model'\n TRAIN_FILE = '../../../data/train.tsv'\n VAL_FILE = '../../../data/valid.tsv'\n TEST_FILE = '../../../data/test.tsv'\n OOF_FILE = os.path.join(SAVE_DIR, 'output/oof_all.csv')\n MAX_LEN = 96\n MODEL = 'cardiffnlp/twitter-roberta-base'\n # fill_mask = pipeline(\"fill-mask\", model=MODEL, tokenizer=MODEL)\n TOKENIZER = AutoTokenizer.from_pretrained(MODEL)\n EPOCHS_LIST = [10]\n BATCH_SIZE_LIST_T = [32]\n BATCH_SIZE_LIST_V = [32]\n DEVICE = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n\n\ndef train_fn(data_loader, model, optimizer, device):\n model.train()\n losses = AverageMeter()\n tk0 = tqdm(data_loader, total=len(data_loader))\n\n for bi, d in enumerate(tk0):\n ids = d['ids']\n mask = d['mask']\n label = d['label']\n ids = ids.to(device, dtype=torch.long)\n label = label.to(device, dtype=torch.long)\n mask = mask.to(device, dtype=torch.long)\n\n model.zero_grad()\n k = model(input_ids=ids, attention_mask=mask, labels=label)\n loss = k['loss']\n # logits = k['logits']\n\n loss.backward()\n optimizer.step()\n\n losses.update(loss.item(), ids.size(0))\n tk0.set_postfix(loss=losses.avg)\n\n\ndef eval_fn(data_loader, model, device):\n model.eval()\n losses = AverageMeter()\n tk0 = tqdm(data_loader, total=len(data_loader))\n yt, yp = [], []\n\n for bi, d in enumerate(tk0):\n ids = d['ids']\n mask = d['mask']\n label = d['label']\n\n ids = ids.to(device, dtype=torch.long)\n label = label.to(device, dtype=torch.long)\n mask = mask.to(device, dtype=torch.long)\n\n with torch.no_grad():\n k = model(input_ids=ids, attention_mask=mask, labels=label)\n loss = k['loss']\n logits = k['logits']\n # loss, logits = model(input_ids=ids, attention_mask=mask, labels=label)\n\n logits = logits.detach().cpu().numpy()\n\n preds = softmax(logits)\n pred_labels = np.argmax(preds, axis=1).flatten()\n ground_labels = label.to('cpu').numpy()\n # print(\"predict label:\", pred_labels.tolist(), \";actual label:\", ground_labels.tolist())\n yt = yt + ground_labels.tolist()\n yp = yp + pred_labels.tolist()\n\n losses.update(loss.item(), ids.size(0))\n tk0.set_postfix(loss=losses.avg)\n\n return f1_score(yt, yp)\n\n\ndef test_fn(data_loader, model, device):\n model.eval()\n tk0 = tqdm(data_loader, total=len(data_loader))\n test_preds = []\n\n for bi, d in enumerate(tk0):\n ids = d['ids']\n mask = d['mask']\n label = d['label']\n\n ids = ids.to(device, dtype=torch.long)\n mask = mask.to(device, dtype=torch.long)\n label = label.to(device, dtype=torch.long)\n\n with torch.no_grad():\n k = model(input_ids=ids, attention_mask=mask, labels=label)\n # loss = k['loss']\n logits = k['logits']\n\n logits = logits.detach().cpu().numpy()\n preds = softmax(logits)[:, 1]\n test_preds = test_preds + preds.tolist()\n\n return test_preds\n\n\ndef test_prediction(kfold=None):\n df = pd.read_csv(config.VAL_FILE, sep='\\t')\n # df.columns = ['tweet_id', 'user_id', 'tweet']\n df['label'] = 0\n\n\n scores = pd.DataFrame()\n scores['tweet_id'] = df['tweet_id']\n device = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n\n if kfold is not None:\n for i in range(config.KFOLD):\n # run_fold(name='cardiffnlp/twitter-roberta-base', fold_idx=0, seed=7426)\n # run_fold(name='digitalepidemiologylab/covid-twitter-bert', fold_idx=1, seed=76)\n # run_fold(name='roberta-large', fold_idx=2, seed=2078)\n if i == 0:\n model = RobertaForSequenceClassification.from_pretrained(\n 'cardiffnlp/twitter-roberta-base', num_labels=2)\n config.TOKENIZER = AutoTokenizer.from_pretrained('cardiffnlp/twitter-roberta-base')\n\n elif i == 2:\n model = RobertaForSequenceClassification.from_pretrained(\n 'roberta-large', num_labels=2)\n config.TOKENIZER = RobertaTokenizer.from_pretrained('roberta-large')\n else:\n model = BertForSequenceClassification.from_pretrained(\n 'digitalepidemiologylab/covid-twitter-bert', num_labels=2)\n config.TOKENIZER = AutoTokenizer.from_pretrained('digitalepidemiologylab/covid-twitter-bert')\n\n model.to(device)\n test_dataset = Dataset(\n text=df.tweet.values,\n label=df.label.values,\n config=config\n )\n\n test_data_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=config.BATCH_SIZE_LIST_T[0],\n num_workers=4\n )\n model.load_state_dict(\n torch.load(os.path.join(config.SAVE_DIR, f'model/model_ensemble_all_{i}.bin')))\n y_preds = test_fn(test_data_loader, model, device)\n scores[f'prob_{i}'] = y_preds\n scores['avg'] = sum(scores[f'prob_{i}'] for i in range(config.KFOLD)) / config.KFOLD\n # scores[\n # 'prob_4']) / 5\n # scores['vote_1'] = (scores['prob_0'] >= 0.5) * 1 + (scores['prob_1'] >= 0.5) * 1 + (scores['prob_2'] >= 0.5) * 1\n # + (scores['prob_3'] >= 0.5) * 1 + (scores['prob_4'] >= 0.5) * 1\n # scores['vote_0'] = 5 - scores['vote_1']\n # max vote\n # scores['max_vote'] = (scores['vote_1'] >= scores['vote_0']) * 1\n scores['max_prob'] = (np.max(\n [scores['prob_0'], scores['prob_1'], scores['prob_2']],\n axis=0) >= 0.5) * 1\n # average probality\n scores['average_prob'] = (scores['avg'] >= 0.5) * 1\n scores['label'] = scores['average_prob']\n\n # print(i, y_preds)\n # else:\n # model.load_state_dict(torch.load(os.path.join(config.SAVE_DIR, f'model/model_ensemble_all.bin')))\n # y_preds = test_fn(test_data_loader, model, device)\n # scores[f'prob'] = y_preds\n # scores['preds'] = (scores['prob'] >= 0.5) * 1\n # scores['label'] = scores['preds']\n\n # .map({1: 'INFORMATIVE', 0: 'UNINFORMATIVE'})\n scores.to_csv(os.path.join(config.SAVE_DIR, 'output/scores_val_all_3.csv'), index=False)\n # submission = pd.DataFrame(scores, columns=['tweet_id', 'label'])\n # print(len(scores[scores.average_prob == 0]))\n # print(len(scores[scores.max_prob == 0]))\n # print(len(scores[scores.max_vote == 0]))\n # submission.to_csv(os.path.join(config.SAVE_DIR, 'output/submission_com2.tsv'), index=False, sep='\\t')\n\n # with open(os.path.join(config.SAVE_DIR, 'submission.txt'), 'w') as f:\n # for i in scores['labels'].values:\n # f.write(i + '\\n')\n\n\ndef run(model, df_train, df_val, fold=None):\n train_dataset = Dataset(\n text=df_train.tweet.values,\n label=df_train.label.values,\n config=config\n )\n valid_dataset = Dataset(\n text=df_val.tweet.values,\n label=df_val.label.values,\n config=config\n )\n device = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n model.to(device)\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n print('Starting training....')\n test_predictions_dic = {}\n es = EarlyStopping(patience=10, mode=\"max\")\n for lr in config.LR_LIST:\n for batch_size_t in config.BATCH_SIZE_LIST_T:\n es.early_stop = False\n es.counter = 0\n for epoches in config.EPOCHS_LIST:\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size_t,\n num_workers=4\n )\n valid_data_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=batch_size_t,\n num_workers=4\n )\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr)\n for epoch in range(epoches):\n # print(f'training epoch size= {epoches} , lr= {lr} ,batch size={batch_size_t} | Epoch :{epoch + 1}')\n train_fn(train_data_loader, model, optimizer, device)\n # print(\n # f'validating epoch size= {epoches} , lr= {lr} ,batch size={batch_size_t} | Epoch :{epoch + 1}')\n valid_loss = eval_fn(valid_data_loader, model, device)\n print(f'epoch size= {epoches} , lr= {lr} ,batch size={batch_size_t} | Epoch :{epoch + 1} '\n f'| Validation Score :{valid_loss}')\n if fold is None:\n es(valid_loss, model,\n model_path=os.path.join(config.SAVE_DIR, f'model/model_ensemble_all.bin'))\n else:\n es(valid_loss, model,\n model_path=os.path.join(config.SAVE_DIR,\n f'model/model_ensemble_all_{fold}.bin'))\n if es.early_stop:\n print('Early stopping')\n break\n\n print('Predicting for OOF')\n if fold is None:\n model.load_state_dict(\n torch.load(os.path.join(config.SAVE_DIR, 'model/model_ensemble_all.bin')))\n else:\n model.load_state_dict(\n torch.load(\n os.path.join(config.SAVE_DIR, f'model/model_ensemble_all_{fold}.bin')))\n model.to(device)\n\n test_predictions = test_fn(valid_data_loader, model, device)\n # if best_flag:\n print('best score prediction acc={} in epoches={} batch_size={} lr={}'.format(\n np.mean(test_predictions), epoches, batch_size_t, lr))\n test_predictions_dic[str(epoches) + '-' + str(batch_size_t) + '-' + str(lr)] = test_predictions\n return test_predictions, test_predictions_dic\n\n\ndef run_fold(name, fold_idx, seed):\n \"\"\"\n Perform k-fold cross-validation\n \"\"\"\n if name.__contains__('roberta-large'):\n # MODEL = 'roberta-large'\n model = RobertaForSequenceClassification.from_pretrained(name, num_labels=2)\n config.TOKENIZER = RobertaTokenizer.from_pretrained(name)\n\n elif name.__contains__('twitter-roberta-base'):\n # MODEL = 'cardiffnlp/twitter-roberta-base'\n model = RobertaForSequenceClassification.from_pretrained(name, num_labels=2)\n config.TOKENIZER = AutoTokenizer.from_pretrained(name)\n else:\n # MODEL = 'digitalepidemiologylab/covid-twitter-bert'\n model = BertForSequenceClassification.from_pretrained(name, num_labels=2)\n config.TOKENIZER = AutoTokenizer.from_pretrained(name)\n seed_all(seed=seed)\n df_train = pd.read_csv(config.TRAIN_FILE, sep='\\t')\n # only when we use original data\n df_train.columns = ['tweet_id', 'user_id', 'tweet', 'label']\n df_train = df_train.sample(frac=1).reset_index(drop=True)\n train = df_train\n\n # dividing folds\n kf = model_selection.StratifiedKFold(n_splits=config.KFOLD, shuffle=True, random_state=seed)\n idx = None\n\n for fold, (train_idx, val_idx) in enumerate(kf.split(X=train, y=train.label.values)):\n train.loc[val_idx, 'kfold'] = int(fold)\n if fold == fold_idx:\n idx = val_idx\n\n if os.path.isfile(config.OOF_FILE):\n scores = pd.read_csv(config.OOF_FILE)\n print('Found oof file')\n else:\n scores = train.copy()\n scores['oof'] = 0\n scores.to_csv(config.OOF_FILE, index=False)\n print('Created oof file')\n df_train = train[train.kfold != fold_idx]\n df_val = train[train.kfold == fold_idx]\n y, y_dict = run(model, df_train, df_val, fold_idx)\n scores.loc[idx, 'oof'] = y\n\n scores.to_csv(config.OOF_FILE, index=False)\n\n\ndef run_result():\n df = pd.read_csv(config.OOF_FILE)\n df['gold'] = df['label']\n # .map({'INFORMATIVE': 1, 'UNINFORMATIVE': 0})\n df.head(3)\n df['pred'] = (df['oof'] >= 0.5) * 1\n print(classification_report(df['gold'].values, df['pred'].values))\n from sklearn.metrics import roc_auc_score\n\n roc_auc_score(df['gold'].values, df['oof'].values)\n thresholds = np.arange(0, 1, 0.001)\n fscores = [f1_score(df['gold'].values, (df['oof'] >= t) * 1) for t in thresholds]\n idx = np.argmax(fscores)\n print(thresholds[idx], fscores[idx])\n\n\nif __name__ == \"__main__\":\n start = time.perf_counter()\n make_print_to_file(path=config.SAVE_DIR)\n # run_fold(name='cardiffnlp/twitter-roberta-base', fold_idx=0, seed=7426)\n # run_fold(name='digitalepidemiologylab/covid-twitter-bert', fold_idx=1, seed=76)\n # run_fold(name='roberta-large', fold_idx=2, seed=2078)\n # # #\n # run_result()\n\n test_prediction(kfold=config.KFOLD)\n end = time.perf_counter()\n time_cost = str((end - start) / 60)\n print(\"time-cost:\", time_cost)\n","sub_path":"run_ensemble_all.py","file_name":"run_ensemble_all.py","file_ext":"py","file_size_in_byte":14128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"66534790","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# NCTR, Nile Center for Technology Research\n# Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\nclass account_partner_balance(osv.osv_memory):\n \"\"\"\n This wizard will provide the partner balance report by periods, between any two dates.\n \"\"\"\n _inherit = 'account.partner.balance'\n _columns = {\n 'acc_ids': fields.many2many('account.account', 'account_common_partner_balance_account_rel', 'partner_bal_id', 'account_id', 'Accounts', required=True),\n 'partner_ids': fields.many2many('res.partner', 'account_partner_balance_partner_rel', 'partner_bal_id', 'partner_id', 'Partners'),\n }\n \n def _get_partner(self, cr, uid, context=None):\n return self.pool.get('res.partner').search(cr, uid, [], context=context)\n\n\n def onchange_partner_account(self, cr, uid, ids, result_selection='customer', chart_account_id= -1, context=None):\n res = {}\n result = ['receivable']\n account_obj = self.pool.get('account.account')\n if result_selection == 'supplier':\n result = ['payable']\n elif result_selection == 'customer_supplier':\n result = ['receivable', 'payable']\n children = account_obj._get_children_and_consol(cr, uid, chart_account_id)\n res['value'] = {'acc_ids': account_obj.search(cr, uid, [('id', 'in', tuple(children)), ('type', 'in', tuple(result))], context=context)}\n return res\n\n def onchange_chart_id(self, cr, uid, ids, chart_account_id= -1, context=None):\n res = {}\n if chart_account_id:\n account_obj = self.pool.get('account.account')\n children = account_obj._get_children_and_consol(cr, uid, chart_account_id, context=context)\n company_id = self.pool.get('account.account').browse(cr, uid, chart_account_id, context=context).company_id.id\n res['value'] = {'company_id': company_id, 'acc_ids': account_obj.search(cr, uid, [('id', 'in', tuple(children)),('type','not in',('view','consolidation'))], context=context)}\n return res \n\n _defaults = {\n 'display_partner': 'non-zero_balance',\n }\n \n def _print_report(self, cr, uid, ids, data, context=None):\n res = super(account_partner_balance, self)._print_report(cr, uid, ids, data, context=context)\n data = res['datas']\n data['form'].update(self.read(cr, uid, ids, ['acc_ids', 'initial_balance', 'partner_ids'])[0])\n res.update({'datas':data, 'report_name': 'account.partner.balance.arabic'})\n return res\n \naccount_partner_balance()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"v_7/GDS/common_shamil_v3/account_arabic_reports/wizard/account_report_partner_balance.py","file_name":"account_report_partner_balance.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"157712342","text":"#!C:\\Users\\David\\Desktop\\library_test\\project\\Scripts\\python.exe\n# EASY-INSTALL-ENTRY-SCRIPT: 'Flask==1.1.1','console_scripts','flask'\n__requires__ = 'Flask==1.1.1'\nimport sys\nfrom pkg_resources import load_entry_point\n\nif __name__ == '__main__':\n sys.exit(\n load_entry_point('Flask==1.1.1', 'console_scripts', 'flask')()\n )\n","sub_path":"Scripts/flask-script.py","file_name":"flask-script.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"397164320","text":"import imports\n\ndef triplet_sum_close_to_target(nums, target):\n\n nums.sort()\n smallest_difference = imports.Counter()\n\n for i in range(len(nums)):\n start = i + 1\n end = len(nums)-1\n while start < end:\n diff = target - nums[i] - nums[start] - nums[end]\n\n if diff == 0:\n return target - diff\n\n if abs(diff) < abs(smallest_difference) or \\\n (abs(diff) == abs(smallest_difference) and\n diff > smallest_difference):\n smallest_difference = diff\n\n if diff > 0:\n start += 1\n else:\n end -= 1\n return target - smallest_difference\n\n\ndef triplet_sum_close_to_target2(nums, target):\n smallest_diff = imports.inf\n\n for i in range(len(nums)):\n\n start = i+1\n end = len(nums) - 1\n while start < end:\n diff = target - (nums[i] + nums[start] + nums[end])\n if diff == 0:\n return 0\n\n if abs(diff) < abs(smallest_diff) or \\\n (abs(diff) == abs(smallest_diff) and\n diff > smallest_diff):\n smallest_diff = diff\n\n if diff > 0:\n start += 1\n else:\n end -= 1\n\n return target - smallest_diff\n","sub_path":"revise-daily/arjuna-vishwamitra-abhimanyu/educative/2-two-pointers/3_triplet_sum_close_to_target.py","file_name":"3_triplet_sum_close_to_target.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"424217042","text":"import sys\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import GLib, Gio, Gtk, Gdk, GObject\nfrom gi.repository.GdkPixbuf import Pixbuf\n\nfrom decimal import Decimal\nimport datetime\nfrom datetime import date,time,datetime\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas\n\nfrom functools import reduce\nfrom views.common import YesNoDialog\nfrom views import searchview,formview,listview,treeview,ganttview\n\n__all__ = ['SearchView','FormView','ListView','TreeView']\n\nclass CellRendererPixbuf(Gtk.CellRendererPixbuf):\n\t__gsignals__ = {\n\t'activate': (GObject.SIGNAL_ACTION, None, (str,))\n\t}\n\nclass BaseView(object):\n\n\t__slots__ = ['conn','modelapi','parent','widget','mode','searchField','searchValues','domain','path','editable','autosearch','many2onefield','many2manyobj','many2manyrel','many2manyfieldid1','many2manyfieldid2','viewtype']\n\n\t_view_classes = {}\n\n\tdef __init__(self,**kwargs):\n\t\tfor key in kwargs.keys():\n\t\t\tif key in self.__slots__:\n\t\t\t\tsetattr(self,'_' + key,kwargs[key])\n\t\t\telse:\n\t\t\t\traise Exception('Initialize argument not present: %s',(key,))\n\n\t\tself._view_classes['search'] = SearchView\n\t\tself._view_classes['list'] = ListView\n\t\tself._view_classes['tree'] = TreeView\n\t\tself._view_classes['form'] = FormView\n\t\tself._view_classes['gantt'] = GanttView\n\t\tself._view_classes['chart'] = ChartView\n\t\tself._view_classes['kanban'] = ChartView\n\t\tself._view_classes['mdx'] = ChartView\n\n\t@property\n\tdef SEARCH(self):\n\t\treturn self._view_classes['search']\n\n\t@property\n\tdef LIST(self):\n\t\treturn self._view_classes['list']\n\n\t@property\n\tdef TREE(self):\n\t\treturn self._view_classes['tree']\n\n\t@property\n\tdef FORM(self):\n\t\treturn self._view_classes['form']\n\n\t@property\n\tdef GANTT(self):\n\t\treturn self._view_classes['gantt']\n\n\t@property\n\tdef CHART(self):\n\t\treturn self._view_classes['chart']\n\n\t@property\n\tdef KANBAN(self):\n\t\treturn self._view_classes['kanban']\n\n\t@property\n\tdef MDX(self):\n\t\treturn self._view_classes['mdx']\n\n\t@property\n\tdef model(self):\n\t\treturn self._modelapi\n\t\n\tdef _getTitle(self):\n\t\td = self._model_info['description'].split(' ')\n\t\tif len(d) > 2:\n\t\t\treturn reduce(lambda x,y: x + ' ' + y,d[2:])\n\t\telif len(d) == 2:\n\t\t\treturn reduce(lambda x,y: x + ' ' + y,d)\n\t\telif len(d) == 1:\n\t\t\treturn d[0]\n\n\tdef do_import(self,filename):\n\t\timport csv\n\t\tf = open(filename)\n\t\tr = csv.DictReader(f)\n\t\tfields = r.fieldnames\n\t\tvalues = []\n\t\tfor row in r:\n\t\t\tfor field in fields:\n\t\t\t\tif len(row[field]) == 0:\n\t\t\t\t\trow[field] = None\n\t\t\t\telse:\n\t\t\t\t\tct = self._model_columns[field]['type']\n\t\t\t\t\tif ct == 'integer':\n\t\t\t\t\t\trow[field] = int(row[field])\n\t\t\t\t\telif ct in ('flota','double'):\n\t\t\t\t\t\trow[field] = float(row[field])\n\t\t\t\t\telif ct in ('real','decimal','numeric'):\n\t\t\t\t\t\trow[field] = Decimal(row[field])\n\t\t\t\t\telif ct == 'selection':\n\t\t\t\t\t\trow[field] = row[field] \n\t\t\t\t\t\n\t\t\tvalues.append(list(row.values()))\n\t\t\n\t\tif len(values) > 0:\n\t\t\tir = self.upsert(fields,values)\n\t\t\t#print('IR:',ir,values)\n\t\t\tself.commit()\n\t\n\n\tdef do_export(self,filename,fields,cond):\n\t\timport csv\n\t\tf= open(filename,'w')\n\t\tw = csv.DictWriter(f,fields)\n\t\tw.writeheader()\n\t\trows = self.select(fields,cond)\n\t\tfor row in rows:\n\t\t\tfor key in row.keys():\n\t\t\t\tif key != 'id' and self._model_columns[key]['type'] in ('many2one','related'):\n\t\t\t\t\tif row[key]['name'] and len(row[key]['name']) > 0:\n\t\t\t\t\t\trow[key] = row[key]['name']\n\t\t\t\t\telse:\n\t\t\t\t\t\trow[key] = ''\n\t\t\t\telif key != 'id' and self._model_columns[key]['type'] == 'selection':\n\t\t\t\t\trow[key] = self._selectionFields[key][row[key]]\n\t\n\t\t\tw.writerow(row)\n\t\n\t\tf.close()\n\nclass BaseViewList(BaseView):\n\t\n\t_selectionsModel = {}\n\t\n\tdef _load_meta_from_wa(self,action_id):\n\t\treturn self._conn._message(['ui','get_meta_by_window_action_id_v2',{'action_id':action_id}])\n\n\tdef _load_meta_from_view(self,viewname):\n\t\treturn self._conn._message(['ui','get_view_by_name_v2',{'name':viewname}])\n\n\tdef _load_meta(self,viewname=None,action_id = None):\n\t\tif viewname:\n\t\t\tinfo,view,src,available = self._load_meta_from_view(viewname)\n\t\telse:\n\t\t\tmeta = self._load_meta_from_wa(action_id)[0]\n\t\t\t\n\t\tself._model = meta['root']\n\t\tself._info = meta['models']\n\t\tself._view = meta['models'][self._model]['views']['search']\n\t\tself._available = meta['models'][self._model]['allow']\t\n\t\tself._models_info = meta['models']\n\t\tself._model_info = self._models_info[self._model]['meta']\n\t\tself._model_columns = self._model_info['columns']\n\t\tself._view_columns = self._view['columns']\t\t\n\t\tself._fieldindex = {}\n\t\tself._viewfieldindex = {}\n\t\t\n\t\tself._selectionFromCode = {} \n\t\tself._selectionToCode = {} \n\t\t\n\t\tfor key in filter(lambda x: self._model_columns[x]['type'] == 'selection',self._view_columns.keys()):\n\t\t\tself._selectionFromCode[key] = {} \n\t\t\tself._selectionToCode[key] = {} \n\t\t\tself._selectionsModel[key] = Gtk.ListStore(str)\n\t\t\tfor k,v in self._model_columns[key]['selections']:\n\t\t\t\tself._selectionFromCode[key][k] = v \n\t\t\t\tself._selectionToCode[key][v] = k\n\t\t\t\tself._selectionsModel[key].append([v])\n\t\ti = 1\n\t\tself._viewfieldindex['id'] = 0\n\t\tfor k in self._view_columns.keys():\n\t\t\tself._viewfieldindex[k] = i\n\t\t\ti += 1\n\n\tdef _create_model(self):\n\t\t\n\t\tcolumn_types = [str]\n\t\trenderer_index = 1\n\t\t\n\t\trenderers = []\n\t\t\n\t\tfor key in self._view_columns.keys():\n\t\t\tct = self._model_columns[key]['type']\n\t\t\tif ct in ('char','varchar','uuid'):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trndr = Gtk.CellRendererText.new()\n\t\t\t\trenderers.append([renderer_index,rndr,self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('selection',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trndr = Gtk.CellRendererCombo.new()\n\t\t\t\trenderers.append([renderer_index,rndr,self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('integer',):\n\t\t\t\tcolumn_types.append(int)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererSpin(adjustment=Gtk.Adjustment(0, 0-2**63, 2**63-1, 1, 10, 0)),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('float','double'):\n\t\t\t\tcolumn_types.append(float)\n\t\t\t\t_min = -(10.000 **self._model_columns[key]['size'] - 1.000/(10 ** self._model_columns[key]['size']))\n\t\t\t\t_max = 10.000 **self._model_columns[key]['size'] - 1.000/(10 ** self._model_columns[key]['size'])\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererSpin(adjustment=Gtk.Adjustment(0, _min, _max, 1, 10, 0)),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('numeric','decimal'):\n\t\t\t\tcolumn_types.append(float)\n\t\t\t\t_min = -(10.000 **self._model_columns[key]['size'][0] - 1.000/(10 ** self._model_columns[key]['size'][1]))\n\t\t\t\t_max = 10.000 **self._model_columns[key]['size'][0] - 1.000/(10 ** self._model_columns[key]['size'][1])\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererSpin(adjustment=Gtk.Adjustment(0, _min, _max, 1, 10, 0)),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('boolean',):\n\t\t\t\tcolumn_types.append(bool)\n\t\t\t\trndr = Gtk.CellRendererToggle.new()\n\t\t\t\trenderers.append([renderer_index,rndr,self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('date',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('time',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('datetime',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('many2one','related'):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\tif not (hasattr(self,'_many2onefield') and ct == 'many2one' and key == self._many2onefield):\n\t\t\t\t\trenderers.append([renderer_index+1,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type'],key])\n\t\t\t\tself._fieldindex[key] = renderer_index + 1\n\t\t\t\trenderer_index += 2\n\t\t\telif ct in ('referenced',):\n\t\t\t\tfieldref,keyref = self._model_columns[key]['ref'].split('.')\n\t\t\t\tobj = self._model_columns[fieldref]['obj']\n\t\t\t\tcolumnsref = self._models_info[obj]['columns']\n\t\t\t\tif self._model_columns[key]['label']:\n\t\t\t\t\tlabelref = self._model_columns[key]['label']\n\t\t\t\telse:\n\t\t\t\t\tlabelref = columnsref[keyref]['label']\n\t\t\t\ttyperef = columnsref[keyref]['type']\n\t\t\t\t\n\t\t\t\tif typeref in ('char','varchar','uuid'):\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\trndr = Gtk.CellRendererText.new()\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('selection',):\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\trndr = Gtk.CellRendererCombo.new()\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('integer',):\n\t\t\t\t\tcolumn_types.append(int)\n\t\t\t\t\trndr = Gtk.CellRendererSpin(adjustment=Gtk.Adjustment(0, 0-2**63, 2**63-1, 1, 10, 0))\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('float','double'):\n\t\t\t\t\tcolumn_types.append(float)\n\t\t\t\t\t_min = -(10.000 **self._model_columns[key]['size'][0] - 1.000/(10 ** self._model_columns[key]['size'][1]))\n\t\t\t\t\t_max = 10.000 **self._model_columns[key]['size'][0] - 1.000/(10 ** self._model_columns[key]['size'][1])\n\t\t\t\t\trndr = Gtk.CellRendererSpin(adjustment=Gtk.Adjustment(0, _min, _max, 1, 10, 0))\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('numeric','decimal'):\n\t\t\t\t\tcolumn_types.append(float)\n\t\t\t\t\t_min = -(10.000 **self._model_columns[key]['size'][0] - 1.000/(10 ** self._model_columns[key]['size'][1]))\n\t\t\t\t\t_max = 10.000 **self._model_columns[key]['size'][0] - 1.000/(10 ** self._model_columns[key]['size'][1])\n\t\t\t\t\trndr = Gtk.CellRendererSpin(adjustment=Gtk.Adjustment(0, _min, _max, 1, 10, 0))\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('boolean',):\n\t\t\t\t\tcolumn_types.append(bool)\n\t\t\t\t\trndr = Gtk.CellRendererToggle.new()\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('date',):\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\trndr = Gtk.CellRendererText()\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('time',):\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\trndr = Gtk.CellRendererText()\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('datetime',):\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\trndr = Gtk.CellRendererText()\n\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index\n\t\t\t\t\trenderer_index += 1\n\t\t\t\telif typeref in ('many2one','related'):\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\tcolumn_types.append(str)\n\t\t\t\t\trndr = Gtk.CellRendererText()\n\t\t\t\t\tif not (hasattr(self,'_many2onefield') and ct == 'many2one' and key == self._many2onefield):\n\t\t\t\t\t\trenderers.append([renderer_index,rndr,labelref,typeref,key])\n\t\t\t\t\tself._fieldindex[key] = renderer_index + 1\n\t\t\t\t\trenderer_index += 2\n\n\t\t\t\n\t\t\tself._listStore = Gtk.ListStore(*column_types)\n\t\t\tself._treeView = Gtk.TreeView(self._listStore)\n\t\t\tself._treeView.set_rules_hint(True)\n\t\t\tself._treeView.set_enable_search(True)\n\t\t\tself._treeView.set_enable_tree_lines(True)\n\t\t\tself._treeView.set_reorderable(True)\n\t\t\tself._treeView.set_grid_lines(Gtk.TreeViewGridLines.BOTH)\n\t\t\t\n\t\t\tfor renderer in renderers:\n\t\t\t\tif renderer[3] == 'boolean':\n\t\t\t\t\tcolumn = Gtk.TreeViewColumn(renderer[2], renderer[1], active=renderer[0])\n\t\t\t\telif renderer[3] in ('many2one','related'):\n\t\t\t\t\t#print(renderer[3])\n\t\t\t\t\tcellpb_c = CellRendererPixbuf()\n\t\t\t\t\tcellpb_s = CellRendererPixbuf()\n\t\t\t\t\tcellpb_c.connect('activate',self._on_search_activate)\n\t\t\t\t\tcellpb_s.connect('activate',self._on_search_activate)\t\t\t\t\t\n\t\t\t\t\tcolumn = Gtk.TreeViewColumn(renderer[2]) #, renderer[1], text=renderer[0])\n\t\t\t\t\tcolumn.pack_start(renderer[1],True)\n\t\t\t\t\tcolumn.pack_start(cellpb_c,False)\n\t\t\t\t\tcolumn.set_attributes(cellpb_c,icon_name=0)\n\t\t\t\t\tcolumn.pack_start(cellpb_s,False)\n\t\t\t\t\tcolumn.set_attributes(cellpb_s,icon_name=1)\n\t\t\t\t\tcolumn.set_attributes(renderer[1], text=renderer[0])\n\t\t\t\t\t\n\n\n\t\t\t\telse:\n\t\t\t\t\tcolumn = Gtk.TreeViewColumn(renderer[2], renderer[1], text=renderer[0])\n\t\t\t\tcolumn.set_sort_column_id(renderer[0])\n\t\t\t\tself._treeView.append_column(column)\n\t\t\t\n\t\t\tself._renderers = renderers\n\n\tdef get_value_by_index(self,path,idx):\n\t\titr = self._listStore.get_iter(path)\n\t\tfield = list(self._model_columns.keys())[idx]\n\t\tif self._model_columns[field]['type'] in ('many2one','related'):\n\t\t\tv = {'id':self._listStore.get_value(itr,idx-1),'name':self._listStore.get_value(itr,idx)}\n\t\telif self._model_columns[field]['type'] == 'referenced':\n\t\t\tpass\n\t\telse:\n\t\t\tv = self._listStore.get_value(itr,idx)\n\t\t\n\t\treturn v\n\t\t\n\tdef set_value_by_index(self,path,idx,value):\n\t\titr = self._listStore.get_iter(path)\n\t\tfield = list(self._model_columns.keys())[idx]\n\t\tif self._model_columns[field]['type'] in ('many2one','related'):\n\t\t\tself._listStore.set_value(itr,idx-1,value['id'])\n\t\t\tself._listStore.set_value(itr,idx-1,value['name'])\n\t\telif self._model_columns[field]['type'] == 'referenced':\n\t\t\tpass\n\t\telse:\n\t\t\tself._listStore.set_value(itr,idx,value)\n\n\t\n\tdef get_value_by_name(self,path,field):\n\t\titr = self._listStore.get_iter(path)\n\t\tidx = self._fieldindex[field]\n\t\tif self._model_columns[field]['type'] in ('many2one','related'):\n\t\t\tv = {'id':self._listStore.get_value(itr,idx-1),'name':self._listStore.get_value(itr,idx)}\n\t\telif self._model_columns[field]['type'] == 'referenced':\n\t\t\tpass\n\t\telse:\n\t\t\tv = self._listStore.get_value(itr,idx)\n\t\t\n\t\treturn v\n\t\t\n\tdef set_value_by_name(self,path,field,value):\n\t\titr = self._listStore.get_iter(path)\n\t\tidx = self._fieldindex[field]\n\t\tif self._model_columns[field]['type'] in ('many2one','related'):\n\t\t\tself._listStore.set_value(itr,idx-1,value['id'])\n\t\t\tself._listStore.set_value(itr,idx,value['name'])\n\t\telif self._model_columns[field]['type'] == 'referenced':\n\t\t\tpass\n\t\telse:\n\t\t\tself._listStore.set_value(itr,idx,value)\n\t\n\tdef get_values(self):\n\t\tvalues = []\n\t\tfor i in range(len(self._listStore)):\n\t\t\titr = self._listStore.get_iter(i)\n\t\t\t#value = []\n\t\t\trecord = {}\n\t\t\t\n\t\t\toid = self._listStore.get_value(itr,0)\n\t\t\tif oid and len(oid) > 0:\n\t\t\t\trecord['id'] = oid\n\t\t\t\n\t\t\tfor key in self._view_columns.keys():\n\t\t\t\tct = self._model_columns[key]['type']\n\t\t\t\tif ct == 'referenced':\n\t\t\t\t\tcontinue\n\t\t\t\tif key in self._fieldindex:\n\t\t\t\t\tif ct in ('many2one','related'):\n\t\t\t\t\t\tv = self._listStore.get_value(itr,self._fieldindex[key]-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tv = self._listStore.get_value(itr,self._fieldindex[key])\n\n\t\t\t\t\tif ct == 'selection':\n\t\t\t\t\t\tif type(v) == str:\n\t\t\t\t\t\t\tv = self._selectionToCode[key][v]\n\t\t\t\t\telif ct in ('numeric','decimal'):\n\t\t\t\t\t\tv = Decimal(v)\n\t\t\t\t\telif ct == 'datetime':\n\t\t\t\t\t\tif type(v) == str:\n\t\t\t\t\t\t\tif len(v) == 10:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v+' 00:00:00+0000','%Y-%m-%d %H:%M:%S%z')\n\t\t\t\t\t\t\telif len(v) == 19:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v+'+0000','%Y-%m-%d %H:%M:%S%z')\n\t\t\t\t\t\t\telif len(v) == 24:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v,'%Y-%m-%d %H:%M:%S%z')\n\t\t\t\t\t\t\telif len(v) > 24:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v[:24],'%Y-%m-%d %H:%M:%S%z')\n\t\t\t\t\telif ct == 'date':\n\t\t\t\t\t\tif type(v) == str:\n\t\t\t\t\t\t\tif len(v) == 10:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v,'%Y-%m-%d').date()\n\t\t\t\t\telif ct == 'time':\n\t\t\t\t\t\tif type(v) == str:\n\t\t\t\t\t\t\tif len(v) == 8:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v,'%H:%M:%S').time()\n\t\t\t\t\t\t\telif len(v) == 13:\n\t\t\t\t\t\t\t\tv = datetime.strptime(v,'%H:%M:%S%z').time()\n\t\t\t\t\telif ct == 'one2many':\n\t\t\t\t\t\tv = self.get_one2many(key)\n\t\t\t\t\n\t\t\t\t\t#value.append(v)\n\t\t\t\t\trecord[key] = v\n\t\t\t\n\t\t\t#values.append(value)\n\t\t\tvalues.append(record)\n\t\t\t\n\t\treturn values\n\t\t\t\n\tdef set_values(self, fields, records):\n\t\t_fields = ['id']\n\t\t_fields.extend(fields)\n\t\tself._listStore.clear()\n\t\tfor record in records:\n\t\t\tprint('RECORD:',record)\n\t\t\trow = []\n\t\t\tfor i,field in enumerate(_fields):\n\t\t\t\tprint('i,field:',i,field)\n\t\t\t\t#field = _fields[i]\n\t\t\t\tif field == 'id':\n\t\t\t\t\tct = 'uuid'\n\t\t\t\telse:\n\t\t\t\t\tct = self._model_info['columns'][field]['type']\n\n\t\t\t\tif ct in ('many2one','related'):\n\t\t\t\t\trow.append(record[i]['id'])\n\t\t\t\t\tif 'name' in record[i]:\n\t\t\t\t\t\trow.append(record[i]['name'])\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i]['id'])\n\t\t\t\telif ct == 'selection':\n\t\t\t\t\tif record[i] in self._selectionFromCode[field]:\n\t\t\t\t\t\trow.append(self._selectionFromCode[field][record[i]])\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(None)\n\t\t\t\telif ct == 'date':\n\t\t\t\t\tif record[i]:\n\t\t\t\t\t\trow.append(record[i].strftime('%Y-%m-%d%z'))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i])\n\t\t\t\telif ct == 'time':\n\t\t\t\t\tif record[i]:\n\t\t\t\t\t\trow.append(record[i].strftime('%H:%M:%S'))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i])\n\t\t\t\telif ct == 'datetime':\n\t\t\t\t\tif record[i]:\n\t\t\t\t\t\trow.append(record[i].strftime('%Y-%m-%d %H:%M:%S%z'))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i])\n\t\t\t\telse:\n\t\t\t\t\trow.append(record[i])\n\n\t\t\tself._listStore.append(row)\n\n\tdef get_one2many(self,field):\n\t\treturn []\n\n\tdef set_editable(self,editable):\n\t\tself._set_editable(editable)\n\t\tif self._editable != editable:\n\t\t\tself._editable = editable\n\n\tdef _set_editable(self,editable):\n\t\t#print('set-editable:',self._model,editable)\n\t\trecname = self._model_info['names']['rec_name']\n\t\tfor cellrenderer in self._renderers:\n\t\t\tct = cellrenderer[3]\n\t\t\trndr = cellrenderer[1]\n\t\t\tname = cellrenderer[4]\n\t\t\tif ct in ('char','varchar','uuid'):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.connect(\"edited\", self._text_edited,name)\n\t\t\telif ct in ('selection',):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.set_property(\"model\", self._selectionsModel[name])\n\t\t\t\trndr.set_property(\"text-column\", 0)\n\t\t\t\trndr.set_property(\"has-entry\", editable)\n\t\t\t\trndr.connect(\"edited\", self._on_combo_changed,name)\n\t\t\telif ct in ('integer',):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.connect(\"edited\", self._on_integer_edited,name)\t\t\t\t\t\t\t\t\n\t\t\telif ct in ('float','double'):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.connect(\"edited\", self._on_float_edited,name)\n\t\t\telif ct in ('numeric','decimal'):\n\t\t\t\trndr.connect(\"edited\", self._on_decimal_edited,name)\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\telif ct in ('boolean',):\n\t\t\t\trndr.set_activatable(editable)\n\t\t\t\trndr.connect(\"toggled\", self._on_cell_toggled,name)\n\t\t\telif ct in ('date',):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.connect(\"edited\", self._text_edited,name)\n\t\t\telif ct in ('time',):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.connect(\"edited\", self._text_edited,name)\n\t\t\telif ct in ('datetime',):\n\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\trndr.connect(\"edited\", self._text_edited,name)\n\t\t\telif ct in ('many2one','related'):\n\t\t\t\tif not (hasattr(self,'_many2onefield') and ct == 'many2one' and name == self._many2onefield):\n\t\t\t\t\trndr.set_property(\"editable\", editable)\n\t\t\t\t\trndr.connect(\"edited\", self._text_edited,name)\n\t\t\telif ct in ('referenced',):\n\t\t\t\trndr.set_property(\"editable\", False)\n\n\tdef _on_search_activate(self,widget,path,background_area,cell_area,flags):\n\t\tprint('_on_search_activate')\t\t\t\n\t\n\tdef _text_edited(self, widget, path, text, user_data):\n\t\tfield = user_data\n\t\trec_name = self._model_info['names']['rec_name']\n\t\tif self._model_columns[field]['type'] in ('many2one','related','many2many'):\n\t\t\tobj = self._model_columns[field]['obj']\n\t\t\tbutton = widget\n\t\t\tself.on_do_m2o_find(button,field,obj,path,text)\n\t\telif rec_name and field == rec_name and hasattr(self,'_many2manyobj') and hasattr(self,'_many2manyrel') and hasattr(self,'_many2manyfieldid1') and hasattr(self,'_many2manyfieldid2'):\n\t\t\tfield = rec_name\n\t\t\tobj = self._many2manyobj\n\t\t\tbutton = widget\n\t\t\t#print('_text_edited:',self._model,field,rec_name,obj,self._many2manyrel),\n\n\t\t\tself.on_do_m2m_find(button,field,obj,path,text)\n\t\telse:\n\t\t\tself._listStore[path][self._fieldindex[user_data]] = text\n\n\tdef _on_cell_toggled(self, widget, path, user_data):\n\t\tself._listStore[path][self._fieldindex[user_data]] = not self._listStore[path][self._fieldindex[user_data]]\n\n\tdef _on_combo_changed(self, widget, path, text,user_data):\n\t\tself._listStore[path][self._fieldindex[user_data]] = text\n\n\tdef _on_integer_edited(self, widget, path, text,user_data):\n\t\tself._listStore[path][self._fieldindex[user_data]] = int(text)\n\n\tdef _on_float_edited(self, widget, path, text,user_data):\n\t\tself._listStore[path][self._fieldindex[user_data]] = float(text)\n\n\tdef _on_decimal_edited(self, widget, path, text,user_data):\n\t\tself._listStore[path][self._fieldindex[user_data]] = float(text)\n\n\tdef on_tree_selection_changed(self,selection):\n\t\tmode = selection.get_mode()\n\t\tif mode == Gtk.SelectionMode.SINGLE:\n\t\t\tmodel, treeiter = selection.get_selected()\n\t\t\toid = model[treeiter][0]\n\t\t\trec_name = self._listStore.get_value(treeiter,self._fieldindex[self._model_info['names']['rec_name']])\n\t\t\tif self._mode == 's':\n\t\t\t\tif self._parent._viewtype == 'form':\n\t\t\t\t\tself._parent._m2o_id[self._searchField] = oid\n\t\t\t\t\tself._parent._record[self._searchField].set_text(rec_name)\n\t\t\t\telif self._parent._viewtype in ('m2olist','m2mlist'):\t\n\t\t\t\t\tcolumn = self._parent._fieldindex[self._searchField]\n\t\t\t\t\trowiter = self._parent._listStore.get_iter(Gtk.TreePath.new_from_string(self._path))\n\t\t\t\t\tif self._parent._viewtype == 'm2olist':\n\t\t\t\t\t\tself._parent._listStore.set_value(rowiter,column-1,oid)\n\t\t\t\t\telif self._parent._viewtype == 'm2mlist':\n\t\t\t\t\t\tself._parent._listStore.set_value(rowiter,0,oid)\n\t\t\t\t\tself._parent._listStore.set_value(rowiter,column,rec_name)\n\t\t\t\t\t\n\t\t\t\tif self._widget:\n\t\t\t\t\tself._widget.close()\n\t\t\t\treturn\n\t\t\telif self._mode == 'S':\n\t\t\t\tcolumn = self._parent._fieldindex[self._searchField]\n\t\t\t\trowiter = self._parent._listStore.get_iter(Gtk.TreePath.new_from_string(self._path))\n\t\t\t\tself._parent._listStore.set_value(rowiter,0,oid)\n\t\t\t\tself._parent._listStore.set_value(rowiter,column,rec_name)\n\t\t\t\tif self._widget:\n\t\t\t\t\tself._widget.close()\n\t\t\t\treturn\n\n\t\t\tform = FormView(parent=self,mode = 'r')\n\t\t\tform.message = self.message\n\t\t\tm = self.message(['ui','view',{'name':'view.' + self._model + '.form'}])\n\t\t\tform.load(m[0],m[1],m[3])\n\t\t\tform.read(oid)\n\t\telif mode == Gtk.SelectionMode.MULTIPLE:\n\t\t\tcount = selection.count_selected_rows()\n\t\t\tself._selectedRows = []\n\t\t\tif count > 0:\n\t\t\t\tmodel,path = selection.get_selected_rows()\n\t\t\t\tfor p in path:\n\t\t\t\t\tr = p[0]\n\t\t\t\t\tself._selectedRows.append(model[p[0]][0])\n\nclass BaseViewTree(BaseView):\n\t_widget = None\n\t_parent = None\n\t_mode = 'r'\n\t_searchField = None\n\t_entries = {}\n\t_selections = {}\n\t_form = None\n\t\n\tdef load(self,info,view,available):\n\t\tself._info = info\n\t\tself._view = view\n\t\tself._models_info = info['models']\n\t\tself._model = info['root']\n\t\tself._model_info = self._models_info[self._model]\n\t\tself._model_columns = self._model_info['columns']\n\t\tself._view_columns = self._view['columns']\t\t\n\t\tif self._parent:\n\t\t\tself._widget = Gtk.Window(title=self._getTitle())\n\t\t\tself._widget.set_default_size(720,600)\n\t\tself._model = self._model_info['name']\n\t\tself._keyindex = {}\n\t\tself._fields = list(filter(lambda x: not self._model_columns[x]['type'] in ('one2many','many2many','text','binary','xml'),view['columns'].keys()))\n\t\tself._queryfields = list(filter(lambda x: 'selectable' in self._model_columns[x] and self._model_columns[x]['selectable'],self._model_columns.keys()))\n\t\tself._available = available\n\t\tself._selection_fields = list(filter(lambda x: self._model_columns[x]['type'] == 'selection',self._fields))\n\t\n\t\tif self._model_info['names']['inactive']:\n\t\t\tpass\n\t\t\t#self._selection_fields.append(self._info['names']['inactive'])\n\t\n\t\tfor selection_field in self._selection_fields:\n\t\t\tself._selections[selection_field] = dict(self._model_columns[selection_field]['selections'])\n\t\t\n\t\tcolumn_types = [str]\n\t\tself._keyindex['id'] = 0\n\t\trenderers = []\n\t\trenderer_index = 1\n\t\t\n\t\tfor key in self._fields:\n\t\t\tct = self._model_columns[key]['type']\n\t\t\tif ct in ('char','varchar','uuid'):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trndr = Gtk.CellRendererText.new()\n\t\t\t\trenderers.append([renderer_index,rndr,self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('selection',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trndr = Gtk.CellRendererCombo.new()\n\t\t\t\trenderers.append([renderer_index,rndr,self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('integer',):\n\t\t\t\tcolumn_types.append(int)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('float','double'):\n\t\t\t\tcolumn_types.append(float)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('numeric','decimal'):\n\t\t\t\tcolumn_types.append(float)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('boolean',):\n\t\t\t\tcolumn_types.append(bool)\n\t\t\t\trndr = Gtk.CellRendererToggle.new()\n\t\t\t\trndr.set_activatable(True)\n\t\t\t\trenderers.append([renderer_index,rndr,self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('date',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('time',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('datetime',):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index\n\t\t\t\trenderer_index += 1\n\t\t\telif ct in ('many2one','related'):\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\tcolumn_types.append(str)\n\t\t\t\trenderers.append([renderer_index+1,Gtk.CellRendererText(),self._model_columns[key]['label'],self._model_columns[key]['type']])\n\t\t\t\tself._keyindex[key] = renderer_index + 1\n\t\t\t\trenderer_index += 2\n\t\t\n\t\t\tself._store = Gtk.TreeStore(*column_types)\n\t\t\tself._tree = Gtk.TreeView(self._store)\n\t\t\tself._tree.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)\n\t\t\tself._box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n\t\t\tself._viewbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\n\t\t\tbuttonSearch = Gtk.Button.new_with_label(\"Search\")\n\t\t\tbuttonSearch.connect(\"clicked\", self.on_click_do_switch, 'search')\n\t\t\tself._viewbox.pack_start(buttonSearch, False, False, 5)\n\n\t\t\tbuttonForm = Gtk.Button.new_with_label(\"Form\")\n\t\t\tbuttonForm.connect(\"clicked\", self.on_click_do_switch, 'form')\n\t\t\tself._viewbox.pack_start(buttonForm, False, False, 5)\n\n\t\t\tif 'tree' in self._available:\n\t\t\t\tbuttonTree = Gtk.Button.new_with_label(\"Tree\")\n\t\t\t\tbuttonTree.connect(\"clicked\", self.on_click_do_switch, 'tree')\n\t\t\t\tself._viewbox.pack_start(buttonTree, False, False, 5)\n\n\t\t\tself._box.pack_start(self._viewbox, False, False, 5)\n\n\t\t\tself._toolbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n\t\t\tif self._mode != 's':\n\t\t\t\tif (self._model_info['access']['create'] or self._model_info['access']['insert']) and self._editable: \n\t\t\t\t\tself._buttonCreate = Gtk.Button.new_from_icon_name(\"gtk-add\",1)\n\t\t\t\t\tself._buttonCreate.connect(\"clicked\", self.on_click_do_action, 'add')\n\t\t\t\t\tself._toolbox.pack_start(self._buttonCreate, False, False, 10)\n\t\t\n\t\t\t\tif self._model_info['access']['read'] or self._mode_info['access']['select']: \n\t\t\t\t\tself._buttonLookUp = Gtk.Button.new_from_icon_name(\"gtk-properties\",1)\n\t\t\t\t\tself._buttonLookUp.connect(\"clicked\", self.on_click_do_action, 'edit')\n\t\t\t\t\tself._toolbox.pack_start(self._buttonLookUp, False, False, 10)\n\t\t\t\t\t\n\t\t\t\tif (self._model_info['access']['write'] or self._mode_info['access']['update']) and self._editable: \n\t\t\t\t\tself._buttonEdit = Gtk.Button.new_from_icon_name(\"gtk-edit\",1)\n\t\t\t\t\tself._buttonEdit.connect(\"clicked\", self.on_click_do_action, 'edit')\n\t\t\t\t\tself._toolbox.pack_start(self._buttonEdit, False, False, 10)\n\t\t\n\t\t\t\tif (self._model_info['access']['unlink'] or self._model_info['access']['delete']) and self._editable: \t\n\t\t\t\t\tself._buttonDelete = Gtk.Button.new_from_icon_name(\"gtk-remove\",1)\n\t\t\t\t\tself._buttonDelete.connect(\"clicked\", self.on_click_do_action, 'remove')\n\t\t\t\t\tself._toolbox.pack_start(self._buttonDelete, False, False, 10)\n\t\t\t\t\n\t\t\t\tself._box.pack_start(self._toolbox, False, False, 5)\n\n\t\tselect = self._tree.get_selection()\n\t\tselect.connect(\"changed\", self.on_tree_selection_changed)\n\n\t\tgrid = Gtk.Grid()\n\t\t\n\t\tfor queryfield in self._queryfields:\n\t\t\tself._entries[queryfield] = Gtk.Entry()\n\t\t\tself._entries[queryfield].set_editable(True)\n\t\t\tself._entries[queryfield].set_placeholder_text(self._model_columns[queryfield]['label'])\n\t\t\tgrid.add(self._entries[queryfield])\n\t\tbutton = Gtk.Button.new_with_label(\"Search\")\n\t\tbutton.connect(\"clicked\", self.on_click_do_query)\n\t\tgrid.add(button)\n \n\t\tself._box.pack_start(grid, False, True, 0)\n\t\tscrolledwindow = Gtk.ScrolledWindow( Gtk.Adjustment(value=350,lower=350,page_size=400) )\n\t\tscrolledwindow.add(self._tree)\n\n\t\tfor renderer in renderers:\n\t\t\tcolumn = Gtk.TreeViewColumn(renderer[2], renderer[1], text=renderer[0])\n\t\t\tcolumn.set_sort_column_id(renderer[0])\n\t\t\tself._tree.append_column(column)\n\n\t\tself._box.pack_start(scrolledwindow, True, True, 0)\n\n\t\tif self._widget:\n\t\t\tself._widget.add(self._box)\n\t\t\tself._widget.show_all()\n\n\tdef _text_edited(self, widget, path, text, user_data):\n\t\tself._store[path][self._keyindex[user_data]] = text\n\n\tdef on_click_do_query(self,button):\n\n\t\tcond = []\n\t\tif self._domain and len(self._domain) > 0:\n\t\t\tcond.extend(self._domain)\n\t\tfor key in self._entries.keys():\n\t\t\tvalue = self._entries[key].get_text()\n\t\t\tif len(value) > 0:\n\t\t\t\tcond.append((key,'like',value))\n\t\t\n\t\tself.select(cond=cond)\n\n\tdef append(self,row):\n\t\tself._store.append(row)\n\n\tdef select(self,parent=None,cond = []):\n\t\tparent_id = self._model_info['names']['parent_id']\n\t\trec_name = self._model_info['names']['rec_name']\n\t\tif parent is None:\n\t\t\tself._store.clear()\n\t\t\tcond.extend([(parent_id,'?',)])\n\t\trecords = self.model.select(model=self._model,fields=self._fields,cond=cond)\n\t\tlf = len(self._fields)\n\t\tfor record in records:\n\t\t\trow = [record[0]]\n\t\t\tfor i in range(lf):\n\t\t\t\tfield = self._fields[i]\n\t\t\t\tct = self._model_info['columns'][field]['type']\n\t\t\t\tif ct in ('many2one','related'):\n\t\t\t\t\trow.append(record[i+1]['id'])\n\t\t\t\t\tif 'name' in record[i+1]:\n\t\t\t\t\t\trow.append(record[i+1]['name'])\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i+1]['id'])\n\t\t\t\telif ct == 'selection':\n\t\t\t\t\trow.append(self._selections[field][record[i+1]])\n\t\t\t\telif ct == 'date':\n\t\t\t\t\tif record[i+1]:\n\t\t\t\t\t\trow.append(record[i+1].strftime('%Y-%m-%d%z'))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i+1])\n\t\t\t\telif ct == 'time':\n\t\t\t\t\tif record[i+1]:\n\t\t\t\t\t\trow.append(record[i+1].strftime('%H:%M:%S'))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i+1])\n\t\t\t\telif ct == 'datetime':\n\t\t\t\t\tif record[i+1]:\n\t\t\t\t\t\trow.append(record[i+1].strftime('%Y-%m-%d %H:%M:%S%z'))\n\t\t\t\t\telse:\n\t\t\t\t\t\trow.append(record[i+1])\n\t\t\t\telse:\n\t\t\t\t\trow.append(record[i+1])\n\n\t\t\tchilds_cond = []\n\t\t\tchilds_cond.extend([(parent_id,'=',row[self._keyindex[rec_name]])])\n\t\t\tself.select(self._store.append(parent,row),childs_cond)\n\t\t\t\n\t\n\tdef on_tree_selection_changed(self,selection):\n\t\tmode = selection.get_mode()\n\t\tif mode == Gtk.SelectionMode.SINGLE:\n\t\t\tmodel, treeiter = selection.get_selected()\n\t\t\toid = model[treeiter][0]\n\t\t\trec_name = model[treeiter][self._keyindex[self._info['names']['rec_name']]]\n\t\t\tif self._mode == 's':\n\t\t\t\tif self._parent._viewtype == 'form':\n\t\t\t\t\tself._parent._m2o_id[self._searchField] = oid\n\t\t\t\t\tself._parent._record[self._searchField].set_text(rec_name)\n\t\t\t\telif self._parent._viewtype in ('m2olist','m2mlist'):\t\n\t\t\t\t\tcolumn = self._parent._fieldindex[self._searchField]\n\t\t\t\t\trowiter = self._parent._listStore.get_iter(Gtk.TreePath.new_from_string(self._path))\n\t\t\t\t\tif self._parent._viewtype == 'm2olist':\n\t\t\t\t\t\tself._parent._listStore.set_value(rowiter,column-1,oid)\n\t\t\t\t\telif self._parent._viewtype == 'm2mlist':\n\t\t\t\t\t\tself._parent._listStore.set_value(rowiter,0,oid)\n\t\t\t\t\tself._parent._listStore.set_value(rowiter,column,rec_name)\n\n\t\t\t\tif self._widget:\n\t\t\t\t\tself._widget.close()\n\t\t\t\treturn\n\t\t\tform = FormView(parent=self,mode = 'r')\n\t\t\tform.message = self.message\n\t\t\tm = self.message(['ui','view',{'name':'view.' + self._model + '.form'}])\n\t\t\tform.load(m[0],m[1],m[3])\n\t\t\tform.read(oid)\n\t\telif mode == Gtk.SelectionMode.MULTIPLE:\n\t\t\tcount = selection.count_selected_rows()\n\t\t\tself._selectedRows = []\n\t\t\tif count > 0:\n\t\t\t\tmodel,path = selection.get_selected_rows()\n\t\t\t\tfor p in path:\n\t\t\t\t\tr = p[0]\n\t\t\t\t\tself._selectedRows.append(model[p[0]][0])\n\n\tdef on_click_do_switch(self,button,user_data):\n\t\tif user_data == 'tree':\n\t\t\tform = TreeView(parent=self,mode = 'r')\n\t\t\tform.message = self.message\n\t\t\tm = self.message(['ui','view',{'name':'view.' + self._model + '.tree'}])\n\t\t\tform.load(m[0],m[1],m[3])\n\n\tdef on_click_do_action(self,button, user_data):\n\t\tif user_data == 'add':\n\t\t\tform = self.FORM(parent=self,mode = 'c',conn=self.model._conn,modelapi=self.model) \n\t\t\tm = self._conn._message(['ui','view',{'name':'view.' + self._model + '.form'}])\n\t\t\tform.load(m[0],m[1],m[3])\n\t\telif user_data == 'edit':\n\t\t\tif len(self._selectedRows) > 0:\n\t\t\t\tform = self.FORM(parent=self,mode = 'w',conn=self.model._conn,modelapi=self.model)\n\t\t\t\tm = self._conn._message(['ui','view',{'name':'view.' + self._model + '.form'}])\n\t\t\t\tform.load(m[0],m[1],m[3])\n\t\t\t\tform.read(self._selectedRows[0])\t\t\n\t\telif user_data == 'remove':\n\t\t\tif len(self._selectedRows) > 0:\n\t\t\t\tdialog = YesNoDialog(None,'Delete selected records','Do yuo want delete selected records?')\n\t\t\t\tresponse = dialog.run()\n\t\t\t\tif response == Gtk.ResponseType.YES:\n\t\t\t\t\tunlinkIds = self.model.unlink(self._model,self._selectedRows)\n\t\t\t\t\tfor oid in unlinkIds:\n\t\t\t\t\t\tself._delete_row(oid)\n\t\t\t\tdialog.destroy()\n\n\tdef _delete_row(self,oid):\n\t\tfor ri in range(len(self._listStore)):\n\t\t\ti = self._listStore.get_iter(ri)\n\t\t\tif self._listStore[i][0] == oid:\n\t\t\t\tself._listStore.remove(i)\n\t\t\t\tbreak\n\nclass SearchView(BaseViewList):\n\t_widget = None\n\t_mode = 'r'\n\t_searchField = None\n\t_entries = {}\n\t_selections = {}\n\t_form = None\n\t\n\tdef load(self,viewname=None,window_action_id = None):\n\t\treturn searchview.load(self,viewname,window_action_id)\n\n\tdef _load_data(self,fields,cond):\n\t\treturn searchview._load_data(self,fields,cond)\n\n\tdef _text_edited(self, widget, path, text, user_data):\n\t\treturn searchview._text_edited(self, widget, path, text, user_data)\n\t\n\tdef on_click_do_query(self,button):\n\t\treturn searchview.on_click_do_query(self,button)\n\n\tdef append(self,row):\n\t\treturn searchview.append(self,row)\n\n\tdef on_click_do_switch(self,button,user_data):\n\t\treturn searchview.on_click_do_switch(self,button,user_data)\n\n\tdef on_click_do_action(self,button, user_data):\n\t\treturn searchview.on_click_do_action(self,button, user_data)\n\t\n\tdef _delete_row(self,oid):\n\t\treturn searchview._delete_row(self,oid)\n\nclass FormView(BaseView):\n\t_widget = None\n\t_parent = None\n\t_mode = 'r'\n\t_notebook = None\n\t_pages = {}\n\t_textBuffer = {}\n\t_selectionStore = {}\n\t_storeEntrycompletion = {}\n\t\t\n\tdef load(self,info,view,available):\n\t\treturn formview.load(self,info,view,available)\n\n\tdef on_do_import(self,button):\n\t\treturn formview.on_do_import(self,button)\n\n\tdef on_do_export(self,button):\n\t\treturn formview.on_do_export(self,button)\n \n\tdef on_do_save(self,button):\n\t\treturn formview.on_do_save(self,button)\n\n\tdef on_do_cancel(self,button):\n\t\treturn formview.on_do_cancel(self,button)\n\n\tdef on_do_m2o_create(self,button,field,obj):\n\t\treturn formview.on_do_m2o_create(self,button,field,obj)\n\n\tdef on_do_m2o_find(self,button,field,obj):\n\t\treturn formview.on_do_m2o_find(self,button,field,obj)\n\n\tdef m2m_read(self, oid, field):\n\t\treturn formview.m2m_read(self, oid, field)\n\n\tdef read(self,oid):\n\t\treturn formview.read(self,oid)\n\n\tdef _getTitleView(self):\n\t\treturn formview._getTitleView(self)\n\n\tdef create(self,record):\n\t\treturn formview.create(self,record)\n\n\tdef write(self,record):\n\t\treturn formview.write(self,record)\n\n\tdef modify(self,record):\n\t\treturn formview.modify(self,record)\n\n\tdef _checkUpdate(self,record):\n\t\treturn formview._checkUpdate(self,record)\n\t\t\n\tdef _getValues(self):\n\t\treturn formview._getValues(self)\n\n\tdef _setValues(self,record):\n\t\treturn formview._setValues(self,record)\n\n\nclass ListView(BaseViewList):\n\t\n\t_selections = {}\n\t_selectionsModel = {}\n\t\n\tdef load(self,viewname=None,window_action_id = None,loading = True):\n\t\treturn listview.load(self,viewname,window_action_id,loading)\n\n\tdef _load_data(self,fields=[],cond=[]):\n\t\treturn listview._load_data(self,fields,cond)\n\n\tdef _load_data_childs(self,oid,field):\n\t\treturn listview._load_data_childs(self,oid,field)\n\t\n\tdef on_click_do_action(self,button, user_data):\n\t\treturn listview.on_click_do_action(self,button, user_data)\n\t\n\tdef _delete_row(self,oid):\n\t\treturn listview._delete_row(self,oid)\n\n\tdef on_do_m2o_find(self,button,field,obj,path,text=''):\n\t\treturn listview.on_do_m2o_find(self,button,field,obj,path,text='')\n\n\tdef on_do_m2m_find(self,button,field,obj,path,text=''):\n\t\treturn listview.on_do_m2m_find(self,button,field,obj,path,text='')\n\n\tdef _getValues(self):\n\t\treturn listview._getValues(self)\n\n\tdef _setValues(self,record):\n\t\treturn listview._setValues(self,record)\n\nclass TreeView(BaseViewTree):\n\t_widget = None\n\t_parent = None\n\t_mode = 'r'\n\t_searchField = None\n\t_entries = {}\n\t_selections = {}\n\t_form = None\n\t\n\t# def load(self,info,view,available):\n\t\t# return treeview.load(self,info,view,available)\n\n\t# def _text_edited(self, widget, path, text, user_data):\n\t\t# return treeview._text_edited(self, widget, path, text, user_data)\n\n\t# def on_click_do_query(self,button):\n\t\t# return treeview.on_click_do_query(self,button)\n\n\t# def append(self,row):\n\t\t# return treeview.append(self,row)\n\n\t# def select(self,parent=None,cond = []):\n\t\t# return treeview.select(self,parent,cond)\n\t\n\t# def on_tree_selection_changed(self,selection):\n\t\t# return treeview.on_tree_selection_changed(self,selection)\n\n\t# def on_click_do_switch(self,button,user_data):\n\t\t# return treeview.on_click_do_switch(self,button,user_data)\n\n\t# def on_click_do_createRecord(self,button):\n\t\t# return treeview.on_click_do_createRecord(self,button)\n\t\t\n\t# def on_click_do_editRecord(self,button):\n\t\t# return treeview.on_click_do_editRecord(self,button)\n\n\t# def on_click_do_deleteRecord(self,button):\n\t\t# return treeview.on_click_do_deleteRecord(self,button)\n\nclass GanttView(BaseViewTree):\n\t_widget = None\n\t_parent = None\n\t_mode = 'r'\n\t_searchField = None\n\t_entries = {}\n\t_selections = {}\n\t_form = None\n\n\t# def load(self,info,view,available):\n\t\t# return ganttview.load(self,info,view,available)\n\n\t# def _text_edited(self, widget, path, text, user_data):\n\t\t# return ganttview._text_edited(self, widget, path, text, user_data)\n\n\t# def on_click_do_query(self,button):\n\t\t# return ganttview.on_click_do_query(self,button)\n\n\t# def append(self,row):\n\t\t# return ganttview.append(self,row)\n\n\t# def select(self,parent=None,cond = []):\n\t\t# return ganttview.select(self,parent,cond)\n\t\n\t# def on_tree_selection_changed(self,selection):\n\t\t# return ganttview.on_tree_selection_changed(self,selection)\n\n\t# def on_click_do_switch(self,button,user_data):\n\t\t# return ganttview.on_click_do_switch(self,button,user_data)\n\n\t# def on_click_do_createRecord(self,button):\n\t\t# return ganttview.on_click_do_createRecord(self,button)\n\t\t\n\t# def on_click_do_editRecord(self,button):\n\t\t# return ganttview.on_click_do_editRecord(self,button)\n\n\t# def on_click_do_deleteRecord(self,button):\n\t\t# return ganttview.on_click_do_deleteRecord(self,button)\n\nclass ChartView(object):\n\t_widget = None\n\t_parent = None\n\t_mode = 'r'\n\t_searchField = None\n\t_entries = {}\n\t_selections = {}\n\t_form = None\n\tdef __init__(self,parent = None, mode = 'r', searchField=None, domain = []):\n\t\tself._parent = parent\n\t\tself._mode = mode\n\t\tself._searchField = searchField\n\t\tself._selectedRows = []\n\t\tself._domain = domain\n\t\n\tdef load(self,info,view,available):\n\t\tself._info = info\n\t\tself._view = view\n\t\tself._model_columns = self._info['columns']\n\t\tself._view_columns = self._view['columns']\n\t\tif self._parent:\n\t\t\tself._widget = Gtk.Window(title=self._getTitleView())\n\t\tself._model = info['name']\n\t\tself._keyindex = {}\n\t\tself._fields = list(filter(lambda x: not self._model_columns[x]['type'] in ('one2many','many2many','text','binary','xml'),view['columns'].keys()))\n\t\tself._queryfields = list(filter(lambda x: 'selectable' in self._model_columns[x] and self._model_columns[x]['selectable'],self._model_columns.keys()))\n\t\tself._available = available\n\t\tself._selection_fields = list(filter(lambda x: self._model_columns[x]['type'] == 'selection',self._fields))\n\t\n\t\tself._box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n\t\n\t\tfor selection_field in self._selection_fields:\n\t\t\tself._selections[selection_field] = dict(self._model_columns[selection_field]['selections'])\n\t\t\n\t\tgrid = Gtk.Grid()\n\t\t\n\t\tfor queryfield in self._queryfields:\n\t\t\tself._entries[queryfield] = Gtk.Entry()\n\t\t\tself._entries[queryfield].set_editable(True)\n\t\t\tself._entries[queryfield].set_placeholder_text(self._model_columns[queryfield]['label'])\n\t\t\tgrid.add(self._entries[queryfield])\n\t\t\n\t\tbutton = Gtk.Button.new_with_label(\"Search\")\n\t\tbutton.connect(\"clicked\", self.on_click_do_query)\n\t\tgrid.add(button)\n \n\t\tself._box.pack_start(grid, False, True, 0)\n\t\tself._scrolledwindow = Gtk.ScrolledWindow( Gtk.Adjustment(value=350,lower=350,page_size=400) )\n\t\tself._box.pack_start(self._scrolledwindow, True, True, 0)\n\n\t\t#print('self._widget:',self._widget)\n\t\tif self._widget:\n\t\t\tself._widget.add(self._box)\n\t\t\tself._widget.show_all()\n\n\tdef on_click_do_query(self,button):\n\n\t\tcond = []\n\t\tif self._domain and len(self._domain) > 0:\n\t\t\tcond.extend(self._domain)\n\n\t\tfor key in self._entries.keys():\n\t\t\tvalue = self._entries[key].get_text()\n\t\t\tif len(value) > 0:\n\t\t\t\tcond.append((key,'like',value))\n\t\t\n\t\tself.select(cond=cond)\n\n\tdef draw(self):\n\t\tx = [5,8,10]\n\t\ty = [12,16,6]\n\t\t\n\t\tx2 = [6,9,11]\n\t\ty2 = [6,15,7]\n\t\t\n\t\tfig, ax = plt.subplots()\t\t\n\t\tcanvas = FigureCanvas(fig)\n\t\tcanvas.set_size_request(600,600)\n\t\tcanvas.set_parent(self._scrolledwindow)\n\t\t#self._scrolledwindow.add_with_viewport(canvas)\n\t\n\t\tax.bar(x, y, align='center')\n\t\t\n\t\tax.bar(x2, y2, color='y', align='center')\n\t\t\n\t\tax.plot()\n\t\t\n\t\tplt.title('Epic Info')\n\t\tplt.ylabel('Y axis')\n\t\tplt.xlabel('X axis')\n\n\t\t#print(dir(fig.canvas))\n\t\t#plt.draw()\n\t\tplt.show(block=False)\n\n\t\n\tdef select(self,cond = []):\n\t\trecords = self.message(['models',self._model,'select',{'fields':self._fields,'cond':cond,'context':{'FETCH':'LIST'}}])\n\t\treturn records\n\t\n\tdef _getTitleView(self):\n\t\td = self._info['description'].split(' ')\n\t\tif len(d) > 3:\n\t\t\treturn reduce(lambda x,y: x + ' ' + y,d[2:])\n\t\telif len(d) == 3:\n\t\t\treturn d[2]\n\t\telif len(d) == 2:\n\t\t\treturn reduce(lambda x,y: x + ' ' + y,d)\n\t\telif len(d) == 1:\n\t\t\treturn d[0]\n\n","sub_path":"client/gsrp5client/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":44396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"17048878","text":"#_Author_:Monkey\n#!/usr/bin/env python\n#-*- coding:utf-8 -*-\nfrom sklearn.metrics import auc\nimport numpy as np\nfrom numpy import * #python 中矩阵处理函数\nfrom pylab import *\n\ndef PlotRoc(Predict_array,train_array,test_array,TPR_Data,FPR_Data,PRE_Data):\n\t'''\n\t:param Predict_array:预测结果 \n\t:param train_array:训练数组\n\t:param test_array: 测试数组\n\t:param TPR_Data: 返回的结果\n\t:param FPR_Data: 返回的结果\n\t:param PRE_Data: 返回的结果\n\n\t:return: TPR、FPR、PRE\n\t'''\n\tPredict_array,test_array = asarray(Predict_array),asarray(test_array)\n\t#将预测结果中,训练集中1的位置将其概率置0\n\tfor i in range(Predict_array.shape[0]):\n\t\tfor j in range(Predict_array.shape[1]):\n\t\t\tif train_array[i][j] == 1:\n\t\t\t\tPredict_array[i][j] = -1\n\t#统计Prediction中每行0的个数\n\tzero_list = []\n\tfor i in range(Predict_array.shape[0]):\n\t\tzero_list.append( Predict_array[i].tolist().count(-1) )\n\n\tmaxValue = zero_list[ zero_list.index(max(zero_list)) ]\t\t#zero_list.index(max(zero_list))返回列表最大元素的ID\n\n\tfor i in range(test_array.shape[0]):#test_array.shape[0]\n\t\t# enumerate会将数组或列表组成一个索引序列\n\t\tsort_Data = sorted(enumerate(Predict_array[i]),key = lambda x:x[1],reverse = True)\n\n\t\ttotal_TPR = test_array[i].tolist().count(1) \t# 统计1的个数\n\t\tif total_TPR == 0:\n\n\t\t\tcontinue\n\t\ttotal_FPR = test_array[i].tolist().count(0)\n\t\t#print(i,sort_Data)\n\t\tTPR, FPR ,PRE = [], [],[]\n\t\tTPR_num,FPR_num = 0.0,0.0\n\t\tipre_a,ipre_b = 0,0\t\t# a 是分子,b 分母\n\t\tfor j in range(test_array.shape[1]-maxValue):\t\t#test_array.shape[1]\n\t\t\t# {先计算当前行的除1之外的个数 / 最终只要一行元素的个数(311/296)} * 当前第几个元素\n\t\t\tk = round( float( (test_array.shape[1]-zero_list[i]) / (test_array.shape[1] - maxValue) )*j )\n\n\t\t\tipre_b += 1\n\t\t\t#print(total_TPR + total_FPR)\n\t\t\tif total_TPR != 0 and test_array[i][ sort_Data[k][0] ] == 1:\n\t\t\t\tTPR_num += 1/total_TPR\n\t\t\t\tTPR.append(TPR_num)\n\t\t\t\tFPR.append(FPR_num)\n\t\t\t\tipre_a += 1\n\t\t\t\tPRE.append(ipre_a / ipre_b)\n\t\t\telse:\n\t\t\t\tFPR_num += 1/total_FPR\n\t\t\t\tTPR.append(TPR_num)\n\t\t\t\tFPR.append(FPR_num)\n\t\t\t\tPRE.append(ipre_a / ipre_b)\n\n\t\tTPR_Data.append(TPR)\n\t\tFPR_Data.append(FPR)\n\t\tPRE_Data.append(PRE)\n\n\n\treturn TPR_Data,FPR_Data,PRE_Data\n\n\ndef ROC(modelname):\n\tRD = np.load(\"MIX\\\\\" + modelname + \".npy\")\n\tTrainMatrix = np.load(\"FFCV_datas\\\\TrainGroup.npy\")[0]\n\tA = np.loadtxt(\"Datasets\\\\DiDrAMat.txt\")\n\tTEST = []\n\tfor i in range(5):\n\t\tTEST.append(np.load(\"FFCV_datas\\\\knownXY_Array\" + str(i) + \".npy\"))\n\n\n\tTestMatrix = zeros(RD.shape)\n\tfor xy in TEST[0]:\n\t\tTestMatrix[xy[0], xy[1]] = 1\n\n\tTPR, FPR, PR = PlotRoc(RD, TrainMatrix, TestMatrix, [], [], [])\n\n\ty_TPR, x_FPR, y_PR = [], [], []\n\ty_TPR.append(np.array(TPR).sum(axis=0) / np.array(TPR).shape[0])\n\tx_FPR.append(np.array(FPR).sum(axis=0) / np.array(FPR).shape[0])\n\ty_PR.append(np.array(PR).sum(axis=0) / np.array(PR).shape[0])\n\n\tplot(x_FPR[0], y_TPR[0], \"r\")\n\tprint('ROC_AUC:', auc(x_FPR[0], y_TPR[0]))\n\t# show()\n\n\tplot(y_TPR[0], y_PR[0], \"b\")\n\tprint('PR:', auc(y_TPR[0], y_PR[0]) + y_TPR[0][0] * y_PR[0][0])\n\tshow()\n\n\n# ROC(\"mixResult\")\n\n\n","sub_path":"201811/CNN/ROC_block.py","file_name":"ROC_block.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"479753076","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'LixinZhang'\nSITENAME = u'Backyard of LixinZhang'\nSITEURL = 'http://lixinzhang.github.io'\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = u'cn'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\n\nPYGMENTS_RST_OPTIONS = {'classprefix': 'pgcss', 'linenos': 'table'}\n#MD_EXTENSIONS = ['fenced_code', 'codehilite(css_class=highlight, linenums=True)', 'extra']\n\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('weibo', 'http://weibo.com/zhanglixin5566/'),\n ('Facebook', 'https://www.facebook.com/zhanglixin.peter'),)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nTHEME = './elegant'\nGITHUB_URL = 'https://github.com/LixinZhang'\nDISQUS_SITENAME = 'zhanglixinseu'\nOUTPUT_PATH = './'\n\nLANDING_PAGE_ABOUT = {'title':'Backyard of LixinZhang', 'details':'''\n北京,腾讯,微信,基础研究
\n本科在东南大学,研究生在北京航空航天大学。
\n\n目前的工作方向主要是推荐系统、搜索引擎,关注机器学习、数据挖掘、自然语言处理相关领域。
\n微信是我毕业第一份正式工作,毕业前实习过Yahoo!、阿里巴巴、创新工场。
\n\n\n折腾过好多技术方向,到现在也没折腾明白,还在探索中...\n
\nBalance work and life,希望自己能逐渐成为一个有趣的人。
\n\n '''}\n\nPROJECTS = [\n {'name':'Gmail',\n 'url':'mailto:zhanglixinseu@gmail.com',\n 'description': 'Feel free to reach me via Email'},\n {'name': 'Github',\n 'url': 'https://github.com/LixinZhang',\n 'description': '一些代码,放在上面托管'},\n {'name':'Facebook',\n 'url':'https://www.facebook.com/zhanglixin.peter',\n 'description': '联系在外国朋友的'},\n {'name':'Linkedin',\n 'url':'http://www.linkedin.com/pub/lixin-zhang/72/bb/66a',\n 'description': '欢迎互相关注'},\n {'name': '新浪微博',\n 'url': 'http://weibo.com/zhanglixin5566',\n 'description': '扯淡用的'},\n {'name':'知乎',\n 'url':'http://www.zhihu.com/people/zhanglixin',\n 'description': '学习各种知识'},\n {'name':'博客园',\n 'url':'http://www.cnblogs.com/coser',\n 'description': '之前的博客,很多是本科时候记录的东西'}\n ]\n","sub_path":"publish_tool/pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"392292579","text":"\"\"\"Support for Subaru door locks.\"\"\"\nimport logging\n\nfrom homeassistant.components.lock import DOMAIN as LOCK_DOMAIN, LockEntity\n\nfrom . import DOMAIN as SUBARU_DOMAIN\nfrom .const import (\n ENTRY_CONTROLLER,\n ENTRY_COORDINATOR,\n ENTRY_VEHICLES,\n VEHICLE_HAS_REMOTE_SERVICE,\n)\nfrom .entity import SubaruEntity\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up the Subaru locks by config_entry.\"\"\"\n controller = hass.data[SUBARU_DOMAIN][config_entry.entry_id][ENTRY_CONTROLLER]\n coordinator = hass.data[SUBARU_DOMAIN][config_entry.entry_id][ENTRY_COORDINATOR]\n vehicle_info = hass.data[SUBARU_DOMAIN][config_entry.entry_id][ENTRY_VEHICLES]\n entities = []\n for vin in vehicle_info.keys():\n if vehicle_info[vin][VEHICLE_HAS_REMOTE_SERVICE]:\n entities.append(SubaruLock(vehicle_info[vin], coordinator, controller))\n async_add_entities(entities, True)\n\n\nclass SubaruLock(SubaruEntity, LockEntity):\n \"\"\"\n Representation of a Subaru door lock.\n\n Note that the Subaru API currently does not support returning the status of the locks. Therefore lock status is always unknown.\n \"\"\"\n\n def __init__(self, vehicle_info, coordinator, controller):\n \"\"\"Initialize the locks for the vehicle.\"\"\"\n super().__init__(vehicle_info, coordinator)\n self.title = \"Door Lock\"\n self.hass_type = LOCK_DOMAIN\n self.controller = controller\n\n async def async_lock(self, **kwargs):\n \"\"\"Send the lock command.\"\"\"\n _LOGGER.debug(\"Locking doors for: %s\", self.vin)\n await self.controller.lock(self.vin)\n\n async def async_unlock(self, **kwargs):\n \"\"\"Send the unlock command.\"\"\"\n _LOGGER.debug(\"Unlocking doors for: %s\", self.vin)\n await self.controller.unlock(self.vin)\n","sub_path":"custom_components/subaru/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"289785494","text":"from ..modeles.enums import UniteAngleEnum\n\nfrom src.calculs.modeles.entites_mathemathiques import \\\n Vecteur3D, \\\n TupleAnglesRotation, \\\n SpaceRechercheAnglesLimites, \\\n IntervalleLineaire, \\\n SystemeRepereSpherique\n\nfrom src.calculs.modeles.entites_systeme_minlight import \\\n DimensionsPave, \\\n Pave, \\\n ConfigurationAncrage, \\\n ConfigurationCable\n\nfrom src.calculs.simulation.angles_limites import VerificateurAnglesLimites\n\n'''\n Setup d'un faux systeme.\n Destiné juste à faire des debugs avec des valeurs plus simples.\n'''\n\n''' ************************ Maisonette ************************ '''\n# centre\ncentre_maisonette = \\\n Vecteur3D(\n x = 7500, # mm\n y = 5000, # mm\n z = 5000 # mm\n )\n\n# dimensions\ndimensions_maisonette = \\\n DimensionsPave(\n longueur = 5000, # mm\n largeur = 10000, # mm\n hauteur = 10000 # mm\n )\n\n# pave\nmaisonette = \\\n Pave(\n centre = centre_maisonette,\n ypr_angles = TupleAnglesRotation.ZERO(),\n dimensions = dimensions_maisonette\n)\n\n\n''' ************************ Source ************************ '''\n\n# dimensions\ndimensions_source = \\\n DimensionsPave(\n longueur = 1000, # mm\n largeur = 1000, # mm\n hauteur = 1000 # mm\n )\n\n\n''' ************************ Chambre ************************ '''\n\n# centre\ncentre_chambre = \\\n Vecteur3D(\n x = 5000, # mm\n y = 5000, # mm\n z = 5000 # mm\n )\n\n# dimensions\ndimensions_chambre = \\\n DimensionsPave(\n longueur = 10000, # mm\n largeur = 10000, # mm\n hauteur = 10000 # mm\n )\n\n# pavé\nchambre = \\\n Pave(\n centre = centre_chambre,\n ypr_angles = TupleAnglesRotation.ZERO(),\n dimensions = dimensions_chambre\n )\n\n\n''' ************************ Ancrage ************************ '''\n\n''' ****** Points Fixes ****** '''\n# toutes les ancrages sont supposées dans les coins de la chambre\n# donc, les coordonnées sont toujours, soit 0, soit la dimension de la chambre\n# sauf la longueur qui s'arrete juste au niveau de la maisonette\n\n# coordonnées d'ancrage\nx = 5000 # mm\ny = 10000 # mm\nz = 10000 # mm\n\n# la numérotation <> suit la logique des sommets des pavés\n# le <> indique à quel \"coin\" de la chambre le point est fixé\n\nPF_000 = Vecteur3D(0, 0, 0) # PF_000\nPF_100 = Vecteur3D(x, 0, 0) # PF_100\nPF_010 = Vecteur3D(0, y, 0) # PF_010\nPF_110 = Vecteur3D(x, y, 0) # PF_110\nPF_001 = Vecteur3D(0, 0, z) # PF_001\nPF_101 = Vecteur3D(x, 0, z) # PF_101\nPF_011 = Vecteur3D(0, y, z) # PF_011\nPF_111 = Vecteur3D(x, y, z) # PF_111\n\n\n''' ****** Configurations des Câbles ****** '''\n# la numérotation <> suit la logique des sommets des pavés\n# le <> indique à quel sommet le cable sera rataché DANS LA SOURCE\n\ncc_000 = ConfigurationCable(nom_sommet_source='S000', point_ancrage= PF_000) # cc_000\ncc_100 = ConfigurationCable(nom_sommet_source='S100', point_ancrage= PF_100) # cc_100\ncc_010 = ConfigurationCable(nom_sommet_source='S010', point_ancrage= PF_010) # cc_010\ncc_110 = ConfigurationCable(nom_sommet_source='S110', point_ancrage= PF_110) # cc_110\ncc_001 = ConfigurationCable(nom_sommet_source='S001', point_ancrage= PF_001) # cc_001\ncc_101 = ConfigurationCable(nom_sommet_source='S101', point_ancrage= PF_101) # cc_101\ncc_011 = ConfigurationCable(nom_sommet_source='S011', point_ancrage= PF_011) # cc_011\ncc_111 = ConfigurationCable(nom_sommet_source='S111', point_ancrage= PF_111) # cc_111\n\n\n''' ****** Configurations des Câbles ****** '''\nconfig_ancrage = ConfigurationAncrage(\n configs_cables=[cc_000, cc_100, cc_010, cc_110, cc_001, cc_101, cc_011, cc_111]\n)\n\n\n''' ************************ Systeme Spherique Baie Vitrée ************************ '''\n\n# centre - supposé dans le centre de la face d'intérêt de la maisonette\ncentre_systeme_spherique = \\\n Vecteur3D(\n x = 5000, # mm\n y = 5000, # mm\n z = 5000 # mm\n )\n\n# rotation\nrotation_systeme_spherique = \\\n TupleAnglesRotation(\n yaw = 180, # degrés\n pitch = 0, # degrés\n row = 0, # degrés\n unite = UniteAngleEnum.DEGRE,\n )\n\n# systeme sphérique\nsysteme_spherique_baie_vitree = SystemeRepereSpherique(\n centre = centre_systeme_spherique,\n ypr_angles = rotation_systeme_spherique\n)\n\n\n''' ************************ Configs Simulation ************************ '''\n\n# space de recherche\nspace_recherche = \\\n SpaceRechercheAnglesLimites(\n intervalle_rho = IntervalleLineaire(min= 1000, max= 1501, pas= 250), # mm\n intervalle_phi = IntervalleLineaire(min= 0, max= 180, pas= 6), # degres\n intervalle_theta = IntervalleLineaire(min= 0, max= 180, pas= 6), # degres\n unite = UniteAngleEnum.DEGRE\n)\n\n# diametre des câbles\ndiametre_cable = 10 # mm\n\n# discretisation des câbles\nn_discretisation_cables = 20 # point/câble\n\n# discretisation des cubes\nk_dicretisation_cubes = 3 # division/arête --> nb points/face = (k+1)^2\n\n# verbose\nverbose = True\n\n# dictionnaire de configs\nconfigs_simulation = {\n 'space_recherche' : space_recherche,\n 'diametre_cable' : diametre_cable,\n 'n_discretisation_cables' : n_discretisation_cables,\n 'k_dicretisation_cubes' : k_dicretisation_cubes,\n 'verbose' : verbose\n}\n\n\n''' ************************ VerificateurAnglesLimites ************************ '''\n\nverificateur = VerificateurAnglesLimites(\n dimensions_source = dimensions_source,\n maisonette = maisonette,\n chambre = chambre,\n config_ancrage = config_ancrage,\n systeme_spherique_baie_vitree = systeme_spherique_baie_vitree,\n configs_simulation = configs_simulation\n)\n\ndef __main__():\n print('faux a été importé')\n print(verificateur) # overwrite str function\n","sub_path":"src/calculs/setups/faux.py","file_name":"faux.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"180579302","text":"\"\"\"\nAuthor: angles\nDate and time: 01/10/16 - 21:48\n\"\"\"\n\nimport numpy as np\nfrom sklearn import decomposition\nimport matplotlib.pyplot as plt\n\nparams = {\n 'font.size': 11, # For the title\n 'axes.labelsize': 11,\n 'legend.fontsize': 11,\n 'xtick.labelsize': 10,\n 'ytick.labelsize': 10,\n 'text.usetex': False,\n 'figure.figsize': [18, 7.5]\n}\nplt.rcParams.update(params)\nnice_blue = '#006BB2'\n\nfig, axes = plt.subplots(2, 5, facecolor='white')\nfig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, hspace=0.3, wspace=0.5)\naxes = axes.ravel()\n\nlayers = [(1, 1), (2, 1), (3, 1), (3, 2), (3, 3), (3, 4), (4, 1), (4, 2), (4, 3), (4, 4)]\n\nidx_plot = 0\n\nfor (group, conv) in layers:\n file_name = '931_g' + str(group) + '_c' + str(conv)\n X = np.load('./sources/' + file_name + '.npy').T\n pca = decomposition.PCA()\n\n pca.fit(X)\n\n axes[idx_plot].plot(pca.explained_variance_, linewidth=2, color=nice_blue)\n title = 'Layer G{0}-C{1}'.format(group, conv)\n axes[idx_plot].set_title(title)\n axes[idx_plot].autoscale(tight=True)\n axes[idx_plot].set_frame_on(False)\n axes[idx_plot].grid(True)\n [y_bottom, y_top] = axes[idx_plot].get_ylim()\n axes[idx_plot].set_ylim([-0.1 * (y_top - y_bottom), y_top])\n [x_bottom, x_top] = axes[idx_plot].get_xlim()\n axes[idx_plot].set_xlim([x_bottom, x_top + 2])\n idx_plot += 1\n\nplt.show()\nfig.savefig('subplots.png')","sub_path":"subplots.py","file_name":"subplots.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"587137855","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\npackages = find_packages()\nversion_str = '5.2.2'\n\nsetup(\n name = 'ConceptNet',\n version = version_str,\n description = 'A semantic network of general knowledge',\n author = \"Rob Speer, Commonsense Computing Group\",\n author_email = 'conceptnet@media.mit.edu',\n packages=packages,\n package_data={'conceptnet5': ['support_data/*']},\n install_requires=['metanl >= 1.0b2', 'assoc-space', 'xmltodict', 'pyyaml', 'flask'],\n license = 'GPLv3'\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"35136994","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom matplotlib import *\nfrom numpy import *\nimport matplotlib.pyplot as plt ##Import Libraries\nimport pylab\nfrom matplotlib.pyplot import figure \nimport math\n\ninfile=open('110mvpp_sep.txt', 'r')\ndata = []\nfor line in infile:\n data.append(line.strip().split())\n\ninfile.close()\n\ninfile=open('110_vsweep.txt', 'r')\nvol = []\nfor line in infile:\n vol.append(line.strip().split())\n\ninfile.close()\n\nfor line in data:\n for i in range(len(line)):\n line[i] = float(line[i])\n \nfor line in vol:\n for i in range(len(line)):\n line[i] = float(line[i])\n\nx = []\ny = []\nfor line in data:\n x.append(line[0])\n y.append(line[1])\n \ny = [i / 3.42 for i in y]\n \nvoly = []\nfor line in vol:\n voly.append(line[1])\n\n#ycorr = [y - b for y,b in zip(y,saty)]\n\nplt.figure(figsize=(50,50))\n \n \nfrom scipy.signal import find_peaks_cwt\ncb = np.array(y)\nindexes = find_peaks_cwt(cb, np.arange(1, 800))\nprint('Peaks are: %s' % (indexes))\nindex_to_x = [i * 0.000004 + 0.0044 - 0.008104 for i in indexes]\nprint('Times are: %s' % (index_to_x))\n\nfor xc in index_to_x:\n plt.axvline(x=xc)\n \nx = [i - 0.008104 for i in x]\nprint(voly[926] - voly[3042])\n\n\nx = x[600:3200]\ny = y[600:3200]\nvoly = voly[600:3200]\n\ndef airy(x_data,fine,lamb):\n return [1.0 / (1 + fine * (math.sin(math.pi * p / lamb)** 2)) for p in x_data]\npopt, pcov = curve_fit(airy,x,y,p0=[150,0.00832])\nprint(\"best_vals: {}\".format(popt))\n\n\n\n#plt.scatter(x,ycorr,label='doppler - sat')\nplt.scatter(x,y)\nplt.plot(x,airy(x,*popt))\nplt.scatter(x,voly)\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"separation_110vpp.py","file_name":"separation_110vpp.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"258351754","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: Backend.utils\n :platform: Unix, Windows\n.. moduleauthor:: Aki Mäkinen \n\n\"\"\"\n\n__author__ = 'Aki Mäkinen'\n\ns_codes = {\n \"OK\": 200,\n \"BAD\": 400,\n \"UNAUTH\": 401,\n \"FORBIDDEN\": 403,\n \"NOTFOUND\": 404,\n \"METHODNOTALLOWED\": 405,\n \"TEAPOT\": 418,\n \"INTERNALERROR\": 500\n}\n\n_geojson_feature_fields = {\n \"type\": {\n \"_self_\": (unicode(), True),\n \"_values_\": [\"Feature\"]\n },\n \"geometry\": {\n \"_self_\": (dict(), True),\n \"type\": {\n \"_self_\": (unicode(), True),\n \"_values_\": [\"Point\"]\n },\n \"coordinates\": {\n \"_self_\": (list(), True),\n \"_elements_\": float(),\n \"_elementcount_\": 2\n }\n },\n \"properties\": {\n \"_self_\": (dict(), True),\n \"metadata\": {\n \"_self_\": (dict(), False),\n \"status\": (unicode(), True),\n \"info\": (unicode(), True)\n }\n },\n \"id\": (unicode(), True)\n}\n\n_geojson_featurecollection_fields = {\n \"type\": (unicode(), True), # Field key hard coded in validation\n \"totalFeatures\": (int(), False),\n \"features\": {\n \"_self_\": (list(), True),\n \"_elements_\": _geojson_feature_fields\n } # Field key hard coded in validation\n}\n\ndef geo_json_scheme_validation(jsondict):\n \"\"\"\n A simple GeoJSON validator.\n\n Uses the GeoJSON definitions described in LBD JSON Formats document.\n JSON format is described as python dictionary, where the key specifies the name of a JSON field and\n value describes if the field/value is required and what is the type of the value. There are some special\n key values: _self_ (if the value is list or embedded document), _elements_ (if the value is a list, this describes\n the element type) and _elementcount_ (restricts how many elements list can have).\n\n .. note::\n\n This function is a if-else hell... and the JSON format document is outdated.\n\n :param jsondict: GeoJSON formatted Python dictionary containing either GeoJSON Feature or FeatureCollection.\n :return Boolean: True or False depending on the result of the validation\n \"\"\"\n if not isinstance(jsondict, dict):\n return False\n\n if \"type\" in jsondict:\n # Check that the given itemdict follows the given format.\n # Stops at the first error returning False\n def check_items(itemdict, itemformat):\n for key, value in itemformat.iteritems():\n if isinstance(value, tuple):\n if value[1] == True and key not in itemdict:\n return False\n elif key in itemdict:\n if not isinstance(itemdict[key], type(value[0])):\n return False\n elif key.lower() in [k.lower() for k in itemdict]:\n return False\n else:\n pass\n elif isinstance(value, dict):\n if value[\"_self_\"][1] == True and key not in itemdict:\n return False\n elif key in itemdict:\n if isinstance(value[\"_self_\"][0], list):\n if \"_elementcount_\" in value:\n if not len(itemdict[key]) == value[\"_elementcount_\"]:\n return False\n if isinstance(value[\"_elements_\"], dict):\n itemlist = itemdict[key]\n newitemformat = dict(value[\"_elements_\"])\n for item in itemlist:\n result = check_items(item, newitemformat)\n if not result:\n return False\n else:\n for listitem in itemdict[key]:\n if not isinstance(listitem, type(value[\"_elements_\"])):\n return False\n elif isinstance(value[\"_self_\"][0], dict):\n newitemdict = itemdict[key]\n newitemformat = dict(value)\n del newitemformat[\"_self_\"]\n result = check_items(newitemdict, newitemformat)\n if not result:\n return False\n else:\n if isinstance(itemdict[key], type(value[\"_self_\"][0])):\n if \"_values_\" in value:\n try:\n if itemdict[key].lower() not in [v.lower() for v in value[\"_values_\"]]:\n return False\n except AttributeError:\n if itemdict[key] not in value[\"_values_\"]:\n return False\n else:\n return False\n elif key in [k.lower() for k in itemdict]:\n return False\n else:\n pass\n else:\n return False\n return True\n if jsondict[\"type\"].lower() == \"featurecollection\":\n result = check_items(jsondict, _geojson_featurecollection_fields)\n elif jsondict[\"type\"].lower() == \"feature\":\n result = check_items(jsondict, _geojson_feature_fields)\n else:\n return False\n else:\n result = False\n\n return result\n\n\ndef flattener(dicti, parent):\n \"\"\"\n Dictionary flattener\n\n Flattens a dictionary and... Ok I don't remember what this is for.\n Creates once iterable list.\n\n :param dicti: Dictionary to be flattened\n :param parent: Parent element of the dictionary\n \"\"\"\n for k, v in dicti.iteritems():\n if isinstance(v, dict):\n if parent is None:\n father = k\n else:\n father = parent + \".\" + k\n for item in flattener(v, father):\n yield item\n else:\n if parent is not None:\n yield parent + \".\" + k\n else:\n yield k","sub_path":"backend/lbd_backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"218370099","text":"import sys, string, math\r\nfrom itertools import permutations,combinations\r\n\r\nn,k = input().split()\r\nn,k = int(n),int(k)\r\nLw = [ int(x) for x in input().split()]\r\nLv = [ int(x) for x in input().split()]\r\ndic1 = {}\r\nfor i in range(0,n) :\r\n if Lw[i] not in dic1 :\r\n dic1[Lw[i]] = [Lv[i]]\r\n else :\r\n dic1[Lw[i]].append(Lv[i])\r\n\r\nL3 = []\r\nfor i in range(1,n+1) :\r\n L2 = list(combinations(Lw,i))\r\n for x in L2 :\r\n if sum(x) <= k :\r\n L3.append(x)\r\n#print(L3)\r\nmax1 = 0\r\nfor x in L3 :\r\n sum1 = 0\r\n dic2 = {}\r\n for i in x :\r\n if i not in dic2 :\r\n dic2[i]= dic1[i][:]\r\n #print('dic2=',dic2)\r\n for i in x :\r\n if len(dic2[i]) == 1 :\r\n sum1 += dic2[i][0]\r\n else :\r\n a = max(dic2[i])\r\n sum1 += a\r\n dic2[i].remove(a)\r\n #print(x,sum1,dic2)\r\n if sum1 > max1 :\r\n max1 = sum1\r\nprint(max1)\r\n\r\n\r\n\r\n","sub_path":"Level-4/s03/guvi-L4-s03-py07.py","file_name":"guvi-L4-s03-py07.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"224890672","text":"import numpy as np\nfrom rf_transform import RFTransform\nfrom eig_weighting import EigenvectorWeighting\nfrom kmedoids import JKMedoids, SquishyJKMedoids\nfrom sklearn.pipeline import Pipeline\n\nclass RFCluster(Pipeline):\n def __init__(self, k,\n model_type='random_forest',\n kmedoids_type='normal',\n n_forests=150,\n n_trees=1,\n n_features_to_predict=0.5,\n max_depth=5, #should be 2 if model_type is boosting\n learning_rate=0.6,\n using_weights=False,\n using_pca=False,\n weight_extent=1, # 2 if model_type is boosting\n max_iter=60,\n n_attempts=10,\n weight_adjustment=0,\n eig_extent=0,\n n_jobs=1):\n rft = RFTransform(n_forests,\n model_type=model_type,\n n_trees=n_trees,\n n_features_to_predict=n_features_to_predict,\n max_depth=max_depth,\n outputting_weights=using_weights,\n using_pca=using_pca,\n weight_extent=weight_extent,\n learning_rate=learning_rate,\n n_jobs=n_jobs)\n ew = EigenvectorWeighting(extent=eig_extent)\n if kmedoids_type == 'normal':\n jk = JKMedoids(k,\n max_iter=max_iter,\n n_attempts=n_attempts,\n accepting_weights=using_weights,\n weight_adjustment=weight_adjustment,\n n_jobs=n_jobs)\n else:\n jk = SquishyJKMedoids(k,\n max_iter=max_iter,\n n_attempts=n_attempts,\n accepting_weights=using_weights,\n weight_adjustment=weight_adjustment,\n n_jobs=n_jobs)\n if eig_extent == 0 or not using_weights:\n Pipeline.__init__(self,[('rft', rft), ('jkmeans', jk)])\n else:\n Pipeline.__init__(self,[('rft', rft), ('ew', ew), ('jkmeans', jk)])\n","sub_path":"rf_cluster.py","file_name":"rf_cluster.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"208848979","text":"\n\nfrom xai.brain.wordbase.adjectives._varsity import _VARSITY\n\n#calss header\nclass _VARSITIES(_VARSITY, ):\n\tdef __init__(self,): \n\t\t_VARSITY.__init__(self)\n\t\tself.name = \"VARSITIES\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"varsity\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_varsities.py","file_name":"_varsities.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"89999971","text":"# По введённым сторонам треугольника определить,\n# существует ли такой треугольник, если я то выяснить:\n# является ли он равносторонним, равнобедренным или разносторонним.\n\nprint('Введите длины треугольника.')\n\na = int(input('Первая сторона = '))\nb = int(input('Вторая сторона = '))\nc = int(input('Третья сторона = '))\n\nif (a + b > c) and (b + c > a) and (c + a > b):\n if a == b == c:\n print('Треугольник равносторонний.')\n elif (a == b) or (b == c) or (c == a):\n print('Треугольник равнобедренный.')\n else:\n print('Треугольник разносторонний.')\nelse:\n print('Треугольник нельзя составить.')\n\n","sub_path":"homework_1/task_7.py","file_name":"task_7.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"472400979","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport datetime\nimport json\nimport os\nimport unittest\n\nimport boto3\nfrom six import BytesIO\n\nfrom girder.constants import AccessType\nfrom girder.utility import parseTimestamp\nfrom girder.utility.ziputil import ZipGenerator\nfrom tests import base\n\nfrom .isic_base import IsicTestCase\n\n\ndef setUpModule():\n base.enabledPlugins.append('isic_archive')\n base.startServer()\n\n\ndef tearDownModule():\n base.stopServer()\n\n\nclass UploadTestCase(IsicTestCase):\n def setUp(self):\n super(UploadTestCase, self).setUp()\n\n # Set up girder_worker\n from girder.plugins import worker\n Setting = self.model('setting')\n Setting.set(\n worker.PluginSettings.BROKER,\n 'mongodb://localhost:27017/girder_worker')\n Setting.set(\n worker.PluginSettings.BACKEND,\n 'mongodb://localhost:27017/girder_worker')\n # TODO: change this to 'amqp://guest@127.0.0.1/' for RabbitMQ\n\n self.testDataDir = os.path.join(\n os.environ['GIRDER_TEST_DATA_PREFIX'], 'plugins', 'isic_archive')\n\n def _createReviewerUser(self):\n \"\"\"Create a reviewer user that will receive notification emails.\"\"\"\n Group = self.model('group')\n User = self.model('user', 'isic_archive')\n\n resp = self.request(path='/user', method='POST', params={\n 'email': 'reviewer-user@isic-archive.com',\n 'login': 'reviewer-user',\n 'firstName': 'reviewer',\n 'lastName': 'user',\n 'password': 'password'\n })\n self.assertStatusOk(resp)\n\n reviewerUser = User.findOne({'login': 'reviewer-user'})\n reviewersGroup = Group.findOne({'name': 'Dataset QC Reviewers'})\n Group.addUser(reviewersGroup, reviewerUser, level=AccessType.READ)\n\n return reviewerUser\n\n def _createUploaderUser(self):\n \"\"\"Create an uploader user.\"\"\"\n Group = self.model('group')\n User = self.model('user', 'isic_archive')\n\n resp = self.request(path='/user', method='POST', params={\n 'email': 'uploader-user@isic-archive.com',\n 'login': 'uploader-user',\n 'firstName': 'uploader',\n 'lastName': 'user',\n 'password': 'password'\n })\n self.assertStatusOk(resp)\n\n uploaderUser = User.findOne({'login': 'uploader-user'})\n contributorsGroup = Group.findOne({'name': 'Dataset Contributors'})\n Group.addUser(contributorsGroup, uploaderUser, level=AccessType.READ)\n\n return uploaderUser\n\n def _createSiteAdminUser(self):\n \"\"\"Create a site admin user.\"\"\"\n User = self.model('user', 'isic_archive')\n params = {\n 'email': 'admin-user@isic-archive.com',\n 'login': 'admin-user',\n 'firstName': 'admin',\n 'lastName': 'user',\n 'password': 'password',\n 'admin': True\n }\n return User.createUser(**params)\n\n def _createZipFile(self, zipName, zipContentNames):\n \"\"\"\n Create a zip file of images.\n\n Returns (stream, size).\n \"\"\"\n zipStream = BytesIO()\n zipGen = ZipGenerator(zipName)\n for fileName in zipContentNames:\n with open(os.path.join(self.testDataDir, fileName), 'rb') as fileObj:\n for data in zipGen.addFile(lambda: fileObj, fileName):\n zipStream.write(data)\n zipStream.write(zipGen.footer())\n # Seek to the end of the stream\n zipStream.seek(0, 2)\n zipSize = zipStream.tell()\n zipStream.seek(0)\n return zipStream, zipSize\n\n def _uploadDataset(self, uploaderUser, zipName, zipContentNames,\n datasetName, datasetDescription):\n Dataset = self.model('dataset', 'isic_archive')\n Folder = self.model('folder')\n Upload = self.model('upload')\n\n # Create a ZIP file of images\n zipStream, zipSize = self._createZipFile(zipName, zipContentNames)\n\n # Create new folders in the uploader user's home\n resp = self.request(\n path='/folder', method='POST', user=uploaderUser, params={\n 'parentType': 'user',\n 'parentId': str(uploaderUser['_id']),\n 'name': '%s_upload_folder' % zipName\n })\n self.assertStatusOk(resp)\n uploadZipFolder = Folder.load(resp.json['_id'], force=True)\n\n # Uploading files is complicated via REST, so upload the ZIP via models\n # No special behavior should be attached to uploading a plain ZIP file\n zipFile = Upload.uploadFromFile(\n obj=zipStream,\n size=zipSize,\n name='%s.zip' % zipName,\n parentType='folder',\n parent=uploadZipFolder,\n user=uploaderUser,\n mimeType='application/zip'\n )\n\n resp = self.request(\n path='/dataset', method='POST', user=uploaderUser, params={\n 'name': datasetName,\n 'description': datasetDescription,\n 'license': 'CC-0',\n 'attribution': 'Test Organization',\n 'owner': 'Test Organization'\n })\n self.assertStatusOk(resp)\n dataset = Dataset.findOne({'name': datasetName})\n self.assertIsNotNone(dataset)\n self.assertEqual(str(dataset['_id']), resp.json['_id'])\n\n self.assertNoMail()\n resp = self.request(\n path='/dataset/%s/zipBatch' % dataset['_id'], method='POST', user=uploaderUser, params={\n 'zipFileId': str(zipFile['_id']),\n 'signature': 'Test Uploader'\n })\n self.assertStatusOk(resp)\n # Uploader user and reviewer user should receive emails\n self.assertMails(count=2)\n\n return dataset\n\n def testUploadDataset(self):\n User = self.model('user', 'isic_archive')\n\n # Create users\n reviewerUser = self._createReviewerUser()\n uploaderUser = self._createUploaderUser()\n adminUser = self._createSiteAdminUser()\n\n # Create and upload two ZIP files of images\n publicDataset = self._uploadDataset(\n uploaderUser=uploaderUser,\n zipName='test_zip_1',\n zipContentNames=['test_1_small_1.jpg', 'test_1_small_2.jpg',\n 'test_1_large_1.jpg'],\n datasetName='test_dataset_1',\n datasetDescription='A public test dataset'\n )\n privateDataset = self._uploadDataset(\n uploaderUser=uploaderUser,\n zipName='test_zip_2',\n zipContentNames=['test_1_small_3.jpg', 'test_1_large_2.jpg'],\n datasetName='test_dataset_2',\n datasetDescription='A private test dataset'\n )\n\n # Ensure that ordinary users aren't getting review tasks\n resp = self.request(\n path='/task/me/review', method='GET')\n self.assertStatus(resp, 401)\n resp = self.request(\n path='/task/me/review', method='GET', user=uploaderUser)\n self.assertStatus(resp, 403)\n\n # Ensure that reviewer users are getting tasks\n resp = self.request(\n path='/task/me/review', method='GET', user=reviewerUser)\n self.assertStatusOk(resp)\n reviewTasks = resp.json\n self.assertEqual(len(reviewTasks), 2)\n self.assertIn({\n 'dataset': {\n '_id': str(publicDataset['_id']),\n 'name': publicDataset['name']},\n 'count': 3\n }, reviewTasks)\n self.assertIn({\n 'dataset': {\n '_id': str(privateDataset['_id']),\n 'name': privateDataset['name']},\n 'count': 2\n }, reviewTasks)\n\n # Ensure that review task redirects are working\n resp = self.request(\n path='/task/me/review/redirect', method='GET', user=reviewerUser)\n self.assertStatus(resp, 400)\n for reviewTask in reviewTasks:\n reviewId = reviewTask['dataset']['_id']\n resp = self.request(\n path='/task/me/review/redirect', method='GET',\n params={'datasetId': reviewId}, user=reviewerUser, isJson=False)\n self.assertStatus(resp, 307)\n self.assertDictContainsSubset({\n 'Location': '/#tasks/review/%s' % reviewId\n }, resp.headers)\n\n # Accept all images\n resp = self.request(\n path='/dataset/%s/review' % publicDataset['_id'], method='GET', user=reviewerUser)\n self.assertStatusOk(resp)\n self.assertEqual(len(resp.json), 3)\n imageIds = [image['_id'] for image in resp.json]\n resp = self.request(\n path='/dataset/%s/review' % publicDataset['_id'], method='POST', user=reviewerUser,\n params={\n 'accepted': json.dumps(imageIds),\n 'flagged': []\n })\n self.assertStatusOk(resp)\n\n # Attempt to register metadata as invalid users\n csvPath = os.path.join(self.testDataDir, 'test_1_metadata.csv')\n with open(csvPath, 'rb') as csvStream:\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='POST',\n body=csvStream.read(), type='text/csv',\n params={\n 'filename': 'test_1_metadata.csv'\n })\n self.assertStatus(resp, 401)\n\n # Attempt to register metadata with invalid parameters\n with open(csvPath, 'rb') as csvStream:\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='POST',\n body=csvStream.read(), type='text/csv',\n user=uploaderUser)\n self.assertStatus(resp, 400)\n self.assertIn('\"filename\" is required', resp.json['message'].lower())\n with open(csvPath, 'rb') as csvStream:\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='POST',\n body=csvStream.read(), type='text/csv',\n user=uploaderUser, params={\n 'filename': ' '\n })\n self.assertStatus(resp, 400)\n self.assertIn('filename must be specified', resp.json['message'].lower())\n\n # Attempt to list registered metadata as invalid users\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='GET')\n self.assertStatus(resp, 401)\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='GET',\n user=uploaderUser)\n self.assertStatus(resp, 403)\n\n # List (empty) registered metadata\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='GET',\n user=reviewerUser)\n self.assertStatusOk(resp)\n self.assertEqual(resp.json, [])\n\n # Register metadata with dataset\n self.assertNoMail()\n with open(csvPath, 'rb') as csvStream:\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'], method='POST',\n body=csvStream.read(), type='text/csv', isJson=False,\n user=uploaderUser, params={\n 'filename': 'test_1_metadata.csv'\n })\n self.assertStatusOk(resp)\n # Reviewer user should receive email\n self.assertMails(count=1)\n\n # List registered metadata\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'],\n user=reviewerUser)\n self.assertStatusOk(resp)\n self.assertIsInstance(resp.json, list)\n self.assertEqual(len(resp.json), 1)\n\n # Check file field\n self.assertIn('file', resp.json[0])\n self.assertIn('_id', resp.json[0]['file'])\n self.assertIn('name', resp.json[0]['file'])\n self.assertEqual('test_1_metadata.csv', resp.json[0]['file']['name'])\n self.assertIn('user', resp.json[0])\n\n # Check user field\n self.assertDictEqual({\n '_id': str(uploaderUser['_id']),\n 'name': User.obfuscatedName(uploaderUser)\n }, resp.json[0]['user'])\n\n # Check time field\n self.assertIn('time', resp.json[0])\n self.assertLess(parseTimestamp(resp.json[0]['time']),\n datetime.datetime.utcnow())\n metadataFileId = resp.json[0]['file']['_id']\n\n # Test downloading metadata as invalid users\n resp = self.request(\n path='/dataset/%s/metadata/%s/download' % (publicDataset['_id'], metadataFileId),\n method='GET', isJson=False)\n self.assertStatus(resp, 401)\n resp = self.request(\n path='/dataset/%s/metadata/%s/download' % (publicDataset['_id'], metadataFileId),\n method='GET', user=uploaderUser, isJson=False)\n self.assertStatus(resp, 403)\n\n # Test downloading metadata\n resp = self.request(\n path='/dataset/%s/metadata/%s/download' % (publicDataset['_id'], metadataFileId),\n method='GET', user=reviewerUser, isJson=False)\n with open(csvPath, 'rb') as csvStream:\n self.assertEqual(csvStream.read(), self.getBody(resp))\n\n # Test applying metadata\n resp = self.request(\n path='/dataset/%s/metadata/%s/apply' % (publicDataset['_id'], metadataFileId),\n method='POST', user=uploaderUser, params={\n 'save': False\n })\n self.assertStatus(resp, 403)\n resp = self.request(\n path='/dataset/%s/metadata/%s/apply' % (publicDataset['_id'], metadataFileId),\n method='POST', user=reviewerUser, params={\n 'save': False\n })\n self.assertStatusOk(resp)\n self.assertIn('errors', resp.json)\n self.assertIn('warnings', resp.json)\n self.assertEqual(0, len(resp.json['errors']))\n self.assertEqual(\n resp.json['warnings'], [\n {'description':\n 'on CSV row 4: no images found that match u\\'filename\\': u\\'test_1_small_3.jpg\\''},\n {'description':\n 'on CSV row 6: no images found that match u\\'filename\\': u\\'test_1_large_2.jpg\\''},\n {'description':\n 'unrecognized field u\\'age_approx\\' will be added to unstructured metadata'},\n {'description':\n 'unrecognized field u\\'isic_source_name\\' will be added to unstructured metadata'}\n ])\n\n # Test removing metadata as site admin\n resp = self.request(\n path='/dataset/%s/metadata/%s' % (publicDataset['_id'], metadataFileId),\n method='DELETE', user=adminUser, isJson=False)\n self.assertStatus(resp, 204)\n resp = self.request(\n path='/dataset/%s/metadata' % publicDataset['_id'],\n user=reviewerUser)\n self.assertStatusOk(resp)\n self.assertIsInstance(resp.json, list)\n self.assertEqual(len(resp.json), 0)\n\n def testUploadImages(self):\n \"\"\"\n Test the single image upload lifecycle.\n\n Test creating dataset, uploading images to the dataset individually, and applying metadata\n to an uploading image.\n \"\"\"\n # Create users\n reviewerUser = self._createReviewerUser()\n uploaderUser = self._createUploaderUser()\n\n # Create a dataset\n resp = self.request(path='/dataset', method='POST', user=uploaderUser, params={\n 'name': 'test_dataset_1',\n 'description': 'A public test dataset',\n 'license': 'CC-0',\n 'attribution': 'Test Organization',\n 'owner': 'Test Organization'\n })\n self.assertStatusOk(resp)\n dataset = resp.json\n\n # Add images to the dataset\n for imageName in ['test_1_small_1.jpg', 'test_1_large_1.jpg']:\n with open(os.path.join(self.testDataDir, imageName), 'rb') as fileObj:\n fileData = fileObj.read()\n\n resp = self.request(\n path='/dataset/%s/image' % dataset['_id'], method='POST', user=uploaderUser,\n body=fileData, type='image/jpeg', isJson=False,\n params={\n 'filename': imageName,\n 'signature': 'Test Uploader'\n })\n self.assertStatusOk(resp)\n\n # Accept all images\n resp = self.request(\n path='/dataset/%s/review' % dataset['_id'], method='GET', user=reviewerUser)\n self.assertStatusOk(resp)\n self.assertEqual(2, len(resp.json))\n imageIds = [image['_id'] for image in resp.json]\n resp = self.request(\n path='/dataset/%s/review' % dataset['_id'], method='POST', user=reviewerUser,\n params={\n 'accepted': json.dumps(imageIds),\n 'flagged': []\n })\n self.assertStatusOk(resp)\n\n # Check number of images in dataset\n resp = self.request(path='/dataset/%s' % dataset['_id'], user=uploaderUser)\n self.assertStatusOk(resp)\n dataset = resp.json\n self.assertEqual(2, dataset['count'])\n\n # Add metadata to images\n resp = self.request(path='/image', user=uploaderUser, params={\n 'datasetId': dataset['_id']\n })\n self.assertStatusOk(resp)\n self.assertEqual(2, len(resp.json))\n image = resp.json[0]\n\n metadata = {\n 'diagnosis': 'melanoma',\n 'benign_malignant': 'benign'\n }\n resp = self.request(\n path='/image/%s/metadata' % image['_id'], method='POST',\n user=uploaderUser, body=json.dumps(metadata), type='application/json', params={\n 'save': False\n })\n self.assertStatusOk(resp)\n self.assertIn('errors', resp.json)\n self.assertIn('warnings', resp.json)\n self.assertEqual(1, len(resp.json['errors']))\n self.assertEqual([], resp.json['warnings'])\n\n metadata = {\n 'diagnosis': 'melanoma',\n 'benign_malignant': 'malignant',\n 'diagnosis_confirm_type': 'histopathology',\n 'custom_id': '111-222-3334'\n }\n resp = self.request(\n path='/image/%s/metadata' % image['_id'], method='POST',\n user=uploaderUser, body=json.dumps(metadata), type='application/json', params={\n 'save': True\n })\n self.assertStatusOk(resp)\n self.assertIn('errors', resp.json)\n self.assertIn('warnings', resp.json)\n self.assertEqual([], resp.json['errors'])\n self.assertEqual(1, len(resp.json['warnings']))\n\n # Verify that metadata exists on image\n resp = self.request(path='/image/%s' % image['_id'], user=uploaderUser)\n self.assertStatusOk(resp)\n self.assertEqual('melanoma', resp.json['meta']['clinical']['diagnosis'])\n self.assertEqual('malignant', resp.json['meta']['clinical']['benign_malignant'])\n self.assertEqual('histopathology', resp.json['meta']['clinical']['diagnosis_confirm_type'])\n self.assertEqual('111-222-3334', resp.json['meta']['unstructured']['custom_id'])\n\n @unittest.skip('Test must be configured and run manually.')\n def testZipUploadToS3(self):\n \"\"\"\n Test uploading a ZIP file of images directly to S3 and adding them to a dataset.\n\n Note that Moto, the library to mock Boto calls, currently ignores AWS credentials;\n all calls will succeed. Therefore, this test is intended to be run manually against\n real AWS resources.\n \"\"\"\n Image = self.model('image', 'isic_archive')\n Setting = self.model('setting')\n\n # Create user\n user = self._createUploaderUser()\n\n # Read settings from environment variables\n if not all(key in os.environ for key in [\n 'ISIC_UPLOAD_ROLE_ARN',\n 'ISIC_UPLOAD_BUCKET_NAME'\n ]):\n self.fail('Test requires environment variables for AWS configuration to be set.')\n Setting.set('isic.upload_role_arn',\n os.environ['ISIC_UPLOAD_ROLE_ARN'])\n Setting.set('isic.upload_bucket_name',\n os.environ['ISIC_UPLOAD_BUCKET_NAME'])\n\n # Create a dataset\n datasetName = 'test_dataset_1'\n resp = self.request(\n path='/dataset', method='POST', user=user, params={\n 'name': datasetName,\n 'description': 'A public test dataset',\n 'license': 'CC-0',\n 'attribution': 'Test Organization',\n 'owner': 'Test Organization'\n })\n self.assertStatusOk(resp)\n dataset = resp.json\n\n #\n # Initiate and finalize a direct-to-s3 upload of a ZIP file of images.\n #\n\n # Initiate upload\n resp = self.request(\n path='/dataset/%s/zip' % dataset['_id'], method='POST', user=user, params={\n 'signature': 'Test Uploader'\n })\n self.assertStatusOk(resp)\n self.assertHasKeys(resp.json, ['accessKeyId', 'secretAccessKey', 'sessionToken',\n 'bucketName', 'objectKey', 'batchId'])\n accessKeyId = resp.json['accessKeyId']\n secretAccessKey = resp.json['secretAccessKey']\n sessionToken = resp.json['sessionToken']\n bucketName = resp.json['bucketName']\n objectKey = resp.json['objectKey']\n batchId = resp.json['batchId']\n\n # Upload ZIP file to S3\n zipName = 'test_zip_1'\n zipStream, zipSize = self._createZipFile(\n zipName=zipName, zipContentNames=['test_1_small_1.jpg', 'test_1_small_2.jpg'])\n s3 = boto3.client(\n 's3',\n aws_access_key_id=accessKeyId,\n aws_secret_access_key=secretAccessKey,\n aws_session_token=sessionToken)\n s3.upload_fileobj(\n Fileobj=zipStream,\n Bucket=bucketName,\n Key=objectKey)\n\n # Finalize upload\n self.assertEqual(0, Image.find().count())\n resp = self.request(\n path='/dataset/%s/zip/%s' % (dataset['_id'], batchId),\n method='POST', user=user)\n self.assertStatusOk(resp)\n self.assertEqual(2, Image.find().count())\n\n #\n # Initiate and cancel a direct-to-s3 upload of a ZIP file of images, without uploading\n # the file.\n #\n\n # Initiate upload\n resp = self.request(\n path='/dataset/%s/zip' % dataset['_id'], method='POST', user=user, params={\n 'signature': 'Test Uploader'\n })\n self.assertStatusOk(resp)\n self.assertHasKeys(resp.json, ['accessKeyId', 'secretAccessKey', 'sessionToken',\n 'bucketName', 'objectKey', 'batchId'])\n accessKeyId = resp.json['accessKeyId']\n secretAccessKey = resp.json['secretAccessKey']\n sessionToken = resp.json['sessionToken']\n bucketName = resp.json['bucketName']\n objectKey = resp.json['objectKey']\n batchId = resp.json['batchId']\n\n # Don't upload file\n\n # Cancel upload\n resp = self.request(\n path='/dataset/%s/zip/%s' % (dataset['_id'], batchId), method='DELETE', user=user)\n self.assertStatusOk(resp)\n\n #\n # Initiate and cancel a direct-to-s3 upload of a ZIP file of images.\n #\n\n # Initiate upload\n resp = self.request(\n path='/dataset/%s/zip' % dataset['_id'], method='POST', user=user, params={\n 'signature': 'Test Uploader'\n })\n self.assertStatusOk(resp)\n self.assertHasKeys(resp.json, ['accessKeyId', 'secretAccessKey', 'sessionToken',\n 'bucketName', 'objectKey', 'batchId'])\n accessKeyId = resp.json['accessKeyId']\n secretAccessKey = resp.json['secretAccessKey']\n sessionToken = resp.json['sessionToken']\n bucketName = resp.json['bucketName']\n objectKey = resp.json['objectKey']\n batchId = resp.json['batchId']\n\n # Upload ZIP file to S3\n zipName = 'test_zip_1'\n zipStream, zipSize = self._createZipFile(\n zipName=zipName, zipContentNames=['test_1_small_1.jpg', 'test_1_small_2.jpg'])\n s3 = boto3.client(\n 's3',\n aws_access_key_id=accessKeyId,\n aws_secret_access_key=secretAccessKey,\n aws_session_token=sessionToken)\n s3.upload_fileobj(\n Fileobj=zipStream,\n Bucket=bucketName,\n Key=objectKey)\n\n # Cancel upload\n resp = self.request(\n path='/dataset/%s/zip/%s' % (dataset['_id'], batchId), method='DELETE', user=user)\n self.assertStatusOk(resp)\n","sub_path":"plugin_tests/upload_test.py","file_name":"upload_test.py","file_ext":"py","file_size_in_byte":25679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"483349899","text":"from django.conf.urls import url, include\nfrom rest_framework_extensions.routers import ExtendedDefaultRouter\nfrom . import views\n\nrouter = ExtendedDefaultRouter()\n\n(\n router.register(r'queues', views.QueueViewSet, base_name='queue')\n .register(r'tasks',\n views.TaskViewSet,\n base_name='queue-task',\n parents_query_lookups=['queue_id'])\n)\n(\n router.register(r'tasks', views.TaskViewSet, base_name='task')\n .register(r'comments',\n views.TaskCommentViewSet,\n base_name='task-comment',\n parents_query_lookups=['task_id'])\n)\nrouter.register(r'comments', views.TaskCommentViewSet, base_name='task-comment')\nrouter.register(r'callbacks', views.CallbackViewSet, base_name='callback')\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n]\n","sub_path":"web_plant/calls/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"639027248","text":"import telebot\nimport os\nfrom telebot import types\n\nMainPath = r\"C:\\Users\\123\\PycharmProjects\\FindVk\\users\"\n\n# def saveData(path, data, type):\n# with open()\n\ndef checText(text):\n try:\n return {\n \"привет\" : \"соси хуй\"\n }[text]\n except: return \"атсаси\"\n\n#передаем наш токен бота\nbot = telebot.TeleBot(\"1496934915:AAFEMM4mxh3SyYO6hazmq2WK2ORQxcyT3vA\")\n\n#commands обрабатывает команды, которые передает пользователь\n@bot.message_handler(commands=[\"start\"])\ndef start(message):\n src = MainPath + f\"\\\\{message.from_user.first_name}_{message.chat.id}\"\n if os.path.exists(src) == False:\n os.mkdir(src)\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)\n btn1 = types.KeyboardButton(\"найти по фотке\")\n markup.add(btn1)\n\n #massege передает декоратор\n sendMessage = f\"привет {message.from_user.first_name }!\"\n bot.send_message(message.chat.id, sendMessage, reply_markup=markup)\n bot.send_message(message.chat.id, \"Что хочешь сделать?\")\n\n@bot.message_handler(content_types=[\"text\"])\ndef findUser(message):\n if message.text.strip().lower() == \"найти по фотке\":\n bot.send_message(message.chat.id, \"нажми 'поделиться контактом'\")\n\n@bot.message_handler(content_types=['contact'])\ndef contact(message):\n if message.contact is not None: #Если присланный объект contact не равен нулю\n phoneNumber = message.contact.phone_number\n src = MainPath + f\"\\\\{message.from_user.first_name}_{message.chat.id}\\\\data.json\"\n with open(src, \"w\") as data:\n data.write(\"phoneNuber:\" + phoneNumber)\n\n\n@bot.message_handler(content_types=[\"document\"])\ndef handle_docs_photo(message):\n try:\n bot.reply_to(message, \"спасибо\")\n\n file_info = bot.get_file(message.document.file_id)\n downloaded_file = bot.download_file(file_info.file_path)\n\n src = MainPath + f\"\\\\{message.from_user.first_name}_{message.chat.id}\" + \"\\\\\" + str(message.document.file_id) + \".jpg\"\n with open(src, \"wb\") as new_file:\n new_file.write(downloaded_file)\n\n except Exception as e:\n bot.reply_to(message, e)\n\n@bot.message_handler(content_types=[\"text\"])\ndef mess(message):\n getMessage = message.text.strip().lower()\n try:\n\n src = MainPath + f\"\\\\{message.from_user.first_name}_{message.chat.id}\\\\messages.txt\"\n with open(src, \"a\") as savedMessage:\n savedMessage.write(getMessage + \"\\n\")\n outputMessage = checText(getMessage)\n bot.send_message(message.chat.id, outputMessage)\n except UnicodeEncodeError:\n pass\n\n#этот метод нужен что бы бот не отключался\nbot.polling(none_stop=True)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"214248803","text":"def allLongestStrings(array):\n\n answer = [array[0]]\n for i in range(1, len(array)):\n #DB 1#for i in range(len(array)):\n if len(array[i]) == len(answer[0]):\n #DB 2#if array[i] == answer[0]:\n #DB 3#if len(array[i]) == len(answer):\n answer.append(array[i])\n #DB 4#answer = [array[i]]\n #RV 1#answer.append( ... )\n if len(array[i]) > len(answer[0]):\n answer = [array[i]]\n #DB 5#answer.append(array[i])\n #RV 2#answer = ... \n return answer\n #DB 6#return array\n","sub_path":"allLongestStrings/allLongestStrings.py","file_name":"allLongestStrings.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"548678030","text":"import sys\nimport prosper\nimport numpy as np\nfrom scipy.stats import truncnorm\n\nfrom utils.Cards_loader import Cards_loader\nfrom utils.Time_Surface_generators import Time_Surface_all, Time_Surface_event\n\nfrom prosper.em import EM\nfrom prosper.em.annealing import LinearAnnealing\nfrom prosper.em.camodels.bsc_et import BSC_ET\nfrom prosper.em.camodels.dbsc_et import DBSC_ET\nfrom prosper.utils import create_output_path\nfrom prosper.utils.datalog import dlog, StoreToH5, TextPrinter, StoreToTxt\nimport tables as tb\nfrom mpi4py import MPI\n\nimport os, sys\n\ncomm = MPI.COMM_WORLD\n\nnprocs = comm.size\nprint(\"running {} parallel processes\".format(nprocs))\n\n# np.set_printoptions(threshold=sys.maxsize)\noutput_path = create_output_path()\n\n# PARAMETERS ####\n\nlearning = True # Decide whether to run the sparse coding algorithm\nclassification = True # Run classification\n\nts_size = 13 # size of the time surfaces\n# size of the pip cards (square so dimension D = rec_size * rec_size)\nrec_size = 35\ntau = 5000 # time constant for the construction of time surfaces\n# number of polarities that we will use in the dataset (1 because polarities are not informative in the cards dataset)\npolarities = 1\n\n# IMPORTING DATASET ####\ndtr = None\ndte = None\nprint(\"rank: \", comm.rank)\n\nto_scatter_train = None\nto_scatter_test = None\nif comm.rank == 0:\n fh = tb.open_file(\"../datasets/pokerDVS.h5\")\n dtr = [d.read() for d in fh.root.train]\n dte = [d.read() for d in fh.root.test]\n\n to_scatter_train = [dtr[i::nprocs] for i in range(nprocs)]\n to_scatter_test = [dte[i::nprocs] for i in range(nprocs)]\n\ndtr = comm.scatter(to_scatter_train)\ndte = comm.scatter(to_scatter_test)\nprint(\"rank: \", comm.rank, len(dtr), len(dte))\nsys.stdout.flush()\n# number_of_samples = sum(sizes_of_train_samples)\n\n\n# idx = 0\nts = []\ntrain_labels = []\ntrain_rec_sizes = []\nfor recording in range(len(dtr)):\n for k in range(dtr[recording].shape[0]):\n single_event = [dtr[recording][k, 0].astype(np.int),\n dtr[recording][k, 1:3].astype(np.int)]\n dataset = [dtr[recording][:, 0].astype(np.int),\n dtr[recording][:, 1:3].astype(np.int),\n dtr[recording][:, 3].astype(np.int)]\n\n time_surface = Time_Surface_event(xdim=ts_size,\n ydim=ts_size,\n event=single_event,\n timecoeff=tau,\n dataset=dataset,\n num_polarities=polarities,\n verbose=False)\n ts.append(time_surface)\n train_labels.append(int(dtr[recording][k, -1]))\n # idx += 1\n train_rec_sizes.append(dtr[recording].shape[0])\nts = np.array(ts)\nts = ts.reshape((ts.shape[0], -1))\ntrain_labels = np.array(train_labels)\n# ts_res = ts.shape[0] % comm.size\n# ts = ts[:-ts_res]\n# train_labels = train_labels[:-ts_res]\n# print(len(train_labels))\n\nts_test = []\ntest_labels = []\ntest_rec_sizes = []\nfor recording in range(len(dte)):\n for k in range(dte[recording].shape[0]):\n\n single_event = [dte[recording][k, 0].astype(np.int),\n dte[recording][k, 1:3].astype(np.int)]\n dataset = [dte[recording][:, 0].astype(np.int),\n dte[recording][:, 1:3].astype(np.int),\n dte[recording][:, 3].astype(np.int)]\n time_surface = Time_Surface_event(xdim=ts_size,\n ydim=ts_size,\n event=single_event,\n timecoeff=tau,\n dataset=dataset,\n num_polarities=polarities,\n verbose=False)\n ts_test.append(time_surface)\n test_labels.append(int(dte[recording][k, -1]))\n test_rec_sizes.append(dte[recording].shape[0])\nts_test = np.array(ts_test)\nts_test = ts_test.reshape((ts_test.shape[0], -1))\ntest_labels = np.array(test_labels)\n# ts_test_res = ts_test.shape[0] % comm.size\n# ts_test = ts_test[:-ts_test_res]\n# test_labels = test_labels[:-ts_test_res]\nprint(\"rank: \", comm.rank, ts.shape, train_labels.shape)\nsys.stdout.flush()\ncomm.barrier()\n#### RUNNING THE SPARSE CODING ALGORITHM ####\nif learning:\n # Dimensionality of the model\n H = 4 # let's start with 100\n D = ts_size**2 # dimensionality of observed data\n\n # Approximation parameters for Expectation Truncation (It has to be Hprime>=gamma)\n Hprime = 4\n gamma = 4\n\n # Import and instantiate a model\n discriminative = False\n if discriminative:\n model = DBSC_ET(D, H, Hprime, gamma)\n else:\n model = BSC_ET(D, H, Hprime, gamma)\n\n # Configure DataLogger\n print_list = ('T', 'L', 'pi', 'sigma')\n dlog.set_handler(print_list, TextPrinter) # prints things to terminal\n txt_list = ('T', 'L', 'pi', 'sigma')\n dlog.set_handler(txt_list, StoreToTxt, output_path +\n '/results.txt') # stores things in a txt file\n h5_list = ('T', 'L', 'pi', 'sigma', 'W')\n dlog.set_handler(h5_list, StoreToH5, output_path +\n '/results.h5') # stores things in an h5 file\n\n # Choose annealing schedule\n from prosper.em.annealing import LinearAnnealing\n anneal = LinearAnnealing(120) # decrease\n anneal['T'] = [(0, 5.), (.8, 1.)]\n anneal['Ncut_factor'] = [(0, 0.), (0.5, 0.), (0.6, 1.)]\n # anneal['Ncut_factor'] = [(0,0.),(0.7,1.)]\n # anneal['Ncut_factor'] = [(0,0.),(0.7,1.)]\n anneal['W_noise'] = [(0, np.std(ts) / 2.), (0.7, 0.)]\n # anneal['pi_noise'] = [(0,0.),(0.2,0.1),(0.7,0.)]\n anneal['anneal_prior'] = False\n\n assert train_labels.shape[0] == ts.shape[0]\n my_data = {'y': ts, 'l': train_labels}\n model_params = model.standard_init(my_data)\n print(\"model defined\")\n em = EM(model=model, anneal=anneal)\n em.data = my_data\n em.lparams = model_params\n em.run()\n print(\"em finished\")\n\n my_test_data = {'y': ts_test}\n res = model.inference(anneal, em.lparams, my_test_data)\n sparse_codes = res['s'][:, 0, :] # should be Number of samples x H\n dlog.close()\n\nif classification:\n\n my_train_data = {'y': ts}\n res_train = model.inference(anneal, em.lparams, my_train_data)\n\n train_features = []\n train_labels2 = []\n start = 0\n for i in range(len(train_rec_sizes)):\n stop = start + train_rec_sizes[i]\n train_features.append(res_train['s'][start:stop, 0, :].mean(0))\n this_l = train_labels[start:stop]\n assert (this_l == this_l[0]).all()\n train_labels2.append(this_l[0])\n start = stop\n\n train_features = np.array(train_features)\n train_labels = np.array(train_labels2)\n\n my_test_data = {'y': ts_test}\n res_test = model.inference(anneal, em.lparams, my_test_data)\n\n test_features = []\n test_labels2 = []\n start = 0\n for i in range(len(test_rec_sizes)):\n stop = start + test_rec_sizes[i]\n test_features.append(res_test['s'][start:stop, 0, :].mean(0))\n this_l = test_labels[start:stop]\n assert (this_l == this_l[0]).all()\n test_labels2.append(this_l[0])\n start = stop\n test_features = np.array(test_features)\n test_labels = np.array(test_labels2)\n\n train_features_labels = comm.gather((train_features, train_labels))\n test_features_labels = comm.gather((test_features, test_labels))\n if comm.rank == 0:\n train_features = np.concatenate([f[0] for f in train_features_labels])\n train_labels = np.concatenate([f[1] for f in train_features_labels])\n test_features = np.concatenate([f[0] for f in test_features_labels])\n test_labels = np.concatenate([f[1] for f in test_features_labels])\n\n from sklearn.linear_model import LogisticRegression\n from sklearn import metrics\n\n lreg = LogisticRegression(solver='lbfgs', multi_class='auto')\n lreg.fit(train_features, train_labels)\n predicted_labels = lreg.predict(test_features)\n\n test_labels = np.array(test_labels)\n print(\"Classification report for classifier %s:\\n%s\\n\"\n % (lreg, metrics.classification_report(test_labels, predicted_labels)))\n print(\"Confusion matrix:\\n%s\" %\n metrics.confusion_matrix(test_labels, predicted_labels))\n","sub_path":"legacy-codes/prosper_stuff/.ipynb_checkpoints/main_h5-checkpoint.py","file_name":"main_h5-checkpoint.py","file_ext":"py","file_size_in_byte":8402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"77524022","text":"while True:\n print(\"Options\")\n print(\"Enter 'add' to add two numbers\")\n print(\"Enter 'substract' to add two numbers\")\n print(\"Enter 'multiply' to add two numbers\")\n print(\"Enter 'divide' to add two numbers\")\n print(\"Enter 'quit' to end the program\")\n user_input=input(\": \")\n\n if user_input==\"quit\":\n break\n else:\n num1=float(input(\"Enter a number: \"))\n num2=float(input(\"Enter another number: \"))\n\n if user_input==\"add\":\n result=num1+num2\n elif user_input==\"substract\":\n result=num1-num2\n elif user_input==\"multiply\":\n result=num1*num2\n elif user_input==\"divide\":\n result=num1/num2\n else:\n print(\"Unknown input\")\n continue\n print(\"The answer is : \"+str(result))\n","sub_path":"Utilities/Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"100677837","text":"import numpy as np\nfrom moviepy.editor import VideoFileClip\nimport csv\n\nFILE = 'sample.MOV'\nclip = VideoFileClip(FILE)\ndata = clip.iter_frames(fps=None, with_times=True, progress_bar=True)\n\nrgb_list = []\ntimes_list = []\naverages = {}\n\nfor time, rgb in data:\n times_list.append(time)\n rgb_list.append(rgb)\n\nredFinalValue = 0\ngreenFinalValue = 0\nblueFinalValue = 0\n\nframes = 0\nfor time, frame in zip(times_list, rgb_list):\n print(time)\n line_average = np.array([0,0,0])\n line_count = 0\n for line in frame:\n line_average += (np.average(line, axis=0))\n line_count = line_count+1\n \n redFinalValue = redFinalValue + (line_average[0]/line_count)\n greenFinalValue = greenFinalValue + line_average[1]/line_count\n blueFinalValue = blueFinalValue + line_average[2]/line_count\n frames = frames + 1\n\nprint(frames)\n\nprint(\"Red\")\nprint(redFinalValue/frames)\nprint(\"Green\")\nprint(greenFinalValue/frames)\nprint(\"Blue\")\nprint(blueFinalValue/frames)\n\n\"\"\"\nsumAll = redFinalValue + greenFinalValue + blueFinalValue\npercetageRed = redFinalValue/sumAll\npercetageGreen = greenFinalValue/sumAll\npercetageBlue = blueFinalValue/sumAll\n\"\"\"\n\nwith open('data.csv','wb') as fp:\n a = csv.writer(fp, delimiter=',')\n data = [['colors', 'finalcolor'],['Red', redFinalValue/frames], ['Green', greenFinalValue/frames], ['Blue', blueFinalValue/frames]]\n a.writerows(data)\n\n\n\n\n\n","sub_path":"random/finalcolors.py","file_name":"finalcolors.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"32660855","text":"from utilities.run_log_command import run_shell_command\nimport shutil\nimport os\nimport pandas as pd\n\n# cmd_rclone = 'rclone -v copyto {} gdriveremote:/RClone/{}'\n# cmd_save_sst_files = \"rclone -v copy {} 'gdriveremote:/Sunnyside Times/SST Admin/{}'\"\n# cmd_get_sst_files = \"rclone -v copy 'gdriveremote:/Sunnyside Times/SST Admin/{}' {}\"\n\n# RClone config file in /home/don/.config/rclone/rclone.conf\n\n\nclass ManageGoogleDrive(object):\n def __init__(self):\n self.cmd_list_files = \"rclone ls 'gdriveremote:/Sunnyside Times/SSTAdmin/'\"\n self.cmd_download_csv_file = \"rclone -v --drive-formats csv copy \\'gdriveremote:/\\'{} {}\"\n self.cmd_download_dir = \"rclone -v {} copy \\'gdriveremote:/{}\\' {}\"\n self.auto_update = 'Membership Lists/'\n self.minutes = 'Minutes/'\n self.docs_to_update = 'Updated Docs/'\n self.possible_directories = {'auto': self.auto_update, 'minutes': self.minutes, 'other': self.docs_to_update}\n\n def download_csv_file(self, logger, file, download_dir, dummy_source=None):\n '''Download Google Spreadsheet as csv file.'''\n try:\n if dummy_source:\n shutil.copy(dummy_source, download_dir)\n else:\n download_files_cmd = self.cmd_download_csv_file.format(file, download_dir)\n run_shell_command(download_files_cmd, logger)\n except Exception as e:\n logger.make_error_entry('Error downloading spreadsheet {}'.format(file))\n raise e\n\n def download_directory(self, logger, dir_to_download, target_dir, as_type=None, dummy_source=None):\n \"\"\"Download contents of specified directory to local directory.\n \"\"\"\n try:\n if dummy_source:\n copytree(dummy_source, target_dir)\n convert_directory_to_csv(target_dir)\n else:\n parm = ''\n if as_type:\n parm = '--drive-formats ' + as_type\n download_files_cmd = self.cmd_download_dir.format(parm, dir_to_download, target_dir)\n run_shell_command(download_files_cmd, logger)\n except Exception as e:\n logger.make_error_entry('Error downloading file directory {}'.format(dir_to_download))\n raise e\n\n def download_file(self, logger, file):\n pass\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\ndef convert_directory_to_csv(src):\n '''Convert directory of spreadsheets to csv and delete originals'''\n for item in os.listdir(src):\n s = os.path.join(src, item)\n fn, tp = item.split('.')\n d = os.path.join(src, fn + '.csv')\n if tp == 'xls' or tp == 'xlsx' or tp == 'ods':\n data_xls = pd.read_excel(s, None, index_col=None)\n df = data_xls[list(data_xls)[0]]\n df.to_csv(d, encoding='utf-8', index=False)","sub_path":"external_sites/manage_google_drive.py","file_name":"manage_google_drive.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"158073811","text":"from hailo_sdk_common.targets.inference_targets import SdkPartialNumeric, SdkFPOptimized\nfrom hailo_sdk_client import InferenceContext\n\ntry:\n from hailo_platform import PcieDevice, HailoRTException\n PLATFORM_AVAILABLE = True\nexcept ModuleNotFoundError:\n PLATFORM_AVAILABLE = False\n\nTARGETS = {\n 'hailo8': PcieDevice if PLATFORM_AVAILABLE else None,\n 'full_precision': SdkFPOptimized,\n 'emulator': SdkPartialNumeric,\n}\n\nINFERENCE_TARGETS = {\n 'hailo8': InferenceContext.SDK_HAILO_HW,\n 'full_precision': InferenceContext.SDK_FP_OPTIMIZED,\n 'emulator': InferenceContext.SDK_QUANTIZED,\n}\n\nDEVICES = {}\nDEVICE_NAMES = set()\nif PLATFORM_AVAILABLE:\n try:\n devices = PcieDevice.scan_devices()\n TARGETS.update({str(name): lambda: PcieDevice(name) for name in devices})\n INFERENCE_TARGETS.update({str(name): InferenceContext.SDK_HAILO_HW for name in devices})\n DEVICES.update({str(name): name for name in devices})\n DEVICE_NAMES.update([str(name) for name in devices])\n except HailoRTException:\n # Ignore HailoRT exception when the driver is not installed\n pass\n","sub_path":"hailo_model_zoo/utils/hw_utils.py","file_name":"hw_utils.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"621229677","text":"# Copyright (c) 2016, Daniele Venzano\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom werkzeug.exceptions import BadRequest\nfrom flask_restful import Resource, request\n\nfrom zoe_lib.exceptions import ZoeException, ZoeRestAPIException\nfrom zoe_scheduler.rest_api.utils import catch_exceptions\nfrom zoe_scheduler.state.manager import StateManager\nfrom zoe_scheduler.platform_manager import PlatformManager\nfrom zoe_scheduler.state.execution import Execution\nfrom zoe_scheduler.rest_api.auth.authentication import authenticate\nfrom zoe_scheduler.rest_api.auth.authorization import is_authorized, check_quota\nfrom zoe_scheduler.config import singletons\n\n\nclass ExecutionAPI(Resource):\n \"\"\"\n :type state: StateManager\n :type platform: PlatformManager\n \"\"\"\n def __init__(self, **kwargs):\n self.state = kwargs['state']\n self.platform = kwargs['platform']\n\n @catch_exceptions\n def get(self, execution_id):\n start_time = time.time()\n calling_user = authenticate(request, self.state)\n\n e = self.state.get_one('execution', id=execution_id)\n if e is None:\n raise ZoeRestAPIException('No such execution', 404)\n\n is_authorized(calling_user, e, 'get')\n ret = e.to_dict(checkpoint=False)\n\n singletons['metric'].metric_api_call(start_time, 'execution', 'get', calling_user)\n return ret\n\n @catch_exceptions\n def delete(self, execution_id: int):\n \"\"\"\n This method is called when a user wants to stop an execution. To actually delete the execution,\n the user has to delete the 'parent' application.\n :param execution_id: the execution to be deleted\n :return:\n \"\"\"\n start_time = time.time()\n calling_user = authenticate(request, self.state)\n\n e = self.state.get_one('execution', id=execution_id)\n if e is None:\n raise ZoeRestAPIException('No such execution', 404)\n\n is_authorized(calling_user, e, 'delete')\n\n if e.status == \"running\" or e.status == \"scheduled\":\n self.platform.execution_terminate(e, reason='terminated')\n\n self.state.state_updated()\n\n singletons['metric'].metric_api_call(start_time, 'execution', 'delete', calling_user)\n return '', 204\n\n\nclass ExecutionCollectionAPI(Resource):\n \"\"\"\n :type state: StateManager\n :type platform: PlatformManager\n \"\"\"\n def __init__(self, **kwargs):\n self.state = kwargs['state']\n self.platform = kwargs['platform']\n\n @catch_exceptions\n def get(self):\n \"\"\"\n Returns a list of all active executions.\n\n :return:\n \"\"\"\n start_time = time.time()\n calling_user = authenticate(request, self.state)\n execs = self.state.get('execution')\n ret = []\n for e in execs:\n try:\n is_authorized(calling_user, e, \"get\")\n except ZoeRestAPIException:\n continue\n else:\n ret.append(e.to_dict(checkpoint=False))\n singletons['metric'].metric_api_call(start_time, 'execution', 'list', calling_user)\n return ret\n\n @catch_exceptions\n def post(self):\n \"\"\"\n Starts an execution, given an application_id. Takes a JSON object like this: { \"application_id\": 4 }\n :return: the new execution_id\n \"\"\"\n start_time = time.time()\n calling_user = authenticate(request, self.state)\n\n try:\n data = request.get_json()\n except BadRequest:\n raise ZoeRestAPIException('Error decoding JSON data')\n\n execution = Execution(self.state)\n try:\n execution.from_dict(data, checkpoint=False)\n except ZoeException as e:\n raise ZoeRestAPIException(e.value)\n\n# if not zoe_sched_singleton.validate(execution.application):\n# return error('admission control refused this application description')\n\n is_authorized(calling_user, execution, 'create')\n check_quota(calling_user, self.state)\n\n execution.id = self.state.gen_id()\n self.state.new('execution', execution)\n\n self.platform.execution_submitted(execution)\n\n self.state.state_updated()\n\n singletons['metric'].metric_api_call(start_time, 'execution', 'start', calling_user)\n return {'execution_id': execution.id}, 201\n","sub_path":"zoe_scheduler/rest_api/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"161002523","text":"from django.contrib.auth import get_user_model, get_user\nfrom hypothesis import given\nfrom hypothesis.extra.django import TestCase\n\nfrom djangobase.hyposettings import hypothesis_settings\nfrom .. import factories as ft\n\nADMIN_URL = '/admin/'\n\n\nclass UserModelAdminTestCase(TestCase):\n \"\"\"\n TestS related to admin application in django.\n We want to make sure that normal user cannot\n be logged in into Admin and superuser can.\n \"\"\"\n UserModel = get_user_model()\n ADMIN_URL = ADMIN_URL\n\n ##############################################\n # ------------------ASSERTS----------------- #\n ##############################################\n\n def assertClientIsLoggedIn(self, user, client):\n self.assertEquals(\n get_user(client),\n user\n )\n\n def assertIsNOTLOGGEDIntoAdmin(self, client):\n resp = client.get(self.ADMIN_URL)\n self.assertEqual(resp.status_code, 302)\n self.assertIn('/login', resp.url)\n\n def assertIsLoggedIntoAdmin(self, client):\n resp = client.get(self.ADMIN_URL)\n self.assertEqual(resp.status_code, 200)\n\n ##############################################\n # ------------------HELPERS----------------- #\n #############################################\n\n @staticmethod\n def login_client(user, client):\n client.login(\n username=user.email,\n password=ft.DEFAULT_PASSWORD,\n )\n\n ##############################################\n # ------------------TESTS------------------ #\n #############################################\n\n def test_admin_page_works(self):\n \"\"\"\n Admin page answers with 302 and redirects to login.\n \"\"\"\n resp = self.client.get(self.ADMIN_URL)\n self.assertEqual(resp.status_code, 302)\n self.assertIn('/login', resp.url)\n\n @hypothesis_settings\n @given(ft.builds_user_model())\n def test_anonymous_user_is_not_logged_into_admin(self, user):\n \"\"\"\n Create random user and check if\n we are logged into admin.\n \"\"\"\n self.assertIsNOTLOGGEDIntoAdmin(self.client)\n\n @hypothesis_settings\n @given(ft.builds_user_model())\n def test_active_user_is_not_logged_into_admin(self, user):\n \"\"\"\n Create active user, log him in.\n We cant be logged into admin.\n \"\"\"\n user.is_active = True\n user.save()\n self.login_client(user, self.client)\n self.assertClientIsLoggedIn(user, self.client)\n self.assertIsNOTLOGGEDIntoAdmin(self.client)\n\n @hypothesis_settings\n @given(ft.builds_user_model())\n def test_inactive_user_is_not_logged_into_admin(self, user):\n \"\"\"\n Create inactive user, try to log him in.\n We cant be logged into admin.\n \"\"\"\n user.is_active = False\n user.save()\n self.login_client(user, self.client)\n self.assertIsNOTLOGGEDIntoAdmin(self.client)\n\n @hypothesis_settings\n @given(ft.builds_user_model(is_superuser=True))\n def test_inactive_superuser_is_not_logged_into_admin(self, user):\n \"\"\"\n Create inactive superuser, try to log him in.\n We cannot be logged into admin.\n \"\"\"\n user.is_active = False\n user.save()\n self.login_client(user, self.client)\n self.assertIsNOTLOGGEDIntoAdmin(self.client)\n\n @hypothesis_settings\n @given(ft.builds_user_model(is_superuser=True))\n def test_active_superuser_is_logged_into_admin(self, user):\n \"\"\"\n Create active superuser, log him in.\n We have to be logged into admin.\n \"\"\"\n user.is_active = True\n user.save()\n self.login_client(user, self.client)\n self.assertClientIsLoggedIn(user, self.client)\n self.assertIsLoggedIntoAdmin(self.client)\n","sub_path":"djangobase/users/tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"267480769","text":"#conversor numerico\ncores = {'limpar': '\\033[m',\n 'vermelho': '\\033[41m'\n}\nnum = int(input('Digite um número inteiro: '))\nprint('Digite o número da opção que você deseja converter:\\n 1 - Binário\\n 2 - Octal\\n 3 - Hexadecimal')\nopcao = int(input('Sua opção: '))\nif opcao == 1:\n print('{} em binário é {}'.format(num, bin(num)[2:]))\nelif opcao == 2:\n print('{} em octal é {}'.format(num, oct(num)[2:]))\nelif opcao == 3:\n print('{} em hexadecimal é {}'.format(num, hex(num)[2:]))\nelse:\n print('{}Opcão Invalida!!!{}'.format(cores['vermelho'], cores['limpar']))","sub_path":"CursoEmVideo/pythonProject/ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"527327771","text":"from django import forms\r\nfrom .models import person, body_style, make, transmission_type, drive_train, fuel_type, engine, drive, car, model, slide, plans\r\n\r\nclass userForm(forms.ModelForm):\r\n\tclass Meta:\r\n\t\tmodel = person\r\n\t\tfields = [\r\n\t\t\t'username',\r\n\t\t\t'email',\r\n\t\t\t'password',\r\n\t\t\t# 'phone',\r\n\t\t\t# 'image',\r\n\t\t]\r\n\t\tlabels= {\r\n\t\t\t'username': 'Username',\r\n\t\t\t'email': 'Email',\r\n\t\t\t'password': 'Password',\r\n\t\t\t# 'phone': 'Phone',\r\n\t\t\t# 'image': 'Image',\r\n\t\t}\r\n\r\n\t\twidget = {\r\n\t\t\t'user': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t\t'email': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t\t'password': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t\t'userid': forms.HiddenInput(attrs={'class':'form-control'}),\r\n\t\t\t# 'phone': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t\t# 'image': forms.ImageField(),\r\n\t\t}\r\nclass newPlanForm(forms.ModelForm):\r\n\tclass Meta:\r\n\t\tmodel = plans\r\n\t\tfields = [\r\n\t\t\t'name',\r\n\t\t\t'image',\r\n\t\t\t'number_of_cars',\r\n\t\t\t'price',\r\n\t\t]\r\n\t\tlabels= {\r\n\t\t\t'name': 'Nombre del plan',\r\n\t\t\t'image': 'Imagen asociada',\r\n\t\t\t'number_of_cars': 'Número máximo de carros',\r\n\t\t\t'price': 'Precio del plan',\r\n\t\t}\r\n\t\terror_messages = {\r\n\t\t\t'number_of_cars': {\r\n\t\t\t\t'unique': \"Ya hay otro plan con ese número de carros\",\r\n\t\t\t},\r\n }\r\n\t\twidget = {\r\n\t\t\t'name': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t\t'image': forms.FileInput(attrs={'class':'form-control file'}),\r\n\t\t\t'number_of_cars': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t\t'price': forms.TextInput(attrs={'class':'form-control'}),\r\n\t\t}\r\nclass slideForm(forms.ModelForm):\r\n\tclass Meta:\r\n\t\tmodel = slide\r\n\t\tfields = [\r\n\t\t\t'title_1',\r\n\t\t\t'title_2',\r\n\t\t\t'title_3',\r\n\t\t\t'url_1',\r\n\t\t\t'url_2',\r\n\t\t\t'url_3',\r\n\t\t\t'image',\r\n\t\t]\r\n\t\tlabels= {\r\n\t\t\t'title_1': 'Title 1',\r\n\t\t\t'title_2': 'Title 2',\r\n\t\t\t'title_3': 'Title 3',\r\n\t\t\t'url_1': 'Url 1',\r\n\t\t\t'url_2': 'Url 2',\r\n\t\t\t'url_3': 'Url 3',\r\n\t\t\t'image': 'Image',\r\n\t\t}\r\n\r\n\t\twidget = {\r\n\t\t\t'image': forms.FileInput(attrs={'class':'file'}),\r\n\t\t}\r\nclass userCreation(forms.Form):\r\n\tusername = forms.CharField(\r\n\trequired=True,\r\n\tlabel='Nombre de usuario*',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'username', 'name':'username',}),\r\n\t)\r\n\tfirst_name = forms.CharField(\r\n\trequired=False,\r\n\tlabel='Nombre',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'fname', 'name':'fname',}),\r\n\t)\r\n\tlast_name = forms.CharField(\r\n\trequired=False,\r\n\tlabel='Apellido',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'lname', 'name':'lname',}),\r\n\t)\r\n\tpassword = forms.CharField(\r\n\trequired=True,\r\n\tlabel='Contraseña*',\r\n\twidget=forms.PasswordInput(render_value = True, attrs={'class':'form-control', 'id':'psw', 'name':'psw',}),\r\n\t)\r\n\temail = forms.CharField(\r\n\trequired=True,\r\n\tlabel='Correo*',\r\n\twidget=forms.EmailInput(attrs={'class':'form-control', 'id':'email', 'name':'email',}),\r\n\t)\r\n\tphone = forms.CharField(\r\n\trequired=False,\r\n\tlabel='Teléfono',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'phone', 'name':'phone',}),\r\n\t)\r\n\tcell_phone = forms.CharField(required=False,\r\n\tlabel='Teléfono Celular',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'cell_phone', 'name':'cell_phone',}),\r\n\t)\r\n\tphoto = forms.ImageField(required=False,\r\n\tlabel='Foto',\r\n\twidget=forms.FileInput(attrs={'class':'form-control', 'id':'photo', 'name':'photo',}),\r\n\t)\r\n\taddress = forms.CharField(required=False,\r\n\tlabel='Dirección',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'address', 'name':'address',}),\r\n\t)\r\n\tdescription = forms.CharField(required=False,\r\n\tlabel='Descripción',\r\n\twidget=forms.Textarea(attrs={'rows':\"4\", 'cols':\"50\", 'class':'form-control', 'id':'cell_phone', 'name':'cell_phone',}),\r\n\t)\r\n\tfacebook = forms.CharField(required=False,\r\n\tlabel='Facebook',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'facebook', 'name':'facebook',}),\r\n\t)\r\n\ttwitter = forms.CharField(required=False,\r\n\tlabel='Twitter',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'twitter', 'name':'twitter',}),\r\n\t)\r\n\tinstagram = forms.CharField(required=False,\r\n\tlabel='Instagram',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'instagram', 'name':'instagram',}),\r\n\t)\r\n\tyoutube = forms.CharField(required=False,\r\n\tlabel='Youtube',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'youtube', 'name':'youtube',}),\r\n\t)\r\n\tgoogle = forms.CharField(required=False,\r\n\tlabel='Google plus',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'google', 'name':'google',}),\r\n\t)\r\n\tpinterest = forms.CharField(required=False,\r\n\tlabel='Pinterest',\r\n\twidget=forms.TextInput(attrs={'class':'form-control', 'id':'pinterest', 'name':'pinterest',}),\r\n\t)\r\n\r\n\tuser_type = forms.ChoiceField(\r\n required=True,\r\n label='Tipo de Usuario*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'user_type', 'name':'user_type',}),\r\n choices=[(\"Private user\", \"Private user\"), (\"Dealer user\", \"Dealer user\")],\r\n )\r\n\r\n\r\nclass signForm(forms.Form):\r\n\tusername = forms.CharField(required=True, label='Username', widget=forms.TextInput(attrs={'class':'form-control'}))\r\n\temail = forms.CharField(required=True, label='Email', widget=forms.TextInput(attrs={'class':'form-control'}))\r\n\tpassword = forms.CharField(required=True, label='Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))\r\n\tphone = forms.CharField(required=True, label='Phone', widget=forms.TextInput(attrs={'class':'form-control'}))\r\n\r\nclass MySelect(forms.Select):\r\n def render_option(self, selected_choices, option_value, option_label):\r\n return u'' + str(option_label) + ' '\r\n\r\nclass indexForm(forms.Form):\r\n\tmakers = make.objects.all()\r\n\tdominician_republic_cities = [('', \"Seleccione una ciudad\"),\r\n\t\t\t\t\t\t\t\t (\"Azua\", \"Azua\"),\r\n\t\t\t\t\t\t\t\t (\"Baoruco\", \"Baoruco\"),\r\n\t\t\t\t\t\t\t\t (\"Barahona\", \"Barahona\"),\r\n\t\t\t\t\t\t\t\t (\"Dajabon\", \"Dajabon\"),\r\n\t\t\t\t\t\t\t\t (\"Distrito Nacional\", \"Distrito Nacional\"),\r\n\t\t\t\t\t\t\t\t (\"Duarte\", \"Duarte\"),\r\n\t\t\t\t\t\t\t\t (\"El Seibo\", \"El Seibo\"),\r\n\t\t\t\t\t\t\t\t (\"Elias Piña\", \"Elias Piña\"),\r\n\t\t\t\t\t\t\t\t (\"Espaillat\", \"Espaillat\"),\r\n\t\t\t\t\t\t\t\t (\"Hato Mayor\", \"Hato Mayor\"),\r\n\t\t\t\t\t\t\t\t (\"Hermanas Mirabal\", \"Hermanas Mirabal\"),\r\n\t\t\t\t\t\t\t\t (\"Independencia\", \"Independencia\"),\r\n\t\t\t\t\t\t\t\t (\"La Altagracia\", \"La Altagracia\"),\r\n\t\t\t\t\t\t\t\t (\"La Romana\", \"La Romana\"),\r\n\t\t\t\t\t\t\t\t (\"La Vega\", \"La Vega\"),\r\n\t\t\t\t\t\t\t\t (\"Maria Trinidad Sanchez\", \"Maria Trinidad Sanchez\"),\r\n\t\t\t\t\t\t\t\t (\"Monseñor Nouel\", \"Monseñor Nouel\"),\r\n\t\t\t\t\t\t\t\t (\"Monte Cristi\", \"Monte Cristi\"),\r\n\t\t\t\t\t\t\t\t (\"Monte Plata\", \"Monte Plata\"),\r\n\t\t\t\t\t\t\t\t (\"Pedernales\", \"Pedernales\"),\r\n\t\t\t\t\t\t\t\t (\"Peravia\", \"Peravia\"),\r\n\t\t\t\t\t\t\t\t (\"Puerto Plata\", \"Puerto Plata\"),\r\n\t\t\t\t\t\t\t\t (\"Samaná\", \"Samaná\"),\r\n\t\t\t\t\t\t\t\t (\"San Cristóbal\", \"San Cristóbal\"),\r\n\t\t\t\t\t\t\t\t (\"San José de Ocoa\", \"San José de Ocoa\"),\r\n\t\t\t\t\t\t\t\t (\"San Juan\", \"San Juan\"),\r\n\t\t\t\t\t\t\t\t (\"San Pedro de Macoris\", \"San Pedro de Macoris\"),\r\n\t\t\t\t\t\t\t\t (\"Sánchez Ramirez\", \"Sánchez Ramirez\"),\r\n\t\t\t\t\t\t\t\t (\"Santiago\", \"Santiago\"),\r\n\t\t\t\t\t\t\t\t (\"Santiago Rodriguez\", \"Santiago Rodriguez\"),\r\n\t\t\t\t\t\t\t\t (\"Santo Domingo\", \"Santo Domingo\"),\r\n\t\t\t\t\t\t\t\t (\"Valverde\", \"Valverde\"),]\r\n\tmaker_options = [('', \"Marca\"),]\r\n\tmaker_options += ((str(element.id), element.description) for element in (makers))\r\n\tmakefield = forms.ChoiceField(\r\n required=False,\r\n label='Marca*',\r\n\t\twidget=forms.Select(attrs={'class':'dropdown', 'id':'make', 'name':'make', 'onchange':'changeModels();'}),\r\n choices=maker_options,\r\n )\r\n\tmodels_options = [('', \"Seleccione marca\"),]\r\n\tyear_options = [('', \"Seleccione trim\"),]\r\n\ttrim_options = [('', \"Seleccione modelp\"),]\r\n\tmodelfield = forms.ChoiceField(\r\n required=False,\r\n label='Model',\r\n\t\twidget=forms.Select(attrs={'class':'dropdown', 'id':'model', 'name':'model', 'onchange':'addInfo();'}),\r\n choices=models_options,\r\n )\r\n\ttrim = forms.ChoiceField(\r\n\t\trequired=False,\r\n\t\tlabel='Trim',\r\n\t\twidget=forms.Select(attrs={'class':'dropdown', 'id':'trim', 'name':'trim', 'onchange':'bringYears();'}),\r\n\t\tchoices=trim_options,\r\n\t)\r\n\tcity = forms.ChoiceField(\r\n required=False,\r\n label='Ciudad',\r\n\t\twidget=forms.Select(attrs={'class':'dropdown', 'id':'city', 'name':'city'}),\r\n choices=dominician_republic_cities,\r\n )\r\n\tyear = forms.ChoiceField(\r\n\t\trequired=False,\r\n\t\tlabel='Year',\r\n\t\twidget=forms.Select(attrs={'class':'dropdown', 'id':'year', 'name':'year'}),\r\n\t\tchoices=year_options,\r\n\t)\r\n\r\nclass expForm(forms.Form):\r\n\t#Taking data from DB\r\n\tmakers = make.objects.all()\r\n\ttrans_t = transmission_type.objects.all()\r\n\tdrive_t = drive_train.objects.all()\r\n\tfuel_t = fuel_type.objects.all()\r\n\teng = engine.objects.all()\r\n\t_drive = drive.objects.all()\r\n\t#Creating tuples\r\n\tmaker_options = [('', \"Marca\"),]\r\n\tmaker_options += ((str(element.id), element.description) for element in (makers))\r\n\ttrans_options = [('', \"Tipo de Transmisión\"),]\r\n\ttrans_options += ((str(element.id), element.description) for element in trans_t)\r\n\tdrive_t_options = [('', \"Transmisión\"),]\r\n\tdrive_t_options += ((str(element.id), element.description) for element in drive_t)\r\n\tfuel_options = [('', \"Tipo de combustible\"),]\r\n\tfuel_options += ((str(element.id), element.description) for element in fuel_t)\r\n\tengine_options = [('', \"Motor\"),]\r\n\tengine_options += ((str(element.id), element.description) for element in eng)\r\n\tcolors = [('', \"Color\"),]\r\n\tcolors += car._meta.get_field('exterior_color').choices\r\n\tdrive_options = [('', \"Drive\"),]\r\n\tdrive_options += ((str(element.id), element.description) for element in _drive)\r\n\tmodels_options = [('', \"Seleccione marca\"),]\r\n\ttrim_options = [('', \"Seleccione modelo\"),]\r\n\tyear_options = [('', \"Seleccione trim\"),]\r\n\tdominician_republic_cities = [('', \"Seleccione una ciudad\"),\r\n\t\t\t\t\t\t\t\t (\"Azua\", \"Azua\"),\r\n\t\t\t\t\t\t\t\t (\"Baoruco\", \"Baoruco\"),\r\n\t\t\t\t\t\t\t\t (\"Barahona\", \"Barahona\"),\r\n\t\t\t\t\t\t\t\t (\"Dajabon\", \"Dajabon\"),\r\n\t\t\t\t\t\t\t\t (\"Distrito Nacional\", \"Distrito Nacional\"),\r\n\t\t\t\t\t\t\t\t (\"Duarte\", \"Duarte\"),\r\n\t\t\t\t\t\t\t\t (\"El Seibo\", \"El Seibo\"),\r\n\t\t\t\t\t\t\t\t (\"Elias Piña\", \"Elias Piña\"),\r\n\t\t\t\t\t\t\t\t (\"Espaillat\", \"Espaillat\"),\r\n\t\t\t\t\t\t\t\t (\"Hato Mayor\", \"Hato Mayor\"),\r\n\t\t\t\t\t\t\t\t (\"Hermanas Mirabal\", \"Hermanas Mirabal\"),\r\n\t\t\t\t\t\t\t\t (\"Independencia\", \"Independencia\"),\r\n\t\t\t\t\t\t\t\t (\"La Altagracia\", \"La Altagracia\"),\r\n\t\t\t\t\t\t\t\t (\"La Romana\", \"La Romana\"),\r\n\t\t\t\t\t\t\t\t (\"La Vega\", \"La Vega\"),\r\n\t\t\t\t\t\t\t\t (\"Maria Trinidad Sanchez\", \"Maria Trinidad Sanchez\"),\r\n\t\t\t\t\t\t\t\t (\"Monseñor Nouel\", \"Monseñor Nouel\"),\r\n\t\t\t\t\t\t\t\t (\"Monte Cristi\", \"Monte Cristi\"),\r\n\t\t\t\t\t\t\t\t (\"Monte Plata\", \"Monte Plata\"),\r\n\t\t\t\t\t\t\t\t (\"Pedernales\", \"Pedernales\"),\r\n\t\t\t\t\t\t\t\t (\"Peravia\", \"Peravia\"),\r\n\t\t\t\t\t\t\t\t (\"Puerto Plata\", \"Puerto Plata\"),\r\n\t\t\t\t\t\t\t\t (\"Samaná\", \"Samaná\"),\r\n\t\t\t\t\t\t\t\t (\"San Cristóbal\", \"San Cristóbal\"),\r\n\t\t\t\t\t\t\t\t (\"San José de Ocoa\", \"San José de Ocoa\"),\r\n\t\t\t\t\t\t\t\t (\"San Juan\", \"San Juan\"),\r\n\t\t\t\t\t\t\t\t (\"San Pedro de Macoris\", \"San Pedro de Macoris\"),\r\n\t\t\t\t\t\t\t\t (\"Sánchez Ramirez\", \"Sánchez Ramirez\"),\r\n\t\t\t\t\t\t\t\t (\"Santiago\", \"Santiago\"),\r\n\t\t\t\t\t\t\t\t (\"Santiago Rodriguez\", \"Santiago Rodriguez\"),\r\n\t\t\t\t\t\t\t\t (\"Santo Domingo\", \"Santo Domingo\"),\r\n\t\t\t\t\t\t\t\t (\"Valverde\", \"Valverde\"),]\r\n\r\n\t#year = forms.IntegerField(required=True, label='Year*', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'year', 'type': 'text', 'placeholder': \"year\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tyear = forms.ChoiceField(\r\n\t\trequired=True,\r\n\t\tlabel='Año*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'year', 'name':'year'}),\r\n\t\tchoices=year_options,\r\n\t)\r\n\r\n\ttrim = forms.ChoiceField(\r\n\t\trequired=True,\r\n\t\tlabel='Trim*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'trim', 'name':'trim', 'onchange':'bringYears();'}),\r\n\t\tchoices=trim_options,\r\n\t)\r\n\r\n\tmakefield = forms.ChoiceField(\r\n required=True,\r\n label='Marca*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'make', 'name':'make', 'onchange':'changeModels();'}),\r\n choices=maker_options,\r\n )\r\n\tmodelfield = forms.ChoiceField(\r\n required=True,\r\n label='Modelo*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'model', 'name':'model', 'onchange':'addInfo();'}),\r\n choices=models_options,\r\n )\r\n\ttransmission_type_in = forms.ChoiceField(\r\n required=True,\r\n label='Tipo de Transmisión*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'transmission_type', 'name':'transmission_type',}),\r\n choices=trans_options,\r\n )\r\n\tdrive_train_in = forms.ChoiceField(\r\n required=True,\r\n label='Transmisión*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'drive_train', 'name':'drive_train',}),\r\n choices=drive_t_options,\r\n )\r\n\tfuel_type_in = forms.ChoiceField(\r\n required=True,\r\n label='Tipo de Gasolina*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'fuel_type', 'name':'fuel_type',}),\r\n choices=fuel_options,\r\n )\r\n\tengine_in = forms.ChoiceField(\r\n required=True,\r\n label='Motor*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'engine', 'name':'engine',}),\r\n choices=engine_options,\r\n )\r\n\tdoors = forms.ChoiceField(\r\n required=False,\r\n label='Puertas',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'doors', 'name':'doors',}),\r\n choices= [('', \"Doors\"),] + car._meta.get_field('doors').flatchoices\r\n )\r\n\text_c = forms.ChoiceField(\r\n required=False,\r\n label='Color Exterior',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'ext_colors', 'name':'exterior_color',}),\r\n choices= colors\r\n )\r\n\tint_c = forms.ChoiceField(\r\n required=False,\r\n label='Color Interior',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'int_colors', 'name':'interior_color',}),\r\n\t\tchoices= colors\r\n )\r\n\tdrive = forms.ChoiceField(\r\n required=False,\r\n label='Drive',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'drive', 'name':'drive',}),\r\n\t\tchoices= drive_options\r\n )\r\n\tint_f = forms.ChoiceField(\r\n required=False,\r\n label='Tejido interior',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'int_f', 'name':'int_f',}),\r\n\t\tchoices = [('', \"Interior fabric\"),] + car._meta.get_field('interior_fabric').flatchoices\r\n )\r\n\tlatitude = forms.FloatField(required=True, label='Latitud * (para ubicación exacta del vehículo)', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'latitude', 'type': 'text', 'placeholder': \"Latitud\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tlongitude = forms.FloatField(required=True, label='Longitud * (para ubicación exacta del vehículo)', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'longitude', 'type': 'text', 'placeholder': \"Latitud\", 'aria-describedby': \"sizing-addon2\"}))\r\n\ttransmission_des = forms.CharField(required=False, label='Descripción de la Transmisión', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'transmission_des', 'type': 'text', 'placeholder': \"Descripción de la Transmisión\", 'aria-describedby': \"sizing-addon2\"}))\r\n\teng_des = forms.CharField(required=False, label='Descripción del motor', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'engine_des', 'type': 'text', 'placeholder': \"Descripción del motor\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tcity_mpg = forms.ChoiceField(\r\n required=True,\r\n label='Ciudad*',\r\n\t\twidget=forms.Select(attrs={'class':'form-control', 'id':'city_MPG', 'name':'city_MPG',}),\r\n\t\tchoices= dominician_republic_cities\r\n )\r\n\text_c_des = forms.CharField(required=False, label='Descripcción del color exterior', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'ext_c_des', 'type': 'text', 'placeholder': \"Descripcción del color exterior\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tint_c_des = forms.CharField(required=False, label='Descripcción del color interior', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'int_c_des', 'type': 'text', 'placeholder': \"Descripcción del color interior\", 'aria-describedby': \"sizing-addon2\"}))\r\n\thigh_mpg = forms.FloatField(required=False, label='Highway MPG', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'high_mpg', 'type': 'text', 'placeholder': \"Highway MPG\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tprice = forms.IntegerField(required=True, max_value=1000000, label='Precio', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'price', 'type': 'text', 'placeholder': \"Precio\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tstock_n = forms.IntegerField(required=False, max_value=1000, label='Numere de serie', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'Numere de serie', 'type': 'text', 'placeholder': \"Stock\", 'aria-describedby': \"sizing-addon2\"}))\r\n\tmileage = forms.IntegerField(required=True, label='Kilometraje*', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'mileage', 'type': 'text', 'placeholder': \"Kilometraje\", 'aria-describedby': \"sizing-addon2\"}))\r\n\toff_price = forms.IntegerField(required=False, max_value=500000, label='Precio de descuento', widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'off_price', 'type': 'text', 'placeholder': \"Precio de descuento\", 'aria-label': \"Amount (to the nearest dollar)\"}))\r\n\tcondition = forms.ChoiceField(\r\n required=True,\r\n label='Condición',\r\n\t\twidget=forms.RadioSelect(attrs={'id':'condition', 'name':'condition',}),\r\n\t\tchoices = car._meta.get_field('condition').flatchoices\r\n )\r\n\tphoto = forms.FileField(required=True, label='Photos', widget=forms.FileInput(attrs={ 'name': \"img\", 'id': \"img\", 'type': 'file', 'class': \"file\", 'multiple accept': \"image\"}))\r\n\t# photo = forms.FileField(required=True, label='Photos', widget=forms.FileInput())\r\n\t# \r\n\tcopts = (\r\n\t\t('Asientos de tercera fila', 'Asientos de tercera fila'),\r\n\t\t('Cámara de respaldo', 'Cámara de respaldo'),\r\n\t\t('Control de velocidad', 'Control de velocidad'),\r\n\t\t('Entrada sin llave', 'Entrada sin llave'),\r\n\t\t('Control climatico', 'Control climatico'),\r\n\t\t('Cerraduras eléctricas', 'Cerraduras eléctricas'),\r\n\t\t('Ventanas eléctricas', 'Ventanas eléctricas'),\r\n\t\t('Controles de dirección', 'Controles de dirección'),\r\n )\r\n# (attrs={'id':'concom', 'name':'Convenience_Comfort',})\r\n\tConvenience_Comfort = forms.MultipleChoiceField(\r\n required=False,\r\n label='Convenience/Comfort',\r\n\t\twidget=forms.CheckboxSelectMultiple(attrs={'id':'Convenience_Comfort', 'name':'Convenience_Comfort',}),\r\n\t\tchoices = copts\r\n )\r\n\r\n\tent = (\r\n\t\t('Bluetooth, Manos libres', 'Bluetooth, Manos libres'),\r\n\t\t('Reproductor de CD', 'Reproductor de CD'),\r\n\t\t('Reproductor de DVD', 'Reproductor de DVD'),\r\n\t\t('Navegación', 'Navegación'),\r\n\t\t('Audio portátil', 'Audio portátil'),\r\n\t\t('Audio Premium', 'Audio Premium'),\r\n\t\t('Sistema de seguridad', 'Sistema de seguridad'),\r\n\t)\r\n\tEntertainment_Technology = forms.MultipleChoiceField(\r\n required=False,\r\n label='Entertainment/Technology',\r\n\t\twidget=forms.CheckboxSelectMultiple(attrs={'id':'Entertainment_Technology', 'name':'Entertainment_Technology',}),\r\n\t\tchoices = ent\r\n )\r\n\tlux = (\r\n\t\t('Asientos con calefacción', 'Asientos con calefacción'),\r\n\t\t('Asientos de cuero', 'Asientos de cuero'),\r\n\t\t('Ruedas Premium', 'Ruedas Premium'),\r\n\t\t('Techo solar', 'Techo solar'),\r\n\t)\r\n\tLuxury = forms.MultipleChoiceField(\r\n required=False,\r\n label='Luxury',\r\n\t\twidget=forms.CheckboxSelectMultiple(attrs={'id':'Luxury', 'name':'Luxury',}),\r\n\t\tchoices = lux\r\n )\r\n\r\n\tmisc = (\r\n\t\t('Discapacidad Equipada', 'Discapacidad Equipada'),\r\n\t\t('Kit de elevación', 'Kit de elevación'),\r\n\t\t('Enganche de remolque', 'Enganche de remolque'),\r\n\t)\r\n\r\n\tMiscellaneous = forms.MultipleChoiceField(\r\n required=False,\r\n label='Miscellaneous',\r\n\t\twidget=forms.CheckboxSelectMultiple(attrs={'id':'Miscellaneous', 'name':'Miscellaneous',}),\r\n\t\tchoices = misc\r\n )\r\n\r\n\tdef is_valid(self):\r\n\t\tvalid = super(expForm, self).is_valid()\r\n\t\tif self.fields[\"modelfield\"]:\r\n\t\t\tif int(self[\"modelfield\"].value()) < 0:\r\n\t\t\t\tdel self.errors[\"modelfield\"]\r\n\t\t\t\tself.add_error('modelfield', 'You have to choose a model')\r\n\t\t\t\tvalid = False\r\n\t\t\telse:\r\n\t\t\t\tvalid = True\r\n\r\n\t\tif self.fields[\"trim\"]:\r\n\t\t\ttry:\r\n\t\t\t\tif int(self[\"trim\"].value()) < 0:\r\n\t\t\t\t\tdel self.errors[\"trim\"]\r\n\t\t\t\t\tself.add_error('trim', 'You have to choose a trim')\r\n\t\t\t\t\tvalid = False\r\n\t\t\texcept:\r\n\t\t\t\tvalid = True\r\n\r\n\r\n\t\treturn valid\r\n","sub_path":"apps/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":20809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"228075329","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\nimport os\nimport sys\nimport scipy.optimize as opt\nimport scipy.stats as stat\n\nimport emcee\n\nimport velfun\n\n\n\n#path = '/Volumes/P/data_reduction/wifes/final/GGL_050650-2020_feb19_n2/GGL_050650-2020_r_vel.fits'\n\n#path = '/Volumes/P/data_reduction/wifes/final/GGL_050650-2020_feb19_n5/GGL_050650-2020_r_vel.fits'\n\nif False:\n dirrr = '/Volumes/P/data_reduction/wifes/final/C'\n for iii in [os.path.join(dirrr,i) for i in os.listdir(dirrr) if not i.startswith('.') and not i.startswith('d')]:\n file = [os.path.join(iii + '/fits_files/',i) for i in os.listdir(iii + '/fits_files/') if 'r_vel.fits' in i][0]\n run_me(file)\n\n\ndef run_me(path):\n\n save_files = True\n plots = True\n sn_threshold = 3\n\n name = path.split('_vel.fits')[0].split('/')[-1]\n galaxy_dir = os.path.dirname(os.path.dirname(path))\n\n print(' Working on: ' + name )\n\n #---------------------------------\n # Get Data\n #---------------------------------\n\n with fits.open(path) as hdulist:\n vel = hdulist[0].data.T\n unc = hdulist[1].data.T\n sn = hdulist[2].data.T\n width = hdulist[3].data.T\n\n\n ra, dec = np.meshgrid(np.arange(vel.shape[1]), np.arange(vel.shape[0]))\n \n plt.figure(1)\n cs = plt.contour(ra,dec,sn, [sn_threshold])\n \n contours = cs.collections[0].get_paths()\n sorted_contours = np.argsort([len(i) for i in contours])[::-1]\n polygon = contours[sorted_contours[0]]#.vertices\n \n plt.plot(polygon.vertices[:,0],polygon.vertices[:,1],'r')\n plt.close()\n\n inside_bool = np.zeros(sn.shape, dtype=bool)\n\n for i,foo in enumerate(np.arange(sn.T.shape[0])):\n for j,foo in enumerate(np.arange(sn.T.shape[1])):\n inside_bool[j,i] = polygon.contains_point([i,j])\n if np.isnan(vel[j,i]):\n inside_bool[j,i] = False\n\n\n vel_copy = np.copy(vel)\n unc_copy = np.copy(unc)\n sn_copy = np.copy(sn)\n ra_copy = np.copy(ra)\n dec_copy = np.copy(dec)\n\n # get only point within the main countour\n\n vel = vel[ inside_bool ]\n unc = unc[ inside_bool ]\n sn = sn[ inside_bool ]\n ra = ra[ inside_bool ]\n dec = dec[ inside_bool ]\n\n # remove points with large uncertainties\n \n v0 = np.median(vel)\n\n good_data = (unc < 50) & (np.abs(vel - v0) < 3 * np.std(vel - v0))\n\n vel = vel[ good_data ]\n unc = unc[ good_data ]\n sn = sn[ good_data ]\n ra = ra[ good_data ]\n dec = dec[ good_data ]\n\n print(np.max(vel))\n print(np.min(vel))\n\n #---------------------------------\n # Constraint center\n #---------------------------------\n ra0 = np.median(ra)\n dec0 = np.median(dec)\n\n\n\n #-!!!-#\n #---------------------------------\n # 0.0 FINDING ANGLES\n #---------------------------------\n theta0 = 0\n i0 = 0\n\n\n\n ndim, nwalkers, steps = 8, 100, 6000\n\n\n p0 = np.random.rand(ndim * nwalkers).reshape((nwalkers, ndim))\n\n for ii in range(len(p0)):\n p0[ii,:] = [\n np.random.uniform(ra0 - np.std(ra) , ra0 + np.std(ra)), #ra_c\n np.random.uniform(dec0 - np.std(dec), dec0 + np.std(dec) ), #dec_c\n np.random.uniform(np.pi/2 , np.pi/2 + 0.01), #theta\n np.random.uniform(np.pi/2 , np.pi/2 + 0.01), #i\n np.random.uniform(1. , 300.), #v_asym\n np.random.uniform(7. , 7.01), #r_t\n np.random.uniform(v0 - np.std(vel)/10 , v0 + np.std(vel)/10 ), #v_sys\n np.random.uniform(0.08,0.09) #gamma\n ]\n\n sampler = emcee.EnsembleSampler(nwalkers, ndim, velfun.lnprob, args=(vel,ra,dec,unc**2,theta0,i0))\n\n\n a = sampler.run_mcmc(p0, steps)\n\n finals = np.zeros(8)\n for iii in np.arange(8):\n vect = np.linspace(np.min(sampler.chain[:,-500:,iii]),np.max(sampler.chain[:,-500:,iii]),100)\n step = vect[1]-vect[0]\n finals[iii] = vect[np.argmax([np.sum( (sampler.chain[:,-500:,iii] > i) & ((sampler.chain[:,-500:,iii] < i + step))) for i in vect])]\n\n\n ra_c,dec_c,theta,inc,v_asym,r_t,v_sys,gamma = finals\n\n # ---------------------------------\n # 1.2 iteration MCMC (with log/exp)\n # ---------------------------------\n\n ndim, nwalkers, steps = 11, 100, 5000\n\n p0 = np.random.rand(ndim * nwalkers).reshape((nwalkers, ndim))\n\n for ii in range(len(p0)):\n p0[ii,:] = [\n np.random.uniform(ra_c - 0.01 , ra_c + 0.01 ), #ra_c\n np.random.uniform(dec_c - 0.01 , dec_c + 0.01 ), #dec_c\n np.random.uniform(theta - 0.01 , theta + 0.01 ), #theta\n np.random.uniform(inc - 0.01 , inc + 0.01 ), #i\n np.random.uniform(v_asym - 0.01 , v_asym + 0.01 ), #v_asym\n np.random.uniform(r_t - 0.01 , r_t + 0.01 ), #r_t\n np.random.uniform(v_sys - 0.01 , v_sys + 0.01 ), #v_sys\n np.random.uniform(gamma - 0.01 , gamma + 0.01 ), #gamma \n np.random.uniform(0.001,0.01), #Pw\n np.random.uniform(0,0.01), #Yw\n np.random.uniform(0,0.01) #Vw\n ]\n\n\n sampler2 = emcee.EnsembleSampler(nwalkers, ndim, velfun.explnprob, args=(vel,ra,dec,unc**2,theta0,i0))\n bb = sampler2.run_mcmc(p0, steps)\n\n\n #ra_c,dec_c,theta,inc,v_asym,r_t,v_sys,gamma = [np.median(sampler2.chain[:,-1,i]) for i in np.arange(8)]\n\n finals = np.zeros(8)\n widths = np.zeros(8)\n for iii in np.arange(8):\n \n vect = np.linspace(np.min(sampler2.chain[:,-500:,iii]),np.max(sampler2.chain[:,-500:,iii]),100)\n step = vect[1]-vect[0]\n\n\n to_plot = [np.sum( (sampler2.chain[:,-500:,iii] > i) & ((sampler2.chain[:,-500:,iii] < i + step))) for i in vect]\n\n x0 = vect[np.argmax(to_plot)]\n w0 = (np.max(vect) - np.min(vect))/2\n\n res = opt.minimize( log_lh, [ x0, w0 ], args = (vect,to_plot,1 ))\n finals[iii] = res.x[0]\n widths[iii] = res.x[1]\n\n ra_c,dec_c,theta,inc,v_asym,r_t,v_sys,gamma = finals\n\n ##alternative\n # samples = sampler2.chain[:,-2,:8]\n # a = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), zip(*np.percentile(samples, [16,50, 84], axis=0)))\n # list(a)\n\n #inc = 1.25\n velff = velfun.vel_with_shear(ra,dec,ra_c,dec_c,theta0 + theta, np.pi + i0 + inc,v_asym,r_t,v_sys,gamma)\n\n\n pred_gamma = np.median(sampler2.chain[:,-1,7])\n\n data = np.zeros(vel_copy.shape)*np.NaN\n model = np.zeros(vel_copy.shape)*np.NaN\n uncer = np.zeros(vel_copy.shape)*np.NaN\n snr = np.zeros(vel_copy.shape)*np.NaN\n\n for iii in np.arange(ra.shape[0]):\n data[dec[iii],ra[iii]] = vel[iii]\n model[dec[iii],ra[iii]] = velff[iii]\n uncer[dec[iii],ra[iii]] = unc[iii]\n snr[dec[iii],ra[iii]] = sn[iii]\n\n # save file with results\n\n final_res = ''\n final_with = ''\n\n for item in np.arange(len(finals)):\n final_res += str('%.4f' %finals[item]) + ', '\n final_with += str('%.4f' %np.abs(widths[item])) + ', '\n\n final_res = final_res[:-2]\n final_with = final_with[:-2]\n\n if save_files:\n with open(galaxy_dir + '/txt/' + name +'.txt', 'w+') as file:\n file.write(\"ra, dec, ra_c, dec_c, theta, inc, v_asym, r_t, v_sys, gamma \\n\")\n file.write(final_res + '\\n')\n file.write(final_with + '\\n')\n\n\n # Reduced chi squared\n\n dd = data[~np.isnan(data)]\n mm = model[~np.isnan(data)]\n num_of_param = 8\n\n d_m2 = np.abs(dd - mm)\n d_m = d_m2[np.isfinite(d_m2)]\n unc = unc[np.isfinite(d_m2)]\n chi = np.nansum( np.nansum( (d_m[d_m < 100] )**2 / unc[d_m < 100]**2) / (len(dd) - num_of_param) )\n\n if plots:\n \n # -- plot -----------------------------------------------------\n\n v0 = finals[ 6 ] #systemic velocity\n\n vel_lim = np.max([np.abs(np.nanmax(data-v0)),np.abs(np.nanmin(data-v0))])\n x_top = np.max(ra) + 2\n x_low = np.min(ra) - 2\n y_top = np.max(dec) + 2\n y_low = np.min(dec) - 2\n\n fig = plt.figure(11, figsize=(15,10))\n plt.clf()\n c_ax = plt.subplot(221)\n plt.title('data')\n axp = plt.imshow(data-v0, origin = 'lower', aspect = 2 , cmap = 'RdBu', vmin = -vel_lim, vmax = vel_lim )\n\n #plt.xlim([x_low,x_top])\n #plt.ylim([y_low,y_top])\n\n cbaxes = fig.add_axes([0.08, 0.2, 0.01, 0.59]) \n cb = plt.colorbar(axp, cax = cbaxes)\n cbaxes.yaxis.set_ticks_position('left')\n\n\n plt.subplot(222)\n v_all = velfun.vel_with_shear(ra_copy,dec_copy,ra_c,dec_c,theta,inc,v_asym,r_t,v_sys,gamma); \n plt.imshow(v_all-v0, origin = 'lower', aspect = 2 , cmap = 'RdBu', vmin = -vel_lim, vmax = vel_lim )\n # plt.xlim([x_low,x_top])\n # plt.ylim([y_low,y_top])\n\n plt.subplot(223)\n plt.title('model -- g= ' + '%2.3f'%pred_gamma)\n plt.imshow(model-v0, origin = 'lower', aspect = 2 , cmap = 'RdBu', vmin = -vel_lim, vmax = vel_lim )\n # plt.xlim([x_low,x_top])\n # plt.ylim([y_low,y_top])\n\n plt.subplot(224)\n plt.title('data-model')\n\n limits = np.max([np.nanmax(data-model), np.nanmin(data-model)])\n axp2 = plt.imshow(data-model, origin = 'lower', aspect = 2 , cmap = 'PiYG', vmin = - limits, vmax = limits)\n # plt.xlim([x_low,x_top])\n # plt.ylim([y_low,y_top])\n\n cbaxes = fig.add_axes([1-0.09, 0.2, 0.01, 0.59]) \n cb = plt.colorbar(axp2, cax = cbaxes)\n\n if save_files:\n plt.savefig(galaxy_dir + '/images/' + name +'_comp.png')\n\n # -- checks --\n\n\n plt.figure(2, figsize=(20,30))\n plt.clf()\n ax1 = fig.suptitle('First Iteration', fontsize=14, fontweight='bold')\n\n\n for jj in range(8):\n plt.subplot2grid((8,2),(jj,0))\n plt.plot(sampler.chain[ :,:, jj ].T, 'k', alpha=0.1 )\n\n vect = np.linspace(np.min(sampler.chain[:,-500:,jj]),np.max(sampler.chain[:,-500:,jj]),100)\n step = vect[1]-vect[0]\n to_plot = [np.sum( (sampler.chain[:,-500:,jj] > i) & ((sampler.chain[:,-500:,jj] < i + step))) for i in vect]\n plt.subplot2grid((8,2),(jj,1))\n plt.plot(vect, to_plot)\n final = np.argmax(to_plot)\n plt.plot([vect[final],vect[final]], [0,to_plot[final]])\n\n if save_files:\n plt.savefig(galaxy_dir + '/images/' + name +'_diag1.png')\n\n plt.figure(3, figsize=(20,30))\n plt.clf()\n ax1 = fig.suptitle('Second Iteration', fontsize=14, fontweight='bold')\n\n\n for jj in range(9):\n plt.subplot2grid((9,2),(jj,0))\n plt.plot(sampler2.chain[ :,:, jj ].T, 'k', alpha=0.1 )\n\n vect = np.linspace(np.min(sampler2.chain[:,-500:,jj]),np.max(sampler2.chain[:,-500:,jj]),100)\n step = vect[1]-vect[0]\n to_plot = [np.sum( (sampler2.chain[:,-500:,jj] > i) & ((sampler2.chain[:,-500:,jj] < i + step))) for i in vect]\n \n plt.subplot2grid((9,2),(jj,1))\n plt.plot(vect, to_plot)\n \n if jj < 8:\n result = f_A( vect, to_plot, finals[jj], widths[jj]) * g( vect, finals[jj], widths[jj] )\n \n final = np.argmax(to_plot)\n plt.plot(vect,result)\n \n final = np.argmax(result)\n plt.plot([vect[final],vect[final]], [0,to_plot[final]])\n \n\n if save_files:\n plt.savefig(galaxy_dir + '/images/' + name +'_diag2.png')\n\n\n\n fig = plt.figure(4, figsize=(15,10))\n plt.clf()\n\n ax1 = fig.suptitle(path.split('/')[-1], fontsize=14, fontweight='bold')\n\n\n Y,X = np.meshgrid(\n np.linspace(0,(vel_copy.shape[0]-1),vel_copy.shape[0]),\n np.linspace(0,(vel_copy.shape[1]-1),vel_copy.shape[1]))\n xlim_1 = np.min(X.T[~np.isnan(data)]) - 1 \n xlim_2 = np.max(X.T[~np.isnan(data)]) + 1\n ylim_1 = np.min(Y.T[~np.isnan(data)]) - 1\n ylim_2 = np.max(Y.T[~np.isnan(data)]) + 1\n\n \n plt.subplot(121)\n plt.title('data and model')\n plt.imshow(data-v0, origin = 'lower', cmap = 'RdBu', aspect = 2)\n plt.colorbar()\n cont1 = plt.contour(X,Y,model.T-v0, cmap = 'RdBu', linewidths = 3)\n cont2 = plt.contour(X,Y,model.T-v0, 0, colors='black', linewidths = 3)\n plt.plot([ra_c-20, ra_c+20], [dec_c - 10 * np.tan(-theta), dec_c + 10 * np.tan(-theta)] ,'k--')\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n plt.text(xlim_1 + 1, ylim_2 - 1, r'$\\gamma = $' + str(finals[7])[:7] + '+-' + str(widths[7])[:5] ,color='black', fontsize=12)\n\n ax2 = plt.subplot(122)\n plt.title('residuals (d-m)')\n plt.imshow(data-model, origin = 'lower', aspect = 2, cmap = 'PiYG')\n plt.colorbar()\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n plt.text(xlim_1 + 1, ylim_2 - 1, r'$\\chi^2_\\nu = $' + str(abs(chi))[:5] ,color='black', fontsize=12)\n\n plt.savefig(galaxy_dir + '/images/' + name + '_master_fit.png')\n\n\n\n # -------------------------------------------------------------\n fig = plt.figure(12, figsize=(15,15))\n plt.clf()\n\n ax1 = fig.suptitle(path.split('/')[-1], fontsize=14, fontweight='bold')\n\n # -------------------------\n \n plt.subplot(221)\n plt.title('data [km/s]')\n plt.imshow(data-v0, origin = 'lower', cmap = 'RdBu', aspect = 2, vmin = -vel_lim, vmax = vel_lim)\n plt.colorbar()\n cont1 = plt.contour(X,Y,data.T-v0, np.linspace(-280,280,29), \n colors= 'black', linewidths = 2)\n cont2 = plt.contour(X,Y,data.T-v0, 0, colors='green', linewidths = 2)\n plt.plot([ra_c-20, ra_c+20], [dec_c - 10 * np.tan(-theta), dec_c + 10 * np.tan(-theta)] ,'k--')\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n\n\n # -------------------------\n\n ax2 = plt.subplot(222)\n plt.title('model [km/s]')\n plt.imshow(model-v0, origin = 'lower', cmap = 'RdBu', aspect = 2, vmin = -vel_lim, vmax = vel_lim )\n plt.colorbar()\n cont1 = plt.contour(X,Y,model.T-v0, np.linspace(-280,280,29), \n colors= 'black', linewidths = 2)\n cont2 = plt.contour(X,Y,model.T-v0, 0, colors='green', linewidths = 2)\n plt.plot([ra_c-20, ra_c+20], [dec_c - 10 * np.tan(-theta), dec_c + 10 * np.tan(-theta)] ,'k--')\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n plt.text(xlim_1 + 1, ylim_2 - 1, r'$\\gamma = $' + str(finals[7])[:7] + '+-' + str(widths[7])[:5] ,color='black', fontsize=12)\n\n # -------------------------\n\n ax2 = plt.subplot(223)\n plt.title('uncertainties [km/s]')\n plt.imshow(uncer, origin = 'lower', aspect = 2)\n plt.colorbar()\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n\n # -------------------------\n err_lim = 2 * np.nanstd(data-model)\n ax2 = plt.subplot(224)\n plt.title('residuals (d-m) [km/s]')\n plt.imshow(data-model, origin = 'lower', aspect = 2, cmap = 'PiYG', vmin = -err_lim, vmax = err_lim)\n \n plt.colorbar()\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n plt.text(xlim_1 + 1, ylim_2 - 1, r'$\\chi^2_\\nu = $' + str(abs(chi))[:5] ,color='black', fontsize=12)\n\n plt.savefig(galaxy_dir + '/images/' + name + '_big_test.png')\n\n # -----------------------------------------------------------\n\n # -----------------------------------------------------------\n\n fig = plt.figure(13, figsize=(15,15))\n plt.clf()\n\n ax1 = fig.suptitle(path.split('/')[-1], fontsize=14, fontweight='bold')\n\n # -------------------------\n \n plt.subplot(221)\n plt.title('Sum of SNR')\n plt.imshow(snr, origin = 'lower', aspect = 2)\n plt.colorbar()\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n\n\n # -------------------------\n\n ax2 = plt.subplot(222)\n plt.title('data & model [km/s]')\n plt.imshow(data-v0, origin = 'lower', cmap = 'RdBu', aspect = 2, vmin = -vel_lim, vmax = vel_lim )\n plt.colorbar()\n cont1 = plt.contour(X,Y,model.T-v0, np.linspace(-280,280,29), \n colors = 'black', linewidths = 2)\n\n cont2 = plt.contour(X,Y,model.T-v0, 0, colors='green', linewidths = 2)\n plt.plot([ra_c-20, ra_c+20], [dec_c - 10 * np.tan(-theta), dec_c + 10 * np.tan(-theta)] ,'k--')\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n plt.text(xlim_1 + 1, ylim_2 - 1, r'$\\gamma = $' + str(finals[7])[:7] + '+-' + str(widths[7])[:5] ,color='black', fontsize=12)\n\n # -------------------------\n\n ax2 = plt.subplot(223)\n plt.title('uncertainties [km/s]')\n plt.imshow(uncer, origin = 'lower', aspect = 2)\n plt.colorbar()\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n\n # -------------------------\n \n\n ax2 = plt.subplot(224)\n plt.title('residuals (d-m) [km/s]')\n plt.imshow(data-model, origin = 'lower', aspect = 2, cmap = 'PiYG', vmin = - err_lim, vmax = err_lim)\n plt.colorbar()\n plt.xlim([xlim_1, xlim_2])\n plt.ylim([ylim_1, ylim_2])\n plt.text(xlim_1 + 1, ylim_2 - 1, r'$\\chi^2_\\nu = $' + str(abs(chi))[:5] ,color='black', fontsize=12 )\n\n plt.savefig(galaxy_dir + '/' + name + '_big_test.png')\n\n\n\n # return data, model, unc \n return finals, widths\n\n\n\n\n# ---------------- FIT GAUSSIAN ---------------------------------------\n\ndef f_A( x, flux, x0, sig_g ):\n # Model of the gaussian using analytic amplitude\n\n # Normalized 2D Gaussian\n gg = g( x, x0, sig_g )\n\n # Compute analytic amplitude\n A_up = np.nansum( flux * gg )#/ sig_f**2 )\n A_down = np.nansum( gg**2 )#/ sig_f**2 )\n\n A = 0\n if ( A_down > 0.0 ): A = A_up / A_down\n\n return A\n\ndef g( x, x0, sig_g ):\n # 2D normalized gaussian\n g = 1 / ( 2 * np.pi * sig_g**2 ) * np.exp( \n -( ( x-x0 )**2 ) / ( 2 * sig_g**2 ) )\n\n return g\n\n\ndef log_lh( param, x, flux, sig_g ):\n # log likelihood funciton\n\n x0, sig_g = param\n l_lh = 0\n \n # gaussian model\n model = f_A( x, flux, x0, sig_g) * g(x, x0, sig_g)\n \n # negative log likelihood ( negative because we want to minimize )\n l_lh = + 0.5 * np.nansum( ( flux - model )**2 ) + np.log( sig_g**2 )\n\n return l_lh\n\n\n\n# ---------------- RUN ME --------------------------------------------\n\nif False:\n\n work_path = '/home/pgurri/polproject/data_reduction/wifes/final/'\n\n vel_paths = []\n\n for obs in [work_path + i for i in os.listdir( work_path ) if 'GGL_' in i]:\n for vel in [ obs +'/' + n for n in os.listdir( obs) if '_vel.fits' in n]:\n vel_paths.append(vel)\n\n\n mylen = len(vel_paths)\n\n\n if len(sys.argv) > 1:\n if int (sys.argv[1]) < mylen:\n run_me( vel_paths[ int( sys.argv[1] ) ] )\n #run_me('/home/pgurri/polproject/data_reduction/wifes/final/GGL_105333+0838_apr18_n1/GGL_105333+0838_r_vel.fits')\n\n\n\n\n\n\n\n","sub_path":"extra_stuff/Pol_code/vel_fit_all.py","file_name":"vel_fit_all.py","file_ext":"py","file_size_in_byte":18886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"556345520","text":"def prList(arr):\n for idx,el in enumerate(arr):\n if(idx!=len(arr)-1):\n print(el,end=\",\")\n else:\n print(el)\na=[10,20,30,40,50]\nb=[30,4,56]\nprList(a)\nprList(b)\n\ndef enumList(arr):\n for idx,el in enumerate(arr):\n print(idx+1,el,sep=\". \")\n print(idx+1,\".\",el)\n print(\"{}. {}\".format(idx+1,el))\nc=['apple','banana','orange']\nenumList(c)","sub_path":"3_list/list_enumerate.py","file_name":"list_enumerate.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"74809110","text":"from allauth.account.adapter import get_adapter\nfrom rest_auth.registration.serializers import RegisterSerializer\n#from rest_auth.serializers import PasswordResetSerializer\nfrom rest_framework import serializers\nfrom rest_framework.authtoken.models import Token\nfrom quiz.models import *\nfrom . import models\nfrom django.contrib.auth.forms import PasswordResetForm\nfrom django.conf import settings\n#from django.utils.translation import gettext as _\nfrom dj_rest_auth.serializers import PasswordResetSerializer\nfrom allauth.account.forms import ResetPasswordForm\nfrom quiz.serializers import *\n\nimport datetime\nfrom datetime import datetime,timedelta,date\nfrom pytz import timezone\n\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = ('email', 'username', 'password', 'mobile', 'profile_pic', 'first_name', 'last_name')\n\n#class CustomPasswordResetSerializer(PasswordResetSerializer):\n # password_reset_form_class = ResetPasswordForm\n # def save(self):\n # request = self.context.get('request')\n # Set some values to trigger the send_email method.\n # opts = {\n # 'use_https': request.is_secure(),\n # 'from_email': 'testingserver.12307@gmail.com',\n # 'request': request,\n # here I have set my desired template to be used\n # don't forget to add your templates directory in settings to be found\n # 'email_template_name': 'registration/password_reset_email.html'\n #}\n\n # opts.update(self.get_email_options())\n #self.reset_form.save(**opts)\n\nclass CustomPasswordResetSerializer(PasswordResetSerializer):\n password_reset_form_class = ResetPasswordForm\n def get_email_options(self) :\n \n return {\n 'email_template_name': 'registration/password_reset_email.html'\n }\n\n\n\nclass CustomRegisterSerializer(RegisterSerializer):\n mobile = serializers.CharField(allow_blank = True, allow_null=True)\n first_name = serializers.CharField()\n last_name = serializers.CharField(allow_blank = True, allow_null=True)\n email = serializers.EmailField(allow_blank = True, allow_null=True)\n\n\n class Meta:\n model = models.User\n fields = ('email', 'username', 'password', 'mobile', 'first_name', 'last_name')\n\n def get_cleaned_data(self):\n return {\n 'username': self.validated_data.get('username', ''),\n 'first_name': self.validated_data.get('first_name', ''),\n 'last_name': self.validated_data.get('last_name', ''),\n 'password1': self.validated_data.get('password1', ''),\n 'password2': self.validated_data.get('password2', ''),\n 'email': self.validated_data.get('email', ''),\n 'mobile': self.validated_data.get('mobile', ''),\n\n }\n\n def save(self, request):\n adapter = get_adapter()\n user = adapter.new_user(request)\n self.cleaned_data = self.get_cleaned_data()\n user.mobile = self.cleaned_data.get('mobile')\n user.save()\n adapter.save_user(request, user, self)\n return user\n\n\nclass TokenSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Token\n fields = ('key', 'user')\n\nclass TeamMemberSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"team-members-detail\")\n\n class Meta:\n model = models.TeamMember\n fields = ['id', 'image', 'name', 'designation']\n\nclass TeamFormSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"team-form-detail\")\n\n class Meta:\n model = models.TeamForm\n fields = ['id', 'full_name', 'email', 'mobile', 'cv', 'other_document']\n\nclass ContactUsSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.ContactUs\n fields = ['id', 'full_name', 'email', 'mobile', 'message']\n\nclass FeedbackSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"feedback-detail\")\n\n class Meta:\n model = models.Feedback\n fields = ['id', 'experience', 'message', 'is_bug']\n\nclass FAQSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"faqs-detail\")\n\n class Meta:\n model = models.FAQ\n fields = ['id', 'question', 'answer']\n\nclass ArticleSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"articles-detail\")\n\n class Meta:\n model = models.Article\n fields = ['id', 'title', 'image', 'date', 'content']\n\nclass NewsSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"news-detail\")\n\n class Meta:\n model = models.News\n fields = ['id', 'title', 'image', 'date', 'content']\n\n\nclass NewsletterSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"newsletter-detail\")\n\n class Meta:\n model = models.Newsletter\n fields = ['id', 'email']\n\nclass CategorySerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:category-detail\")\n\n class Meta:\n model = models.Category\n fields = ['id', 'name']\n\nclass SubCategorySerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:sub-category-detail\")\n\n class Meta:\n model = models.SubCategory\n fields = ['id', 'name']\n\n\nclass PDFSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:pdf-detail\")\n sub_category = SubCategorySerializer(many=False, read_only = True)\n type = serializers.SerializerMethodField(read_only = True)\n\n class Meta:\n model = models.PDF\n fields = [ 'id', 'name', 'file', 'price', 'sub_category', 'type']\n\n def get_type(self, obj):\n return \"pdf\"\n\nclass PDFListSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:pdf-detail\")\n sub_category = SubCategorySerializer(many=False, read_only = True)\n type = serializers.SerializerMethodField(read_only = True)\n file = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = models.PDF\n fields = ['id', 'name', 'price', 'sub_category', 'file', 'type']\n\n def get_type(self, obj):\n return \"pdf\"\n\n def get_file(self,obj):\n\n if obj.price<1:\n file =self.context['request'].build_absolute_uri(obj.file.url)\n # return obj.file.url\n return file\n \n user = self.context['request'].user\n if user.id is not None:\n sub = models.UserSubscriptions.get(user = user)\n if obj in sub.pdfs.all():\n file =self.context['request'].build_absolute_uri(obj.file.url)\n # return obj.file.url\n return file\n else:\n return None\n\nclass MCQSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:mcq-detail\")\n sub_category = SubCategorySerializer(many=False, read_only = True)\n type = serializers.SerializerMethodField(read_only = True)\n\n class Meta:\n model = models.MCQ\n fields = ['id', 'name', 'file', 'price', 'sub_category', 'image', 'preview_file', 'description', 'type']\n\n def get_type(self, obj):\n return \"mcq\"\n\nclass MCQListSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:mcq-detail\")\n sub_category = SubCategorySerializer(many=False, read_only = True)\n type = serializers.SerializerMethodField(read_only = True)\n file = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = models.MCQ\n fields = [ 'id', 'file', 'name', 'price', 'sub_category', 'image', 'preview_file', 'description', 'type', \"file\"]\n\n def get_type(self, obj):\n return \"mcq\"\n\n def get_file(self,obj):\n\n if obj.price<1:\n file =self.context['request'].build_absolute_uri(obj.file.url)\n # return obj.file.url\n return file\n \n user = self.context['request'].user\n if user.id is not None:\n sub = models.UserSubscriptions.objects.get(user = user)\n if obj in sub.mcqs.all():\n file =self.context['request'].build_absolute_uri(obj.file.url)\n # return obj.file.url\n return file\n else:\n return None\n\nclass SummarySerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:summary-detail\")\n sub_category = SubCategorySerializer(many=False, read_only = True)\n mcq = MCQSerializer(many=True, read_only = True)\n type = serializers.SerializerMethodField(read_only = True)\n\n class Meta:\n model = models.Summary\n fields = ['id', 'name', 'file', 'price', 'sub_category', 'description','mcq', 'image', 'preview_file', 'type']\n\n def get_type(self, obj):\n return \"summary\"\n\nclass SummaryListSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:summary-detail\")\n sub_category = SubCategorySerializer(many=False, read_only = True)\n mcq = MCQListSerializer(many=True, read_only = True)\n type = serializers.SerializerMethodField(read_only = True)\n file = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = models.Summary\n fields = ['id', 'name', 'price', 'sub_category', 'description','mcq', 'image', 'preview_file', 'type',\"file\"]\n\n def get_type(self, obj):\n return \"summary\"\n \n def get_file(self,obj):\n\n if obj.price<1:\n file =self.context['request'].build_absolute_uri(obj.file.url)\n # return obj.file.url\n return file\n \n user = self.context['request'].user\n if user.id is not None:\n sub = models.UserSubscriptions.objects.get(user = user)\n if obj in sub.summaries.all():\n file =self.context['request'].build_absolute_uri(obj.file.url)\n # return obj.file.url\n return file\n else:\n return None\n\nclass SessionSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:session-detail\")\n type = serializers.SerializerMethodField(read_only = True)\n upcoming = serializers.SerializerMethodField(read_only = True)\n \n class Meta:\n model = models.Session\n fields = ['id', 'name', 'image', 'price', 'date', 'video','youtube_link', 'type', 'upcoming', 'demo']\n\n def get_type(self, obj):\n return \"session\"\n\n def get_upcoming(self, obj):\n if obj.date > date.today():\n return True\n else:\n return False\n\nclass SessionListSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:session-detail\")\n type = serializers.SerializerMethodField(read_only = True)\n upcoming = serializers.SerializerMethodField(read_only = True)\n file = serializers.SerializerMethodField(read_only=True)\n class Meta:\n model = models.Session\n fields = ['id', 'name', 'image', 'price', 'date','youtube_link', 'type', 'upcoming', 'demo',\"file\"]\n\n def get_type(self, obj):\n return \"session\"\n\n def get_upcoming(self, obj):\n if obj.date > date.today():\n return True\n else:\n return False\n \n def get_file(self,obj):\n\n if obj.price<1:\n file =self.context['request'].build_absolute_uri(obj.video.url)\n # return obj.file.url\n return file\n \n user = self.context['request'].user\n if user.id is not None:\n sub = models.UserSubscriptions.objects.get(user = user)\n if obj in sub.sessions.all():\n file =self.context['request'].build_absolute_uri(obj.video.url)\n # return obj.file.url\n return file\n else:\n return None\n\nclass UserSubscriptionsSerializer(serializers.ModelSerializer):\n # url = serializers.HyperlinkedIdentityField(view_name=\"core:user-subscription-detail\")\n user = UserSerializer(many=False, read_only = True)\n pdfs = PDFSerializer(many=True, read_only = True)\n mcqs = MCQListSerializer(many=True, read_only = True)\n summaries = SummaryListSerializer(many=True, read_only = True)\n sessions = SessionListSerializer(many=True, read_only = True)\n tests = QuizListSerializer(many=True, read_only = True)\n # quizinfo = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = models.UserSubscriptions\n fields = ['id', 'user', 'pdfs', 'mcqs', 'summaries', 'sessions', 'tests']\n \n # def get_quizinfo(self,obj):\n\n # list1=[]\n # for i in obj.tests.all():\n\n # try:\n # quiztaker = QuizTaker.objects.get(user=obj.user,quiz=i)\n # data={\n # \"quiz id\":i.id,\n # \"quiz name\":i.name,\n # \"complete\":quiztaker.complete,\n # \"quiz_day_rank\":quiztaker.quiz_day_rank\n # }\n # list1.append(data)\n # except:\n # data={\n # \"quiz id\":i.id,\n # \"quiz name\":i.name,\n # \"complete\":False\n # }\n # list1.append(data)\n # return list1\n\nclass SearchSerializer(serializers.Serializer):\n\n # pdfs = PDFListSerializer(many=True, read_only = True)\n # mcqs = MCQListSerializer(many=True, read_only = True)\n # summaries = SummarySerializer(many=True, read_only = True)\n # sessions = SessionSerializer(many=True, read_only = True)\n # tests = QuizListSerializer(many=True, read_only = True)\n\n pdfs = serializers.SerializerMethodField('get_pdfs')\n mcqs = serializers.SerializerMethodField('get_mcqs')\n summaries = serializers.SerializerMethodField('get_summaries')\n sessions = serializers.SerializerMethodField('get_sessions')\n tests = serializers.SerializerMethodField('get_tests')\n\n class Meta:\n fields =[\"pdfs\",\"mcqs\",\"summaries\",\"sessions\",\"tests\"]\n\n def get_pdfs(self,obj):\n serializer_context = {'request': self.context.get('request') }\n queryset = obj.pdfs\n serializer = PDFListSerializer(queryset, many=True, context=serializer_context)\n return serializer.data\n\n def get_mcqs(self,obj):\n serializer_context = {'request': self.context.get('request') }\n queryset = obj.mcqs\n serializer = MCQListSerializer(queryset, many=True, context=serializer_context)\n return serializer.data\n\n def get_summaries(self,obj):\n serializer_context = {'request': self.context.get('request') }\n queryset = obj.summaries\n serializer = SummaryListSerializer(queryset, many=True, context=serializer_context)\n return serializer.data\n\n def get_sessions(self,obj):\n serializer_context = {'request': self.context.get('request') }\n queryset = obj.sessions\n serializer = SessionListSerializer(queryset, many=True, context=serializer_context)\n return serializer.data\n\n def get_tests(self,obj):\n serializer_context = {'request': self.context.get('request') }\n queryset = obj.tests\n serializer = QuizListSerializer(queryset, many=True, context=serializer_context)\n return serializer.data\nclass GeneralNotificationSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.GeneralNotification\n fields = '__all__'\n\nclass PersonalNotification(serializers.ModelSerializer):\n\n class Meta:\n model = models.PersonalNotification\n fields = '__all__'\n\nclass Personalnotif(serializers.ModelSerializer):\n\n# quiz = QuizMInSerializer(many=True)\n quizinfo = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n quizname = serializers.SerializerMethodField()\n quizslug = serializers.SerializerMethodField()\n class Meta:\n\n model = models.PersonalNotification\n fields = '__all__'\n \n def get_quizinfo(self,obj):\n\n now = datetime.now()\n quiz = Quiz.objects.get(id=obj.quiz_id)\n\n quizSlot = QuizSlot.objects.filter(quiz=quiz)\n for slot in quizSlot:\n if now>=slot.start_datetime and now<=(slot.start_datetime+quiz.duration):\n data={\n \"date\":(slot.start_datetime+timedelta(hours=5,minutes=30)).strftime(\"%b %d %Y\"),\n \"time\":(slot.start_datetime+timedelta(hours=5,minutes=30)).strftime(\"%H:%M:%S\")\n }\n return data\n elif now 0):\n time.sleep(delta)\n \n return metric\n\n\n def __random_metric(self):\n\n # Adjust the \"true\" state of the vehicle\n distance_traveled = random.uniform(0,0.08)\n fuel_consumed = max(0,(1/self.__fuel_economy) * distance_traveled * random.normalvariate(1.0, 0.25))\n self.__odometer += distance_traveled\n self.__fuel -= fuel_consumed\n self.__time += 1\n\n # Occassionally refuel\n if random.uniform(0, self.__fuel) < 0.02:\n self.__fuel = min(self.__tank_size, self.__tank_size + random.normalvariate(-1, 1))\n\n # Fuel consumption metrics are 20 times more common than odometer metrics\n if random.randint(0,19) == 0:\n # Odometer metrics have no noise, but are truncated to round numbers of miles\n return Metric(\"odometer\", int(self.__odometer), self.__time)\n else:\n # Fuel level has a significant level of noise\n noise = random.normalvariate(0, 1.5)\n return Metric(\"fuel\", max(0, min(self.__fuel + noise, self.__tank_size)), self.__time)\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"topic\", help=\"The kafka topic to which metric supplier will send data\")\n parser.add_argument(\"key\", help=\"The key with which the metric supplier will send messages to kafka\")\n args = parser.parse_args()\n \n\n \n producer = KafkaProducer(bootstrap_servers=['localhost:9092'],value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n \n supplier = MetricSupplier()\n for m in supplier:\n print(m.timestamp(), m.kind(), m.value())\n producer.send(args.topic,key=args.key, value={'timestamp': m.timestamp, 'kind': m.kind, 'value': m.value})\n \n\n","sub_path":"supplier.py","file_name":"supplier.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"506807767","text":"import asyncio\nimport binascii\nfrom distutils.version import StrictVersion\nimport json\nimport logging\nimport os.path\nimport requests\nimport struct\nimport voluptuous as vol\n\nfrom homeassistant.const import (\n ATTR_FRIENDLY_NAME, __version__ as current_ha_version)\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.typing import ConfigType\n\n_LOGGER = logging.getLogger(__name__)\n\nDOMAIN = 'smartir'\nVERSION = '1.2.1'\nVERSION_URL = (\n \"https://raw.githubusercontent.com/smartHomeHub/SmartIR/master/version.json\")\nREMOTE_BASE_DIR = (\n \"https://raw.githubusercontent.com/smartHomeHub/SmartIR/master/smartir/\"\n)\nCONF_CHECK_UPDATES = 'check_updates'\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n vol.Optional(CONF_CHECK_UPDATES, default=True): cv.boolean\n })\n}, extra=vol.ALLOW_EXTRA)\n\nasync def async_setup(hass, config):\n \"\"\"Set up the SmartIR component.\"\"\"\n conf = config.get(DOMAIN)\n\n async def check_updates(service):\n await _update(hass)\n\n async def update_component(service):\n await _update(hass, True)\n\n hass.services.async_register(DOMAIN, 'check_updates', check_updates)\n hass.services.async_register(DOMAIN, 'update_component', update_component)\n\n if conf[CONF_CHECK_UPDATES]:\n await _update(hass, False, False)\n\n return True\n\nasync def _update(hass, do_update=False, notify_if_latest=True):\n has_errors = False\n\n request = requests.get(VERSION_URL, stream=True, timeout=10)\n \n if request.status_code == 200:\n data = request.json()\n last_version = data['version']\n min_ha_version = data['minHAVersion']\n release_notes = data['releaseNotes']\n \n if StrictVersion(last_version) <= StrictVersion(VERSION):\n if notify_if_latest:\n hass.components.persistent_notification.async_create(\n \"You're already using the latest version\", title='SmartIR')\n return\n \n if StrictVersion(current_ha_version) >= StrictVersion(min_ha_version):\n if do_update:\n files = data['files']\n abspath = os.path.dirname(os.path.abspath(__file__))\n\n for file in files:\n try:\n source = REMOTE_BASE_DIR + file\n dest = os.path.join(abspath, file)\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n Helper.downloader(source, dest)\n except:\n _LOGGER.error(\"Error updating %s. Please update the file manually.\", file)\n has_errors = True\n else:\n hass.components.persistent_notification.async_create(\n release_notes, title='SmartIR')\n\n else:\n hass.components.persistent_notification.async_create(\n \"There is a new version of SmartIR, but it is **incompatible** \"\n \"with your HA version. Please first update Home Assistant.\", title='SmartIR')\n\n else:\n _LOGGER.error(\"Invalid response from the server while checking for a new version\")\n has_errors = True\n\n if do_update:\n if has_errors:\n hass.components.persistent_notification.async_create(\n \"There was an error updating SmartIR. Please \"\n \"check the logs for more information.\", title='SmartIR')\n else:\n hass.components.persistent_notification.async_create(\n \"Successfully updated to {}. Please restart Home Assistant.\"\n .format(last_version), title='SmartIR')\n\nclass Helper():\n @staticmethod\n def downloader(source, dest):\n req = requests.get(source, stream=True, timeout=10)\n\n if req.status_code == 200:\n with open(dest, 'wb') as fil:\n for chunk in req.iter_content(1024):\n fil.write(chunk)\n else:\n raise Exception('File not found')\n\n @staticmethod\n def pronto2lirc(pronto):\n codes = [int(binascii.hexlify(pronto[i:i+2]), 16) for i in range(0, len(pronto), 2)]\n\n if codes[0]:\n raise ValueError('Pronto code should start with 0000')\n if len(codes) != 4 + 2 * (codes[2] + codes[3]):\n raise ValueError('Number of pulse widths does not match the preamble')\n\n frequency = 1 / (codes[1] * 0.241246)\n return [int(round(code / frequency)) for code in codes[4:]]\n\n @staticmethod\n def lirc2broadlink(pulses):\n array = bytearray()\n\n for pulse in pulses:\n pulse = int(pulse * 269 / 8192)\n\n if pulse < 256:\n array += bytearray(struct.pack('>B', pulse))\n else:\n array += bytearray([0x00])\n array += bytearray(struct.pack('>H', pulse))\n\n packet = bytearray([0x26, 0x00])\n packet += bytearray(struct.pack(' 21:\r\n print('выйграл игрок2')\r\n sys.exit()\r\n elif choice == 'n':\r\n print('У Игрок1 %d очков.' %count1)\r\n break\r\nif count1 > 21:\r\n print('выйграл игрок2')\r\n sys.exit()\r\nwhile True:\r\n choice = input('Игрок2 Будете брать карту? y/n')\r\n if choice == 'y':\r\n current2 = koloda.pop()\r\n print('Вам попалась карта достоинством %d' %current2)\r\n count2 += current2\r\n print('У Игрок2 %d очков.' %count2)\r\n if count2 > 21:\r\n print('выйграл игрок1')\r\n sys.exit()\r\n elif choice == 'n':\r\n print('У Игрок2 %d очков.' %count2)\r\n break\r\nif count1 == count2:\r\n print('Ничья')\r\nif count1 > count2:\r\n print('Выйграл Игрок1')\r\nif count1 < count2:\r\n print('Выйграл Игрок2')\r\n\r\n\r\n","sub_path":"21 на двоих РАБОЧИЙ 18.05.py","file_name":"21 на двоих РАБОЧИЙ 18.05.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"90283440","text":"with open(\"sequence.protein.gb\",\"r\") as handle:\n content = handle.readlines()\n\nfor i in range(len(content)):\n if i == 0:\n title = content[i].strip()\n elif content[i].startswith(\"ORIGIN\"):\n seq = [i.lstrip() for i in content[i+1:]]\nseq = \"\".join(seq)\n\nseq_line = seq.split(\"\\n\")\nseq_result = \"\"\nfor line in seq_line:\n seq_list = line.split(\" \")\n if len(seq_list) < 2:\n continue\n seq_result += \"\".join(seq_list[1:])\nprint(seq_result)\n","sub_path":"bioinformatics_4_2.py","file_name":"bioinformatics_4_2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"287742023","text":"# coding:utf-8\n\nfrom flask import Blueprint, render_template, request, jsonify, json\nfrom flask_security import login_required, current_user\n\nfrom ..service import articleService, locationService, categoryService\nfrom ..model import ArticleAsset\n\nbp = Blueprint('article_admin', __name__, url_prefix=\"/admin/article\")\n\n\n@bp.route(\"/\", methods=[\"GET\"])\n@login_required\ndef article_mgr():\n return render_template(\"backend/postsMgr.html\")\n\n\n@bp.route(\"/create\", methods=['GET'])\n@bp.route(\"//update\", methods=[\"GET\"])\n@login_required\ndef article_form(article_id=None):\n locations = locationService.get_all()\n categories = categoryService.get_all()\n article = None\n if article_id:\n article = articleService.get_article_by_id(article_id)\n article = {} if article is None else article\n return render_template(\"backend/postsUpdate.html\", article=article, locations=locations, categories=categories)\n\n\n@bp.route(\"/article_video\", methods=[\"GET\"])\n@login_required\ndef article_video_mgr():\n return render_template(\"backend/videosMgr.html\")\n\n\n@bp.route(\"/article_video/create\", methods=['GET'])\n@bp.route(\"/article_video//update\", methods=[\"GET\"])\n@login_required\ndef article_video_form(article_id=None):\n locations = locationService.get_all()\n article = None\n if article_id:\n article = articleService.get_article_by_id(article_id)\n article = {} if article is None else article\n return render_template(\"backend/videosUpdate.html\", article=article, locations=locations)\n\n\n@bp.route(\"/article_music\", methods=[\"GET\"])\n@login_required\ndef article_music_mgr():\n return render_template(\"backend/musicsMgr.html\")\n\n\n@bp.route(\"/article_music/create\", methods=['GET'])\n@bp.route(\"/article_music//update\", methods=[\"GET\"])\n@login_required\ndef article_music_form(article_id=None):\n locations = locationService.get_all()\n article = None\n if article_id:\n article = articleService.get_article_by_id(article_id)\n article = {} if article is None else article\n return render_template(\"backend/musicsUpdate.html\", article=article, locations=locations)\n\n\n@bp.route(\"/create_or_update\", methods=['POST'])\n@login_required\ndef create_or_update():\n article_id = request.form.get(\"article_id\", None)\n title = request.form.get(\"title\")\n description = request.form.get(\"description\")\n profile = request.form.get(\"profile\")\n longitude = float(request.form.get(\"longitude\"))\n latitude = float(request.form.get(\"latitude\"))\n user_id = current_user.id\n content_type = int(request.form.get(\"content_type\")) if request.form.get(\"content_type\", None) else None\n category_id = int(request.form.get(\"category_id\")) if request.form.get(\"category_id\", None) else None\n location_id = int(request.form.get(\"location_id\"))\n assets = map(lambda asset_dict: ArticleAsset(**asset_dict), json.loads(request.form.get(\"assets\")))\n\n if article_id:\n articleService.update_article(int(article_id), title, description, profile, longitude, latitude, user_id,\n category_id, location_id, content_type, assets)\n else:\n articleService.add_article(title, description, profile, longitude, latitude, user_id, category_id, location_id,\n content_type, assets)\n return jsonify(data=dict(success=True))\n\n\n@bp.route(\"//assets\", methods=['GET'])\n@login_required\ndef load_assets(article_id):\n article = articleService.get_article_by_id(article_id)\n if article:\n assets = article.assets\n else:\n assets = []\n return jsonify(data=dict(success=True, assets=assets))\n\n\n@bp.route(\"//delete\", methods=['POST'])\n@login_required\ndef delete(article_id):\n articleService.remove_article(article_id)\n return jsonify(data=dict(success=True))\n\n\n@bp.route(\"/list\", methods=[\"GET\"])\n@login_required\ndef data():\n limit = int(request.args.get(\"iDisplayLength\", \"10\"))\n offset = int(request.args.get(\"iDisplayStart\", \"0\"))\n sEcho = request.args.get(\"sEcho\")\n search_content = request.args.get(\"search_content\")\n content_type = int(request.args.get(\"content_type\")) if request.args.get(\"content_type\", None) else None\n count, articles = articleService.paginate(search_content, content_type, offset, limit)\n return jsonify(\n data=dict(success=True, sEcho=sEcho, iTotalRecords=count, iTotalDisplayRecords=count, aaData=articles))\n\n\n\n\n","sub_path":"program/frontend/article_admin.py","file_name":"article_admin.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"12053606","text":"import os\nfrom flask import Flask, Response, request\nimport dictcc # import grabber\n\napp = Flask(__name__)\n# returns JSON w/ word pairs\n# /getJson?from=en&to=de&word=seriously\n@app.route(\"/getJson\")\ndef getPlotCSV():\n # get GET request params\n from_language = request.args.get('from')\n to_language = request.args.get('to')\n word = request.args.get('word')\n\n # data request\n resp = dictcc._get_response(word, from_language, to_language)\n json_obj = dictcc._parse_response(resp)\n\n return Response(response=json_obj,\n status=200, \\\n mimetype=\"application/json\")\n\nif __name__ == \"__main__\":\n app.run(host=os.getenv('IP', '0.0.0.0'),port=int(os.getenv('PORT', 8080)))","sub_path":"flask-server.py","file_name":"flask-server.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"87952899","text":"import pytest\nfrom panda3d import core\n\n\n@pytest.mark.parametrize(\"type\", (core.Mat4, core.Mat4D))\ndef test_mat4_invert(type):\n mat = type((1, 0, 0, 0,\n 0, 1, 0, 0,\n 0, 0, 1, 0,\n 1, 2, 3, 1))\n inv = type()\n assert inv.invert_from(mat)\n\n assert inv == type(( 1, 0, 0, 0,\n 0, 1, 0, 0,\n 0, 0, 1, 0,\n -1, -2, -3, 1))\n\n assert (mat * inv).is_identity()\n assert (inv * mat).is_identity()\n","sub_path":"tests/linmath/test_matrix_invert.py","file_name":"test_matrix_invert.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"98681184","text":"import os\nimport requests\nfrom random import randint\n\n\ndef make_dir(name):\n if not os.path.exists(name):\n os.makedirs(name)\n\n\ndef download_image(url, path='images'):\n make_dir(path)\n img_data = requests.get(url).content\n image_extension = url.split('.')[-1]\n image_name = f'image{randint(1,9999)}.{image_extension}'\n with open(os.path.join(path, image_name), 'wb') as handler:\n handler.write(img_data)\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"378672176","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils.models import TimeStampedModel\nfrom mptt.fields import TreeForeignKey\nfrom mptt.models import MPTTModel\n\nMSG_TYPES = (\n ('text', '文本消息'),\n ('event', '事件消息'),\n ('image', '图片消息'),\n ('location', '位置消息'),\n ('voice', '语音消息'),\n ('video', '视频消息'),\n)\nEVENTS = (\n ('subscribe', '关注事件'),\n ('unsubscribe', '取消关注事件'),\n ('SCAN', '扫描二维码'),\n ('LOCATION', '上报地理位置'),\n ('CLICK', '自定义菜单事件'),\n ('VIEW', '用户点击链接的跳转事件'),\n)\n\n\nclass WechatMenu(MPTTModel):\n TYPE_CHOICES = (('click', '点击'), ('view', '链接'))\n name = models.CharField(blank=True, max_length=50, verbose_name=_(\"名称\"), help_text=\"可以为空,仅用来标识消息\")\n slug = models.CharField(max_length=64, default='00')\n type = models.CharField(blank=True, max_length=50, verbose_name=_(\"类型\"), choices=TYPE_CHOICES)\n key = models.CharField(blank=True, max_length=50, verbose_name=_(\"键值\"), help_text=\"可以为空,仅用来标识消息\")\n\n parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children',\n default='0')\n is_active = models.BooleanField()\n order = models.IntegerField()\n\n def __str__(self):\n return self.name\n\n class MPTTMeta:\n order_insertion_by = ['order']\n parent_attr = 'parent'\n\n class Meta:\n verbose_name = '微信菜单'\n verbose_name_plural = '微信菜单'\n\n\nclass WechatMember(TimeStampedModel):\n \"\"\"msg in database\"\"\"\n\n openid = models.CharField(blank=True, max_length=50, verbose_name=_(\"openid\"), help_text=\"不能为空\", unique=True)\n mobile = models.CharField(_('手机号'), max_length=100, blank=True, null=True, default=None)\n verify = models.CharField(_('验证码'), max_length=100, blank=True, null=True)\n avatar = models.URLField(verbose_name=_('头像'))\n\n remark = models.CharField(_('微信备注'), max_length=100, blank=True, null=True)\n wechat = models.CharField(_('微信用户'), max_length=100, blank=True, null=True)\n # wechat_nick = models.CharField(_('微信昵称'), max_length=100, blank=True, null=True)\n\n nickname = models.CharField(blank=True, max_length=50, verbose_name=_(\"昵称\"), help_text=\"可以为空,仅用来标识消息\")\n name = models.CharField(blank=True, max_length=50, verbose_name=_(\"姓名\"), help_text=\"可以为空,仅用来标识消息\")\n city = models.CharField(blank=True, max_length=50, verbose_name=_(\"城市\"), help_text=\"可以为空,仅用来标识消息\")\n country = models.CharField(blank=True, max_length=50, verbose_name=_(\"国家\"), help_text=\"可以为空,仅用来标识消息\")\n province = models.CharField(blank=True, max_length=50, verbose_name=_(\"省份\"), help_text=\"可以为空,仅用来标识消息\")\n language = models.CharField(blank=True, max_length=50, verbose_name=_(\"语言\"), help_text=\"可以为空,仅用来标识消息\")\n headimgurl = models.CharField(blank=True, max_length=200, verbose_name=_(\"头像\"), help_text=\"可以为空,仅用来标识消息\")\n unionid = models.CharField(blank=True, max_length=200, verbose_name=_(\"唯一标示\"), help_text=\"可以为空,仅用来标识消息\")\n subscribe_time = models.BigIntegerField(blank=True, verbose_name=_(\"关注事件\"), null=True)\n groupid = models.IntegerField(blank=True, verbose_name=_(\"分组ID\"), null=True)\n sex = models.SmallIntegerField(verbose_name=_('性别'), blank=True, null=True)\n\n def on_delete(self):\n pass\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n\n class Meta:\n verbose_name = '微信会员管理'\n verbose_name_plural = '微信会员管理'\n\n\nclass DBTextMsg(models.Model):\n \"\"\"msg in database\"\"\"\n\n class Meta:\n verbose_name = '回复管理(文字消息)'\n verbose_name_plural = '回复管理(文字消息)'\n\n name = models.CharField(blank=True, max_length=50, verbose_name=\"消息名字\", help_text=\"可以为空,仅用来标识消息\")\n content = models.TextField(blank=False, verbose_name=\"消息内容\")\n\n def on_delete(self):\n pass\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n\n\nclass DBImgTextMsg(models.Model):\n \"\"\"image_text msg in database\"\"\"\n\n class Meta:\n verbose_name = '回复管理(图文消息)'\n verbose_name_plural = '回复管理(图文消息)'\n\n name = models.CharField(blank=True, max_length=50, verbose_name=\"消息名称\", help_text=\"可以为空,仅用来标识消息\")\n title = models.CharField(blank=True, max_length=255, verbose_name=\"消息标题\")\n description = models.TextField(blank=True, verbose_name=\"消息描述\")\n pic_url = models.URLField(blank=False, verbose_name=\"图片地址\")\n url = models.URLField(blank=False, max_length=255, verbose_name=\"文章地址\")\n\n def on_delete(self):\n pass\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n\n\nclass PatternE2T(models.Model):\n \"\"\"text response pattern to user\"\"\"\n\n class Meta:\n verbose_name = '回复规则(事件>文本消息)'\n verbose_name_plural = '回复规则(事件>文本消息)'\n\n name = models.CharField(blank=True, max_length=50, verbose_name=\"规则命名\",\n help_text=\"可以为空,仅用来标识规则\")\n type = models.CharField(max_length=20,\n choices=MSG_TYPES, verbose_name=\"收到的消息类型(请保持默认)\",\n default='event', )\n event = models.CharField(max_length=30,\n choices=EVENTS,\n default='CLICK', verbose_name=\"事件类型\",\n help_text=\"除非收到的消息类型为“自定义菜单事件或者点击链接跳转事件,否则不要修改本字段”\")\n event_key = models.CharField(blank=True, max_length=255,\n verbose_name=\"event_key或者自定义url\",\n help_text='对于自定义菜单事件和自定义链接跳转事件这个是必填的! ')\n handler = models.ForeignKey(DBTextMsg, verbose_name=\"回复消息\", on_delete=models.CASCADE)\n\n def on_delete(self):\n pass\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n\n\nclass PatternE2PT(models.Model):\n \"\"\"text response pattern to user\"\"\"\n\n class Meta:\n verbose_name = '回复规则(事件>图文消息)'\n verbose_name_plural = '回复规则(事件>图文消息)'\n\n name = models.CharField(blank=True, max_length=50, verbose_name=\"规则命名\",\n help_text=\"可以为空,仅用来标识规则\")\n type = models.CharField(max_length=20,\n choices=MSG_TYPES,\n default='event', verbose_name=\"用户消息类型(请保持默认)\",\n help_text=\"除非你清楚这个字段的含义,否则请不要随意更改\")\n event = models.CharField(max_length=30,\n choices=EVENTS,\n default='CLICK', verbose_name=\"事件类型\")\n event_key = models.CharField(blank=True, max_length=255,\n verbose_name=\"event_key或者自定义url\",\n help_text='对于自定义菜单事件和自定义链接跳转事件这个是必填的! ')\n handler = models.ManyToManyField(\n DBImgTextMsg, verbose_name=\"回复消息\", help_text=\"最多允许五条,不然会出错\")\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n\n def on_delete(self):\n pass\n\n\nclass PatternT2PT(models.Model):\n \"\"\"image_text response pattern to user\"\"\"\n\n class Meta:\n verbose_name = '回复规则(文本>图文消息)'\n verbose_name_plural = '回复规则(文本>图文消息)'\n\n name = models.CharField(blank=True, max_length=50, verbose_name=\"规则命名\",\n help_text=\"可以为空,仅用来标识规则\")\n type = models.CharField(max_length=20,\n choices=MSG_TYPES,\n default='text', verbose_name=\"用户消息类型(请保持默认)\",\n help_text=\"除非你清楚这个字段的含义,否则请不要随意更改\")\n content = models.CharField(max_length=50, blank=True, verbose_name=\"需要匹配的消息\",\n help_text=\"使用正则表达式\")\n handler = models.ManyToManyField(\n DBImgTextMsg, verbose_name=\"回复消息\", help_text=\"最多允许五条,不然会出错\")\n\n def on_delete(self):\n pass\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n\n\nclass PatternT2T(models.Model):\n \"\"\"text response pattern to user\"\"\"\n\n class Meta:\n verbose_name = '回复规则(文本>文本消息)'\n verbose_name_plural = '回复规则(文本>文本消息)'\n\n name = models.CharField(blank=True, max_length=50, verbose_name=\"规则命名\",\n help_text=\"可以为空,仅用来标识规则\")\n type = models.CharField(max_length=20,\n choices=MSG_TYPES,\n default='text', verbose_name=\"用户消息类型(请保持默认)\",\n help_text=\"除非你清楚这个字段的含义,否则请不要随意更改\")\n content = models.CharField(max_length=100, blank=True, verbose_name=\"收到的消息\",\n help_text=\"使用正则表达式\")\n handler = models.ForeignKey(DBTextMsg, on_delete=models.CASCADE, verbose_name=\"响应的消息内容\")\n\n def on_delete(self):\n pass\n\n def __str__(self):\n return '%s %s' % (self.id, self.name)\n","sub_path":"server/service/wechat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"59924370","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom glob import glob \nfrom scipy.spatial.distance import cosine\nimport datetime\n\ndef cosine_series(arr):\n output = [1.0]\n for i in range(len(arr)):\n if i < len(arr)-1:\n a = arr[i]\n b = arr[i+1]\n dist = cosine(a,b)\n output.append(dist)\n return np.array(output)\n \n\ndef visualize_frames(fp,diffs=None):\n video = np.load(fp)\n if diffs is not None:\n frames_idx = (diffs > np.quantile(diffs,.90)) & (diffs > 0.05)\n sample_frames = video[frames_idx]\n else:\n sample_frames = video[0::1,:,:,:]\n plot = sum(frames_idx) >= 3\n \n if plot:\n \n plt.figure(figsize=(10,10))\n plt.imshow(np.hstack(sample_frames))\n plt.show()\n\n plt.figure(figsize=(5,5))\n plt.plot(list(range(len(diffs))),diffs)\n plt.plot(list(range(len(diffs))),diffs*frames_idx,'bo')\n plt.show()\n \n \ndef naive_diff(arr):\n diffs = np.diff(arr)\n sdiffs = np.absolute(np.sum(diffs,axis=1))**24\n return np.insert(sdiffs,0,[1])\n \ndef visualize_features(fp,diff_function=cosine_series):\n nfp = fp.replace('frames','features')\n feats = np.load(nfp)\n sdiffs = diff_function(feats)\n\n return sdiffs\n\ndef visualize_vid(fp):\n sdiffs = visualize_features(fp)\n visualize_frames(fp,diffs=sdiffs)\n\ndef get_duration(scenes):\n return [y-x for x,y in scenes]\n\n\ndef seconds_to_time(list_of_durations):\n start = 0 \n results = []\n for i,n in enumerate(list_of_durations):\n n = int(n)\n \n if i == 0:\n start_time = datetime.timedelta(seconds=0)\n end_time = datetime.timedelta(seconds=n)\n else:\n start_time = end_time\n end_time = start_time + datetime.timedelta(seconds=n)\n\n results.append((str(start_time),str(end_time)))\n\n return results\n\n\ndef extract_scenes(list_of_files,minimum_duration=10):\n \"\"\"Extracts scenes from a list of files\n \n Arguments:\n list_of_files {[List[npy files]]} -- List of filepaths for framelevel representations of videos\n \n Keyword Arguments:\n minimum_duration {int} -- Minimum duration of video in seconds. (default: {10})\n \n Returns:\n Filtered Videos [list] -- List of the videos processed (after filtering for the minimum duration threshold)\n Durations [list[Duration]] -- List of lists containing duration (in seconds) of each scene where List i corresponds to filtered_video[i]\n Number of Scenes [list] -- Derived from Durations -> Mainly the length of the list of scene durations\n Average Duration [list] -- Derived from Durations -> Average Scene length\n Total Video Duration [list] -- Total video duration\n \"\"\"\n\n filtered_videos = [x for x in list_of_files if np.load(x).shape[0] > minimum_duration]\n raw_scenes = [visualize_features(x) for x in filtered_videos]\n scene_ident = [((diffs > np.quantile(diffs,.90)) & (diffs > 0.05)) for diffs in raw_scenes]\n num_scens = [sum(sid) for sid in scene_ident]\n\n video_scenes = []\n for sid in scene_ident:\n idxs = np.array(list(range(len(sid))))[sid]\n scenes = []\n for z,i in enumerate(idxs):\n start = i\n if z == (len(idxs) - 1):\n end = len(sid) - 1\n else:\n end = idxs[z + 1]\n scenes.append([start,end])\n video_scenes.append(scenes)\n\n\n durations = [get_duration(x) for x in video_scenes]\n scenes_timestamp = [seconds_to_time(d) for d in durations]\n num_scenes = [len(x) for x in video_scenes]\n avg_duration = [np.mean(x) for x in durations]\n total_video = [sid.shape[0] for sid in scene_ident]\n total_video_duration_timestamp = [datetime.timedelta(seconds=x) for x in total_video]\n\n return filtered_videos,durations,num_scenes,avg_duration,total_video,scenes_timestamp,total_video_duration_timestamp","sub_path":"winnow/utils/scene_detection.py","file_name":"scene_detection.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"582369078","text":"\"\"\"add server description\n\nRevision ID: 5bb20df3f035\nRevises: ffdd07363665\nCreate Date: 2020-08-21 21:40:14.688639\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5bb20df3f035'\ndown_revision = 'ffdd07363665'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('discord_server_lists', sa.Column('server_description', sa.String(length=200), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('discord_server_lists', 'server_description')\n # ### end Alembic commands ###\n","sub_path":"migration/versions/5bb20df3f035_add_server_description.py","file_name":"5bb20df3f035_add_server_description.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"182542232","text":"from django.contrib import admin\nfrom .models import Task , TaskComment , WeekProject , Event , Wallet , Transistion , Notes \nfrom django.contrib.admin.options import ModelAdmin\n\n\n# Register your models here.\nclass TaskAdmin(ModelAdmin):\n\tlist_display = ['taskname','published', 'author']\n\tsearch_field = ['author']\n\tlist_filter = [ 'author', 'published']\n\n\nadmin.site.register(Task, TaskAdmin)\n\n\nclass NotesAdmin(ModelAdmin):\n\tlist_display = ['note','published', 'author']\n\tsearch_field = ['author']\n\tlist_filter = [ 'author', 'published']\n\n\nadmin.site.register(Notes, NotesAdmin)\n\n\nclass WalletAdmin(ModelAdmin):\n\tlist_display = ['person_name','published', 'author']\n\tsearch_field = ['author']\n\tlist_filter = [ 'author', 'published']\n\n\nadmin.site.register(Wallet, WalletAdmin)\n\n\nclass EventAdmin(ModelAdmin):\n\tlist_display = ['eventname', 'eventon', 'published', 'author']\n\tsearch_field = ['author']\n\tlist_filter = [ 'author', 'published']\n\n\nadmin.site.register(Event, EventAdmin)\n\n\nclass TaskCommentAdmin(ModelAdmin):\n\tlist_display = ['user','task','published', 'comment_content']\n\tsearch_field = ['user','task',]\n\tlist_filter = [ 'user', 'published' ,'task',]\n\n\nadmin.site.register(TaskComment, TaskCommentAdmin)\n\nclass WeekProjectAdmin(ModelAdmin):\n\tlist_display = ['job', 'author','published']\n\tsearch_field = [ 'author', 'published']\n\tlist_filter = ['author', 'published']\n\nadmin.site.register(WeekProject, WeekProjectAdmin)\n\n\nclass TransistionAdmin(ModelAdmin):\n\tlist_display = ['author','published', 'payment_status' , 'payment_mode' , 'comment' , 'amount']\n\tsearch_field = [ 'published' , 'author','published']\n\tlist_filter = ['published', 'author','published']\n\nadmin.site.register(Transistion, TransistionAdmin)\n","sub_path":"todo/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"25873287","text":"import tkinter as tk\nimport pickle as cPickle\nimport numpy as np\nfrom scipy.io.wavfile import read\nfrom sklearn import mixture\nfrom sklearn.mixture import gaussian_mixture as GMM\nfrom featureExtraction import extract_features\nfrom tkinter import ttk\nimport warnings\nimport pyaudio\nimport wave\nimport os\nimport webbrowser\nimport ctypes\nimport wmi\nfrom win32com.client import GetObject\n\n\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nRECORD_SECONDS = 2\n\ndef execute(cmd):\n brightness = None\n if cmd == 'chrome':\n webbrowser.get(\"C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s\").open(\"http://google.com.vn\")\n elif cmd == 'khoa may':\n ctypes.windll.user32.LockWorkStation()\n elif cmd == 'tang do sang':\n objWMI = GetObject('winmgmts:\\\\\\\\.\\\\root\\\\WMI').InstancesOf('WmiMonitorBrightness')\n for obj in objWMI:\n if obj.CurrentBrightness != None:\n brightness = obj.CurrentBrightness # percentage [0-100]\n break\n # print brightness\n if brightness <= 90:\n brightness += 10\n c = wmi.WMI(namespace='wmi')\n methods = c.WmiMonitorBrightnessMethods()[0]\n methods.WmiSetBrightness(brightness, 0)\n elif cmd == 'giam do sang':\n objWMI = GetObject('winmgmts:\\\\\\\\.\\\\root\\\\WMI').InstancesOf('WmiMonitorBrightness')\n for obj in objWMI:\n if obj.CurrentBrightness != None:\n brightness = obj.CurrentBrightness # percentage [0-100]\n break\n if brightness >= 10:\n brightness -= 10\n c = wmi.WMI(namespace='wmi')\n methods = c.WmiMonitorBrightnessMethods()[0]\n methods.WmiSetBrightness(brightness, 0)\n\ndef test():\n display = tk.Text(master=window, height=8, width=40)\n display.config(font=(\"Helvetica\"))\n display.grid(columnspan=2, row=5, sticky='e')\n WAVE_OUTPUT_FILENAME = \"test/test\"\n\n # #path to training data\n source = \"data/\"\n modelpath = \"models/\"\n # test_file = \"development_set_test.txt\"\n # file_paths = open(test_file,'r')\n #\n\n gmm_files = [os.path.join(modelpath, fname) for fname in\n os.listdir(modelpath) if fname.endswith('.gmm')]\n # Load the Gaussian gender Models\n models = [cPickle.load(open(fname, 'rb')) for fname in gmm_files]\n #models = [cPickle.load(open(\"giam do sang.gmm\", 'rb')) ]\n\n speakers = [fname.split(\"/\")[-1].split(\".gmm\")[0] for fname\n in gmm_files]\n\n # Read the test directory and get the list of test audio files\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n # print(\"* recording\")\n\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n\n # print(\"* done recording\")\n stream.stop_stream()\n stream.close()\n p.terminate()\n wf = wave.open(WAVE_OUTPUT_FILENAME + \".wav\", 'wb')\n\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n sr, audio = read(\"test/test.wav\")\n vector = extract_features(audio, sr)\n\n log_likelihood = np.zeros(len(models))\n\n for i in range(len(models)):\n gmm = models[i] # checking with each model one by one\n scores = np.array(gmm.score(vector))\n log_likelihood[i] = scores.sum()\n\n winner = np.argmax(log_likelihood)\n # print \"\\tDETECTED AS: \", speakers[winner]\n display.insert(tk.END, \"DETECTED AS: \" + speakers[winner])\n execute(speakers[winner])\n\ndef start_record():\n display = tk.Text(master=window, height=8, width=40)\n display.config(font=(\"Helvetica\"))\n display.grid(columnspan=2, row=5, sticky='e')\n input = entry.get()\n directory = \"data/\" + str(input) + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n count = 0\n while os.path.exists(directory + \"w\" + str(count)+\".wav\"):\n count += 1\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n # print(\"* recording\")\n display.insert(tk.END, \"\")\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n\n # print(\"* done recording\")\n display.insert(tk.END, \"* Done recording: \" + input + \"/w\" + str(count) + \".wav\")\n display.insert(tk.END, \"\\n\")\n stream.stop_stream()\n stream.close()\n p.terminate()\n wf = wave.open(directory + \"w\" + str(count)+\".wav\", 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n with open(\"data.txt\", \"a\") as file:\n file.write(input + \"/\" + \"w\" + str(count)+\".wav\")\n file.write(\"\\n\")\n file.close()\n\ndef train():\n display = tk.Text(master=window, height=8, width=40)\n display.config(font=(\"Helvetica\"))\n display.grid(columnspan=2, row=5, sticky='e')\n warnings.filterwarnings(\"ignore\")\n\n # path to training data\n source = \"data/\"\n\n # path where training speakers will be saved\n dest = \"models/\"\n train_file = \"data.txt\"\n file_paths = open(train_file, 'r')\n\n count = 1\n # Extracting features for each speaker (5 files per speakers)\n features = np.asarray(())\n for path in file_paths:\n path = path.strip()\n # print path\n # display.insert(tk.END, path)\n # display.insert(tk.END, \"\\n\")\n\n # read the audio\n sr, audio = read(source + path)\n\n # extract 40 dimensional MFCC & delta MFCC features\n vector = extract_features(audio, sr)\n\n if features.size == 0:\n features = vector\n else:\n features = np.vstack((features, vector))\n # when features of 5 files of speaker are concatenated, then do model training\n if count == 5:\n gmm = GMM(n_components=16, n_iter=200, covariance_type='diag', n_init=3)\n gmm.fit(features)\n\n # dumping the trained gaussian model\n picklefile = path.split(\"/\")[0] + \".gmm\"\n # print picklefile\n # f = open(dest + picklefile, 'w+')\n cPickle.dump(gmm, open(dest + picklefile, 'w+'))\n # print '+ modeling completed for word:', picklefile, \" with data point = \", features.shape\n phrase = \"Modeling completed: \" + picklefile\n display.insert(tk.END, phrase)\n display.insert(tk.END, \"\\n\")\n features = np.asarray(())\n count = 0\n count = count + 1\n\nwindow = tk.Tk()\nwindow.style = ttk.Style()\nwindow.style.theme_use(\"default\")\nwindow.title(\"Voice Command\")\nwindow.geometry(\"380x300\")\ntitle = tk.Label(text=\"Voice Command\")\ntitle.config(font=(\"Helvetica\", 30))\ntitle.grid(columnspan=2, row=0)\ntrainlb = tk.Label(text=\"Training Models\")\ntrainlb.grid(column=0, row=1, sticky='e')\nbtnTrain = tk.Button(text=\"Train\", width=28, command=train)\nbtnTrain.grid(column=1, row=1)\ntrainlb = tk.Label(text=\"Voice Recognition\")\ntrainlb.grid(column=0, row=2, sticky='e')\nbtnTest = tk.Button(text=\"Voice\", width=28, command=test)\nbtnTest.grid(column=1, row=2)\ntrainlb = tk.Label(text=\"New Data\")\ntrainlb.grid(column=0, row=3, sticky='e')\nentry = tk.Entry(width=28)\nentry.grid(column=1, row=3)\ntrainlb = tk.Label(text=\"Record New Training Data\")\ntrainlb.grid(column=0, row=4, sticky='e')\nbtnRecord = tk.Button(text=\"Record\", width=28, command=start_record)\nbtnRecord.grid(column=1, row=4)\ndisplay = tk.Text(master=window, height=8, width=40)\ndisplay.grid(columnspan=2, row=5, sticky='e')\nwindow.mainloop()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"303146578","text":"# coding=utf-8\nfrom datetime import datetime\nimport urllib2\nimport urllib\n\nfrom rest_framework.exceptions import NotFound\n\nfrom api.models.subscription import Subscription\nfrom api.models.subscription_type import SubscriptionType\nfrom api.models.company import Company\nfrom base_service import get_object\nfrom subscription_type import get_subscription_type_by_id\nfrom company import get_company_by_id\nfrom api.utils.exceptions.subscription import SubscriptionStatusException, \\\n SubscriptionNotFoundException\n\n\ndef create_subscription(serializer, support):\n data = serializer.validated_data\n if support.is_booker:\n data['status'] = 2\n company_id = data.get('company_id', 0)\n data['start_dt'] = datetime.now()\n if support.is_admin:\n if data['status'] != 1:\n raise SubscriptionStatusException()\n company_id = support.company.id\n company = get_company_by_id(company_id)\n\n subscription_type_id = data.get('subscription_type_id', 0)\n subscription_type = get_subscription_type_by_id(subscription_type_id)\n \n subscription_id = serializer.create(data, company, subscription_type)\n if support.is_admin:\n return subscription_id \n\n\ndef update_subscription(serializer, subscription):\n data = serializer.validated_data\n if not data['status'] in [2,3]:\n raise SubscriptionStatusException()\n company = get_company_by_id(data['company_id'])\n if data['status'] == 2:\n data['start_dt'] = datetime.now()\n serializer.update(subscription, data)\n\n\ndef get_all_subscriptions(support):\n if support.is_booker or support.is_superadmin:\n subscriptions = Subscription.objects.all().order_by('status','-purchase_dt')\n elif support.is_admin:\n company = support.company\n subscriptions = Subscription.objects.filter(company=company).order_by('status','-purchase_dt')\n else:\n subscriptions = None\n return subscriptions\n\n\ndef create_payonline_link(sub_id):\n link = 'https://payonline.com?order_id={}'.format(sub_id)\n return link\n\n\ndef get_subscription_by_id(id):\n try:\n subscription = Subscription.objects.get(id=id)\n except Subscription.DoesNotExist:\n raise SubscriptionNotFoundException()\n return subscription\n\n\ndef check_transaction(order_id):\n url = 'http://volstelecom.ru'\n \"\"\"user = ''\n password = ''\n\n values = {'order_id' : order_id,\n 'user' : user,\n 'password' : password }\n data = urllib.urlencode(values)\n req = urllib2.Request(url, data)\n response = urllib2.urlopen(req)\"\"\"\n response = urllib2.urlopen(url)\n return response","sub_path":"api/service/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"305916404","text":"# -*- coding: utf-8 -*- \n#coding:utf8\n\nfrom flask import render_template, url_for, flash, request, redirect\nfrom app import app, db\nfrom app.models import *\nfrom app.forms import *\nimport time\nfrom datetime import datetime\nimport os\nimport secrets\nfrom PIL import Image\nimport boto3\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport json\n# Essentially how we are going to write routes in general\nimport codecs\nimport sys\nsys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach())\n\nupload_bucket = 'mary-app-upload'\nos.environ['LC_ALL'] = \"en_US.UTF-8\"\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template(\"home.html\")\n\n# save picture on s3\ndef save_picture(form_picture):\n s3 = boto3.resource('s3')\n\n # first: save it on local machine\n filename = form_picture.filename\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(filename)\n picture_fn = random_hex + f_ext\n picture_path = os.path.join(app.root_path, 'static/upload/image', picture_fn)\n # output_size = (125, 125)\n i = Image.open(form_picture)\n # i.thumbnail(output_size)\n i.save(picture_path)\n\n # then: upload it to s3 bucket\n response = s3.meta.client.upload_file(os.path.join(app.root_path, 'static/upload/image', picture_fn), \n upload_bucket, \n \"/\".join((\"image\", picture_fn)))\n \n print(response)\n return picture_fn\n\n# text detection\ndef textDetection(filename):\n client = boto3.client('rekognition', region_name='us-east-1')\n boto3.set_stream_logger('')\n response = client.detect_text(\n Image={ \n 'S3Object': {\n 'Bucket': upload_bucket,\n 'Name': \"/\".join((\"image\", filename))\n }\n }\n )\n\n texts = response['TextDetections']\n result = \"\"\n\n # only output the lines\n for text in texts:\n if text['Type'] == 'LINE':\n result += text['DetectedText']\n result += \" \"\n \n return result\n\n# translate text into target language\ndef translation(original,language):\n client = boto3.client('translate', region_name='us-east-1')\n response = client.translate_text(\n Text=original,\n SourceLanguageCode='en',\n TargetLanguageCode=language\n )\n return response['TranslatedText']\n\n# [text detection]\n@app.route(\"/detect\", methods=['GET', 'POST'])\ndef detect():\n form = DetectForm()\n # get file, save the s3 url of it\n if form.validate_on_submit():\n # save it \n flash('Your image has been sent to server! Please wait...', 'info')\n f = form.picture.data\n picture_file = save_picture(f)\n detect = Detect(image = picture_file)\n db.session.add(detect)\n db.session.commit()\n # flash('Your image has been sent to server! Please wait...', 'info')\n\n # detect\n # content = textDetection(picture_file)\n # translated = translation(content,form.data['language'])\n # return render_template('detect.html', form=form, title=\"Text Dectection\",content=textDetection(picture_file), img = os.path.join(app.root_path, 'static/upload/image', picture_file),translate=translated)\n\n try:\n content = textDetection(picture_file)\n flash('Text detection completed!', 'success')\n print('Text detection completed!')\n # delete local file\n temp = os.path.join(app.root_path, 'static/upload/image', picture_file)\n os.remove(temp)\n print('Remove file completed!')\n\n # translate\n if form.data['language'] and form.data['language'] != 'null':\n translated = translation(content,form.data['language'])\n print('Translate completed!')\n return render_template('detect.html', form=form, title=\"Text Dectection\",content=content, img = os.path.join(app.root_path, 'static/upload/image', picture_file),translate=translated)\n\n\n return render_template('detect.html', form=form, title=\"Text Dectection\",content=content, img = os.path.join(app.root_path, 'static/upload/image', picture_file))\n except:\n flash('No text detected!', 'warning')\n return render_template('detect.html', form=form, title=\"Text Dectection\",content=\"no text detected\", img = os.path.join(app.root_path, 'static/upload/image', picture_file))\n\n return render_template('detect.html', form=form, title=\"Text Dectection\")\n\n\n\n# save picture on s3\ndef save_audio(form_audio):\n s3 = boto3.resource('s3')\n\n # first: save it on local machine\n filename = form_audio.filename\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(filename)\n audio_fn = random_hex + f_ext\n audio_path = os.path.join(app.root_path, 'static/upload/audio', audio_fn)\n form_audio.save(audio_path)\n print('save audio locally completed!')\n\n # then: upload it to s3 bucket\n response = s3.meta.client.upload_file(os.path.join(app.root_path, 'static/upload/audio', audio_fn), \n upload_bucket, \n \"/\".join((\"audio\", audio_fn)))\n \n print('upload to s3 completed')\n return audio_fn\n\n# start a transcribe job, and return the job name\ndef perform_transcribe(email,title,filename):\n # create transcribe job\n client = boto3.client('transcribe', region_name='us-east-1')\n print(filename)\n _, f_ext = os.path.splitext(filename)\n\n # combine user email and title\n jobname = \"-JOBNAME-\".join((email,title))\n response = client.start_transcription_job(\n TranscriptionJobName = jobname,\n LanguageCode='en-AU',\n MediaFormat=f_ext.replace('.',''),\n Media={\n 'MediaFileUri': \"/\".join(('s3://mary-app-upload/audio',filename))\n },\n OutputBucketName='mary-app-output'\n )\n return jobname\n\n# add the job into dynamodb for certain user\ndef add_to_list(jobname):\n email,title = jobname.split(\"-JOBNAME-\")\n dynamodb = boto3.resource('dynamodb',region_name='us-east-1')\n table = dynamodb.Table('user')\n\n # try to search user in database\n response = table.get_item(\n Key={\n 'email':email\n }\n )\n print('db response:')\n print(response)\n\n \n\n # if the list exists, so the user has already in the db, just append list\n if 'Item' in response:\n item = response['Item']\n original_list = item[u'lists']\n original_list.add(title)\n table.update_item(\n Key={\n 'email': email\n },\n UpdateExpression='SET lists = :val1',\n ExpressionAttributeValues={\n ':val1': original_list\n }\n )\n else:\n table.put_item(\n Item={\n 'email': email,\n 'lists':{title}\n }\n )\n\n\n# [transcribe]\n@app.route(\"/transcribe\", methods=['GET', 'POST'])\ndef transcribe():\n # authentication\n if current_user.is_authenticated:\n form = TranscribeForm() \n if form.validate_on_submit():\n print('Form submitted completed!')\n flash('Your audio has been sent to server! Please wait...', 'info')\n f = form.audio.data\n\n # save file on S3\n audio_file = save_audio(f) \n # title = form.title.data \n # transcribe = Transcribe(title=title,audio = audio_file)\n # db.session.add(transcribe)\n # db.session.commit()\n print('Save audio completed!')\n\n # # using the file on S3 to create a new transcribe job\n jobname = perform_transcribe(str(current_user.id),form.title.data,audio_file)\n print('Perform tasks completed!')\n # delete local file\n temp = os.path.join(app.root_path, 'static/upload/audio', audio_file)\n os.remove(temp)\n print('Local audio removed completed!')\n \n # # store the job into job list of current user in dynamodb\n add_to_list(jobname)\n print(\"add to list successful!\")\n\n flash('Your new transcribe job has been created! It often takes a while to finish the job, you can check it later in the list!', 'success')\n return redirect(url_for(\"home\"))\n return render_template('transcribe.html', form=form, title=\"Transcribe\")\n flash('Note: You have to login to use this function', 'info')\n return redirect(url_for(\"login\"))\n\n\ndef getJobStatus(jobname):\n\n client = boto3.client('transcribe', region_name='us-east-1')\n\n # combine user email and title\n \n response = client.get_transcription_job(\n TranscriptionJobName = jobname\n )\n\n return response['TranscriptionJob']\n\ndef getJobList(email):\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n table = dynamodb.Table('user')\n response = table.get_item(\n Key={\n 'email': email\n }\n )\n if 'Item' in response:\n jobs = response['Item']['lists']\n result = []\n for job in jobs:\n jobname = \"-JOBNAME-\".join((email,job))\n response = getJobStatus(jobname)\n print(response)\n status = response['TranscriptionJobStatus']\n if status == 'COMPLETED':\n element = {'title':job,'status':status,'url':response['Transcript']['TranscriptFileUri']}\n result.append(element.copy())\n else:\n element = {'title':job,'status':status}\n result.append(element.copy())\n return result\n else:\n return None\n\n\n# [check transcribtion list]\n@app.route(\"/lists\", methods=['GET', 'POST'])\ndef lists():\n # get all jobs from dynamodb\n if current_user.is_authenticated:\n jobs = getJobList(str(current_user.id))\n return render_template('lists.html', title=\"Lists\", jobs=jobs)\n return redirect(url_for(\"home\"))\n\n\n# Login route \n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n # if logged in, redirect to home page\n if current_user.is_authenticated:\n return redirect(url_for(\"home\"))\n form = LoginForm()\n if form.validate_on_submit():\n customer = Customer.query.filter_by(email=form.email.data).first()\n if customer and customer.password == form.password.data:\n login_user(customer, remember=form.remember.data)\n flash(f\"You're now logged in\", \"success\")\n return redirect(url_for(\"home\"))\n else:\n flash(\"Invalid email and password combination.\", \"danger\")\n return render_template(\"login.html\", title=\"Log In\", form=form)\n\n# sign up\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef signup():\n # if logged in, redirect to home page\n if current_user.is_authenticated:\n return redirect(url_for(\"home\"))\n form = SignUpForm()\n if form.validate_on_submit():\n customer = Customer(email=form.email.data, password=form.password.data)\n db.session.add(customer)\n db.session.commit()\n flash(f\"Account successfully created!\", \"success\")\n return redirect(url_for(\"home\"))\n return render_template(\"signup.html\", title=\"Sign Up\", form=form)\n\n\n# Logout route\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n flash(f\"You are now logged out.\", \"success\")\n return redirect(url_for('home'))\n\n\ndef getTranscribeContentURL(jobname):\n client = boto3.client('transcribe', region_name='us-east-1')\n # combine user email and title\n \n response = client.get_transcription_job(\n TranscriptionJobName = jobname\n )\n return response['TranscriptionJob']['Transcript']['TranscriptFileUri']\n\n# get the content of audio\ndef getContent(url):\n s3 = boto3.resource('s3')\n strings = url.split(\"/\")\n filename = strings[-1]\n obj = s3.Object('mary-app-output', filename)\n # body = obj.get()['Body'].read()\n file_content = obj.get()['Body'].read().decode('utf-8')\n json_content = json.loads(file_content)\n # data = json.load(body)\n results = json_content['results']\n content = results['transcripts']\n return content[0]['transcript']\n\n\ndef generateLinkToS3(id,title,content,translated):\n s3 = boto3.resource('s3')\n\n print(\"original translated class:\")\n print(translated.__class__)\n # generate local file\n filename = \"-JOBNAME-\".join((str(id),title))\n filename += '.txt'\n path = os.path.join(app.root_path,'static/result', filename)\n # create file locally\n f= open(path,\"w+\",encoding='utf-8')\n f.write(\"Original content: \\n\")\n f.write(content)\n f.write(\"\\nTranslated content: \\n\")\n print(\"translated class:\")\n print (translated.__class__)\n print(\"translated:\")\n print(translated)\n f.write(translated)\n f.close\n\n f = open(path,\"r\",encoding='utf-8')\n print(f.read())\n f.close\n # upload to s3 and make it public\n upload_key = \"/\".join((\"from-audio\", filename))\n # print(\"path: \"+os.path.join(app.root_path,'static\\\\result', filename))\n # print(\"upload key: \" + upload_key)\n response = s3.meta.client.upload_file(\n os.path.join(app.root_path,'static/result', filename), \n 'mary-app-output', \n upload_key,\n ExtraArgs={'ACL': 'public-read'}\n )\n\n # generte url of it\n url = \"https://\"+\"mary-app-output.s3.amazonaws.com\"+\"/from-audio/\"+filename\n return url\n\n\ndef sendEmail(id,email, url):\n # check topic\n target_topic = 'USERID-'+str(id)+'-'\n topicArn = ''\n client = boto3.client('sns',region_name='us-east-1')\n response = client.list_topics()\n for topic in response['Topics']:\n if target_topic in topic['TopicArn']:\n topicArn = topic['TopicArn']\n break\n\n # if topic is not created, then create and subscribe\n if topicArn == '':\n # create topic\n response = client.create_topic(\n Name=target_topic\n )\n # print(response)\n\n # get topic arn from response\n topicArn = response['TopicArn']\n\n # send subscription link\n response = client.subscribe(\n TopicArn=topicArn,\n Protocol='email',\n Endpoint=email\n )\n # print(response)\n return \"first\"\n # topic created\n else:\n # check subscription\n response = client.list_subscriptions_by_topic(\n TopicArn=topicArn\n )\n # print(response)\n\n status = response['Subscriptions'][0]['SubscriptionArn']\n\n # if not subscribe, resend link\n if status == 'PendingConfirmation':\n response = client.subscribe(\n TopicArn=topicArn,\n Protocol='email',\n Endpoint=email\n )\n print(response)\n return \"pending\"\n # subscribed\n else:\n response = client.publish(\n TopicArn=topicArn,\n Message='url:'+url,\n Subject='Your transcription and translation job ready to download',\n )\n return \"success\"\n\n\n\n\n# show specific job\n@app.route('/job/',methods=['GET', 'POST'])\ndef job(title):\n if current_user.is_authenticated:\n form = JobForm()\n jobname = \"-JOBNAME-\".join((str(current_user.id),title))\n url = getTranscribeContentURL(jobname)\n content = getContent(url)\n\n # form2 = GetFileForm(prefix='form2')\n # translated = \"\"\n # do translation work\n if form.validate_on_submit:\n if form.data['language'] and form.data['language'] != 'null':\n translated = translation(content,form.data['language'])\n # form.contentTest.data = content\n print(translated.__class__)\n print(translated)\n\n url = generateLinkToS3(current_user.id,title,content,translated)\n response = sendEmail(current_user.id,current_user.email,url)\n if response == \"first\":\n flash(\"Please subscribe for first time!\",\"info\")\n if response == \"pending\":\n flash(\"Please comfirm subscription! The confirmation link has been re-sent to your email address!\",\"danger\")\n if response == \"success\":\n flash(\"The link has been sent to your email address! It may takes several minutes to receive it, please wait a while ...\",\"success\")\n return render_template('job.html', form=form, title=\"Job\",content=content,translate=translated)\n\n # check whether want to download file\n \n # print(request.form['content'])\n # if form2.validate_on_submit:\n # if form.request['']\n \n return render_template(\"job.html\", title=\"Job\", form=form, content=content)\n return redirect(url_for('home'))\n \n\n\n","sub_path":"codes/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":16764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"225185559","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport time\nfrom selenium import webdriver\nimport os\n\n\n\n# 找到指定的课程名称,未找到返回0\ndef enterStudy(browser):\n studys = browser.find_elements_by_css_selector(\"button[class='btn bg-primary']\")\n for s in studys:\n if studyName in s.find_element_by_xpath(\"./..\").find_element_by_xpath(\"./..\").find_element_by_xpath(\n \"./..\").find_element_by_xpath(\"./h3\").text:\n s.click()\n return 1\n return 0\n\n\n# 1.找到办公室管理的进入学习按钮\ndef enterTest(browser, xkurl):\n enterStudy(browser) # 进入学习的按钮会新开一个tab\n time.sleep(1)\n windowstabs = browser.window_handles\n if len(windowstabs) > 1: # 如果没找到课程,至少别报错\n browser.switch_to.window(windowstabs[1])\n try:\n browser.find_element_by_xpath('//i[@class=\"funGsClose closeBtn\"]').click() # find一下,保证新页面加载完成\n except:\n pass\n browser.get(xkurl) # 先考形1\n else:\n return 0\n\n\n# 2.立即考试.判断一下,防止多次考试\ndef readyToTest(browser):\n time.sleep(2)\n readyTest = browser.find_element_by_xpath('//input[@type=\"submit\"]')\n if '再次' not in readyTest.get_attribute(\"value\"):\n if '现在' in readyTest.get_attribute(\"value\") or '继续' in readyTest.get_attribute(\"value\"):\n readyTest.click()\n time.sleep(2)\n return 1\n return 0\n\n\n# 论坛形式试卷进入方法\ndef readyToTestForum(browser):\n readyTest = browser.find_element_by_xpath('//button[starts-with(@id,\"single_\")]')\n readyTest.click()\n return 1\n\n\n\n# 等待三秒,让我们看到卷子已经答题提交完成,然后关tab,切到第一个tab,再进学习\ndef wait3AndCloseTab(browser):\n time.sleep(2)\n browser.close()\n browser.switch_to.window(browser.window_handles[0])\n time.sleep(1.5)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\noption = webdriver.ChromeOptions()\noption.add_argument('disable-infobars')\nbrowser = webdriver.Chrome(chrome_options=option)\n# browser.maximize_window() #max_window\n\nbrowser.get('http://student.ouchn.cn/')\nbrowser.implicitly_wait(15) # wait\n\n\n#取到科目名\nstudyName = os.path.basename(__file__).split('.')[0]\n\n\n\n\n\n\nxingkao1 = 'http://hubei.ouchn.cn/mod/quiz/view.php?id=437530'\nxingkao2 = 'http://hubei.ouchn.cn/mod/quiz/view.php?id=437536'\n\n\n\n\n\n\nfile = open(studyName + '.txt')\nkeys = []\nfor line in file.readlines():\n keys.append(line.strip())\n\nfor key in keys:\n username = key.split(\"\\t\")[0]\n password = key.split(\"\\t\")[1]\n\n # login\n browser.find_element_by_id(\"username\").send_keys(username)\n browser.find_element_by_id(\"password\").send_keys(password)\n browser.find_element_by_css_selector('button[value=\"login\"]').click()\n # enter study...此处要注意,不同账号进来看到的开放大学指南的位置不同,要动态抓元素...2019年11月13日09:10:54发现不用抓元素,直接根据URL进入国开开放指南页面,并且形考1-5的URL也是指定的,所以不用抓元素\n\n if enterTest(browser, xingkao1) != 0:\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer1(browser)\n # saveTest2GetAnswer(browser, proxy)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao2)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer2(browser)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao3)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer3(browser)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao4)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer4(browser)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao5)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer5(browser)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao6)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer6(browser)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao7)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer7(browser)\n wait3AndCloseTab(browser)\n\n enterTest(browser, xingkao8)\n if readyToTest(browser) == 1: # 除非没考过,否则就关闭tab,重进学习页面,考下一个形考\n writeAnswer8(browser)\n wait3AndCloseTab(browser)\n\n # 5个形考走完提交之后直接换账号\n browser.get(\"http://passport.ouchn.cn/Account/Logout?logoutId=student.ouchn.cn\")\n time.sleep(6)\n","sub_path":"网络实用技术基础.py","file_name":"网络实用技术基础.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"266446764","text":"#!/usr/bin/python\n#\n# Copyright (c) 2017-2019 NVIDIA CORPORATION. All rights reserved.\n# This file is part of the WebDataset library.\n# See the LICENSE file for licensing terms (BSD-style).\n#\n\n\n\"\"\"Train PyTorch models directly from POSIX tar archive, locally\nor over HTTP connections.\n\"\"\"\n\n__all__ = \"WebDataset tariterator default_handlers imagehandler\".split()\n\nimport sys\nimport random\nfrom functools import reduce, wraps\n\nfrom .checks import checktype\nfrom . import autodecode\n\n\ndef reraise_exception(exn):\n raise exn\n\n\ndef curried(f):\n \"\"\"A decorator for currying functions in the first argument.\"\"\"\n\n @wraps(f)\n def wrapper(*args, **kw):\n def g(x):\n return f(x, *args, **kw)\n\n return g\n\n return wrapper\n\n\ndef compose2(f, g):\n \"\"\"Compose two functions, g(f(x))\"\"\"\n return lambda x: g(f(x))\n\n\ndef compose(*args):\n \"\"\"Compose a sequence of functions (left-to-right)\"\"\"\n return reduce(compose2, args)\n\n\ndef pipeline(source, *args):\n \"\"\"Write an input pipeline; first argument is source, rest are filters.\"\"\"\n if len(args) == 0:\n return source\n return compose(*args)(source)\n\n\ndef getfirst(a, keys, default=None, missing_is_error=True):\n if isinstance(keys, str):\n assert \" \" not in keys\n keys = keys.split(\";\")\n for k in keys:\n if k in a:\n return a[k]\n if missing_is_error:\n raise ValueError(f\"didn't find {keys} in {list(a.keys())}\")\n return default\n\n\ndef parse_field_spec(fields):\n if isinstance(fields, str):\n fields = fields.split()\n return [field.split(\";\") for field in fields]\n\n\ndef transform_with(sample, transformers):\n \"\"\"Transform a list of values using a list of functions.\n\n sample: list of values\n transformers: list of functions\n\n \"\"\"\n checktype(sample, (tuple, list))\n if transformers is None or len(transformers) == 0:\n return sample\n result = list(sample)\n ntransformers = len(transformers)\n for i in range(len(sample)): # skipcq: PYL-C0200\n f = transformers[i % ntransformers]\n if f is not None:\n result[i] = f(sample[i])\n return result\n\n\ndef transformer(transformers):\n \"\"\"Curried version of `transform_with`.\n\n transformers :\n\n \"\"\"\n\n def f(x):\n return transform_with(x, transformers)\n\n return f\n\n\n# @curried\n# def associate(data, associator):\n# \"\"\"Extract the given fields and return a tuple.\n# \"\"\"\n# for sample in data:\n# if callable(associator):\n# extra = associator(sample[\"__key__\"])\n# else:\n# extra = associator.get(sample[\"__key__\"], {})\n# sample.update(extra)\n# yield sample\n\n\n# @curried\n# def extract(data, *fields):\n# \"\"\"Extract the given fields and return a tuple.\n# \"\"\"\n# for sample in data:\n# if fields is None:\n# yield sample\n# else:\n# yield [getfirst(sample, f) for f in fields]\n\n\n@curried\ndef map_stream(data, f=None, handler=reraise_exception):\n \"\"\"Map entire samples using the given function.\n\n data: iterator\n f: function from samples to samples\n returns: iterator over transformed samples\n\n \"\"\"\n\n if f is None:\n\n def f(x): # skipcq: PYL-E0102\n return x\n\n for sample in data:\n try:\n result = f(sample)\n except Exception as exn:\n if handler(exn):\n continue\n else:\n break\n if isinstance(sample, dict) and isinstance(result, dict):\n result[\"__key__\"] = sample.get(\"__key__\")\n yield result\n\n\n@curried\ndef info(data, fmt=None, n=3, every=-1, width=50, stream=sys.stderr, name=\"\"):\n for i, sample in enumerate(data):\n if i < n or (every > 0 and (i + 1) % every == 0):\n if fmt is None:\n print(\"---\", name, file=stream)\n for k, v in sample.items():\n print(k, repr(v)[:width], file=stream)\n else:\n print(fmt.format(**sample), file=stream)\n yield sample\n\n\n@curried\ndef shuffle(data, bufsize=1000, initial=100):\n \"\"\"Shuffle the data in the stream.\n\n This uses a buffer of size `bufsize`. Shuffling at\n startup is less random; this is traded off against\n yielding samples quickly.\n\n data: iterator\n bufsize: buffer size for shuffling\n returns: iterator\n\n \"\"\"\n initial = min(initial, bufsize)\n buf = []\n startup = True\n for sample in data:\n if len(buf) < bufsize:\n try:\n buf.append(next(data)) # skipcq: PYL-R1708\n except StopIteration:\n pass\n k = random.randint(0, len(buf) - 1)\n sample, buf[k] = buf[k], sample\n if startup and len(buf) < initial:\n buf.append(sample)\n continue\n startup = False\n yield sample\n for sample in buf:\n yield sample\n\n\n@curried\ndef select(data, predicate):\n for sample in data:\n if predicate(sample):\n yield sample\n\n\ndef decode(decoder=\"rgb\", handler=reraise_exception):\n f = autodecode.make_decoder(decoder)\n\n def stage(data):\n for sample in data:\n assert isinstance(sample, dict), sample\n try:\n decoded = f(sample)\n except Exception as exn: # skipcq: PYL-W0703\n if handler(exn):\n continue\n else:\n break\n yield decoded\n\n return stage\n\n\ndef map(f, handler=reraise_exception):\n def stage(data):\n for sample in data:\n try:\n result = f(sample)\n except Exception as exn:\n if handler(exn):\n continue\n else:\n break\n if isinstance(sample, dict) and isinstance(result, dict):\n result[\"__key__\"] = sample.get(\"__key__\")\n yield result\n\n return stage\n\n\ndef rename(handler=reraise_exception, **kw):\n def stage(data):\n for sample in data:\n try:\n yield {\n k: getfirst(sample, v, missing_is_error=True) for k, v in kw.items()\n }\n except Exception as exn:\n if handler(exn):\n continue\n else:\n break\n\n return stage\n\n\ndef associate(associator, **kw):\n def stage(data):\n for sample in data:\n if callable(associator):\n extra = associator(sample[\"__key__\"])\n else:\n extra = associator.get(sample[\"__key__\"], {})\n sample.update(extra) # destructive\n yield sample\n\n return stage\n\n\ndef map_dict(handler=reraise_exception, **kw):\n assert len(list(kw.keys())) > 0\n for f in kw.values():\n assert callable(f)\n\n def stage(data):\n for sample in data:\n assert isinstance(sample, dict)\n try:\n for k, f in kw.items():\n sample[k] = f(sample[k])\n except Exception as exn:\n if handler(exn):\n continue\n else:\n break\n yield sample\n\n return stage\n\n\ndef to_tuple(*args, handler=reraise_exception):\n if len(args) == 1 and isinstance(args[0], str) and \" \" in args[0]:\n args = args[0].split()\n\n def stage(data):\n for sample in data:\n try:\n yield tuple([getfirst(sample, f, missing_is_error=True) for f in args])\n except Exception as exn:\n if handler(exn):\n continue\n else:\n break\n\n return stage\n\n\ndef map_tuple(*args, handler=reraise_exception):\n def stage(data):\n for f in args:\n assert callable(f)\n for sample in data:\n assert isinstance(sample, (list, tuple))\n assert len(args) == len(\n sample\n ), f\"len(args) {len(args)} != len(sample) {len(sample)}\"\n sample = list(sample)\n try:\n for i in range(min(len(args), len(sample))):\n sample[i] = args[i](sample[i])\n except Exception as exn:\n if handler(exn):\n continue\n else:\n break\n yield tuple(sample)\n\n return stage\n","sub_path":"webdataset/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"159068768","text":"#***** O BRABO PRA DAR APPEND EM DIC*******\n'''def agrupa_por_idade(dic):\n novodic={}\n for k,v in dic.items():\n if v <=11:\n novodic.setdefault('crianca',[]).append(k)\n elif v>=12 and v<=17:\n novodic.setdefault('adolescente',[]).append(k)\n elif v>=18 and v<=59:\n novodic.setdefault('adulto',[]).append(k)\n else:\n novodic.setdefault('idoso',[]).append(k)\n\n return novodic '''\ndef agrupa_por_idade(dic):\n novodic={'criança':[],'adolescente':[],'adulto':[],'idoso':[]}\n for k,v in dic.items():\n if v <=11:\n novodic['criança'].append(k)\n elif v>=12 and v<=17:\n novodic['adolescente'].append(k)\n elif v>=18 and v<=59:\n novodic['adulto'].append(k)\n else:\n novodic['idoso'].append(k)\n return novodic\n ","sub_path":"backup/user_211/ch153_2020_06_21_19_10_08_089406.py","file_name":"ch153_2020_06_21_19_10_08_089406.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"551918948","text":"# coding=utf-8\n# create database tlog character set utf8mb4;\nimport logging\nimport pandas as pd\nfrom app_mysql_backend import Appversion, PageInfo, db, user\n\nFORMAT = '%(asctime)-15s %(levelname)s %(message)s'\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\n\n# df = pd.read_excel('分页面打点纪录点.xlsx', sheetname=['4.6', '4.7', '4.8', '4.9', '4.10'])\ndf = pd.read_excel('分页面打点记录点 new.xlsx', sheetname=['4.11', '4.12', '4.13'])\n# sheetname = ['4.6', '4.7', '4.8', '4.9', '4.10']\n\n\nsheetname = ['4.11', '4.12', '4.13']\n\n\ndef exl2mysql():\n for sheet in sheetname:\n df[sheet] = df[sheet].fillna('')\n appversion = Appversion(sheet)\n db.session.add(appversion)\n db.session.commit()\n app_id = appversion.id\n\n for i in df[sheet].index:\n logging.info('start to push line %d appversion: %s' % (i + 1, sheet))\n row = df[sheet].ix[i]\n if not row[u'页面'].strip():\n logging.error(u'该页面字段为空,跳过! appversion:%s, line: %d' % (sheet, i + 1))\n continue\n\n if row['se_category'].strip() or row['se_action'].strip():\n pageinfo_and = PageInfo(page=row[u'页面'], event=row[u'事件'], objects=row[u'对象'],\n appversion=appversion, types=row['type'], sub_type=row['sub_type'],\n pm=row.get(u'产品负责人', ''), page_key=row['page_key'], se_action=row[u'se_action'],\n se_category=row[u'se_category'], platform='Android',\n notes=row[u'额外信息'] + ', ' + row['Android'])\n db.session.add(pageinfo_and)\n\n if row['se_category.1'].strip() or row['se_action.1'].lstrip():\n pageinfo_ios = PageInfo(page=row[u'页面'], event=row[u'事件'], objects=row[u'对象'],\n appversion=appversion, types=row['type'], sub_type=row['sub_type'],\n pm=row.get(u'产品负责人', ''), page_key=row['page_key'],\n se_action=row[u'se_action.1'],\n se_category=row[u'se_category.1'], platform='iOS',\n notes=row[u'额外信息'] + ', ' + row['iOS'])\n db.session.add(pageinfo_ios)\n\n if row['se_category.2'].strip() or row['se_action.2'].strip():\n pageinfo_h5 = PageInfo(page=row[u'页面'], event=row[u'事件'], objects=row[u'对象'],\n appversion=appversion, types=row['type'], sub_type=row['sub_type'],\n pm=row.get(u'产品负责人', ''), page_key=row['page_key'], se_action=row[u'se_action.2'],\n se_category=row[u'se_category.2'], platform='H5',\n notes=row[u'额外信息'] + ', ' + row['H5'])\n db.session.add(pageinfo_h5)\n db.session.commit()\n\nif __name__ == '__main__':\n # db.create_all()\n # admin = user('admin', 'admin', 'admin')\n # db.session.add(admin)\n # db.session.commit()\n exl2mysql()\n","sub_path":"excel2mysql.py","file_name":"excel2mysql.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"388074579","text":"#!/usr/bin python3\n\nimport time, argparse\nfrom datetime import datetime\nfrom libSegment import SegmentDisplay\nimport paho.mqtt.client as mqtt\n\nSCREENS = [\"clock\", \"date\", \"temperature\", \"weekday\", \"Servo\", \"Volume\"]\nEncoderAction = {\"DIR\":0, \"SW\":0}\nDisplayAction = {\"MQTT_TEXT\":0}\n\nSENSOR_DATA = {\"temperature\":0, \"humidity\":0, \"eCO2\":0, \"TVOC\":0}\n\nACTUATOR_DATA = {\"Servo\":[0, 0, 0], \"Volume\":0}\n\n# MQTT\nMQTT_CTRL_TOPIC = {\n \"cmd\":\"HomeHub/hw/frontprint/display/cmd\",\n \"set\":\"HomeHub/hw/frontprint/display/set\",\n \"EnDIR\":\"HomeHub/hw/frontprint/encoder/direction\",\n \"EnSW\":\"HomeHub/hw/frontprint/input/EnSW\",\n \"Volume\":\"HomeHub/media/volume\",\n \"Servo\":\"HomeHub/hw/firmata/servo\"\n}\nMQTT_SENSOR_TOPIC = {\n \"temperature\":\"HomeHub/sensors/temperature\",\n \"humidity\":\"HomeHub/sensors/humidity\",\n \"eCO2\":\"HomeHub/sensors/eCO2\",\n \"TVOC\":\"HomeHub/sensors/TVOC\"\n}\n\n\ndef ServoControl(servo, step=10):\n global ACTUATOR_DATA\n if(EncoderAction[\"DIR\"] != 0):\n newPosition = (ACTUATOR_DATA[\"Servo\"][servo - 1] + (EncoderAction[\"DIR\"] * step))\n if(newPosition > 180):\n newPosition = 180\n if(newPosition < 0):\n newPosition = 0\n ACTUATOR_DATA[\"Servo\"][servo - 1] = newPosition\n client.publish(F'{MQTT_CTRL_TOPIC[\"Servo\"]}/{servo}', ACTUATOR_DATA[\"Servo\"][servo - 1])\n EncoderAction[\"DIR\"] = 0\n return (\"{0:3}\".format(ACTUATOR_DATA[\"Servo\"][servo - 1]))\n\n\ndef VolumeControl(step=5):\n global ACTUATOR_DATA\n if(EncoderAction[\"DIR\"] != 0):\n newVolume = (ACTUATOR_DATA[\"Volume\"] + (EncoderAction[\"DIR\"] * step))\n if(newVolume > 100):\n newVolume = 100\n if(newVolume < 0):\n newVolume = 0\n ACTUATOR_DATA[\"Volume\"] = newVolume\n client.publish(F'{MQTT_CTRL_TOPIC[\"Volume\"]}', ACTUATOR_DATA[\"Volume\"])\n EncoderAction[\"DIR\"] = 0\n return (\"Vol {0:3d}%\".format(ACTUATOR_DATA[\"Volume\"]))\n\n\ndef on_connect(client, userdata, flags, rc):\n if (rc==0):\n print(F\"connected OK Returned code={rc}\")\n for topic in MQTT_CTRL_TOPIC:\n client.subscribe(MQTT_CTRL_TOPIC[topic])\n for topic in MQTT_SENSOR_TOPIC:\n client.subscribe(MQTT_SENSOR_TOPIC[topic])\n\n\ndef on_message(client, userdata, msg):\n global SENSOR_DATA\n global ACTUATOR_DATA\n global EncoderAction\n global DisplayAction\n data = msg.payload.decode()\n if msg.topic == MQTT_CTRL_TOPIC[\"cmd\"]:\n pass\n elif msg.topic == MQTT_CTRL_TOPIC[\"set\"]:\n DisplayAction[\"MQTT_TEXT\"] = str(data)\n elif msg.topic == MQTT_CTRL_TOPIC[\"EnDIR\"]:\n EncoderAction[\"DIR\"] = int(data)\n elif msg.topic == MQTT_CTRL_TOPIC[\"EnSW\"]:\n if(int(data) == 1):\n EncoderAction[\"SW\"] = int(data)\n elif msg.topic == MQTT_CTRL_TOPIC[\"Volume\"]:\n ACTUATOR_DATA[\"Volume\"] = int(data)\n else:\n for topic in MQTT_SENSOR_TOPIC:\n if(msg.topic == MQTT_SENSOR_TOPIC[topic]):\n print(msg.topic, data)\n SENSOR_DATA[topic] = data\n\nclient = mqtt.Client(\"SegmentDisplay\")\nclient.on_message=on_message \nclient.on_connect=on_connect\nclient.connect(\"127.0.0.1\")\nclient.loop_start() \n\n\n# ----------------------------------------------- MAIN ---------------------------------------\nif __name__ == '__main__':\n\n SegmentDisplay.setBrightness(0xff)\n SCREEN_CTRL = True\n ACTIVE = 0\n newText = \"- - - - \"\n oldText = \"\"\n while True:\n try:\n if(SCREEN_CTRL):\n if(EncoderAction[\"DIR\"] != 0):\n ACTIVE += EncoderAction[\"DIR\"]\n ACTIVE = 0 if(ACTIVE >= len(SCREENS)) else ACTIVE\n ACTIVE = (len(SCREENS) - 1) if(ACTIVE < 0) else ACTIVE\n EncoderAction[\"DIR\"] = 0\n \n\n if(DisplayAction[\"MQTT_TEXT\"]):\n newText = str(DisplayAction[\"MQTT_TEXT\"])\n ACTIVE = len(SCREENS)\n DisplayAction[\"MQTT_TEXT\"] = 0\n\n if(ACTIVE < len(SCREENS)):\n if(SCREENS[ACTIVE] == \"clock\"):\n newText = datetime.now().strftime(\"%H-%M-%S\")\n\n elif(SCREENS[ACTIVE] == \"date\"):\n newText = datetime.now().strftime(\"%d-%m-%y\")\n\n elif(SCREENS[ACTIVE] == \"weekday\"):\n newText = datetime.today().strftime('%A').upper()\n\n elif(SCREENS[ACTIVE] == \"temperature\"):\n newText = (\"{0:6.1f}(C\".format(float(SENSOR_DATA[\"temperature\"])))\n\n elif(SCREENS[ACTIVE] == \"Servo\"):\n newText = \"SERVO\"\n if(EncoderAction[\"SW\"] or (SCREEN_CTRL == False)):\n EncoderAction[\"SW\"] = 0 if SCREEN_CTRL else EncoderAction[\"SW\"]\n SCREEN_CTRL = False\n newText = ServoControl(1)\n if(EncoderAction[\"SW\"]):\n EncoderAction[\"SW\"] = 0\n SCREEN_CTRL = True\n newText = \"SERVO\"\n\n elif(SCREENS[ACTIVE] == \"Volume\"):\n newText = \"VOLUME\"\n if(EncoderAction[\"SW\"] or (SCREEN_CTRL == False)):\n EncoderAction[\"SW\"] = 0 if SCREEN_CTRL else EncoderAction[\"SW\"]\n SCREEN_CTRL = False\n newText = VolumeControl()\n if(EncoderAction[\"SW\"]):\n EncoderAction[\"SW\"] = 0\n SCREEN_CTRL = True\n newText = \"VOLUME\"\n\n \n newText = (\"{text:8}\".format(text=newText))\n if(newText != oldText):\n SegmentDisplay.setText(newText)\n oldText = newText\n time.sleep(.1)\n\n except KeyboardInterrupt:\n SegmentDisplay.clear()\n quit()\n ","sub_path":"Actuators/16Segment/HomeHub_Showcase.py","file_name":"HomeHub_Showcase.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"532344290","text":"from django import forms\nfrom courses import models\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\nclass SendEmailForm(forms.Form):\n instructor_email = forms.CharField(max_length=50, help_text='Please input instructor email here.')\n\nclass SignupForm(forms.Form):\n username= forms.CharField(max_length=30,help_text='Required. Please input your username.')\n password = forms.CharField(max_length=30, help_text='Required. Inform input your password.')\n first_name = forms.CharField(max_length = 30, help_text = 'Required. Please input your first name')\n last_name = forms.CharField(max_length = 30, help_text = 'Required. Please input your last name')\n autobiography = forms.CharField(max_length = 2000, help_text = 'Required. Please input a short autobiography (2000 characters)')\n\n\nclass createCourse(forms.ModelForm):\n class Meta:\n model = models.Course\n fields = ['title', 'description', 'thumb', 'category', 'CECU', 'category']\n\nclass createModule(forms.ModelForm):\n class Meta:\n model = models.Module\n fields = ['title', 'order']\n\nclass addComponent(forms.Form):\n def __init__(self, *args, **kwargs):\n # print(kwargs)\n self.courseid = kwargs.pop('courseid')\n # QUESTION_CHOICES = models.QuizQuestion.objects.filter(module_id=self.moduleid)\n COMPONENT_CHOICES = [[x.id, x.title] for x in models.Component.objects.filter(Course_id=self.courseid) if x.Module_id== None ]\n super(addComponent, self).__init__(*args, **kwargs)\n self.fields['components'] = forms.MultipleChoiceField(choices=COMPONENT_CHOICES, required=False,\n widget=forms.CheckboxSelectMultiple())\n self.fields['order'] = forms.IntegerField(max_value=100,required = False, min_value = 1)\n\nclass reorderModule(forms.Form):\n def __init__(self, *args, **kwargs):\n # print(kwargs)\n self.courseid = kwargs.pop('courseid')\n # QUESTION_CHOICES = models.QuizQuestion.objects.filter(module_id=self.moduleid)\n MODULE_CHOICES = [[x.id, x.title] for x in models.Module.objects.filter(Course_id=self.courseid) ]\n super(reorderModule, self).__init__(*args, **kwargs)\n self.fields['module'] = forms.CharField(required=False,\n widget=forms.Select(choices=MODULE_CHOICES))\n self.fields['order'] = forms.IntegerField(max_value=100,required = True, min_value = 1)\n\nclass reorderComponent(forms.Form):\n def __init__(self, *args, **kwargs):\n # print(kwargs)\n self.courseid = kwargs.pop('courseid')\n self.moduleid = kwargs.pop('moduleid')\n # QUESTION_CHOICES = models.QuizQuestion.objects.filter(module_id=self.moduleid)\n COMPONENT_CHOICES = [[x.id, x.title] for x in models.Component.objects.filter(Course_id=self.courseid, Module_id = self.moduleid) ]\n super(reorderComponent, self).__init__(*args, **kwargs)\n self.fields['components'] = forms.CharField(required=False,\n widget=forms.Select(choices=COMPONENT_CHOICES))\n self.fields['order'] = forms.IntegerField(max_value=100,required = True, min_value = 1)\n\nclass createQuiz(forms.Form):\n def __init__(self, *args, **kwargs):\n # print(kwargs)\n self.courseid = kwargs.pop('courseid')\n # QUESTION_CHOICES = models.QuizQuestion.objects.filter(module_id=self.moduleid)\n QUESTION_CHOICES = [[x.id, x.question_text] for x in models.QuizQuestion.objects.filter(course_id=self.courseid) if x.selected==False ]\n super(createQuiz, self).__init__(*args, **kwargs)\n self.fields['questions'] = forms.MultipleChoiceField(choices=QUESTION_CHOICES, required=False,\n widget=forms.CheckboxSelectMultiple())\n self.fields['pass_score'] = forms.IntegerField(min_value=0, max_value=100)\n self.fields['question_number'] = forms.IntegerField(min_value=0, max_value=10)\n","sub_path":"instructors/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"306925166","text":"import math\n\nimport numpy as np\nfrom numba import jit\n\n\n#@jit(\"float64(float64)\", nopython=True, cache=True)\ndef _LennardJones(r):\n force = (r ** 12 - r ** 6)\n return force\n\n\n# @jit(cache=True)\ndef _LennardJones2(R0, i_pt, pi2xij, xij, Fa):\n fij = (i_pt - xij) / pi2xij\n fij *= _LennardJones(R0 / pi2xij) * Fa\n return fij\n\n\n@jit\ndef _density(pixel_val):\n x = 256 / (256 - pixel_val)\n # x = 1. + math.log(pixel_val + 1, 2.)\n return x\n\n\n@jit\ndef _R0_val(i_pt, imin, R0, R1_R0):\n i_pt0 = max(min(round(i_pt[0]), imin.shape[0] - 1), 0)\n i_pt1 = max(min(round(i_pt[1]), imin.shape[1] - 1), 0)\n r0 = R0 * _density(imin[i_pt0][i_pt1])\n return r0, R1_R0 * r0\n\n\n@jit(nopython=True, cache=True)\ndef _distABtoP(a_pt, b_pt, p_pt):\n seg_x = b_pt[0] - a_pt[0]\n seg_y = b_pt[1] - a_pt[1]\n\n seglen_sqrd = seg_x * seg_x + seg_y * seg_y\n\n u = ((p_pt[0] - a_pt[0]) * seg_x + (p_pt[1] - a_pt[1]) * seg_y) / float(seglen_sqrd)\n\n if u > 1:\n u = 1\n elif u < 0:\n u = 0\n\n x = a_pt[0] + u * seg_x\n y = a_pt[1] + u * seg_y\n\n dx = x - p_pt[0]\n dy = y - p_pt[1]\n\n dist = math.sqrt(dx * dx + dy * dy)\n\n if dist == 0:\n dist = 1e-10\n\n return dist, (x, y)\n\n\ndef attract_repel_segment(s, im, maze_path, kdtree, R0, R1_R0, Fa, chunk=2000):\n fi_l = []\n for i in range(s, s + chunk):\n if i >= len(maze_path):\n continue\n fi = np.array([0., 0.])\n i_pt = maze_path[i]\n r0, r1 = _R0_val(i_pt, im, R0, R1_R0)\n neighbors = kdtree.query_ball_point(i_pt, r1)\n n_set = set(neighbors)\n for x in neighbors:\n n_set.add(x - 1)\n\n for j in n_set:\n if j < 0 or j == len(maze_path) - 1:\n continue\n if j < i - 2 or j >= i + 2:\n j_pt = maze_path[j]\n jp1_pt = maze_path[j + 1]\n pi2xij, xij = _distABtoP(j_pt, jp1_pt, i_pt)\n if pi2xij < r1:\n fij = _LennardJones2(r0, i_pt, pi2xij, xij, Fa)\n fi += fij\n fi_l.append(fi)\n\n return fi_l\n\ndef attract_repel_global(im, maze_path, R0, R1_R0, Fa):\n fi_l = []\n for i in range(0, len(maze_path)):\n\n fi = np.array([0., 0.])\n i_pt = maze_path[i]\n r0, r1 = _R0_val(i_pt, im, R0, R1_R0)\n\n for j in range(0,len(maze_path)):\n if j == len(maze_path) - 1:\n continue\n if j < i - 2 or j >= i + 2:\n j_pt = maze_path[j]\n jp1_pt = maze_path[j + 1]\n pi2xij, xij = _distABtoP(j_pt, jp1_pt, i_pt)\n if pi2xij < r1:\n fij = _LennardJones2(r0, i_pt, pi2xij, xij, Fa)\n fi += fij\n fi_l.append(fi)\n\n return fi_l\n\n@jit\ndef _repulse(r):\n force = (r ** 12)\n return force\n\n\n# def attract_repel_segment(s, im, maze_path, kdtree, R0, R1_R0, Fa, chunk=2000):\n# return _attract_repel_segment(s,im,maze_path,kdtree,R0,R1_R0,Fa,chunk)\n\ndef boundary(r0_b, Fo, maze_path, boundary_seg):\n \"\"\"\n This is the brute force version\n Returns:\n \"\"\"\n rArray = np.empty([len(maze_path), 2])\n\n R1 = 2.0 * r0_b\n\n for i in range(0,\n len(maze_path)):\n fi = np.array([0., 0.])\n pi = np.array(maze_path[i])\n for j in range(0,\n len(boundary_seg) - 1):\n j_pt = boundary_seg[j]\n jp1_pt = boundary_seg[j + 1]\n pi2xij, xij = _distABtoP(j_pt, jp1_pt, pi)\n if pi2xij < R1:\n fij = (pi - xij) / max(0.00001, pi2xij)\n fij *= _repulse(r0_b / pi2xij) * Fo\n fi += fij\n rArray[i] = np.array(fi)\n return rArray\n","sub_path":"AttractRepel.py","file_name":"AttractRepel.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"129850763","text":"import os\nimport cv2\nimport time\nimport imutils\nimport numpy as np\nfrom imutils.video import VideoStream\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n\n# function that detect face and determine mask wearing\ndef detection_prediction(window, predictFace, predictMask):\n\n\t# initialize list for face, location of face and predictions\n\tfaces = []\n\tlocs = []\n\tpreds = []\n\n\t# construct a blob from the dimensions of the window\n\t(h, w) = window.shape[:2]\n\tblob = cv2.dnn.blobFromImage(window, 1.0, (224, 224),(104.0, 177.0, 123.0))\n\n\t# insert blob to the FacePrediction net\n\tpredictFace.setInput(blob)\n\tdetections = predictFace.forward()\n\tprint(\"Face Detected:\",detections.shape)\n\n\t# detection for loop\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the probability associated with the current detection\n\t\tprobability = detections[0, 0, i, 2]\n\n\t\t# ignore weak detection that is lower than 0.5\n\t\tif probability > 0.5:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for the face\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# arrange the bounding boxes so that it fall within dimensions of the window\n\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\n\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n\t\t\t# extract the face ROI, convert it from BGR to RGB channel ordering, resize and preprocess\n\t\t\tface = window[startY:endY, startX:endX]\n\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\t\tface = cv2.resize(face, (224, 224))\n\t\t\tface = img_to_array(face)\n\t\t\tface = preprocess_input(face)\n\n\t\t\t# insert the face and bounding boxes to their respective lists\n\t\t\tfaces.append(face)\n\t\t\tlocs.append((startX, startY, endX, endY))\n\n\t# if a face is detected, make prediction\n\tif len(faces) > 0:\n\t\tfaces = np.array(faces, dtype=\"float32\")\n\t\tpreds = predictMask.predict(faces, batch_size=32)\n\n\t# return 2-tuple of the face locations and their corresponding locations\n\treturn (locs, preds)\n\n\n# pre-trained Caffe deep learning model provided by OpenCV to detect faces\n#.prototxt file(s) define the model architecture (i.e., the layers themselves)\n#.caffemodel file which contains the weights for the actual layers\nprototxtPath = \"/home/pi/COMP3025/opencv_facedetector/deploy.prototxt\"\nweightsPath = \"/home/pi/COMP3025/opencv_facedetector/res10_300x300_ssd_iter_140000.caffemodel\"\npredictFace = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# load in the face mask model that was traiend \npredictMask = load_model(\"/home/pi/COMP3025/trained_model/Real-person-dataset-model.model\")\n\n# initialize\nprint(\"[INFO] Starting Video Stream\")\ncamera = VideoStream(src=0).start()\n\nwhile True:\n\t# resize window to maximum width of 400 pixels\n\twindow = camera.read()\n\twindow = imutils.resize(window, width=400)\n\n\t# call function() for detection\n\t(locs, preds) = detection_prediction(window, predictFace, predictMask)\n\n\t# constantly updating the detected face locations\n\tfor (box, pred) in zip(locs, preds):\n\t\t(startX, startY, endX, endY) = box\n\t\t(mask, withoutMask) = pred\n\n\t\t# display bounding box and text according to class label\n\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\n\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n\t\t# format to 100% for predicted probability \n\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n\t\t# output to window\n\t\tcv2.putText(window, label, (startX, startY - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n\t\tcv2.rectangle(window, (startX, startY), (endX, endY), color, 2)\n\n\t# display the output window\n\tcv2.imshow(\"Window\", window)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# stop camera if q is pressed\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# end window and camera \ncv2.destroyAllWindows()\ncamera.stop()","sub_path":"raspi-real-time-detection.py","file_name":"raspi-real-time-detection.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"443837128","text":"from keras import layers\nfrom keras import models\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nnetwork = models.Sequential()\nnetwork.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\nnetwork.add(layers.MaxPool2D((2, 2)))\nnetwork.add(layers.Conv2D(64, (3, 3), activation='relu'))\nnetwork.add(layers.MaxPool2D((2, 2)))\nnetwork.add(layers.Conv2D(64, (3, 3), activation='relu'))\n\nnetwork.add(layers.Flatten())\nnetwork.add(layers.Dense(64, activation='relu'))\nnetwork.add(layers.Dense(10, activation='softmax'))\n\nsummary = network.summary()\nprint(summary)\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\ntrain_images = train_images.reshape(60000, 28, 28, 1)\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape(10000, 28, 28, 1)\ntest_images = test_images.astype('float32') / 255\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\nnetwork.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['acc'])\n\nnetwork.fit(train_images, train_labels, epochs=5, batch_size=64)\n\ntest_loss, test_acc = network.evaluate(test_images, test_labels)\nprint(test_acc, test_loss)","sub_path":"convNet.py","file_name":"convNet.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"170500747","text":"from django.shortcuts import render, redirect\n\n# Create your views here.\nfrom .models import Comp\nfrom product.models import Product\n\n#def cart_create(user=None): #default method\n# cart_obj = Cart.objects.create(user=None)\n# print(\"new cart created\")\n# return cart_obj\n\ndef comp_home(request):\n comp_obj, new_obj = Comp.objects.new_or_get(request)\n print(\"2\")\n print(new_obj)\n return render(request, \"comp/home.html\", {\"comp\": comp_obj})\n\n\ndef comp_update(request):\n print(request.POST)\n product_id=request.POST.get('product')\n product_obj=Product.objects.get(id = product_id)\n comp_obj, new_obj = Comp.objects.new_or_get(request)\n #print (new_obj)\n comp_obj.product.add(product_obj)\n print(\"3\")\n current_user = request.user\n #Product.objects.filter(id=product_id).update(uid=current_user.id, status='IN USE')\n return redirect(\"comp:home\")\n","sub_path":"component/comp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"12488315","text":"import os\nimport glob\nimport csv\nfrom importlib import reload\nimport re\nimport io\nimport sys\nreload(sys)\nfrom lxml import etree as ET\nfrom lxml import html\ntry:\n from itertools import zip_longest as zip_longest\nexcept:\n from itertools import izip_longest as zip_longest\ndictList = []\n\nprint(\"***Getting the latest pubmed xml downloaded file*****\")\n\n#get the latest file with xml extension downloaded from pubmed\nfiles_path = os.path.join('/Users/srijan/Downloads','*.xml')\nfiles = sorted(\n glob.iglob(files_path), key=os.path.getctime, reverse=True\n)\nprint(\"*** File is located at \" + files_path)\n\n#print the file path\nxml_file = files[0]\nprint('************************')\n\nprint('***** Parsing Begins now *******')\ntree = ET.parse(xml_file)\nroot = tree.getroot()\n\n#name csv file as searchterm+timestamp\n\n\ndef parse_authors(tree):\n dictList = []\n affiliations = list()\n if tree.xpath('//AffiliationInfo/Affiliation') is not None:\n for affil in tree.xpath('//AffiliationInfo/Affiliation'):\n affiliations.append(affil.text)\n affiliations_text = '; '.join(affiliations)\n\n authors_tree = tree.xpath('//AuthorList/Author')\n authors = list()\n if authors_tree is not None:\n for a in authors_tree:\n firstname = a.find('ForeName').text if a.find('ForeName') is not None else ''\n lastname = a.find('LastName').text if a.find('LastName') is not None else ''\n fullname = (firstname + ' ' + lastname).strip()\n if fullname == '':\n fullname = a.find('CollectiveName').text if a.find('CollectiveName') is not None else ''\n authors.append(fullname)\n authors_text = '; '.join(authors)\n else:\n authors_text = ''\n\n dict_out = {\n 'authors': authors_text,\n 'affiliation': affiliations_text}\n\n x= dict_out['authors'].split(';')\n print (x)\n y = dict_out['affiliation'].split(';')\n\n newl = list()\n print(type(newl))\n pattern = r\"[\\w.]+@[\\w.]+\"\n for item in y:\n if re.findall(pattern,item):\n newl.append(item)\n else:\n newl.append(\"noemail@mailinator.com\")\n\n d=[x,newl]\n export_data = zip_longest(*d, fillvalue = '')\n file = 'numbers.csv'\n with open(file, 'w', encoding=\"utf8\", newline='') as myfile:\n wr = csv.writer(myfile)\n wr.writerow((\"Author\", \"Email\"))\n wr.writerows(export_data)\n myfile.close()\n\n\n \n \nparse_authors(tree)\n\nprint(\"***** Parsing Complete *****\")\n\n\n\n\n\n\n\n# re_pattern = re.compile(r'[\\w\\.-]+@[\\w\\.-]+')\n\n# with open(\"test.csv\") as fh_in: \n# with open(\"mailout.csv\", \"a+\") as fh_out:\n# for line in fh_in:\n# match_list = re_pattern.findall(line)\n# if match_list:\n# fh_out.write(match_list[0]+\"\\r\\n\") \n\n# #count the number of emails scrapped\n# reader=csv.reader(open(\"mailout.csv\"))\n# count=0\n# for row in reader:\n# count+=1\n# print \"total no in row \"+str(count)+\" is \"+str(len(row))\n# for i in row:\n# print (i)\n\n\n","sub_path":"xmltoexcel.py","file_name":"xmltoexcel.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"62470671","text":"from test_framework import generic_test\n\n\ndef count_inversions(A):\n def count_subarray_inversions(start, end):\n def merge_sort_and_count_inversions_across_subarray(start, mid, end):\n sorted_A = []\n left_start, right_start, inversion_count = start, mid, 0\n\n while left_start < mid and right_start < end:\n if A[left_start] <= A[right_start]:\n sorted_A.append(A[left_start])\n left_start += 1\n else:\n inversion_count += mid - left_start\n sorted_A.append(A[right_start])\n right_start += 1\n\n A[start:end] = sorted_A + A[left_start:mid] + A[right_start:end]\n return inversion_count\n\n if end - start <= 1:\n return 0\n\n mid = (start + end) // 2\n return (count_subarray_inversions(start, mid) +\n count_subarray_inversions(mid, end) +\n merge_sort_and_count_inversions_across_subarray(start, mid, end))\n\n return count_subarray_inversions(0, len(A))\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main(\n \"24-25-count_inversions.py\", 'count_inversions.tsv', count_inversions))\n","sub_path":"Problems/EPI/epi_judge_python/24-25-count_inversions.py","file_name":"24-25-count_inversions.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"345609755","text":"from decimal import getcontext, Decimal\nfrom datetime import datetime\nimport requests\nimport time\nimport uuid\n\n\n# The API seems to use 18 digits, so I copied that\ngetcontext().prec = 18\n\n\nENDPOINTS = {\n 'token': 'https://api.robinhood.com/oauth2/token/',\n 'challenge': 'https://api.robinhood.com/challenge/',\n 'accounts': 'https://api.robinhood.com/accounts/',\n 'quotes': 'https://api.robinhood.com/quotes/',\n 'orders': 'https://api.robinhood.com/orders/',\n 'news': 'https://api.robinhood.com/midlands/news/',\n 'holdings': 'https://nummus.robinhood.com/holdings/',\n 'fundamentals': 'https://api.robinhood.com/marketdata/fundamentals/',\n 'instruments': 'https://api.robinhood.com/instruments/',\n 'historicals': 'https://api.robinhood.com/marketdata/historicals/',\n 'earnings': 'https://api.robinhood.com/marketdata/earnings/',\n 'instruments_similar': 'https://dora.robinhood.com/instruments/similar/',\n 'nummus_orders': 'https://nummus.robinhood.com/orders/',\n 'currency_pairs': 'https://nummus.robinhood.com/currency_pairs/',\n 'nummus_accounts': 'https://nummus.robinhood.com/accounts/',\n 'nummus_historicals': 'https://api.robinhood.com/marketdata/forex/historicals/',\n 'port_historicals': 'https://api.robinhood.com/portfolios/historicals/',\n 'forex_market_quote': 'https://api.robinhood.com/marketdata/forex/quotes/',\n 'tags': 'https://api.robinhood.com/midlands/tags/tag/',\n 'ratings': 'https://api.robinhood.com/midlands/ratings/',\n 'unified': 'https://phoenix.robinhood.com/accounts/unified',\n 'popularity': 'https://api.robinhood.com/instruments/popularity/',\n 'ratings': 'https://api.robinhood.com/midlands/ratings/'\n}\n\n\nAPI_HEADERS = { # Default header params\n 'Accept': '*/*',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'X-Robinhood-API-Version': '1.221.0'\n}\n\n\n# Extracted from robinhood web app\nOAUTH_CLIENT_ID = 'c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS'\n\n\nclass RobinhoodException(Exception):\n \"\"\"Basic Robinhood exception\"\"\"\n pass\n\n\nclass APIError(RobinhoodException):\n \"\"\"An issue interfacing with the Robinhood API\"\"\"\n pass\n\n\nclass UsageError(RobinhoodException):\n \"\"\"An issue using this interface\"\"\"\n pass\n\n\nclass Robinhood:\n \"\"\"Robinhood API interface\n\n Attributes:\n token: (str) API authorization token\n acc_num: (str) Robinhood account number\n nummus_id: (str) The account id associated with currencies\n account_url: (str) The account url\n logged_in: (bool) If successfully authenticated\n \"\"\"\n token = None\n acc_num = None\n nummus_id = None\n account_url = None\n logged_in = False\n\n def __init__(self):\n \"\"\"Creates session used in client\"\"\"\n self.session = requests.session()\n self.session.headers = API_HEADERS\n self.device_token = str(uuid.uuid4())\n self._load()\n\n def _load(self):\n \"\"\"Inits basic internal information\"\"\"\n asset_currs = self.session.get(ENDPOINTS['currency_pairs']).json()['results']\n\n for curr_json in asset_currs:\n\n currency = Currency(self.session, curr_json)\n\n def _load_auth(self, acc_num=None, nummus_id=None):\n \"\"\"Inits internal account information from Robinhood\n\n Args:\n acc_num: (str, optional) manually specify the account number\n nummus_id: (str, optional) manually specify the nummus id\n\n Raises:\n APIError: If logged in but no account found\n \"\"\"\n assert self.logged_in\n\n try:\n\n if not acc_num:\n res_json = self.session.get(ENDPOINTS['accounts']).json()['results']\n if len(res_json) == 0:\n raise APIError('No robinhood accounts found. ' +\n 'You may still be in the process of being verified.')\n self.acc_num = res_json[0]['account_number']\n else:\n self.acc_num = acc_num\n\n self.account_url = ENDPOINTS['accounts'] + self.acc_num + '/'\n\n if not nummus_id:\n res_nummus_json = self.session.get(ENDPOINTS['nummus_accounts']).json()['results']\n if len(res_nummus_json) == 0:\n raise APIError('No robinhood crypto accounts found. ' +\n 'Try buying some online to get this part of your account activated.')\n self.nummus_id = res_nummus_json[0]['id']\n else:\n self.nummus_id = nummus_id\n\n except KeyError:\n raise APIError('Unable to load secure content (retry login)')\n\n def login(self, token='', username='', password='', mfa_code='', verification='sms', acc_num=None, nummus_id=None):\n \"\"\"Login/Authenticate\n\n Args:\n token: (str) required if username/password not given, bypasses login\n since API token already known\n username: (str) required login information if token not specified\n password: (str) required login information if token not specified\n mfa_code: (str) 2 Factor code, required if enabled on the account\n verification: (str) The type of verification to use if required [sms, email]\n acc_num: (str, optional) manual specify the account number\n nummus_id: (str, optional) manual specify the nummus id\n\n Returns:\n (bool) If login was successful\n\n Raises:\n APIError: If login fails\n \"\"\"\n if token: # Skip login\n self.token = token\n self.session.headers['Authorization'] = 'Bearer ' + self.token\n self.logged_in = True\n self._load_auth(acc_num)\n return True\n\n if not username or not password: # If not provided, manually prompt\n import getpass\n username = input('Username: ')\n password = getpass.getpass('Password (Hidden): ')\n\n req_json = {\n 'client_id': OAUTH_CLIENT_ID,\n 'expires_in': 86400,\n 'grant_type': 'password',\n 'scope': 'internal',\n 'username': username,\n 'password': password,\n 'device_token': self.device_token,\n 'challenge_type': verification\n }\n\n if mfa_code:\n req_json['mfa_code'] = mfa_code\n\n res_json = {}\n try:\n res = self.session.post(ENDPOINTS['token'], json=req_json)\n res_json = res.json()\n if 'detail' in res_json and 'challenge issued' not in res_json['detail']:\n res.raise_for_status()\n except Exception:\n raise APIError('Login failed ' + str(res_json))\n\n if 'detail' in res_json and 'challenge issued' in res_json['detail']:\n code = input('Verification Code: ')\n challenge_id = res_json['challenge']['id']\n challenge_res = self.session.post(\n ENDPOINTS['challenge'] + challenge_id + '/respond/', json={'response': code})\n if challenge_res.json()['status'] != 'validated':\n raise APIError('Provided challenge code failed.')\n self.session.headers['X-ROBINHOOD-CHALLENGE-RESPONSE-ID'] = challenge_id\n try:\n res = self.session.post(ENDPOINTS['token'], json=req_json)\n res.raise_for_status()\n res_json = res.json()\n except Exception:\n raise APIError('Challenge auth failed')\n\n if 'mfa_required' in res_json and res_json['mfa_required']:\n mfa_code = input('MFA Code: ')\n req_json['mfa_code'] = mfa_code\n try:\n res = self.session.post(ENDPOINTS['token'], json=req_json)\n res.raise_for_status()\n res_json = res.json()\n except Exception:\n raise APIError('MFA auth failed')\n\n if 'access_token' in res_json:\n\n self.token = res_json['access_token']\n self.session.headers['Authorization'] = 'Bearer ' + self.token\n self.logged_in = True\n self._load_auth(acc_num, nummus_id)\n\n return True\n\n return False\n\n def save_login(self, fn='robinhood-login'):\n \"\"\"Save login to file\"\"\"\n with open(fn, 'w') as save_fp:\n save_fp.write(self.token)\n\n def load_login(self, fn='robinhood-login'):\n \"\"\"Login from file\"\"\"\n with open(fn, 'r') as save_fp:\n token = save_fp.read()\n self.login(token=token)\n\n def __repr__(self):\n return ''.format(self.acc_num)\n\n def __getitem__(self, symbol):\n \"\"\"Access items using robinhood[symbol]\n\n Args:\n symbol: (str) The currency or stock symbol, ex. AMZN, DOGE\n\n Returns:\n (Currency | Stock) The object associated with that symbol\n\n Raises:\n APIError: If symbol cannot be associated with a stock or currency\n \"\"\"\n if symbol in Currency.cache: # check caches first\n return Currency.cache[symbol]\n\n if symbol in Stock.cache: # check caches first\n return Stock.cache[symbol]\n\n try:\n\n assert self.logged_in # instruments endpoint requires auth\n\n res = self.session.get(ENDPOINTS['instruments'] + '?active_instruments_only=false&symbol=' + symbol)\n res.raise_for_status()\n results = res.json()['results']\n\n stock = Stock(self.session, results[0])\n return stock\n\n except Exception:\n raise APIError('Unable to find asset')\n\n def quantity(self, asset, include_held=False):\n \"\"\"Get owned quantity of asset\n\n Args:\n asset: (Currency | Stock | str) the query currency/stock or symbol\n include_held: (bool, optional) whether to included held assets in the tally\n\n Returns:\n (Decimal) Quantity of asset owned\n\n Raises:\n UsageError: If the asset is not valid\n \"\"\"\n assert self.logged_in\n\n if isinstance(asset, str): # convert str to Stock or Currency\n asset = self.__getitem__(asset)\n\n if isinstance(asset, Currency):\n assets = self.get_assets(include_positions=False, include_holdings=True, include_held=include_held)\n\n elif isinstance(asset, Stock):\n assets = self.get_assets(include_positions=True, include_holdings=False, include_held=include_held)\n\n else:\n raise UsageError('Invalid asset type')\n\n return assets.get(asset, Decimal('0.00')) # default to zero if not in positions or holdings\n\n def _order(self, order_side, asset, amt, type='market', price=None, stop_price=None, time_in_force='gtc', return_json=False):\n \"\"\"Internal order method\n\n See .buy(...) and .sell(...)\n \"\"\"\n assert self.logged_in\n assert order_side in ['buy', 'sell']\n assert time_in_force in ['gtc', 'gfd', 'ioc', 'opg']\n\n if isinstance(asset, str): # convert str to asset\n asset = self.__getitem__(asset)\n\n assert asset.tradable\n\n if not price: # if price not given just use current or last known price\n price = asset.price\n\n price = str(price)\n\n if isinstance(asset, Currency):\n\n assert type in ['market', 'limit']\n assert stop_price is None\n\n amt = str(amt)\n\n req_json = {\n 'type': type,\n 'side': order_side,\n 'quantity': amt,\n 'account_id': self.nummus_id,\n 'currency_pair_id': asset.pair_id,\n 'price': price,\n 'ref_id': str(uuid.uuid4()), # Generated temp id\n 'time_in_force': time_in_force\n }\n\n res = self.session.post(ENDPOINTS['nummus_orders'], json=req_json)\n res_json = res.json()\n\n if 'error_code' in res_json:\n raise APIError(res_json['error_code'])\n\n if return_json:\n return res_json\n else:\n return Order(self.session, res_json, 'cryptocurrency', symbol=asset.symbol, asset=asset)\n\n elif isinstance(asset, Stock):\n\n assert type in ['market', 'limit', 'stoploss', 'stoplimit']\n\n # Convert types into correct parameters\n order_type = 'market' if (type in ['market', 'stoploss']) else 'limit'\n trigger = 'immediate' if (type in ['market', 'limit']) else 'stop'\n\n amt = str(round(amt, 0)) # Shares must be integers\n\n if trigger == 'stop':\n assert stop_price\n stop_price = str(stop_price)\n else:\n assert stop_price is None\n\n req_json = {\n 'time_in_force': time_in_force,\n 'price': price,\n 'quantity': amt,\n 'side': order_side,\n 'trigger': trigger,\n 'type': order_type,\n 'account': self.account_url,\n 'instrument': asset.instrument_url,\n 'symbol': asset.symbol,\n 'ref_id': str(uuid.uuid4()), # Generated temp id\n 'extended_hours': False # not sure what this is\n }\n\n if stop_price:\n req_json['stop_price'] = stop_price\n\n res = self.session.post(ENDPOINTS['orders'], json=req_json)\n res_json = res.json()\n\n if 'error_code' in res_json:\n raise APIError(res_json['error_code'])\n\n if return_json:\n return res_json\n else:\n return Order(self.session, res_json, 'stock', symbol=asset.symbol, asset=asset)\n\n else:\n raise UsageError('Invalid asset')\n\n def buy(self, asset, amt, **kwargs):\n \"\"\"Buy item\n\n Args:\n asset: (Currency | Stock | str) the asset to be bought\n amt: (Decimal | float | int) the amt to buy\n type: (str, optional) the order type\n ['market', 'limit', 'stoploss', 'stoplimit']\n price: (Decimal | float | int) the order price\n stop_price: (Decimal | float | int) the stop price, required if using stoploss/stoplimit\n time_in_force: (str, optional) when to cancel\n ['gtc', 'gfd', 'ioc', 'opg']\n return_json: (bool) override return with API response\n\n Returns:\n (Order) The order created\n\n Raises:\n UsageError: If used incorrectly...\n \"\"\"\n return self._order('buy', asset, amt, **kwargs)\n\n def sell(self, asset, amt, **kwargs):\n \"\"\"Sell item\n\n Args:\n asset: (Currency | Stock | str) tthe asset to be sold\n amt: (Decimal | float | int) The amt to sell\n type: (str, optional) the order type\n ['market', 'limit', 'stoploss', 'stoplimit']\n price: (Decimal | float | int) the order price\n stop_price: (Decimal | float | int) the stop price, required if using stoploss/stoplimit\n time_in_force: (str, optional) when to cancel\n ['gtc', 'gfd', 'ioc', 'opg']\n return_json: (bool) override return with API response\n\n Returns:\n (Order) The order created\n\n Raises:\n UsageError: If used incorrectly...\n \"\"\"\n return self._order('sell', asset, amt, **kwargs)\n\n @property\n def orders(self, sort_by_time=True, return_json=False):\n \"\"\"Get order history\"\"\"\n assert self.logged_in\n\n try:\n\n res_stocks = self.session.get(ENDPOINTS['orders'])\n res_crypto = self.session.get(ENDPOINTS['nummus_orders'])\n res_stocks.raise_for_status()\n res_crypto.raise_for_status()\n json_stocks = res_stocks.json()\n json_crypto = res_crypto.json()\n\n if return_json:\n return [json_stocks, json_crypto]\n\n orders = [Order(self.session, json_data, 'stock') for json_data in json_stocks['results']]\n orders += [Order(self.session, json_data, 'cryptocurrency') for json_data in json_crypto['results']]\n\n if sort_by_time:\n orders.sort(key=lambda o: o.created_at)\n\n return orders\n\n except Exception:\n raise APIError('Unable to access orders')\n\n def wait_for_orders(self, orders, delay=5, timeout=120, force=False):\n \"\"\"Sleep until order is complete\n\n Args:\n orders: (list: Order) the orders to wait for\n delay: (int) time in seconds between checks\n timeout: (int) time in seconds to give up waiting\n force: (bool) cancel all orders which were not completed in time\n\n Returns:\n (bool) if the orders where complete\n \"\"\"\n order_complete = lambda order: order.state in ['filled', 'cancelled']\n checks = timeout // delay\n\n while not all(map(order_complete, orders)) and checks > 0:\n time.sleep(delay)\n checks -= 1\n\n # cancel orders not completed\n if force:\n for order in orders:\n if order.state in ['confirmed', 'queued']: \n order.cancel()\n\n return all(map(order_complete, orders))\n\n def get_assets(self, include_positions=True, include_holdings=True, include_held=False, include_zero=False):\n \"\"\"Get all owned assets\n\n Args:\n include_positions: (bool) whether to include stocks\n include_holdings: (bool) whether to include currencies\n include_held: (bool) whether to include held assets\n include_zero: (bool) whether to include assets with zero quantity\n\n Returns:\n (dict) Stock or Currency objects paired with quantities\n \"\"\"\n assert self.logged_in\n\n my_assets = {}\n\n if include_positions:\n\n stocks = self.positions\n\n for stock_json in stocks:\n\n stock = Stock.from_url(self.session, stock_json['instrument'])\n amt = Decimal(stock_json['quantity'])\n\n if include_held:\n amt += Decimal(stock_json['shares_held_for_buys'])\n amt += Decimal(stock_json['shares_held_for_sells'])\n amt += Decimal(stock_json['shares_held_for_options_collateral'])\n amt += Decimal(stock_json['shares_held_for_options_events'])\n amt += Decimal(stock_json['shares_held_for_stock_grants'])\n\n if include_zero or amt > 0:\n my_assets[stock] = amt\n\n if include_holdings:\n\n currs = self.holdings\n\n for curr_json in currs:\n\n code = curr_json['currency']['code']\n\n if code in Currency.cache: # all currencies already cached\n\n curr = Currency.cache[code]\n amt = Decimal(curr_json['quantity_available'])\n\n if include_held:\n amt += Decimal(curr_json['quantity_held_for_buy'])\n amt += Decimal(curr_json['quantity_held_for_sell'])\n\n if include_zero or amt > 0:\n my_assets[curr] = amt\n\n return my_assets\n\n @property\n def account_info(self):\n \"\"\"Account info\"\"\"\n assert self.logged_in\n\n try:\n assert self.acc_num is not None\n res = self.session.get(ENDPOINTS['accounts'] + self.acc_num)\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to access account')\n\n @property\n def holdings(self):\n \"\"\"Currency holdings\"\"\"\n assert self.logged_in\n\n try:\n res = self.session.get(ENDPOINTS['holdings'])\n res.raise_for_status()\n return res.json()['results']\n except Exception:\n raise APIError('Unable to access holdings')\n\n @property\n def positions(self):\n \"\"\"Share positions\"\"\"\n assert self.logged_in\n\n try:\n res = self.session.get(ENDPOINTS['accounts'] + self.acc_num + '/positions/')\n res.raise_for_status()\n return res.json()['results']\n except Exception:\n raise APIError('Unable to access holdings')\n\n @property\n def withdrawable_cash(self):\n \"\"\"Cash that can be withdrawn\"\"\"\n return Decimal(self.account_info['cash_available_for_withdrawal'])\n\n @property\n def buying_power(self):\n \"\"\"Buying power\"\"\"\n return Decimal(self.account_info['buying_power'])\n\n @property\n def cash(self):\n \"\"\"Cash\"\"\"\n return Decimal(self.account_info['cash'])\n\n @property\n def unsettled_funds(self):\n \"\"\"Unsettled funds\"\"\"\n return Decimal(self.account_info['unsettled_funds'])\n\n def get_stocks_by_tag(self, tag):\n \"\"\"Get stock list by tag\n \n Args:\n tag: (str) The tag to use (exs. top-movers, 100-most-popular)\n \n Returns:\n (tuple str, list) The name and list of stocks\n \"\"\"\n assert self.logged_in\n\n try:\n res = self.session.get(ENDPOINTS['tags'] + tag + '/')\n res.raise_for_status()\n resp_json = res.json()\n name = resp_json['name']\n stocks = [Stock.from_url(self.session, url) for url in resp_json['instruments']]\n return (name, stocks)\n except Exception:\n raise APIError('Unable to download stock list')\n\n def history(self, bounds='trading', interval='5minute', span='day', account_id=None):\n \"\"\"Get portfolio value history\n \n Args:\n bounds: (str) The bounds for the returned price data\n interval: (str) The resolution of the data\n span: (str) The span of time to get data for\n account_id: (str, optional) The account id of the portfolio\n \n Returns:\n (dict) Portfolio price data\n \"\"\"\n assert self.logged_in\n if account_id is None:\n account_id = self.acc_num\n\n try:\n url = ENDPOINTS['port_historicals'] \\\n + '{0}/?account={0}&bounds={1}&interval={2}&span={3}'\\\n .format(account_id, bounds, interval, span)\n res = self.session.get(url)\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to download portfolio history')\n\n @property\n def unified_data(self):\n \"\"\"Get the unified data of the account\"\"\"\n try:\n res = self.session.get(ENDPOINTS['unified'])\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to access unified data')\n\n def get_bulk_prices(self, stocks, bounds='trading', include_inactive=True):\n \"\"\"Get the prices of multiple stocks at the same time\n\n Args:\n stocks: (list) Stocks to find prices for\n bounds: (str) The bounds for the returned price data\n include_inactive: (str) Include inactive stocks\n \n Returns:\n (dict) Price data\n \"\"\"\n assert len(stocks) > 0\n instrument_urls = ','.join([stock.instrument_url for stock in stocks])\n try:\n res = self.session.get(ENDPOINTS['quotes'] + \n '?bounds={}&include_inactive={}&instruments={}'\\\n .format(bounds, str(include_inactive).lower(), instrument_urls))\n results = res.json()['results']\n prices = {}\n for stock in stocks:\n stock_data = None\n for item in results:\n if item['symbol'] == stock.symbol:\n stock_data = item\n break\n prices[stock] = stock_data\n return prices\n except Exception:\n raise APIError('Unable to access price data')\n\n def get_bulk_popularity(self, stocks):\n \"\"\"Get the popularity of multiple stocks at the same time\n\n Args:\n stocks: (list) Stocks to find popularity for\n \n Returns:\n (dict) Popularity data\n \"\"\"\n assert len(stocks) > 0\n instrument_ids = ','.join([stock.id for stock in stocks])\n try:\n res = self.session.get(ENDPOINTS['popularity'] + \n '?ids={}'.format(instrument_ids))\n results = res.json()['results']\n pop = {}\n for item in results:\n item_stock = None\n for stock in stocks:\n if item['instrument'] == stock.instrument_url:\n item_stock = stock\n break\n pop[item_stock] = item['num_open_positions']\n return pop\n except Exception:\n raise APIError('Unable to access popularity data')\n\n def get_bulk_ratings(self, stocks):\n \"\"\"Get the ratings of multiple stocks at the same time\n\n Args:\n stocks: (list) Stocks to find ratings for\n \n Returns:\n (dict) Ratings data\n \"\"\"\n assert len(stocks) > 0\n instrument_ids = ','.join([stock.id for stock in stocks])\n try:\n res = self.session.get(ENDPOINTS['ratings'] + \n '?ids={}'.format(instrument_ids))\n results = res.json()['results']\n ratings = {}\n for item in results:\n item_stock = None\n for stock in stocks:\n if item['instrument_id'] == stock.id:\n item_stock = stock\n break\n ratings[item_stock] = item['summary']\n return ratings\n except Exception:\n raise APIError('Unable to access ratings data')\n\n\nclass Currency:\n \"\"\"Currency asset object\n\n Attributes:\n session: (Session) current session used by the API\n json: (dict) internal data json\n name: (str) currency name\n code: (str) currency symbol\n tradable: (bool) if tradable\n type: (str) asset type\n pair_id: (str) currency Pair id\n asset_id: (str) the APIs id for this currency\n \"\"\"\n cache = {}\n\n def __init__(self, session, asset_json):\n\n self.session = session\n self.json = asset_json\n\n self.name = self.json['asset_currency']['name']\n self.code = self.json['asset_currency']['code']\n self.symbol = self.json['symbol']\n self.tradable = (self.json['tradability'] == 'tradable')\n self.type = self.json['asset_currency']['type']\n self.pair_id = self.json['id']\n self.asset_id = self.json['asset_currency']['id']\n\n Currency.cache[self.code] = self\n\n def history(self, bounds='24_7', interval='day', span='year'):\n \"\"\"Retrieve the price history of this crypto\"\"\"\n try:\n res = self.session.get(ENDPOINTS['nummus_historicals'] + \n '{}/?bounds={}&interval={}&span={}'.format(self.pair_id, bounds, interval, span))\n return res.json()['data_points']\n except Exception:\n raise APIError('Unable to access historical market data')\n\n @property\n def market_open(self):\n \"\"\"Is this crypto's market open\"\"\"\n return True # I think its always open...\n\n @property\n def current_quote(self):\n \"\"\"Current trade data\"\"\"\n try:\n res = self.session.get(ENDPOINTS['forex_market_quote'] + self.pair_id + '/')\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to access currency data')\n\n @property\n def price(self):\n \"\"\"Current price\"\"\"\n return Decimal(self.current_quote['mark_price'])\n\n @property\n def ask(self):\n \"\"\"Current ask price\"\"\"\n return Decimal(self.current_quote['ask_price'])\n\n @property\n def bid(self):\n \"\"\"Current bid price\"\"\"\n return Decimal(self.current_quote['bid_price'])\n\n def __hash__(self):\n return hash(self.type + self.code)\n\n def __eq__(self, other):\n return isinstance(other, Currency) and other.code == self.code\n\n def __repr__(self):\n return f''\n\n\nclass Stock:\n \"\"\"Stock asset object\n\n Attributes:\n session: (Session) current session used by the API\n json: (dict) internal data json\n name: (str) stock name\n simple_name: (str) simple stock name\n code: (str) currency symbol\n symbol: (str) currency symbol\n tradable: (bool) if tradable\n type: (str) asset type\n instrument_url: (str) the instrument url for this stock\n id: (str) the APIs id for this stock\n \"\"\"\n cache = {}\n\n def __init__(self, session, instrument_json):\n\n self.session = session\n self.json = instrument_json\n\n self.id = self.json['id']\n self.name = self.json['name']\n self.simple_name = self.json['simple_name']\n self.symbol = self.json['symbol']\n self.code = self.symbol\n self.tradable = self.json['tradeable']\n self.type = self.json['type']\n self.instrument_url = ENDPOINTS['instruments'] + self.id + '/'\n self.market_url = self.json['market']\n\n Stock.cache[self.symbol] = self\n\n @classmethod\n def from_url(self, session, instrument_url):\n \"\"\"Create a stock from its instrument url\"\"\"\n for symbol, stock in Stock.cache.items(): # try cache\n if stock.id in instrument_url:\n return stock\n\n return Stock(session, session.get(instrument_url).json())\n\n @classmethod\n def from_id(self, session, id_):\n \"\"\"Create a stock from its instrument id\"\"\"\n return Stock.from_url(session, ENDPOINTS['instruments'] + id_ + '/')\n\n def history(self, bounds='regular', interval='day', span='year'):\n \"\"\"Retrieve the price history of this stock\"\"\"\n try:\n res = self.session.get(ENDPOINTS['historicals'] + \n '{}/?bounds={}&interval={}&span={}'.format(self.symbol, bounds, interval, span))\n return res.json()['historicals']\n except Exception:\n raise APIError('Unable to access historical market data')\n\n @property\n def market_open(self):\n \"\"\"If the market for this stock is open\"\"\"\n try:\n res = self.session.get(self.market_url + 'hours/' + datetime.today().isoformat()[:10] + '/')\n res.raise_for_status()\n return res.json()['is_open']\n except Exception:\n raise APIError('Unable to access market data')\n\n @property\n def current_quote(self):\n \"\"\"Stock quote info\"\"\"\n try:\n res = self.session.get(ENDPOINTS['quotes'] + self.symbol + '/')\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to access stock data')\n\n @property\n def price(self):\n \"\"\"Current price\"\"\"\n return Decimal(self.current_quote['last_trade_price'])\n\n @property\n def ask(self):\n \"\"\"Current ask price\"\"\"\n return Decimal(self.current_quote['ask_price'])\n\n @property\n def bid(self):\n \"\"\"Current bid price\"\"\"\n return Decimal(self.current_quote['bid_price'])\n\n @property\n def popularity(self):\n \"\"\"Get the number of open positions by Robinhood users\"\"\"\n try:\n res = self.session.get(self.instrument_url + 'popularity/')\n res.raise_for_status()\n return Decimal(res.json()['num_open_positions'])\n except Exception:\n raise APIError('Unable to access popularity data')\n\n @property\n def earnings(self):\n \"\"\"Get the earnings history and estimates\"\"\"\n try:\n res = self.session.get(ENDPOINTS['earnings'] + \n '?instrument=/instruments/' + self.id + '/')\n res.raise_for_status()\n results = res.json()['results']\n earnings = []\n for item in results:\n earning = {}\n for key in ['year', 'quarter', 'eps', 'report', 'call']:\n earning[key] = item[key]\n earnings.append(earning)\n return earnings\n except Exception:\n raise APIError('Unable to access earnings data')\n\n @property\n def fundamentals(self):\n \"\"\"Ges\"\"\"\n try:\n res = self.session.get(ENDPOINTS['fundamentals'] + \n self.id + '/?include_inactive=true')\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to get fundamentals')\n\n def get_similar(self):\n \"\"\"Get similar stocks\"\"\"\n try:\n res = self.session.get(ENDPOINTS['instruments_similar'] + self.id + '/')\n res.raise_for_status()\n results = res.json()['similar']\n stocks = []\n for item in results:\n stocks.append(Stock.from_id(self.session, item['instrument_id']))\n return stocks\n except Exception:\n raise APIError('Unable to find similar stocks')\n\n def get_news(self):\n \"\"\"Get news for this stock\"\"\"\n try:\n res = self.session.get(ENDPOINTS['news'] + self.symbol + '/?')\n res.raise_for_status()\n return res.json()['results']\n except Exception:\n raise APIError('Unable to find news')\n\n @property\n def ratings(self):\n \"\"\"Get the overall buy/sell/hold ratings for this stock\"\"\"\n try:\n res = self.session.get(ENDPOINTS['ratings'] + self.id + '/')\n res.raise_for_status()\n resp_json = res.json()\n summary = dict(\n buy=resp_json['summary']['num_buy_ratings'],\n hold=resp_json['summary']['num_hold_ratings'],\n sell=resp_json['summary']['num_sell_ratings'],\n published=resp_json['ratings_published_at']\n )\n ratings_cnt = summary['buy'] + summary['hold'] + summary['sell']\n summary.update(dict(\n ratings_cnt=ratings_cnt, \n buy_percent=summary['buy'] / ratings_cnt * 100,\n hold_percent=summary['hold'] / ratings_cnt * 100,\n sell_percent=summary['sell'] / ratings_cnt * 100\n ))\n ratings = [\n dict(text=rate_json['text'], rating=rate_json['text'], published=rate_json['published_at']) \n for rate_json in resp_json['ratings']\n ]\n return summary, ratings\n except Exception:\n raise APIError('Unable to access popularity data')\n\n def __hash__(self):\n return hash(self.type + self.symbol)\n\n def __eq__(self, other):\n return isinstance(other, Stock) and other.symbol == self.symbol\n\n def __repr__(self):\n return f''\n\n\nclass Order:\n \"\"\"Order object\n\n Attributes:\n session: (Session) current session used by the API\n json: (dict) internal data json\n id: (str) the order id\n side: (str) buy or sell\n time_in_force: (str) how the order in enforced\n created_at: (str) when the order was created\n quantity: (Decimal) quantity of the asset\n asset_type: (str) cryptocurrency or stock\n cancel_url: (str) the url to cancel the order\n price: (Decimal) the price set in the order,\n this can be None\n stop_price: (Deciaml) the stop price, None if not a stop order\n symbol: (str) the symbol traded in the order, defaults None\n asset: (Stock or Currency) the asset traded in the order, defaults None\n \"\"\"\n def __init__(self, session, order_json, asset_type, symbol=None, asset=None):\n\n self.session = session\n self.json = order_json\n\n self.id = self.json['id']\n self.side = self.json['side']\n self.time_in_force = self.json['time_in_force']\n self.created_at = self.json['created_at']\n\n self.quantity = Decimal(self.json['quantity'])\n self.order_type = self.json['type']\n self.asset_type = asset_type\n self.symbol = symbol\n self.asset = asset\n\n if self.asset_type == 'cryptocurrency':\n\n self.pair_id = self.json['currency_pair_id']\n self.url = ENDPOINTS['nummus_orders'] + self.id\n\n if not self.symbol: # try to pair with cache\n for symbol, asset in Currency.cache.items():\n if asset.pair_id == self.pair_id:\n self.symbol = symbol\n self.asset = asset\n\n elif self.asset_type == 'stock':\n\n self.instrument_url = self.json['instrument']\n self.url = ENDPOINTS['orders'] + self.id\n\n if not self.symbol: # try to pair with cache\n for symbol, asset in Stock.cache.items():\n if asset.instrument_url == self.instrument_url:\n self.symbol = symbol\n self.asset = asset\n\n if 'cancel' in self.json:\n self.cancel_url = self.json['cancel']\n else:\n self.cancel_url = self.json['cancel_url']\n\n if self.json['price']:\n self.price = Decimal(self.json['price'])\n else:\n self.price = None # price not set\n\n if 'stop_price' in self.json and self.json['stop_price']:\n self.stop_price = Decimal(self.json['stop_price'])\n else:\n self.stop_price = None # stop price not set\n\n @property\n def state(self):\n \"\"\"Get order state [confirmed, queued, cancelled, filled]\"\"\"\n try:\n res = self.session.get(self.url)\n res.raise_for_status()\n res_json = res.json()\n return res_json['state']\n except Exception:\n raise APIError('Unable to access order data')\n\n def cancel(self):\n \"\"\"Cancel this order\"\"\"\n try:\n res = self.session.post(self.cancel_url)\n res.raise_for_status()\n return res.json()\n except Exception:\n raise APIError('Unable to cancel')\n\n def __repr__(self):\n if self.symbol:\n return f''\n else:\n return f'' # symbol has yet to be identified\n","sub_path":"tradinhood/robinhood.py","file_name":"robinhood.py","file_ext":"py","file_size_in_byte":38929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"393964108","text":"from xmind.tests import logging_configuration as lc\nfrom xmind.tests import base\nimport xmind\nimport json\nimport os\n\n\nclass TestE2EOpen(base.Base):\n\n def getLogger(self):\n if not getattr(self, '_logger', None):\n self._logger = lc.get_logger('TestE2EOpen')\n return self._logger\n\n def _check_sheets_number(self, workbook):\n _number = len(workbook.getSheets())\n self.assertEqual(\n _number, 2, 'Number of sheets is {} - expected 2'.format(_number))\n\n def _check_sheet_title(self, sheet, title):\n self.assertEqual(sheet.getTitle(), title)\n\n def _check_topic_title(self, topic, title):\n _topic_title_element = topic.getTitle()\n if type(title) is dict:\n self.assertEqual(_topic_title_element, title['#text'],\n 'Topic title is \"{}\", expected \"{}\"'.format(_topic_title_element, title['#text']))\n else:\n self.assertEqual(_topic_title_element, title,\n 'Topic title is \"{}\", expected \"{}\"'.format(_topic_title_element, title))\n\n def _check_relationship_title(self, relationship, title):\n _test_title = relationship.getElementsByTagName(\n 'title')[0]._get_firstChild()._get_data()\n self.assertEqual(_test_title, title,\n 'Relationship title is \"{}\", expected \"{}\"'.format(_test_title, title))\n\n def _check_notes(self, topic, note):\n if topic.getNotes():\n _note = topic.getNotes().getContent()\n self.assertEqual(\n _note, note, 'Topic note is {}, expected \"{}\"'.format(_note, note))\n else:\n self.assertIsNone(note, 'Topic note is not None - expected None')\n\n def _check_tag_attribute(self, tag, attr_name, attr_value):\n _value = tag.getAttribute(attr_name)\n self.assertEqual(\n _value, attr_value, '{} expected to be {} - not {}'.format(attr_name, attr_value, _value))\n\n def _check_topic_marker_ref_attr(self, _root_test_topic, _root_expected_topic):\n _marker_ref = _root_test_topic._get_markerrefs()._get_firstChild()\n for _key, _value in _root_expected_topic['marker-refs']['marker-ref'].items():\n _ref_attr = _marker_ref.getAttribute(_key[1:])\n self.assertEqual(\n _ref_attr, _value, 'Expected {} value = {} - not {}'.format(_key, _value, _ref_attr))\n\n def _check_topic_position(self, _root_test_topic, _root_expected_topic):\n _test_position = _root_test_topic.getPosition()\n _expected_position = (\n int(_root_expected_topic['position']['-svg:x']), int(_root_expected_topic['position']['-svg:y']))\n self.assertEqual(_test_position, _expected_position,\n 'Expected position: {} - not {}'.format(_expected_position, _test_position))\n\n def _check_xmap_content(self, _workbook, _content):\n _xmap_test = _workbook.getChildNodesByTagName('xmap-content')[0]\n _xmap_expected = _content['xmap-content']\n for _key, _value in _xmap_expected.items():\n if _key[0] == '-':\n _test_attr = _xmap_test.getAttribute(_key[1:])\n self.assertEqual(_test_attr, _value,\n 'Expected xmap {} value is: {} - not {}'.format(_key[1:], _value, _test_attr))\n\n def _check_element_extension(self, test_element, exp_element):\n if hasattr(test_element, 'getFirstChildNodeByTagName'):\n _test_ext = test_element.getFirstChildNodeByTagName(\n 'extensions').getElementsByTagName('extension')[0]\n else:\n _test_ext = test_element.getElementsByTagName(\n 'extensions')[0].getElementsByTagName('extension')[0]\n _expected_ext = exp_element['extensions']['extension']\n for _key, _value in _expected_ext.items():\n if _key[0] == '-':\n _test_value = _test_ext.getAttribute(_key[1:])\n self.assertEqual(_test_value, _value,\n 'Expected extension value with attr {} is: {} - not {}'.format(_key[1:], _value,\n _test_value))\n else:\n for _key_exp, _value_exp in _value.items():\n _test_tag = _test_ext.getElementsByTagName(\n _key)[0].getElementsByTagName(_key_exp)[0]\n _test_value = _test_tag._get_firstChild()._get_data()\n self.assertEqual(_test_value, _value_exp,\n 'Expected extension value for {} tag is: {} - not {}'.format(_key_exp, _value_exp,\n _test_value))\n\n def _check_control_point(self, test_point, exp_point):\n for _key, _value in exp_point.items():\n self._check_tag_attribute(test_point, _key[1:], _value)\n\n def _check_control_points(self, test_points, exp_points):\n for i in range(0, len(exp_points)):\n self._check_control_point(test_points[i], exp_points[i])\n\n def _check_relationship(self, test_relationship, exp_relationship):\n self._check_relationship_title(\n test_relationship, exp_relationship['title'])\n for _key, _value in exp_relationship.items():\n if _key[0] == '-':\n self._check_tag_attribute(test_relationship, _key[1:], _value)\n self._check_control_points(test_relationship.getElementsByTagName('control-points')[0]._get_childNodes(),\n exp_relationship['control-points']['control-point'])\n\n def _check_relationships(self, test_sheet, exp_sheet):\n _test_relationships_list = test_sheet.getFirstChildNodeByTagName(\n 'relationships')._get_childNodes()\n _exp_relationships_list = exp_sheet['relationships']['relationship']\n for _i in range(0, len(_exp_relationships_list)):\n self._check_relationship(\n _test_relationships_list[_i], _exp_relationships_list[_i])\n\n def _check_full_topic(self, _root_topic_test, _root_topic_expected):\n self._check_topic_title(\n _root_topic_test, _root_topic_expected['title'])\n for _key, _value in _root_topic_expected.items():\n if _key[0] == '-':\n self._check_tag_attribute(_root_topic_test, _key[1:], _value)\n if 'notes' in _root_topic_expected:\n self._check_notes(_root_topic_test,\n _root_topic_expected['notes']['plain'])\n if 'marker-refs' in _root_topic_expected:\n self._check_topic_marker_ref_attr(\n _root_topic_test, _root_topic_expected)\n if 'position' in _root_topic_expected:\n self._check_topic_position(_root_topic_test, _root_topic_expected)\n if 'extensions' in _root_topic_expected:\n self._check_element_extension(\n _root_topic_test, _root_topic_expected)\n\n # Check attached subtopics of root topic\n if 'children' in _root_topic_expected:\n _children = _root_topic_expected['children']['topics']\n if type(_children) is list:\n for element in _root_topic_expected['children']['topics']:\n _subtopics_list_test = _root_topic_test.getSubTopics(\n element['-type'])\n _subtopics_list_expected = element['topic']\n if type(_subtopics_list_expected) is not list:\n _subtopics_list_expected = [_subtopics_list_expected]\n for i in range(0, len(_subtopics_list_expected)):\n self._check_full_topic(\n _subtopics_list_test[i], _subtopics_list_expected[i])\n else:\n _subtopics_list_test = _root_topic_test.getSubTopics(\n _children['-type'])\n _subtopics_list_expected = _children['topic']\n if type(_subtopics_list_expected) is not list:\n _subtopics_list_expected = [_subtopics_list_expected]\n for i in range(0, len(_subtopics_list_expected)):\n self._check_full_topic(\n _subtopics_list_test[i], _subtopics_list_expected[i])\n\n # This is the test!\n def test_e2e_open(self):\n current_dir = os.path.dirname(__file__)\n content_file_path = os.path.join(current_dir, 'content.json')\n xmind_file_path = os.path.join(current_dir, 'test_file.xmind')\n with open(content_file_path, 'r') as file:\n _content = json.load(file)\n _workbook = xmind.load(xmind_file_path)\n self.assertTrue(_workbook, 'There is no workbook!')\n self._check_sheets_number(_workbook)\n self._check_xmap_content(_workbook, _content)\n self._check_element_extension(_workbook.getChildNodesByTagName(\n 'xmap-content')[0], _content['xmap-content'])\n\n for i in range(0, len(_content['xmap-content']['sheet'])):\n _test_sheet = _workbook.getSheets()[i]\n _expected_sheet = _content['xmap-content']['sheet'][i]\n self._check_sheet_title(_test_sheet, _expected_sheet['title'])\n if _test_sheet.getFirstChildNodeByTagName('relationships'):\n self._check_relationships(_test_sheet, _expected_sheet)\n\n # Checking sheet attributes\n for _key, _value in _expected_sheet.items():\n if _key[0] == '-':\n self._check_tag_attribute(_test_sheet, _key[1:], _value)\n\n # Check root topic title, attributes, notes\n _root_topic_test = _test_sheet.getRootTopic()\n _root_topic_expected = _expected_sheet['topic']\n self._check_full_topic(_root_topic_test, _root_topic_expected)\n","sub_path":"xmind/tests/test_e2e_open.py","file_name":"test_e2e_open.py","file_ext":"py","file_size_in_byte":9885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"249988755","text":"#!/usr/bin/env python3\n\"\"\"\nUsage: plot_centroids.py [FUNCTION]\n\nFUNCTION can be one of: diff1, hist, (more coming soon)\n\"\"\"\n\nimport json\n\nimport plotly.plotly as py\nfrom docopt import docopt\nfrom plotly import offline as pyoff\nfrom plotly.graph_objs import Scatter, Marker, Data, Histogram\nfrom sqlalchemy import select, Table\n\nfrom common.centroid import centroid_difference, get_normalizing_vector, USED_FIELDS, USED_TO_DB, DB_META\n\n\nOFFLINE = True\n\n\nclass Plotter:\n\n def __init__(self):\n # Initialize database\n with open('crx_conf.json') as fin:\n db_conf = json.load(fin)['db']\n\n self.db_meta = DB_META\n self.db_conn = self.db_meta.bind.connect()\n self.extension = Table('extension', self.db_meta)\n self.all_fields = USED_FIELDS + ('_c_size',)\n\n # Get normalizing values\n self.norm_tup = get_normalizing_vector(self.db_meta)\n\n self.quit = lambda: None\n self.q = lambda: None\n\n def diff1(self):\n point_data = {} # Keys: distances, Values: list of extension IDs\n baseline_row = None\n baseline_id = None\n\n # Iterate through the rows\n for row in self.db_conn.execute(select([self.extension])):\n # Get the centroid\n centroid = ()\n for field in self.all_fields:\n col = getattr(self.extension.c, USED_TO_DB[field])\n centroid += (row[col],)\n\n if baseline_row is None:\n baseline_row = centroid\n baseline_id = row[self.extension.c.ext_id]\n continue\n\n diff = centroid_difference(baseline_row, centroid, self.norm_tup)\n try:\n point_data[diff].append(row[self.extension.c.ext_id])\n except KeyError:\n point_data[diff] = [row[self.extension.c.ext_id]]\n\n diffs = list(point_data.keys())\n diffs.sort()\n y = []\n text = []\n for r in diffs:\n for p in point_data[r]:\n y.append(r)\n text.append(p)\n\n trace = Scatter(\n y=y[:40000],\n text=text[:40000],\n name='Diff against %s' % baseline_id,\n mode='markers',\n marker=Marker(size=3)\n )\n\n data = Data([trace])\n if OFFLINE:\n pyoff.plot(data)\n else:\n plot_url = py.plot(data, filename=\"centroid-distances-normalized\")\n print(\"Plot ready at: %s\" % plot_url)\n\n def hist(self):\n ids_calculated = []\n centroid_counts = []\n key_field = USED_TO_DB[self.all_fields[0]]\n\n # Iterate through the rows\n for row in self.db_conn.execute(select([self.extension])):\n # Skip if we've already seen this row\n if row[self.extension.c.ext_id] in ids_calculated:\n continue\n ids_calculated.append(row[self.extension.c.ext_id])\n same_centroid = 1\n\n # Get the centroid\n centroid = ()\n for field in self.all_fields:\n col = getattr(self.extension.c, USED_TO_DB[field])\n centroid += (row[col],)\n\n # Do some really inefficient coding...\n result = self.db_conn.execute(select([self.extension]).\n where((getattr(self.extension.c, key_field)) == row[key_field]))\n for other_row in result:\n # Get the other centroid\n other_centroid = ()\n for field in self.all_fields:\n col = getattr(self.extension.c, USED_TO_DB[field])\n other_centroid += (other_row[col],)\n\n # Calculate the difference between the two vectors\n diff = centroid_difference(centroid, other_centroid, self.norm_tup)\n if diff == 0:\n same_centroid += 1\n ids_calculated.append(other_row[self.extension.c.ext_id])\n centroid_counts.append(same_centroid)\n\n # Create the histogram\n data = Data([Histogram(x=centroid_counts)])\n if OFFLINE:\n pyoff.plot(data)\n else:\n plot_url = py.plot(data, filename=\"centroid-cluster-histogram\")\n print(\"Plot ready at: %s\" % plot_url)\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n\n plot = Plotter()\n try:\n getattr(plot, args['FUNCTION'])()\n except AttributeError:\n while True:\n try:\n _resp = input(\"What function do you want to call: \")\n getattr(plot, _resp)()\n except AttributeError:\n print(\"\\nUnknown function. Try again. 'quit' or 'q' to exit.\")\n pass\n except KeyboardInterrupt:\n # TODO: Deinit on plot object\n break\n else:\n break\n","sub_path":"other/plot_centroids.py","file_name":"plot_centroids.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"445256122","text":"import matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport numexpr as ne\n\nimport timeit\n\nfrom src.ass6_1 import laplacian, get_affinities\n\n\ndef power_iteration(A, max_iter = 1000):\n assert A.shape[0] == A.shape[1], \"non square matrix\"\n\n r = np.random.sample(A.shape[0])\n r_ = 0\n i = 0\n\n while np.abs(r_ - r).max() > 10e-18:\n r_ = r\n r = A.dot(r)\n r /= np.linalg.norm(r)\n i += 1\n if i > max_iter:\n break\n\n print(i)\n\n return r\n\n\n\ndef rayleighq_(v, A):\n \"\"\"\n rayleigh quotient\n :param v:\n :param A:\n :return:\n \"\"\"\n num = v.T.dot(A).dot(v)\n denom = v.T.dot(v)\n return np.divide(num, denom)\n\n\ndef rayleighq(v, A):\n \"\"\"\n rayleigh quotient\n :param v:\n :param A:\n :return:\n \"\"\"\n return np.divide(v.T.dot(A).dot(v), v.T.dot(v))\n\n\ndef eigensolver(A, max_iter = 1000, n = 2, all = False):\n size = A.shape[0]\n\n if all:\n n = size\n\n eigvalues = np.zeros(n)\n eigvectors = np.zeros((size, n))\n\n for i in range(n):\n eigvectors[:,i] = power_iteration(A, max_iter=max_iter)\n eigvalues[i] = rayleighq(eigvectors[:,i], A)\n # deflate\n A = A - eigvalues[i] * eigvectors[:,i].reshape(size,1).dot(eigvectors[:,i].reshape(1,size))\n\n\n args = np.argsort(eigvalues)\n\n return eigvalues[args], eigvectors[:,args]\n\n\ndef is_eigpair(A, eigv, eigvec, n_digits):\n return np.round(eigv * eigvec, n_digits) == np.round(A.dot(eigvec), n_digits)\n\n\ndef test_eigensolver(L, n_iter, reference):\n \"\"\"\n :param L:\n :param n_iter:\n :param reference: eigenvectors, sorted by eigenvalues\n :return:\n \"\"\"\n eigvals, eigvec = eigensolver(L, n_iter, n=L.shape[0])\n #eigvals2, eigvec2 = sp.linalg.eigh(L)\n\n ind = np.where(np.sign(reference[0]) != np.sign(eigvec[0]))\n\n eigvec[:, ind] = -eigvec[:,ind]\n\n err = np.power(eigvec - reference, 2).sum()\n return err\n\n\ndef plot_21(M, upper, stepsize):\n eigvals, eigvec = sp.linalg.eigh(M)\n\n # seed for comparibility\n np.random.seed(42)\n\n errors = pd.Series(np.zeros(int(upper/stepsize - 1)),\n index = range(0, upper, stepsize)[1:])\n n_tryouts = 10\n\n for i in errors.index:\n e = test_eigensolver(M, i, reference=eigvec)\n errors[i] = e / n_tryouts\n\n errors.plot(linewidth = 1.3)\n plt.xlabel('$N_i$')\n plt.ylabel('$r^2$')\n plt.savefig('gfx/errors.png')\n plt.close()\n\n\ndef task22_times(matrix):\n\n times = np.zeros(10)\n for i, size in enumerate(range(0, 401, 200)[1:]):\n K = matrix[:size, :size]\n times[i] = timeit.timeit('eigensolver(K, n=K.shape[0])', number=1, globals={**locals(), **globals()})\n\n return times\n\n\n\nif __name__ == '__main__':\n points = pd.read_csv('data/ass6_circles.txt', sep='\\t',\n header=None, names=['X', 'Y'])\n\n A = get_affinities(points[:400], 0.01)\n\n #global L\n #L = laplacian(A)\n\n #plot_21(L, 1400, 10)\n #times = task22_times(A)\n\n","sub_path":"src/ass6/ass6_2.py","file_name":"ass6_2.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"467086546","text":"'''\r\nCreated on 19 Nov 2013\r\n\r\n@author: Nathan\r\n'''\r\nimport unittest\r\nimport os\r\nimport http.client\r\nimport urllib.request\r\nimport json\r\nimport requests\r\nimport socket\r\nfrom Slave.MasterURLFormatter import MasterURLFormatter\r\n\r\nfrom Agent import *\r\nfrom Repo import * \r\nfrom Host import *\r\n\r\nfrom TestKit import TestData, TestSubs\r\n\r\n\r\nclass AgentTest(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.testRepoRoot=TestData.testMasterRepoRoot\r\n self.testRepoName=\"MasterRepo.TestMasterAgent\"\r\n self.testDir=os.path.join(self.testRepoRoot, self.testRepoName)\r\n self.testHost1 = self.getHostname(self)\r\n \r\n self.testData = {\r\n \"\\\\MasterRepo.TestMasterAgent\\\\environments\\\\Dev1\\\\applications\\\\TestPackage1\\\\1.0\",\r\n \"\\\\MasterRepo.TestMasterAgent\\\\environments\\\\Dev1\\\\applications\\\\TestPackage3\\\\3.2\",\r\n \"\\\\MasterRepo.TestMasterAgent\\\\environments\\\\Dev2\\\\applications\\\\TestPackage1\\\\1.0\",\r\n \"\\\\MasterRepo.TestMasterAgent\\\\environments\\\\Dev2\\\\applications\\\\TestPackage2\\\\3.4\"\r\n }\r\n \r\n self.myRepo = TestSubs.deleteThenCreateFreshMasterRepo( self.testRepoRoot, self.testRepoName )\r\n self.myRepo.copyContentToRepo(\"TestPackage1\", \"1.0\", \"RH4\", TestData.testPackage1Location) \r\n self.myRepo.copyContentToRepo(\"TestPackage2\", \"3.4\", [\"RH3\",\"RH4\"], TestData.testPackage1Location) \r\n self.myRepo.copyContentToRepo(\"TestPackage3\", \"3.2\", [\"RH2\",\"RH3\"], TestData.testPackage1Location) \r\n self.myRepo.addEnvironment(\"Dev1\")\r\n self.myRepo.addApplicationToEnvironment(\"Dev1\", \"TestPackage1\", \"1.0\")\r\n self.myRepo.addApplicationToEnvironment(\"Dev1\", \"TestPackage3\", \"3.2\")\r\n self.myRepo.addEnvironment(\"Dev2\")\r\n self.myRepo.addApplicationToEnvironment(\"Dev2\", \"TestPackage1\", \"1.0\")\r\n self.myRepo.addApplicationToEnvironment(\"Dev2\", \"TestPackage2\", \"3.4\") \r\n host = Host(self.testHost1, self.myRepo, \"RH4,RH2\")\r\n host.createHost()\r\n self.myRepo.addHostToEnvironment(self.testHost1, \"Dev1\")\r\n self.myRepo.addHostToEnvironment(self.testHost1, \"Dev2\")\r\n self.masterURLFormatter = MasterURLFormatter(TestData.testMasterURL)\r\n \r\n os.chdir( self.myRepo.getRepoLocation() )\r\n self.agent = Agent()\r\n self.agent.start()\r\n\r\n @classmethod\r\n def tearDownClass(self):\r\n #self.agent.stopit()\r\n pass\r\n\r\n\r\n def testStartAgent(self):\r\n req = requests.get(TestData.testMasterURL)\r\n self.assertEqual(req.status_code, 200, \"Agent did not start.\")\r\n \r\n #@unittest.skip(\"demonstrating skipping\")\r\n def testEnvJSONDataWithHost(self): \r\n url = self.masterURLFormatter.getEnvData(self.testHost1)\r\n req = requests.get(url)\r\n print( req.text )\r\n self.assertEqual(req.text, '{\"Dev1\": {\"appgroups\": {}, \"apps\": {\"TestPackage3\": \"3.2\", \"TestPackage1\": \"1.0\"}}, \"Dev2\": {\"appgroups\": {}, \"apps\": {\"TestPackage2\": \"3.4\", \"TestPackage1\": \"1.0\"}}}', \"JSON response was different\")\r\n \r\n #@unittest.skip(\"demonstrating skipping\") \r\n def testSendFileToAgentWithBadCheckSum(self):\r\n filename = os.path.join(TestData.testPackage1Location, \"bin\", \"HelloWorld.py\") \r\n files = {'file':(\"testSendFileToAgent\", open(filename,\"rb\")), 'checksum':'bad_data'}\r\n url = self.masterURLFormatter.commitApps()\r\n r = requests.post(url, files=files)\r\n print(r.text) \r\n agentside_absfile = os.path.join(self.testRepoRoot, self.testRepoName, \"tmp\", \"applications\", \"testSendFileToAgent\")\r\n agentside_checksum = os.path.join(self.testRepoRoot, self.testRepoName, \"tmp\", \"applications\", \"checksum\")\r\n with open(agentside_checksum, \"r\") as fp:\r\n cs = fp.read()\r\n cs = str(cs.rstrip())\r\n print(\"Recieved checksum:\"+cs)\r\n self.assertFalse( CheckSum().verifyCheckSumWithDir(agentside_absfile, cs) ) \r\n\r\n def testSendFileToAgentWithGoodCheckSum(self):\r\n oldDir = os.getcwd()\r\n os.chdir(TestData.testPackage1Location)\r\n tgz_filename = os.path.join(self.testDir, \"TestPackage1.1.0.tar.gz\")\r\n tar = tarfile.open(tgz_filename, \"w:gz\")\r\n for f in os.listdir(TestData.testPackage1Location):\r\n tar.add(f)\r\n tar.close()\r\n os.chdir(oldDir)\r\n \r\n file_cs = CheckSum().generate(tgz_filename)\r\n \r\n files = {'file':(\"TestSendFile\", open(tgz_filename,\"rb\")), 'checksum':file_cs, 'app':'TestSendFile', 'ver':'1.0', 'subs':'MYSUBS' }\r\n url = self.masterURLFormatter.commitApps()\r\n r = requests.post(url, files=files)\r\n print(r.text) \r\n \r\n agentside_absfile = os.path.join(self.testRepoRoot, self.testRepoName, \"tmp\", \"applications\", \"TestSendFile.1.0.tar.gz\")\r\n agentside_checksum = os.path.join(self.testRepoRoot, self.testRepoName, \"tmp\", \"applications\", \"checksum\")\r\n with open(agentside_checksum, \"r\") as fp:\r\n cs = fp.read()\r\n cs = cs.rstrip()\r\n print(\"Recieved checksum:\"+cs)\r\n self.assertTrue( CheckSum().verifyCheckSumWithDir(agentside_absfile, cs) ) \r\n\r\n url = self.masterURLFormatter.getCheckSum(\"TestSendFile\", \"1.0\")\r\n req = requests.get(url)\r\n print( req.text )\r\n self.assertEqual(req.text, file_cs)\r\n\r\n #@unittest.skip(\"demonstrating skipping\")\r\n def testDownloadApp(self):\r\n #ask for checksum then download\r\n oldDir = os.getcwd()\r\n os.chdir(TestData.testPackage1Location)\r\n tgz_filename = os.path.join(self.testDir, \"TestPackage1.1.0.tar.gz\")\r\n tar = tarfile.open(tgz_filename, \"w:gz\")\r\n for f in os.listdir(TestData.testPackage1Location):\r\n tar.add(f)\r\n tar.close()\r\n os.chdir(oldDir)\r\n print (\"testDownloadApp cwd:\"+os.getcwd())\r\n \r\n file_cs = CheckSum().generate(tgz_filename)\r\n \r\n files = {'file':(\"TestDownloadApp\", open(tgz_filename,\"rb\")), 'checksum':file_cs, 'app':'TestDownloadApp', 'ver':'1.0', 'subs':'MYSUBS' }\r\n url = self.masterURLFormatter.commitApps()\r\n r = requests.post(url, files=files)\r\n print(r.text)\r\n \r\n downloaded_tgz_filename = os.path.join(self.testDir, \"TestDownloadApp.1.0.tar.gz\")\r\n url = self.masterURLFormatter.getCheckSum(\"TestDownloadApp\", \"1.0\")\r\n req = requests.get(url, stream=True)\r\n print( req.headers )\r\n byte_num=0\r\n with open(downloaded_tgz_filename, 'wb') as fd:\r\n for chunk in req.iter_content(64):\r\n byte_num = byte_num + len(chunk)\r\n print(\"Chunk:\"+str(byte_num)+\"/\"+req.headers['content-length'])\r\n fd.write(chunk)\r\n self.assertTrue(tarfile.is_tarfile(downloaded_tgz_filename))\r\n \r\n \r\n def getHostname(self):\r\n hn = socket.gethostname() \r\n if hn:\r\n return hn\r\n else:\r\n raise ValueError(\"Unable to get the hostname value.\") \r\n\r\nif __name__ == \"__main__\":\r\n #import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()","sub_path":"src/SlaveAgent/AgentTest.py","file_name":"AgentTest.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"273580479","text":"#!/usr/bin/env python3\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n#\n\nimport collections\nimport os\nimport torch\nimport math\n\nfrom fairseq import data, options, utils\nfrom fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\nfrom fairseq.multiprocessing_trainer import MultiprocessingTrainer\n\n\ndef main():\n parser = options.get_parser('Trainer')\n dataset_args = options.add_dataset_args(parser)\n dataset_args.add_argument('--max-tokens', default=6000, type=int, metavar='N',\n help='maximum number of tokens in a batch')\n dataset_args.add_argument('--max-sentences', type=int, metavar='N',\n help='maximum number of sentences in a batch')\n dataset_args.add_argument('--train-subset', default='train', metavar='SPLIT',\n choices=['train', 'valid', 'test'],\n help='data subset to use for training (train, valid, test)')\n dataset_args.add_argument('--valid-subset', default='valid', metavar='SPLIT',\n help='comma separated list of data subsets '\n ' to use for validation (train, valid, valid1,test, test1)')\n options.add_optimization_args(parser)\n options.add_checkpoint_args(parser)\n options.add_model_args(parser)\n\n args = utils.parse_args_and_arch(parser)\n\n if args.no_progress_bar and args.log_format is None:\n args.log_format = 'simple'\n\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n torch.manual_seed(args.seed)\n\n # Load dataset\n splits = ['train', 'valid']\n if data.has_binary_files(args.data, splits):\n dataset = data.load_dataset(args.data, splits, args.source_lang, args.target_lang)\n else:\n dataset = data.load_raw_text_dataset(args.data, splits, args.source_lang, args.target_lang)\n if args.source_lang is None or args.target_lang is None:\n # record inferred languages in args, so that it's saved in checkpoints\n args.source_lang, args.target_lang = dataset.src, dataset.dst\n\n print(args)\n print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))\n print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))\n for split in splits:\n print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))\n\n if not torch.cuda.is_available():\n raise NotImplementedError('Training on CPU is not supported')\n num_gpus = torch.cuda.device_count()\n\n print('| using {} GPUs (with max tokens per GPU = {} and max sentences per GPU = {})'.format(\n num_gpus, args.max_tokens, args.max_sentences))\n\n # Build model and criterion\n model = utils.build_model(args, dataset.src_dict, dataset.dst_dict)\n criterion = utils.build_criterion(args, dataset.src_dict, dataset.dst_dict)\n print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))\n\n # The max number of positions can be different for train and valid\n # e.g., RNNs may support more positions at test time than seen in training\n max_positions_train = (args.max_source_positions, args.max_target_positions)\n max_positions_valid = (\n min(args.max_source_positions, model.max_encoder_positions()),\n min(args.max_target_positions, model.max_decoder_positions())\n )\n\n # Start multiprocessing\n trainer = MultiprocessingTrainer(args, model, criterion)\n\n # Load the latest checkpoint if one is available\n checkpoint_path = os.path.join(args.save_dir, args.restore_file)\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n epoch = extra_state['epoch']\n batch_offset = extra_state['batch_offset']\n print('| loaded checkpoint {} (epoch {})'.format(checkpoint_path, epoch))\n if batch_offset == 0:\n epoch += 1\n else:\n epoch, batch_offset = 1, 0\n\n # Train until the learning rate gets too small\n val_loss = None\n max_epoch = args.max_epoch or math.inf\n lr = trainer.get_lr()\n train_meter = StopwatchMeter()\n train_meter.start()\n while lr > args.min_lr and epoch <= max_epoch:\n # train for one epoch\n train(args, epoch, batch_offset, trainer, dataset, max_positions_train, num_gpus)\n\n # evaluate on validate set\n for k, subset in enumerate(args.valid_subset.split(',')):\n val_loss = validate(args, epoch, trainer, dataset, max_positions_valid, subset, num_gpus)\n if k == 0:\n if not args.no_save:\n # save checkpoint\n save_checkpoint(trainer, args, epoch, 0, val_loss)\n # only use first validation loss to update the learning schedule\n lr = trainer.lr_step(val_loss, epoch)\n\n epoch += 1\n batch_offset = 0\n train_meter.stop()\n print('| done training in {:.1f} seconds'.format(train_meter.sum))\n\n # Stop multiprocessing\n trainer.stop()\n\n\ndef get_perplexity(loss):\n try:\n return round(math.pow(2, loss), 2)\n except OverflowError:\n return float('inf')\n\n\ndef train(args, epoch, batch_offset, trainer, dataset, max_positions, num_gpus):\n \"\"\"Train the model for one epoch.\"\"\"\n\n seed = args.seed + epoch\n torch.manual_seed(seed)\n trainer.set_seed(seed)\n\n itr = dataset.train_dataloader(\n args.train_subset, num_workers=args.workers,\n max_tokens=args.max_tokens, max_sentences=args.max_sentences,\n max_positions=max_positions, seed=seed, epoch=epoch,\n sample_without_replacement=args.sample_without_replacement,\n sort_by_source_size=(epoch <= args.curriculum))\n loss_meter = AverageMeter()\n bsz_meter = AverageMeter() # sentences per batch\n wpb_meter = AverageMeter() # words per batch\n wps_meter = TimeMeter() # words per second\n clip_meter = AverageMeter() # % of updates clipped\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n\n lr = trainer.get_lr()\n with utils.build_progress_bar(args, itr, epoch) as t:\n for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):\n loss_dict = trainer.train_step(sample)\n loss = loss_dict['loss']\n del loss_dict['loss'] # don't include in extra_meters or extra_postfix\n\n ntokens = sum(s['ntokens'] for s in sample)\n nsentences = sum(s['src_tokens'].size(0) for s in sample)\n loss_meter.update(loss, nsentences if args.sentence_avg else ntokens)\n bsz_meter.update(nsentences)\n wpb_meter.update(ntokens)\n wps_meter.update(ntokens)\n clip_meter.update(1 if loss_dict['gnorm'] > args.clip_norm else 0)\n\n extra_postfix = []\n for k, v in loss_dict.items():\n extra_meters[k].update(v)\n extra_postfix.append((k, extra_meters[k].avg))\n\n t.log(collections.OrderedDict([\n ('loss', loss_meter),\n ('wps', round(wps_meter.avg)),\n ('wpb', round(wpb_meter.avg)),\n ('bsz', round(bsz_meter.avg)),\n ('lr', lr),\n ('clip', '{:.0%}'.format(clip_meter.avg)),\n ] + extra_postfix))\n\n if i == 0:\n # ignore the first mini-batch in words-per-second calculation\n wps_meter.reset()\n if args.save_interval > 0 and (i + 1) % args.save_interval == 0:\n save_checkpoint(trainer, args, epoch, i + 1)\n\n t.print(collections.OrderedDict([\n ('train loss', round(loss_meter.avg, 2)),\n ('train ppl', get_perplexity(loss_meter.avg)),\n ('s/checkpoint', round(wps_meter.elapsed_time)),\n ('words/s', round(wps_meter.avg)),\n ('words/batch', round(wpb_meter.avg)),\n ('bsz', round(bsz_meter.avg)),\n ('lr', lr),\n ('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),\n ] + [\n (k, meter.avg)\n for k, meter in extra_meters.items()\n ]))\n\n\ndef save_checkpoint(trainer, args, epoch, batch_offset, val_loss):\n extra_state = {\n 'epoch': epoch,\n 'batch_offset': batch_offset,\n 'val_loss': val_loss,\n }\n\n if batch_offset == 0:\n if not args.no_epoch_checkpoints:\n epoch_filename = os.path.join(args.save_dir, 'checkpoint{}.pt'.format(epoch))\n trainer.save_checkpoint(epoch_filename, extra_state)\n\n assert val_loss is not None\n if not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best:\n save_checkpoint.best = val_loss\n best_filename = os.path.join(args.save_dir, 'checkpoint_best.pt')\n trainer.save_checkpoint(best_filename, extra_state)\n\n last_filename = os.path.join(args.save_dir, 'checkpoint_last.pt')\n trainer.save_checkpoint(last_filename, extra_state)\n\n\ndef validate(args, epoch, trainer, dataset, max_positions, subset, ngpus):\n \"\"\"Evaluate the model on the validation set and return the average loss.\"\"\"\n\n itr = dataset.eval_dataloader(\n subset, max_tokens=args.max_tokens, max_sentences=args.max_sentences,\n max_positions=max_positions,\n skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test,\n descending=True, # largest batch first to warm the caching allocator\n )\n loss_meter = AverageMeter()\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n\n prefix = 'valid on \\'{}\\' subset'.format(subset)\n with utils.build_progress_bar(args, itr, epoch, prefix) as t:\n for _, sample in data.skip_group_enumerator(t, ngpus):\n loss_dict = trainer.valid_step(sample)\n loss = loss_dict['loss']\n del loss_dict['loss'] # don't include in extra_meters or extra_postfix\n\n ntokens = sum(s['ntokens'] for s in sample)\n loss_meter.update(loss, ntokens)\n\n extra_postfix = []\n for k, v in loss_dict.items():\n extra_meters[k].update(v)\n extra_postfix.append((k, extra_meters[k].avg))\n\n t.log(collections.OrderedDict([\n ('valid loss', round(loss_meter.avg, 2)),\n ] + extra_postfix))\n\n t.print(collections.OrderedDict([\n ('valid loss', round(loss_meter.avg, 2)),\n ('valid ppl', get_perplexity(loss_meter.avg)),\n ] + [\n (k, meter.avg)\n for k, meter in extra_meters.items()\n ]))\n\n # update and return the learning rate\n return loss_meter.avg\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"parlai/agents/fairseq/fairseq_py/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"593674330","text":"#!/usr/bin/env python\n\nfrom ProdCommon.SiteDB.CmsSiteMapper import CmsSEMap\n\ndata_service_prefix = 'root://xrootd.unl.edu/'\ncms_se = CmsSEMap()\n# Sites that can handle being read FROM\nglobal_data_svc_hosts = cms_se.match(\"T2_US_*,cmseos.fnal.gov,T1_US_*\")\n# Sites that can have jobs sent TO them\nglobal_data_svc_targets = cms_se.match(\"T2_US_*,T3_US_Omaha,T3_US_Vanderbilt_EC2\")\n\n# helper functions for the global data service\ndef modifyJobFilenames( job ):\n new_files = []\n for file in job[0].split(','):\n if file.find(\"://\") >= 0:\n new_files.append(file)\n else:\n new_files.append(data_service_prefix + file)\n job[0] = ','.join(new_files)\n\ndef modifyPossibleJobLocations( job ):\n # logic: If there is a copy of the block at any of the global_data_hosts\n # sites, then set the \"available sites\" to anything in USCMS.\n site_list = job['dlsDestination']\n site_set = set(site_list)\n site_set.intersection_update(global_data_svc_hosts)\n if site_set:\n job['dlsDestination'] = global_data_svc_targets\n\ndef modifyPossibleBlockLocations( unsorted_sites ):\n # logic: If there is a copy of the block at any of the global_data_svc\n # sites, then set the \"available sites\" to anything in USCMS. \n for block, site_list in unsorted_sites.items():\n site_set = set(site_list)\n site_set.intersection_update(global_data_svc_hosts)\n if site_set:\n unsorted_sites[block] = list(global_data_svc_targets)\n else:\n unsorted_sites[block] = []\n\n","sub_path":"python/GlobalDataService.py","file_name":"GlobalDataService.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"51898105","text":"import tensorflow as tf\nimport numpy as np\n\nfrom ares.attack.base import BatchAttack\nfrom ares.attack.utils import get_xs_ph, get_ys_ph, maybe_to_array, get_unit\n\n\nclass BIM(BatchAttack):\n ''' Basic Iterative Method (BIM). A white-box iterative constraint-based method. Require a differentiable loss\n function and a ``ares.model.Classifier`` model.\n\n - Supported distance metric: ``l_2``, ``l_inf``.\n - Supported goal: ``t``, ``tm``, ``ut``.\n - References: https://arxiv.org/abs/1607.02533.\n '''\n\n def __init__(self, model, batch_size, loss, goal, distance_metric, session, iteration_callback=None):\n ''' Initialize BIM.\n\n :param model: The model to attack. A ``ares.model.Classifier`` instance.\n :param batch_size: Batch size for the ``batch_attack()`` method.\n :param loss: The loss function to optimize. A ``ares.loss.Loss`` instance.\n :param goal: Adversarial goals. All supported values are ``'t'``, ``'tm'``, and ``'ut'``.\n :param distance_metric: Adversarial distance metric. All supported values are ``'l_2'`` and ``'l_inf'``.\n :param session: The ``tf.Session`` to run the attack in. The ``model`` should be loaded into this session.\n :param iteration_callback: A function accept a ``xs`` ``tf.Tensor`` (the original examples) and a ``xs_adv``\n ``tf.Tensor`` (the adversarial examples for ``xs``). During ``batch_attack()``, this callback function would\n be runned after each iteration, and its return value would be yielded back to the caller. By default,\n ``iteration_callback`` is ``None``.\n '''\n self.model, self.batch_size, self._session = model, batch_size, session\n self.loss, self.goal, self.distance_metric = loss, goal, distance_metric\n # placeholder for batch_attack's input\n self.xs_ph = get_xs_ph(model, batch_size)\n self.ys_ph = get_ys_ph(model, batch_size)\n # flatten shape of xs_ph\n xs_flatten_shape = (batch_size, np.prod(self.model.x_shape))\n # store xs and ys in variables to reduce memory copy between tensorflow and python\n # variable for the original example with shape of (batch_size, D)\n self.xs_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))\n # variable for labels\n self.ys_var = tf.Variable(tf.zeros(shape=(batch_size,), dtype=self.model.y_dtype))\n # variable for the (hopefully) adversarial example with shape of (batch_size, D)\n self.xs_adv_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))\n # magnitude\n self.eps_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))\n self.eps_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))\n # step size\n self.alpha_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))\n self.alpha_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))\n # expand dim for easier broadcast operations\n eps = tf.expand_dims(self.eps_var, 1)\n alpha = tf.expand_dims(self.alpha_var, 1)\n # calculate loss' gradient with relate to the adversarial example\n # grad.shape == (batch_size, D)\n self.xs_adv_model = tf.reshape(self.xs_adv_var, (batch_size, *self.model.x_shape))\n self.loss = loss(self.xs_adv_model, self.ys_var)\n grad = tf.gradients(self.loss, self.xs_adv_var)[0]\n if goal == 't' or goal == 'tm':\n grad = -grad\n elif goal != 'ut':\n raise NotImplementedError\n # update the adversarial example\n if distance_metric == 'l_2':\n grad_unit = get_unit(grad)\n xs_adv_delta = self.xs_adv_var - self.xs_var + alpha * grad_unit\n # clip by max l_2 magnitude of adversarial noise\n xs_adv_next = self.xs_var + tf.clip_by_norm(xs_adv_delta, eps, axes=[1])\n elif distance_metric == 'l_inf':\n xs_lo, xs_hi = self.xs_var - eps, self.xs_var + eps\n grad_sign = tf.sign(grad)\n # clip by max l_inf magnitude of adversarial noise\n xs_adv_next = tf.clip_by_value(self.xs_adv_var + alpha * grad_sign, xs_lo, xs_hi)\n else:\n raise NotImplementedError\n # clip by (x_min, x_max)\n xs_adv_next = tf.clip_by_value(xs_adv_next, self.model.x_min, self.model.x_max)\n\n self.update_xs_adv_step = self.xs_adv_var.assign(xs_adv_next)\n self.config_eps_step = self.eps_var.assign(self.eps_ph)\n self.config_alpha_step = self.alpha_var.assign(self.alpha_ph)\n self.setup_xs = [self.xs_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape)),\n self.xs_adv_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape))]\n self.setup_ys = self.ys_var.assign(self.ys_ph)\n self.iteration = None\n\n self.iteration_callback = None\n if iteration_callback is not None:\n xs_model = tf.reshape(self.xs_var, (self.batch_size, *self.model.x_shape))\n self.iteration_callback = iteration_callback(xs_model, self.xs_adv_model)\n\n def config(self, **kwargs):\n ''' (Re)config the attack.\n\n :param magnitude: Max distortion, could be either a float number or a numpy float number array with shape of\n (batch_size,).\n :param alpha: Step size for each iteration, could be either a float number or a numpy float number array with\n shape of (batch_size,).\n :param iteration: Iteration count. An integer.\n '''\n if 'magnitude' in kwargs:\n eps = maybe_to_array(kwargs['magnitude'], self.batch_size)\n self._session.run(self.config_eps_step, feed_dict={self.eps_ph: eps})\n if 'alpha' in kwargs:\n alpha = maybe_to_array(kwargs['alpha'], self.batch_size)\n self._session.run(self.config_alpha_step, feed_dict={self.alpha_ph: alpha})\n if 'iteration' in kwargs:\n self.iteration = kwargs['iteration']\n\n def _batch_attack_generator(self, xs, ys, ys_target):\n ''' Attack a batch of examples. It is a generator which yields back ``iteration_callback()``'s return value\n after each iteration if the ``iteration_callback`` is not ``None``, and returns the adversarial examples.\n '''\n labels = ys if self.goal == 'ut' else ys_target\n self._session.run(self.setup_xs, feed_dict={self.xs_ph: xs})\n self._session.run(self.setup_ys, feed_dict={self.ys_ph: labels})\n for _ in range(self.iteration):\n self._session.run(self.update_xs_adv_step)\n if self.iteration_callback is not None:\n yield self._session.run(self.iteration_callback)\n return self._session.run(self.xs_adv_model)\n\n def batch_attack(self, xs, ys=None, ys_target=None):\n ''' Attack a batch of examples.\n\n :return: When the ``iteration_callback`` is ``None``, return the generated adversarial examples. When the\n ``iteration_callback`` is not ``None``, return a generator, which yields back the callback's return value\n after each iteration and returns the generated adversarial examples.\n '''\n g = self._batch_attack_generator(xs, ys, ys_target)\n if self.iteration_callback is None:\n try:\n next(g)\n except StopIteration as exp:\n return exp.value\n else:\n return g\n","sub_path":"ares/attack/bim.py","file_name":"bim.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"614908806","text":"from django.shortcuts import render, get_object_or_404, reverse, redirect\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template import loader\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django import forms\n\nfrom .models import Post,Reply\n\ndef index(request):\n template = loader.get_template('status/index.html')\n if request.user.is_authenticated:\n user = request.user\n else:\n user = None\n context = {\n 'user': user\n }\n print(request.user)\n return HttpResponse(template.render(context, request))\n\ndef send(request, user):\n template = loader.get_template('status/send.html')\n context = {\n 'user' : user\n }\n return HttpResponse(template.render(context, request))\n\ndef verify(request):\n try:\n title = request.POST['title']\n body = request.POST['body']\n username = request.POST['user']\n date = timezone.now()\n try:\n user = User.objects.get(username=username)\n except(KeyError, User.DoesNotExist) :\n return HttpResponse(\"User does not exist!\")\n except :\n return HttpResponse(\"Something went wrong...\")\n else :\n post = Post(title=title, body=body, user=user, pub_date=date)\n post.save()\n return HttpResponseRedirect(reverse('status:index'))\n\ndef user(request, username):\n try:\n user = User.objects.get(username=username)\n except(KeyError, User.DoesNotExist):\n return HttpResponse(\"The user does not exist!\")\n else :\n template = loader.get_template('status/user.html')\n context = {\n 'user' : user\n }\n return HttpResponse(template.render(context, request))\n\ndef details(request, no):\n try:\n post = Post.objects.get(pk=no)\n except(KeyError, Post.DoesNotExist):\n return HttpResponse(\"The Post does not exist!\")\n else:\n template = loader.get_template('status/details.html')\n context = {\n 'post': post,\n 'user': request.user\n }\n return HttpResponse(template.render(context, request))\n\ndef replyto(request, no, username):\n try:\n post = Post.objects.get(pk=no)\n user = User.objects.get(username=username)\n except(KeyError, Post.DoesNotExist, User.DoesNotExist):\n return HttpResponse(\"The Post or User does not exist!\")\n else:\n template = loader.get_template('status/replyto.html')\n context = {\n 'post': post,\n 'user': user\n }\n return HttpResponse(template.render(context, request))\n\ndef verifyr(request):\n try:\n temp = request.POST['reply']\n username = request.POST['username']\n postid = request.POST['postid']\n try:\n user = User.objects.get(username=username)\n post = Post.objects.get(pk=postid)\n except(KeyError, User.DoesNotExist, Post.DoesNotExist) :\n return HttpResponse(\"User or post does not exist!\")\n except :\n return HttpResponse(\"Something went wrong...\")\n else :\n reply = Reply(user=user,text=temp,post=post)\n reply.save()\n return redirect('status:details', no=postid)\n","sub_path":"status/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"506097087","text":"import tensorflow as tf\nfrom util.cnn import CNN\nfrom tqdm import tqdm\nfrom util.nn import *\n\nclass LabelNet(CNN):\n def build_loss(self):\n #build self.loss and self.opt_op\n #use: self.conv_feats \n self.clsgt = tf.placeholder(tf.int32,[self.batch_size])\n feashape = self.conv_feats.get_shape().as_list()\n #feadim = feashape[-1]*feashape[-2]*feashape[-3]\n conv_feats_reshape = tf.reshape(self.conv_feats,[feashape[0],-1])\n #self.logits = fully_connected(conv_feats_reshape,46,\"cls_logits\",init_w='normal', stddev=0.01)\n self.feat_embbed = fully_connected(conv_feats_reshape,4096,\"feat_logits\",init_w='xavier', stddev=0.01)\n #self.feat_embbed = tf.nn.dropout(self.feat_embbed, 0.5) if self.mode==\"train\" else self.feat_embbed\n self.logits = fully_connected(self.feat_embbed,46,\"cls_logits\",init_w='xavier', stddev=0.01)\n\n self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = self.clsgt, logits = self.logits)\n self.loss = tf.reduce_mean(self.loss)\n self.opt_op = tf.train.AdamOptimizer(self.params.lr).minimize(self.loss,global_step=self.global_step)\n #correct_prediction = tf.equal(tf.argmax(logits,1), self.clsgt)\n #self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n def set_dataset(self,traind,testd):\n self.train_dataset = traind\n self.test_dataset = testd\n def test(self, sess):\n correct_prediction = tf.equal(tf.cast(tf.argmax(self.logits,1),tf.int32), self.clsgt)\n self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n test_dataset = self.test_dataset\n total_acc = 0\n for idx in tqdm(list(range(test_dataset.num_batches)), desc='test'): \n batch = test_dataset.next_batch_for_all() \n img_files, clabels, ctypes = batch\n [acc] = sess.run([self.acc], feed_dict={self.img_files:img_files, self.is_train:False, self.clsgt:clabels}) \n print(\"accuracy=%f \" %(acc)) \n total_acc += acc\n total_acc /= test_dataset.num_batches\n print(\"total accuracy: %f\" %(total_acc))\n test_dataset.reset()\n def train(self, sess):\n print(\"Training the model...\")\n params = self.params\n train_dataset = self.train_dataset\n for epoch_no in tqdm(list(range(params.num_epochs)), desc='epoch'): \n for idx in tqdm(list(range(train_dataset.num_batches)), desc='batch'): \n batch = train_dataset.next_batch_for_all() \n img_files, clabels, ctypes = batch\n #feats = sess.run(self.conv_feats, feed_dict={self.img_files:img_files, self.is_train:False})\n #feed_dict = self.get_feed_dict_for_all(batch, is_train=True, feats=feats) \n _, loss, global_step = sess.run([self.opt_op, self.loss, self.global_step], feed_dict={self.img_files:img_files, self.is_train:True, self.clsgt:clabels}) \n print(\" loss=%f \" %(loss)) \n\n #if (idx+1) % params.test_period == 0:\n #\tself.test(sess)\n if (idx+1) % params.save_period == 0:\n self.save(sess)\n train_dataset.reset()\n\n print(\"Model trained.\")\n def eval_embbed(self, sess, imf):\n return sess.run([self.feat_embbed], feed_dict={self.is_train:False, self.img_files:imf})","sub_path":"LabelNet.py","file_name":"LabelNet.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"345833680","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom matplotlib_venn import *\nimport pandas as pd\n\nT = pd.Series(['Alcohol','Parent Child','Sidekick','Subsiduary','Sex'])\nA = pd.Series([1,1,0,1,1])\nR = pd.Series([1,1,1,1,0])\nB = pd.Series([1,1,1,0,1])\n\ndf = pd.DataFrame({'theme':T,'Archer':A,'Rick':R,'Bojack':B})\n\nA = set(df.theme[df.Archer==1]) #Archer\nB = set(df.theme[df.Rick==1]) #Rick\nC = set(df.theme[df.Bojack==1]) #BoJack\n\nv = venn3_unweighted([A,B,C], ('Archer', 'R&M', 'BoJack'))\n\nv.get_label_by_id('110').set_text('\\n'.join(A&B-C))\nv.get_label_by_id('011').set_text('\\n'.join(B&C-A))\nv.get_label_by_id('101').set_text('\\n'.join(A&C-B))\nv.get_label_by_id('111').set_text('\\n'.join(A&B&C))\n","sub_path":"pop_culture_venn.py","file_name":"pop_culture_venn.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"621192736","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 8 19:32:52 2019\n\n@author: jasonjsyuan\n\"\"\"\n\nimport pandas as pd\nimport sqlite3\nfrom datetime import datetime\n\nconn=sqlite3.connect('wacai365.so')\n\ndf=pd.read_sql_query('select uuid,name from TBL_ACCOUNTINFO',conn)\naccounts={}\nfor _, row in df.iterrows():\n accounts[row['uuid']]=row['name']\n \ndf=pd.read_sql_query('select uuid,name from TBL_OUTGOMAINTYPEINFO',conn)\noutgomaintype={}\nfor _,row in df.iterrows():\n outgomaintype[row['uuid']]=row['name']\n \ndf=pd.read_sql_query('select uuid,name,parentUuid from TBL_OUTGOSUBTYPEINFO',conn)\noutgosubtype={}\noutgosubtomain={}\nfor _,row in df.iterrows():\n outgosubtype[row['uuid']]=row['name']\n outgosubtomain[row['uuid']]=row['parentUuid']\n \ndf=pd.read_sql_query('select uuid,name from TBL_INCOMEMAINTYPEINFO',conn)\nincomemaintype={}\nfor _,row in df.iterrows():\n incomemaintype[row['uuid']]=row['name']\n \ndf=pd.read_sql_query('select uuid,name from TBL_BOOK',conn)\nbooks={}\nfor _,row in df.iterrows():\n books[row['uuid']]=row['name']\n \ndf=pd.read_sql_query('select * from TBL_TRADEINFO where date>0 order by date',conn)\n\ndd_1=[]\ndd_2=[]\ndd_3=[]\n\nfor _,row in df.iterrows():\n try:\n if row['isdelete']==1:\n continue\n \n book=books[row['bookUuid']]\n account=accounts[row['accountUuid']]\n pos=account.find('-')\n fee_type='人民币'\n if pos!=-1:\n fee_type=account[pos+1:]\n account=account[:pos]\n dd=datetime.fromtimestamp(row['date']).strftime('%Y-%m-%d %H:%M:%S')\n #print(book,account)\n\n tradetype=row['tradetype']\n if tradetype==1:\n # outcome\n maintyp=outgomaintype[outgosubtomain[row['typeUuid']]]\n subtyp=outgosubtype[row['typeUuid']]\n dd_1.append((maintyp, subtyp, account, fee_type, '日常', '', \n '非报销', dd, '%.2f'%(float(row['money'])/100),\n '', row['comment'] or '', book))\n elif tradetype==2:\n # income\n typ=incomemaintype[row['typeUuid']]\n dd_2.append((typ, account, fee_type, '日常', '', \n dd, '%.2f'%(float(row['money'])/100),\n '', row['comment'] or '', book))\n elif tradetype==3:\n account2=accounts[row['accountUuid2']]\n pos=account2.find('-')\n fee_type2='人民币'\n if pos!=-1:\n fee_type2=account2[pos+1:]\n account2=account2[:pos]\n dd_3.append((account, fee_type, '%.2f'%(float(row['money'])/100),\n account2, fee_type2, '%.2f'%(float(row['money2'])/100), dd, row['comment'] or '', book))\n else:\n print(row)\n except Exception as e:\n print(\"exception: \" + str(e))\n continue\n \n\n\ndf_1=pd.DataFrame(dd_1,columns=['支出大类','支出小类','账户','币种','项目','商家','报销','消费日期','消费金额','成员金额','备注','账本'])\ndf_2=pd.DataFrame(dd_2,columns=['收入大类','账户','币种','项目','付款方','收入日期','收入金额','成员金额','备注','账本'])\ndf_3=pd.DataFrame(dd_3,columns=['转出账户','币种','转出金额','转入账户','币种','转入金额','转账时间','备注','账本'])\n\nwriter = pd.ExcelWriter('out.xls')\ndf_1.to_excel(writer,sheet_name='支出',index=False)\ndf_2.to_excel(writer,sheet_name='收入',index=False)\ndf_3.to_excel(writer,sheet_name='转账',index=False)\nwriter.save()","sub_path":"wacai.py","file_name":"wacai.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"49137660","text":"import os\nimport shutil\nimport inject\nimport json\n\nfrom cloudshell.networking.sdn.configuration.cloudshell_controller_configuration import CONTROLLER_HANDLER\nfrom cloudshell.networking.sdn.resolution.topology_resolution import SDNTopologyResolution\n\n\nclass InstallStaticFlows(object):\n def __init__(self, controller_handler=None, logger=None):\n\n self._controller = controller_handler\n self._logger = logger\n self.route_resolution = SDNTopologyResolution(self.controller, self._logger)\n\n @property\n def logger(self):\n if self._logger is None:\n self._logger = inject.instance('logger')\n return self._logger\n\n @property\n def controller(self):\n if self._controller is None:\n self._controller = inject.instance(CONTROLLER_HANDLER)\n\n return self._controller\n\n def initialize_folder(self):\n working_dir = os.path.dirname(os.path.abspath(__file__))\n if (os.path.isdir(working_dir + \"/installed_flows\")):\n\n shutil.rmtree(working_dir + \"/installed_flows\")\n os.makedirs(working_dir + \"/installed_flows\")\n else:\n os.makedirs(working_dir + \"/installed_flows\")\n\n def build_flow(self, nodeid, flowname, ethertype='', destip='', srcip='', ipcos='', ipprot='',\n dst_port=None, outdstmac=None, vlan='', src_port=None, actions_list=list(), priority=500):\n newflow = dict()\n\n newflow['name'] = flowname\n newflow['installInHw'] = 'true'\n newflow['node'] = {u'id': nodeid, u'type': u'OF'}\n if (destip != ''): newflow['nwDst'] = destip\n if (srcip != ''): newflow['nwSrc'] = srcip\n if (ethertype != ''): newflow['etherType'] = ethertype\n if (ipcos != ''): newflow['tosBits'] = ipcos\n if (ipprot != ''): newflow['protocol'] = ipprot\n if (vlan != ''): newflow['vlanId'] = vlan\n if (src_port): newflow['ingressPort'] = src_port\n newflow['priority'] = priority\n node = dict()\n node['id'] = nodeid\n node['type'] = 'OF'\n newflow['node'] = node\n if (dst_port): actions_list.append('OUTPUT=%s' % str(dst_port))\n # if (outdstmac): actions_list.append('SET_DL_DST=%s'%str(outdstmac))\n newflow['actions'] = actions_list\n\n return newflow\n\n def static_flow_pusher(self, flow_name, switch_id, port):\n\n self.logger.info('*' * 10)\n self.logger.info('Start Pushing Static Flows')\n\n new_flow = self.build_flow(nodeid=switch_id, flowname=flow_name, src_port=port, ethertype=\"0x800\",\n outdstmac='', actions_list=[\"CONTROLLER\"], priority=650)\n self.logger.info('{0},\\t\\t{1},\\t\\t{2}'.format(switch_id, flow_name,\n port))\n\n response = self.controller.push_static_flow(switch_id, flow_name, new_flow)\n self.save_installed_flow_into_file(switch_id, port)\n route, dst_switch, dst_port = self.return_path_if_path_exists(switch_id)\n if (len(route) > 0):\n self.send_route_to_ctrl(switch_id, port, dst_switch, dst_port, route)\n if (dst_switch != ''):\n switch_id = dst_switch\n port = dst_port\n route, dst_switch, dst_port = self.return_path_if_path_exists(switch_id)\n if (len(route) > 0):\n self.send_route_to_ctrl(switch_id, port, dst_switch, dst_port, route)\n return response\n\n def remove_static_files_folder(self):\n working_dir = os.path.dirname(os.path.abspath(__file__))\n installed_flows_folder = working_dir + \"/installed_flows\"\n if os.path.isdir(installed_flows_folder):\n shutil.rmtree(installed_flows_folder)\n\n def delete_static_flow(self, flow_name, switch_id, port):\n self.logger.info(\"Deleting flow {} for {}p{}...\".format(flow_name, switch_id, port))\n self.controller.delete_flow(src_switch=switch_id, flow_name=flow_name)\n self.controller.delete_route(src_switch=switch_id, src_switch_port=port)\n self.logger.info(\"Flow {} for {}p{} was successfully deleted\".format(flow_name, switch_id, port))\n\n def save_installed_flow_into_file(self, switch_id, port):\n\n working_dir = os.path.dirname(os.path.abspath(__file__))\n\n if not (os.path.isdir(working_dir + \"/installed_flows\")):\n os.makedirs(working_dir + \"/installed_flows\")\n\n filename = working_dir + \"/installed_flows/flows.txt\"\n # if not os.path.exists(filename):\n f = file(filename, \"a+\")\n f.write(\"%s,%s\" % (switch_id, port) + \"\\n\")\n f.close()\n\n def return_path_if_path_exists(self, switch_id):\n\n working_dir = os.path.dirname(os.path.abspath(__file__))\n filename = working_dir + \"/installed_flows/flows.txt\"\n lines = open(filename, 'r').readlines()\n\n for line in lines:\n splittedline = line.split(\",\")\n dst_switch = splittedline[0]\n dst_port = splittedline[1].strip(\"\\n\")\n if (dst_switch != switch_id):\n route = self.route_resolution.get_routing_path_between_two_endpoints(switch_id, dst_switch)\n if (len(route) > 0):\n return route, dst_switch, dst_port\n return [], '', ''\n\n def send_route_to_ctrl(self, src_switch, src_switch_port, dst_switch, dst_switch_port, route):\n route_with_ports = self.route_resolution.compute_the_route_with_ports(src_switch, src_switch_port, dst_switch, \\\n dst_switch_port, route)\n data_dict = dict()\n data_dict[\"route\"] = {}\n for indx, switch in enumerate(route_with_ports):\n switchid = switch.split(\":\")[-1]\n switchid = int(switchid, 16)\n data_dict[\"route\"].update({\"switch\" + str(indx): switchid})\n data_dict[\"route\"].update({\"port\" + str(indx): '%s-%s' % (\n route_with_ports[switch][\"in_port\"], route_with_ports[switch][\"out_port\"])})\n\n self.controller.send_route_to_ctrl(src_switch=src_switch,\n src_switch_port=src_switch_port,\n data=json.dumps(data_dict))\n","sub_path":"cloudshell/networking/sdn/static_flows/static_flows_configuration.py","file_name":"static_flows_configuration.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"37027961","text":"'''\n给定两个非空链表来表示两个非负整数。位数按照逆序方式存储,它们的每个节点只存储单个数字。将两数相加返回一个新的链表。\n你可以假设除了数字 0 之外,这两个数字都不会以零开头。\n\n示例:\n\n输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)\n输出:7 -> 0 -> 8\n原因:342 + 465 = 807\n'''\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n# 打印链表\ndef printListNode(l):\n strList = ''\n while l != None:\n strList = strList + str(l.val)\n l = l.next\n # strList = strList[::-1]\n print(strList)\n\ndef addTwoNumbers(l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n carry = 0\n root = n = ListNode(0)\n while l1 or l2 or carry:\n v1 = v2 = 0\n if l1:\n v1 = l1.val\n l1 = l1.next\n if l2:\n v2 = l2.val\n l2 = l2.next\n carry,val = divmod(v1+v2+carry,10)\n n.next = ListNode(val)\n n = n.next\n return root.next\n\n\n# 初始化第一个链表\ncurrNode1 = ListNode1 = ListNode(2)\ncurrNode1.next = ListNode(4)\ncurrNode1 = currNode1.next\ncurrNode1.next = ListNode(3)\n\n# 初始化第二个链表\ncurrNode2 = ListNode2 = ListNode(5)\ncurrNode2.next = ListNode(6)\ncurrNode2 = currNode2.next\ncurrNode2.next = ListNode(4)\n\nprintListNode(addTwoNumbers(ListNode1,ListNode2))\n\n\n\n\n\n\n","sub_path":"腾讯2018秋��精选/2.AddTwoNumbers.py","file_name":"2.AddTwoNumbers.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"416302427","text":"import os\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\n# from flask_pymongo import PyMongo\nimport pymongo\n\napp = Flask(__name__)\nuri = os.environ.get('MONGOLAB_URI')\n# app.config[\"MONGO_URI\"] = uri\n# mongo = PyMongo(app)\nmongo = pymongo.MongoClient(uri)\ndb = mongo.get_default_database()\nmy_rasps = db['rasps']\n\n# print(db.collection_names())\n\n\n@app.route('/')\ndef hello():\n return \"Raspberry pi List of working ips\"\n\n\n@app.route('/index')\ndef index():\n raspberries = my_rasps.find()\n return render_template('index.html', title='Home', raspberries=raspberries)\n\n\n@app.route('/saveip')\ndef saveip():\n name = request.args.get('name')\n ip = request.args.get('ip')\n mydict = {'name': name, 'ip': ip}\n my_rasps.insert_one(mydict)\n return \"ok\"\n\n\n@app.route('/clearlist')\ndef clear_all_rasp():\n my_rasps.delete_many({})\n return \"The list is cleared\"\n\n\nif __name__ == '__main__':\n app.run(debug=True, use_reloader=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"117569008","text":"from django.urls import path, re_path\nfrom . import views,views_cbv\n\napp_name = 'dojo'\nurlpatterns = [\n re_path(r'^sum/(?P[\\d/]+)/$', views.mysum, name='mysum'),\n re_path(r'^hello/(?P[ㄱ-힣]+)/(?P\\d+)/$', views.hello, name='hello'),\n path('list1/', views.post_list1, name='post_list1'),\n path('list2/', views.post_list2, name='post_list2'),\n path('list3/', views.post_list3, name='post_list3'),\n path('excel/', views.excel_download, name='excel_download'),\n\n path('cbv/list1/', views_cbv.post_list1, name='post_list1'),\n path('cbv/list2/', views_cbv.post_list2, name='post_list2'),\n path('cbv/list3/', views_cbv.post_list3, name='post_list3'),\n path('cbv/excel/', views_cbv.excel_download, name='excel_download'),\n\n path('new/', views.post_new, name='post_new'),\n path('/edit/', views.post_edit, name='post_edit'),\n path('/', views.post_detail, name='post_detail'),\n\n]\n","sub_path":"dojo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"409620705","text":"def makeFIMOScript(fastaFileName, fastaOutPath, GCContentFileNamePrefix, outputFileName, FIMOOutputFileNamePrefix, PWMFileName):\n\t# Separate fastas into their own file and make a script that will run FIMO on each fasta file separately\n\tfastaFile = open(fastaFileName)\n\tfastaFileNameParts = fastaFileName.split(\"/\")\n\tfastaFileNameEnd = fastaFileNameParts[len(fastaFileNameParts) - 1]\n\toutputFile = open(outputFileName, 'w+')\n\tfastaLineCount = 0\n\tfastaOutFileName = \"\"\n\tfastaOutFile = \"\"\n\tfor line in fastaFile:\n\t\t# Iterate through the lines of the fasta file, write them to the appropriate output file, and add to the script\n\t\t# ASSUMES THAT THE FIRST LINE STARTS WITH >\n\t\tif line[0] == \">\":\n\t\t\tif fastaLineCount > 0:\n\t\t\t\t# Close the last fasta file\n\t\t\t\tfastaOutFile.close()\n\t\t\tfastaLineCount = fastaLineCount + 1\n\t\t\tfastaOutFileName = fastaFileName + str(fastaLineCount)\n\t\t\tGCContentFileName = GCContentFileNamePrefix + str(fastaLineCount)\n\t\t\tFIMOOutputFileName = FIMOOutputFileNamePrefix + str(fastaLineCount)\n\t\t\tfastaOutFile = open(fastaOutFileName, 'w+')\n\t\t\tfastaOutFileNameScript = fastaOutPath + fastaFileNameEnd + str(fastaLineCount)\n\t\t\toutputFile.write(\"fimo --bgfile \" + GCContentFileName + \" --max-stored-scores 10000000 --o \" + FIMOOutputFileName + \" --qv-thresh --thresh 0.05 \" + PWMFileName + \" \" + fastaOutFileNameScript + \"\\n\") # Modified for fraser-server\n\t\tfastaOutFile.write(line)\n\tfastaOutFile.close()\n\nif __name__==\"__main__\":\n import sys\n fastaFileName = sys.argv[1]\n fastaOutPath = sys.argv[2]\n GCContentFileNamePrefix = sys.argv[3]\n outputFileName = sys.argv[4]\n FIMOOutputFileNamePrefix = sys.argv[5]\n PWMFileName = sys.argv[6]\n makeFIMOScript(fastaFileName, fastaOutPath, GCContentFileNamePrefix, outputFileName, FIMOOutputFileNamePrefix, PWMFileName)\n","sub_path":"makeFIMOScript.py","file_name":"makeFIMOScript.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"172327644","text":"import wx\nfrom .BaseModule import *\n\nclass ExampleModule(BaseModule):\n def __init__(self, parent):\n super().__init__(parent)\n self.layout = {\n \"testA\" : [wx.Button(self, wx.ID_ANY, 'A'), 0, wx.SHAPED | wx.ALL, 3],\n \"testB\" : [wx.Button(self, wx.ID_ANY, 'B'), 1, wx.GROW | wx.ALL, 3],\n \"testC\" : [wx.Button(self, wx.ID_ANY, 'C'), 2, wx.GROW | wx.ALL, 3],\n }\n # Key : [Widget, proportion, flag, border]\n\n self.bind = [\n [\"testA\", wx.EVT_BUTTON, self.ExampleEventA]\n ]\n # Key, Event, Function\n\n self.SelectSizer(wx.BoxSizer, [wx.VERTICAL])\n self.SetItem()\n\n self.SetValue(\"Click_num\", 0)\n \n def ExampleEventA(self, event):\n v = self.GetValue(\"Click_num\")\n print(v)\n self.SetValue(\"Click_num\", v+1)\n","sub_path":"Module/ExampleModule.py","file_name":"ExampleModule.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"4449305","text":"\n\n# Get SM ms_metadata object\nfrom lib.CMHandler import CMHandler\nfrom lib.SMHandler import SMHandler\nfrom lib.dto.StatusResponse import StatusResponse\n\nfrom flask import render_template\n\n\ndef session_manager_handler(data_dir, ms_metadata_url,\n httpsig_private_key, cm_cache_lifetime,\n httpsig_send_retries):\n cm = CMHandler(data_dir,\n ms_source_url=ms_metadata_url,\n key=httpsig_private_key,\n lifetime=cm_cache_lifetime)\n sm = cm.get_microservice_by_api('SM')\n smh = SMHandler(sm,\n key=httpsig_private_key,\n retries=httpsig_send_retries,\n validate=False)\n return smh\n\n\ndef redirect_return(sm_handler, url, status, origin, destination, message=\"\"):\n\n result = StatusResponse()\n result.primaryCode = status\n result.secondaryCode = \"\"\n result.message = \"\"\n if message != \"\":\n result.message = message\n\n token = sm_handler.generateToken(origin, destination, result.json_marshall())\n print(\"Generated msToken: \" + token + \" \\n\")\n\n template = {\n 'url': url,\n 'token': token,\n }\n\n return render_template('msToken.html', template=template)\n","sub_path":"lib/Microservice.py","file_name":"Microservice.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"9027901","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\nimport torch\nimport numpy as np\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd.variable as V\nimport torchvision.models as models\nimport time\nimport os\nimport copy\nfrom torch.optim import lr_scheduler\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n#torch.cuda.set_device(1)\n# Other settings\nnum_epochs = 10\nbatch_size = 50\n\n\n# In[2]:\n\n\n# Load Data\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR100(root='./data', train=True,\n download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR100(root='./data', train=False,\n download=True, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False, num_workers=2)\n\nprint('Load data successfully!')\n\n\n# In[3]:\n\n\n# Load pre-trained model\nmodel = models.resnet18(pretrained=True)\nmodel.to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(),lr=0.0001)\n\n\n# In[4]:\n\n\n## Test\ndef TestMyModel(model,testloader):\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n images = nn.functional.interpolate(images, size=224)\n images,labels = images.to(device),labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n\n\n\n# In[5]:\n\n\n# Train Model\n## Training\nfor epoch in range(50): # loop over the dataset multiple times\n if epoch % 5 == 0:\n TestMyModel(model,testloader)\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n inputs = nn.functional.interpolate(inputs, size=224)\n inputs,labels = inputs.to(device),labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 100 == 1: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n\nprint('Finished Training')\n\n\n# In[ ]:\n\n\n# Get 75% accuracy after 4 epochs\n\n","sub_path":"resnet_fromSratch_pytorch/TansferLearning_75.py","file_name":"TansferLearning_75.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"268807412","text":"from django.urls import reverse\nfrom django.shortcuts import render,redirect\nfrom django.http import JsonResponse , HttpResponse , HttpResponseRedirect\nimport json\nimport datetime\nfrom .models import * \nfrom .utils import cookieCart, cartData, guestOrder, token_generator\nfrom .forms import CreateUserForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate , login, logout\nfrom .decorators import emptycart\nfrom django.contrib.auth.models import User\n\n# for user activation\nfrom django.views import View\nfrom django.core.mail import EmailMessage\nfrom django.utils.encoding import force_bytes,force_text,DjangoUnicodeDecodeError\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.contrib.sites.shortcuts import get_current_site\nimport threading\n\n\nclass EmailThread(threading.Thread):\n\tdef __init__(self, email):\n\t\tself.email = email\n\t\tthreading.Thread.__init__(self)\n\n\tdef run(self):\n\t\tself.email.send()\n\n\n\ndef registerPage(request):\n\n\tdata = cartData(request)\n\tcartItems = data['cartItems']\n\n\tform = CreateUserForm()\n\t\t\n\tif request.user.is_authenticated:\n\t\treturn redirect(\"store\")\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tform = CreateUserForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\temail_id = form.cleaned_data.get(\"email\")\n\t\t\t\tuser=form.save()\n\t\t\t\tuser.is_active = False\n\t\t\t\tuser.save()\n\n\t\t\t\tuidb64 = urlsafe_base64_encode(force_bytes(user.pk))\n\t\t\t\tdomain = get_current_site(request).domain\n\t\t\t\tlink = reverse('activate',kwargs={'uidb64':uidb64,'token':token_generator.make_token(user)})\n\t\t\t\tactivate_url = 'https://'+domain+link\n\n\t\t\t\temail_subject = 'Activate Your Account'\n\t\t\t\temail_body = 'Hi '+user.username+', Please use the following link to verify your account for BuyKaro\\n'+activate_url\n\n\t\t\t\temail = EmailMessage(\n\t\t\t email_subject,\n\t\t\t email_body,\n\t\t\t 'noreply@buykaro.com',\n\t\t\t [email_id],\n\t\t\t\t)\n\n\t\t\t\t# email.send(fail_silently=False) \n\n\t\t\t\tEmailThread(email).start()\n\t\t\t\t\n\t\t\t\tuser1 = form.cleaned_data.get('username')\n\t\t\t\tmessages.success(request,\"Account created successfully for \"+user1+\".\\\n\t\t\t\t A verification mail has been sent\\n\"+\"to your entered email address. Please verify your email address to login. \")\n\t\t\t\treturn redirect(\"login\")\n\n\t\tcontext = {'form':form,'cartItems':cartItems}\n\t\treturn render(request,'store/register.html',context)\n\nclass VerificationView(View):\n\tdef get(self,request,uidb64,token):\n\t\tid = force_text(urlsafe_base64_decode(uidb64))\n\t\tuser = User.objects.get(pk=id)\n\n\t\tif user.is_active:\n\t\t\tmessages.info(request,'Account already Activated')\n\t\t\treturn redirect(\"login\")\n\t\telse:\n\t\t\tuser.is_active = True\n\t\t\tuser.save()\n\t\t\tmessages.success(request,'Account has been successfully Activated')\n\t\t\treturn redirect(\"login\")\n\t\t\n\t\t# except Exception as ex:\n\t\t# \tpass\n\n\t\t# return redirect(\"login\")\n\ndef loginPage(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\n\tif request.user.is_authenticated:\n\t\treturn redirect(\"store\")\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tusername = request.POST.get('username')\n\t\t\tpassword = request.POST.get('password')\n\n\t\t\tuser = authenticate(request,username=username,password=password)\n\t\t\t\n\t\t\tif user is not None:\n\t\t\t\tlogin(request,user)\n\t\t\t\treturn redirect(\"store\")\n\t\t\telse:\n\t\t\t\tmessages.info(request,\"username OR passoword is incorrect\")\n\t\tcontext = {'cartItems':cartItems}\n\t\treturn render(request,'store/login.html',context)\n\ndef logoutPage(request):\n\tlogout(request)\n\treturn redirect(\"login\")\n\ndef search(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\n\tquery=request.GET['query']\n\tproducts= Product.objects.filter(name__icontains=query)\n\tcontext={'products':products,'query':query,'cartItems':cartItems}\n\t\n\treturn render(request,'store/search.html',context)\n\ndef store(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tproducts = Product.objects.all()\n\tcontext = {'products':products, 'cartItems':cartItems}\n\treturn render(request, 'store/store.html', context)\n\ndef product(request,pk):\n\tdata = cartData(request)\n\tdomain = get_current_site(request).domain\n\turl = 'https://'+domain\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\tproducts = Product.objects.filter(id=pk)\n\tcontext = {'products':products,'cartItems':cartItems,'domain':domain}\n\treturn render(request,'store/product.html', context)\n\ndef removeitem(request, pk):\n \tproduct = Product.objects.filter(id=pk)\n \tproduct.item = None\n \tdata = cartData(request)\n \tcartItems = data['cartItems']\n \titems = data['items']\n \torder = data['order']\n \t# cartItems = cartItems * 0\n \tcontext= {'order':order,'items':items,'product':product,'cartItems':cartItems}\n \treturn render(request, 'store/cart.html', context)\n\ndef cart(request):\n\tdata = cartData(request)\n\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tcontext = {'items':items, 'order':order, 'cartItems':cartItems}\n\treturn render(request, 'store/cart.html', context)\n\n@emptycart\ndef checkout(request):\n\tdata = cartData(request)\n\t\n\tcartItems = data['cartItems']\n\torder = data['order']\n\titems = data['items']\n\n\tcontext = {'items':items, 'order':order, 'cartItems':cartItems}\n\treturn render(request, 'store/checkout.html', context)\n\ndef updateItem(request):\n\tdata = json.loads(request.body)\n\tproductId = data['productId']\n\taction = data['action']\n\tprint('Action:', action)\n\tprint('Product:', productId)\n\n\tcustomer = request.user.customer\n\tproduct = Product.objects.get(id=productId)\n\torder, created = Order.objects.get_or_create(customer=customer, complete=False)\n\n\torderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n\n\tif action == 'add':\n\t\torderItem.quantity = (orderItem.quantity + 1)\n\telif action == 'remove':\n\t\torderItem.quantity = (orderItem.quantity - 1)\n\n\torderItem.save()\n\n\tif orderItem.quantity <= 0:\n\t\torderItem.delete()\n\n\treturn JsonResponse('Item was added', safe=False)\n\n\ndef processOrder(request):\n\ttransaction_id = datetime.datetime.now().timestamp()\n\tdata = json.loads(request.body)\n\n\tif request.user.is_authenticated:\n\t\tcustomer = request.user.customer\n\t\torder, created = Order.objects.get_or_create(customer=customer, complete=False)\n\telse:\n\t\tcustomer, order = guestOrder(request, data)\n\n\ttotal = float(data['form']['total'])\n\torder.transaction_id = transaction_id\n\n\tif total == order.get_cart_total:\n\t\torder.complete = True\n\torder.save()\n\n\tif order.shipping == True:\n\t\tShippingAddress.objects.create(\n\t\tcustomer=customer,\n\t\torder=order,\n\t\taddress=data['shipping']['address'],\n\t\tcity=data['shipping']['city'],\n\t\tstate=data['shipping']['state'],\n\t\tzipcode=data['shipping']['zipcode'],\n\t\t)\n\n\treturn JsonResponse('Payment submitted..', safe=False)\n\t","sub_path":"Desktop/django_BuyKaro/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"258589975","text":"# Graph data readers and parsers for electron ID\n#\n# Mikael Mieskolainen, 2020\n# m.mieskolainen@imperial.ac.uk\n\nimport numpy as np\nimport numba\nfrom tqdm import tqdm\n\nimport torch\nfrom torch_geometric.data import Data\n\nimport uproot_methods\nimport icenet.algo.analytic as analytic\n\n\ndef parse_graph_data(X, VARS, features, Y=None, W=None, global_on=True, coord='ptetaphim', EPS=1e-12):\n \"\"\"\n Jagged array data into pytorch-geometric style Data format array.\n \n Args:\n X : Jagged array of variables\n VARS : Variable names as an array of strings\n features : Array of active scalar feature strings\n Y : Target class array (if any, typically MC only)\n W : (Re-)weighting array (if any, typically MC only)\n global_on : Global features on / off\n coord : Coordinates used for nodes ('ptetaphim', 'pxpypze')\n \n Returns:\n Array of pytorch-geometric Data objects\n \"\"\"\n\n num_node_features = 6\n num_edge_features = 4\n num_classes = 2\n\n N_events = X.shape[0]\n dataset = []\n\n print(__name__ + f'.parse_graph_data: Converting {N_events} events into graphs ...')\n zerovec = uproot_methods.TLorentzVector(0,0,0,0)\n\n # Collect feature indices\n feature_ind = np.zeros(len(features), dtype=np.int32)\n for i in range(len(features)):\n feature_ind[i] = VARS.index(features[i])\n\n\n # Collect indices\n ind__trk_pt = VARS.index('trk_pt')\n ind__trk_eta = VARS.index('trk_eta')\n ind__trk_phi = VARS.index('trk_phi')\n\n ind__image_clu_e = VARS.index('image_clu_e')\n ind__image_clu_eta = VARS.index('image_clu_eta')\n ind__image_clu_phi = VARS.index('image_clu_phi')\n\n\n # Loop over events\n for e in tqdm(range(N_events)):\n\n num_nodes = 1 + len(X[e, ind__image_clu_eta]) # + 1 virtual node\n num_edges = num_nodes**2 # include self-connections\n \n # Construct 4-vector for the track, with pion mass\n p4track = \\\n uproot_methods.TLorentzVector.from_ptetaphim(\n X[e, ind__trk_pt], X[e, ind__trk_eta], X[e, ind__trk_phi], 0.13957)\n\n # Construct 4-vector for each ECAL cluster [@@ JAGGED @@]\n p4vec = []\n if len(X[e, ind__image_clu_e]) > 0:\n pt = X[e, ind__image_clu_e] / np.cosh(X[e, ind__image_clu_eta]) # Massless approx.\n p4vec = uproot_methods.TLorentzVectorArray.from_ptetaphim(\n pt, X[e, ind__image_clu_eta], X[e, ind__image_clu_phi], 0) # Massless\n\n\n # ====================================================================\n # CONSTRUCT TENSORS\n\n # Construct output class, note [] is important to have for right dimensions\n if Y is not None:\n y = torch.tensor([Y[e]], dtype=torch.long)\n else:\n y = torch.tensor([0], dtype=torch.long)\n\n # Training weights, note [] is important to have for right dimensions\n if W is not None:\n w = torch.tensor([W[e]], dtype=torch.float)\n else:\n w = torch.tensor([1.0], dtype=torch.float)\n\n ## Construct global feature vector\n u = torch.tensor(X[e, feature_ind].tolist(), dtype=torch.float)\n \n ## Construct node features\n x = get_node_features(p4vec=p4vec, p4track=p4track, X=X[e], VARS=VARS, num_nodes=num_nodes, num_node_features=num_node_features, coord=coord)\n x = torch.tensor(x, dtype=torch.float)\n\n ## Construct edge features\n edge_attr = get_edge_features(p4vec=p4vec, num_nodes=num_nodes, num_edges=num_edges, num_edge_features=num_edge_features)\n edge_attr = torch.tensor(edge_attr, dtype=torch.float)\n\n ## Construct edge connectivity\n edge_index = get_edge_index(num_nodes=num_nodes, num_edges=num_edges)\n edge_index = torch.tensor(edge_index, dtype=torch.long)\n\n\n # Add this event\n if global_on == False: # Null the global features\n u = torch.tensor(np.zeros(len(u)), dtype=torch.float)\n\n dataset.append(Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y, w=w, u=u))\n\n return dataset\n\n\ndef get_node_features(p4vec, p4track, X, VARS, num_nodes, num_node_features, coord):\n\n # Node feature matrix\n x = np.zeros((num_nodes, num_node_features))\n\n for i in range(num_nodes):\n\n # i = 0 case is the virtual node\n if i > 0:\n if coord == 'ptetaphim':\n x[i,0] = p4vec[i-1].pt\n x[i,1] = p4vec[i-1].eta\n x[i,2] = p4vec[i-1].phi\n x[i,3] = p4vec[i-1].mass\n elif coord == 'pxpypze':\n x[i,0] = p4vec[i-1].x\n x[i,1] = p4vec[i-1].y\n x[i,2] = p4vec[i-1].z\n x[i,3] = p4vec[i-1].t\n else:\n raise Exception(__name__ + f'parse_graph_data: Unknown coordinate representation')\n \n # other features\n x[i,4] = X[VARS.index('image_clu_nhit')][i-1]\n x[i,5] = p4track.delta_r(p4vec[i-1])\n\n return x\n\n\n@numba.njit\ndef get_edge_index(num_nodes, num_edges):\n\n # Graph connectivity: (~ sparse adjacency matrix)\n edge_index = np.zeros((2, num_edges))\n\n n = 0\n for i in range(num_nodes):\n for j in range(num_nodes):\n\n # Full connectivity\n edge_index[0,n] = i\n edge_index[1,n] = j\n n += 1\n \n return edge_index\n\n\ndef get_edge_features(p4vec, num_nodes, num_edges, num_edge_features, EPS=1E-12):\n\n # Edge features: [num_edges, num_edge_features]\n edge_attr = np.zeros((num_edges, num_edge_features), dtype=float)\n indexlist = np.zeros((num_nodes, num_nodes), dtype=int)\n \n n = 0\n for i in range(num_nodes):\n for j in range(num_nodes):\n\n # Compute only non-zero\n if (i > 0 and j > 0) and (j > i):\n\n p4_i = p4vec[i-1]\n p4_j = p4vec[j-1]\n\n # kt-metric (anti)\n dR2_ij = ((p4_i.eta - p4_j.eta)**2 + (p4_i.phi - p4_j.phi)**2)\n kt2_i = p4_i.pt2 + EPS \n kt2_j = p4_j.pt2 + EPS\n edge_attr[n,0] = analytic.ktmetric(kt2_i=kt2_i, kt2_j=kt2_j, dR2_ij=dR2_ij, p=-1, R=1.0)\n \n # Lorentz scalars\n edge_attr[n,1] = (p4_i + p4_j).p2 # Mandelstam s-like\n edge_attr[n,2] = (p4_i - p4_j).p2 # Mandelstam t-like\n edge_attr[n,3] = p4_i.dot(p4_j) # 4-dot\n\n indexlist[i,j] = n\n n += 1\n\n ### Copy to the lower triangle for speed (we have symmetric adjacency)\n n = 0\n for i in range(num_nodes):\n for j in range(num_nodes):\n\n # Copy only non-zero\n if (i > 0 and j > 0) and (j < i):\n edge_attr[n,:] = edge_attr[indexlist[j,i],:] # note [j,i] !\n n += 1\n\n return edge_attr\n\n\n'''\ndef find_k_nearest(edge_attr, num_nodes, k=5):\n \"\"\"\n Find fixed k-nearest neighbours, return corresponding edge connectivities\n \"\"\"\n\n # Loop over each node\n for i in range(num_nodes):\n\n # Loop over each other node, take distances\n for j in range(num_nodes):\n\n # Graph connectivity: (~ adjacency matrix)\n edge_index = torch.tensor(np.zeros((2, num_edges)), dtype=torch.long)\n'''\n\n","sub_path":"iceid/graphio.py","file_name":"graphio.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"378103792","text":"n = int(input(''))\n\n# is modulo 0 bij het delen van n door alle getallen van 2 tot getal zelf\n# zolang de modulo verschillend van 0 is, zijn we goed bezig.\ni = 2\nwhile (n // i != n/i) and i < (n // 2) + 1:\n i += 1\n\nif i == (n // 2 + 1):\n print(str(n) + ' is een priemgetal')\nelse:\n print(str(n) + ' is geen priemgetal')","sub_path":"Oefeningen/07b - Iteraties - WhileLus/priemgetallen_verbeterde versie.py","file_name":"priemgetallen_verbeterde versie.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"416690343","text":"import os\n\npath = 'C:/Users/vinaykub/Documents/Linux'\n\ndef files(path):\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)):\n yield file\n\nfor file in files(path):\n print (file)\n","sub_path":"IOT Hackathon Final/read_directory.py","file_name":"read_directory.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"504875678","text":"import os\nimport json\nimport time\nfrom prettytable import PrettyTable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\nfrom deit_model import deit_tiny_patch16_224\nfrom xception_py import Xception\n\n\ndef confusion_matrix(preds, labels, conf_matrix):\n # preds = torch.argmax(preds, 1)\n for p, t in zip(preds, labels):\n conf_matrix[p, t] += 1\n return conf_matrix\n\ndef summary_table(matrix,num_classes,labels):\n # calculate accuracy\n sum_TP = 0\n for i in range(num_classes):\n sum_TP += matrix[i, i]\n acc = round(sum_TP / np.sum(matrix),5)\n print(\"the model accuracy is \", acc)\n # precision, recall, specificity\n table = PrettyTable()\n table.field_names = [\"\", \"Precision\", \"Recall\", \"Specificity\",\"F1-Score\"]\n sum1 = sum2 = sum3 = sum4 = 0;\n for i in range(num_classes):\n TP = matrix[i, i]\n FP = np.sum(matrix[i, :]) - TP\n FN = np.sum(matrix[:, i]) - TP\n TN = np.sum(matrix) - TP - FP - FN\n Precision = round(TP / (TP + FP), 3) if TP + FP != 0 else 0.\n Recall = round(TP / (TP + FN), 3) if TP + FN != 0 else 0.\n Specificity = round(TN / (TN + FP), 3) if TN + FP != 0 else 0.\n F1_score = round(2*((Precision*Recall)/(Precision+Recall)),3)if Precision*Recall != 0 else 0.\n table.add_row([labels[i], Precision, Recall, Specificity, F1_score])\n sum1 = sum1+Precision;\n sum2 = sum2+Recall;\n sum3 += Specificity;\n sum4 += F1_score;\n table.add_row([\"Average\", round(sum1/num_classes,3), round(sum2/num_classes,3),\n round(sum3/num_classes,3), round(sum4/num_classes,3)])\n with open('table_net_test.txt', 'a+') as f:\n f.write(str(table))\n f.close()\n print(table)\n\ndef plot_conf(matrix,num_classes,labels):\n matrix = matrix\n plt.imshow(matrix, cmap=plt.cm.Blues)\n # 设置x轴坐标label\n plt.xticks(range(num_classes), labels, rotation=45)\n # 设置y轴坐标label\n plt.yticks(range(num_classes), labels)\n # 显示colorbar\n plt.colorbar()\n plt.xlabel('True Labels')\n plt.ylabel('Predicted Labels')\n plt.title('Confusion matrix')\n\n # 在图中标注数量/概率信息\n thresh = matrix.max() / 2\n for x in range(num_classes):\n for y in range(num_classes):\n # 注意这里的matrix[y, x]不是matrix[x, y]\n # info = int(matrix[y, x]),round(int(matrix[y, x])/int(matrix.sum(axis=0)[x]),2)\n info = int(matrix[y, x])\n plt.text(x, y, info,\n verticalalignment='center',\n horizontalalignment='center',\n color=\"white\" if int(matrix[y, x]) > thresh else \"black\")\n plt.tight_layout()\n plt.savefig('./conf_net_test.jpg')\n plt.show()\n\ndef main():\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"using {} device.\".format(device))\n # save_path = './Vit-100epoch.pth'\n data_transform = {\n \"train\": transforms.Compose([transforms.Resize((224, 224)),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()]),\n \"val\": transforms.Compose([transforms.Resize((224, 224)), # cannot 224, must (224, 224)\n transforms.ToTensor()]),\n \"test\": transforms.Compose([transforms.Resize((224, 224)), # cannot 224, must (224, 224)\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])}\n\n # data_root = os.path.abspath(os.path.join(os.getcwd())) # get data root path\n # image_path = os.path.join(data_root, \"data_set\", \"cell_data\") # flower data set path\n image_path = 'E:\\实验\\第二篇实验\\Cric Database\\\\11 classes\\cell_data';\n assert os.path.exists(image_path), \"{} path does not exist.\".format(image_path)\n\n test_dataset = datasets.ImageFolder(root=os.path.join(image_path, \"test\"),\n transform=data_transform[\"test\"])\n test_num = len(test_dataset)\n flower_list = test_dataset.class_to_idx\n cla_dict = dict((val, key) for key, val in flower_list.items())\n cla_dict2 = {\"0\": \"1\", \"1\": \"2\", \"2\": \"3\", \"3\": \"4\", \"4\": \"5\", \"5\": \"6\", \"6\": \"7\", \"7\": \"8\", \"8\": \"9\", \"9\": \"10\",\n \"10\": \"11\"}\n batch_size = 32\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers 线程数\n print('Using {} dataloader workers every process'.format(nw))\n\n test_loader = torch.utils.data.DataLoader(test_dataset,\n shuffle=True,\n num_workers=nw)\n\n print(\"using {} images for test.\".format(test_num))\n # efficient_transformer = Linformer(\n # dim=128,\n # seq_len=49 + 1, # 7x7 patches + 1 cls-token\n # depth=12,\n # heads=8,\n # k=64\n # )\n\n # net =ViT(\n # dim=128,\n # image_size=224,\n # patch_size=32,\n # num_classes=5,\n # transformer=efficient_transformer,\n # channels=3,\n # )\n\n # net = ViT(\n # image_size=224,\n # patch_size=32,\n # num_classes=5,\n # dim=128,\n # depth=6,\n # heads=16,\n # mlp_dim=3000,\n # dropout=0.1,\n # emb_dropout=0.1\n # )\n\n # Inception-v3\n net = deit_tiny_patch16_224(num_classes=11, pretrained=False)\n\n weights_path = \"./deit_xception.pth\"\n net.load_state_dict(torch.load(weights_path, map_location=device))\n net.to(device)\n # loss_function = nn.CrossEntropyLoss()\n # pata = list(net.parameters())\n # optimizer = optim.Adam(net.parameters(), lr=0.0002)\n # 输出模型大小和内存消耗\n # input = torch.randn(1, 3, 224, 224)\n # flops, params = profile(net, (input.to(device),))\n # print('flops: ', flops, 'params: ', params*4/1000/1000)\n # summary(net, (3, 224, 224), device=device.type)\n\n\n epochs = 1\n # t3 = time.time()\n for epoch in range(epochs):\n t1 = time.perf_counter()\n conf_matrix = torch.zeros(11, 11) # 混淆矩阵\n net.eval()\n acc = 0.0 # accumulate accurate number / epoch\n with torch.no_grad():\n test_bar = tqdm(test_loader, colour='green')\n for test_data in test_bar:\n test_images, test_labels = test_data\n outputs = net(test_images.to(device))\n predict_y = torch.max(outputs, dim=1)[1]\n acc += torch.eq(predict_y, test_labels.to(device)).sum().item()\n conf_matrix = confusion_matrix(predict_y, test_labels, conf_matrix)\n\n\n # t6 = time.time()\n val_accurate = acc / test_num\n t2 = time.perf_counter() - t1;\n\n print(\"测试准确率:\",val_accurate)\n print()\n print(\"总测试时间:\", t2)\n with open('acc_time_net_test.txt', 'a+', encoding='utf-8') as f:\n f.write(\"测试准确率:\" + str(val_accurate) + '\\n')\n f.write(\"总测试时间:\" + str(t2) + '\\n')\n f.close()\n #绘制混淆矩阵 输出参数\n cm = np.array(conf_matrix)\n # con_mat_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # 归一化\n con_mat_norm = np.around(cm, decimals=3)\n # === plot ===\n labels = [label for _, label in cla_dict2.items()]\n plot_conf(con_mat_norm, 11, labels)\n # 输出评价指标\n summary_table(con_mat_norm, 11, labels);\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":7651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"643329787","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@author: zhaogao\n@license: (C) Copyright 2013-2018.\n@contact: 449628536@qq.com\n@software: learn-py\n@file: len160_以编程方式定义类.py\n@time: 12/04/2018 9:12 PM\n'''\n\n\n# 你在写一段代码,最终需要创建���个新的类对象。你考虑将类的定义源代码以字符 串的形式发布出去。并且使用函数比如 exec() 来执行它,但是你想寻找一个更加优雅 的解决方案\n# 你可以使用函数 types.new class() 来初始化新的类对象。你需要做的只是提供 类的名字、父类元组、关键字参数,以及一个用成员变量填充类字典的回调函数\n\n# 从部件手动创建类的示例\n\n# 方法\ndef __init__(self, name, shares, price):\n self.name = name\n self.shares = shares\n self.price = price\n\n\ndef cost(self):\n return self.shares * self.price\n\n\ncls_dict = {\n '__init__': __init__,\n 'cost': cost,\n}\n\n# 穿件一个类\nimport types\n\n# 第四个参数 它是一个用来接受类命名空间的映射对象的函 数。通常这是一个普通的字典,但是它实际上是 prepare () 方法返回的任意对象。这个函数需要使用上面演示的 update() 方法给命名 空间增加内容\nStock = types.new_class('Stock', (), {}, lambda ns: ns.update(cls_dict))\n# 每次当一个类被定义后,它的 module 属性包含定义 它的模块名。这个名字用于生成 repr () 方法的输出\nStock.__module__ = __name__\n\n# 这种方式会构建一个普通的类对象,并且按照你的期望工作:\n\ns = Stock('ACME', 50, 91.1)\nprint(s)\nprint(s.cost())\n\n# 如果你想创建的类需要一个不同的元类,可以通过 types.new class() 第三个参 数传递给它\n\nimport abc\n\nStock = types.new_class('Stock', (), {'metaclass': abc.ABCMeta}, lambda ns: ns.update(cls_dict))\nStock.__module__ = __name__\n\nprint(Stock)\nprint(type(Stock))\n\n\n# 第三个参数还可以包含其他的关键字参数\n\ndef test_prepare_class(self):\n class Base(metaclass=self.Meta):\n pass\n\n Spam = types.new_class('Spam', (Base,), {'debug': True, 'typecheck': False}, lambda ns: ns.update(cls_dict))\n\n\n# 很多时候如果能构造新的类对象是很有用的。有个很熟悉的例子是调用collections.namedtuple() 函数\n\nfrom collections import namedtuple\n\nStock3 = namedtuple('Stock3', ['name', 'shares', 'price'])\nprint(Stock3)\n\n# namedtuple() 使用 exec() 而不是上面介绍的技术。但是,下面通过一个简单的变 化,我们直接创建一个类\n\nimport operator, types, sys\n\n\ndef named_tuple(classname, fieldnames):\n # 填充字段属性访问器的字典\n cls_dict = {name: property(operator.itemgetter(n)) for n, name in enumerate(fieldnames)}\n\n # 创建一个__new__函数并添加到类字典中\n def __new__(cls, *args):\n if len(args) != len(fieldnames):\n raise TypeError('expected {} arguments'.format(len(fieldnames)))\n return tuple.__new__(cls, args)\n\n cls_dict['__new__'] = __new__\n\n # 创建一个类\n cls = types.new_class(classname, (tuple,), {}, lambda ns: ns.update(cls_dict))\n\n # 将模块设置为主叫方的模块\n # 通过调用 sys. getframe() 来获取调用者的模块名\n cls.__module__ = sys._getframe(1).f_globals['__name__']\n return cls\n\n\n# 下面的例子演示了前面的代码是如何工作的\n\nPoint = named_tuple('Point', ['x', 'y'])\nprint(Point)\np = Point(4, 5)\nprint(len(p))\nprint(p.x)\nprint(p.y)\n# p.x = 2 # AttributeError: can't set attribute\nprint('%s %s' % p)\n\n# 通过直接实例化一个元类来直接创建一个类\n\nStock4 = type('Stock4', (), cls_dict)\n\n# 这种方法的问题在于它忽略了一些关键步骤,比如对于元类中 prepare () 方法 的调用。通过使用 types.new class() ,你可以保证所有的必要初始化步骤都能得到 执行。比如,types.newclass()第四个参数的回调函数接受 prepare()方法返回 的映射对象\n\n\n# 仅仅只是想执行准备步骤,可以使用 types.prepare class()\n\nimport types\n\nmetaclass, kwargs, ns = types.prepare_class('Stock5', (), {'metaclass': type})\n","sub_path":"cook/len160_以编程方式定义类.py","file_name":"len160_以编程方式定义类.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"472451793","text":"import codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\n\n# ----------------------------------------------------------------------------\n\n\ndef read(*parts):\n with codecs.open(os.path.join(here, *parts), 'r') as fp:\n return fp.read()\n\n \ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n \n \nhere = os.path.abspath(os.path.dirname(__file__)) \nlong_description = read(\"README.rst\")\n\n \n# ---------------------------------------------------------------------------- \n \n\nsetup(\n name=\"mayapip\",\n version=find_version(\"src\", \"mayapip\", \"__init__.py\"),\n author=\"Robert Joosten\",\n author_email=\"rwm.joosten@gmail.com\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\",\n exclude=[\"contrib\", \"docs\", \"tests*\", \"tasks\"],\n ),\n license=\"MIT\",\n description=\"The Maya wrapper to pip for installing Python packages\",\n long_description=long_description,\n keywords=\"pip maya\",\n entry_points={\n \"console_scripts\": [\n \"mayapip=mayapip._internal:main\",\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"199938203","text":"\"\"\"empty message\n\nRevision ID: a130d21b2127\nRevises: 59ab9931e64a\nCreate Date: 2018-09-06 14:48:32.358148\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a130d21b2127'\ndown_revision = '59ab9931e64a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('cate_to_post',\n sa.Column('category_id', sa.Integer(), nullable=True),\n sa.Column('post_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['category_id'], ['catelog.id'], ),\n sa.ForeignKeyConstraint(['post_id'], ['post.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('cate_to_post')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/a130d21b2127_.py","file_name":"a130d21b2127_.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"368170239","text":"from PIL import Image, ImageDraw\nfrom PIL.ImageQt import ImageQt\nimport os\ndef sepia(image):\n draw = ImageDraw.Draw(image)\n width = image.size[0]\n height = image.size[1]\n pix = image.load()\n\n for i in range(width):\n for j in range(height):\n a = pix[i, j][0]\n b = pix[i, j][1]\n c = pix[i, j][2]\n S = (a + b + c)\n c = c\n b = c * 2\n a = c * 3\n if (a > 255):\n a = 255\n if (b > 255):\n b = 255\n if (c > 255):\n c = 255\n draw.point((i, j), (a, b, c))\n\n return image\n\ndef negative(image):\n draw = ImageDraw.Draw(image)\n width = image.size[0]\n height = image.size[1]\n pix = image.load()\n\n for i in range(width):\n for j in range(height):\n a = pix[i, j][0]\n b = pix[i, j][1]\n c = pix[i, j][2]\n draw.point((i, j), (255 - a, 255 - b, 255 - c))\n\n return image\n\ndef gray(image):\n draw = ImageDraw.Draw(image)\n width = image.size[0]\n height = image.size[1]\n pix = image.load()\n for i in range(width):\n for j in range(height):\n a = pix[i, j][0]\n b = pix[i, j][1]\n c = pix[i, j][2]\n # Среднее значение\n S = (a + b + c) // 3\n draw.point((i, j), (S, S, S))\n return image\n\ndef get_bits(image):\n try:\n img = open(image, \"rb\")\n b_image = img.read()\n img.close()\n except:\n b_image = None\n return b_image\n\ndef set_bits(data,name):\n file = open(name,'wb')\n file.write(data)\n file.close()\n\ndef mini_picture(name,save_as,h = 250,w = 250):\n image = Image.open(name)\n image = image.resize((h, w), Image.ANTIALIAS)\n image.save(save_as)\n","sub_path":"Client/repo/Filter_pack.py","file_name":"Filter_pack.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"604254354","text":"from rest_framework import serializers\n\nfrom goods.serializers import GoodsSerializer\nfrom orders.models import Order, OrderDetail\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n def to_representation(self, instance): #将instance实例(每个订单对象)转换为字典\n print(\"instance=======>\",instance)\n order_dict = super().to_representation(instance) #调用父类方法,将对象拆分为字典对象\n print(\"instance========>\",instance)\n orderdetails = instance.orderdetail_set.all() #获取当前订单的关联的订单详情对象\n details = OrderDetailSerializer(orderdetails,many=True).data\n order_dict[\"order_goods_info\"] = details #给用户动态添加 属性\n return order_dict\n\n\n class Meta:\n model = Order\n fields = \"__all__\"\n\nclass OrderDetailSerializer(serializers.ModelSerializer):\n\n o_goods = GoodsSerializer() #对c_goods属性序列化进行特殊处理,交给GoodsSerializer序列化\n\n class Meta:\n model = OrderDetail\n fields = \"__all__\"","sub_path":"Dianshang_1/orders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"601434643","text":"# coding: utf-8\n\nimport json\nimport six\nfrom six import StringIO\n\nfrom django.core.management import call_command\nfrom django.views.generic import TemplateView, View\nfrom django.http import JsonResponse\n\nif six.PY2:\n import ast_utils\nelse:\n from . import ast_utils\n\n\nclass IndexView(TemplateView):\n template_name = \"main.html\"\n\n\nclass EvalView(View):\n\n def post(self, request):\n to_eval = request.POST.get(\"toEval\")\n default_namespace_value = json.dumps({'functions': []})\n namespace = json.loads(request.session.get('namespace', default_namespace_value))\n out = StringIO()\n if ast_utils.isFunction(to_eval):\n namespace['functions'].append(to_eval)\n call_command(\"eval\", '', json.dumps(namespace), stdout=out)\n else:\n call_command(\"eval\", to_eval, json.dumps(namespace), stdout=out)\n values = json.loads(out.getvalue())\n out, namespace = values['out'], values['namespace']\n err = values['error']\n request.session['namespace'] = namespace\n return JsonResponse({'out': out, 'err': err})\n","sub_path":"TryPython/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"583654571","text":"# module_tacacs_plus.py\n# \n# Copyright 2015 Daniel Mende \n#\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of the nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport hashlib\nimport os\nimport struct\nimport tempfile\nimport threading\nimport time\n\nimport dnet\nimport dpkt\n\ngobject = None\ngtk = None\nurwid = None\n\nTACACS_PLUS_PORT = 49\nTACACS_PLUS_VERSION_MAJOR = 0xc\nTACACS_PLUS_VERSION_MINOR_DEFAULT = 0x0\nTACACS_PLUS_VERSION_MINOR_ONE = 0x1\n\nclass tacacs_plus_header(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ \n #~ +----------------+----------------+----------------+----------------+\n #~ |major | minor | | | |\n #~ |version| version| type | seq_no | flags |\n #~ +----------------+----------------+----------------+----------------+\n #~ | |\n #~ | session_id |\n #~ +----------------+----------------+----------------+----------------+\n #~ | |\n #~ | length |\n #~ +----------------+----------------+----------------+----------------+\n\n TYPE_AUTHEN = 0x01\n TYPE_AUTHOR = 0x02\n TYPE_ACCT = 0x03\n \n type_to_str = { 0x01 : \"TYPE_AUTHEN\",\n 0x02 : \"TYPE_AUTHOR\",\n 0x03 : \"TYPE_ACCT\"\n }\n\n FLAGS_UNENCRYPTED = 0x01\n \n flags_to_str = { 0x00 : \"FLAGS_NONE\",\n 0x01 : \"FLAGS_UNENCRYPTED\"\n }\n\n def __init__(self, version_minor=None, _type=None, seq_no=None, flags=None, session_id=None):\n self.version_minor = version_minor\n self._type = _type\n self.seq_no = seq_no\n self.flags = flags\n self.session_id = session_id\n \n def __repr__(self):\n try:\n return \"TACACS+ Header: VERSION(%d) %s SEQNO(%x) %s SESSIONID(%x)\" % \\\n (self.version_minor, self.type_to_str[self._type], self.seq_no, self.flags_to_str[self.flags], self.session_id)\n except:\n return \"Invalid TACACS+ Header\"\n def render(self, data):\n return struct.pack(\"!BBBBII\", TACACS_PLUS_VERSION_MAJOR << 4 + self.version_minor,\n self._type,\n self.seq_no,\n self.flags,\n self.session_id,\n len(data)) + data\n \n def parse(self, data):\n (ver, self._type, self.seq_no, self.flags, self.session_id, self.length) = struct.unpack(\"!BBBBII\", data[:12])\n self.version_minor = ver & 0x0F\n return data[12:]\n\nclass tacacs_plus_authentication_start(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | action | priv_lvl | authen_type | service |\n #~ +----------------+----------------+----------------+----------------+\n #~ | user len | port len | rem_addr len | data len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | user ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | port ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | rem_addr ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | data...\n #~ +----------------+----------------+----------------+----------------+\n\n ACTION_AUTHEN_LOGIN = 0x01\n ACTION_AUTHEN_CHPASS = 0x02\n ACTION_AUTHEN_SENDPASS = 0x03\n ACTION_AUTHEN_SENDAUTH = 0x04\n \n action_to_str = { 0x01 : \"ACTION_AUTHEN_LOGIN\",\n 0x02 : \"ACTION_AUTHEN_CHPASS\",\n 0x03 : \"ACTION_AUTHEN_SENDPASS\",\n 0x04 : \"ACTION_AUTHEN_SENDAUTH\"\n }\n \n PRIV_LVL_MAX = 0x0f\n PRIV_LVL_ROOT = 0x0f\n PRIV_LVL_USER = 0x01\n PRIV_LVL_MIN = 0x00\n \n priv_to_str = { 0x0f : \"PRIV_LVL_ROOT\",\n 0x01 : \"PRIV_LVL_USER\",\n 0x00 : \"PRIV_LVL_MIN\"\n }\n \n TYPE_ASCII = 0x01\n TYPE_PAP = 0x02\n TYPE_CHAP = 0x03\n TYPE_ARAP = 0x04\n TYPE_MSCHAP = 0x05\n\n type_to_str = { 0x01 : \"TYPE_ASCII\",\n 0x02 : \"TYPE_PAP\",\n 0x03 : \"TYPE_CHAP\",\n 0x04 : \"TYPE_ARAP\",\n 0x05 : \"TYPE_MSCHAP\"\n }\n\n SVC_NONE = 0x00\n SVC_LOGIN = 0x01\n SVC_ENABLE = 0x02\n SVC_PPP = 0x03\n SVC_ARAP = 0x04\n SVC_PT = 0x05\n SVC_RCMD = 0x06\n SVC_X25 = 0x07\n SVC_NASI = 0x08\n SVC_FWPROXY = 0x09\n\n service_to_str = { 0x00 : \"SVC_NONE\",\n 0x01 : \"SVC_LOGIN\",\n 0x02 : \"SVC_ENABLE\",\n 0x03 : \"SVC_PPP\",\n 0x04 : \"SVC_ARAP\",\n 0x05 : \"SVC_PT\",\n 0x06 : \"SVC_RCMD\",\n 0x07 : \"SVC_X25\",\n 0x08 : \"SVC_NASI\",\n 0x09 : \"SVC_FWPROXY\"\n }\n\n\n def __init__(self, action=None, priv_lvl=None, authen_type=None, service=None, user=None, port=None, rem_addr=None, data=None):\n self.action = action\n self.priv_lvl = priv_lvl\n self.authen_type = authen_type\n self.service = service\n self.user = user\n self.port = port\n self.rem_addr = rem_addr\n self.data = data\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Authentication Start: %s %s %s %s\" % \\\n (self.action_to_str[self.action], self.priv_to_str[self.priv_lvl], self.type_to_str[self.authen_type], self.service_to_str[self.service])\n if len(self.user) > 0:\n ret += \" USER(%s)\" % self.user\n if len(self.port) > 0:\n ret += \" PORT(%s)\" % self.port\n if len(self.rem_addr) > 0:\n ret += \" ADDR(%s)\" % self.rem_addr\n if len(self.data) > 0:\n ret += \" DATA(%s)\" % self.data\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n def render(self):\n return struct.pack(\"!BBBBBBBB\", self.action, \n self.priv_lvl,\n self.authen_type,\n self.service,\n len(self.user),\n len(self.port),\n len(self.rem_addr),\n len(self.data)) + \\\n self.user + \\\n self.port + \\\n self.rem_addr + \\\n self.data\n \n def parse(self, data):\n (self.action, self.priv_lvl, self.authen_type, self.service,\n self.user_len, self.port_len, self.rem_addr_len, self.data_len) = \\\n struct.unpack(\"!BBBBBBBB\", data[:8])\n self.user = data[8:8+self.user_len]\n self.port = data[8+self.user_len:8+self.user_len+self.port_len]\n self.rem_addr = data[8+self.user_len+self.port_len:8+self.user_len+self.port_len+self.rem_addr_len]\n self.data = data[8+self.user_len+self.port_len+self.rem_addr_len:8+self.user_len+self.port_len+self.rem_addr_len+self.data_len]\n return data[8+self.user_len+self.port_len+self.rem_addr_len+self.data_len:]\n\nclass tacacs_plus_authentication_reply(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | status | flags | server_msg len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | data len | server_msg ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | data ...\n #~ +----------------+----------------+\n \n STATUS_PASS = 0x01\n STATUS_FAIL = 0x02\n STATUS_GETDATA = 0x03\n STATUS_GETUSER = 0x04\n STATUS_GETPASS = 0x05\n STATUS_RESTART = 0x06\n STATUS_ERROR = 0x07\n STATUS_FOLLOW = 0x21\n \n status_to_str = { 0x01 : \"STATUS_PASS\",\n 0x02 : \"STATUS_FAIL\",\n 0x03 : \"STATUS_GETDATA\",\n 0x04 : \"STATUS_GETUSER\",\n 0x05 : \"STATUS_GETPASS\",\n 0x06 : \"STATUS_RESTART\",\n 0x07 : \"STATUS_ERROR\",\n 0x21 : \"STATUS_FOLLOW\"\n }\n \n FLAG_NOECHO = 0x01\n \n flags_to_str = { 0x00 : \"FLAG_NONE\",\n 0x01 : \"FLAG_NOECHO\"\n }\n \n def __init__(self, status=None, flags=None, server_msg=None, data=None):\n self.status = status\n self.flags = flags\n self.server_msg = server_msg\n self.data = data\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Authentication Reply: %s %s\" % (self.status_to_str[self.status], self.flags_to_str[self.flags])\n if len(self.server_msg) > 0:\n ret += \" MSG(%s)\" % self.server_msg\n if len(self.data) > 0:\n ret += \" DATA(%s)\" % self.data\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n def render(self):\n return struct.pack(\"!BBHH\", self.status, self.flags, len(self.server_msg), len(self.data))\n \n def parse(self, data):\n (self.status, self.flags, self.server_msg_len, self.data_len) = struct.unpack(\"!BBHH\", data[:6])\n self.server_msg = data[6:6+self.server_msg_len]\n self.data = data[6+self.server_msg_len:6+self.server_msg_len+self.data_len]\n return data[6+self.server_msg_len+self.data_len:]\n\nclass tacacs_plus_authentication_continue(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | user_msg len | data len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | flags | user_msg ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | data ...\n #~ +----------------+\n \n FLAG_ABORT = 0x01\n \n flags_to_str = { 0x00 : \"FLAG_NONE\",\n 0x01 : \"FLAG_ABORT\"\n }\n \n def __init__(self, flags=None, user_msg=None, data=None):\n self.flags = flags\n self.user_msg = user_msg\n self.data = data\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Authentication Continue: %s\" % self.flags_to_str[self.flags]\n if len(self.user_msg) > 0:\n ret += \" MSG(%s)\" % self.user_msg\n if len(self.data) > 0:\n ret += \" DATA(%s)\" % self.data\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n def render(self):\n return struct.pack(\"!HHB\", len(self.user_msg), len(self.data), self.flags) + \\\n self.user_msg + self.data\n \n def parse(self, data):\n (self.user_msg_len, self.data_len, self.flags) = struct.unpack(\"!HHB\", data[:5])\n self.user_msg = data[5:5+self.user_msg_len]\n self.data = data[5+self.user_msg_len:5+self.user_msg_len+self.data_len]\n return data[5+self.user_msg_len+self.data_len:]\n\nclass tacacs_plus_authorization_request(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | authen_method | priv_lvl | authen_type | authen_service |\n #~ +----------------+----------------+----------------+----------------+\n #~ | user len | port len | rem_addr len | arg_cnt |\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 1 len | arg 2 len | ... | arg N len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | user ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | port ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | rem_addr ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 1 ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 2 ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg N ...\n #~ +----------------+----------------+----------------+----------------+\n \n METH_NOT_SET = 0x00\n METH_NONE = 0x01\n METH_KRB5 = 0x02\n METH_LINE = 0x03\n METH_ENABLE = 0x04\n METH_LOCAL = 0x05\n METH_TACACSPLUS = 0x06\n METH_GUEST = 0x08\n METH_RADIUS = 0x10\n METH_KRB4 = 0x11\n METH_RCMD = 0x20\n \n meth_to_str = { 0x00 : \"METH_NOT_SET\",\n 0x01 : \"METH_NONE\",\n 0x02 : \"METH_KRB5\",\n 0x03 : \"METH_LINE\",\n 0x04 : \"METH_ENABLE\",\n 0x05 : \"METH_LOCAL\",\n 0x06 : \"METH_TACACSPLUS\",\n 0x08 : \"METH_GUEST\",\n 0x10 : \"METH_RADIUS\",\n 0x11 : \"METH_KRB4\",\n 0x20 : \"METH_RCMD\"\n }\n \n PRIV_LVL_MAX = 0x0f\n PRIV_LVL_ROOT = 0x0f\n PRIV_LVL_USER = 0x01\n PRIV_LVL_MIN = 0x00\n \n priv_to_str = { 0x0f : \"PRIV_LVL_ROOT\",\n 0x01 : \"PRIV_LVL_USER\",\n 0x00 : \"PRIV_LVL_MIN\"\n }\n\n TYPE_ASCII = 0x01\n TYPE_PAP = 0x02\n TYPE_CHAP = 0x03\n TYPE_ARAP = 0x04\n TYPE_MSCHAP = 0x05\n\n type_to_str = { 0x01 : \"TYPE_ASCII\",\n 0x02 : \"TYPE_PAP\",\n 0x03 : \"TYPE_CHAP\",\n 0x04 : \"TYPE_ARAP\",\n 0x05 : \"TYPE_MSCHAP\"\n }\n\n SVC_NONE = 0x00\n SVC_LOGIN = 0x01\n SVC_ENABLE = 0x02\n SVC_PPP = 0x03\n SVC_ARAP = 0x04\n SVC_PT = 0x05\n SVC_RCMD = 0x06\n SVC_X25 = 0x07\n SVC_NASI = 0x08\n SVC_FWPROXY = 0x09\n\n service_to_str = { 0x00 : \"SVC_NONE\",\n 0x01 : \"SVC_LOGIN\",\n 0x02 : \"SVC_ENABLE\",\n 0x03 : \"SVC_PPP\",\n 0x04 : \"SVC_ARAP\",\n 0x05 : \"SVC_PT\",\n 0x06 : \"SVC_RCMD\",\n 0x07 : \"SVC_X25\",\n 0x08 : \"SVC_NASI\",\n 0x09 : \"SVC_FWPROXY\"\n }\n \n def __init__(self, authen_method=None, priv_lvl=None, authen_type=None, authen_service=None, user=None, port=None, rem_addr=None, args=[]):\n self.authen_method = authen_method\n self.priv_lvl = priv_lvl\n self.authen_type = authen_type\n self.authen_service = authen_service\n self.user = user\n self.port = port\n self.rem_addr = rem_addr\n self.args = args\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Autorization Request: %s %s %s %s\" % (self.meth_to_str[self.authen_method], self.priv_to_str[self.priv_lvl], \\\n self.type_to_str[self.authen_type], self.service_to_str[self.authen_service])\n if len(self.user) > 0:\n ret += \" USER(%s)\" % self.user\n if len(self.port) > 0:\n ret += \" PORT(%s)\" % self.port\n if len(self.rem_addr) > 0:\n ret += \" ADDR(%s)\" % self.rem_addr\n if len(self.args) > 0:\n ret += \" ARGS( %s )\" % self.args\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n def render(self):\n ret = struct.pack(\"!BBBBBBBB\", self.authen_method, self.priv_lvl, self.authen_type, self.authen_service, \\\n len(self.user), len(self.port), len(self.rem_addr), len(self.args))\n for i in self.args:\n ret += struct.pack(\"!B\", len(i))\n ret += self.user + self.port + self.rem_addr\n for i in self.args:\n ret += i\n return ret\n \n def parse(self, data):\n self.args_len = []\n (self.authen_method, self.priv_lvl, self.authen_type, self.authen_service, \\\n self.user_len, self.port_len, self.rem_addr_len, self.arg_cnt) = \\\n struct.unpack(\"!BBBBBBBB\", data[:8])\n data = data[8:]\n for i in xrange(self.arg_cnt):\n arg_len, = struct.unpack(\"!B\", data[:1])\n data = data[1:]\n self.args_len.append(arg_len)\n self.user = data[:self.user_len]\n self.port = data[self.user_len:self.user_len+self.port_len]\n self.rem_addr = data[self.user_len+self.port_len:self.user_len+self.port_len+self.rem_addr_len]\n data = data[self.user_len+self.port_len+self.rem_addr_len:]\n for i in self.args_len:\n self.args.append(data[:i])\n data = data[i:]\n return data\n\nclass tacacs_plus_authorization_response(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | status | arg_cnt | server_msg len |\n #~ +----------------+----------------+----------------+----------------+\n #~ + data len | arg 1 len | arg 2 len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | ... | arg N len | server_msg ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | data ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 1 ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 2 ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg N ...\n #~ +----------------+----------------+----------------+----------------+\n \n STATUS_PASS_ADD = 0x01\n STATUS_PASS_REPL = 0x02\n STATUS_FAIL = 0x10\n STATUS_ERROR = 0x11\n STATUS_FOLLOW = 0x21\n \n status_to_str = { 0x01 : \"STATUS_PASS_ADD\",\n 0x02 : \"STATUS_PASS_REPL\",\n 0x10 : \"STATUS_FAIL\",\n 0x11 : \"STATUS_ERROR\",\n 0x21 : \"STATUS_FOLLOW\"\n }\n \n def __init__(self, status=None, server_msg=None, data=None, args=[]):\n self.status = status\n self.server_msg = server_msg\n self.data = data\n self.args = args\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Autorization Response: %s\" % (self.status_to_str[self.status])\n if len(self.server_msg) > 0:\n ret += \" MSG(%s)\" % self.server_msg\n if len(self.data) > 0:\n ret += \" DATA(%s)\" % self.data\n if len(self.args) > 0:\n ret += \" ARGS( %s )\" % self.args\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n def render(self):\n ret = struct.pack(\"!BBHH\", self.status, len(self.args), len(self.server_msg), len(self.data))\n for i in self.args:\n ret += struct.pack(\"!B\", len(i))\n ret += self.server_msg + self.data\n for i in self.args:\n ret += i\n return ret\n \n def parse(self, data):\n self.args_len = []\n (self.status, self.arg_cnt, self.server_msg_len, self.data_len) = struct.unpack(\"!BBHH\", data[:6])\n data = data[6:]\n for i in xrange(self.arg_cnt):\n arg_len, = struct.unpack(\"!B\", data[:1])\n self.args_len.append(arg_len)\n data = data[1:]\n self.server_msg = data[:self.server_msg_len]\n self.data = data[self.server_msg_len:self.server_msg_len+self.data_len]\n data = data[self.server_msg_len+self.data_len:]\n for i in self.args_len:\n self.args.append(data[:i])\n data = data[i:]\n return data\n\nclass tacacs_plus_account_request(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | flags | authen_method | priv_lvl | authen_type |\n #~ +----------------+----------------+----------------+----------------+\n #~ | authen_service | user len | port len | rem_addr len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg_cnt | arg 1 len | arg 2 len | ... |\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg N len | user ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | port ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | rem_addr ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 1 ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg 2 ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | arg N ...\n #~ +----------------+----------------+----------------+----------------+\n \n FLAG_MORE = 0x01\n FLAG_START = 0x02\n FLAG_STOP = 0x04\n FLAG_WATCHDOG = 0x08\n \n def flags_to_str(self, flags):\n ret = []\n if flags & self.FLAG_MORE:\n ret.append(\"FLAG_MORE\")\n if flags & self.FLAG_START:\n ret.append(\"FLAG_START\")\n if flags & self.FLAG_STOP:\n ret.append(\"FLAG_STOP\")\n if flags & self.FLAG_WATCHDOG:\n ret.append(\"FLAG_WATCHDOG\")\n return \"|\".join(ret)\n \n METH_NOT_SET = 0x00\n METH_NONE = 0x01\n METH_KRB5 = 0x02\n METH_LINE = 0x03\n METH_ENABLE = 0x04\n METH_LOCAL = 0x05\n METH_TACACSPLUS = 0x06\n METH_GUEST = 0x08\n METH_RADIUS = 0x10\n METH_KRB4 = 0x11\n METH_RCMD = 0x20\n \n meth_to_str = { 0x00 : \"METH_NOT_SET\",\n 0x01 : \"METH_NONE\",\n 0x02 : \"METH_KRB5\",\n 0x03 : \"METH_LINE\",\n 0x04 : \"METH_ENABLE\",\n 0x05 : \"METH_LOCAL\",\n 0x06 : \"METH_TACACSPLUS\",\n 0x08 : \"METH_GUEST\",\n 0x10 : \"METH_RADIUS\",\n 0x11 : \"METH_KRB4\",\n 0x20 : \"METH_RCMD\"\n }\n \n PRIV_LVL_MAX = 0x0f\n PRIV_LVL_ROOT = 0x0f\n PRIV_LVL_USER = 0x01\n PRIV_LVL_MIN = 0x00\n \n priv_to_str = { 0x0f : \"PRIV_LVL_ROOT\",\n 0x01 : \"PRIV_LVL_USER\",\n 0x00 : \"PRIV_LVL_MIN\"\n }\n\n TYPE_ASCII = 0x01\n TYPE_PAP = 0x02\n TYPE_CHAP = 0x03\n TYPE_ARAP = 0x04\n TYPE_MSCHAP = 0x05\n\n type_to_str = { 0x01 : \"TYPE_ASCII\",\n 0x02 : \"TYPE_PAP\",\n 0x03 : \"TYPE_CHAP\",\n 0x04 : \"TYPE_ARAP\",\n 0x05 : \"TYPE_MSCHAP\"\n }\n\n SVC_NONE = 0x00\n SVC_LOGIN = 0x01\n SVC_ENABLE = 0x02\n SVC_PPP = 0x03\n SVC_ARAP = 0x04\n SVC_PT = 0x05\n SVC_RCMD = 0x06\n SVC_X25 = 0x07\n SVC_NASI = 0x08\n SVC_FWPROXY = 0x09\n\n service_to_str = { 0x00 : \"SVC_NONE\",\n 0x01 : \"SVC_LOGIN\",\n 0x02 : \"SVC_ENABLE\",\n 0x03 : \"SVC_PPP\",\n 0x04 : \"SVC_ARAP\",\n 0x05 : \"SVC_PT\",\n 0x06 : \"SVC_RCMD\",\n 0x07 : \"SVC_X25\",\n 0x08 : \"SVC_NASI\",\n 0x09 : \"SVC_FWPROXY\"\n }\n \n def __init__(self, flags=None, authen_method=None, priv_lvl=None, authen_type=None, authen_service=None, user=None, port=None, rem_addr=None, args=[]):\n self.flags = flags\n self.authen_method = authen_method\n self.priv_lvl = priv_lvl\n self.authen_type = authen_type\n self.authen_service = authen_service\n self.user = user\n self.port = port\n self.rem_addr = rem_addr\n self.args = args\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Account Request: %s %s %s %s %s\" % (self.flags_to_str(self.flags), self.meth_to_str[self.authen_method], \\\n self.priv_to_str[self.priv_lvl], self.type_to_str[self.authen_type], self.service_to_str[self.authen_service])\n if len(self.user) > 0:\n ret += \" USER(%s)\" % self.user\n if len(self.port) > 0:\n ret += \" PORT(%s)\" % self.port\n if len(self.rem_addr) > 0:\n ret += \" ADDR(%s)\" % self.rem_addr\n if len(self.args) > 0:\n ret += \" ARGS( %s )\" % self.args\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n def render(self):\n ret = struct.pack(\"!BBBBBBBBB\", self.flags, self.authen_method, self.priv_lvl, self.authen_type, \\\n self.authen_service, len(self.user), len(self.port), len(self.rem_addr), len(self.args))\n for i in self.args:\n ret += struct.pack(\"!B\", len(i))\n ret += self.user + self.port + self.rem_addr\n for i in self.args:\n ret += i\n return ret\n \n def parse(self, data):\n self.args_len = []\n (self.flags, self.authen_method, self.priv_lvl, self.authen_type, self.authen_service, \\\n self.user_len, self.port_len, self.rem_addr_len, self.arg_cnt) = \\\n struct.unpack(\"!BBBBBBBBB\", data[:9])\n data = data[9:]\n for i in xrange(self.arg_cnt):\n arg_len, = struct.unpack(\"!B\", data[:1])\n data = data[1:]\n self.args_len.append(arg_len)\n self.user = data[:self.user_len]\n self.port = data[self.user_len:self.user_len+self.port_len]\n self.rem_addr = data[self.user_len+self.port_len:self.user_len+self.port_len+self.rem_addr_len]\n data = data[self.user_len+self.port_len+self.rem_addr_len:]\n for i in self.args_len:\n self.args.append(data[:i])\n data = data[i:]\n return data\n \nclass tacacs_plus_account_response(object):\n #~ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8\n #~ +----------------+----------------+----------------+----------------+\n #~ | server_msg len | data len |\n #~ +----------------+----------------+----------------+----------------+\n #~ | status | server_msg ...\n #~ +----------------+----------------+----------------+----------------+\n #~ | data ...\n #~ +----------------+\n\n STATUS_SUCCESS = 0x01\n STATUS_ERROR = 0x02\n STATUS_FOLLOW = 0x21\n \n status_to_str = { 0x01 : \"STATUS_SUCCESS\",\n 0x02 : \"STATUS_ERROR\",\n 0x21 : \"STATUS_FOLLOW\"\n }\n \n def __init__(self, status=None, server_msg=None, data=None):\n self.status = status\n self.server_msg = server_msg\n self.data = data\n \n def __repr__(self):\n try:\n ret = \"TACACS+ Account Reponse: %s\" % self.status_to_str[self.status]\n if len(self.server_msg) > 0:\n ret += \" MSG(%s)\" % self.server_msg\n if len(self.data) > 0:\n ret += \" DATA(%s)\" % self.data\n return ret\n except:\n return \"Invalid TACACS+ Body\"\n \n \n def render(self):\n return struct.pack(\"!HHB\", len(self.server_msg), len(self.data), self.status)\n \n def parse(self, data):\n (self.server_msg_len, self.data_len, self.status) = struct.unpack(\"!HHB\", data[:5])\n self.server_msg = data[5:5+self.server_msg_len]\n self.data = data[5+self.server_msg_len:5+self.server_msg_len+self.data_len]\n return data[5+self.server_msg_len+self.data_len:]\n \nclass tacacs_plus_bf(threading.Thread):\n def __init__(self, parent, ident, header, body):\n self.parent = parent\n self._ident = ident\n self.header = header\n self.body = body\n self.obj = None\n threading.Thread.__init__(self)\n\n def run(self):\n predata = struct.pack(\"!I\", self.header.session_id)\n postdata = struct.pack(\"!BB\", TACACS_PLUS_VERSION_MAJOR << 4 + self.header.version_minor, self.header.seq_no)\n\n if self.parent.platform == \"Windows\":\n import bf\n else:\n from loki_bindings import bf\n l = self.parent.parent\n self.obj = bf.tacacs_bf()\n self.obj.num_threads = l.bruteforce_threads\n if not l.bruteforce:\n self.obj.mode = bf.MODE_WORDLIST\n self.obj.wordlist = l.wordlist\n else:\n if not l.bruteforce_full:\n self.obj.mode = bf.MODE_ALPHANUM\n else:\n self.obj.mode = bf.MODE_FULL\n self.obj.pre_data = predata\n self.obj.hash_data = postdata\n self.obj.ciphertext = self.body\n \n self.obj.start()\n while self.obj.running:\n time.sleep(0.01)\n if not self.obj.pw is None:\n if self.parent.ui == 'urw':\n self.parent.set_secret(self._ident, self.obj.pw)\n elif self.parent.ui == 'gtk':\n with gtk.gdk.lock:\n self.parent.set_secret(self._ident, self.obj.pw)\n\n self.parent.peers[self._ident]['crack'] = False\n if self.parent.ui == 'urw':\n self.parent.peerlist[self.parent.peers[self._ident]['iter']].contents[1][0].set_attr_map({None : \"button normal\"})\n elif self.parent.ui == 'gtk':\n with gtk.gdk.lock:\n self.parent.treestore[self.parent.peers[self._ident]['iter']][self.parent.STORE_CRACK_ROW] = False\n self.obj = None\n \n def quit(self):\n if not self.obj is None:\n self.obj.stop()\n self.obj = None\n \nclass mod_class(object):\n STORE_CON_ROW = 0\n STORE_CRYPT_ROW = 1\n STORE_CRACK_ROW = 2\n STORE_SECRET_ROW = 3\n \n def __init__(self, parent, platform, ui):\n self.parent = parent\n self.platform = platform\n self.ui = ui\n if self.ui == 'gtk':\n import gobject as gobject_\n import gtk as gtk_\n #import gtk.glade as glade_\n global gobject\n global gtk\n gobject = gobject_\n gtk = gtk_\n #gtk.glade = glade_\n else:\n import urwid as urwid_\n global urwid\n urwid = urwid_\n \n class _PopUpDialog(urwid.WidgetWrap):\n signals = ['close']\n def __init__(self, parent, ident):\n close_button = urwid.AttrMap(urwid.Button(\"OK\"), 'button normal', focus_map='reversed')\n urwid.connect_signal(close_button.base_widget, 'click',\n lambda button:self.base_widget._emit(\"close\"))\n pile = urwid.Pile([urwid.Edit(\"\", \"\\n\".join(parent.peers[ident]['log']), multiline=True), close_button])\n fill = urwid.Filler(pile)\n urwid.WidgetWrap.__init__(self, urwid.AttrWrap(fill, 'popbg'))\n self.PopUpDialog = _PopUpDialog\n\n class _PopUpButton(urwid.PopUpLauncher):\n def __init__(self, msg, parent, ident):\n urwid.PopUpLauncher.__init__(self, urwid.AttrMap(urwid.Button(msg), 'button normal', focus_map='reversed'))\n self.parent = parent\n self.ident = ident\n urwid.connect_signal(self.base_widget, 'click',\n lambda button: self.open_pop_up())\n\n def create_pop_up(self):\n pop_up = self.parent.PopUpDialog(self.parent, self.ident)\n urwid.connect_signal(pop_up, 'close',\n lambda button: self.close_pop_up())\n return pop_up\n\n def get_pop_up_parameters(self):\n return {'left':1, 'top':1, 'overlay_width':76, 'overlay_height':17}\n self.PopUpButton = _PopUpButton\n\n class _PwPopUpDialog(urwid.WidgetWrap):\n signals = ['close']\n def __init__(self, parent, ident):\n self.parent = parent\n self.ident = ident\n close_button = urwid.AttrMap(urwid.Button(\"OK\"), 'button normal', focus_map='reversed')\n urwid.connect_signal(close_button.base_widget, 'click', self.close)\n self.edit = urwid.Edit(\"TACACS+ Secret: \")\n pile = urwid.Pile([self.edit, close_button])\n fill = urwid.Filler(pile)\n urwid.WidgetWrap.__init__(self, urwid.AttrWrap(fill, 'popbg'))\n \n def close(self, button):\n self.parent.set_secret(self.ident, self.edit.get_edit_text().encode(\"ascii\"))\n self.base_widget._emit(\"close\")\n self.PwPopUpDialog = _PwPopUpDialog\n\n class _PwPopUpButton(urwid.PopUpLauncher):\n def __init__(self, parent, ident):\n urwid.PopUpLauncher.__init__(self, urwid.AttrMap(urwid.Button(\"Set Secret\"), 'button normal', focus_map='reversed'))\n self.parent = parent\n self.ident = ident\n urwid.connect_signal(self.base_widget, 'click',\n lambda button: self.open_pop_up())\n\n def create_pop_up(self):\n pop_up = self.parent.PwPopUpDialog(self.parent, self.ident)\n urwid.connect_signal(pop_up, 'close',\n lambda button: self.close_pop_up())\n return pop_up\n\n def get_pop_up_parameters(self):\n return {'left':1, 'top':1, 'overlay_width':30, 'overlay_height':4}\n self.PwPopUpButton = _PwPopUpButton\n self.name = \"tacacs+\"\n self.group = \"AAA\"\n #self.gladefile = \"/modules/module_tacacs_plus.glade\"\n if ui == 'gtk':\n self.treestore = gtk.TreeStore(str, bool, bool, str)\n self.thread = None\n\n def start_mod(self):\n self.peers = {}\n\n def shut_mod(self):\n if self.ui == 'gtk':\n pass\n elif self.ui == 'urw':\n for i in self.peerlist:\n self.peerlist.remove(i)\n if self.thread:\n if self.thread.is_alive():\n self.thread.quit()\n\n def get_root(self):\n treeview = gtk.TreeView(self.treestore)\n treeview.set_headers_visible(True)\n\n column = gtk.TreeViewColumn()\n column.set_title(\"CONNECTION\")\n render_text = gtk.CellRendererText()\n column.pack_start(render_text, expand=True)\n column.add_attribute(render_text, 'text', self.STORE_CON_ROW)\n treeview.append_column(column)\n \n column = gtk.TreeViewColumn()\n column.set_title(\"ENCRYPTED\")\n render_toggle = gtk.CellRendererToggle()\n column.pack_start(render_toggle, expand=False)\n column.add_attribute(render_toggle, \"active\", self.STORE_CRYPT_ROW)\n treeview.append_column(column)\n \n column = gtk.TreeViewColumn()\n column.set_title(\"CRACK\")\n render_toggle = gtk.CellRendererToggle()\n render_toggle.set_property('activatable', True)\n render_toggle.set_property('radio', True)\n render_toggle.connect('toggled', self.crack_toogled_callback, self.treestore)\n column.pack_start(render_toggle, expand=False)\n column.add_attribute(render_toggle, 'active', self.STORE_CRACK_ROW)\n treeview.append_column(column)\n \n column = gtk.TreeViewColumn()\n column.set_title(\"SECRET\")\n render_text = gtk.CellRendererText()\n render_text.set_property('editable', True)\n render_text.connect('edited', self.secret_edited_callback, self.treestore)\n column.pack_start(render_text, expand=True)\n column.add_attribute(render_text, 'text', self.STORE_SECRET_ROW)\n treeview.append_column(column)\n \n return treeview\n \n def secret_edited_callback(self, cell, path, new_text, model):\n ident = model[path][self.STORE_CON_ROW]\n model[path][self.STORE_SECRET_ROW] = new_text\n self.set_secret(ident, new_text)\n \n def crack_toogled_callback(self, cell, path, model):\n self.crack_activated(model[path], model[path][self.STORE_CON_ROW])\n \n def get_urw(self):\n peerlist = [ urwid.AttrMap(urwid.Text(\"Hosts seen:\"), 'header'), urwid.Divider() ]\n self.peerlist = urwid.SimpleListWalker(peerlist)\n peerlist = urwid.LineBox(urwid.ListBox(self.peerlist))\n return urwid.Pile([ peerlist ])\n \n def crack_activated(self, button, ident):\n if self.peers[ident]['crack_pkg'] is None:\n self.log(\"TACACS+: No suitable packet for cracking found, yet\")\n return\n if not self.peers[ident]['crack']:\n (header, body) = self.peers[ident]['crack_pkg']\n self.thread = tacacs_plus_bf(self, ident, header, body)\n self.thread.start()\n self.peers[ident]['crack'] = True\n if self.ui == 'gtk':\n button[self.STORE_CRACK_ROW] = True\n elif self.ui == \"urw\":\n self.peerlist[self.peers[ident]['iter']].contents[1][0].set_attr_map({None : \"button select\"})\n else:\n self.thread.quit()\n self.peers[ident]['crack'] = False\n if self.ui == 'gtk':\n button[self.STORE_CRACK_ROW] = False\n elif self.ui == \"urw\":\n self.peerlist[self.peers[ident]['iter']].contents[1][0].set_attr_map({None : \"button normal\"})\n\n def log(self, msg):\n self.__log(msg, self.name)\n\n def set_log(self, log):\n self.__log = log\n \n def set_ip(self, ip, mask):\n self.ip = dnet.ip_aton(ip)\n\n def set_dnet(self, dnet):\n self.dnet = dnet\n self.mac = dnet.eth.get()\n \n def get_tcp_checks(self):\n return (self.check_tcp, self.input_tcp)\n \n def check_tcp(self, tcp):\n if tcp.dport == TACACS_PLUS_PORT or tcp.sport == TACACS_PLUS_PORT:\n if len(tcp.data) > 12:\n return (True, False)\n return (False, False)\n \n def input_tcp(self, eth, ip, tcp, timestamp):\n if not eth.src == self.mac:\n header = tacacs_plus_header()\n data = header.parse(str(tcp.data))\n server = tcp.sport == TACACS_PLUS_PORT\n if server:\n ident = \"%s -> %s\" % (dnet.ip_ntoa(ip.dst), dnet.ip_ntoa(ip.src))\n else:\n ident = \"%s -> %s\" % (dnet.ip_ntoa(ip.src), dnet.ip_ntoa(ip.dst))\n \n if not ident in self.peers:\n encrypt = not (header.flags & tacacs_plus_header.FLAGS_UNENCRYPTED)\n #add to gui\n if self.ui == \"gtk\":\n row_iter = self.treestore.append( None, [ ident, encrypt, False, \"\" ] )\n elif self.ui == \"urw\":\n column = [ ('weight', 3, self.PopUpButton(\"%s - ENCRYPTED(%s)\" % (ident, str(encrypt)), self, ident)) ]\n if not header.flags & tacacs_plus_header.FLAGS_UNENCRYPTED:\n column.append( self.parent.menu_button(\"Crack\", self.crack_activated, ident) )\n column.append( self.PwPopUpButton(self, ident) )\n column = urwid.Columns(column)\n self.peerlist.append(column)\n row_iter = self.peerlist.index(column)\n \n self.peers[ident] = { 'encrypt' : encrypt,\n 'secret' : None,\n 'iter' : row_iter,\n 'packets' : [],\n 'log' : [],\n 'crack' : False,\n 'crack_pkg' : None,\n }\n self.log(\"TACACS+: Got connection %s\" % ident)\n if header.flags & tacacs_plus_header.FLAGS_UNENCRYPTED:\n #cleartext\n self.peers[ident]['log'].append(self.body_to_str(header, data))\n else:\n #crypted\n if not self.peers[ident]['secret'] is None:\n self.peers[ident]['log'].append(self.body_to_str(header, self.decrypt(header, data, self.peers[peer]['secret']), server))\n self.peers[ident]['packets'].append((header, data, server))\n if server and header._type == tacacs_plus_header.TYPE_AUTHEN:\n self.peers[ident]['crack_pkg'] = (header, data)\n \n def set_secret(self, ident, secret):\n self.log(\"TACACS+: Setting secret to '%s'\" % secret)\n self.peers[ident]['secret'] = secret\n self.peers[ident]['log'] = []\n for (header, data, server) in self.peers[ident]['packets']:\n self.peers[ident]['log'].append(self.body_to_str(header, self.decrypt(header, data, secret), server))\n if self.ui == 'gtk':\n self.treestore[self.peers[ident]['iter']][self.STORE_SECRET_ROW] = secret\n child = self.treestore.iter_children(self.peers[ident]['iter'])\n while not child is None:\n self.treestore.remove(child)\n child = self.treestore.iter_children(self.peers[ident]['iter'])\n for i in self.peers[ident]['log']:\n self.treestore.insert(self.peers[ident]['iter'], len(self.peers[ident]['log']), [ i, False, False, \"\" ])\n\n \n def body_to_str(self, header, data, server):\n try:\n if header._type == tacacs_plus_header.TYPE_AUTHEN:\n if not server:\n if header.seq_no == 1:\n body = tacacs_plus_authentication_start()\n else:\n body = tacacs_plus_authentication_continue()\n else:\n body = tacacs_plus_authentication_reply()\n elif header._type == tacacs_plus_header.TYPE_AUTHOR:\n if not server:\n body = tacacs_plus_authorization_request()\n else:\n body = tacacs_plus_authorization_response()\n elif header._type == tacacs_plus_header.TYPE_ACCT:\n if not server:\n body = tacacs_plus_account_request()\n else:\n body = tacacs_plus_account_response()\n else:\n return \"%s : Unknown body type\" % str(header)\n body.parse(data)\n return str(body)\n except:\n self.log(\"TACACS+: Can't decode decrypted packet, likely wrong secret.\")\n return \"Invalid packet\"\n \n def decrypt(self, header, data, secret):\n md5 = hashlib.md5()\n md5.update(struct.pack(\"!I\", header.session_id))\n md5.update(secret)\n md5.update(struct.pack(\"!BB\", TACACS_PLUS_VERSION_MAJOR << 4 + header.version_minor, header.seq_no))\n digest = md5.digest()\n pad = digest\n while len(pad) < len(secret):\n md5 = hashlib.md5()\n md5.update(struct.pack(\"!I\", header.session_id))\n md5.update(secret)\n md5.update(struct.pack(\"!BB\", TACACS_PLUS_VERSION_MAJOR << 4 + header.version_minor, header.seq_no))\n md5.update(digest)\n digest = md5.digest()\n pad += digest\n return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(data, pad[:len(data)]))\n","sub_path":"modules/module_tacacs_plus.py","file_name":"module_tacacs_plus.py","file_ext":"py","file_size_in_byte":47333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"486688605","text":"from flask.ext.script import Manager\nfrom app import app\nfrom AuthorityReporter.commands import analyze_wiki, ingest_data, analyze_global\n\nmanager = Manager(app)\n\nscripts = {\n 'analyze_wiki': analyze_wiki.AnalyzeWiki(),\n 'ingest_data': ingest_data.IngestData(),\n 'analyze_global': analyze_global.AnalyzeGlobal()\n}\n\n\nif __name__ == \"__main__\":\n manager.run(scripts)","sub_path":"AuthorityReporter/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"75466948","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom bolinha import *\n\ndef main(inic):\n big_bang(inic, frequencia=FREQUENCIA,\n a_cada_tick=mover, #Posicao -> Posicao\n desenhar=desenha #Posicao -> imagem\n )\n\nmain(Posicao)","sub_path":"Python/exercicios4/bola_main.py","file_name":"bola_main.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"356153705","text":"\"\"\"\nYou have a number of envelopes with widths and heights given as a pair of integers (w, h). One envelope can fit into another if and only if both the width and height of one envelope is greater than the width and height of the other envelope.\n\nWhat is the maximum number of envelopes can you Russian doll? (put one inside other)\n\nExample:\nGiven envelopes = [[5,4],[6,4],[6,7],[2,3]], the maximum number of envelopes you can Russian doll is 3 ([2,3] => [5,4] => [6,7]).\n\"\"\"\n\n# First sort the envelopes, then same as longest increasing subsequence with dp\n# T: O(n^2)\nclass Solution(object):\n def maxEnvelopes(self, envelopes):\n \"\"\"\n :type envelopes: List[List[int]]\n :rtype: int\n \"\"\"\n def helper(x, y):\n if x[0] > y[0]:\n return 1\n elif x[0] < y[0]:\n return -1\n elif x[0] == y[0]:\n if x[1] > y[1]:\n return 1\n elif x[1] < y[1]:\n return -1\n else:\n return 0\n if not envelopes:\n return 0\n envelopes.sort(cmp=helper)\n dp = [1 for _ in range(len(envelopes))]\n for i in range(len(envelopes)):\n for j in range(i):\n if envelopes[i][0] > envelopes[j][0] and envelopes[i][1] > envelopes[j][1]:\n dp[i] = max(dp[i], dp[j]+1)\n return max(dp)\n\n# O(nlgn)\nclass Solution(object):\n def maxEnvelopes(self, envelopes):\n \"\"\"\n :type envelopes: List[List[int]]\n :rtype: int\n \"\"\"\n def insert(target):\n left, right = 0, len(result) - 1\n while left <= right:\n mid = left + (right - left) / 2\n if result[mid] >= target:\n right = mid - 1\n else:\n left = mid + 1\n if left == len(result):\n result.append(target)\n else:\n result[left] = target\n\n result = []\n\n envelopes.sort(lambda x, y: y[1] - x[1] if x[0] == y[0] else \\\n x[0] - y[0])\n for envelope in envelopes:\n insert(envelope[1])\n return len(result)","sub_path":"leetcode/354_russian_doll_envelopes.py","file_name":"354_russian_doll_envelopes.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"103699208","text":"from bxutils import logging\nfrom bxutils.logging.log_record_type import LogRecordType\n\nfrom bxcommon.messages.bloxroute.abstract_cleanup_message import AbstractCleanupMessage\nfrom bxcommon.services.transaction_service import TransactionService\n\nlogger = logging.get_logger(LogRecordType.TransactionCleanup)\n\n\ndef contents_cleanup(transaction_service: TransactionService,\n block_confirmation_message: AbstractCleanupMessage\n ):\n message_hash = block_confirmation_message.message_hash()\n for short_id in block_confirmation_message.short_ids():\n transaction_service.remove_transaction_by_short_id(short_id, remove_related_short_ids=True)\n for tx_hash in block_confirmation_message.transaction_hashes():\n transaction_service.remove_transaction_by_tx_hash(tx_hash)\n transaction_service.on_block_cleaned_up(message_hash)\n logger.statistics(\n {\n \"type\": \"MemoryCleanup\",\n \"event\": \"CacheStateAfterBlockCleanup\",\n \"message_hash\": repr(message_hash),\n \"data\": transaction_service.get_cache_state_json()\n }\n )\n","sub_path":"src/bxcommon/services/normal_cleanup_service_helpers.py","file_name":"normal_cleanup_service_helpers.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"128916622","text":"\"\"\"\r\nThe classic FizzBuzz problem\r\nMichael South\r\nmikesouth@gmail.com\r\n\"\"\"\r\n\r\n\r\ndef fizz_buzz(value):\r\n\r\n\tif value % 2 == 0 and value % 3 == 0:\r\n\t\tprint(\"FizzBuzz\")\r\n\telif value % 2 == 0:\r\n\t\tprint(\"Fizz\")\r\n\telif value % 3 == 0:\r\n\t\tprint(\"Buzz\")\r\n\telse:\r\n\t\tprint(value)\r\n\r\nif __name__ == \"__main__\":\r\n\t# run default test for 1-100\r\n\tfor i in range(1, 101):\r\n\t\tfizz_buzz(i)\r\n","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"197540031","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Dim reduction tools\nimport umap\nimport torch\nimport torch.utils.data\nfrom torch import optim\n\nfrom torch.utils.data import TensorDataset\nfrom sklearn.model_selection import ShuffleSplit\n\nfrom utils import *\n\n\nclass ModelAgent:\n def __init__(self, data):\n self.input = data[0]\n self.label = data[1]\n print(\"ModelAgent initialised\")\n\n def plot_2d(self, data, label):\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n sc = ax.scatter(data[:, 0], data[:, 1], s=2.0, c=np.min(label, 1))\n # colorbar(sc)\n return 1\n\n def plot_3d(self, data, feature):\n return 'Not implemented'\n\n\nclass UMAP(ModelAgent):\n def __init__(self, data):\n super().__init__(data)\n\n def reduce(self, n_neighbors=50, min_dist=0.001):\n embedding_stack_ff = umap.UMAP(n_neighbors=n_neighbors,\n min_dist=min_dist,\n metric='correlation',\n verbose=False,\n # this was in ASAP notebook: random_state=42).fit_transform(np.concatenate([stacked, FF.reshape(-1, 64)], 1))\n random_state=42).fit_transform(self.input)\n\n return embedding_stack_ff\n\n\nclass VAE_model(ModelAgent):\n def __init__(self, data):\n super().__init__(data)\n\n def create_dataloader(self, batch_size=32):\n # split the concatenated input back into two arrays\n X = torch.from_numpy(np.stack(np.split(self.input, 2, axis=1), 1)).float()\n # Create a stacked representation and a zero tensor so we can use the standard Pytorch TensorDataset\n y = torch.from_numpy(np.zeros((X.shape[0], 1))).float()\n\n print('adf', X.shape)\n\n split = ShuffleSplit(n_splits=1, test_size=0.5)\n for train_index, test_index in split.split(X):\n X_train, y_train = X[train_index], y[train_index]\n X_test, y_test = X[test_index], y[test_index]\n\n train_dset = TensorDataset(X_train, y_train)\n test_dset = TensorDataset(X_test, y_test)\n all_dset = TensorDataset(X, y)\n\n kwargs = {'num_workers': 1, 'pin_memory': True}\n self.train_loader = torch.utils.data.DataLoader(train_dset, batch_size=batch_size, shuffle=True, **kwargs)\n self.test_loader = torch.utils.data.DataLoader(test_dset, batch_size=batch_size, shuffle=False, **kwargs)\n self.all_loader = torch.utils.data.DataLoader(all_dset, batch_size=batch_size, shuffle=False, **kwargs)\n\n def train_vae(self, cuda=False, epochs=30):\n set_seed(42) # Set the random seed\n self.model = VAE(hidden_size=8) # Inititalize the model\n\n # use cuda if chosen\n if cuda:\n self.model.cuda()\n\n # Create a gradient descent optimizer\n optimizer = optim.Adam(self.model.parameters(), lr=1e-2, betas=(0.9, 0.999))\n\n # Store and plot losses\n self.losses = []\n\n # Start training loop\n for epoch in range(1, epochs + 1):\n tl = train(epoch, self.model, optimizer, self.train_loader, cuda=False) # Train model on train dataset\n testl = test(epoch, self.model, self.test_loader, cuda=False) # Validate model on test dataset\n self.losses.append([tl, testl])\n\n def run_vae(self):\n _, self.zs = forward_all(self.model, self.all_loader, cuda=False)\n\n def vae_umap(self):\n transformer = umap.UMAP(n_neighbors=5,\n min_dist=0.001,\n metric='correlation', verbose=True).fit(self.zs.numpy())\n embedding = transformer.transform(self.zs.numpy())\n print('shape of zs', self.zs.shape)\n\n # plot umap\n fig, ax = plt.subplots(1, 1, figsize=(12, 12))\n sc = ax.scatter(embedding[::, 0], embedding[::, 1], s=2.0, c=np.min(self.label, 1)[::])\n plt.show()\n\n def reduce(self):\n self.create_dataloader()\n self.train_vae()\n self.run_vae()\n self.vae_umap()\n\n\n","sub_path":"Project-Files/ModelAgent.py","file_name":"ModelAgent.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"472361753","text":"import sys\n\ndef prints(mtext):\n\n\tsys.stdout.write(\"%s\\r\" %(mtext))\n\tsys.stdout.flush()\n\tsys.stdout.write(\"%s\\r\" %(\" \" * len(mtext)))\n\ndef progress_bar(trying, completed, total, bsize = 60):\n\t\"\"\"\n\t\tMULTIPLE LINES PROGRESS BAR IS NOT WORKING FOR WINDOWS AND ANDROID TERM\n\t\tCreate a progress bar to show current process\n\t\tProgessbar format [+++#####-----]\n\t\t\t+ is completed tasks. Tasks should recived responses\n\t\t\t# is submited tasks. Tasks have no responses\n\t\t\t- is waiting tasks\n\t\"\"\"\n\tfinished = int((completed * bsize) / total)\n\trunning = int((trying * bsize) / total - finished)\n\trunning = 1 if running < 1 else running\n\n\tprints(\"|%s%s%s| %10s\" %(\n\t\tfinished * \"+\",\n\t\trunning * \"#\",\n\t\t(bsize - finished - running) * '-',\n\t\tcompleted\n\t))\n","sub_path":"utils/progressbar.py","file_name":"progressbar.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"555005552","text":"from . import views\nfrom django.urls import path\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('course', views.course, name='course'),\n path('vip', views.vip, name='vip'),\n path('test', views.test, name='test'),\n path('test1', views.test1, name='test'),\n path('login', views.LoginView.as_view(), name='login'),\n path('register', views.RegisterView.as_view(), name='register'),\n path('comment/', views.CommentView.as_view(), name='comment'),\n path('logout', views.my_logout, name='logout'),\n path('blog-list', views.blog_list, name='blog-list'),\n path('blog-detail/', views.blog_detail, name='blog-detail'),\n path('tag/', views.tag, name='tag'),\n path('course-detail/', views.course_detail, name='course-detail'),\n path('comment_del//',views.comment_del, name='comment_del'),\n path('comment_update//',views.comment_update, name='comment_update'),\n # path('com_update/',views.CommentUpdate.as_view(), name='com_update'),\n\n]\n","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"595315912","text":"import argparse, os, os.path, shutil, sys, re, hashlib\nfrom datetime import date\n\n\nclass File:\n regex = re.compile(\"(?P\\\\d{4}-\\\\d{2}-\\\\d{2})_(?P[^.]+?)(-v(?P\\\\d+))?\\\\.(?P.+)\")\n\n def __init__(self, path):\n self.path = path\n self.basename = os.path.basename(self.path)\n\n m = self.regex.fullmatch(self.basename)\n self.is_draft = m is not None\n\n if self.is_draft:\n self.date = m.group(\"date\")\n\n self.prefix = m.group(\"prefix\")\n self.extension = m.group(\"extension\")\n self.name = self.prefix + \".\" + self.extension\n\n if m.group(\"version\") is None:\n self.version = 1\n else:\n self.version = int(m.group(\"version\"))\n else:\n self.name = self.basename\n\n def __repr__(self):\n return f\"File({repr(self.path)})\"\n\n def identical_contents_to(self, other):\n return self.digest_file(self.path) == self.digest_file(other.path)\n\n @staticmethod\n def digest_file(path):\n h = hashlib.md5()\n\n with open(path, \"rb\") as f:\n while True:\n chunk = f.read(h.block_size)\n if not chunk:\n break\n\n h.update(chunk)\n\n return h.hexdigest()\n\n def is_draft_of(self, source):\n return self.is_draft and not source.is_draft and self.name == source.name\n\n def next_draft_basename(self):\n today = date.today().isoformat()\n\n if self.date == today:\n # increment version\n return f\"{today}_{self.prefix}-v{self.version + 1}.{self.extension}\"\n else:\n # increment date only\n return f\"{today}_{self.prefix}.{self.extension}\"\n","sub_path":"drafter/draft_file.py","file_name":"draft_file.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"313176089","text":"import connectors\nimport multiprocessing\nimport time\nfrom config import config\nimport cv2 as cv\nimport math\nimport queue\nimport threading\nimport serial\nimport pyvesc\nimport re\n\n\nclass SmoothieAdapter:\n RESPONSE_OK = \"ok\\r\\n\"\n RESPONSE_ALARM_LOCK = \"error:Alarm lock\\n\"\n RESPONSE_HALT = \"!!\\r\\n\"\n RESPONSE_IGNORED = \"ok - ignored\\n\"\n\n def __init__(self, smoothie_host):\n if type(smoothie_host) is not str:\n raise TypeError(\"invalid smoothie_host type: should be str, received \" + type(smoothie_host).__name__)\n\n if config.SMOOTHIE_BACKEND == 1:\n self.__smc = connectors.SmoothieV11TelnetConnector(smoothie_host)\n elif config.SMOOTHIE_BACKEND == 2:\n self.__smc = connectors.SmoothieV11SerialConnector(smoothie_host, config.SMOOTHIE_BAUDRATE)\n else:\n raise ValueError(\"wrong config.SMOOTHIE_BACKEND value: \" + str(smoothie_host))\n\n self.__sync_locker = multiprocessing.RLock()\n self.__x_cur = multiprocessing.Value(\"d\", 0)\n self.__y_cur = multiprocessing.Value(\"d\", 0)\n self.__z_cur = multiprocessing.Value(\"d\", 0)\n self.__a_cur = multiprocessing.Value(\"d\", 0)\n self.__b_cur = multiprocessing.Value(\"d\", 0)\n self.__c_cur = multiprocessing.Value(\"d\", 0)\n\n res = self.switch_to_relative()\n if res == self.RESPONSE_IGNORED or \"ignored\" in res:\n res = self.switch_to_relative()\n if res != self.RESPONSE_OK:\n # TODO: what if so?\n print(\"Switching smoothie to relative was failed! Smoothie's response:\\n\", res)\n\n # TODO: temporary crutch - vesc is moving Z upward before smoothie loads, so we need to lower the cork a bit down\n res = self.custom_move_for(Z_F=config.Z_F_EXTRACTION_DOWN, Z=5)\n self.wait_for_all_actions_done()\n if res != self.RESPONSE_OK:\n print(\"Couldn't move cork down for Z5! Calibration errors on Z axis are possible!\")\n\n res = self.ext_calibrate_cork()\n if res != self.RESPONSE_OK:\n print(\"Initial cork calibration was failed, smoothie response:\\n\", res) # TODO: what if so??\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.__smc.disconnect()\n\n def disconnect(self):\n self.__smc.disconnect()\n\n @property\n def is_disconnect(self):\n return self._smc.is_open\n\n def get_connector(self):\n \"\"\"Only for debug!\"\"\"\n\n return self.__smc\n\n def wait_for_all_actions_done(self):\n with self.__sync_locker:\n self.__smc.write(\"M400\")\n # \"ok\\r\\n\"\n return self.__smc.read_some()\n\n def halt(self):\n with self.__sync_locker:\n self.__smc.write(\"M112\")\n # \"ok Emergency Stop Requested - reset or M999 required to exit HALT state\\r\\n\"\n return self.__smc.read_some() + self.__smc.read_some() if self.__smc is connectors.SmoothieV11TelnetConnector else self.__smc.read_some()\n\n def reset(self):\n with self.__sync_locker:\n self.__smc.write(\"reset\")\n return self.__smc.read_some()\n\n def freewheels(self):\n with self.__sync_locker:\n self.__smc.write(\"M18\")\n return self.__smc.read_some()\n\n def checkendstop(self, axe):\n with self.__sync_locker:\n self.__smc.write(\"M119\")\n response = self.__smc.read_some()\n matches = re.findall(f\"(?:(?:{axe}_min)|(?:{axe}_max)):(.)\", response)\n if matches:\n return matches[0]\n return 1 # refaire demande\n\n def switch_to_relative(self):\n with self.__sync_locker:\n self.__smc.write(\"G91\")\n # \"ok\\r\\n\"\n return self.__smc.read_some()\n\n def set_current_coordinates(self, X=None, Y=None, Z=None, A=None, B=None, C=None):\n with self.__sync_locker:\n if self.__check_arg_types([type(None)], X, Y, Z, A, B, C):\n raise TypeError(\"at least one axis shouldn't be None\")\n if not self.__check_arg_types([float, int, type(None)], X, Y, Z, A, B, C):\n raise TypeError(\"incorrect axis current value(s) type(s)\")\n\n g_code = \"G92\"\n\n if X is not None:\n g_code += \" X\" + str(self.mm_to_smoothie(X, \"X\"))\n if Y is not None:\n g_code += \" Y\" + str(self.mm_to_smoothie(Y, \"Y\"))\n if Z is not None:\n g_code += \" Z\" + str(self.mm_to_smoothie(Z, \"Z\"))\n if A is not None:\n g_code += \" A\" + str(self.mm_to_smoothie(A, \"A\"))\n if B is not None:\n g_code += \" B\" + str(self.mm_to_smoothie(B, \"B\"))\n if C is not None:\n g_code += \" C\" + str(self.mm_to_smoothie(C, \"C\"))\n\n self.__smc.write(g_code)\n response = self.__smc.read_some()\n\n if response == self.RESPONSE_OK:\n if X is not None:\n self.__x_cur.value = X\n if Y is not None:\n self.__y_cur.value = Y\n if Z is not None:\n self.__z_cur.value = Z\n if A is not None:\n self.__a_cur.value = A\n if B is not None:\n self.__b_cur.value = B\n if C is not None:\n self.__c_cur.value = C\n return response\n\n def get_adapter_current_coordinates(self):\n with self.__sync_locker:\n return {\n \"X\": self.__x_cur.value,\n \"Y\": self.__y_cur.value,\n \"Z\": self.__z_cur.value,\n \"A\": self.__a_cur.value,\n \"B\": self.__b_cur.value\n # \"C\": self._c_cur.value\n }\n\n def get_smoothie_current_coordinates(self, convert_to_mms=True):\n \"\"\"\n\n :param convert_to_mms: \n :return: \n \"\"\"\n \"\"\"\n Answers:\n M114: 'ok C: X:2.0240 Y:0.0000 Z:0.0000\\r\\n'\n M114.1 'ok WCS: X:2.0250 Y:0.0000 Z:0.0000\\r\\n'\n M114.2 'ok MCS: X:2.0250 Y:0.0000 Z:0.0000 A:0.0000 B:0.0000\\r\\n'\n M114.3 'ok APOS: X:2.0250 Y:0.0000 Z:0.0000 A:0.0000 B:0.0000\\r\\n'\n M114.4 'ok MP: X:2.0240 Y:0.0000 Z:0.0000 A:0.0000 B:0.0000\\r\\n'\n \"\"\"\n\n with self.__sync_locker:\n self.__smc.write(\"M114.2\")\n response, coordinates = (self.__smc.read_some() + self.__smc.read_some()\n if type(self.__smc) is connectors.SmoothieV11TelnetConnector\n else self.__smc.read_some())[:-2].split(\" \")[2:], {}\n for coord in response:\n coordinates[coord[0]] = float(coord[2:])\n if convert_to_mms:\n coordinates[coord[0]] = self.smoothie_to_mm(coordinates[coord[0]], coord[0])\n return coordinates\n\n @staticmethod\n def compare_coordinates(coordinates_a, coordinates_b, precision=1e-10):\n if type(coordinates_a) != dict or type(coordinates_b) != dict:\n raise AttributeError(\"coordinates should be stored in dict\")\n if len(coordinates_a) != len(coordinates_b):\n raise AttributeError(\"coordinates dicts should have similar items count\")\n\n for key in coordinates_a:\n if abs(coordinates_a[key] - coordinates_b[key]) > precision:\n return False\n return True\n\n def custom_move_for(self, *,\n X_F=None,\n Y_F=None,\n Z_F=None,\n A_F=None,\n B_F=None,\n C_F=None,\n X=None,\n Y=None,\n Z=None,\n A=None,\n B=None,\n C=None):\n \"\"\"Movement by some value(s)\n\n Minimal force is applied if multiple values are given\n \"\"\"\n\n with self.__sync_locker:\n # check given forces\n if self.__check_arg_types([type(None)], X_F, Y_F, Z_F, A_F, B_F, C_F):\n raise TypeError(\"at least one given force value shouldn't be a None\")\n if not self.__check_arg_types([float, int, type(None)], X_F, Y_F, Z_F, A_F, B_F, C_F):\n raise TypeError(\"incorrect force value(s) type(s)\")\n\n # check given axes\n if self.__check_arg_types([type(None)], X, Y, Z, A, B, C):\n raise TypeError(\"at least one given axis value shouldn't be a None\")\n if not self.__check_arg_types([float, int, type(None)], X, Y, Z, A, B, C):\n raise TypeError(\"incorrect axis value(s) type(s)\")\n\n # apply min of given forces (and pass by Nones)\n min_f_msg = \"(min force value applied)\"\n min_f = min([item for item in [X_F, Y_F, Z_F, A_F, B_F, C_F] if item is not None])\n g_code = \"G0\"\n\n if X is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.X_F_MIN, config.X_F_MAX, \"X_F_MIN\", \"X_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(self.__x_cur.value,\n X,\n \"X\",\n self.smoothie_to_mm(config.X_MIN, \"X\"),\n self.smoothie_to_mm(config.X_MAX, \"X\"),\n \"X_MIN\",\n \"X_MAX\")\n if err_msg:\n return err_msg\n g_code += \" X\" + str(self.mm_to_smoothie(X, \"X\"))\n\n if Y is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.Y_F_MIN, config.Y_F_MAX, \"Y_F_MIN\", \"Y_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(self.__y_cur.value,\n Y,\n \"Y\",\n self.smoothie_to_mm(config.Y_MIN, \"Y\"),\n self.smoothie_to_mm(config.Y_MAX, \"Y\"),\n \"Y_MIN\",\n \"Y_MAX\")\n if err_msg:\n return err_msg\n g_code += \" Y\" + str(self.mm_to_smoothie(Y, \"Y\"))\n\n if Z is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.Z_F_MIN, config.Z_F_MAX, \"Z_F_MIN\", \"Z_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(self.__z_cur.value,\n Z,\n \"Z\",\n self.smoothie_to_mm(config.Z_MIN, \"Z\"),\n self.smoothie_to_mm(config.Z_MAX, \"Z\"),\n \"Z_MIN\",\n \"Z_MAX\")\n if err_msg:\n return err_msg\n g_code += \" Z\" + str(self.mm_to_smoothie(Z, \"Z\"))\n\n if A is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.A_F_MIN, config.A_F_MAX, \"A_F_MIN\", \"A_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(self.__a_cur.value,\n A,\n \"A\",\n self.smoothie_to_mm(config.A_MIN, \"A\"),\n self.smoothie_to_mm(config.A_MAX, \"A\"),\n \"A_MIN\",\n \"A_MAX\")\n if err_msg:\n return err_msg\n g_code += \" A\" + str(self.mm_to_smoothie(A, \"A\"))\n\n if B is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.B_F_MIN, config.B_F_MAX, \"B_F_MIN\", \"B_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(self.__b_cur.value,\n B,\n \"B\",\n self.smoothie_to_mm(config.B_MIN, \"B\"),\n self.smoothie_to_mm(config.B_MAX, \"B\"),\n \"B_MIN\",\n \"B_MAX\")\n if err_msg:\n return err_msg\n g_code += \" B\" + str(self.mm_to_smoothie(B, \"B\"))\n\n if C is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.C_F_MIN, config.C_F_MAX, \"C_F_MIN\", \"C_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(self.__c_cur.value,\n C,\n \"C\",\n self.smoothie_to_mm(config.C_MIN, \"C\"),\n self.smoothie_to_mm(config.C_MAX, \"C\"),\n \"C_MIN\",\n \"C_MAX\")\n if err_msg:\n return err_msg\n g_code += \" C\" + str(self.mm_to_smoothie(C, \"C\"))\n\n g_code += \" F\" + str(min_f)\n\n self.__smc.write(g_code)\n response = self.__smc.read_some()\n\n if response == self.RESPONSE_OK:\n if X is not None:\n self.__x_cur.value += X\n if Y is not None:\n self.__y_cur.value += Y\n if Z is not None:\n self.__z_cur.value += Z\n if A is not None:\n self.__a_cur.value += A\n if B is not None:\n self.__b_cur.value += B\n if C is not None:\n self.__c_cur.value += C\n return response\n\n def custom_move_to(self, *,\n X_F=None,\n Y_F=None,\n Z_F=None,\n A_F=None,\n B_F=None,\n C_F=None,\n X=None,\n Y=None,\n Z=None,\n A=None,\n B=None,\n C=None):\n \"\"\"Movement to the specified position\"\"\"\n\n with self.__sync_locker:\n # check given forces\n if self.__check_arg_types([type(None)], X_F, Y_F, Z_F, A_F, B_F, C_F):\n raise TypeError(\"at least one given force value shouldn't be a None\")\n if not self.__check_arg_types([float, int, type(None)], X_F, Y_F, Z_F, A_F, B_F, C_F):\n raise TypeError(\"incorrect force value(s) type(s)\")\n\n # check given axes\n if self.__check_arg_types([type(None)], X, Y, Z, A, B, C):\n raise TypeError(\"at least one given axis value shouldn't be a None\")\n if not self.__check_arg_types([float, int, type(None)], X, Y, Z, A, B, C):\n raise TypeError(\"incorrect axis value(s) type(s)\")\n\n # apply min of given forces (and pass by Nones)\n min_f_msg = \"(min force value applied)\"\n min_f = min([item for item in [X_F, Y_F, Z_F, A_F, B_F, C_F] if item is not None])\n g_code = \"G0\"\n\n if X is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.X_F_MIN, config.X_F_MAX, \"X_F_MIN\", \"X_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(0,\n X,\n \"X\",\n self.smoothie_to_mm(config.X_MIN, \"X\"),\n self.smoothie_to_mm(config.X_MAX, \"X\"),\n \"X_MIN\",\n \"X_MAX\")\n if err_msg:\n return err_msg\n sm_x_mm = X - self.__x_cur.value\n g_code += \" X\" + str(self.mm_to_smoothie(sm_x_mm, \"X\"))\n\n if Y is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.Y_F_MIN, config.Y_F_MAX, \"Y_F_MIN\", \"Y_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(0,\n Y,\n \"Y\",\n self.smoothie_to_mm(config.Y_MIN, \"Y\"),\n self.smoothie_to_mm(config.Y_MAX, \"Y\"),\n \"Y_MIN\",\n \"Y_MAX\")\n if err_msg:\n return err_msg\n sm_y_mm = Y - self.__y_cur.value\n g_code += \" Y\" + str(self.mm_to_smoothie(sm_y_mm, \"Y\"))\n\n if Z is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.Z_F_MIN, config.Z_F_MAX, \"Z_F_MIN\", \"Z_F_MAX\")\n if err_msg:\n return err_msg\n err_msg = self.__validate_axis(0,\n Z,\n \"Z\",\n self.smoothie_to_mm(config.Z_MIN, \"Z\"),\n self.smoothie_to_mm(config.Z_MAX, \"Z\"),\n \"Z_MIN\",\n \"Z_MAX\")\n if err_msg:\n return err_msg\n sm_z_mm = Z - self.__z_cur.value\n g_code += \" Z\" + str(self.mm_to_smoothie(sm_z_mm, \"Z\"))\n\n if A is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.A_F_MIN, config.A_F_MAX, \"A_F_MIN\", \"A_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(0,\n A,\n \"A\",\n self.smoothie_to_mm(config.A_MIN, \"A\"),\n self.smoothie_to_mm(config.A_MAX, \"A\"),\n \"A_MIN\",\n \"A_MAX\")\n if err_msg:\n return err_msg\n sm_a_mm = A - self.__a_cur.value\n g_code += \" A\" + str(self.mm_to_smoothie(sm_a_mm, \"A\"))\n\n if B is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.B_F_MIN, config.B_F_MAX, \"B_F_MIN\", \"B_F_MAX\")\n if err_msg:\n return err_msg\n # validate axis\n err_msg = self.__validate_axis(0,\n B,\n \"B\",\n self.smoothie_to_mm(config.B_MIN, \"B\"),\n self.smoothie_to_mm(config.B_MAX, \"B\"),\n \"B_MIN\",\n \"B_MAX\")\n if err_msg:\n return err_msg\n sm_b_mm = B - self.__b_cur.value\n g_code += \" B\" + str(self.mm_to_smoothie(sm_b_mm, \"B\"))\n\n if C is not None:\n # validate force\n err_msg = self.__validate_force(min_f, min_f_msg, config.C_F_MIN, config.C_F_MAX, \"C_F_MIN\", \"C_F_MAX\")\n if err_msg:\n return err_msg\n err_msg = self.__validate_axis(0,\n C,\n \"C\",\n self.smoothie_to_mm(config.C_MIN, \"C\"),\n self.smoothie_to_mm(config.C_MAX, \"C\"),\n \"C_MIN\",\n \"C_MAX\")\n if err_msg:\n return err_msg\n sm_c_mm = C - self.__c_cur.value\n g_code += \" C\" + str(self.mm_to_smoothie(sm_c_mm, \"C\"))\n\n g_code += \" F\" + str(min_f)\n\n self.__smc.write(g_code)\n response = self.__smc.read_some()\n\n if response == self.RESPONSE_OK:\n if X is not None:\n self.__x_cur.value += sm_x_mm\n if Y is not None:\n self.__y_cur.value += sm_y_mm\n if Z is not None:\n self.__z_cur.value += sm_z_mm\n if A is not None:\n self.__a_cur.value += sm_a_mm\n if B is not None:\n self.__b_cur.value += sm_b_mm\n if C is not None:\n self.__c_cur.value += sm_c_mm\n return response\n\n def custom_separate_xy_move_for(self, *,\n X_F=None,\n Y_F=None,\n X=None,\n Y=None):\n \"\"\"Temporary wrapper for custom_move_for function, separates X and Y axes movement if X:Y ratio exceeds given\n threshold\n\n Supports only X and Y axes movement.\n \"\"\"\n with self.__sync_locker:\n if config.ALLOW_SEPARATE_XY_MOVEMENT and X is not None and Y is not None and X_F is not None \\\n and Y_F is not None:\n rel_x, rel_y = abs(X), abs(Y)\n if (rel_x != 0 and rel_y != 0 and rel_x / rel_y > config.XY_SEP_MOV_MAX_RATIO_THRESHOLD) or \\\n (rel_x != 0 and rel_y == 0):\n # X movement\n res = self.custom_move_for(X_F=X_F, X=X)\n if res != self.RESPONSE_OK:\n err_msg = \"Couldn't do separate X movement:\\n\" + res\n return err_msg\n # Y movement\n res = self.custom_move_for(Y_F=Y_F, Y=Y)\n if res != self.RESPONSE_OK:\n err_msg = \"Couldn't do separate Y movement:\\n\" + res\n return err_msg\n return res\n return self.custom_move_for(X_F=X_F, Y_F=Y_F, X=X, Y=Y)\n\n def custom_separate_xy_move_to(self, *,\n X_F=None,\n Y_F=None,\n X=None,\n Y=None):\n \"\"\"Temporary wrapper for custom_move_to function, separates X and Y axes movement if X:Y ratio exceeds given\n threshold\n\n Supports only X and Y axes movement.\n \"\"\"\n\n with self.__sync_locker:\n if config.ALLOW_SEPARATE_XY_MOVEMENT and X is not None and Y is not None and X_F is not None \\\n and Y_F is not None:\n rel_x, rel_y = abs(X - self.__x_cur.value), abs(Y - self.__y_cur.value)\n if (rel_x != 0 and rel_y != 0 and rel_x / rel_y > config.XY_SEP_MOV_MAX_RATIO_THRESHOLD) or \\\n (rel_x != 0 and rel_y == 0):\n # X movement\n res = self.custom_move_to(X_F=X_F, X=X)\n if res != self.RESPONSE_OK:\n err_msg = \"Couldn't do separate X movement:\\n\" + res\n return err_msg\n # Y movement\n res = self.custom_move_to(Y_F=Y_F, Y=Y)\n if res != self.RESPONSE_OK:\n err_msg = \"Couldn't do separate Y movement:\\n\" + res\n return err_msg\n return res\n return self.custom_move_to(X_F=X_F, Y_F=Y_F, X=X, Y=Y)\n\n def nav_calibrate_wheels(self):\n \"\"\"Calibrates nav. wheels and sets their current position to adapter and smoothie.\n NOT TESTED YET!\n \"\"\"\n\n with self.__sync_locker:\n res = self.custom_move_for(A_F=config.A_F_MAX, A=config.A_MAX)\n self.wait_for_all_actions_done()\n if res != self.RESPONSE_OK:\n return res\n\n res = self.custom_move_for(A_F=config.A_F_MAX, A=-(abs(config.A_MIN) + abs(config.A_MAX)))\n self.wait_for_all_actions_done()\n if res != self.RESPONSE_OK:\n return res\n\n return self.set_current_coordinates(A=config.A_MIN)\n\n def ext_calibrate_cork(self):\n # Z axis calibration\n if config.USE_Z_AXIS_CALIBRATION:\n res = self.__calibrate_axis(self.__z_cur, \"Z\", config.Z_MIN, config.Z_MAX, config.Z_AXIS_CALIBRATION_TO_MAX)\n if res != self.RESPONSE_OK:\n raise RuntimeError(\"Couldn't pick up corkscrew, smoothie response:\\n\" + res)\n\n # X axis calibration\n if config.USE_X_AXIS_CALIBRATION:\n res = self.__calibrate_axis(self.__x_cur, \"X\", config.X_MIN, config.X_MAX, config.X_AXIS_CALIBRATION_TO_MAX)\n if res != self.RESPONSE_OK:\n raise RuntimeError(\"Couldn't calibrate X axis, smoothie response:\\n\" + res)\n\n # Y axis calibration\n if config.USE_Y_AXIS_CALIBRATION:\n res = self.__calibrate_axis(self.__y_cur, \"Y\", config.Y_MIN, config.Y_MAX, config.Y_AXIS_CALIBRATION_TO_MAX)\n if res != self.RESPONSE_OK:\n raise RuntimeError(\"Couldn't calibrate Y axis, smoothie response:\\n\" + res)\n\n return self.RESPONSE_OK\n\n def ext_cork_up(self):\n # cork up is done by Z axis calibration\n if config.USE_Z_AXIS_CALIBRATION:\n # TODO: stub (G28 isn't reading F value from smoothie config, it uses last received F)\n response = self.custom_move_for(Z_F=config.Z_F_EXTRACTION_UP, Z=-0.1)\n if response != self.RESPONSE_OK:\n return response\n\n return self.__calibrate_axis(self.__z_cur,\n \"Z\",\n config.Z_MIN,\n config.Z_MAX,\n config.Z_AXIS_CALIBRATION_TO_MAX)\n else:\n raise RuntimeError(\n \"picking up corkscrew with stoppers usage requires Z axis calibration permission in config\"\n )\n\n @staticmethod\n def mm_to_smoothie(mm_axis_val, axis_label: str):\n \"\"\"Converts given mms value to smoothie value applying (multiplying) coefficient corresponding to given axis\n label\n\n Example: config coefficient = 0.5, given mms value = 100, returned smoothie value = 50\n \"\"\"\n\n if axis_label not in [\"X\", \"Y\", \"Z\", \"A\", \"B\", \"C\"]:\n raise ValueError(\"unsupported axis label or wrong type\")\n if not SmoothieAdapter.__check_arg_types([int, float], mm_axis_val):\n raise TypeError(\"axis_value should be float or int\")\n\n if mm_axis_val == 0:\n return mm_axis_val\n\n if axis_label == \"X\":\n return mm_axis_val * config.X_COEFFICIENT_TO_MM\n if axis_label == \"Y\":\n return mm_axis_val * config.Y_COEFFICIENT_TO_MM\n if axis_label == \"Z\":\n return mm_axis_val * config.Z_COEFFICIENT_TO_MM\n if axis_label == \"A\":\n return mm_axis_val * config.A_COEFFICIENT_TO_MM\n if axis_label == \"B\":\n return mm_axis_val * config.B_COEFFICIENT_TO_MM\n if axis_label == \"C\":\n return mm_axis_val * config.C_COEFFICIENT_TO_MM\n\n @staticmethod\n def smoothie_to_mm(sm_axis_val, axis_label: str):\n \"\"\"Converts given smoothie value to mms value applying (dividing) coefficient corresponding to given axis\n label\n\n Example: coefficient = 0.5, given smoothie value = 50, returned mms value = 100\n \"\"\"\n\n if axis_label not in [\"X\", \"Y\", \"Z\", \"A\", \"B\", \"C\"]:\n raise ValueError(\"unsupported axis label or wrong type\")\n if not SmoothieAdapter.__check_arg_types([int, float], sm_axis_val):\n raise TypeError(\"axis_value should be float or int\")\n\n if sm_axis_val == 0:\n return sm_axis_val\n\n if axis_label == \"X\":\n if config.X_COEFFICIENT_TO_MM == 0:\n raise ValueError(\"config.X_COEFFICIENT_TO_MM can't be a zero\")\n return sm_axis_val / config.X_COEFFICIENT_TO_MM\n\n if axis_label == \"Y\":\n if config.Y_COEFFICIENT_TO_MM == 0:\n raise ValueError(\"config.Y_COEFFICIENT_TO_MM can't be a zero\")\n return sm_axis_val / config.Y_COEFFICIENT_TO_MM\n\n if axis_label == \"Z\":\n if config.Z_COEFFICIENT_TO_MM == 0:\n raise ValueError(\"config.Z_COEFFICIENT_TO_MM can't be a zero\")\n return sm_axis_val / config.Z_COEFFICIENT_TO_MM\n\n if axis_label == \"A\":\n if config.A_COEFFICIENT_TO_MM == 0:\n raise ValueError(\"config.A_COEFFICIENT_TO_MM can't be a zero\")\n return sm_axis_val / config.A_COEFFICIENT_TO_MM\n\n if axis_label == \"B\":\n if config.B_COEFFICIENT_TO_MM == 0:\n raise ValueError(\"config.B_COEFFICIENT_TO_MM can't be a zero\")\n return sm_axis_val / config.B_COEFFICIENT_TO_MM\n\n if axis_label == \"C\":\n if config.C_COEFFICIENT_TO_MM == 0:\n raise ValueError(\"config.C_COEFFICIENT_TO_MM can't be a zero\")\n return sm_axis_val / config.C_COEFFICIENT_TO_MM\n\n @staticmethod\n def __check_arg_types(types: list, *args):\n \"\"\"Returns True if all given variables (*args) types are in given types list, False otherwise\n \"\"\"\n if len(args) < 1:\n raise TypeError(\"item(s) to check is missed\")\n if type(types) is not list:\n raise TypeError(\"expected list of types, received \" + str(type(types)))\n if len(types) < 1:\n raise ValueError(\"list of types should contain at least one item\")\n\n for arg in args:\n if type(arg) not in types:\n return False\n return True\n\n @staticmethod\n def __validate_axis(cur_axis_val, mov_axis_val, key_label, key_min, key_max, key_min_label, key_max_label):\n \"\"\"Checks if given axis movement can be done. Returns None if value is ok, info/error message otherwise.\n\n Receives smoothie values (may be not in mms).\n \"\"\"\n\n if cur_axis_val + mov_axis_val > key_max:\n return \"Value {0} for {1} goes beyond max acceptable range of {3} = {2}, as current value is {4}\" \\\n .format(mov_axis_val, key_label, key_max, key_max_label, cur_axis_val)\n if cur_axis_val + mov_axis_val < key_min:\n return \"Value {0} for {1} goes beyond min acceptable range of {3} = {2}, as current value is {4}\" \\\n .format(mov_axis_val, key_label, key_min, key_min_label, cur_axis_val)\n return None\n\n @staticmethod\n def __validate_force(value, key_label, key_min, key_max, key_min_label, key_max_label):\n \"\"\"Checks if given force can be applied. Returns None if value is ok, info/error message otherwise.\n \"\"\"\n\n if value > key_max:\n return f\"Value {value} for {key_label} goes beyond max acceptable range of {key_max_label} = {key_max}\"\n if value < key_min:\n return f\"Value {value} for {key_label} goes beyond min acceptable range of {key_min_label} = {key_min}\"\n return None\n\n def __calibrate_axis(self,\n axis_cur: multiprocessing.Value,\n axis_label,\n sm_axis_min,\n sm_axis_max,\n axis_calibration_to_max):\n # TODO: need to implement outer axix_cur var if removing multiprocessing.Value in future\n\n with self.__sync_locker:\n # TODO: stub (G28 isn't reading F value from smoothie config, it uses last received F)\n if axis_label == \"Z\":\n response = self.custom_move_for(Z_F=config.Z_F_EXTRACTION_UP, Z=-0.1)\n if response != self.RESPONSE_OK:\n return response\n\n # do calibration\n if axis_calibration_to_max:\n self.__smc.write(\"G28 {0}{1}\".format(axis_label, config.CALIBRATION_DISTANCE))\n response = self.__smc.read_some()\n if response == self.RESPONSE_OK:\n sm_val, axis_cur.value = sm_axis_max, self.smoothie_to_mm(sm_axis_max, axis_label)\n else:\n return response\n else:\n self.__smc.write(\"G28 {0}{1}\".format(axis_label, -config.CALIBRATION_DISTANCE))\n response = self.__smc.read_some()\n if response == self.RESPONSE_OK:\n sm_val, axis_cur.value = sm_axis_min, self.smoothie_to_mm(sm_axis_min, axis_label)\n else:\n return response\n\n # set fresh current coordinates on smoothie too\n self.__smc.write(\"G92 {0}{1}\".format(axis_label, sm_val))\n return self.__smc.read_some()\n\n\nclass PiCameraAdapter:\n\n def __init__(self):\n from picamera.array import PiRGBArray\n from picamera import PiCamera\n self._camera = PiCamera()\n self._camera.resolution = (config.CAMERA_W, config.CAMERA_H)\n self._camera.framerate = config.CAMERA_FRAMERATE\n self._raw_capture = PiRGBArray(self._camera, size=(config.CAMERA_W, config.CAMERA_H))\n self._gen = self._camera.capture_continuous(self._raw_capture, format=\"rgb\")\n time.sleep(2)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()\n\n def release(self):\n self._camera.close()\n\n def get_image(self):\n image = cv.cvtColor(next(self._gen).array, cv.COLOR_RGB2BGR)\n self._raw_capture.truncate(0)\n return image\n\n\n'''\n# test\nclass CameraAdapterIMX219_170_BS1:\n \"\"\"Buffer size is set to 1 frame, getting 2 frames per call, return last\"\"\"\n\n def __init__(self,\n capture_width=config.CAMERA_W,\n capture_height=config.CAMERA_H,\n display_width=config.CAMERA_W,\n display_height=config.CAMERA_H,\n framerate=config.CAMERA_FRAMERATE,\n flip_method=config.CAMERA_FLIP_METHOD):\n\n gst_config = (\n \"nvarguscamerasrc ! \"\n \"video/x-raw(memory:NVMM), \"\n \"width=(int)%d, height=(int)%d, \"\n \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! appsink\"\n % (\n capture_width,\n capture_height,\n framerate,\n flip_method,\n display_width,\n display_height\n )\n )\n self._cap = cv.VideoCapture(gst_config, cv.CAP_GSTREAMER)\n self._cap = cv.VideoCapture(cv.CAP_PROP_BUFFERSIZE, 1)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()\n\n def release(self):\n self._cap.release()\n\n def get_image(self):\n if self._cap.isOpened():\n for i in range(self._cap.get(cv.CAP_PROP_BUFFERSIZE) + 1):\n ret, image = self._cap.read()\n # rotate for 90 degrees and crop black zones\n return cv.rotate(image, 2)[config.CROP_H_FROM:config.CROP_H_TO, config.CROP_W_FROM:config.CROP_W_TO]\n else:\n raise RuntimeError(\"Unable to open camera\")\n'''\n\n\n# old with no shutter, gain and rest camera control\nclass CameraAdapterIMX219_170_Auto:\n\n def __init__(self,\n crop_w_from,\n crop_w_to,\n crop_h_from,\n crop_h_to,\n cv_rotate_code,\n ispdigitalgainrange_from,\n ispdigitalgainrange_to,\n gainrange_from,\n gainrange_to,\n exposuretimerange_from,\n exposuretimerange_to,\n aelock,\n capture_width,\n capture_height,\n display_width,\n display_height,\n framerate,\n nvidia_flip_method):\n\n self._crop_w_from = crop_w_from\n self._crop_w_to = crop_w_to\n self._crop_h_from = crop_h_from\n self._crop_h_to = crop_h_to\n self._cv_rotate_code = cv_rotate_code\n aelock = \"aelock=true \" if aelock else \"\"\n\n gst_config = (\n \"nvarguscamerasrc ! \"\n \"video/x-raw(memory:NVMM), \"\n \"width=(int)%d, height=(int)%d, \"\n \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! appsink\"\n % (\n capture_width,\n capture_height,\n framerate,\n nvidia_flip_method,\n display_width,\n display_height\n )\n )\n\n if config.APPLY_THREAD_BUFF_CLEANING:\n self._cap = VideoCaptureNoBuffer(gst_config, cv.CAP_GSTREAMER)\n else:\n self._cap = cv.VideoCapture(gst_config, cv.CAP_GSTREAMER)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()\n\n def release(self):\n self._cap.release()\n\n def get_image(self):\n if self._cap.isOpened():\n if config.BUFF_CLEANING_DELAY > 0:\n time.sleep(config.BUFF_CLEANING_DELAY)\n\n if config.APPLY_THREAD_BUFF_CLEANING:\n image = self._cap.read()\n else:\n ret, image = self._cap.read()\n\n if config.CV_APPLY_ROTATION:\n image = cv.rotate(image, self._cv_rotate_code)\n\n # crop black zones\n if config.APPLY_IMAGE_CROPPING:\n image = image[self._crop_h_from:self._crop_h_to, self._crop_w_from:self._crop_w_to]\n return image\n else:\n raise RuntimeError(\"Unable to open camera\")\n\n\nclass CameraAdapterIMX219_170:\n\n def __init__(self,\n crop_w_from,\n crop_w_to,\n crop_h_from,\n crop_h_to,\n cv_rotate_code,\n ispdigitalgainrange_from,\n ispdigitalgainrange_to,\n gainrange_from,\n gainrange_to,\n exposuretimerange_from,\n exposuretimerange_to,\n aelock,\n capture_width,\n capture_height,\n display_width,\n display_height,\n framerate,\n nvidia_flip_method):\n\n self._crop_w_from = crop_w_from\n self._crop_w_to = crop_w_to\n self._crop_h_from = crop_h_from\n self._crop_h_to = crop_h_to\n self._cv_rotate_code = cv_rotate_code\n aelock = \"aelock=true \" if aelock else \"\"\n # ispdigitalgainrange=\"14.72 14.72\" gainrange=\"14.72 14.72\" exposuretimerange=\"55000 55000\" aelock=true\n gst_config = (\n \"nvarguscamerasrc \"\n \"ispdigitalgainrange=\\\"%.2f %.2f\\\" \"\n \"gainrange=\\\"%.2f %.2f\\\" \"\n \"exposuretimerange=\\\"%d %d\\\" \"\n \"%s\"\n \"! \"\n \"video/x-raw(memory:NVMM), \"\n \"width=(int)%d, height=(int)%d, \"\n \"format=(string)NV12, framerate=(fraction)%d/1 ! \"\n \"nvvidconv flip-method=%d ! \"\n \"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! \"\n \"videoconvert ! \"\n \"video/x-raw, format=(string)BGR ! appsink\"\n % (\n ispdigitalgainrange_from,\n ispdigitalgainrange_to,\n gainrange_from,\n gainrange_to,\n exposuretimerange_from,\n exposuretimerange_to,\n aelock,\n capture_width,\n capture_height,\n framerate,\n nvidia_flip_method,\n display_width,\n display_height\n )\n )\n\n if config.APPLY_THREAD_BUFF_CLEANING:\n self._cap = VideoCaptureNoBuffer(gst_config, cv.CAP_GSTREAMER)\n else:\n self._cap = cv.VideoCapture(gst_config, cv.CAP_GSTREAMER)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()\n\n def release(self):\n self._cap.release()\n\n def get_image(self):\n if self._cap.isOpened():\n if config.BUFF_CLEANING_DELAY > 0:\n time.sleep(config.BUFF_CLEANING_DELAY)\n\n if config.APPLY_THREAD_BUFF_CLEANING:\n image = self._cap.read()\n else:\n ret, image = self._cap.read()\n\n if config.CV_APPLY_ROTATION:\n image = cv.rotate(image, self._cv_rotate_code)\n\n # crop black zones\n if config.APPLY_IMAGE_CROPPING:\n image = image[self._crop_h_from:self._crop_h_to, self._crop_w_from:self._crop_w_to]\n # image = cv.imread('test.jpg') #fake image for debug\n return image\n else:\n raise RuntimeError(\"Unable to open camera\")\n\n\nclass VideoCaptureNoBuffer:\n \"\"\"Minimalistic layer for cv2's VideoCapture with buffer cleaning thread ()\"\"\"\n\n def __init__(self, *args):\n self._cap = cv.VideoCapture(*args)\n self._queue = queue.Queue()\n self._thread = threading.Thread(target=self._reader)\n self._thread.daemon = True\n self._thread.start()\n\n def release(self):\n self._cap.release()\n\n def isOpened(self):\n return self._cap.isOpened()\n\n # read frames as soon as they are available, keeping only most recent one\n def _reader(self):\n while True:\n ret, frame = self._cap.read()\n if not ret:\n break\n if not self._queue.empty():\n try:\n self._queue.get_nowait() # discard previous (unprocessed) frame\n except queue.Empty:\n pass\n self._queue.put(frame)\n\n def read(self):\n return self._queue.get()\n\n\nclass CompassOldAdapter:\n \"\"\"Provides to the robot's on-board compass (some old card, the first one, not sure about model)\"\"\"\n\n def __init__(self):\n import smbus\n self._register_a = 0 # Address of Configuration register A\n self._register_b = 0x01 # Address of configuration register B\n self._register_mode = 0x02 # Address of mode register\n self._x_axis_h = 0x03 # Address of X-axis MSB data register\n self._z_axis_h = 0x05 # Address of Z-axis MSB data register\n self._y_axis_h = 0x07 # Address of Y-axis MSB data register\n self._bus = smbus.SMBus(1) # or bus = smbus.SMBus(0) for older version boards\n\n # write to Configuration Register A\n self._bus.write_byte_data(config.COMPASS_DEVICE_ADDRESS, self._register_a, 0x70)\n # Write to Configuration Register B for gain\n self._bus.write_byte_data(config.COMPASS_DEVICE_ADDRESS, self._register_b, 0xa0)\n # Write to mode Register for selecting mode\n self._bus.write_byte_data(config.COMPASS_DEVICE_ADDRESS, self._register_mode, 0)\n\n def _read_raw_data(self, address):\n \"\"\"Reads raw data from compass\"\"\"\n\n # Read raw 16-bit value\n high = self._bus.read_byte_data(config.COMPASS_DEVICE_ADDRESS, address)\n low = self._bus.read_byte_data(config.COMPASS_DEVICE_ADDRESS, address + 1)\n\n # concatenate higher and lower value\n value = ((high << 8) | low)\n\n # get signed value from module\n return value - 65536 if value > 32768 else value\n\n def get_heading_angle(self):\n \"\"\"Returns current heading angle in degrees\"\"\"\n\n x = self._read_raw_data(self._x_axis_h)\n y = self._read_raw_data(self._y_axis_h)\n heading = math.atan2(y, x) + config.COMPASS_DECLINATION\n\n # Due to declination check for > 360 degree\n if heading > 2 * math.pi:\n heading -= 2 * math.pi\n # check for sign\n if heading < 0:\n heading += 2 * math.pi\n\n # convert into angle\n return int(heading * 180 / math.pi)\n\n\nclass CompassBNO055Adapter:\n \"\"\"Provides access to the robot's on-board compass\"\"\"\n\n def __init__(self):\n import adafruit_bno055\n from busio import I2C\n import board\n\n self._i2c = I2C(board.SCL, board.SDA)\n self._sensor = adafruit_bno055.BNO055(self._i2c)\n # turn on \"compass mode\"\n self._sensor.mode = adafruit_bno055.COMPASS_MODE\n # sensor.mode = adafruit_bno055.M4G_MODE\n\n def get_euler_angle(self):\n return self._sensor.euler\n\n\nclass VescAdapter:\n \"\"\"Provides navigation engines (forward/backward) control using vesc\"\"\"\n\n def __init__(self, rpm, moving_time, alive_freq, check_freq, ser_port, ser_baudrate):\n self.start_cycle_time = time.time()\n\n self._ser = serial.Serial(port=ser_port, baudrate=ser_baudrate)\n\n self._rpm = rpm\n self._moving_time = moving_time\n self._alive_freq = alive_freq\n self._check_freq = check_freq\n self._start_time = self._next_alive_time = None\n self._allow_movement = False\n self._keep_thread_alive = True\n self._last_stop_time = 0\n\n self._ser.flushInput()\n self._ser.flushOutput()\n self._movement_ctrl_th = threading.Thread(target=self._movement_ctrl_th_tf, daemon=True)\n self._movement_ctrl_th.start()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.disconnect()\n\n def disconnect(self):\n self._ser.write(pyvesc.encode(pyvesc.SetRPM(0)))\n self._keep_thread_alive = False\n self._ser.close()\n\n def _movement_ctrl_th_tf(self):\n \"\"\"Target function of movement control thread. Responsive for navigation engines work (forward/backward)\"\"\"\n\n try:\n while self._keep_thread_alive:\n if self._allow_movement:\n if time.time() - self._start_time > self._moving_time:\n self._ser.write(pyvesc.encode(pyvesc.SetRPM(0)))\n self._last_stop_time = time.time()\n self._allow_movement = False\n continue\n\n if time.time() > self._next_alive_time:\n self._next_alive_time = time.time() + self._alive_freq\n self._ser.write(pyvesc.encode(pyvesc.SetAlive))\n time.sleep(self._check_freq)\n except serial.SerialException as ex:\n print(ex)\n\n def start_moving(self):\n self._start_time = self._next_alive_time = time.time()\n self._ser.write(pyvesc.encode(pyvesc.SetRPM(self._rpm)))\n self._allow_movement = True\n\n def stop_moving(self):\n self._allow_movement = False\n self._last_stop_time = time.time()\n self._ser.write(pyvesc.encode(pyvesc.SetRPM(0)))\n\n def stop_current(self):\n self._ser.write(pyvesc.encode(pyvesc.SetCurrent(0)))\n\n def wait_for_stop(self):\n while self._allow_movement:\n time.sleep(self._check_freq)\n\n def apply_rpm(self, rpm):\n if self._rpm != rpm: # TODO: bug to fix: if rpm was set by set_rpm - it won't be applied on vesc\n self._rpm = rpm\n self._ser.write(pyvesc.encode(pyvesc.SetRPM(self._rpm)))\n\n def set_rpm(self, rpm):\n self._rpm = rpm\n\n def set_moving_time(self, moving_time):\n self._moving_time = moving_time\n\n def set_alive_freq(self, alive_freq):\n self._alive_freq = alive_freq\n\n def set_check_freq(self, check_freq):\n self._check_freq = check_freq\n\n def is_movement_allowed(self):\n return self._allow_movement\n\n def get_last_stop_time(self):\n return self._last_stop_time\n\n def get_last_start_time(self):\n return self._start_time\n\n def get_last_moving_time(self):\n \"\"\"Returns last moving time if VESCs are not working at the moment;\n returns current working time if VESCs are working at the moment.\n \"\"\"\n if self._start_time is None:\n return 0\n elif self._allow_movement or not self._last_stop_time:\n return time.time() - self._start_time\n else:\n return self._last_stop_time - self._start_time\n\n def get_sensors_data(self, report_field_names):\n self._ser.write(pyvesc.encode_request(pyvesc.GetValues))\n in_buf = b''\n while self._ser.in_waiting > 0:\n in_buf += self._ser.read(self._ser.in_waiting)\n\n if len(in_buf) == 0:\n return None\n response, consumed = pyvesc.decode(in_buf)\n if consumed == 0:\n return None\n\n if isinstance(response, pyvesc.GetValues):\n report_row = {}\n for field_name in report_field_names:\n report_row[field_name] = getattr(response, field_name)\n return report_row\n return None\n\n\nclass GPSUbloxAdapter:\n \"\"\"Provides access to the robot's on-board GPS navigator (UBLOX card)\"\"\"\n\n def __init__(self, ser_port, ser_baudrate, last_pos_count):\n if last_pos_count < 1:\n raise ValueError(\"last_pos_count shouldn't be less than 1\")\n\n self._position_is_fresh = False\n self._last_pos_count = last_pos_count\n self._last_pos_container = []\n self._sync_locker = multiprocessing.RLock()\n\n self._serial = serial.Serial(port=ser_port, baudrate=ser_baudrate)\n # self._hot_reset()\n self._USBNMEA_OUT()\n\n self._keep_thread_alive = True\n self._reader_thread = threading.Thread(target=self._reader_thread_tf, daemon=True)\n self._reader_thread.start()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._keep_thread_alive = False\n self._serial.close()\n\n def disconnect(self):\n self._keep_thread_alive = False\n self._serial.close()\n\n def get_fresh_position(self):\n \"\"\"Waits for new fresh position from gps and returns it, blocking until new position received.\n Returns copy of stored position (returned value can be safely changed with no worrying about obj reference\n features)\"\"\"\n\n self._get_fresh_time = time.time()\n \n while len(self._last_pos_container) < 1:\n if time.time() - self._get_fresh_time > config.NO_GPS_TIMEOUT:\n raise TimeoutError\n pass\n \n with self._sync_locker:\n self._position_is_fresh = False\n \n while True:\n if time.time() - self._get_fresh_time > config.NO_GPS_TIMEOUT:\n raise TimeoutError\n with self._sync_locker:\n if not self._position_is_fresh:\n continue\n return self.get_last_position()\n\n def get_last_position(self):\n \"\"\"Waits until at least one position is stored, returns last saved position copy at the moment of call\n (reference type safe)\"\"\"\n\n while len(self._last_pos_container) < 1:\n pass\n with self._sync_locker:\n position = self._last_pos_container[-1].copy() # var may be need for context manager\n return position\n\n def get_last_positions_list(self):\n \"\"\"Waits until at least one position is stored, returns list of last saved positions copies at the moment of\n call (reference type safe)\"\"\"\n\n get_last_positions_list_time = time.time()\n\n positions = []\n while len(self._last_pos_container) < 1:\n if time.time() - get_last_positions_list_time > config.NO_GPS_TIMEOUT:\n raise TimeoutError\n pass\n with self._sync_locker:\n for position in self._last_pos_container:\n positions.append(position.copy())\n return positions\n\n def get_stored_pos_count(self):\n return len(self._last_pos_container)\n\n def _reader_thread_tf(self):\n try:\n while self._keep_thread_alive:\n position = self._read_from_gps()\n with self._sync_locker:\n if len(self._last_pos_container) == self._last_pos_count:\n self._last_pos_container.pop(0)\n self._last_pos_container.append(position)\n self._position_is_fresh = True\n except serial.SerialException as ex:\n print(\"Ublox reading error:\", ex)\n\n def _read_from_gps(self):\n \"\"\"Returns GPS coordinates of the current position\"\"\"\n\n while True:\n try:\n read_line = self._serial.readline() \n except:\n continue\n if isinstance(read_line,bytes):\n data = str(read_line)\n # if len(data) == 3:\n # print(\"None GNGGA or RTCM threads\")\n if \"GNGGA\" in data and \",,,\" not in data:\n # bad string with no position data\n # print(data) # debug\n data = data.split(\",\")\n try:\n lati, longi = self._D2M2(data[2], data[3], data[4], data[5])\n except ValueError:\n continue\n point_quality = data[6]\n return [lati, longi, point_quality] # , float(data[11]) # alti\n\n def _D2M2(self, Lat, NS, Lon, EW):\n \"\"\"Traduce NMEA format ddmmss to ddmmmm\"\"\"\n\n Latdd = float(Lat[:2])\n Latmmmmm = float(Lat[2:])\n Latddmmmm = Latdd + (Latmmmmm / 60.0)\n if NS == 'S':\n Latddmmmm = -Latddmmmm\n\n Londd = float(Lon[:3])\n Lonmmmmm = float(Lon[3:])\n Londdmmmm = Londd + (Lonmmmmm / 60.0)\n if EW == 'W':\n Londdmmmm = -Londdmmmm\n return round(Latddmmmm, 7), round(Londdmmmm, 7)\n\n def _USBNMEA_OUT(self):\n \"\"\"Start sending NMEA out on USB port at 19200 baud\"\"\"\n\n Matrame = \"B5 62 06 00 14 00 03 00 00 00 00 00 00 00 00 00 00 00 23 00 03 00 00 00 00 00 43 AE\"\n self._serial.write(bytearray.fromhex(Matrame))\n\n # Start a Hot restart\n def _hot_reset(self):\n Mythread = \"B5 62 06 04 04 00 00 00 02 00 10 68\"\n self._serial.write(bytearray.fromhex(Mythread))\n","sub_path":"adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":56487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"577682717","text":"# Task 3 \n\"\"\"\n1. 結合前面用過的程式,詢問使用者的名字,性別,還有生日。\n2. 告訴使用者還有幾天生日? 還有幾天就幾歲了 ex: 恭喜你還有101天就20歲了。\n\"\"\"\nname = input(\"請問你的名字是? \")\ngender = input(\"你的性別是? Male or Female? \")\nyear = input(\"出生於哪一年? \")\nmonth = input(\"幾月 ? \")\nday = input(\"幾號 ? \")\n\nimport datetime\n\nif gender==\"Male\":\n print(name,\"先生您好\")\nelse :\n print(name,\"小姐您好\")\n\nbirthday = datetime.datetime(int(year),int(month),int(day))\nthis_birth = datetime.datetime(2020,int(month),int(day))\nage = this_birth.year-birthday.year\nday_to_birth = this_birth - datetime.datetime.now()\n\nprint(\"恭喜你再\",day_to_birth.days,\"就\",age,\"歲了\")\n","sub_path":"01-PythonBasic_Ans/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"190615398","text":"import math\n\nimport numpy as np\nfrom astropy.stats import mad_std, sigma_clip\nfrom photutils import CircularAperture\nfrom photutils import aperture_photometry\nfrom scipy import optimize\n\n##\n# import matplotlib.pyplot as plt\n# import matplotlib.cm as cm\n# from matplotlib.colors import Normalize as Normalize\n\n################################################################\n##global variables\nPSF_model = []\n\n\n##################################################################\ndef D2_moffat(A, F, x0, y0):\n B = PSF_model[0]\n C = PSF_model[1]\n D = PSF_model[2]\n E = PSF_model[3]\n return lambda y, x: A * (1 + ((x - x0) * B) ** 2. +\n ((y - y0) * C) ** 2. + ((x - x0) * (y - y0) * (D ** 2.))) ** (-E) + F\n\n\n##################################################################\ndef D2_moffat_phot(ROI, x_coo, y_coo, R1, R2, R3):\n x0 = x_coo - math.floor(x_coo) + ROI.shape[1] / 2.\n y0 = y_coo - math.floor(y_coo) + ROI.shape[0] / 2.\n\n # make mask\n grid = np.indices(ROI.shape)\n mask = np.sqrt((grid[1] - x0) * (grid[1] - x0) + (grid[0] - y0) * (grid[0] - y0))\n MaxPix = np.max(ROI[mask < R1])\n # calc sigma clipped background\n ROI_Copy = np.copy(ROI)\n ROI_Copy[mask < R2] = np.nan\n ROI_Copy[mask > R3] = np.nan\n\n Sky = sigma_clip(ROI_Copy, sigma=3, maxiters=5, stdfunc=mad_std).filled(np.nan)\n NSky = np.count_nonzero(~np.isnan(Sky))\n median_Sky = np.nanmedian(Sky)\n sigma_Sky = np.nanstd(Sky)\n\n # print(median_Sky)\n # plt.imshow(Sky)\n # plt.show()\n\n # recompute center of star\n params = (MaxPix, median_Sky, x0, y0)\n errorfunction = lambda p: np.ravel(D2_moffat(*p)(*np.indices(ROI.shape)) - ROI)\n p, success = optimize.leastsq(errorfunction, params, maxfev=1000, ftol=0.05)\n\n # med=np.median(ROI)\n # stdv=np.std(ROI)\n # plt.imshow(ROI, cmap=cm.Greys_r, aspect='equal',\n # norm= Normalize(vmin=med-stdv*2., vmax=med+stdv*5.), interpolation='nearest')\n #\n # plt.plot(p[2], p[3], 'ro')\n # plt.show()\n\n return (p[2] + math.floor(x_coo) - (ROI.shape[1] / 2.), p[3] + math.floor(y_coo) - (ROI.shape[0] / 2.),\n median_Sky, NSky, sigma_Sky, MaxPix)\n\n\n##############################\ndef Phot(Data, XY, PSF, RAper, FWHM, Gain, Rnoise, Saturation):\n global PSF_model\n PSF_model = PSF\n\n R1 = np.ceil(FWHM * 1) # estimation of aperture radii\n R2 = np.ceil(FWHM * 4.) # sky annulus inner radii\n R3 = np.ceil(FWHM * 7.) # sky annulus outer radii\n\n # outputs empty lists\n coo = []\n sky = []\n nsky = []\n ssky = []\n maximum = []\n flux = []\n\n for ii in range(0, len(XY)):\n x_coo = XY[ii, 0]\n y_coo = XY[ii, 1]\n _coo = (0., 0.)\n _sky = np.nan # sky level\n _nsky = np.nan # number of pixel for sky\n _ssky = np.nan # sigma of sky\n _max = np.nan # max pixel\n\n # check maximum level for linearity\n if R3 < y_coo < (Data.shape[0] - R3) and R3 < x_coo < (Data.shape[1] - R3):\n # copy subarray\n ROI = np.copy(Data[int(y_coo - R3):int(y_coo + R3), int(x_coo - R3):int(x_coo + R3)])\n # try:\n param = D2_moffat_phot(ROI, x_coo, y_coo, R1, R2, R3)\n if np.isnan(param[0]) == 0:\n _coo = (param[0], param[1])\n _sky = param[2]\n _nsky = param[3]\n _ssky = param[4]\n if param[5] < Saturation:\n _max = param[5]\n # except:\n # print ('#', ii, 'photomety failed')\n # pass\n\n # append 0 if photometry failed\n coo.append(_coo)\n maximum.append(_max)\n sky.append(_sky)\n nsky.append(_nsky)\n ssky.append(_ssky)\n\n coo = np.array(coo)\n coo = np.around(coo, 3)\n sky = np.array(sky)\n sky = np.around(sky, 3)\n nsky = np.array(nsky)\n ssky = np.around(ssky, 3)\n maximum = np.array(maximum)\n apertures = [CircularAperture(coo, r=r) for r in RAper]\n # aper = CircularAperture(coo, r=RAper)\n # print(aper)\n phot_table = aperture_photometry(Data, apertures, method='exact')\n # phot_table = aperture_photometry(Data, aper, method='exact')\n bkg_sum = [sky * a.area for a in apertures]\n # bkg_sum = sky * aper.area\n # print (phot_table)\n f = phot_table['aperture_sum_0'] - bkg_sum[0]\n f = np.array(f)\n flux.append(f)\n f = phot_table['aperture_sum_1'] - bkg_sum[1]\n f = np.array(f)\n flux.append(f)\n f = phot_table['aperture_sum_2'] - bkg_sum[2]\n f = np.array(f)\n flux.append(f)\n # f = phot_table['aperture_sum_3'] - bkg_sum[3]\n # f = np.array(f)\n # flux.append(f)\n\n # med=np.median(Data)\n # stdv=np.std(Data)\n # plt.imshow(Data, cmap=cm.Greys_r, aspect='equal',\n # norm= Normalize(vmin=med-stdv*2., vmax=med+stdv*5.), interpolation='nearest')\n # ### aper = CircularAperture(coo, r=R1)\n # aper.plot(color='blue', lw=1.5, alpha=0.5)\n # aper = CircularAperture(coo, r=R2)\n # aper.plot(color='green', lw=1.5, alpha=0.5)\n # aper = CircularAperture(coo, r=R3)\n # aper.plot(color='red', lw=1.5, alpha=0.5)\n # plt.show()\n\n mag = [20. - 2.5 * np.log10(_f) for _f in flux]\n err = [1.0857 * np.sqrt(_f * Gain + a.area * (ssky * ssky * Gain)) / (_f * Gain)\n for _f in flux for a in apertures]\n sn = [1.0857 / error for error in err]\n\n return flux, sn, mag, err, sky, maximum\n\n################################################################################\n","sub_path":"Phot.py","file_name":"Phot.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"78356998","text":"# Faça um programa que leia três números e mostre qual é o maior\n# e qual é o menor.\n\nn1 = int(input('Qual o número? '))\nn2 = int(input('Qual o número? '))\nn3 = int(input('Qual o número? '))\nmenor = n1\nif n2 < n1 and n2 < n3:\n menor = n2\nif n3 < n1 and n3 < n2:\n menor = n3\nmaior = n1\nif n2 > n1 and n2 > n3:\n maior = n2\nif n3 > n1 and n3 > n2:\n maior = n3\nprint('\\033[1;31mO menor número é {}.'.format(menor))\nprint('\\033[1;36mO maior número é {}.'.format(maior))\n","sub_path":"PythonExercicios/ex033.py","file_name":"ex033.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"147710978","text":"#encoding=utf-8\n\nimport need_py3\nimport cv2\nimport argparse\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageDraw\n\ndef step_pick(s, n) :\n if n == 1 :\n return s\n elif n < 0 :\n return s[::n]\n else :\n m = (len(s) - 1) % n\n return s[m::n]\n\ndef get_char_list(name = None, verbose = False) :\n if verbose :\n print('validating char-list: ' + str(name))\n c96 = \"p$gqRHDBhN9bQ8dE506kPA#UMKOG4y@a2ZSf3TLFCl71xuztX%nWojeYmJsiVcrw]&I[v{}!(^*)?>+<\\\\=/|_-~;\\\"\\`\\':,. \"\n if name == '96' :\n char_list = c96\n elif name == '48' :\n char_list = step_pick(c96, 2)\n elif name == '24' :\n char_list = step_pick(c96, 4)\n elif name == '12' :\n char_list = step_pick(c96, 8)\n elif name != None :\n char_list = name\n else :\n char_list = \"@%#*+=-:. \"\n if verbose :\n print('using char-list: \\'' + char_list + '\\'')\n return char_list\n\ndef img2ascii(image, char_list = None, black = False, scale_func = None, verbose = False) :\n if char_list == None :\n char_list = get_char_list(None, verbose)\n\n if black :\n char_list = char_list[::-1]\n\n if scale_func != None :\n dstShape = scale_func(image.shape)\n if dstShape != image.shape :\n if verbose :\n print('resizing image from %s into %s' % (str(image.shape), str(dstShape)))\n image = cv2.resize(image, dstShape[::-1], interpolation=cv2.INTER_AREA)\n\n L = len(char_list)\n image = (image.astype(np.float32) * L / 256).astype(np.uint8)\n\n result = []\n progress = 0\n for line in image :\n one = ''.join([char_list[x] for x in line])\n result.append(one)\n if verbose :\n p = int(len(result) * 100 / image.shape[0])\n if p > progress :\n print('\\rimage progress: ' + str(p) + '%.', end = '')\n progress = p\n\n if verbose :\n print('ascii generated @ ' + str(len(result)))\n return result\n\ndef file2ascii(im_path, char_list = None, black = False, scale_func = None, verbose = False) :\n if verbose :\n print('reading image file: ' + im_path)\n image = cv2.imread(im_path, cv2.IMREAD_GRAYSCALE)\n if verbose :\n print('image loaded: shape = ' + str(image.shape)) # (r,c)\n result = img2ascii(image, char_list, black, scale_func, verbose)\n return result\n\ndef scaled_shape(shape, scale) :\n if scale >= 1 :\n return shape\n else :\n return tuple([int(x * scale) for x in shape])\n\ndef max_dim(shape, max_val) :\n scale = min([max_val / x for x in shape])\n return scaled_shape(shape, scale)\n\ndef max_width(shape, max_val) :\n scale = max_val / shape[1]\n return scaled_shape(shape, scale)\n\ndef max_height(shape, max_val) :\n scale = max_val / shape[0]\n return scaled_shape(shape, scale)\n\ndef save_as_image(text_array, im_path, black = False, \\\n font_path = '/System/Library/Fonts/Courier.dfont', verbose = False) :\n if verbose :\n print('loading font: ' + font_path)\n font = ImageFont.truetype(font_path, 10)\n\n if verbose :\n print('calculating line size')\n line_size = [0, 0]\n for line in text_array :\n one = font.getsize(line) # (w,h)\n line_size[0] = max(line_size[0], one[0])\n line_size[1] = max(line_size[1], one[1])\n # line_size[1] += 1\n if verbose :\n print('line size: ' + str(line_size))\n\n img_size = (line_size[0] + 2, len(text_array) * line_size[1] + 1) # (w,h)\n if verbose :\n print('save image size: ' + str(img_size))\n\n bg = 0 if black else 255\n fg = 255 - bg\n img = Image.new('L', img_size, bg)\n\n draw = ImageDraw.Draw(img)\n position = [1, 1] # (w,h)\n progress = 0\n for line in text_array :\n draw.text(position, line, font = font, fill = fg)\n position[1] += line_size[1]\n if verbose :\n p = int(position[1] * 100 / img_size[1])\n if p > progress :\n print('\\rimage progress: ' + str(p) + '%.', end = '')\n progress = p\n\n if verbose :\n img.show()\n img.save(im_path)\n\nif __name__ == '__main__' :\n argParser = argparse.ArgumentParser()\n argParser.add_argument('imagePath', type = str)\n argParser.add_argument('--char-list', type = str, default = None)\n argParser.add_argument('--black', action = 'store_const', default = False, const = True)\n argParser.add_argument('--max', type = int, default = 0)\n argParser.add_argument('--max-height', type = int, default = 0)\n argParser.add_argument('--max-width', type = int, default = 0)\n # argParser.add_argument('--html', action = 'store_const', default = False, const = True)\n argParser.add_argument('--verbose', action = 'store_const', default = False, const = True)\n argParser.add_argument('--im-out', type = str, default = None)\n cmdArgs = argParser.parse_args()\n\n def scale_max(shape) :\n return max_dim(shape, cmdArgs.max)\n\n def scale_max_height(shape) :\n return max_height(shape, cmdArgs.max_height)\n\n def scale_max_width(shape) :\n return max_width(shape, cmdArgs.max_width)\n\n scale_func = None\n if cmdArgs.max > 0 :\n scale_func = scale_max\n elif cmdArgs.max_height > 0 :\n scale_func = scale_max_height\n elif cmdArgs.max_width > 0 :\n scale_func = scale_max_width\n\n char_list = get_char_list(cmdArgs.char_list, verbose = cmdArgs.verbose)\n\n # if cmdArgs.html :\n # print('')\n # print('\\n' + cmdArgs.imagePath + ':\\n')\n # if cmdArgs.html :\n # print(' ')\n\n chars = file2ascii(cmdArgs.imagePath, char_list, cmdArgs.black, scale_func, verbose = cmdArgs.verbose)\n\n if cmdArgs.im_out != None :\n save_as_image(chars, cmdArgs.im_out, cmdArgs.black, verbose = cmdArgs.verbose)\n else :\n for line in chars :\n print(line) #, end = ' \\n' if cmdArgs.html else '\\n')\n # if cmdArgs.html :\n # print('')\n\n","sub_path":"ascii.py","file_name":"ascii.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"283708724","text":"import argparse\nimport json\nimport os\nfrom collections import Counter\n\nimport RAKE\nimport nltk\n\nMIN_CHAR_LENGTH = 4 # minimum number of characters required for a keyword\nMAX_KEY_PHRASE_LENGTH = 1 # maximum words in a key_phrase\nMAX_NUMBER_KEY_PHRASES = 40 # maximum number of key_phrases (different from number of questions)\nMAX_SENTENCES = 25 # maximum number of sentences (again, different from the number of questions generated)\nUSE_RAKE = True\n\n\nclass Questions:\n def load_text_from_file(self, file_names, tokenization=False):\n \"\"\"\n Given a filename, return a string of contents or a list of sentences.\n @:param filename The name of the file to load text from.\n @:keyword tokenization If true, return a list of sentences. If false, return a single string.\n @:return string or list of sentences\n \"\"\"\n source_text = \"\"\n for file_name in file_names:\n # assume files are in utf-8, but backslash replace them when errors are encountered to allow processing to continue\n # http://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html\n for line in open(file_name, encoding=\"utf-8\", errors=\"backslashreplace\"):\n source_text += line\n source_text = ' '.join(source_text.splitlines()) # remove newlines\n\n if tokenization:\n sent_detector = nltk.data.load(\"tokenizers/punkt/english.pickle\")\n source_text = sent_detector.tokenize(source_text.strip())\n\n return source_text\n\n def generate_ner_question(self, question):\n \"\"\"\n Tag the sentence using the Named Entity Recognition tagger from nltk\n @:param sentence sentence that contains a keyphrase\n @:return tuple (anwser, question) or None if nothing found\n \"\"\"\n chunked = nltk.ne_chunk(nltk.pos_tag(question.split()))\n answers = self.get_continuous_chunks(chunked)\n idx = 0\n keys = []\n if answers:\n for a in answers:\n new_question = question.replace(a, '[[' + str(idx) + ']]')\n # because there is a possibility of overlapping answers, some anwswers might not exist by this time\n if new_question != question:\n question = new_question\n idx += 1\n keys.append(a)\n return keys, question\n else:\n return None\n\n def get_continuous_chunks(self, chunked):\n \"\"\"\n Extract all relative information out of the Tree created by ne_chunk\n @param: chunked Tree retruned from the ne_chunk function\n @return: list of tagged values located by traversing the Tree\n \"\"\"\n continuous_chunk = []\n current_chunk = []\n for subtree in chunked:\n if isinstance(subtree, nltk.Tree):\n current_chunk.append(\" \".join([token for token, pos in subtree.leaves()]))\n elif current_chunk:\n named_entity = \" \".join(current_chunk)\n if named_entity not in continuous_chunk:\n continuous_chunk.append(named_entity)\n current_chunk = []\n else:\n continue\n return continuous_chunk\n\n def get_nouns(self, stoplist, text):\n \"\"\"\n An alternate method for extracting length 1 keywords. This searches for NNs, NNPs, and NNS,\n which are nouns, proper nouns, and plural nouns, respectively using the nltk tokenizer.\n Any commonly occuring nouns which are in the stoplist are thrown out.\n :param stoplist: path to a .txt file with words not to treat as keywords\n :param text: a string of all the text to be analyzed\n :return: a list of keyword strings\n \"\"\"\n noun_tags = [\"NN\", \"NNP\", \"NNS\"]\n # common nouns that usually aren't keywords possibly in the future add words that have been determined through machine learning\n # for now, add words that seem like they'd been unhelpful most of the time\n stop_words = []\n for line in open(stoplist):\n stop_words.append(line.strip())\n\n tokenized_text = nltk.word_tokenize(text)\n nouns = [word for word, tag in nltk.pos_tag(tokenized_text) if tag in noun_tags and word not in stop_words]\n most_common_nouns = [noun for noun, count in Counter(nouns).most_common(MAX_NUMBER_KEY_PHRASES)]\n\n return most_common_nouns\n\n def get_RAKE_keyphrases(self, stoplist, text):\n \"\"\"\n Extract the key phrases using RAKE from text using the stoplist.\n :param text: String of text to extract keyphrases from\n :param stoplist: file name of stop list\n :return: list of key phrases meeting criterion\n \"\"\"\n Rake = RAKE.Rake(stoplist) # initialize key_phrase extractor using the SmartStoplist\n key_phrases = Rake.run(text)\n\n key_phrases = [k for (k, weight) in key_phrases\n if len(k.split()) <= MAX_KEY_PHRASE_LENGTH and len(k) >= MIN_CHAR_LENGTH]\n return key_phrases[0:MAX_NUMBER_KEY_PHRASES + 1] # only return as many as requested\n\n def generate_questions(self, file_names, use_rake=True):\n \"\"\"\n Take a list of sentences and key phrases from those sentences and generate questions in JSON format.\n :param sentences: List of sentences.\n :param key_phrases: List of key phrases\n :return: JSON formatted sentence/answer pairs\n \"\"\"\n\n text = self.load_text_from_file(file_names)\n word_count = len(text.split()) # this is an approximation of \"true\" word count\n sentences = self.load_text_from_file(file_names, tokenization=True)\n stoplist_dir = os.path.dirname(__file__)\n stoplist_path = os.path.join(stoplist_dir, \"SmartStoplist.txt\")\n if use_rake:\n key_phrases = self.get_RAKE_keyphrases(stoplist_path, text)\n else:\n key_phrases = self.get_nouns(stoplist_path, text)\n fillInTheBlank = [] # list of dictionaries in format {\"sentence\":sentence, \"keys\":[answer1, answer2, ...]}\n for sentence in sentences:\n ner_sentence = sentence\n keys = []\n found = False\n i = 0 # indexing for sentences with multiple key phrases\n for key_phrase in key_phrases: # for top ranked key_phrases\n if key_phrase in sentence:\n key_phrase_length = len(key_phrase.split())\n sentence_length = len(sentence.split())\n # make sure the key phrase is shorter than remaining words in sentence\n if (sentence_length - key_phrase_length) > key_phrase_length + 1:\n found = True\n sentence = sentence.replace(key_phrase, '[[' + str(i) + ']]')\n keys.append(key_phrase)\n i += 1\n if found:\n new_sentence = {\"sentence\": sentence, \"keys\": keys}\n fillInTheBlank.append(new_sentence)\n ner_results = self.generate_ner_question(ner_sentence)\n if ner_results is not None:\n ner_answers = ner_results[0]\n ner_question = ner_results[1]\n new_question = {\"sentence\": ner_question, \"keys\": ner_answers}\n fillInTheBlank.append(new_question)\n fillInTheBlank = sorted(fillInTheBlank, key=lambda x: len(x[\"keys\"]), reverse=True)[\n :MAX_SENTENCES + 1] # prefer sentences with lots of keys\n fillInTheBlankQuestions = json.dumps({\"fillInTheBlank\": fillInTheBlank})\n # fillInTheBlankQuestions = json.dumps({\"fillInTheBlank\": fillInTheBlank, \"wordCount\": word_count})\n return fillInTheBlankQuestions\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"files\", nargs=\"+\")\n opts = parser.parse_args()\n\n questions = Questions().generate_questions(opts.files, use_rake=USE_RAKE)\n print(questions) # send to sys.stdout for Node.js\n","sub_path":"quizServer/QuestionGenerator/QuestionGenerator.py","file_name":"QuestionGenerator.py","file_ext":"py","file_size_in_byte":8102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"117440086","text":"\"\"\"\nProblem\nGiven a value N, if we want to make change for N cents, and we have infinite supply of each of \nS = { S1, S2, .. , Sm} valued //coins, how many ways can we make the change? \nThe order of coins doesn't matter.\nFor example, for N = 4 and S = [1, 2, 3], there are four solutions: \n[1, 1, 1, 1], [1, 1, 2], [2, 2], [1, 3]. \nSo output should be 4. \nFor N = 10 and S = [2, 5, 3, 6], there are five solutions: \n[2, 2, 2, 2, 2], [2, 2, 3, 3], [2, 2, 6], [2, 3, 5] and [5, 5]. \nSo the output should be 5.\n\"\"\"\nimport sys\nfrom array import array\n\ndef coin_change(denominations, amount):\n solution = array('i',(0 for i in range(0, amount + 1)))\n solution[0] = 1\n for den in denominations:\n for i in range(den, amount + 1):\n solution[i] += solution[i - den] \n return solution[amount]\n\ndef coin_change_2(denomination,amount):\n solution = []\n for i in range(0, amount + 1):\n solution.append(0)\n solution[0] = 1\n\n for den in denomination:\n for i in range(0, len(solution)):\n if (i >= den): \n solution[i] = solution[i] + solution[i - den]\n return solution[amount]\n\n# Bottom Up\ndef number_of_ways_to_change_coin(denominations, amount):\n number_of_ways = [0 for i in range(0, amount + 1)]\n number_of_ways[0] = 1\n for denomination in denominations:\n for i in range(0, amount + 1):\n if i >= denomination:\n # print (i, denomination)\n number_of_ways[i] += number_of_ways[i - denomination]\n\n return number_of_ways[amount]\n\n\nif __name__ == \"__main__\":\n S = [2, 5, 3, 6]\n\n print (coin_change(S,4))\n print (coin_change_2(S,4))\n print (number_of_ways_to_change_coin(S,4))\n \n print (coin_change(S,100))\n print (coin_change_2(S,100))\n print (number_of_ways_to_change_coin(S,100))\n","sub_path":"Leetcode/dp/coin_change.py","file_name":"coin_change.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"181540641","text":"\n# Installation and actions to be always done\n# Author: Evgeny Blokhin\n\nimport os, sys\nimport json\nimport logging\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.pool import QueuePool, NullPool\n\nimport tilde.core.model as model\n\n\nDB_SCHEMA_VERSION = '5.20'\nSETTINGS_FILE = 'settings.json'\nDEFAULT_SQLITE_DB = 'default.db'\nBASE_DIR = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))\nROOT_DIR = os.path.normpath(BASE_DIR + '/../')\nDATA_DIR = os.path.join(ROOT_DIR, 'data')\nEXAMPLE_DIR = os.path.join(ROOT_DIR, '../tests/data')\nINIT_DATA = os.path.join(DATA_DIR, 'sql/init-data.sql')\nTEST_DBS_FILE = os.path.join(DATA_DIR, 'test_dbs.txt')\nTEST_DBS_REF_FILE = os.path.join(DATA_DIR, 'test_dbs_ref.txt')\nSETTINGS_PATH = DATA_DIR + os.sep + SETTINGS_FILE\nGUI_URL_TPL = 'http://tilde-lab.github.io/berlinium/?http://127.0.0.1:%s' # ?https://db.tilde.pro\n\nDEFAULT_SETUP = {\n\n # General part\n 'debug_regime': False, # TODO\n 'log_dir': os.path.join(DATA_DIR, \"logs\"), # TODO\n 'skip_unfinished': False,\n 'skip_notenergy': False,\n 'skip_if_path': [],\n\n # DB part\n 'db': {\n 'default_sqlite_db': DEFAULT_SQLITE_DB,\n 'engine': 'sqlite', # if sqlite is chosen: further info is not used\n 'host': 'localhost',\n 'port': 5432,\n 'user': 'postgres',\n 'password': '',\n 'dbname': 'tilde'\n },\n\n # Server part\n 'webport': 8070,\n 'title': \"Tilde GUI\"\n}\n\n\ndef virtualize_path(item):\n return item\n\n\ndef connect_url(settings, named=None):\n if settings['db']['engine'] == 'sqlite':\n if not named:\n named = settings['db']['default_sqlite_db']\n if os.sep in named:\n named = os.path.realpath(os.path.abspath(named))\n else:\n named = os.path.join(DATA_DIR, named)\n\n return settings['db']['engine'] + ':///' + named\n\n elif settings['db']['engine'] == 'postgresql':\n return settings['db']['engine'] + '+pg8000://' + settings['db']['user'] + ':' + settings['db']['password'] + '@' + settings['db']['host'] + ':' + str(settings['db']['port']) + '/' + settings['db']['dbname']\n\n else: sys.exit('Unsupported DB type: %s!\\n' % settings['db']['engine'])\n\n\ndef connect_database(settings, named=None, no_pooling=False, default_actions=True, scoped=False):\n '''\n @returns session factory on success\n @returns False on failure\n '''\n connstring = connect_url(settings, named)\n poolclass = NullPool if no_pooling else QueuePool\n engine = create_engine(connstring, echo=settings['debug_regime'], poolclass=poolclass)\n Session = sessionmaker(bind=engine, autoflush=False)\n\n if default_actions:\n # 1.\n model.Base.metadata.create_all(engine)\n\n session = Session()\n # 2.\n try: pragma = session.query(model.Pragma.content).one()\n except NoResultFound:\n pragma = model.Pragma(content = DB_SCHEMA_VERSION)\n session.add(pragma)\n else:\n if pragma.content != DB_SCHEMA_VERSION:\n sys.exit('Database %s is incompatible: expected schema version %s, found %s' % (connstring.split('/')[-1], DB_SCHEMA_VERSION, pragma.content))\n # 3.\n chk = session.query(model.Hierarchy_value).first()\n if not chk:\n if not os.path.exists(INIT_DATA):\n sys.exit(INIT_DATA + ' not found!')\n\n f = open(INIT_DATA)\n statements = list(filter(None, f.read().splitlines()))\n f.close()\n\n nlines = 0\n pocket = []\n for n in range(len(statements)):\n if statements[n].startswith('--'):\n continue\n elif not statements[n].endswith(';'):\n pocket.append(statements[n])\n continue\n else:\n if pocket:\n engine.execute( \"\".join(pocket) + statements[n] )\n pocket = []\n else:\n engine.execute(statements[n])\n nlines += 1\n\n logging.warning(\"Applied DB model from file %s\" % INIT_DATA)\n logging.warning(\"SQL statements executed: %s\" % nlines)\n\n session.commit()\n session.close()\n\n if scoped:\n return scoped_session(Session)\n\n return Session()\n\n\ndef write_settings(settings):\n '''\n Saves user's settings\n @returns True on success\n @returns False on failure\n '''\n if not os.access(DATA_DIR, os.W_OK): return False\n try:\n f = open(SETTINGS_PATH, 'w')\n f.writelines(json.dumps(settings, indent=4))\n f.close()\n os.chmod(os.path.abspath(SETTINGS_PATH), 0o777) # to avoid (or create?) IO problems with multiple users\n except IOError:\n return False\n else:\n return True\n\n\ndef get_hierarchy(settings):\n '''\n Gets main mapping source according to what a data classification is made\n Gets the hierarchy groups (only for GUI)\n Gets the hierarchy values\n '''\n hierarchy, hierarchy_groups, hierarchy_values = [], [], {}\n hgroup_ids, enumerated_vals = {}, set()\n\n session = connect_database(settings)\n\n for item in session.query(model.Hierarchy_value).all():\n try:\n hierarchy_values[item.cid].update({item.num: item.name})\n except KeyError:\n hierarchy_values[item.cid] = {item.num: item.name}\n enumerated_vals.add(item.cid)\n try:\n for item in session.query(model.Hierarchy).all():\n if item.has_facet and not item.has_topic:\n raise RuntimeError('Fatal error: \"has_facet\" implies \"has_topic\"')\n if item.slider and not '.' in item.slider:\n raise RuntimeError('Fatal error: \"has_slider\" must have a reference to some table field')\n\n hierarchy.append({\n 'cid': item.cid,\n 'category': item.name,\n 'source': item.source,\n 'html': item.html,\n 'has_slider': item.slider,\n 'sort': item.sort,\n 'multiple': item.multiple,\n 'optional': item.optional,\n 'has_summary_contrb': item.has_summary_contrb,\n 'has_column': item.has_column,\n 'has_facet': item.has_facet,\n 'creates_topic': item.has_topic,\n 'is_chem_formula': item.chem_formula,\n 'plottable': item.plottable,\n 'enumerated': True if item.cid in enumerated_vals else False\n })\n try:\n hgroup_ids[item.hgroup_id].append(item.cid)\n except KeyError:\n hgroup_ids[item.hgroup_id] = [item.cid]\n except RuntimeError as e:\n session.close()\n sys.exit(e)\n for item in session.query(model.Hierarchy_group).all():\n hierarchy_groups.append({\n 'id': item.hgroup_id,\n 'category': item.name,\n 'html_pocket': '', # specially for JavaScript client\n 'landing_group': item.landing_group,\n 'settings_group': item.settings_group,\n 'includes': hgroup_ids[item.hgroup_id]\n })\n session.close()\n return hierarchy, hierarchy_groups, hierarchy_values\n\n\n# DEFAULT ACTIONS ALWAYS TO DO: LOAD/SAVE SETTINGS\nif not os.path.exists(os.path.abspath(SETTINGS_PATH)):\n settings = DEFAULT_SETUP\n if not os.path.exists(DATA_DIR):\n try:\n os.makedirs(DATA_DIR)\n except IOError:\n sys.exit('I/O error: failed write to %s' % DATA_DIR)\n if not write_settings(settings):\n sys.exit('I/O error: failed to save settings in %s' % DATA_DIR)\n\ntry: settings\nexcept NameError:\n try:\n settings = json.loads(open(SETTINGS_PATH).read())\n except ValueError:\n sys.exit('Your %s seems to be bad-formatted, please, pay attention to commas and quotes' % SETTINGS_PATH)\n except IOError:\n sys.exit('Your %s is not accessible' % SETTINGS_PATH)\n\n DEFAULT_SETUP.update(settings)\n settings = DEFAULT_SETUP\n\n\n# DEFAULT ACTIONS ALWAYS TO DO: CHECK SETTINGS COMBINATIONS & RESTRICTIONS\nif settings['skip_if_path'] and len(settings['skip_if_path']) > 3:\n sys.exit('Path skipping directive must not contain more than 3 symbols due to memory limits')\n\nif not 'engine' in settings['db'] or settings['db']['engine'] not in ['sqlite', 'postgresql']:\n sys.exit('This DB backend is not supported')\n\nif not 'default_sqlite_db' in settings['db']:\n sys.exit('Note that the settings.json format has been changed with the respect to the default sqlite DB')\n","sub_path":"tilde/core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"641633596","text":"from block import *\nimport pygame\nimport random\nimport math\n\nCOLOR = (255, 0, 0)\n\nclass Bad_Block(Block):\n def __init__(self, width, height, image):\n self.image = image\n super().__init__(COLOR, width, height, self.image)\n \n # The \"center\" of the orbit\n self.center_x = random.randrange(700)\n self.center_y = random.randrange(400)\n \n # Where along the orbit the object is at\n self.angle = random.random() * 2 * math.pi\n \n # Radius of the orbit\n self.radius = random.randrange(10, 200)\n \n # Speed of orbit\n self.speed = .01\n\n def update(self):\n self.rect.x = self.radius * math.sin(self.angle) + self.center_x\n self.rect.y = self.radius * math.cos(self.angle) + self.center_y\n \n self.angle += self.speed\n","sub_path":"bad_block.py","file_name":"bad_block.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"383042603","text":"from itertools import product\n\n\ndef lcs(str1, str2):\n \"\"\"\n Find the longest common subsequence of two strings\n :param str1: first string to check\n :param str2: second string to check\n :return: longest common subsequence\n \"\"\"\n m, n = len(str1), len(str2)\n\n # 2d array to store subproblems\n # padded on top and left with empty strings\n sub = [[\"\"] * (n + 1) for i in range(m + 1)]\n\n for i, j in product(range(m + 1), range(n + 1)):\n if i == 0 or j == 0:\n # comparing empty strings, maintain padding\n continue\n\n if str1[i - 1] == str2[j - 1]:\n # same character, add to previous largest solution\n sub[i][j] = sub[i - 1][j - 1] + str1[i - 1]\n else:\n # characters don't match\n # current solution must be the largest previous solution\n sub[i][j] = sub[i - 1][j] if len(sub[i - 1][j]) > len(sub[i][j - 1]) else sub[i][j - 1]\n\n return sub[m][n]\n","sub_path":"dynamic_programming/longest_common_subsequence.py","file_name":"longest_common_subsequence.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"137357281","text":"from django.urls import path, include\nfrom django.conf import settings\nfrom . import views\n\nurlpatterns = [\n path('', views.NewsListView.as_view()),\n path('newsitems', views.NewsListView.as_view(), name='NewsListView'),\n path('newsitems/', views.NewsDetailView.as_view(), name='NewsDetailView'),\n path('edit_news/', views.AddNewsView.as_view(), name='AddNewsView'),\n path('edit_news/', views.EditNewsView.as_view(), name='EditNewsView'),\n path('newsitems//add_comment', views.AddNewsComment.as_view(), name='AddNewsComment'),\n path('login/', views.UserLoginView.as_view(), name='login'),\n path('logout/', views.UserLogoutView.as_view(), name='logout'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += path('__debug__/', include(debug_toolbar.urls)),\n","sub_path":"05_Forms/news/app_news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"584525176","text":"import pickle\nimport pandas\nimport numpy\n\ndf_topo = pandas.read_csv('data/indici_RD.csv', delimiter=',')\n\ndf = pickle.load(open('data/features_2012_2015.pkl'))\n\ndf_training = df.iloc[0:24095]\noutput = open('data/features_2012_2014.pkl', 'wb')\npickle.dump(df_training,output)\n\n\ndf_testing = df.iloc[24095:df.shape[0]]\noutput = open('data/features_2015_2016.pkl', 'wb')\npickle.dump(df_testing,output)\n\nR_topo = numpy.asarray(df_topo[\"R_topo\"])\ntopo = numpy.asarray(df_topo[\"topo\"])\ndf[\"R_topo\"] = R_topo\ndf['topo'] = topo\n\ndf_training = df.iloc[0:24095]\noutput = open('data/features_with_topo_2012_2014.pkl', 'wb')\npickle.dump(df_training,output)\n\n\ndf_testing = df.iloc[24095:df.shape[0]]\noutput = open('data/features_with_topo_2015_2016.pkl', 'wb')\npickle.dump(df_testing,output)","sub_path":"descrittore_topologico/create_df_with_topo.py","file_name":"create_df_with_topo.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"193671364","text":"from gwp import solver\nimport numpy as np\n\nn = 128\nnangles = 6\nalpha = 1\nbeta = 3\nep = 1e-2\ncl = solver.Solver(n, nangles, alpha, beta, ep)\nf = np.random.random([n, n, n]).astype('float32')\ncoeffs = cl.fwd(f)\nfr = cl.adj(coeffs)\n\ns0 = np.sum(f*np.conj(fr))\ns1 = np.float32(0)\nfor k in range(len(coeffs)):\n s1 += np.sum(coeffs[k]*np.conj(coeffs[k]))\n\nprint('Adjoint test ?= :', s0, '?=', s1)\n","sub_path":"examples/adjoint_test.py","file_name":"adjoint_test.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"619995657","text":"from trees import *\nfrom vl_codes import *\nimport arithmetic \nfrom itertools import groupby\nfrom json import dump\nfrom sys import argv\n\n\ndef camzip(method, filename):\n\n with open(filename, 'rb') as fin:\n x = fin.read()\n\n frequencies = dict([(key, len(list(group))) for key, group in groupby(sorted(x))])\n n = sum([frequencies[a] for a in frequencies])\n p = dict([(a,frequencies[a]/n) for a in frequencies])\n\n if method == 'huffman' or method == 'shannon_fano':\n if (method == 'huffman'):\n xt = huffman(p)\n c = xtree2code(xt)\n else:\n c = shannon_fano(p)\n xt = code2xtree(c)\n\n y = vl_encode(x, c)\n\n elif method == 'arithmetic':\n y = arithmetic.encode(x,p)\n\n else:\n raise NameError('Compression method %s unknown' % method)\n \n \n y = bytes(bits2bytes(y))\n \n outfile = filename + '.cz' + method[0]\n\n with open(outfile, 'wb') as fout:\n fout.write(y)\n\n pfile = filename + '.czp'\n n = len(x)\n\n with open(pfile, 'w') as fp:\n dump(frequencies, fp)\n\n\nif __name__ == \"__main__\":\n if (len(argv) != 3):\n print('Usage: python %s compression_method filename\\n' % argv[0])\n print('Example: python %s huffman hamlet.txt' % argv[0])\n print('or: python %s shannon_fano hamlet.txt' % argv[0])\n print('or: python %s arithmetic hamlet.txt' % argv[0])\n exit()\n\n camzip(argv[1], argv[2])\n\n","sub_path":"IIA_3F7/3F7py/camzip.py","file_name":"camzip.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"198383535","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n if head is None or head.next is None:\n return True\n pre, slow, fast = None, head, head\n while fast is not None and fast.next is not None:\n fast = fast.next.next\n post = slow.next\n slow.next = pre\n pre = slow\n slow = post\n # initial: 1->2->3->None\n # end of the loop: None<-1, 2->3->None,\n # pre = 1, slow = 2, fast = 3\n \n # initial: 1->2->None\n # end of the loop: None<-1, 2->None,\n # pre = 1, slow = 2, fast = None\n if fast is not None:\n slow = slow.next\n while slow is not None:\n if slow.val != pre.val:\n return False\n slow, pre = slow.next, pre.next\n return True","sub_path":"online_judge/leetcode_py/234. Palindrome Linked List.py","file_name":"234. Palindrome Linked List.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"65062308","text":"from utils import *\nfrom summarization import *\nfrom settings import *\nimport time, os\n\nSTOPWORDS = 'stop_words_FULL.txt'\nDOCUMENTS_FOLDER = 'documents'\n\nif __name__ == '__main__':\n stopw = load_stopwords(STOPWORDS)\n opzioni_nasari = {'big': NASARI_L_SETTINGS, 'small': NASARI_S_SETTINGS}\n\n scelta_nasari = ask('Scegliere il file NASARI desiderato (big/small): ', opzioni_nasari.keys())\n\n SETTINGS = opzioni_nasari[scelta_nasari]\n with timing(message='Vettori NASARI caricati e preprocessati in'):\n nasari = (load_nasari(**SETTINGS), SETTINGS['dim'])\n\n files = tuple(os.listdir(DOCUMENTS_FOLDER))\n choose_file_question = '\\n'\n for i, f in enumerate(files):\n choose_file_question += f'[{i}] {f}\\n'\n choose_file_question += '\\nScegli un file da riassumere (back per uscire): '\n\n while True:\n file_scelto = ask(choose_file_question, ['back'] + [str(i) for i in range(len(files))])\n if file_scelto == 'back':\n break\n \n file_scelto = files[int(file_scelto)]\n file_scelto = os.path.join(DOCUMENTS_FOLDER, file_scelto)\n\n personalize = ask('Vuoi personalizzare le opzioni (s/n): ', ['s', 'n'])\n \n options = {\n 'stopw': stopw,\n 'sig_size': 20,\n 'title_weight': 5,\n 'first_sentence_bonus': 1.3,\n 'last_sentence_bonus': 1.05\n }\n\n if personalize == 's':\n sig_size = ask('Dimensione signature del topic (numero intero, defalut=20): ', validator=lambda x: x.isdigit())\n title_weight = ask('Peso del titolo sulla signature (numero reale, default=5): ', validator=isfloat)\n first_sentence_bonus = ask('Bonus prima frase nel paragrafo (numero reale, default=1.3): ', validator=isfloat)\n last_sentence_bonus = ask('Bonus ultima frase nel paragrafo (numero reale, default=1.05): ', validator=isfloat)\n \n options = {\n 'stopw': stopw,\n 'sig_size': int(sig_size),\n 'title_weight': float(title_weight),\n 'first_sentence_bonus': float(first_sentence_bonus),\n 'last_sentence_bonus': float(last_sentence_bonus)\n }\n\n doc = load_document(file_scelto, nasari, options_dict=options)\n\n change_compression = True\n\n while change_compression:\n compression = ask('Scegli un livello di compressione (reale tra 0 e 1, back per tornare alla scelta del file): ', validator=lambda x: x == 'back' or (isfloat(x) and 0 <= float(x) and float(x) <= 1))\n if compression == 'back':\n change_compression = False\n continue\n compression = float(compression)\n print(\"\\n-------------------------------------- INIZIO RIASSUNTO --------------------------------------\\n\")\n with timing(message='Riassunto generato in'):\n print(doc.summary(compression_ratio=compression))\n print(\"\\n--------------------------------------- FINE RIASSUNTO ---------------------------------------\\n\")\n ","sub_path":"parte-2/esercizio-4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"192009310","text":"from pandas.core.reshape import melt\nimport pandas.util.testing as tm\n\ndef test_melt():\n df = tm.makeTimeDataFrame()[:10]\n df['id1'] = (df['A'] > 0).astype(int)\n df['id2'] = (df['B'] > 0).astype(int)\n\n molten1 = melt(df)\n molten2 = melt(df, id_vars=['id1'])\n molten3 = melt(df, id_vars=['id1', 'id2'])\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],\n exit=False)\n\n","sub_path":"pandas/tests/test_reshape.py","file_name":"test_reshape.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"52675533","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on 2015-7-1\n\n@author: wangmianjie\n'''\n\nimport sys\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom logging import StreamHandler\n\n_unicode = True\n\nclass StreamHandlerUTF82GBK(StreamHandler):\n def emit(self, record):\n try:\n msg = self.format(record)\n stream = self.stream\n fs = \"%s\\n\"\n if not _unicode: #if no unicode support...\n stream.write(fs % msg)\n else:\n try:\n if (isinstance(msg, unicode) and\n getattr(stream, 'encoding', None)):\n ufs = u'%s\\n'\n try:\n stream.write(ufs % msg)\n except UnicodeEncodeError:\n #Printing to terminals sometimes fails. For example,\n #with an encoding of 'cp1251', the above write will\n #work if written to a stream opened or wrapped by\n #the codecs module, but fail when writing to a\n #terminal even when the codepage is set to cp1251.\n #An extra encoding step seems to be needed.\n stream.write((ufs % msg).encode(stream.encoding))\n else:\n # stream.write(fs % msg)\n # encode to cp936 by chenzw start\n msg = msg.decode('utf-8').encode(stream.encoding)\n try:\n stream.write(fs % msg)\n except IOError:\n pass\n # encode to cp936 by chenzw end\n except UnicodeError:\n stream.write(fs % msg.encode(\"UTF-8\"))\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n\n\nlogger = logging.getLogger('simServer')\nformatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')\n\nruntimelog = RotatingFileHandler(\"simServer.log\", maxBytes=5*1024*1024, backupCount=3)\nruntimelog.setFormatter(formatter)\nlogger.addHandler(runtimelog)\n# 写屏功能,如不需要,则请注释下面三行(压力下,不写屏可达100TPS,写屏只有20TPS)\nstdoutlog = StreamHandlerUTF82GBK(sys.stdout)\nstdoutlog.setFormatter(formatter)\nlogger.addHandler(stdoutlog)\n\nlogger.setLevel(logging.DEBUG) # DEBUG, INFO, WARNING, ERROR, CRITICAL ...etc\n\ndef log(logStr): \n logger.info(logStr)\n","sub_path":"robotframework/MIGU/tools/SimServer/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"60892651","text":"import socket\nimport pickle\n\n\n# Default configuration\nHEADER_LENGTH = 10\nIP = '127.0.0.1'\nPORT = 1234\n\n\ndef connect_to_server(client_name: str):\n \"\"\"\n Connects to the local server.\n\n :param client_name: string\n :return: client's socket\n \"\"\"\n client_name = client_name.encode('utf-8')\n client_name_header = f'{len(client_name):<{HEADER_LENGTH}}'.encode('utf-8')\n\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((IP, PORT))\n client_socket.setblocking(False)\n\n client_socket.send(client_name_header + client_name) # Send id to the server\n\n return client_socket\n\n\ndef send_object_message(client_socket, message: object):\n message = pickle.dumps(message)\n message_header = bytes(f'{len(message):<{HEADER_LENGTH}}', 'utf-8')\n client_socket.send(message_header + message)\n","sub_path":"main/single_board/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"11577403","text":"from flask import Flask, render_template ,request,url_for,escape,session,redirect,abort\nimport sqlite3 as sql\n# import admin\nfrom flask_bcrypt import Bcrypt\napp = Flask(__name__)\napp.secret_key = 'any random string'\nbcrypt = Bcrypt(app)\n\n\ndef Convert(tup, di):\n\tdi = dict(tup)\n\treturn di\n\n\n\n#INITIAL route\n@app.route('/')\ndef index():\n\treturn render_template('sfiplogin.html')\n\n\n#REGISTER page\n@app.route('/signup', methods = ['GET','POST'])\ndef signup():\n\treturn render_template('sfipreg.html')\n\n\n# REGISTRATION module\n@app.route('/register', methods = ['GET','POST'])\ndef register():\n\tif request.method == 'POST':\n\t\tname=request.form['name']\n\t\troll_no = request.form['roll']\n\t\tbranch=request.form['branch']\n\t\tyear=request.form['year']\n\t\tsemester=request.form['semester']\n\t\temail=request.form['email']\n\t\tmobile=request.form['mobile']\n\t\tpassword =bcrypt.generate_password_hash(request.form['pass']).decode('UTF-8')\n\t\twith sql.connect(\"database.db\") as con:\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(\"select * from users where roll_no = (?)\",[request.form['roll']])\n\t\t\tname2=cur.fetchall()\n\t\t\tif name2:\n\t\t\t\tms2=\"already existing roll no\"\n\t\t\t\treturn render_template(\"sfipreg.html\",ms2=ms2)\n\t\t\tcur.execute(\"INSERT INTO users (name,roll_no,branch,year,semester,email,mobile,password) VALUES (?,?,?,?,?,?,?,?)\",(name,roll_no,branch,year,semester,email,mobile,password))\n\t\t\tmsg = \"registered successfully now login\"\n\t\t\tcon.commit()\n\t\tcon.close()\n\t\treturn render_template('sfiplogin.html',msg=msg)\n\n\n#LOGIN module\n@app.route('/login', methods =['GET','POST'] )\ndef login():\n\tif request.method == 'POST':\n\t\tcon = sql.connect(\"database.db\")\n\t\tcur = con.cursor()\n\t\tcur.execute(\"select * from users where roll_no =?\",[request.form['roll']])\n\t\tname=cur.fetchall()\n\t\tcur.execute('SELECT password FROM users WHERE roll_no=?',[request.form['roll']])\n\t\tpas=cur.fetchone()\n\t\tif name:\n\t\t\tif bcrypt.check_password_hash(pas[0],request.form['password']):\n\t\t\t\tsession['roll_no'] = request.form['roll']\n\t\t\t\tcur.execute(\"select branch from users where roll_no=?\",[request.form['roll']])\n\t\t\t\tbranch = cur.fetchone()\n\t\t\t\tsession['branch']=branch[0]\n\t\t\t\tprint(session['branch'])\n\t\t\t\tcur.execute(\"select semester from users where roll_no=?\",[request.form['roll']])\n\t\t\t\tsemester = cur.fetchone()\n\t\t\t\tsession['semester']=semester[0]\n\t\t\t\tprint(session['semester'])\n\t\t\t\tcur.execute(\"select year from users where roll_no=?\", [request.form['roll']])\n\t\t\t\tyear = cur.fetchone()\n\t\t\t\tsession['year'] = year[0]\n\t\t\t\tprint(session['year'])\n\t\t\t\tcur.execute(\"select name,subject from profs where branch=? and semester=? \",(branch[0],semester[0]))\n\t\t\t\tglobal lis1\n\t\t\t\tlis1=cur.fetchall()\n\t\t\t\tcur.execute(\"select distinct lecturer,subject from feedback where st_rollno=?\", [session['roll_no']])\n\t\t\t\tglobal lis2\n\t\t\t\tlis2 = cur.fetchall()\n\t\t\t\tglobal lis3\n\t\t\t\tlis3 = [x for x in lis1 if x not in lis2]\n\t\t\t\tdictionary = {}\n\t\t\t\tsession['lisp'] = Convert(lis3, dictionary)\n\t\t\t\tsession['exist'] = Convert(lis2, dictionary)\n\n\t\t\t\tcon.close()\n\n\t\t\t\treturn render_template('show_profs.html',lisp=session['lisp'],exist=session['exist'])\n\t\t\telse:\n\t\t\t\talr=\"wrong password\"\n\t\t\t\treturn render_template('sfiplogin.html',alr=alr)\n\t\telse:\n\t\t\talr=\"not registered yet\"\n\t\t\treturn render_template('sfiplogin.html',alr=alr)\n\telse:\n\t\tsession['roll_no']=request.args.get('roll_no')\n\t\treturn render_template('show_profs.html')\n\tcon.close()\n\n\n# Show PROFESSOR list\n@app.route('/show_profs')\ndef show_profs():\n\treturn render_template('show_profs.html', lisp=session['lisp'],exist=session['exist'])\n\n\n# Display feedback FORM\n@app.route('/stform')\ndef stform():\n\tprint(session['branch'],session['semester'])\n\treturn render_template('stform.html', branch=session['branch'],semester=session['semester'])\n\n\n\n# FEEDBACK MODULE......\n@app.route('/feedback', methods=['POST', 'GET'])\ndef feedback():\n\tif request.method == 'POST':\n\t\tlecturer = request.form['lecturer']\n\t\tst_rollno = session['roll_no']\n\t\tyear = session['year']\n\t\tsemester = session['semester']\n\t\tbranch = session['branch']\n\t\tsubject = request.form['subject']\n\t\tpreparation = request.form['preparedness']\n\t\tinformation = request.form['informative']\n\t\texplanation = request.form['explaining']\n\t\tpace = request.form['pace']\n\t\tleadership = request.form['leading']\n\t\treceptive = request.form['receptive']\n\t\tinterest = request.form['interest']\n\t\tdiscussion = request.form['discussion']\n\t\tlearning = request.form['learn']\n\t\trapport = request.form['rapport']\n\t\tavailable = request.form['available']\n\t\tcurrent = [lecturer,st_rollno,year,semester,branch,subject]\n\n\t\tcon = sql.connect(\"database.db\")\n\t\tcur = con.cursor()\n\n\t\tcur.execute(\"select lecturer,st_rollno,year,semester,branch,subject from feedback where st_rollno=?\",[session['roll_no']])\n\t\talrdyexist=cur.fetchall()\n\n\t\tfor i in range(0,len(alrdyexist)):\n\t\t\tif list(alrdyexist[i]) == current:\n\t\t\t\tmsgx=\"This feedback is already registered\"\n\t\t\t\tcon.close()\n\n\t\t\t\treturn render_template('show_profs.html', lisp=session['lisp'], msgx=msgx, exist=session['exist'])\n\n\t\telse:\n\t\t\tcur.execute(\"INSERT INTO feedback (lecturer,st_rollno,year,semester,branch,subject,preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available)VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",(lecturer,st_rollno,year,semester,branch,subject,preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available,))\n\t\t\tcon.commit()\n\t\t\tcur.execute(\"select distinct lecturer,subject from feedback where st_rollno=?\", [session['roll_no']])\n\t\t\tglobal lis1,lis2,lis3\n\t\t\tlis2 = cur.fetchall()\n\t\t\tlis3 = [x for x in lis1 if x not in lis2]\n\t\t\tdictionary = {}\n\t\t\tsession['lisp'] = Convert(lis3, dictionary)\n\t\t\tsession['exist'] = Convert(lis2, dictionary)\n\n\t\t\tcon.close()\n\t\t\treturn redirect(url_for('show_profs'))\n\n\n#view RESPONSE\n@app.route('/response', methods=['POST', 'GET'])\ndef response():\n\tif request.method == 'POST':\n\t\trestech = request.form['restech']\n\t\tressub = request.form['ressub']\n\t\tprint(restech)\n\t\tprint(ressub)\n\t\tcon = sql.connect(\"database.db\")\n\t\tcur = con.cursor()\n\t\tcur.execute(\"select preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available from feedback where st_rollno=? and lecturer=? and subject=?\",(session['roll_no'],restech,ressub))\n\t\tout=cur.fetchall()\n\t\tres = [item for t in out for item in t]\n\t\tprint(res)\n\t\treturn render_template('response.html', res=res,branch=session['branch'],semester=session['semester'],restech=restech,ressub=ressub)\n\n\telse:\n\t\tprint('this worked')\n\n\n#LOGOUT module\n@app.route('/logout')\ndef logout():\n\tsession.pop('roll_no',None)\n\tsession.pop('branch', None)\n\tsession.pop('semester', None)\n\tsession.pop('year', None)\n\tsession.pop('lisp', None)\n\tsession.pop('exist', None)\n\treturn redirect(url_for('index'))\n\n\nif(__name__ == \"__main__\"):\n\tapp.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"53329786","text":"# coding=utf-8\n__author__ = 'Archer'\n\nfrom flask import render_template, jsonify\nfrom marka import app\n\n\nclass BizError(Exception):\n err_code = 1\n\n def __init__(self, message, err_code=None):\n Exception.__init__(self)\n self.message = message\n if err_code is not None:\n self.err_code = err_code\n\n def to_dict(self):\n ret = dict()\n ret['id'] = self.err_code\n ret['msg'] = self.message\n return ret\n\n\n@app.errorhandler(404)\ndef error404(error):\n \"\"\"\n 404错误页面\n \"\"\"\n return render_template('error/error404.html'), 404\n\n\n@app.errorhandler()\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response","sub_path":"marka/handlers/ErrorHandler.py","file_name":"ErrorHandler.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"172552824","text":"\nimport argparse\nimport datetime\nimport operator\nimport sys\nimport time\nimport credentials\nfrom mongoengine import *\nimport lib.mongoLeankit\nimport lib.jira\nimport lib.excel_estimate\n\ndef _initMongoConn ():\n connect('leankit')\n\nclass JiraIssue (object):\n def __init__(self, parent_id):\n self.key = parent_id\n self.parent = None\n self.summary = \"\"\n self.type = None\n self.epic = None\n self.sprints = {}\n self.timespent = 0\n self.totaltimespent = 0\n self.children = []\n self.not_management_task = False\n self.not_analysis_task = False\n self.rank = 0\n\n\ndef get_time_spent(elem, field_name):\n \"\"\" Counts timespent in hours instead of miliseconds.\"\"\"\n ts = elem[\"fields\"].get(field_name, 0)\n if ts is None:\n ts = 0\n ts /= 3600\n return ts\n\ndef get_parent_id(elem):\n \"\"\" Returns paren key or none. \"\"\"\n if elem[\"fields\"].get(\"parent\") is not None:\n return elem[\"fields\"][\"parent\"][\"key\"]\n return None\n\ndef get_sprints(elem):\n \"\"\" Parses sprint string info dictionary of sprint_id: name. \"\"\"\n sprints = elem[\"fields\"].get(\"customfield_10800\")\n dict_sprints = {}\n if sprints is not None:\n for index in range(len(sprints)):\n left_index = sprints[index].find('[')\n right_index = sprints[index].find(']')\n lst_sprints = sprints[index][left_index+1:right_index].split(',')\n\n sprint_id = None\n sprint_name = None\n\n for item in lst_sprints:\n kvPair = item.split('=')\n if kvPair[0] == \"id\":\n sprint_id = kvPair[1]\n elif kvPair[0] == \"name\":\n sprint_name = kvPair[1]\n\n if sprint_id is not None:\n dict_sprints[sprint_id] = sprint_name\n\n return dict_sprints\n\ndef not_management_task(text):\n text = text.lower()\n if \"zarządzanie projektem\" in text\\\n or \"pm\" in text\\\n or \"zarzadzanie projektem\" in text:\n return False\n return True\n\n\ndef not_analysis_task(text):\n text = text.lower()\n if \"scrum\" in text\\\n or \"analityczne\" in text:\n return False\n return True\n\n\ndef build_issues_tree(data):\n \"\"\" Builds logical tree of issues: parent -> subtasks \"\"\"\n response = {}\n for elem in data:\n parent_id = get_parent_id(elem)\n\n issue = JiraIssue(parent_id)\n issue.key = elem[\"key\"]\n issue.parent = None\n issue.summary = elem[\"fields\"][\"summary\"]\n issue.type = elem[\"fields\"][\"issuetype\"][\"name\"]\n issue.epic = elem[\"fields\"][\"customfield_11300\"]\n issue.sprints = get_sprints(elem)\n issue.timespent = get_time_spent(elem, \"timespent\")\n issue.totaltimespent = get_time_spent(elem, \"aggregatetimespent\")\n issue.children = []\n issue.not_management_task = not_management_task(issue.summary)\n issue.not_analysis_task = not_analysis_task(issue.summary)\n issue.rank = elem[\"fields\"][\"customfield_11304\"]\n\n parent_issue = JiraIssue(parent_id)\n\n #it's a subtasks\n if parent_id is not None:\n if not parent_id in response:\n response[parent_id] = parent_issue\n response[parent_id].children.append(issue)\n #it's not a subtask\n else:\n if issue.key in response:\n issue.children = response[issue.key].children\n response[issue.key] = issue\n\n sorted_response = sorted(response.values(), key=operator.attrgetter('not_management_task', 'not_analysis_task', 'rank'))\n\n return sorted_response\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n exit(\"Usage: \" + sys.argv[0] + \" projectname\")\n project_name = sys.argv[1]\n\n _initMongoConn()\n\n #init Jira connection\n user_jira = credentials.loginJira['consumer_secret']\n pwd_jira = credentials.loginJira['password']\n jira = lib.jira.Jira('http://jira.grupa.onet', user_jira, pwd_jira)\n\n #date_text = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n file_name = \"./pw_files/\" + project_name + \"_kosztorys.xlsx\"\n\n excelReport = lib.excel_estimate.ExcelEstimate(file_name, \"Kosztorys\", False)\n\n issues = jira.get_all_issues(project_name)\n data = {\n \"issues\": build_issues_tree(issues),\n \"projectName\": project_name,\n \"show_subtasks\": True\n }\n excelReport.init_report(data)\n excelReport.generate_report()\n excelReport.close()\n\n # WORKAROUND Z POMINIECIEM MONGO\n ## zapis dokumentu do bazy mongo\n #pwfile = lib.mongoLeankit.Pwfile()\n\n #pwfile.data = data\n #pwfile.project = project_name\n #pwfile.generation_date = datetime.datetime.now()\n #pwfile.date_text = date_text\n #pwfile.format_type = \"XLSX\"\n #pwfile.save()\n exit(0)\n","sub_path":"new_pmo/public/python/gen_estimate_tofile2.py","file_name":"gen_estimate_tofile2.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"544598806","text":"import json\nimport sys\nimport argparse\nfrom functools import partial\n\n\ndef load_data(filepath):\n with open(filepath, 'r', encoding='UTF8') as file_handler:\n return json.load(file_handler)['features']\n\n\ndef get_biggest_bar(loaded_bars):\n return max(\n loaded_bars,\n key=lambda bar:bar['properties']['Attributes']['SeatsCount']\n )\n\n\ndef get_smallest_bar(loaded_bars):\n return min(\n loaded_bars,\n key=lambda bar:bar['properties']['Attributes']['SeatsCount']\n )\n\n\ndef get_closest_bar(loaded_bars, longtitude, latitude):\n return min(\n loaded_bars,\n key=lambda bar: (\n (bar['geometry']['coordinates'][0] - latitude)**2 -\n (bar['geometry']['coordinates'][1] - longtitude)**2\n )\n )\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-f',\n action='store',\n dest='filepath',\n help='Filepath with data, \"bars.json\" by default',\n default='bars.json'\n )\n parser.add_argument(\n '-long',\n type=float,\n action='store',\n dest='longtitude',\n help='You longtitude. If not set it will be generated randomly',\n default='55'\n )\n parser.add_argument(\n '-lat',\n type=float,\n action='store',\n dest='latitude',\n help='You latitude. If not set, it will be generated randomly',\n default='37'\n )\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n params = parse_arguments()\n loaded_bars = load_data(params.filepath)\n print(get_biggest_bar(loaded_bars)['properties']['Attributes']['Name'])\n print(get_smallest_bar(loaded_bars)['properties']['Attributes']['Name'])\n print(get_closest_bar(loaded_bars, params.longtitude, params.latitude)\n ['geometry']['coordinates'])\n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"654327521","text":"# -*- coding: utf-8 -*-\r\n\r\n# Leetcode 6\r\n# The string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: \r\n# (you may want to display this pattern in a fixed font for better legibility)\r\n# P A H N\r\n# A P L S I I G\r\n# Y I R\r\n# And then read line by line: \"PAHNAPLSIIGYIR\"\r\n# Write the code that will take a string and make this conversion given a number of rows:\r\n# string convert(string s, int numRows);\r\n# Example 1:\r\n# Input: s = \"PAYPALISHIRING\", numRows = 3\r\n# Output: \"PAHNAPLSIIGYIR\"\r\n# Example 2:\r\n# Input: s = \"PAYPALISHIRING\", numRows = 4\r\n# Output: \"PINALSIGYAHRPI\"\r\n# Explanation:\r\n# P I N\r\n# A L S I G\r\n# Y A H R\r\n# P I\r\n\r\nclass Solution:\r\n def convert(self, s, numRows):\r\n \"\"\"\r\n :type s: str\r\n :type numRows: int\r\n :rtype: str\r\n \"\"\"\r\n \r\n if(numRows>=len(s)):\r\n return s;\r\n \r\n cycleElementNum=numRows+(numRows-2);\r\n cycleColumnNum=numRows-1;\r\n cycleNum=len(s)//cycleElementNum+1;\r\n numCols=cycleNum*cycleColumnNum;\r\n strList=[];\r\n for row in range(numRows):\r\n rowList=[];\r\n for col in range(numCols):\r\n rowList.append([]);\r\n strList.append(rowList);\r\n \r\n for index,ch in enumerate(s):\r\n cycleIndex=index//cycleElementNum;\r\n inCycleIndex=index%cycleElementNum;\r\n inCycleCol=0 if(inCycleIndex0:\n message+=\" devienne(nt) positif\"\n messagebox.showinfo(\"Attention\",message)\n liste=[abs(i) for i in liste] # pour transformer les poids negatif en positif\n else:\n existe=0\n message=\"le(s) poid(s) : \"\n for i in liste:\n if i<0 : \n existe+=1\n message+=\" [{}] , \".format(i)\n if existe>0:\n message+=\" ne sont pas conserve\"\n messagebox.showinfo(\"Attention\",message)\n liste=[i for i in liste if i>-1] # pour ne conserver que les poids positif\n return liste\n except ValueError:\n \n message=\"le fichier ne reponds pas a la norme\"\n messagebox.showinfo(\"Attention\",message)\n print(message)\n return None\n\n\ndef structureToListe(chemin):\n \"\"\"\n Retourne la structure de l'abre dans une liste compose de deux elements :\n [0]=le sous arbre gauche et [1]=le sous arbre droit ,de larbre binaire\n la methode accepe les arbres a n branches\n \"\"\"\n try:\n structure = open(chemin,\"r\")\n structure = structure.readline()# transforme de type _io.TextIOWrapper a string\n except FileNotFoundError:\n message=\"Erreur : le fichier na pas ete trouve\"\n messagebox.showinfo(\"Attention\",message)\n print(message)\n \n return None\n try:\n structure=eval(structure)#Start la validite de la structure de larbre binaire ET transforme strucuture en type list\n message=\"ATTENTION\"\n if options.ArbreNonBinaire==\"libre\":# quel que soit le nombre de branche larbre est cree\n \n if 2 != len(structure):\n message=\"ATTENTION arbre non binaire\"\n \n messagebox.showinfo(\"Attention\",message)\n return structure\n elif options.ArbreNonBinaire==\"juste2\":# on ne garde que les deux premier sous arbres\n message+=\"\\n\"\n message+=\"on ne garde pas {}\".format(structure[2:])\n messagebox.showinfo(\"Attention\",message)\n structure=structure[:2]\n return structure\n else:# seul un arbre binaire est autorise\n if 2 != len(structure):\n message=\"ATTENTION arbre non binaire\"\n messagebox.showinfo(\"Attention\",message)\n return None\n else:\n return structure\n \n \n except (SyntaxError , TypeError):\n message=\"le fichier ne reponds pas a la norme\"\n messagebox.showinfo(\"Attention\",message)\n print(message)\n return None\n\ndef aleaListePoids(x,maxi):\n \"\"\"\n Renvoi une liste aleatoire de x entier de poids entre 1 et max\n \"\"\"\n if x < 2 : x = 2# minimum\n def rando(maxi):\n return random.randint(1,maxi)\n liste = [rando(maxi) for y in range(1,x+1)]\n random.shuffle(liste)\n return liste\ndef AdditionListePoids(x):\n \"\"\"\n Renvoi une liste d'entier de poids qui sont l\\additions des poids precendents dans l'ordre croissant\n equivaut aux puissances de 2\n \"\"\"\n \n i = 0\n liste = [1]\n if x==1:\n liste.append(0)\n return liste\n while i < x+1:\n i += 1\n somme = sommeDespoidsListe(liste)\n liste.append(somme)\n return liste\n\ndef sommeDespoidsListe(liste):\n \"\"\"renvoi le int de la somme des chiffre d'une liste \"\"\"\n somme = 0\n for x in liste:\n somme += x\n return somme\ndef EquilibreModulo(nombreDePoids,modulo,valeurmax):\n \"\"\"\n Renvoi une liste de \"nombreDePoids\" poids de valeur maximum \"valeurmax\" , reparti par sous arbre equilibre de modulo \"modulo\"\n \n \"\"\"\n \n ####################################\n #Verification des variables\n \n if nombreDePoids%modulo != 0 or nombreDePoids < 2 :\n if nombreDePoids%modulo != 0:\n message=\"{} % {} est different de 0\".format(nombreDePoids,modulo)\n else:\n message=\"le modulo minimum est 2 , et le nombre de poids 2\"\n messagebox.showinfo(\"Attention\",message)\n messagebox.showinfo(\"Attention\",\"Vont etre afficher 32 poids , reparti modulo 4 , dont le poids max est {}\".format(valeurmax))\n nombreDePoids = 32\n modulo = 4\n \n if valeurmax=modulo:\n maxi=valeurmax\n hasard=random.randint(1,2) # pour agrandir l'aleatoire dans les cas ou le poidsMax choisit est faible\n hasard2=random.randint(0,1) # pour pour placer la somme des poids non placer dans le premier ou dernier poids du groupe de poids\n if hasard2==0:total=modulo//hasard\n else: total=1\n while j epoch\n self.start_epoch = \\\n int((self.start_step + 1) / len(self.image_loader.data_index))\n epoch_size = len(self.image_loader.data_index)\n if args.save_freq == -1:\n args.save_freq = epoch_size\n print('Start point : iter : {}, epoch : {}, save freq : {}'.format(\\\n self.start_step, self.start_epoch, args.save_freq)) \n start_time = time.time()\n \n if self.start_epoch < args.end_epoch:\n for epoch in range(self.start_epoch, args.end_epoch):\n for _ in range(0, epoch_size):\n #discriminator update\n _ = self.sess.run(self.d_adam)\n\n #generator update & loss summary\n _, summary_str= self.sess.run([self.g_adam, self.summary_all_loss])\n self.writer.add_summary(summary_str, self.start_step)\n\n #print point\n if (self.start_step+1) % args.print_freq == 0:\n #print loss & time \n d_loss, g_loss, g_zi_img = self.sess.run(\\\n [self.D_loss, self.G_loss, self.G_xi])\n \n print('Iter {} Time {} d_loss {} g_loss {}'.format(\\\n self.start_step, time.time() - start_time, d_loss, g_loss))\n \n if (self.start_step+1) % args.print_sample_freq == 0:\n #training sample check\n summary_str0 = self.sess.run(self.summary_train_image)\n self.writer.add_summary(summary_str0, self.start_step)\n #test sample check\n self.check_sample(self.start_step)\n\n if (self.start_step+1) % args.save_freq == 0:\n self.save(self.start_step)\n self.start_step += 1\n self.save(self.start_step)\n else:\n print('train complete!, trained model start epoch : {}, \\\n end_epoch : {}'.format(self.start_epoch, self.end_epoch))\n\n #summary test sample image during training\n def check_sample(self, t):\n summary_str1, summary_str2 = self.sess.run(\\\n [self.summary_image, self.summary_psnr])\n self.writer.add_summary(summary_str1, t)\n self.writer.add_summary(summary_str2, t)\n\n def save(self, step):\n model_name = \"PIX2PIX.model\"\n\n self.saver.save(self.sess,\n os.path.join(self.image_loader.model_save_dir, model_name),\n global_step=step)\n\n def load(self):\n print(\" [*] Reading checkpoint...\")\n ckpt = tf.train.get_checkpoint_state(self.sample_image_loader.model_save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n self.start_step = int(ckpt_name.split('-')[-1])\n self.saver.restore(self.sess, os.path.join(self.sample_image_loader.model_save_dir, ckpt_name))\n print(self.start_step)\n return True\n else:\n return False\n\n def inference(self, args):\n self.sess.run(tf.global_variables_initializer())\n\n assert self.load(), 'erorr: trained model is not exsist'\n\n ## test\n for idx in tqdm(self.sample_image_loader.data_index):\n test_X, test_Y, output_img = self.sess.run([self.whole_xi, self.whole_yi, self.G_whole_xi])\n \n save_file_nm_g = 'Gen_from_' + self.sample_image_loader.input_path_list[idx].split('/')[-1][:-4]\n np.save(os.path.join(self.sample_image_loader.inf_save_dir, save_file_nm_g), output_img)\n \n if args.raw_output:\n save_file_nm_f = 'from_' + self.sample_image_loader.input_path_list[idx].split('/')[-1][:-4]\n save_file_nm_t = 'to_' + self.sample_image_loader.target_path_list[idx].split('/')[-1][:-4]\n np.save(os.path.join(self.sample_image_loader.inf_save_dir, save_file_nm_f), test_X)\n np.save(os.path.join(self.sample_image_loader.inf_save_dir, save_file_nm_t), test_Y)\n","sub_path":"PIX2PIX/pix2pix_model.py","file_name":"pix2pix_model.py","file_ext":"py","file_size_in_byte":9694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"565614643","text":"from django.contrib import admin, messages\n\nfrom scraper.models import User, Board, Pin, Comment\nfrom scraper.tasks import (\n filter_pin_task, deduplicate_pins_task, publish_pin_as_post_task\n)\n\n\nclass BoardInline(admin.StackedInline):\n\n '''Board inlined to user.'''\n\n model = Board\n extra = 0\n\n\nclass PinInline(admin.StackedInline):\n\n '''Pin inlined to user and board.'''\n\n model = Pin\n extra = 0\n\n\nclass CommentInline(admin.StackedInline):\n\n '''Comment inlined to pin.'''\n\n model = Comment\n extra = 0\n\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n\n '''Admin integration for user.'''\n\n list_filter = ('is_domain_verified', )\n search_fields = ('username', 'name', 'location')\n list_display = ('id', 'username', 'name', 'location', 'is_domain_verified')\n inlines = (BoardInline, PinInline, CommentInline)\n\n\n@admin.register(Board)\nclass BoardAdmin(admin.ModelAdmin):\n\n '''Admin integration for board.'''\n\n list_filter = ('is_collaborative', )\n search_fields = ('name', 'category', 'description')\n list_display = (\n 'id', 'name', 'description', 'category', 'pin_count',\n 'is_collaborative',\n )\n inlines = (PinInline, )\n\n\n@admin.register(Pin)\nclass PinAdmin(admin.ModelAdmin):\n\n '''Admin integration for pin.'''\n\n inlines = (CommentInline, )\n list_filter = ('is_repin', 'is_filtered', 'is_published', 'is_duplicate')\n search_fields = ('title', 'description')\n list_display = (\n 'id', 'title', 'description', 'category', 'repin_count', 'like_count',\n 'is_repin', 'is_rich', 'is_filtered', 'is_duplicate', 'is_published'\n )\n actions = ('filter_pins', 'deduplicate_pins', 'publish_pins')\n\n def filter_pins(self, request, queryset):\n '''Admin action for filtering pins.'''\n count = len(queryset)\n for pin in queryset:\n filter_pin_task.delay(pin)\n if count == 1:\n self.message_user(\n request,\n 'Started filter_pin_task for 1 pin.',\n level=messages.SUCCESS\n )\n else:\n self.message_user(\n request,\n 'Started filter_pin_task for {} pins.'.format(count),\n level=messages.SUCCESS\n )\n\n def deduplicate_pins(self, request, queryset):\n '''Admin action for deduplicating pins.'''\n count = len(queryset)\n deduplicate_pins_task.delay(queryset)\n if count == 1:\n self.message_user(\n request,\n 'Started deduplicate_pin_task for 1 pin.',\n level=messages.SUCCESS\n )\n else:\n self.message_user(\n request,\n 'Started deduplicate_pin_task for {} pins.'.format(count),\n level=messages.SUCCESS\n )\n\n def publish_pins(self, request, queryset):\n '''Admin action for publishing pins.'''\n queryset = queryset.filter(\n is_filtered=False, is_duplicate=False, is_published=False\n )\n count = len(queryset)\n for pin in queryset:\n publish_pin_as_post_task.delay(pin)\n if count == 1:\n self.message_user(\n request,\n 'Started publish_pin_task for 1 pin.',\n level=messages.SUCCESS\n )\n else:\n self.message_user(\n request,\n 'Started publish_pin_task for {} pins.'.format(count),\n level=messages.SUCCESS\n )\n\n filter_pins.short_description = 'Filter selected pins'\n deduplicate_pins.short_description = 'Deduplicate selected pins'\n publish_pins.short_description = 'Publish selected pins'\n\n\n@admin.register(Comment)\nclass CommentAdmin(admin.ModelAdmin):\n\n search_fields = ('comment', )\n list_display = ('id', 'comment')\n","sub_path":"scraper/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"320836693","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom psd_tools import PSDImage\nimport numpy as np\nfrom PIL import Image\nimport os\nimport pathlib\n\n# 读取psd文件,获取指定层信息,保存指定层图片\nfrom muse.psd_layer import PsdLayer\nfrom muse.psd_model import PsdModel\n\n\nclass PsdParser:\n \"\"\"\n 读取path指向的PSD文件,获取图层信息并保存图层至相应名称的PNG图片;\n \"\"\"\n @staticmethod\n def parse_psd(path):\n psd_name = os.path.basename(path)\n psd_name_without_extension = os.path.splitext(psd_name)[0]\n dir_path = os.path.dirname(path)\n layer_save_path = os.path.join(dir_path, psd_name_without_extension)\n pathlib.Path(layer_save_path).mkdir(parents=True, exist_ok=True)\n\n psd = PSDImage.load(path)\n header = psd.header\n layers = psd.layers\n\n layer_infos = []\n for layer in layers:\n save_path = os.path.join(layer_save_path, layer.name + '.png')\n\n m_x1 = 0\n m_y1 = 0\n\n layer_info = PsdLayer(layer.bbox.width, layer.bbox.height,\n layer.bbox.x1 + m_x1, layer.bbox.y1 + m_y1, layer.bbox.x2, layer.bbox.y2,\n layer.name, save_path)\n layer_infos.append(layer_info)\n\n psd_info = PsdModel(header.width, header.height, layer_infos, psd_name, path)\n return psd_info\n\n @staticmethod\n def remove_black_from_light_layer(layerPIL):\n \"\"\"\n 对于需要通过滤色的混合选项(screen blend)的图层,纯黑不透明(alpha==100%)的部分可以扔掉\n :param layerPIL: 原图层PIL\n :return: 去黑图层PIL\n \"\"\"\n pixel = layerPIL.load()\n\n for y in range(layerPIL.size[1]):\n for x in range(layerPIL.size[0]):\n if pixel[x, y][0] == 0 and pixel[x, y][1] == 0 and pixel[x, y][2] == 0 and pixel[x, y][3] == 255:\n pixel[x, y] = (0, 0, 0, 0)\n\n return layerPIL\n\ndef main():\n print(os.path.dirname(os.path.realpath(__file__)))\n # psd = PsdParser.parse_psd('/Users/sky4star/Github/muse/data/test/psd/600-600.ai.psd')\n psd = PsdParser.parse_psd('C:/workroot/DataTeam/lab/python/muse/image_src/768-1024.ai.psd')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"muse/psd_parser.py","file_name":"psd_parser.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"570204621","text":"#!/usr/bin/env python3\n\nimport json\nimport ast\nimport sys\n\nbanco = sys.argv[1]\nTITU = sys.argv[2]\nDESCR = sys.argv[3]\nDATE = sys.argv[4]\n\ndici = {}\n\n\ndef create_banc(titulo, descricao, date, arquivo):\n dici[titulo] = {'title': descricao, 'data': date}\n dici_string = json.dumps(dici)\n\n arq = open(arquivo, 'w')\n arq.write(dici_string)\n\n\ndef add_banc(titulo, descricao, date, arquivo):\n open_arquivo = open(arquivo)\n dici = open_arquivo.read()\n open_arquivo.close()\n\n dici = ast.literal_eval(dici)\n\n for chave in dici.keys():\n if (chave == titulo):\n print('\\nTitulo já existente!!\\n')\n sys.exit(12)\n\n dici[titulo] = {'title': descricao, 'data': date}\n\n dici_string = json.dumps(dici)\n open_arquivo = open(arquivo, 'w')\n a = open_arquivo.write(dici_string)\n open_arquivo.close()\n\n\ndef master(titulo, descricao, date, arquivo):\n\n try:\n open_arquivo = open(arquivo)\n\n except FileNotFoundError:\n print('\\narquivo não encontrado\\nCriando um novo banco de dados...')\n create_banc(titulo, descricao, date, arquivo)\n\n else:\n add_banc(titulo, descricao, date, arquivo)\n\n\n# Colocar a data, um dia anterior, ou seja, é para o dia 13, colocar dia 12.\n# Exemplo:\n# ./add_banc /home/bruno/teste1.txt 'Folga A' 'Folga escala A - 13/08/2020' '2020-8-12'\n\nmaster(TITU, DESCR, DATE, banco)\n","sub_path":"add_banc.py","file_name":"add_banc.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"401656535","text":"# assume all words set to lowercase\r\n# levenshtein\r\nimport operator\r\nfrom nltk.corpus import wordnet as wn\r\n\r\nfirstlemma = \"rise\"\r\nsecondlemma = \"increase\"\r\n\r\n\r\ndef levenshtein(s1, s2):\r\n if len(s1) < len(s2):\r\n return levenshtein(s2, s1)\r\n\r\n # len(s1) >= len(s2)\r\n if len(s2) == 0:\r\n return len(s1)\r\n\r\n previous_row = range(len(s2) + 1)\r\n for i, c1 in enumerate(s1):\r\n current_row = [i + 1]\r\n for j, c2 in enumerate(s2):\r\n insertions = previous_row[\r\n j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\r\n deletions = current_row[j] + 1 # than s2\r\n substitutions = previous_row[j] + (c1 != c2)\r\n current_row.append(min(insertions, deletions, substitutions))\r\n previous_row = current_row\r\n\r\n return previous_row[-1]\r\n\r\n\r\ndef synonyms(firstlemma, secondlemma):\r\n a = wn.synsets(firstlemma)\r\n print(a)\r\n b = wn.synsets(secondlemma)\r\n print(b)\r\n\r\n if any(firstlemma in b for firstlemma in a):\r\n print(\"True\")\r\n #score = 0.9\r\n #print(score)\r\n else:\r\n print(\"false\")\r\n\r\n\r\ndef numchars(firstlemma, secondlemma):\r\n d = len(firstlemma)\r\n e = len(secondlemma)\r\n\r\n num = max(d, e)\r\n return num\r\n\r\n\r\n\r\ndef wordmetric():\r\n # print(type(num), type(pop))\r\n # operator.truediv((operator.sub(num, levenshtein), num), num)\r\n a = levenshtein(firstlemma, secondlemma)\r\n b = numchars(firstlemma, secondlemma)\r\n x = operator.sub(a, b)\r\n y = operator.truediv(x, a)\r\n if y < 0.9:\r\n score = 0.8\r\n label = \"Entailed\"\r\n # elif\r\n print(score)\r\n print(label)\r\n return a, b\r\n\r\n\r\nif firstlemma == secondlemma:\r\n score = 0\r\n label = \"Neutral\"\r\n print(label)\r\n print(score)\r\nelse:\r\n wordmetric()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# def synonym(firstlemma, secondlemma):\r\n","sub_path":"wordmetric.py","file_name":"wordmetric.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"410549940","text":"\nfrom down import *\n\nurl='https://yuhui.blog.csdn.net/article/list/2?'\n\nif 0:\n data = getdata(url)\n html = etree.HTML(data)\n aa = '//div/@data-articleid'\n li = html.xpath(aa)\n print((li))\n print(len(li))\n print(len(data))\n #save_txt('aaa.txt', data)\n ##print(data)\n\ndef getlist(url, aa):\n ll=[]\n data = getdata(url)\n html = etree.HTML(data)\n print(len(data))\n ll = html.xpath(aa)\n return ll\n\n#li = ['82762601', '83549202', '83410050', '83383119', '83302424', '83104764', '83056420', '82868658', '82865579', '82839713', '82783265', '82684203', '82660979', '82492354', '81506977', '81224151', '81218840', '81171545', '80820511', '80654007', '80207646']\n#li = ['82762601']\n\n#li=getlist()\n\nimport htm2md\n\ndef down_csdn_one(url):\n print(url)\n data = getdata(url)\n #save_txt('test1.html', data)\n root, t, d = htm2md.htm2md(data)\n # 'data-za-detail-view-element_name'\n save_txt_td(root, t, d)\n\ndef down_csdn_list_one(url, aa):\n ll=getlist(url, aa)\n for li in ll:\n down_csdn_one(li)\n return ll\n\ndef getlist_zhihu(url):\n data = getdata1(url)\n #save_txt('test1.html', data)\n tt = data.replace('[', '\\n').replace(']', '\\n').split('\\n')\n c = 0\n t = ''\n for i in (tt):\n cc = i.count('null,null')+i.count('false,false')\n if cc>c:\n c = cc\n t = i\n # t = t.replace('null', '0')\n li = t.split(',')\n ll=[]\n for i in li:\n if 'null'!=i:\n ll.append('https://zhuanlan.zhihu.com/p/'+i)\n return ll\n\ndef getlist_sciencenet(url):\n data = getdata(url)\n tt = data.replace('\"', '\\n').split('\\n')\n ll=[]\n for i in tt:\n if len(i)>15 and 'blog-'==i[0:5] and '.html'==i[-5:]:\n ll.append('http://blog.sciencenet.cn/'+i)\n return ll\n\ndef getlist_sina(url):\n data = getdata(url)\n tt = data.replace('\"', '\\n').split('\\n')\n ll=[]\n for i in tt:\n if i.find('/s/blog_')>0:\n ll.append(i)\n return ll\n\ndef getlist_skywind(url):\n data = getdata(url)\n aa='//a[@rel=\"bookmark\"]/@href'\n html = etree.HTML(data)\n tt = html.xpath(aa)\n ll=[]\n for i in tt:\n if i.find('www.skywind.me/blog/archives/')>0:\n ll.append(i)\n return ll\n\ndef getlist_51cto(url):\n data = getdata(url)\n aa='//a[@class=\"tit\"]/@href'\n html = etree.HTML(data)\n tt = html.xpath(aa)\n ll=[]\n for i in tt:\n if i.find('//blog.51cto.com')>0:\n ll.append(i)\n return ll\n\ndef getlist_jobbole(url):\n data = getdata(url)\n aa='//div[@class=\"post-thumb\"]/a/@href'\n html = etree.HTML(data)\n tt = html.xpath(aa)\n ll=[]\n for i in tt:\n if i.find('//blog.jobbole.com')>0:\n ll.append(i)\n return ll\n\ndef getlist_cnblogs(url):\n data = getdata(url)\n if 0:\n tt = data.replace('\"', '\\n').split('\\n')\n else:\n aa='//div[@class]/a/@href'\n html = etree.HTML(data)\n tt = html.xpath(aa)\n\n ll=[]\n for i in tt:\n if i.find('/articles/')>0 or i.find('/p/')>0 or i.find('/archive/')>0:\n ll.append(i)\n return ll\n\n\n\ndef down_list_one(url, getlist, ll1):\n ll0=getlist(url)\n ll0=set(ll0+ll1)\n ll = list(ll0-set(ll1))\n for li in ll:\n down_csdn_one(li)\n return ll, list(ll0)\n\ndef down_list(urllist, getlist):\n tt = 0\n ll1=[]\n for url in urllist:\n print('------', url)\n ll, ll1 = down_list_one(url, getlist, ll1)\n if len(ll)<5:\n tt+=1\n else:\n tt=0\n if tt>3:\n break\n return 0\n\nimport sys\n\ndef down_csdn_list(url, aa, temp):\n if len(sys.argv)>1:\n tt = 0\n # n = int(sys.argv[1])\n for i in range(1, 1000):\n url1=temp%(url, i)\n print('---'+url1)\n ll = down_csdn_list_one(url1, aa)\n if len(ll)<10:\n tt+=1\n else:\n tt=0\n if tt>3:\n break\n else:\n ll = down_csdn_list_one(url, aa)\n return 0\n\nif __name__ == '__main__':\n url='https://yuhui.blog.csdn.net/article/details/80106526'\n url='https://www.cnblogs.com/chaihy/p/10615117.html'\n url='https://blog.csdn.net/sinat_26917383/article/category/6093543'\n url='https://blog.csdn.net/sinat_26917383/article/details/82880021'\n url='https://www.cnblogs.com/pinard/p/6645766.html'\n url='https://www.cnblogs.com/pinard/category/894690.html'\n url = gettext().decode('gbk')\n if url.find('csdn.net')>0:\n temp='%s/%d?'\n if url.find('/category/')>0:\n aa='//div[@data-articleid]/h4/a/@href'\n down_csdn_list(url, aa, temp)\n else:\n url = '/'.join(url.split('/')[0:4])\n url += '/article/list'\n\n if url.find('/list')>0:\n aa='//li/a/@href'\n aa='//div[@data-articleid]/h4/a/@href'\n down_csdn_list(url, aa, temp)\n else:\n down_csdn_one(url)\n elif url.find('www.cnblogs.com')>0:\n temp='%s=%d'\n if url.find('/articles/')>0 or url.find('/archive/')>0:\n down_csdn_one(url)\n else:\n url = '/'.join(url.split('/')[0:4])\n url += '/default.html?page='\n urllist = map(lambda i:url+str(i), range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_cnblogs)\n\n elif url.find('www.jianshu.com/u/')>0:\n pass\n elif url.find('baike.baidu.com')>0:\n down_csdn_one(url)\n elif url.find('blog.sciencenet.cn')>0:\n if url.find('&do=blog&')>0:\n url = '&'.join(url.split('&')[0:5])\n url += '&page='\n urllist = map(lambda i:url+str(i), range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_sciencenet)\n else:\n down_csdn_one(url)\n elif url.find('blog.sina.com.cn')>0:\n if url.find('/s/articlelist_')>0:\n url = '_'.join(url.split('_')[0:3])\n url += '_'\n urllist = map(lambda i:url+str(i)+'.html', range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_sina)\n else:\n down_csdn_one(url)\n elif url.find('blog.jobbole.com')>0:\n if url.find('/category')>0:\n url = '/'.join(url.split('/')[0:5])\n url += '/page/'\n urllist = map(lambda i:url+str(i)+'/', range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_jobbole)\n elif url.find('/all-posts')>0:\n url = '/'.join(url.split('/')[0:4])\n url += '/page/'\n urllist = map(lambda i:url+str(i)+'/', range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_jobbole)\n else:\n down_csdn_one(url)\n elif url.find('www.skywind.me')>0:\n if url.find('/blog/page/')>0:\n url = '/'.join(url.split('/')[0:4])\n url += '/page/'\n urllist = map(lambda i:url+str(i), range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_skywind)\n else:\n down_csdn_one(url)\n elif url.find('blog.51cto.com')>0:\n tt = url.split('/')\n if len(tt)<5:\n url = '/'.join(tt[0:4])\n url += '/p'\n urllist = map(lambda i:url+str(i), range(1,1000))\n urllist = list(urllist)\n down_list(urllist, getlist_51cto)\n else:\n down_csdn_one(url)\n elif url.find('zhihu.com')>0:\n if url.find('/people/')>0 or url.find('/org/')>0:\n url = '/'.join(url.split('/')[0:5])\n url += '/posts?page='\n urllist = list(map(lambda i:url+str(i), range(1,1000)))\n down_list(urllist, getlist_zhihu)\n down_csdn_one(url)\n else:\n down_csdn_one(url)\n ##print(data)\n","sub_path":"py/downblog/down_csdn.py","file_name":"down_csdn.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"95273780","text":"import numpy as np\nimport pandas as pd\nimport click\nimport ast\nfrom ast import literal_eval\nfrom statistics import stdev, mean\nimport matplotlib.pyplot as pl\n\nimport csv\n\nICA_SUMMARY_FOLDER = 'Unmixing_Mats/Summaries/'\nICA_FIGURE_FOLDER = 'Unmixing_Mats/Figures/'\n\n\n@click.command()\n@click.argument('filename')\ndef summarize_and_hist_ICA(filename):\n ica_coeff_df = pd.read_csv(filename)\n file_prefix = filename.split('/')[-1].split('.')[0]\n with open(ICA_SUMMARY_FOLDER + file_prefix+'_summary.csv', 'w') as summary_file:\n writer = csv.writer(summary_file)\n HEADER = ['region', 'mean', 'stdev']\n for col in ica_coeff_df.columns:\n pl.clf()\n all_vals = [literal_eval(x)[1]\n for x in ica_coeff_df[col].dropna().tolist()]\n fig = pl.hist(all_vals)\n pl.title(col)\n pl.xlabel('Unmixing Matrix value')\n pl.ylabel(\"Frequency\")\n pl.savefig(ICA_FIGURE_FOLDER+file_prefix + '_' + col + '.png')\n writer.writerow([col, mean(all_vals), stdev(all_vals)])\n\n\nif __name__ == '__main__':\n summarize_and_hist_ICA()\n","sub_path":"pipeline/summarize_ICA_coeff.py","file_name":"summarize_ICA_coeff.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"204148469","text":"import argparse\nimport collections\nimport functools\nimport fractions\nimport json\nimport pathlib\nimport statistics\nimport sys\n\nPARSER = argparse.ArgumentParser(description=\"Evaluate JSONs into LaTeX table\")\nPARSER.add_argument(\n \"-f\",\n \"--formula\",\n metavar=\"DIR\",\n type=pathlib.Path,\n required=True,\n help=\"Formulas basedir\",\n)\nPARSER.add_argument(\n \"-s\",\n \"--samples\",\n metavar=\"DIRS\",\n nargs=\"+\",\n type=pathlib.Path,\n required=True,\n help=\"Sample basedirs (multiple supported)\",\n)\nPARSER.add_argument(\"--full\", action=\"store_true\", help=\"do not collapse CAV2009\")\n\n\ndef display(i):\n bold, v = i\n ret = \"\" if not bold else r\"\\textbf \"\n if isinstance(v, fractions.Fraction):\n return f\"{ret}{{{float(v)*100:>8.2f}}}\"\n return f\"{ret}{{{round(v):>8}}}\"\n\n\ndef short_name(x):\n return x\n\n\ndef dump_benchmark(bm, data):\n print(bm)\n for k1, v1 in data.items():\n print(\" \", k1)\n for k2, v2 in v1.items():\n print(\" \", k2, v2)\n\n\ndef collapse_dirs(store, dirnames):\n to_coalesce = collections.defaultdict(list)\n to_remove = []\n for bm, data in store.items():\n for pdir in bm.parents:\n if pdir.name in dirnames:\n to_coalesce[pdir].append(data)\n to_remove.append(bm)\n\n for k, v in to_coalesce.items():\n store[k] = coalesce(v)\n\n for k in to_remove:\n del store[k]\n return store\n\n\ndef coalesce(dicts):\n # assumes all dicts have the same keys\n keys = dicts[0].keys()\n assert all(d.keys() == keys for d in dicts)\n\n combined = {}\n for k in keys:\n values = [d[k] for d in dicts]\n if isinstance(values[0], dict):\n combined_val = coalesce(values)\n else:\n combined_val = sum(values, [])\n combined[k] = combined_val\n\n return combined\n\n\ndef main():\n args = PARSER.parse_args(sys.argv[1:])\n\n for sample_dir in args.samples:\n if not sample_dir.is_dir():\n PARSER.error(f\"{sample_dir} is not a directory\")\n\n if not args.formula.is_dir():\n PARSER.error(f\"{formula} is not a directory.\")\n\n # samples = sorted(args.samples)\n store = collections.defaultdict(\n functools.partial(\n collections.defaultdict, functools.partial(collections.defaultdict, list)\n )\n )\n\n totals = collections.defaultdict(list)\n benchmark_count = collections.defaultdict(int)\n\n for formula in args.formula.glob(\"**/*.smt2\"):\n single = formula.relative_to(args.formula)\n jsons = [\n (\n sample_dir.joinpath(single.with_suffix(\".smt2.json\")),\n sample_dir,\n )\n for sample_dir in args.samples\n if sample_dir.joinpath(single.with_suffix(\".smt2.json\")).is_file()\n ]\n if len(jsons) != len(args.samples):\n continue\n\n formula_dir = single.parent\n d = store[formula_dir]\n benchmark_count[formula_dir] += 1\n\n for json_filename, cat in jsons:\n with open(json_filename) as json_file:\n summary = json.load(json_file)\n\n d[\"d_epochs\"][cat].append(summary[\"epochs\"])\n d[\"b_depth\"][cat].append(summary[\"formula stats\"][\"formula AST depth\"])\n d[\"a_ints\"][cat].append(\n summary[\"formula stats\"][\"num ints\"]\n + summary[\"formula stats\"][\"num arrays\"]\n + summary[\"formula stats\"][\"num bools\"]\n )\n d[\"f_solutions\"][cat].append(summary[\"unique valid samples\"])\n d[\"c_coverage\"][cat].append(\n fractions.Fraction(summary.get(\"wire_coverage\", 0))\n )\n d[\"e_smtcalls\"][cat].append(summary[\"maxsmt calls\"])\n\n totals[f\"coverage_{short_name(cat.name)}\"].append(d[\"c_coverage\"][cat][-1])\n totals[f\"solutions_{short_name(cat.name)}\"].append(\n d[\"f_solutions\"][cat][-1]\n )\n\n megamax_coverages = []\n for benchmark in store:\n mega = store[benchmark][\"c_coverage\"][args.samples[0]]\n megab = store[benchmark][\"c_coverage\"][args.samples[1]]\n for a, b in zip(mega, megab):\n print(a, b)\n megamax_coverages.append(max(a, b))\n\n totals[f\"coverage_megasampler\"] = megamax_coverages\n totals[f\"solutions_megasampler\"] = (\n totals[f\"solutions_{short_name(args.samples[0].name)}\"]\n + totals[f\"solutions_{short_name(args.samples[1].name)}\"]\n )\n\n if not args.full:\n store = collapse_dirs(store, [\"CAV_2009_benchmarks\"])\n\n for key in totals:\n totals[key] = statistics.mean(totals[key])\n\n store2 = {}\n for benchmark in store:\n store2[benchmark] = {}\n for column in store[benchmark]:\n top = 0\n for cat in store[benchmark][column]:\n if column == \"e_smtcalls\" and cat != args.samples[-1]:\n continue\n value = statistics.mean(store[benchmark][column][cat])\n top = max(value, top)\n store2[benchmark][f\"{column}_{short_name(cat.name)}\"] = value\n for cat in store[benchmark][column]:\n if column == \"e_smtcalls\" and cat != args.samples[-1]:\n continue\n value = store2[benchmark][f\"{column}_{short_name(cat.name)}\"]\n if value == top:\n value = (True, value)\n else:\n value = (False, value)\n store2[benchmark][f\"{column}_{short_name(cat.name)}\"] = value\n\n for benchmark in store2:\n depth = -1\n ints = -1\n # It's horrible but I'm tired just do it the stupid way\n for key in list(store2[benchmark].keys()):\n if not key.startswith(\"b_depth\"):\n continue\n if depth != -1:\n assert depth == store2[benchmark][key][1]\n depth = store2[benchmark][key][1]\n del store2[benchmark][key]\n for key in list(store2[benchmark].keys()):\n if not key.startswith(\"a_ints\"):\n continue\n if ints != -1:\n assert ints == store2[benchmark][key][1]\n ints = store2[benchmark][key][1]\n del store2[benchmark][key]\n for key in store2[benchmark]:\n if key.startswith(\"e_smtcalls\"):\n store2[benchmark][key] = (False, store2[benchmark][key][1])\n store2[benchmark][\"a_ints\"] = (False, ints)\n store2[benchmark][\"b_depth\"] = (False, depth)\n\n n = max(len(store2[x]) for x in store2)\n\n # for row in store2:\n # print(row, store2[row]['f_solutions_MeGA'])\n # return\n\n print(\"% benchmark \" + \" \".join((sorted(store2[list(store2.keys())[-1]]))))\n for benchmark in sorted(store2):\n if len(store2[benchmark]) < n:\n continue\n data = \" & \".join(\n display(store2[benchmark][key]) for key in sorted(store2[benchmark].keys())\n )\n quoted = str(benchmark)\n if \"Bromberger\" in quoted:\n quoted = quoted.replace(\n r\"20180326-Bromberger/more_slacked/CAV_2009_benchmarks/smt\",\n r\"CAV2009-slacked\\tnote{1}\\;\\,\",\n )\n quoted = quoted.replace(\n r\"20180326-Bromberger/unbd-sage/unbd010v15c\", r\"unbd-sage\\tnote{2}\\;\\,\"\n )\n quoted = quoted.replace(\n r\"20180326-Bromberger/more_slacked/CAV_2009_benchmarks\",\n r\"CAV2009-slacked\\tnote{1}\\;\\,\",\n )\n elif \"random\" in quoted:\n quoted = quoted.replace(\n r\"bofill-scheduling/SMT_random_LIA\", r\"bofill-sched-random\\tnote{4}\\;\\,\"\n )\n elif \"real\" in quoted:\n quoted = quoted.replace(\n r\"bofill-scheduling/SMT_real_LIA\", r\"bofill-sched-real\\tnote{5}\\;\\,\"\n )\n else:\n quoted = quoted.replace(r\"CAV_2009_benchmarks/smt\", r\"CAV2009\\tnote{3}\\;\\,\")\n quoted = quoted.replace(r\"CAV_2009_benchmarks\", r\"CAV2009\\tnote{3}\\;\\,\")\n quoted = quoted.replace(\"_\", r\"\\_\")\n print(f\"{quoted: <50} & {data} \\\\\\\\\")\n print(\"% \" + \" \".join(f\"{key}={float(value)}\" for key, value in totals.items()))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/create_latex_table.py","file_name":"create_latex_table.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"550231540","text":"#######################################################################################################\n# In this section, we set the user authentication, user and app ID, model details, URL of the video\n# we want as an input, and sample_ms. Change these strings to run your own example.\n#######################################################################################################\n\n# Your PAT (Personal Access Token) can be found in the portal under Authentification\nPAT = 'YOUR_PAT_HERE'\n# Specify the correct user_id/app_id pairings\n# Since you're making inferences outside your app's scope\nUSER_ID = 'clarifai'\nAPP_ID = 'main'\n# Change these to whatever model and video URL you want to use\nMODEL_ID = 'general-image-recognition'\nMODEL_VERSION_ID = 'aa7f35c01e0642fda5cf400f543e7c40'\nVIDEO_URL = 'https://samples.clarifai.com/beer.mp4'\n# Change this to configure the FPS rate (If it's not configured, it defaults to 1 FPS) \nSAMPLE_MS = 500\n\n############################################################################\n# YOU DO NOT NEED TO CHANGE ANYTHING BELOW THIS LINE TO RUN THIS EXAMPLE\n############################################################################\n\nfrom clarifai_grpc.channel.clarifai_channel import ClarifaiChannel\nfrom clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc\nfrom clarifai_grpc.grpc.api.status import status_code_pb2\n\nchannel = ClarifaiChannel.get_grpc_channel()\nstub = service_pb2_grpc.V2Stub(channel)\n\nmetadata = (('authorization', 'Key ' + PAT),)\n\nuserDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID, app_id=APP_ID)\n\npost_model_outputs_response = stub.PostModelOutputs(\n service_pb2.PostModelOutputsRequest(\n user_app_id=userDataObject, # The userDataObject is created in the overview and is required when using a PAT\n model_id=MODEL_ID,\n version_id=MODEL_VERSION_ID, # This is optional. Defaults to the latest model version\n inputs=[\n resources_pb2.Input(\n data=resources_pb2.Data(\n video=resources_pb2.Video(\n url=VIDEO_URL\n )\n )\n )\n ],\n model=resources_pb2.Model(\n output_info=resources_pb2.OutputInfo(\n output_config=resources_pb2.OutputConfig(sample_ms=SAMPLE_MS)\n )\n ),\n ),\n metadata=metadata\n)\nif post_model_outputs_response.status.code != status_code_pb2.SUCCESS:\n print(post_model_outputs_response.status)\n raise Exception(\"Post model outputs failed, status: \" + post_model_outputs_response.status.description)\n\n# Since we have one input, one output will exist here\noutput = post_model_outputs_response.outputs[0]\n\n# A separate prediction is available for each \"frame\"\nfor frame in output.data.frames:\n print(\"Predicted concepts on frame \" + str(frame.frame_info.time) + \":\")\n for concept in frame.data.concepts:\n print(\"\\t%s %.2f\" % (concept.name, concept.value))\n\n# Uncomment this line to print the full Response JSON\n#print(output)","sub_path":"code_snippets/api-guide/predict/python/video_via_url.py","file_name":"video_via_url.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"299655502","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START gae_python38_render_template]\nimport datetime\nimport os\nimport uuid\n\nfrom flask import (Flask, Response, abort, jsonify, redirect, render_template,\n request, url_for, flash)\nfrom flask_login import LoginManager, current_user, login_required\nfrom flask_mail import Mail, Message\n\nfrom auth import auth as auth_blueprint\nfrom db_operations import (create_listing, create_listing_without_id,\n delete_user, get_listing, get_listings, get_user, delete_listing, update_listing)\nfrom forms import AdoptionForm, CreateListingForm, EditListingForm\nfrom gcloudstorage import upload_blob\nfrom listings import Listing\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.urandom(32)\napp.config['MAIL_SERVER'] = 'smtp.googlemail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USERNAME'] = 'petadoption.sps@gmail.com'\napp.config['MAIL_PASSWORD'] = os.getenv(\"MAIL_PASSWORD\") # SET password as environment variable\n\nmail = Mail(app)\n\napp.register_blueprint(auth_blueprint)\n\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'auth.login'\nlogin_manager.init_app(app)\n\ndef send_email(poster_email, adopter_email, adopter_name, email_message):\n msg = Message('Someone wants to adopt your pet!', sender='petadoption.sps@gmail.com',\n recipients=[poster_email], reply_to=adopter_email)\n msg.body = f'''Hello!\n\n {adopter_name} is interested in your pet! They said:\n {email_message}\n If you think they will make a good family for your pet, simply reply to this message to send them an email.\n \n Cheers!\n '''\n\n mail.send(msg)\n\n@app.route('/', methods=['GET'])\ndef root():\n if request.method == 'GET':\n listings = get_listings()\n form = CreateListingForm()\n adoptform = AdoptionForm()\n editlistingform = EditListingForm()\n return render_template('main.html', listings=listings, form=form, adoptform=adoptform, editlistingform=editlistingform)\n\n\n@app.route('/createadopt', methods=['POST'])\ndef create_adopt():\n if request.method == 'POST':\n for key, upload in request.files.items():\n identity = str(uuid.uuid4()) # or uuid.uuid4().hex\n try:\n img_url = upload_blob(request.files[key], identity, content_type=upload.content_type)\n app.logger.info(f'uploaded images to gcloud with url {img_url}')\n except Exception as e:\n app.logger.error(e)\n form_dict = request.form.to_dict()\n try:\n new_listing = create_listing_without_id(\n form_dict['pet_name'],\n form_dict['animal'],\n form_dict['breed'],\n form_dict['dob'],\n form_dict['description_of_pet'],\n img_url,\n form_dict['email'],\n )\n create_listing(new_listing)\n return redirect(url_for('root'))\n except Exception as e:\n app.logger.error(e)\n abort(500, f\"Failed to create listing\")\n\n@app.route('/adopt/', methods=['POST'])\ndef adopt(listing_id):\n listing = get_listing(listing_id)\n if not listing:\n app.logger.error(f\"Failed to get listing: {listing_id}\")\n abort(404, description=f\"Failed to get listing: {listing_id}\")\n return\n \n adopter_name = request.form[\"name\"]\n adopter_email = request.form[\"email\"]\n email_message = request.form[\"message\"]\n\n poster_email = listing.user_email\n\n send_email(poster_email, adopter_email, adopter_name, email_message)\n app.logger.info(f\"Sent email to {poster_email} with message from {adopter_email}\")\n\n return redirect(url_for('root'))\n\n\n@app.route('/editlisting/', methods=['POST'])\ndef editlisting(listing_id):\n if request.method == 'POST':\n listing_obj = get_listing(listing_id)\n if listing_obj:\n if listing_obj.user_email == current_user.email:\n new_listing = request.form.to_dict()\n del new_listing['csrf_token']\n if update_listing(listing_id, new_listing):\n print(\"Success\")\n return 'Successfully updated listing', 204\n else:\n app.logger.error(f\"Failed to edit listing: {listing_id}\")\n abort(500, f\"Failed to edit listing: {listing_id}\")\n else:\n app.logger.error(f\"Failed to edit listing, listing not owned by current user: {listing_id} | {current_user.email}\")\n abort(500, f\"Failed to edit listing, listing not owned by current user: {listing_id} | {current_user.email}\")\n else:\n app.logger.error(f\"Failed to edit listing, listing does not exist: {listing_id}\")\n abort(500, f\"Failed to edit listing, listing does not exist: {listing_id}\")\n\n@app.route('/users/', methods=['GET', 'DELETE'])\ndef handle_user(email):\n if request.method == 'GET':\n user = get_user(email)\n if not user:\n app.logger.error(f\"Failed to get user: {email}\")\n abort(404, description=f\"Failed to get user: {email}\")\n return user.to_json()\n if request.method == 'DELETE':\n deleted = delete_user(email)\n if not deleted:\n app.logger.error(f\"Failed to delete user: {email}\")\n abort(500, f\"Failed to delete user: {email}\")\n return ('', 204)\n\n@app.route('/listings', methods=['GET'])\ndef handle_listings():\n if request.method == 'GET':\n listings = get_listings()\n if not listings:\n app.logger.error(\"Failed to get listings\")\n abort(500, \"Failed to get listings\")\n return jsonify(listings)\n\n \n@app.route('/listings/delete/', methods=['DELETE'])\n@login_required\ndef delete_listing_api(listing_id):\n if request.method == 'DELETE':\n listing_obj = get_listing(listing_id)\n if isinstance(listing_obj, Listing):\n if listing_obj.user_email == current_user.email:\n if delete_listing(listing_id):\n return 'Successfully deleted listing', 204\n else:\n app.logger.error(f\"Failed to delete listing: {listing_id}\")\n abort(500, f\"Failed to delete listing: {listing_id}\")\n else:\n app.logger.error(f\"Failed to delete listing, listing not owned by current user: {listing_id} | {current_user.email}\")\n abort(500, f\"Failed to delete listing, listing not owned by current user: {listing_id} | {current_user.email}\")\n else:\n app.logger.error(f\"Failed to delete listing, listing does not exist: {listing_id}\")\n abort(500, f\"Failed to delete listing, listing does not exist: {listing_id}\")\n\n \n@login_manager.user_loader\ndef load_user(user_id):\n ''' \n Take unicode id of user and return corresponding user object\n See https://flask-login.readthedocs.io/en/latest/#how-it-works\n\n We use email as the user_id\n '''\n return get_user(user_id)\n\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n # Flask's development server will automatically serve static files in\n # the \"static\" directory. See:\n # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,\n # App Engine itself will serve those files as configured in app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [START gae_python38_render_template]\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"241209458","text":"\n\nfrom azureml.core import Run\n\nrun = Run.get_context() \n\n#Log a metric value. This is logged as a scalar to the 'Metrics' tab\nrun.log('Accuracy', 4)\n\n#log an image to a run. This is of type Image and is logged under the 'Outputs + logs' tab and can be found in the 'logs' folder\nrun.log_image('food', path='./breadpudding.jpg', plot=None, description='food is life')\n\n#log list of values to a run. This is logged as a 'Vector' and can be found under the 'Metrics' tab *the chart is off and the index looks strange. \n#x axis is 'Index'\nmy_list1 = ['a', 'b', 'c']\nmy_list2 = [1, 2, 3]\nrun.log_list('Letters', value=my_list1, description='some letters')\nrun.log_list('Numbers', value=my_list2, description='some numbers')\n\n#log a row to a run. This is logged as type 'Table' and is found under the 'Metrics' tab\ncitrus = ['orange', 'lemon', 'lime'] \nsizes = [ 10, 7, 3] #y axis\n\n#x axis is fruit\n#y axis is sizes \n \nfor index in range(len(citrus)):\n run.log_row(\"citrus\", fruit = citrus[index], size=sizes[index])\n\n\n#log a table to a run. This is logged as type 'Table' and is found under the 'Metrics' tab and under the 'Table' option. First value(keys) is the column name \n\nfruits = dict([('apples', '5'), ('pears', '9'), ('oranges', '7'), ('bananas', '4'), ('plums', '2') ])\nrun.log_table('test table', fruits, description='testing table')\n\n#log confusion matrix to an AML run. This is logged as a confusion_matrix type and can be found under the 'Outputs + logs' tab and can be found in the 'logs' folder\n# More info - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html \n\nfrom sklearn.metrics import confusion_matrix\nfrom azureml.core import Run\n\nrun = Run.get_context()\n\ny_true = [\"cat\", \"dog\", \"cat\", \"cat\", \"dog\", \"fish\"]\ny_pred = [\"dog\", \"dog\", \"cat\", \"cat\", \"dog\", \"cat\"]\nmatrix = confusion_matrix(y_true, y_pred, labels=[\"dog\", \"fish\", \"cat\"])\n\nrun.log_confusion_matrix(name='confusion_matrix', \n value={\n \"schema_type\": \"confusion_matrix\",\n \"schema_version\": \"v1\",\n \"data\": { \n \"class_labels\": [\"dog\", \"fish\", \"cat\"], \n \"matrix\": matrix.tolist() \n }\n })\n\n\n\n","sub_path":"run_history/logging_metrics/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"511188119","text":"tf = open('2019/inputs/input-00.txt').readlines() # you can put an example input data here\nf = open('2019/inputs/input-05.txt').readlines() # your input data\n\n\ndef prep_input(f): # edit to adjust how the program reads your files\n data = [list(map(int, x.strip().split(','))) for x in f][0]\n return data\n\n\ndef part1(f, ID):\n data = prep_input(f)\n print(data)\n i = 0\n \n mode0, mode1 = 0, 0\n while data[i] != 99:\n print(data)\n if mode0: val0 = data[i+1]\n else: val0 = data[data[i+1]]\n if mode1: val1 = data[i+2]\n else: val1 = data[data[i+2]]\n \n if data[i] == 1:\n data[data[i+3]] = val0 + val1\n i += 4\n elif data[i] == 2:\n data[data[i+3]] = val0 * val1\n i += 4\n elif data[i] == 3:\n data[data[i+1]] = ID\n i += 2\n else:\n code = str(data[i])\n mode0, mode1 = int(code[1]), int(code[0])\n data[i] = int(code[3]) #NOPE\n continue\n mode0, mode1 = 0, 0\n return data[0]\n\n\ndef part2(f):\n for i in range(0, 100):\n for j in range(0, 100):\n if part1(f, i, j) == 19690720:\n return 100 * i + j\n\n\nprint(f\"part 1:\\n{ part1(f, 1) }\")\n# print(f\"part 2:\\n{ part2(f) }\")\n","sub_path":"2019/day-05.py","file_name":"day-05.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"222066802","text":"# Helper functions for 2_Docking_analysis folder\nimport pandas as pd\nimport numpy as np\nfrom rdkit import Chem\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style='white', context='talk', font_scale=0.8)\n\n\n\ndef violin_plot_helper(feature, lig_datasets, xlabel='', ylabel='', title='', figsize=(12,4),\n split_by_activity=False, palette=\"Spectral\", linewidth=1.2, **kwargs):\n df_ = pd.DataFrame()\n # Create the dataset\n names_ = []\n for name, dataset in lig_datasets.items():\n a = dataset[feature]\n activity = dataset['Activity']\n n_actives = np.sum(activity == 'active')\n length = len(a)\n std_ = round(np.std(a), 2)\n mean_ = round(np.mean(a), 2)\n\n #names_.append(f'{name}\\n' + r'$n_a/N$' + f' = {n_actives}/{length}\\nmean = {mean_}\\nstd = {std_}')\n names_.append(f'{name}\\n' + r'$n_a/N$' + f' = {n_actives}/{length}')\n\n df_ = df_.append(\n pd.DataFrame(\n list(zip([name]*length, a, activity)),\n columns = ['Database', 'Feature', 'Activity']))\n\n plt.figure(figsize=figsize)\n if split_by_activity:\n ax = sns.violinplot(x='Database', y='Feature', hue = 'Activity', linewidth=linewidth,\n data=df_, palette=palette, bw=.15, split=split_by_activity, **kwargs)\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, 0.12),\n fancybox=True, ncol=5)\n else:\n ax = sns.violinplot(x='Database', y='Feature', linewidth=linewidth,\n data=df_, palette=palette, bw=.15, **kwargs) \n \n \n for v in ax.collections:\n v.set_edgecolor('#151515')\n \n for l in ax.lines:\n l.set_linestyle('--')\n l.set_linewidth(1.5)\n l.set_color('blue')\n l.set_alpha(0.8)\n for l in ax.lines[1::3]:\n l.set_linestyle('-')\n l.set_linewidth(2)\n l.set_color('black')\n l.set_alpha(0.8)\n\n # plotting\n plt.xticks(np.arange(len(names_)), labels=names_)\n plt.ylabel(ylabel, weight='bold')\n plt.xlabel(xlabel, weight='bold')\n plt.title(title, weight='bold')\n \n plt.grid(c='lightgrey')\n\ndef swarm_plot_helper(feature, lig_datasets, xlabel='', ylabel='', title='', figsize=(12,4),\n split_by_activity=False, palette=\"Spectral\", linewidth=1.8, **kwargs):\n df_ = pd.DataFrame()\n # Create the dataset\n names_ = []\n for name, dataset in lig_datasets.items():\n a = dataset[feature]\n activity = dataset['Activity']\n n_actives = np.sum(activity == 'active')\n length = len(a)\n std_ = np.std(a).round(2)\n mean_ = np.mean(a).round(2)\n\n names_.append(f'{name}\\nn_a/N = {n_actives}/{length}\\nMean = {mean_}\\nStd = {std_}')\n\n df_ = df_.append(\n pd.DataFrame(\n list(zip([name]*length, a, activity)),\n columns = ['Database', 'Feature', 'Activity']))\n\n plt.figure(figsize=figsize)\n if split_by_activity:\n _ = sns.swarmplot(x='Database', y='Feature', hue = 'Activity',\n data=df_, palette=palette, **kwargs)\n else:\n _ = sns.swarmplot(x='Database', y='Feature', \n data=df_, palette=palette, **kwargs) \n\n # plotting\n plt.xticks(np.arange(len(names_)), labels=names_)\n plt.ylabel(ylabel, weight='bold')\n plt.xlabel(xlabel, weight='bold')\n plt.title(title, weight='bold')\n plt.grid(c='lightgrey')\n\n# http://rdkit.blogspot.com/2013/10/comparing-fingerprints-to-each-other.html\n'''\ndef directCompare(scoredLists,fp1,fp2,plotIt=True,silent=False):\n \"\"\" Returns: Kendall tau, Spearman rho, and Pearson R\n \n \"\"\"\n l1 = scoredLists[fp1]\n l2 = scoredLists[fp2]\n rl1=[x[-1] for x in l1]\n rl2=[x[-1] for x in l2]\n vl1=[x[0] for x in l1]\n vl2=[x[0] for x in l2]\n if plotIt:\n _=scatter(vl1,vl2,edgecolors='none')\n maxv=max(max(vl1),max(vl2))\n minv=min(min(vl1),min(vl2))\n _=plot((minv,maxv),(minv,maxv),color='k',linestyle='-')\n xlabel(fp1)\n ylabel(fp2)\n \n tau,tau_p=stats.kendalltau(vl1,vl2)\n spearman_rho,spearman_p=stats.spearmanr(vl1,vl2)\n pearson_r,pearson_p = stats.pearsonr(vl1,vl2)\n if not silent:\n print fp1,fp2,tau,tau_p,spearman_rho,spearman_p,pearson_r,pearson_p\n return tau,spearman_rho,pearson_r'''\n\n\nfrom itertools import combinations\nfrom rdkit.DataStructs import FingerprintSimilarity\nfrom rdkit import DataStructs\n\ndef compare_lig_db(fp, lig_datasets, method = 'tanimoto', same = None, same_db = ''):\n '''\n Compares pairwise similarity between molecules from two given sets.\n '''\n matched_ligands = {}\n if same:\n combs = (same_db, same_db)\n else:\n combs = combinations(lig_datasets.keys(), 2)\n \n for key_i, key_j in combs:\n print('\\n' + '='*20)\n print(key_i, '\\t', key_j)\n print('='*20)\n d_i = lig_datasets[key_i]\n d_j = lig_datasets[key_j]\n\n # Create the list\n matched = []\n for k in d_i.index:\n for p in d_j.index:\n try:\n fp_sim = FingerprintSimilarity(\n d_i.loc[k, fp], \n d_j.loc[p, fp], metric=DataStructs.TanimotoSimilarity)\n\n if fp_sim >= 0.90:\n # Add to the list\n matched.append( {'match_mols': (d_i.loc[k, 'mol_rdk'], \n d_j.loc[p, 'mol_rdk']), \n 'match_names': (k, p),\n 'tanimoto': fp_sim} )\n if fp_sim >= 0.98:\n print(k, '\\t', p)\n except AttributeError as e:\n print(e, k, '\\t', p)\n break\n # add to the dict\n matched_ligands[F'{key_i}-{key_j}'] = matched\n return matched_ligands\n\n\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom rdkit.Chem.Draw import IPythonConsole\nfrom IPython.display import SVG, Image\nfrom rdkit.Chem import rdDepictor\ndef draw_matched_ligs(db, matched_ligands, cutoff = 0.99):\n matched_dabatabases = matched_ligands[db]\n mols_to_draw = {}\n for match in matched_dabatabases:\n score = match['tanimoto']\n if score > cutoff:\n # Get both molecules\n mol_i, mol_j = match['match_mols']\n\n name_i, name_j = match['match_names']\n # Compute 2D coords\n rdDepictor.Compute2DCoords(mol_i)\n rdDepictor.Compute2DCoords(mol_j)\n\n mols_to_draw[F'{name_i} - {name_j}'] = mol_i\n print('='*25 + ' '*5 + db + ' '*5 + '='*25)\n img = Chem.Draw.MolsToGridImage(\n (mol_i, mol_j), legends = (name_i, name_j),\n molsPerRow = 2, subImgSize = (300,200))\n display(img)\n mol_i.GetSubstructMatch(mol_j)\n display(mol_i)\n \n \n#************************\n# Plot Dimentional Reduction with bokeh\n#************************\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource, CDSView, GroupFilter, \\\n Span, CategoricalColorMapper, HoverTool\nfrom bokeh.layouts import row, column\nfrom bokeh.transform import factor_cmap, factor_mark\n\n# Vertical line\nvline = Span(location=0, dimension='height', \n line_color='black', line_width=2, line_alpha=0.5, line_dash='dashed')\n# Horizontal line\nhline = Span(location=0, dimension='width', \n line_color='black', line_width=2, line_alpha=0.5, line_dash='dashed')\n# HoverTool options\nhover= HoverTool(tooltips=[ ('Name', '@name'), ('# Atoms', '@num_atoms'),('Library', '@library'),\n ('Activity', '@Activity')], names = ['actives'])\n \ndef create_fig_bokeh(desc, source_act, source_inact, col_library_map,\n title='', kind_dr='tsne', legend_location='top_right', legend=False):\n ''' ColumnDataSources source_act and source_inact must be instantiated'''\n \n \n \n f = figure(title=title, plot_width=450, plot_height=450,\n x_axis_label='First Dimension', y_axis_label='Second Dimension',\n tools='pan,box_select,wheel_zoom,reset')\n # Add hovertool \n\n f.renderers.extend([vline, hline])\n # Add glyphs\n # Plot inactives\n f_inac = f.circle(x= desc + f'_{kind_dr}_x', y= desc + f'_{kind_dr}_y', \n color=col_library_map,\n nonselection_fill_color=col_library_map,\n nonselection_fill_alpha=0.05,\n size=4, alpha=0.15, line_width=0,\n muted_alpha=0.01,\n source=source_inact)\n\n # Plot actives\n library_names = np.unique(source_act.data['library'])\n df_ = pd.DataFrame(source_act.data)\n for library in library_names:\n data = ColumnDataSource(df_.loc[df_['library'] == library, :])\n f.triangle(x= desc + f'_{kind_dr}_x', y= desc + f'_{kind_dr}_y',\n color=col_library_map,\n legend_label=library,\n nonselection_fill_color=col_library_map,\n nonselection_fill_alpha=0.05,\n size=8, line_color='black', line_width=0.5,\n source=data, name=library)\n \n # Styling\n f.title.text_font_size = '1.4em'\n f.axis.axis_label_text_font_size = '1.0em' # font size\n f.axis.axis_label_text_font_style = 'bold'\n f.title.align = 'center'\n f.axis.axis_line_width = 3\n f.axis.major_label_text_font_size = '12pt'\n if legend:\n f.legend.click_policy='hide'\n f.legend.location = legend_location\n else:\n f.legend.visible = False \n return f","sub_path":"2_Docking_analysis/helper_functions_2.py","file_name":"helper_functions_2.py","file_ext":"py","file_size_in_byte":9760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"366645181","text":"from socketserver import StreamRequestHandler, TCPServer\n\nclass EchoHandler(StreamRequestHandler):\n def handle(self):\n print('Got connection from', self.client_address)\n # 使用类似文件读写的方式进行socket的读入写出\n for line in self.rfile:\n self.wfile.write(line)\n\nif __name__ == '__main__':\n serv = TCPServer(('', 20000), EchoHandler)\n # 注意: 虽然server_forever使用的是selector模式, 但是同时只能处理一个客户端的请求,\n # 因为处理请求的线程和selector线程是同一个线程.\n # 如果要并发处理多个client的请求, 需要另开线程.\n serv.serve_forever()\n","sub_path":"network_11/tcp_server_11_2.py","file_name":"tcp_server_11_2.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"263137419","text":"\"\"\"\n Represent a small bilingual lexicon as a Python dictionary in the following fashion\n {\"merry\":\"god\", \"christmas\":\"jul\", \"and\":\"och\", \"happy\":gott\", \"new\":\"nytt\", \"year\":\"år\"}\n and use it to translate your Christmas cards from English into Swedish.\n\n That is, write a function translate() that takes a list of English words and\n returns a list of Swedish words.\n\"\"\"\n\ndef translate(word_list):\n \n _en_to_sw = {\n 'merry': 'god',\n 'christmas': 'jul',\n 'and': 'och',\n 'happy': 'gott',\n 'new': 'nytt',\n 'year': 'år'\n }\n\n _translation = []\n\n for word in word_list:\n if _en_to_sw.get(word):\n _translation.append(_en_to_sw.get(word))\n else:\n _translation.append(word)\n\n return _translation\n","sub_path":"Solutions/exercise_20.py","file_name":"exercise_20.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"85256753","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom django.test.utils import override_settings\n\nfrom demo.models import AutoIncConcurrentModel, SimpleConcurrentModel\nfrom demo.util import nextname\n\nfrom concurrency.api import concurrency_disable_increment, disable_concurrency\nfrom concurrency.exceptions import RecordModifiedError\nfrom concurrency.utils import refetch\n\n\n@pytest.mark.django_db(transaction=False)\ndef test_disable_concurrency_settings(settings):\n with override_settings(CONCURRENCY_ENABLED=False):\n instance1 = SimpleConcurrentModel(username=next(nextname))\n instance1.save()\n refetch(instance1).save()\n\n\n@pytest.mark.django_db(transaction=False)\ndef test_disable_concurrency_global():\n instance1 = SimpleConcurrentModel(username=next(nextname))\n instance2 = AutoIncConcurrentModel(username=next(nextname))\n instance1.save()\n instance2.save()\n refetch(instance1).save()\n refetch(instance2).save()\n with disable_concurrency():\n instance1.save()\n instance2.save()\n\n copy2 = refetch(instance2)\n refetch(instance2).save()\n with pytest.raises(RecordModifiedError):\n copy2.save()\n\n\n@pytest.mark.django_db(transaction=False)\ndef test_disable_concurrency_class(model_class=SimpleConcurrentModel):\n instance = model_class(username=next(nextname))\n instance.save()\n copy = refetch(instance)\n copy.save()\n with disable_concurrency(SimpleConcurrentModel):\n instance.save()\n\n\n@pytest.mark.django_db(transaction=False)\ndef test_disable_concurrency_instance(model_class=SimpleConcurrentModel):\n instance1 = model_class(username=next(nextname))\n instance1.save()\n copy1 = refetch(instance1)\n copy1.save()\n\n instance2 = model_class(username=next(nextname))\n instance2.save()\n copy2 = refetch(instance2)\n copy2.save()\n\n with disable_concurrency(instance1):\n instance1.save()\n with pytest.raises(RecordModifiedError):\n instance2.save()\n\n\n@pytest.mark.django_db(transaction=False)\ndef test_disable_increment():\n instance1 = AutoIncConcurrentModel(username=next(nextname))\n assert instance1.version == 0\n instance1.save()\n assert instance1.version == 1\n with concurrency_disable_increment(instance1):\n instance1.save()\n instance1.save()\n assert instance1.version == 1\n instance1.save()\n assert instance1.version == 2\n","sub_path":"tests/test_enable_disable.py","file_name":"test_enable_disable.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"427967927","text":"'''\nWrite a Program to Print following Pattern.\n3\n2 3 \n1 2 3\n0 1 2 3\n\n'''\nn=5\n\nfor i in range(n,0,-1):\n\tfor j in range(n):\n\t\tif(j==i):\n\t\t\ti=i+1\n\t\t\tj=j-1\n\t\t\tprint(j,end=\" \")\n\tprint(\" \")\n\n\n\n","sub_path":"Python/DailyFlash/29jan2020/MySolutions/program4.py","file_name":"program4.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"309205620","text":"# Setup: the formatter needs to be ready when we import the messages\nfrom src.messages.formatter import Formatter\nmessage_formatter = Formatter()\n\nfrom src.messages import messages as _messages\n\n__all__ = [\"messages\", \"message_formatter\"]\n\nmessages = _messages.Messages()\n\ndef get_role_name(name, *, number=1):\n \"\"\"Return the localized and potentially pluralized role name.\"\"\"\n role = message_formatter.convert_field(name, \"role\")\n return message_formatter._plural(role, number)\n","sub_path":"src/messages/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"335122012","text":"import math, random, os\nfrom easy_getch import getch\nfrom doggoscript import RTResult, Context, SymbolTable\nfrom doggoscript.error import *\n\nclass Value:\n def __init__(self):\n self.set_pos()\n self.set_context()\n\n def set_pos(self, pos_start=None, pos_end=None):\n self.pos_start = pos_start\n self.pos_end = pos_end\n return self\n\n def set_context(self, context=None):\n self.context = context\n return self\n\n def added_to(self, other):\n return None, self.illegal_operation(other)\n\n def subbed_by(self, other):\n return None, self.illegal_operation(other)\n\n def multed_by(self, other):\n return None, self.illegal_operation(other)\n\n def dived_by(self, other):\n return None, self.illegal_operation(other)\n\n def powed_by(self, other):\n return None, self.illegal_operation(other)\n\n def get_comparison_eq(self, other):\n return None, self.illegal_operation(other)\n\n def get_comparison_ne(self, other):\n return None, self.illegal_operation(other)\n\n def get_comparison_lt(self, other):\n return None, self.illegal_operation(other)\n\n def get_comparison_gt(self, other):\n return None, self.illegal_operation(other)\n\n def get_comparison_lte(self, other):\n return None, self.illegal_operation(other)\n\n def get_comparison_gte(self, other):\n return None, self.illegal_operation(other)\n\n def anded_by(self, other):\n return None, self.illegal_operation(other)\n\n def ored_by(self, other):\n return None, self.illegal_operation(other)\n\n def notted(self, other):\n return None, self.illegal_operation(other)\n\n def execute(self, args):\n return RTResult().failure(self.illegal_operation())\n\n def copy(self):\n raise Exception('No copy method defined')\n\n def is_true(self):\n return False\n\n def illegal_operation(self, other=None):\n if not other:\n other = self\n return RTError(\n self.pos_start, other.pos_end,\n 'Illegal operation',\n self.context\n )\n\n\nclass Number(Value):\n def __init__(self, value, is_null=False):\n super().__init__()\n self.value = value\n self.is_null = is_null\n\n def added_to(self, other):\n if isinstance(other, Number):\n return Number(self.value + other.value).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def subbed_by(self, other):\n if isinstance(other, Number):\n return Number(self.value - other.value).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def multed_by(self, other):\n if isinstance(other, Number):\n return Number(self.value * other.value).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def dived_by(self, other):\n if isinstance(other, Number):\n if other.value == 0:\n return None, RTError(\n other.pos_start, other.pos_end,\n 'Division by zero',\n self.context\n )\n\n return Number(self.value / other.value).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def powed_by(self, other):\n if isinstance(other, Number):\n return Number(self.value ** other.value).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_eq(self, other):\n if isinstance(other, Number):\n return Number(int(self.value == other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_ne(self, other):\n if isinstance(other, Number):\n return Number(int(self.value != other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_lt(self, other):\n if isinstance(other, Number):\n return Number(int(self.value < other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_gt(self, other):\n if isinstance(other, Number):\n return Number(int(self.value > other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_lte(self, other):\n if isinstance(other, Number):\n return Number(int(self.value <= other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_gte(self, other):\n if isinstance(other, Number):\n return Number(int(self.value >= other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def anded_by(self, other):\n if isinstance(other, Number):\n return Number(int(self.value and other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def ored_by(self, other):\n if isinstance(other, Number):\n return Number(int(self.value or other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def notted(self):\n return Number(1 if self.value == 0 else 0).set_context(self.context), None\n\n def copy(self):\n copy = Number(self.value, self.is_null)\n copy.set_pos(self.pos_start, self.pos_end)\n copy.set_context(self.context)\n return copy\n\n def is_true(self):\n return self.value != 0\n\n def __str__(self):\n return str(self.value)\n\n def __repr__(self):\n if self.is_null == False:\n return str(self.value)\n elif self.is_null == True:\n return \"\"\n\nNumber.null = Number(0, True)\nNumber.false = Number(0)\nNumber.true = Number(1)\nNumber.pi = Number(math.pi)\n\nclass String(Value):\n def __init__(self, value):\n super().__init__()\n self.value = value\n\n def added_to(self, other):\n if isinstance(other, String):\n return String(self.value + other.value).set_context(self.context), None\n elif isinstance(other, Number):\n return String(self.value + str(other.value)).set_context(self.context), None\n elif isinstance(other, List):\n return String(self.value + str(other.elements)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def multed_by(self, other):\n if isinstance(other, Number):\n return String(self.value * other.value).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def dived_by(self, other):\n if isinstance(other, Number):\n try:\n return String(self.value[other.value]), None\n except:\n return None, RTError(\n other.pos_start, other.pos_end,\n 'Element at this index could not be retrieved from string because index is out of bounds',\n self.context\n )\n else:\n return None, Value.illegal_operation(self, other)\n\n def is_true(self):\n return len(self.value) > 0\n\n def get_comparison_eq(self, other):\n if isinstance(other, String):\n return Number(int(self.value == other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def get_comparison_ne(self, other):\n if isinstance(other, String):\n return Number(int(self.value != other.value)).set_context(self.context), None\n else:\n return None, Value.illegal_operation(self, other)\n\n def copy(self):\n copy = String(self.value)\n copy.set_pos(self.pos_start, self.pos_end)\n copy.set_context(self.context)\n return copy\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n return f'\"{self.value}\"'\n\n\nclass List(Value):\n def __init__(self, elements):\n super().__init__()\n self.elements = elements\n\n def added_to(self, other):\n new_list = self.copy()\n new_list.elements.append(other)\n return new_list, None\n\n def subbed_by(self, other):\n if isinstance(other, Number):\n new_list = self.copy()\n try:\n new_list.elements.pop(other.value)\n return new_list, None\n except:\n return None, RTError(\n other.pos_start, other.pos_end,\n 'Element at this index could not be removed from list because index is out of bounds',\n self.context\n )\n else:\n return None, Value.illegal_operation(self, other)\n\n def multed_by(self, other):\n if isinstance(other, List):\n new_list = self.copy()\n new_list.elements.extend(other.elements)\n return new_list, None\n else:\n return None, Value.illegal_operation(self, other)\n\n def dived_by(self, other):\n if isinstance(other, Number):\n try:\n return self.elements[other.value], None\n except:\n return None, RTError(\n other.pos_start, other.pos_end,\n 'Element at this index could not be retrieved from list because index is out of bounds',\n self.context\n )\n else:\n return None, Value.illegal_operation(self, other)\n\n def copy(self):\n copy = List(self.elements)\n copy.set_pos(self.pos_start, self.pos_end)\n copy.set_context(self.context)\n return copy\n\n def __str__(self):\n return \", \".join([str(x) for x in self.elements])\n\n def __repr__(self):\n return f'[{\", \".join([repr(x) for x in self.elements])}]'\n\nclass Dictionary(Value):\n def __init__(self, elements):\n super().__init__()\n self.elements = elements\n\n def copy(self):\n copy = List(self.elements)\n copy.set_pos(self.pos_start, self.pos_end)\n copy.set_context(self.context)\n return copy\n\n def __str__(self):\n return \", \".join([str(x) for x in self.elements])\n\n def __repr__(self):\n return '{' + '{}'.format(\", \".join([repr(x) for x in self.elements])) + \"}\"\n\nclass BaseFunction(Value):\n def __init__(self, name):\n super().__init__()\n self.name = name or \"\"\n\n def generate_new_context(self):\n new_context = Context(self.name, self.context, self.pos_start)\n new_context.symbol_table = SymbolTable(new_context.parent.symbol_table)\n return new_context\n\n def check_args(self, arg_names, args):\n res = RTResult()\n\n if len(args) > len(arg_names):\n return res.failure(RTError(\n self.pos_start, self.pos_end,\n f\"{len(args) - len(arg_names)} too many args passed into {self}\",\n self.context\n ))\n\n if len(args) < len(arg_names):\n return res.failure(RTError(\n self.pos_start, self.pos_end,\n f\"{len(arg_names) - len(args)} too few args passed into {self}\",\n self.context\n ))\n\n return res.success(None)\n\n def populate_args(self, arg_names, args, exec_ctx):\n for i in range(len(args)):\n arg_name = arg_names[i]\n arg_value = args[i]\n arg_value.set_context(exec_ctx)\n exec_ctx.symbol_table.set(arg_name, arg_value)\n\n def check_and_populate_args(self, arg_names, args, exec_ctx):\n res = RTResult()\n res.register(self.check_args(arg_names, args))\n if res.should_return():\n return res\n self.populate_args(arg_names, args, exec_ctx)\n return res.success(None)\n\n\nclass Function(BaseFunction):\n def __init__(self, name, body_node, arg_names, should_auto_return):\n super().__init__(name)\n self.body_node = body_node\n self.arg_names = arg_names\n self.should_auto_return = should_auto_return\n\n def execute(self, args):\n res = RTResult()\n interpreter = Interpreter()\n exec_ctx = self.generate_new_context()\n\n res.register(self.check_and_populate_args(\n self.arg_names, args, exec_ctx))\n if res.should_return():\n return res\n\n value = res.register(interpreter.visit(self.body_node, exec_ctx))\n if res.should_return() and res.func_return_value == None:\n return res\n\n ret_value = (\n value if self.should_auto_return else None) or res.func_return_value or Number.null\n return res.success(ret_value)\n\n def copy(self):\n copy = Function(self.name, self.body_node,\n self.arg_names, self.should_auto_return)\n copy.set_context(self.context)\n copy.set_pos(self.pos_start, self.pos_end)\n return copy\n\n def __repr__(self):\n return f\"\"\n\n\nclass BuiltInFunction(BaseFunction):\n def __init__(self, name):\n super().__init__(name)\n\n def execute(self, args):\n res = RTResult()\n exec_ctx = self.generate_new_context()\n\n method_name = f'execute_{self.name}'\n method = getattr(self, method_name, self.no_visit_method)\n\n res.register(self.check_and_populate_args(\n method.arg_names, args, exec_ctx))\n if res.should_return():\n return res\n\n return_value = res.register(method(exec_ctx))\n if res.should_return():\n return res\n return res.success(return_value)\n\n def no_visit_method(self, node, context):\n raise Exception(f'No execute_{self.name} method defined')\n\n def copy(self):\n copy = BuiltInFunction(self.name)\n copy.set_context(self.context)\n copy.set_pos(self.pos_start, self.pos_end)\n return copy\n\n def __repr__(self):\n return f\"\"\n\n #####################################\n\n def execute_print(self, exec_ctx):\n print(str(exec_ctx.symbol_table.get('value')))\n return RTResult().success(Number.null)\n execute_print.arg_names = ['value']\n\n def execute_print_end(self, exec_ctx):\n end = exec_ctx.symbol_table.get(\"end\")\n print(str(exec_ctx.symbol_table.get('value')), end=str(end.value))\n return RTResult().success(Number.null)\n execute_print_end.arg_names = ['value', 'end']\n\n def execute_print_ret(self, exec_ctx):\n return RTResult().success(String(str(exec_ctx.symbol_table.get('value'))))\n execute_print_ret.arg_names = ['value']\n\n def execute_input(self, exec_ctx):\n text = input()\n return RTResult().success(String(text))\n execute_input.arg_names = []\n\n def execute_input_prompt(self, exec_ctx):\n text = input(exec_ctx.symbol_table.get('prompt').value)\n return RTResult().success(String(text))\n execute_input_prompt.arg_names = ['prompt']\n\n def execute_input_int(self, exec_ctx):\n while True:\n text = input()\n try:\n number = int(text)\n break\n except ValueError:\n print(f\"'{text}' must be an integer. Try again!\")\n return RTResult().success(Number(number))\n execute_input_int.arg_names = []\n\n def execute_clear(self, exec_ctx):\n os.system('cls' if os.name == 'nt' else 'cls')\n return RTResult().success(Number.null)\n execute_clear.arg_names = []\n\n def execute_is_number(self, exec_ctx):\n is_number = isinstance(exec_ctx.symbol_table.get(\"value\"), Number)\n return RTResult().success(Number.true if is_number else Number.false)\n execute_is_number.arg_names = [\"value\"]\n\n def execute_is_string(self, exec_ctx):\n is_number = isinstance(exec_ctx.symbol_table.get(\"value\"), String)\n return RTResult().success(Number.true if is_number else Number.false)\n execute_is_string.arg_names = [\"value\"]\n\n def execute_is_list(self, exec_ctx):\n is_number = isinstance(exec_ctx.symbol_table.get(\"value\"), List)\n return RTResult().success(Number.true if is_number else Number.false)\n execute_is_list.arg_names = [\"value\"]\n\n def execute_is_function(self, exec_ctx):\n is_number = isinstance(\n exec_ctx.symbol_table.get(\"value\"), BaseFunction)\n return RTResult().success(Number.true if is_number else Number.false)\n execute_is_function.arg_names = [\"value\"]\n\n def execute_append(self, exec_ctx):\n list_ = exec_ctx.symbol_table.get(\"list\")\n value = exec_ctx.symbol_table.get(\"value\")\n\n if not isinstance(list_, List):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"First argument must be list\",\n exec_ctx\n ))\n\n list_.elements.append(value)\n return RTResult().success(Number.null)\n execute_append.arg_names = [\"list\", \"value\"]\n\n def execute_pop(self, exec_ctx):\n list_ = exec_ctx.symbol_table.get(\"list\")\n index = exec_ctx.symbol_table.get(\"index\")\n\n if not isinstance(list_, List):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"First argument must be list\",\n exec_ctx\n ))\n\n if not isinstance(index, Number):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Second argument must be number\",\n exec_ctx\n ))\n\n try:\n element = list_.elements.pop(index.value)\n except:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n 'Element at this index could not be removed from list because index is out of bounds',\n exec_ctx\n ))\n return RTResult().success(element)\n execute_pop.arg_names = [\"list\", \"index\"]\n\n def execute_extend(self, exec_ctx):\n listA = exec_ctx.symbol_table.get(\"listA\")\n listB = exec_ctx.symbol_table.get(\"listB\")\n\n if not isinstance(listA, List):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"First argument must be list\",\n exec_ctx\n ))\n\n if not isinstance(listB, List):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Second argument must be list\",\n exec_ctx\n ))\n\n listA.elements.extend(listB.elements)\n return RTResult().success(Number.null)\n execute_extend.arg_names = [\"listA\", \"listB\"]\n\n def execute_run(self, exec_ctx):\n fn = exec_ctx.symbol_table.get(\"fn\")\n\n if not isinstance(fn, String):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"First argument must be string\",\n exec_ctx\n ))\n\n fn = fn.value\n\n try:\n with open(fn, \"r\") as f:\n if os.path.splitext(f.name)[1] == \".ds\":\n script = f.read()\n else:\n return RTResult().failure(InvalidSyntaxError(\n self.pos_start, self.pos_end,\n f\"{f.name} is not a valid file!\"\n ))\n except Exception as e:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n f\"Failed to load script \\\"{fn}\\\"\\n\" + str(e),\n exec_ctx\n ))\n\n _, error = run(fn, script)\n\n if error:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n f\"Failed to finish executing script \\\"{fn}\\\"\\n\" +\n error.as_string(),\n exec_ctx\n ))\n\n return RTResult().success(Number.null)\n execute_run.arg_names = [\"fn\"]\n\n def execute_sqrt(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n if isinstance(value, Number):\n sqrt = math.sqrt(value.value)\n return RTResult().success(Number(sqrt))\n execute_sqrt.arg_names = [\"value\"]\n\n def execute_len(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n if isinstance(value, String):\n length = len(value.value)\n elif isinstance(value, List):\n length = len(value.value)\n else:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Value is not String\",\n exec_ctx\n ))\n return RTResult().success(Number(length))\n execute_len.arg_names = [\"value\"]\n\n def execute_lower(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n if isinstance(value, String):\n lower = value.value.lower()\n else:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Value is not a String\",\n exec_ctx\n ))\n return RTResult().success(String(lower))\n execute_lower.arg_names = [\"value\"]\n\n def execute_random(self, exec_ctx):\n value1 = exec_ctx.symbol_table.get(\"value1\")\n value2 = exec_ctx.symbol_table.get(\"value2\")\n\n if not isinstance(value1, Number):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Value1 is not a Number\",\n exec_ctx\n ))\n\n if not isinstance(value2, Number):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Value2 is not a Number\",\n exec_ctx\n ))\n\n return RTResult().success(Number(random.randint(value1.value, value2.value)))\n\n execute_random.arg_names = [\"value1\", \"value2\"]\n\n def execute_split(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n\n if not isinstance(value1, String):\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Value is not a String\",\n exec_ctx\n ))\n\n return RTResult().success(String(value.value.split()))\n\n execute_split.arg_names = [\"value\"]\n\n def execute_getch(self, exec_ctx):\n value = getch().decode(\"utf-8\")\n\n return RTResult().success(String(value))\n\n execute_getch.arg_names = []\n\n def execute_py_eval(self, exec_ctx):\n code = exec_ctx.symbol_table.get(\"code\").value\n\n try:\n res = exec(code)\n except BaseException as e:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n f\"A error has occured when executing!\\nError: {e}\",\n exec_ctx\n ))\n\n return RTResult().success(String(res) if res else Number.null)\n \n execute_py_eval.arg_names = ['code']\n\n def execute_ord(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n\n if isinstance(value, String):\n uni = ord(value.value[0])\n else:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n f\"Value is not String\",\n exec_ctx\n ))\n\n return RTResult().success(Number(uni))\n\n execute_ord.arg_names = [\"value\"]\n\n def execute_bin(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n\n if isinstance(value, Number):\n binop = bin(value.value)\n \n else:\n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n f\"Value is not a Number\",\n exec_ctx\n ))\n\n return RTResult().success(Number(int(binop[2:])))\n\n execute_bin.arg_names = [\"value\"]\n\n def execute_murgn(self, exec_ctx):\n return RTResult().success(String(\"is gay.\"))\n\n execute_murgn.arg_names = []\n\n def execute_lag(self, exec_ctx):\n return RTResult().success(String(\"LanguageArtsGrade\"))\n\n execute_lag.arg_names = []\n\n def execute_global(self, exec_ctx):\n _vars = []\n for x in exec_ctx.symbol_table.parent.symbols:\n _vars.append(x)\n\n return RTResult().success(List(_vars))\n\n execute_global.arg_names = []\n\n def execute_reverse(self, exec_ctx):\n value = exec_ctx.symbol_table.get(\"value\")\n \n if isinstance(value, List):\n value.elements.reverse()\n return RTResult().success(List(value.elements))\n elif isinstance(value, String):\n return RTResult().success(String(value.value[::-1]))\n elif isinstance(value, Number):\n return RTResult().success(Number(int(str(value.value)[::-1])))\n \n return RTResult().failure(RTError(\n self.pos_start, self.pos_end,\n \"Value is not string, number, or list\",\n exec_ctx\n ))\n\n execute_reverse.arg_names = [\"value\"]\n\nBuiltInFunction.print = BuiltInFunction(\"print\")\nBuiltInFunction.print_ret = BuiltInFunction(\"print_ret\")\nBuiltInFunction.input = BuiltInFunction(\"input\")\nBuiltInFunction.input_int = BuiltInFunction(\"input_int\")\nBuiltInFunction.clear = BuiltInFunction(\"clear\")\nBuiltInFunction.is_number = BuiltInFunction(\"is_number\")\nBuiltInFunction.is_string = BuiltInFunction(\"is_string\")\nBuiltInFunction.is_list = BuiltInFunction(\"is_list\")\nBuiltInFunction.is_function = BuiltInFunction(\"is_function\")\nBuiltInFunction.append = BuiltInFunction(\"append\")\nBuiltInFunction.pop = BuiltInFunction(\"pop\")\nBuiltInFunction.extend = BuiltInFunction(\"extend\")\nBuiltInFunction.len = BuiltInFunction(\"len\")\nBuiltInFunction.run = BuiltInFunction(\"run\")\nBuiltInFunction.sqrt = BuiltInFunction(\"sqrt\")\nBuiltInFunction.len = BuiltInFunction(\"len\")\nBuiltInFunction.lower = BuiltInFunction(\"lower\")\nBuiltInFunction.random = BuiltInFunction(\"random\")\nBuiltInFunction.print_end = BuiltInFunction(\"print_end\")\nBuiltInFunction.input_prompt = BuiltInFunction(\"input_prompt\")\nBuiltInFunction.getch = BuiltInFunction(\"getch\")\nBuiltInFunction.py_eval = BuiltInFunction(\"py_eval\")\nBuiltInFunction.ord = BuiltInFunction(\"ord\")\nBuiltInFunction.bin = BuiltInFunction(\"bin\")\nBuiltInFunction.murgn = BuiltInFunction(\"murgn\")\nBuiltInFunction.lag = BuiltInFunction(\"lag\")\nBuiltInFunction._global = BuiltInFunction(\"global\")\nBuiltInFunction.reverse = BuiltInFunction(\"reverse\")","sub_path":"doggoscript/values.py","file_name":"values.py","file_ext":"py","file_size_in_byte":26960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"300251304","text":"# -*-coding:utf-8 -*-\n# Reference:**********************************************\n# @Time : 2019-12-14 13:54\n# @Author : Fabrice LI\n# @File : 20191212_course_schedule_II.py\n# @User : liyihao\n# @Software : PyCharm\n# @Description: There are a total of n courses you have to take, labeled from 0 to n-1.\n#\n# Some courses may have prerequisites, for example to take course 0 you\n# have to first take course 1, which is expressed as a pair: [0,1]\n#\n# Given the total number of courses and a list of prerequisite pairs,\n# return the ordering of courses you should take to finish all courses.\n#\n# There may be multiple correct orders, you just need to return one of them.\n# If it is impossible to finish all courses, return an empty array.\n# Reference:**********************************************\n\"\"\"\nInput: 2, [[1,0]]\nOutput: [0,1]\nExplanation: There are a total of 2 courses to take. To take course 1 you should have finished\n course 0. So the correct course order is [0,1] .\n\nInput: 4, [[1,0],[2,0],[3,1],[3,2]]\nOutput: [0,1,2,3] or [0,2,1,3]\nExplanation: There are a total of 4 courses to take. To take course 3 you should have finished both\n courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.\n So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3]\n\n\nNote:\n\nThe input prerequisites is a graph represented by a list of edges, not adjacency matrices.\nRead more about how a graph is represented.\nYou may assume that there are no duplicate edges in the input prerequisites.\n\n\n\"\"\"\nimport collections\nfrom typing import List\n\n\nclass Solution:\n def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n in_degree, neighbors = self.get_in_degree(numCourses, prerequisites)\n result = []\n queue = collections.deque(n for n in range(numCourses) if in_degree[n] == 0)\n\n while queue:\n n = queue.popleft()\n result.append(n)\n for x in neighbors[n]:\n in_degree[x] -= 1\n if in_degree[x] == 0:\n queue.append(x)\n if len(result) == numCourses:\n return result[::-1]\n else:\n return []\n\n def get_in_degree(self, numCourses, prerequisites):\n result = {}\n neighbors = {}\n for i in range(numCourses):\n result[i] = 0\n neighbors[i] = []\n for i, j in prerequisites:\n result[j] += 1\n neighbors[i].append(j)\n return result, neighbors\n\n\nif __name__ == '__main__':\n s = Solution()\n numCourses = 3\n pre = [[0, 2], [1, 2], [2, 0]]\n print(s.findOrder(numCourses, pre))\n","sub_path":"LintCode/BFS/20191212_course_schedule_II.py","file_name":"20191212_course_schedule_II.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"357928313","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# JB's favorite Seaborn settings for notebooks\nrc={'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}\nsns.set(rc=rc)\n\n# Load in data\nxa_high = np.loadtxt('data/xa_high_food.csv', comments='#')\nxa_low = np.loadtxt('data/xa_low_food.csv', comments='#')\n\n# Make bin boundaries\nbins = np.arange(1700, 2501, 50)\n\n# _ = plt.hist(xa_low, bins=bins)\n\n# Add axis labels\nplt.xlabel('Cross-sectional area (µm$^2$)')\nplt.ylabel('count')\n\n\n# Reset bins, since xa_low has smaller values\nbins = np.arange(1600, 2501, 50)\n\n# Generate the histogram for the low-density fed mother\n_ = plt.hist((xa_low, xa_high), bins=bins)\n\n# Add axis labels\nplt.xlabel('Cross-sectional area (µm$^2$)')\nplt.ylabel('count')\n\n# Add a legend\nplt.legend(('low', 'high'), loc='upper right')\n\nplt.show()\n","sub_path":"matplotlib_practice_21.py","file_name":"matplotlib_practice_21.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"409469286","text":"#!/usr/bin/env python3.4\n# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom time import time, strftime, localtime, sleep\nfrom numpy import array, arange\nimport matplotlib.pyplot as plt\n\nimport ftplib\nfrom os import chdir\n\nfrom subprocess import Popen, PIPE, STDOUT\nfrom shlex import split\n\n\nsins_years = 7\n#Колличество лет, которые нужно выводить в граффике, пока что поставил 7.\n\nspots = {}\nkey = ['date','N','fi','l','r/R','S','Smm','Smaxi','n','?','W','year','month','day']\nmanth = {'янв. ':1,\n 'февр.':2,\n 'марта':3,\n 'апр. ':4,\n 'мая ':5,\n 'июня ':6,\n 'июля ':7,\n 'авг. ':8,\n 'сент.':9,\n 'окт. ':10,\n 'нояб.':11,\n 'дек. ':12}\nfile_data = {}\n\ndef sendToFTP(filename1):\n\n '''\n ftp = ftplib.FTP('158.250.29.123')\n ftp.login('sun', 'pasworld')\n ftp_server_dir = '/usr/local/solar/apache/htdocs/web/Soln_Dann/'\n ftp_file_dir = '/home/pa_antya/spbu/sun_spots'\n '''\n ftp = ftplib.FTP('www.solarstation.ru')\n ftp.login('sun', 'pasworld')\n ftp_server_dir = '/www/solarstation.ru/lastdata/Graphs/'\n ftp_file_dir = '/home/pa_antya/spbu/sun_spots/'\n ftp.cwd(ftp_server_dir)\n chdir(ftp_file_dir)\n print(ftp.retrlines('LIST'))\n file = open(filename1, 'w')\n print('send to ftp: begin')\n #ftp.storlines('STOR_'+ filename1, file.read)\n\n ftp.retrbinary('Wolf_NS.gif', file.read) #скачивание файлов и запись в filename\n print('send to ftp: done.')\n file.close()\n ftp.quit()\n\n\ndef printBase():\n for i in spots:\n for j in spots[i]:\n for l in spots[i][j]:\n print('{} {} {}:'.format(i,j,l))\n for k in spots[i][j][l]:\n print('{}'.format(k))\n print()\n\ndef summSpotsDay(a,st):\n summ_W = 0\n for i in a:\n summ_W += i[st]\n return summ_W\n\ndef summSpotsMonth(a,st):\n summ_W_m = 0\n for i in a:\n summ_W_m += summSpotsDay(a[i],st)\n return summ_W_m / len(a)\n\ndef creatAndSendPlot():\n fig, ax = plt.subplots()\n #plt.axes([0.05, 0.05, 1, 1])\n fig.subplots_adjust(left=0.085, bottom=0.1, top=0.85, right = 0.985)\n\n #plt.figure(1, figsize=(10, 8))\n x = array([datetime(i, j, k) for i in file_year[-sins_years:] for j in spots[i] for k in spots[i][j]])\n y = array([summSpotsDay(spots[i][j][k],'W') for i in file_year[-sins_years:] for j in spots[i] for k in spots[i][j]])\n ax.plot(x,y,linewidth=0.5,label='Daily',linestyle='-', color = '#46A0A6',markersize=6)\n\n x = array([datetime(i, j, 1) for i in file_year[-sins_years:] for j in spots[i]])\n y = array([summSpotsMonth(spots[i][j],'W') for i in file_year[-sins_years:] for j in spots[i]])\n ax.plot(x,y,linewidth=1.5,label='Monthly',linestyle='-', color = '#361686',markersize=6)\n del(x)\n del(y)\n\n #color = '#5636A6, 'Monthly'\n ax.legend(loc=2) # upper left corner\n\n ax.set_xlabel('Year', fontsize=16)\n ax.set_ylabel('W', fontsize=16)\n ax.set_title('Kislovodsk Wolf number'.format())\n\n #fig.show()\n# plt.grid(True)\n plotImg = './{}.png'.format('KWN')\n fig.savefig(plotImg, format='png')#, dpi=600)\n #fig.savefig('./{}.png'.format('Kislovodsk Wolf number grid'), dpi=600)\n plt.close(fig)\n sendToFTP(plotImg[2:])\n del(plotImg)\n\ndef datParser(f,spots_all):\n file = open(f,'r')\n for line in file:\n line_in = line\n if len(line_in) > 20:\n if line_in[26] == ' ':\n line_in = line_in[:24] + '0.0' + line_in[27:]\n if line_in[32] == ' ':\n line_in = line_in[:30] + '0.0' + line_in[33:]\n line_list = line_in.strip().split()\n spot = {}\n for i in arange(len(line_list)):\n if i != 9 and i != 0:\n spot[key[i]] = float(line_list[i])\n else:\n spot[key[i]] = line_list[i]\n spot['year'] = int(line_list[0][:4])\n spot['month'] = int(line_list[0][4:6])\n spot['day'] = int(line_list[0][6:8])\n if len(line_list) == 1:\n for i in arange(len(line_list)):\n if i != 0:\n spot[key[i]] = -1\n else:\n spot[key[i]] = line_list[i]\n spot['W'] = 0\n\n else:\n spot['W'] = int(spot['n']) + 10\n spot['N'] = int(spot['N'])\n spot['n'] = int(spot['n'])\n\n '''\n for i in spot:\n print('{} = {}'.format(i,spot[i]),end=', ')\n print()\n '''\n spots_all.append(spot)\n file.close()\n\ndef setBaseSpotsForDate(spots_all):\n for i in spots_all:\n if int(i['date'][:4]) not in spots:\n spots[int(i['date'][:4])] = {}\n if int(i['date'][4:6]) not in spots[int(i['date'][:4])]:\n spots[int(i['date'][:4])][int(i['date'][4:6])] = {}\n if int(i['date'][6:8]) not in spots[int(i['date'][:4])][int(i['date'][4:6])]:\n spots[int(i['date'][:4])][int(i['date'][4:6])][int(i['date'][6:8])] = []\n\n if i not in spots[int(i['date'][:4])][int(i['date'][4:6])][int(i['date'][6:8])]:\n# if i['N'] not in [j['N'] for j in spots[int(i['date'][:4])][int(i['date'][4:6])][int(i['date'][6:8])]]:\n spots[int(i['date'][:4])][int(i['date'][4:6])][int(i['date'][6:8])].append(i)\n else:\n print('0'.format(i,),end='')\n\n\ndef editFileData(file_data, file_data_tmp, file_year, sins_years):\n for i in file_data_tmp:\n if i not in file_data:\n file_data[i] = {}\n for j in file_data_tmp[i]:\n if j not in file_data[i]:\n file_data[i][j] = {}\n file_data[i][j] = file_data_tmp[i][j]\n file_data_tmp = {}\n filee = ['k{}.dat'.format(i) for i in file_year[-sins_years:]]\n spots_all = []\n for f in filee:\n if f.find('.dat') != -1 and f[-1] != '~':\n datParser(f,spots_all)\n setBaseSpotsForDate(spots_all)\n\ndef isChengFileData(file_data, file_data_tmp, file_year, sins_years):\n filee = ['k{}.dat'.format(i) for i in file_year[-sins_years:]]\n for i in filee:\n if i not in file_data:\n print('file_date ещё пуста. Нет файла {} в ней'.format(i))\n return True\n for i in filee:\n if file_data[i]['day'] != file_data_tmp[i]['day'] or \\\n file_data[i]['size'] != file_data_tmp[i]['size'] or \\\n file_data[i]['h'] != file_data_tmp[i]['h'] or \\\n file_data[i]['min'] != file_data_tmp[i]['min'] or \\\n file_data[i]['month'] != file_data_tmp[i]['month'] or \\\n file_data[i]['year'] != file_data_tmp[i]['year']:\n print('Файл {} изменили.'.format(i))\n return True\n return False\n\n\ndef parserFold(file_data_tmp ,file_year):\n manth = {1:'янв. ',\n 2:'февр.',\n 3:'марта',\n 4:'апр. ',\n 5:'мая ',\n 6:'июня ',\n 7:'июля ',\n 8:'авг. ',\n 9:'сент.',\n 10:'окт. ',\n 11:'нояб.',\n 12:'дек. '}\n # содержит :'year', 'month', 'day', 'h', 'min','file'\n cmd = \"ls -lt\"\n args = split(cmd)\n p = Popen(args, stdout=PIPE, stderr=STDOUT)\n files = p.communicate()[0].decode('utf-8').rstrip('\\n').split('\\n')[1:]\n for f in files:\n if (f[54:].find('.dat') != -1 and f[-1] != '~'):\n data = {}\n #print(f)\n if f[49] == ':':\n data['year'] = int(strftime('%Y',localtime(time())))\n data['h'] = int(f[47:49])\n data['min'] = int(f[50:52])\n else:\n data['year'] = int(f[46:51])\n data['size'] = int(f[30:37])\n data['month'] = 0\n for i in manth:\n if f[38:43] == manth[i]:\n data['month'] = i\n data['day'] = f[44:46]\n data['file'] = f[53:]\n file_data_tmp[data['file']] = data\n for i in file_data_tmp:\n file_year.append(int(i[1:i.find('.')]))\n file_year.sort()\n\n\n'''\nfor i in spots[2016][8][12]:\n print(i)\nprint(len(spots[2016][8][12]))\nfor i in spots_all[-7:-1]:\n print(i)\n'''\nit = 0\nfile_data_tmp = {}\nwhile True:\n it = it + 1\n file_year = []\n parserFold(file_data_tmp,file_year)\n if isChengFileData(file_data, file_data_tmp, file_year, sins_years):\n editFileData(file_data, file_data_tmp, file_year, sins_years)\n print('edit dat, and creatPlot')\n creatAndSendPlot()\n print('complite 100%')\n '''\n if it % 120 == 0:\n print(it / 12)\n '''\n sleep(5)#(60*60)# Время до следующей проверки папки\n","sub_path":"sun_spots_plot.py","file_name":"sun_spots_plot.py","file_ext":"py","file_size_in_byte":8897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"103771826","text":"\"\"\"\n store a mesh constructor.\n a mesh contains an array of Vertices Faces and HalfEdges.\n\"\"\"\nfrom Face import *\nfrom HalfEdge import *\nfrom Vertex import *\nfrom Vector3D import *\n\n\nclass Mesh(object):\n def __init__(self):\n self.faces = []\n self.vertices = []\n self.edges = []\n\n def read_from_off(self, path):\n with open(path) as f:\n counter = 0\n h = 0\n n = 0\n m = 0\n for line in f:\n if counter == 0:\n if line.split()[0] != 'OFF':\n print('This is not an OFF file')\n break\n elif counter == 1:\n (n, m, mm) = map(int, line.split())\n elif counter < n + 2:\n (x, y, z) = map(float, line.split())\n self.vertices.append(Vertex(p=Vector3D(x, y, z), index=counter - 2))\n elif counter < n + m + 2:\n indices = [int(x) for x in line.split()]\n v = indices[0]\n indices = indices[1:]\n h = []\n for i in range(v - 1):\n h.append(self.find_edge(indices[i], indices[i+1]))\n h.append(self.find_edge(indices[v - 1], indices[0]))\n for i in range(v - 1):\n h[i].nh = h[i + 1]\n h[i + 1].ph = h[i]\n h[v - 1].nh = h[0]\n h[0].ph = h[v - 1]\n face = Face(side=h[v - 1], n=v, index=len(self.faces))\n self.faces.append(face)\n for i in range(v):\n h[i].polygon = face\n counter += 1\n\n def find_edge(self, i1, i2):\n \"\"\"\n return the index of halfedge from vertex i1 to i2\n if there is no such halfedge, build 2 halfedges between i1 an i2\n :param i1: index of source vertex\n :param i2: index of target vertex\n :return:\n \"\"\"\n v1 = self.vertices[i1]\n v2 = self.vertices[i2]\n h = v1.out\n while h:\n if h.target.index == i2:\n return h\n h = h.last\n index = len(self.edges)\n h1 = HalfEdge.HalfEdge(source=v1, target=v2, index=index, last=v1.out)\n h2 = HalfEdge.HalfEdge(source=v2, target=v1, index=index + 1, last=v2.out, th=h1)\n h1.th = h2\n v1.out = h1\n v2.out = h2\n v1.n += 1\n v2.n += 1\n self.edges.append(h1)\n self.edges.append(h2)\n return h1\n\n def write_off(self, name='test.off'):\n \"\"\"\n writes the data out to an OFF file named fname\n :param name: the file name\n :return: no return\n \"\"\"\n with open(name, 'w') as f:\n f.write('OFF\\n')\n f.write('{} {} 0\\n'.format(len(self.vertices), len(self.faces)))\n\n for v in self.vertices:\n f.write('{}\\n'.format(v.p))\n for face in self.faces:\n temp = []\n f.write('{}'.format(face.n))\n e = face.side\n for i in range(face.n):\n f.write(' {}'.format(e.target.index))\n e = e.nh\n f.write('\\n')\n\n print('OFF written to:', name)\n\n def set_face_normal(self):\n for face in self.faces:\n face.set_normal()\n \n def set_vertex_normal(self):\n for v in self.vertices:\n temp = v.get_normal()\n\nif __name__ == '__main__':\n aaa = Mesh()\n aaa.read_from_off(path = 'data/chair_0001.off')\n print(dir(aaa))\n aaa.write_off()\n\n","sub_path":"Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"194448307","text":"# Написать класс FolderInformation с информацией о заданной дериктории.\r\n#\r\n# 1. Инициализация класса.\r\n# В качестве параметра будем использовать относительный путь к папке.\r\n# Пример для папки test, которая находится в той же дирректории, что и запускаемый файл:\r\n# folder_name = os.path.join('test')\r\n# test_folder = FolderInformation(folder_name)\r\n#\r\n# 2. Атрибуты класса:\r\n# Атрибут: files_count\r\n# тип: число,\r\n# описание: количество файлов в папке\r\n# пример:\r\n# print(test_folder.files_count)\r\n# >>> 2\r\n#\r\n# Атрибут: folders_count\r\n# тип: число,\r\n# описание: количество подпапок в папке\r\n# пример:\r\n# print(test_folder.folders_count)\r\n# >>> 1\r\n#\r\n# Атрибут: information\r\n# тип: словарь,\r\n# описание: словарь вида \"имя_папки_или_файла\": \"тип_файл_или_папка\"\r\n# {'name_folder': 'folder', 'name_file': 'file', ...}\r\n# Пример:\r\n# print(test_folder.information)\r\n# >>> {'tmp': 'folder', 'test.txt': 'file', 'class_test.py':'file'}\r\n#\r\n# Атрибут: files_value\r\n# тип: словарь,\r\n# описание: словарь вида \"имя_файла\": \"размер_файла_в_байтах\" (размер можно получить с помощью модуля os)\r\n# Пример:\r\n# print(test_folder.files_value)\r\n# >>> {'test.txt': 1234, 'class_test.py': 234}\r\n#\r\n# Заполнение атрибутов реализовать при инициализации класса.\r\n#\r\n# 3. Методы класса.\r\n# Метод: clear_folder\r\n# описание: при вызове метода из папки удаляется все ее содержимое\r\n#\r\n# *Метод: safe_clear_folder\r\n# описание: при вызове метода все содержимое папки помещается в архив с именем папки, а затем удаляется все содержимое указанной папки кроме этого архива.\r\n\r\n\r\nimport os\r\nimport shutil\r\n\r\nclass FolderInformation():\r\n \"\"\" class with information about the given directory\"\"\"\r\n\r\n def __init__(self, folder_name):\r\n self.folder_name = folder_name\r\n files_couter = 0\r\n folders_counter = 0\r\n d=dict()\r\n size=dict()\r\n for obg in os.listdir(folder_name):\r\n if os.path.isfile(os.path.join(folder_name, obg)):\r\n files_couter += 1\r\n inf=\"file\"\r\n value = os.path.getsize(os.path.join(folder_name, obg))\r\n size[obg] = value\r\n if os.path.isdir(os.path.join(folder_name, obg)):\r\n folders_counter += 1\r\n inf=\"folder\"\r\n d[obg] = inf\r\n\r\n\r\n self.files_count = files_couter\r\n '''returns the number of files in a folder'''\r\n self.folders_count = folders_counter\r\n '''returns the number of subfolders in a folder'''\r\n self.information=d\r\n '''returns a dictionary of objects and their types'''\r\n self.files_value=size\r\n\r\n\r\n def clear_folder(self):\r\n for root, dirs, files in os.walk(folder_name):\r\n for _file in files:\r\n if os.path.isfile(os.path.join(root, _file)):\r\n os.remove(os.path.join(root, _file))\r\n if folder_name != root and os.path.isdir(root):\r\n shutil.rmtree(root, ignore_errors=True, onerror=None)\r\n\r\n\r\n def safe_clear_folder(self):\r\n shutil.make_archive(str(arhiv_name), 'gztar', folder_name)\r\n test_folder.clear_folder()\r\n\r\n\r\n\r\n\r\n\r\nfull_path = os.getcwd()\r\nfolder_name = os.path.join(full_path, 'test')\r\ntest_folder = FolderInformation(folder_name)\r\nw=os.path.split(folder_name)\r\narhiv_name = w[-1]\r\nshutil.make_archive(str(arhiv_name), 'gztar', folder_name)\r\n\r\nprint(\"files count\", test_folder.files_count)\r\nprint(\"directoris count\", test_folder.folders_count)\r\nprint (\"folder information\", test_folder.information)\r\nprint (\"files sizes\", test_folder.files_value)\r\n# print (\"clear folder\", test_folder.clear_folder())\r\nprint (\"sefe and clear folder\", test_folder.safe_clear_folder())\r\n","sub_path":"1_class.py","file_name":"1_class.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"50132611","text":"__author__ = 'Alexey'\n\n\nimport webapp2\nimport datetime\nimport Cookie\nimport time\nfrom models.user import User\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nfrom google.appengine.ext import ndb\nfrom webapp2_extras import jinja2\n\n\ndef jinja2_factory(app):\n j = jinja2.Jinja2(app)\n j.environment.filters.update({\n # Set filters.\n # ...\n })\n j.environment.globals.update({\n # Set global variables.\n 'uri_for': webapp2.uri_for,\n # ...\n })\n return j\n\n\nclass BaseHandler(webapp2.RequestHandler):\n\n user = None\n\n @webapp2.cached_property\n def jinja2(self):\n # Returns a Jinja2 renderer cached in the app registry.\n return jinja2.get_jinja2(factory=jinja2_factory)\n\n def render_tpl(self, _template, **context):\n # Renders a template and writes the result to the response.\n rv = self.jinja2.render_template(_template, **context)\n self.response.write(rv)\n\n def models_to_dict(self, models):\n return [self.model_to_dict(p) for p in models]\n\n def model_to_dict(self, model):\n output = model.to_dict()\n\n for key in output:\n value = output[key]\n if isinstance(value, datetime.date):\n # Convert date/datetime to MILLISECONDS-since-epoch (JS \"new Date()\").\n ms = time.mktime(value.utctimetuple()) * 1000\n ms += getattr(value, 'microseconds', 0) / 1000\n output[key] = int(ms)\n elif isinstance(value, ndb.GeoPt):\n output[key] = {'lat': value.lat, 'lon': value.lon}\n elif isinstance(value, ndb.Key) or isinstance(value, ndb.BlobKey):\n output[key] = value.urlsafe()\n\n output['key'] = model.key.urlsafe()\n return output\n\n def response_json(self, obj):\n self.response.content_type = 'application/json'\n self.response.write(json.dumps(obj))\n\n def get_request_data(self):\n return json.loads(self.request.body)\n\n def initialize(self, request, response):\n super(BaseHandler, self).initialize(request, response)\n self.user = self.get_current_user()\n\n def get_current_user(self):\n token = self.request.cookies.get('token', '')\n return User.query(User.auth_token == token).fetch()\n\n def set_cookie(self, name, value, expires=None):\n \"\"\"Set a cookie\"\"\"\n if value is None:\n value = 'deleted'\n expires = datetime.timedelta(minutes=-50000)\n jar = Cookie.SimpleCookie()\n jar[name] = value\n jar[name]['path'] = u'/'\n if expires:\n if isinstance(expires, datetime.timedelta):\n expires = datetime.datetime.now() + expires\n if isinstance(expires, datetime.datetime):\n expires = expires.strftime('%a, %d %b %Y %H:%M:%S')\n jar[name]['expires'] = expires\n _header = jar.output().split(u': ', 1)\n _header = [str(i) for i in _header]\n self.response.headers.add_header(*_header)","sub_path":"tools/basehandler.py","file_name":"basehandler.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"24009269","text":"'''\nYou are given a positive integer. Your function should calculate the product of the digits excluding any zeroes.\n\nFor example: The number given is 123405. The result will be 1*2*3*4*5=120 (don't forget to exclude zeroes).\n\nInput: A positive integer.\nOutput: The product of the digits as an integer.\n\n'''\n\ndef checkio(number: int) -> int:\n answer = 1\n list = str(number)\n\n for i in list:\n if i != \"0\":\n n = int(i)\n answer *= n\n return answer\n\n\nif __name__ == '__main__':\n print('Example:')\n print(checkio(123405))\n\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n\n assert checkio(999) == 729\n assert checkio(123405) == 120\n assert checkio(1000) == 1\n assert checkio(1111) == 1\n print(\n \"Coding complete? Click 'Check' to review your tests and earn cool rewards!\")","sub_path":"Elementary/ex_05.py","file_name":"ex_05.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"486436644","text":"import pygame, random\nfrom background import Background\n\nclass Pipes:\n def __init__(self):\n self.pipe = pygame.image.load(\"assets/sprites/knife.png\").convert_alpha()\n self.pipe = pygame.transform.scale(self.pipe, (50, 350))\n self.pipe_list = [] #throw in a lot of rectangles\n self.SPAWNPIPE = pygame.USEREVENT\n pygame.time.set_timer(self.SPAWNPIPE, 1000)\n self.pipe_height = [300, 350, 400, 450, 500, 550]\n self.screen = Background().screen\n\n def create_pipe(self):\n random_pipe_position = random.choice(self.pipe_height)\n bottom_pipe = self.pipe.get_rect(midtop = (1024, random_pipe_position))\n top_pipe = self.pipe.get_rect(midbottom = (1024, random_pipe_position - 250))\n return bottom_pipe, top_pipe\n\n def move_pipes(self, pipes):\n for p in pipes:\n p.centerx -= 2.5\n return pipes\n\n def draw_pipes(self, pipes):\n for p in pipes:\n if p.bottom >= 600:\n self.screen.blit(self.pipe, p)\n else: \n flip_pipe = pygame.transform.flip(self.pipe, False, True)\n self.screen.blit(flip_pipe, p)","sub_path":"minigame-1/pipes_all.py","file_name":"pipes_all.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"349605704","text":"#!/usr/bin/env python3\n# author:Alnk(李成果)\nimport os\nimport time\nfrom shopping_car.conf.settings import user_info_path\nfrom shopping_car.core import login\nfrom shopping_car.core.basic import Basic\nfrom atm.core import payment\n\n\nclass Settlement(Basic): # 结算类\n\n def settlemnt(self):\n user_info_path_file = os.path.join(user_info_path, '%s.json' % login.user_name)\n user_info_dict = self.read_info(user_info_path_file)\n shopping_car = user_info_dict['shopping_car'] # 用户购物车存储字典\n\n while 1:\n total_money = 0\n for k in shopping_car:\n money = shopping_car[k]['price'] * shopping_car[k]['number']\n total_money += money # 需要支付的总额\n print('\\n需要支付金额[%s]\\n' % total_money)\n\n if total_money == 0:\n print('没有商品不需要支付哦,先去选购商品吧\\n\\n')\n break\n\n # 调用atm接口进行支付\n p = payment.PayMent()\n ret = p.payment(total_money)\n if ret == 2:\n print('支付成功,开始清理购物车')\n time.sleep(3)\n del shopping_car\n user_info_dict['shopping_car'] = {} # 清理购物车\n print('aaaaaaa', user_info_dict)\n self.write_info(user_info_dict, user_info_path_file) # 写入文件\n print('购物车已经清空\\n')\n return True\n elif ret == 0:\n print('银行卡号或者密码输错了\\n')\n return\n elif ret == 1:\n print('余额不足!\\n')\n print('请删除一些商品然后支付\\n')\n while 1:\n for k in shopping_car:\n print(k,shopping_car[k]['price'],shopping_car[k]['number'])\n action = input('\\n请输入商品名称>>>')\n if shopping_car.get(action):\n # 商品数量为1,直接删除,否则减少商品的数量\n if shopping_car[action]['number'] == 1:\n del shopping_car[action]\n elif shopping_car[action]['number'] > 1:\n shopping_car[action]['number'] -= 1\n break\n","sub_path":"08 day08/03 作业 day08/shopping_car/core/settlement.py","file_name":"settlement.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"243704821","text":"import pandas as pd\r\nimport patsy\r\nimport numpy as np\r\nimport statsmodels.api as sm\r\nimport statsmodels.formula.api as smf\r\nimport statsmodels.tools as sm_tools\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\r\nfrom sklearn import tree\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import mean_squared_error\r\nimport graphviz\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.utils import check_array\r\n\r\ndf_pierce_county = pd.read_csv(\"m1_c1_kmean_3clusters.csv\", sep=',', header=0)\r\n\r\nprint(df_pierce_county.describe())\r\nprint(df_pierce_county.info())\r\n\r\ndef mean_absolute_percentage_error(y_test, y_pred):\r\n y_test = y_test.values\r\n y_test = y_test.reshape(-1,1)\r\n y_pred = y_pred.reshape(-1,1)\r\n y_test = check_array(y_test)\r\n y_pred = check_array(y_pred)\r\n return np.mean(np.abs((y_test - y_pred)/y_test)) * 100\r\n\r\ny = df_pierce_county['sale_price']\r\n\r\nprint(df_pierce_county.dtypes)\r\n\r\ndf_pierce_county['View_Quality'] = df_pierce_county['View_Quality'] .astype('category')\r\ndf_pierce_county['Waterfront_Type'] = df_pierce_county['Waterfront_Type'] .astype('category')\r\ndf_pierce_county['withInSewerImprovement'] = df_pierce_county['withInSewerImprovement'] .astype('category')\r\ndf_pierce_county['near_firestation'] = df_pierce_county['near_firestation'] .astype('category')\r\ndf_pierce_county['near_hospital'] = df_pierce_county['near_hospital'] .astype('category')\r\ndf_pierce_county['near_libraries'] = df_pierce_county['near_libraries'] .astype('category')\r\ndf_pierce_county['near_policestation'] = df_pierce_county['near_policestation'] .astype('category')\r\ndf_pierce_county['near_waterplants'] = df_pierce_county['near_waterplants'] .astype('category')\r\ndf_pierce_county['condition'] = df_pierce_county['condition'] .astype('category')\r\ndf_pierce_county['quality'] = df_pierce_county['quality'] .astype('category')\r\ndf_pierce_county['attic_finished_square_feet'] = df_pierce_county['attic_finished_square_feet'] .astype('category')\r\ndf_pierce_county['basement_square_feet'] = df_pierce_county['basement_square_feet'] .astype('category')\r\ndf_pierce_county['basement_finished_square_feet'] = df_pierce_county['basement_finished_square_feet'] .astype('category')\r\ndf_pierce_county['porch_square_feet'] = df_pierce_county['porch_square_feet'] .astype('category')\r\ndf_pierce_county['attached_garage_square_feet'] = df_pierce_county['attached_garage_square_feet'] .astype('category')\r\ndf_pierce_county['detached_garage_square_feet'] = df_pierce_county['detached_garage_square_feet'] .astype('category')\r\ndf_pierce_county['fireplaces'] = df_pierce_county['fireplaces'] .astype('category')\r\ndf_pierce_county['near_private_school'] = df_pierce_county['near_private_school'] .astype('category')\r\ndf_pierce_county['near_elementary_school'] = df_pierce_county['near_elementary_school'] .astype('category')\r\ndf_pierce_county['near_high_school'] = df_pierce_county['near_high_school'] .astype('category')\r\ndf_pierce_county['near_college'] = df_pierce_county['near_college'] .astype('category')\r\ndf_pierce_county['Crime_Num'] = df_pierce_county['Crime_Num'].fillna(0)\r\n\r\nprint(df_pierce_county.columns)\r\n\r\nX = df_pierce_county[['Land_Net_Acres','View_Quality', \r\n 'Waterfront_Type', 'Crime_Num', 'withInSewerImprovement',\r\n 'near_firestation', 'near_hospital', 'near_libraries',\r\n 'near_policestation', 'near_waterplants', 'square_feet',\r\n 'condition', 'quality', 'attic_finished_square_feet',\r\n 'basement_square_feet', 'basement_finished_square_feet',\r\n 'porch_square_feet', 'attached_garage_square_feet',\r\n 'detached_garage_square_feet', 'fireplaces', 'stories', 'bedrooms',\r\n 'bathrooms', 'year_built', 'near_private_school', 'near_elementary_school', 'near_college', 'near_high_school']]\r\n\r\n\r\n# ------------------------------------------------------------- #\r\n# --------------------- Neural Network ------------------------ #\r\n# ------------------------------------------------------------- #\r\n\r\n\r\n# -------------------- Pierce County Model ------------------------##\r\n# Convert all categorical variables to a matrix of zeros and ones\r\n\r\ndf_pierce_county = pd.get_dummies(df_pierce_county)\r\nprint(df_pierce_county.head())\r\n\r\n## Standardizing data improves computations and makes sure all features are weighted equally for NNs\r\n#scaler = StandardScaler()\r\n#df_pierce_county = scaler.fit_transform(df_pierce_county)\r\n\r\n## Split the dataset into training and testing data\r\nX_train_PC, X_test_PC, y_train_PC, y_test_PC = train_test_split(df_pierce_county,y,test_size =0.2,random_state=109)\r\n\r\nnn1_m1_c1_3clust = MLPClassifier(hidden_layer_sizes = (3), activation='logistic', random_state=109)\r\nnn1_m1_c1_3clust.fit(X_train_PC, y_train_PC)\r\ny_pred_nn1_m1_c1_3clust = nn1_m1_c1_3clust.predict(X_test_PC)\r\nprint(\"Neural Network Classifier Model Logistic: PierceCounty\")\r\n\r\nMSE = mean_squared_error(y_test_PC, y_pred_nn1_m1_c1_3clust)\r\nprint(\"MSE for NN with 1 hidden layer and logistic activation :\",MSE)\r\n\r\nMAPE = mean_absolute_percentage_error(y_test_PC, y_pred_nn1_m1_c1_3clust)\r\nprint(\"MAPE: \", MAPE)\r\n\r\nnn1_m1_c1_3clust_node2 = MLPClassifier(hidden_layer_sizes = (3,3), activation='logistic', random_state=109)\r\nnn1_m1_c1_3clust_node2.fit(X_train_PC, y_train_PC)\r\ny_pred_nn1_m1_c1_3clust_node2 = nn1_m1_c1_3clust_node2.predict(X_test_PC)\r\nprint(\"Neural Network Classifier Model Logistic: PierceCounty\")\r\n\r\nMSE = mean_squared_error(y_test_PC, y_pred_nn1_m1_c1_3clust_node2)\r\nprint(\"MSE for NN with 2 hidden layers and logistic activation :\",MSE)\r\n\r\nMAPE = mean_absolute_percentage_error(y_test_PC, y_pred_nn1_m1_c1_3clust_node2)\r\nprint(\"MAPE: \", MAPE)\r\n\r\n\"\"\"\r\nThis code is used to visualize\r\nhttp://deeplearning.net/tutorial/mlp.html#tips-and-tricks-for-training-mlps\r\nUsage: put the following on 389th line\r\n title = \"whatever you want\"\r\n plot_pca(classifier, x, train_set_x, train_set_y, index=epoch, title=title)\r\n\"\"\"\r\n\r\ndef plot_pca(classifier, x_symbol, X, y, index=0,\r\n title=None, sampling=True):\r\n import itertools\r\n import matplotlib.pyplot as plt\r\n from sklearn.decomposition import PCA\r\n\r\n fig, axes = plt.subplots(3, 3, figsize=(5, 5))\r\n axes = axes.flatten()\r\n\r\n apply_hidden = plt.function(inputs=[x_symbol], outputs=classifier.hiddenLayer.output)\r\n z_data = apply_hidden(X.get_value())\r\n labels = y.eval()\r\n\r\n numbers = range(10)\r\n colors = {0: '#263B1C', 1: '#263374', 2: '#3568B5', 3: '#8A5DDF', 4: '#DBB8EE',\r\n 5: '#46B1C9', 6: '#84C0C6', 7: '#9FB7B9', 8: '#BCC1BA', 9: '#F2E2D2'}\r\n\r\n for ax, prod in zip(axes, zip(numbers[:-1], numbers[1:])):\r\n # print(ax, prod)\r\n pca = PCA(n_components=2)\r\n indexer = np.arange(len(labels))[np.in1d(labels, prod)]\r\n label = labels[indexer]\r\n z = z_data[indexer]\r\n pca.fit(z)\r\n z_pca = pca.transform(z)\r\n\r\n if sampling:\r\n indexer = np.arange(len(label))\r\n np.random.shuffle(indexer)\r\n indexer = indexer[:300]\r\n z_pca = z_pca[indexer]\r\n label = label[indexer]\r\n\r\n _c = [colors[l] for l in label]\r\n ax.scatter(z_pca[:, 0], z_pca[:, 1], color=_c, alpha=0.3)\r\n ax.xaxis.set_visible(False)\r\n ax.yaxis.set_visible(False)\r\n ax.set_title('{0}, {1}'.format(prod[0], prod[1]), size='small')\r\n plt.show()\r\n if title is not None:\r\n fig.suptitle(title)\r\n plt.savefig('pca_{0:02d}.png'.format(index))","sub_path":"Neural Network/neural_network_m1_c1_3clusters.py","file_name":"neural_network_m1_c1_3clusters.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"125399538","text":"# 多颜色多模板匹配示例\n#\n# 这个例子显示了使用OpenMV的多色跟踪。\n\nimport sensor, image, time\nfrom image import SEARCH_EX, SEARCH_DS\nfrom pyb import UART\nfrom error_color import color_track\n\n# 颜色跟踪阈值(L Min, L Max, A Min, A Max, B Min, B Max)\n# 下面的阈值跟踪一般红色/绿色的东西。你不妨调整他们...\n#blue=[(34, 40, 10, 18, -60, -40)]\n#green=[(36, 58, -39, -24, -3, 19)]#green\n#red=[(41, 54, 67, 80, 30, 63)] # generic_blue_thresholds,green(36, 58, -39, -24, -3, 19),\n #blue (41, 54, 57, 80, 16, 63)\nred = [(41, 54, 67, 80, 30, 63),(35,55,38,65,10,45),(40,53,62,72,36,47),(38,45,60,70,35,44)]\nblue=[(30,40,-10,20,-45,-30),(35,40,13,20,-60,-43),(40,47,-3,6,-36,-26),(30 ,34,12,21,-58,49),\n (38,58,-6,23,-63,-27)]\ngreen = [(30,40,-30,-20,5,15),(25,33,-27,-23,2,13),(20,26,-25,-20,3,12),(20,39,-30,-20,0,17),\n (23,33,-27,-24,5,15),(31,38,-35,-28,8,17),(17,26,-30,-20,7,17),(22,29,-28,-25,6,12),\n (65,70,-24,-16,11,20),(47,50,-50,-45,35,40)]\nyellow = [(63,70,-3,5,59,68),(71,77,-6,9,35,56)]\nblack = [(1,8,-10,10,-10,10),(3,8,-4,6,8,12)]\nbasketball = [(14,26,6,24,3,20)]\nvolleyball = [(66,70,-5,3,56,70),(30,36,0,9,28,45),(75,87,0,17,30,50)]\nsoccerball = [(37,45,-23,-1,-33,-28),(27,33,-9,1,-28,20),(42,48,-11,-8,-26,-25)]\nwhite = [72,79,-1,3,-8,1]\n#不要超过16个颜色阈值\nuart = UART(3,19200)\n\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QQVGA)\nsensor.skip_frames(time = 2000)\nsensor.set_auto_gain(False) # must be turned off for color tracking\nsensor.set_auto_whitebal(False) # must be turned off for color tracking\nsensor.set_vflip(False)\nsensor.set_hmirror(False)\nclock = time.clock()\n\n# 只有比“pixel_threshold”多的像素和多于“area_threshold”的区域才被\n# 下面的“find_blobs”返回。 如果更改相机分辨率,\n# 请更改“pixels_threshold”和“area_threshold”。 “merge = True”合并图像中所有重叠的色块。\nchange=[0,0,0,0]\n\n\ndef find_max(blobs):\n max_size=0\n for blob in blobs:\n if blob[2]*blob[3] > max_size:\n max_blob=blob\n max_size = blob[2]*blob[3]\n return max_blob\n#balls = [\"basketball3.pgm\",\"football1.pgm\",\"volleyball2.pgm\"]\n\nzfx_tempaltes = [\"zfx1.pgm\",\"zfx2.pgm\",\"zfx3.pgm\",\"zfx4.pgm\",\"zfx5.pgm\",\n \"zfx6.pgm\",\"zfx7.pgm\",\"zfx8.pgm\",\"zfx9.com\"]\nyx_tempaltes = [\"yx1.pgm\",\"yx2.pgm\",\"yx3.pgm\",\"yx4.pgm\",\"yx5.pgm\"]\nsjx_tempaltes = [\"sjx1.pgm\",\"sjx2.pgm\",\"sjx3.pgm\"]\ntemplates = [zfx_tempaltes,yx_tempaltes,sjx_tempaltes] #保存多个模板\n\ndef cal():\n flag=0\n zfx=0\n yx=0\n sjx=0\n r=[0,0,0,0]\n key = 0\n G=0\n while(True):\n key=uart.readchar()\n if key==1:\n break\n sum_zfx=0\n sum_yx=0\n sum_sjx=0\n dis=0\n clock.tick()\n img = sensor.snapshot(1.8)\n #img1 = img.binary(blue)\n\n for x in templates :\n img = sensor.snapshot(1.8)\n img = img.to_grayscale()\n flag = 0\n for t in x:\n clock.tick()\n img = sensor.snapshot(1.8)\n img = img.to_grayscale()\n\n template = image.Image(t)\n #ball = image.Image(t)\n if x == zfx_tempaltes:\n r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))\n if r:\n print(t)\n zfx = r\n sum_zfx=sum_zfx+1\n elif x == yx_tempaltes:\n for c in img.find_circles(threshold = 3500, x_margin = 10, y_margin = 10, r_margin = 10,r_min = 2, r_max = 100, r_step = 2):\n img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))\n if c.r()>1:\n x=c.x()-c.r()\n y=c.y()-c.r()\n w=c.r()*2\n h=c.r()*2\n r=[x,y,w,h]\n yx = r\n sum_yx=20\n elif x == sjx_tempaltes:\n r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))\n if r:\n print(t)\n sjx = r\n sum_sjx=sum_sjx+1\n if (sum_zfx>sum_yx and sum_zfx>sum_sjx) :\n r=zfx\n t=8#\"zfx\"\n elif (sum_yx>sum_zfx and sum_yx>sum_sjx) :\n r=yx\n t=9#\"yx\"\n else:\n r=sjx\n t=10#\"sjx\"\n if (sum_zfx!=0 or sum_yx!=0 or sum_sjx!=0):\n\n #change[0]=r[0]+0\n #change[1]=r[1]+0\n #change[2]=r[2]-0\n #change[3]=r[3]-0\n sum_red=0\n sum_green=0\n sum_blue=0\n x=r[0]\n y=r[1]\n w=r[2]\n h=r[3]\n center_x=r[0]+int(r[2]/2)\n center_y=r[1]+int(r[3]/2)\n sensor.reset()\n sensor.set_pixformat(sensor.RGB565)\n sensor.set_framesize(sensor.QQVGA)\n sensor.skip_frames(time = 300)\n sensor.set_auto_gain(False) # must be turned off for color tracking\n sensor.set_auto_whitebal(False) # must be turned off for color tracking\n sensor.set_vflip(False)\n sensor.set_hmirror(False)\n img = sensor.snapshot(1.8)\n #r=list(r)\n\n i=3\n while(i>0):\n blobs = img.find_blobs(blue,roi=r,pixel_threshold=200,area_threshold=200)\n if blobs:\n\n max_blob = find_max(blobs)\n img.draw_rectangle(r) # rect\n #img.draw_cross(center_x, center_y) # cx, cy\n img.draw_cross(max_blob.cx(), max_blob.cy())\n #img.draw_line(x+int(w/2),y,x,y+h)\n #img.draw_line(x,y+h,x+w,y+h)\n #img.draw_line(x+w,y+h,x+int(w/2),y)#三角形\n\n img.draw_circle(x+int(w/2),y+int(h/2),int(w/2))\n sum_blue=sum_blue+1\n\n blobs = img.find_blobs(red,roi=r,pixel_threshold=200,area_threshold=200)\n if blobs:\n\n max_blob = find_max(blobs)\n img.draw_rectangle(r) # rect\n img.draw_cross(center_x, center_y) # cx, cy\n img.draw_circle(x+int(w/2),y+int(h/2),int(h/2))\n sum_red=sum_red+1\n\n\n\n blobs = img.find_blobs(green,roi=r,pixel_threshold=200,area_threshold=200)\n if blobs:\n\n max_blob = find_max(blobs)\n img.draw_rectangle(r) # rect\n img.draw_cross(center_x, center_y) # cx, cy\n sum_green=sum_green+1\n i=i-1\n\n if (sum_red>sum_green and sum_red>sum_blue) :\n flag=5#\"red\"\n elif (sum_green>sum_red and sum_green>sum_blue) :\n flag=6#\"green\"\n elif (sum_blue>sum_red and sum_blue>sum_green):\n flag=7#\"blue\"\n else :\n flag = 0\n\n if(r==0 or flag == 0):\n print(\"没找到\")\n else:\n Lm = int(r[2]/2)\n K = 25\n G=1\n length = K/Lm\n #edge =\n print(\"length:\",length)\n print(\"color:\",flag,\"object:\",t,\"range:\",r,\"red:\",sum_red,\n \"green:\",sum_green,\"blue:\",sum_blue,\"zfx_model:\",sum_zfx,\"yx_model:\",\n sum_yx,\"sjx_model:\",sum_sjx)\n uart.writechar(0x55)\n uart.writechar(0x53)\n uart.writechar(flag)\n uart.writechar(t)\n uart.writechar(Lm)\n uart.writechar(K)\n uart.writechar(G)\n uart.writechar(1)\n G=0\n break\n #如果为红色, blob.code()==1; 如果为绿色, blob.code==2.\n #如果为数字0, t==\"0.pgm\"; 如果为数字1, t==\"1.pgm\".\n\n\n #print(clock.fps())\n\n\n","sub_path":"openmv/color_model.py","file_name":"color_model.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"644396158","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nfrom numpy import ndarray\n\n\ndef centering(array_3d):\n \"\"\"\n\n 頂点群の重心が座標軸の原点となるように、全ての頂点座標を平行移動\n\n :type array_3d: ndarray\n :param array_3d: 平行移動後の頂点配列\n\n :rtype : ndarray\n :return 平行移動後の頂点配列\n\n \"\"\"\n\n assert array_3d.shape[-1] == 3\n return array_3d - np.mean(array_3d, axis=0)\n\n\ndef normalize(array_3d):\n \"\"\"\n\n 重心から頂点までの距離が最大1となるように、全ての頂点座標を正規化する\n\n :type array_3d: ndarray\n :param array_3d: 正規化対象の頂点配列\n\n :rtype : ndarray\n :return 正規化後の頂点配列\n\n \"\"\"\n\n assert array_3d.shape[-1] == 3\n c = np.mean(array_3d, axis=0)\n centered_array_3d = centering(array_3d)\n max_norm = np.max(np.linalg.norm(centered_array_3d, axis=1))\n normalized_array_3d = centered_array_3d / max_norm\n return normalized_array_3d + c\n\n\ndef scale(array_3d, r):\n \"\"\"\n\n 頂点群の重心から各頂点までの距離をr倍する\n\n :type array_3d: ndarray\n :param array_3d: スケーリング対象の頂点配列\n\n :type r: float\n :param r: 距離倍率\n\n :rtype : ndarray\n :return : スケーリング後の頂点配列\n\n \"\"\"\n\n assert array_3d.shape[-1] == 3\n c = np.mean(array_3d, axis=0)\n centered_array_3d = centering(array_3d)\n return centered_array_3d * r + c\n\n\ndef translate(array_3d, t):\n \"\"\"\n\n 頂点群をtだけ平行移動する\n\n :type array_3d: ndarray\n :param array_3d: 平行移動対象の頂点配列\n\n :type t: tuple or ndarray\n :param t: (x, y, z)への平行移動量\n\n :rtype: ndarray\n :return: 平行移動後の頂点配列\n\n \"\"\"\n\n t_ = np.array(t)\n assert array_3d.ndim >= 2\n assert array_3d.shape[-1] == 3\n assert np.asarray(t_).shape[-1] == 3\n return array_3d + t_\n\n\ndef rotate(array_3d, angle, axis_vector, center=[0, 0, 0]):\n \"\"\"\n\n 頂点群を、重心からxyz方向に伸びる軸回りにrだけ回転する\n\n :type array_3d: ndarray\n :param array_3d: 回転対象の頂点配列\n\n :type angle:\n :param angle:\n\n :type axis_vector:\n :param axis_vector:\n\n :rtype: ndarray\n :return: 回転後の頂点配列\n\n \"\"\"\n\n assert array_3d.ndim >= 2\n assert array_3d.shape[-1] == 3\n\n # 軸成分\n ax, ay, az = axis_vector / np.linalg.norm(axis_vector)\n\n theta = angle / 180. * np.pi\n cos = np.cos(theta)\n sin = np.sin(theta)\n\n # 回転行列\n r_mtr = np.array([[ax * ax * (1. - cos) + cos,\n ax * ay * (1. - cos) - az * sin,\n az * ax * (1. - cos) + ay * sin],\n [ax * ay * (1. - cos) + az * sin,\n ay * ay * (1. - cos) + cos,\n ay * az * (1. - cos) - ax * sin],\n [az * ax * (1. - cos) - ay * sin,\n ay * az * (1. - cos) + ax * sin,\n az * az * (1. - cos) + cos]])\n\n centered_array_3d = array_3d - center\n\n return np.dot(centered_array_3d, r_mtr.T) + center\n","sub_path":"core/util/array3d.py","file_name":"array3d.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"13417106","text":"# ******************************************************************************\n# Copyright 2017-2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\nimport random\nimport argparse\nimport logging\nimport sys\n\nfrom nlp_architect.models.np2vec import NP2vec\nfrom nlp_architect.utils.io import validate_existing_filepath, check_size\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nouf = open('result.csv', 'w', encoding=\"utf-8\")\n\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '--np2vec_model_file',\n default='conll2000.train.model',\n help='path to the file with the np2vec model to load.',\n type=validate_existing_filepath)\n arg_parser.add_argument(\n '--binary',\n help='boolean indicating whether the model to load has been stored in binary '\n 'format.',\n action='store_true')\n arg_parser.add_argument(\n '--word_ngrams',\n default=0,\n type=int,\n choices=[0, 1],\n help='If 0, the model to load stores word information. If 1, the model to load stores '\n 'subword (ngrams) information; note that subword information is relevant only to '\n 'fasttext models.')\n arg_parser.add_argument(\n '--mark_char',\n default='_',\n type=str,\n action=check_size(1, 2),\n help='special character that marks word separator and NP suffix.')\n arg_parser.add_argument(\n '--np',\n default='Intel Corp.',\n type=str,\n action=check_size(min_size=1),\n required=True,\n help='NP to print its word vector.')\n\n args = arg_parser.parse_args()\n\n np2vec_model = NP2vec.load(\n args.np2vec_model_file,\n binary=args.binary,\n word_ngrams=args.word_ngrams)\n\n # print(\"word vector for the NP \\'\" + args.np + \"\\':\", np2vec_model[args.mark_char.join(\n # args.np.split()) + args.mark_char])\n \n \n arr = [\n '全新',\n '斗山',\n '60 - 7',\n '挖掘机',\n '野马',\n 'T70',\n '1.8',\n 'T',\n '无级',\n '升级版进取',\n '大众',\n 'POLO',\n 'Cross',\n '北京',\n '旗岭',\n '箱货',\n '4.3',\n '米',\n '大箱',\n '解放',\n '半挂车',\n '国6',\n '国5',\n '国4',\n '国六',\n '国五',\n '国四',\n '奥迪A4',\n '豪华型',\n '起亚K2',\n 'K2',\n '自动',\n '抵押车',\n '仓库',\n '批发',\n '一手',\n '安全',\n '正规',\n '招中介',\n '常年',\n '收售',\n '二手',\n '农机',\n '原装',\n '进口',\n '日立240 - 3',\n '日立',\n '挖掘机',\n '单位',\n '低价',\n '处理',\n '7',\n '吨',\n '10',\n '吨',\n '叉车',\n '转让',\n '东风',\n '天龙',\n '前四后四',\n '载货车',\n '220',\n '马',\n '马力',\n '9.6',\n '米厢',\n '华菱',\n '高配',\n '后八轮',\n '便宜卖'\n ]\n for keyword in arr:\n try:\n result = np2vec_model.similar_by_word(keyword, 20)\n ouf.write(\"{}, {}\\n\".format(keyword, '--'))\n except Exception as e:\n print(e)\n continue\n for _ in range(10):\n slice = random.sample(result, 4)\n ouf.write(\"{}, {}\\n\".format(\"\", ''.join([t[0] for t in slice])))\n ouf.close()\n","sub_path":"examples/np2vec/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"556100951","text":"\"\"\"Console script for chess_tuning_tools.\"\"\"\nimport json\nimport logging\nimport pathlib\nimport sys\nimport time\nfrom datetime import datetime\n\nimport click\nimport dill\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom atomicwrites import AtomicWriter\nfrom bask.optimizer import Optimizer\nfrom scipy.special import erfinv\nfrom skopt.utils import create_result\n\nfrom tune.db_workers import TuningClient, TuningServer\nfrom tune.io import load_tuning_config, prepare_engines_json, write_engines_json\nfrom tune.local import parse_experiment_result, reduce_ranges, run_match\nfrom tune.plots import plot_objective\nfrom tune.summary import confidence_intervals\nfrom tune.utils import expected_ucb\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command(hidden=True, deprecated=True)\n@click.option(\n \"--verbose\", \"-v\", is_flag=True, default=False, help=\"Turn on debug output.\"\n)\n@click.option(\"--logfile\", default=None, help=\"Path to where the log is saved to.\")\n@click.option(\n \"--terminate-after\", default=0, help=\"Terminate the client after x minutes.\"\n)\n@click.option(\n \"--run-only-once\",\n default=False,\n is_flag=True,\n help=\"Terminate the client after one job has been completed or no job can be \"\n \"found.\",\n)\n@click.option(\n \"--skip-benchmark\",\n default=False,\n is_flag=True,\n help=\"Skip calibrating the time control by running a benchmark.\",\n)\n@click.option(\n \"--clientconfig\", default=None, help=\"Path to the client configuration file.\"\n)\n@click.argument(\"dbconfig\")\ndef run_client(\n verbose,\n logfile,\n terminate_after,\n run_only_once,\n skip_benchmark,\n clientconfig,\n dbconfig,\n):\n \"\"\" Run the client to generate games for distributed tuning.\n\n In order to connect to the database you need to provide a valid DBCONFIG\n json file. It contains the necessary parameters to connect to the database\n where it can fetch jobs and store results.\n \"\"\"\n log_level = logging.DEBUG if verbose else logging.INFO\n logging.basicConfig(\n level=log_level,\n filename=logfile,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n tc = TuningClient(\n dbconfig_path=dbconfig,\n terminate_after=terminate_after,\n clientconfig=clientconfig,\n only_run_once=run_only_once,\n skip_benchmark=skip_benchmark,\n )\n tc.run()\n\n\n@cli.command(hidden=True, deprecated=True)\n@click.option(\n \"--verbose\", \"-v\", is_flag=True, default=False, help=\"Turn on debug output.\"\n)\n@click.option(\"--logfile\", default=None, help=\"Path to where the log is saved to.\")\n@click.argument(\"command\")\n@click.argument(\"experiment_file\")\n@click.argument(\"dbconfig\")\ndef run_server(verbose, logfile, command, experiment_file, dbconfig):\n \"\"\"Run the tuning server for a given EXPERIMENT_FILE (json).\n\n To connect to the database you also need to provide a DBCONFIG json file.\n\n \\b\n You can choose from these COMMANDs:\n * run: Starts the server.\n * deactivate: Deactivates all active jobs of the given experiment.\n * reactivate: Reactivates all recent jobs for which sample size is not reached yet.\n \"\"\"\n log_level = logging.DEBUG if verbose else logging.INFO\n logging.basicConfig(\n level=log_level,\n filename=logfile,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n tc = TuningServer(experiment_path=experiment_file, dbconfig_path=dbconfig)\n if command == \"run\":\n tc.run()\n elif command == \"deactivate\":\n tc.deactivate()\n elif command == \"reactivate\":\n tc.reactivate()\n else:\n raise ValueError(f\"Command {command} is not recognized. Terminating...\")\n\n\n@cli.command()\n@click.option(\n \"-c\",\n \"--tuning-config\",\n help=\"json file containing the tuning configuration.\",\n required=True,\n type=click.File(\"r\"),\n)\n@click.option(\n \"-a\",\n \"--acq-function\",\n default=\"mes\",\n help=\"Acquisition function to use for selecting points to try. \"\n \"Can be {mes, pvrs, ei, ts, vr}.\",\n show_default=True,\n)\n@click.option(\n \"--acq-function-samples\",\n default=1,\n help=\"How many GP samples to average the acquisition function over. \"\n \"More samples will slow down the computation time, but might give more \"\n \"stable tuning results. Less samples on the other hand cause more exploration \"\n \"which could help avoid the tuning to get stuck.\",\n show_default=True,\n)\n@click.option(\n \"--confidence\",\n default=0.90,\n help=\"Confidence to use for the highest density intervals of the optimum.\",\n show_default=True,\n)\n@click.option(\n \"-d\",\n \"--data-path\",\n default=\"data.npz\",\n help=\"Save the evaluated points to this file.\",\n type=click.Path(exists=False),\n show_default=True,\n)\n@click.option(\n \"--gp-burnin\",\n default=5,\n type=int,\n help=\"Number of samples to discard before sampling model parameters. \"\n \"This is used during tuning and few samples suffice.\",\n show_default=True,\n)\n@click.option(\n \"--gp-samples\",\n default=300,\n type=int,\n help=\"Number of model parameters to sample for the model. \"\n \"This is used during tuning and it should be a multiple of 100.\",\n show_default=True,\n)\n@click.option(\n \"--gp-initial-burnin\",\n default=100,\n type=int,\n help=\"Number of samples to discard before starting to sample the initial model \"\n \"parameters. This is only used when resuming or for the first model.\",\n show_default=True,\n)\n@click.option(\n \"--gp-initial-samples\",\n default=300,\n type=int,\n help=\"Number of model parameters to sample for the initial model. \"\n \"This is only used when resuming or for the first model. \"\n \"Should be a multiple of 100.\",\n show_default=True,\n)\n@click.option(\n \"-l\",\n \"--logfile\",\n default=\"log.txt\",\n help=\"Path to log file.\",\n type=click.Path(exists=False),\n show_default=True,\n)\n@click.option(\n \"--n-initial-points\",\n default=16,\n help=\"Size of initial dense set of points to try.\",\n show_default=True,\n)\n@click.option(\n \"--n-points\",\n default=500,\n help=\"The number of random points to consider as possible next point. \"\n \"Less points reduce the computation time of the tuner, but reduce \"\n \"the coverage of the space.\",\n show_default=True,\n)\n@click.option(\n \"--plot-every\",\n default=1,\n help=\"Plot the current optimization landscape every n-th iteration. \"\n \"Set to 0 to turn it off.\",\n show_default=True,\n)\n@click.option(\n \"--plot-path\",\n default=\"plots\",\n help=\"Path to the directory to which the tuner will output plots.\",\n show_default=True,\n)\n@click.option(\n \"--random-seed\",\n default=0,\n help=\"Number to seed all internally used random generators.\",\n show_default=True,\n)\n@click.option(\n \"--result-every\",\n default=1,\n help=\"Output the actual current optimum every n-th iteration.\"\n \"The further you are in the tuning process, the longer this will take to \"\n \"compute. Consider increasing this number, if you do not need the output \"\n \"that often. Set to 0 to turn it off.\",\n show_default=True,\n)\n@click.option(\n \"--resume/--no-resume\",\n default=True,\n help=\"Let the optimizer resume, if it finds points it can use.\",\n show_default=True,\n)\n@click.option(\n \"--fast-resume/--no-fast-resume\",\n default=True,\n help=\"If set, resume the tuning process with the model in the file specified by\"\n \" the --model-path. \"\n \"Note, that a full reinitialization will be performed, if the parameter\"\n \"ranges have been changed.\",\n show_default=True,\n)\n@click.option(\n \"--model-path\",\n default=\"model.pkl\",\n help=\"The current optimizer will be saved for fast resuming to this file.\",\n type=click.Path(exists=False),\n show_default=True,\n)\n@click.option(\"--verbose\", \"-v\", count=True, default=0, help=\"Turn on debug output.\")\n@click.option(\n \"--warp-inputs/--no-warp-inputs\",\n default=True,\n show_default=True,\n help=\"If True, let the tuner warp the input space to find a better fit to the \"\n \"optimization landscape.\",\n)\ndef local( # noqa: C901\n tuning_config,\n acq_function=\"mes\",\n acq_function_samples=1,\n confidence=0.9,\n data_path=None,\n gp_burnin=5,\n gp_samples=300,\n gp_initial_burnin=100,\n gp_initial_samples=300,\n logfile=\"log.txt\",\n n_initial_points=16,\n n_points=500,\n plot_every=1,\n plot_path=\"plots\",\n random_seed=0,\n result_every=1,\n resume=True,\n fast_resume=True,\n model_path=\"model.pkl\",\n verbose=0,\n warp_inputs=True,\n):\n \"\"\"Run a local tune.\n\n Parameters defined in the `tuning_config` file always take precedence.\n \"\"\"\n json_dict = json.load(tuning_config)\n settings, commands, fixed_params, param_ranges = load_tuning_config(json_dict)\n log_level = logging.DEBUG if verbose > 0 else logging.INFO\n log_format = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n root_logger = logging.getLogger(\"ChessTuner\")\n root_logger.setLevel(log_level)\n root_logger.propagate = False\n file_logger = logging.FileHandler(settings.get(\"logfile\", logfile))\n file_logger.setFormatter(log_format)\n root_logger.addHandler(file_logger)\n console_logger = logging.StreamHandler(sys.stdout)\n console_logger.setFormatter(log_format)\n root_logger.addHandler(console_logger)\n logging.debug(f\"Got the following tuning settings:\\n{json_dict}\")\n\n # 1. Create seed sequence\n ss = np.random.SeedSequence(settings.get(\"random_seed\", random_seed))\n # 2. Create kernel\n # 3. Create optimizer\n\n random_state = np.random.RandomState(np.random.MT19937(ss.spawn(1)[0]))\n gp_kwargs = dict(\n # TODO: Due to a bug in scikit-learn 0.23.2, we set normalize_y=False:\n normalize_y=True,\n warp_inputs=settings.get(\"warp_inputs\", warp_inputs),\n )\n opt = Optimizer(\n dimensions=list(param_ranges.values()),\n n_points=settings.get(\"n_points\", n_points),\n n_initial_points=settings.get(\"n_initial_points\", n_initial_points),\n # gp_kernel=kernel, # TODO: Let user pass in different kernels\n gp_kwargs=gp_kwargs,\n # gp_priors=priors, # TODO: Let user pass in priors\n acq_func=settings.get(\"acq_function\", acq_function),\n acq_func_kwargs=dict(alpha=1.96, n_thompson=500),\n random_state=random_state,\n )\n X = []\n y = []\n noise = []\n iteration = 0\n\n # 3.1 Resume from existing data:\n if data_path is None:\n data_path = \"data.npz\"\n if resume:\n path = pathlib.Path(data_path)\n if path.exists():\n with np.load(path) as importa:\n X = importa[\"arr_0\"].tolist()\n y = importa[\"arr_1\"].tolist()\n noise = importa[\"arr_2\"].tolist()\n if len(X[0]) != opt.space.n_dims:\n root_logger.error(\n \"The number of parameters are not matching the number of \"\n \"dimensions. Rename the existing data file or ensure that the \"\n \"parameter ranges are correct.\"\n )\n sys.exit(1)\n reduction_needed, X_reduced, y_reduced, noise_reduced = reduce_ranges(\n X, y, noise, opt.space\n )\n if reduction_needed:\n backup_path = path.parent / (\n path.stem + f\"_backup_{int(time.time())}\" + path.suffix\n )\n root_logger.warning(\n f\"The parameter ranges are smaller than the existing data. \"\n f\"Some points will have to be discarded. \"\n f\"The original {len(X)} data points will be saved to \"\n f\"{backup_path}\"\n )\n np.savez_compressed(\n backup_path, np.array(X), np.array(y), np.array(noise)\n )\n X = X_reduced\n y = y_reduced\n noise = noise_reduced\n iteration = len(X)\n reinitialize = True\n if fast_resume:\n path = pathlib.Path(model_path)\n if path.exists():\n with open(model_path, mode=\"rb\") as model_file:\n old_opt = dill.load(model_file)\n root_logger.info(\n f\"Resuming from existing optimizer in {model_path}.\"\n )\n if opt.space == old_opt.space:\n old_opt.acq_func = opt.acq_func\n old_opt.acq_func_kwargs = opt.acq_func_kwargs\n opt = old_opt\n reinitialize = False\n else:\n root_logger.info(\n \"Parameter ranges have been changed and the \"\n \"existing optimizer instance is no longer \"\n \"valid. Reinitializing now.\"\n )\n\n if reinitialize:\n root_logger.info(\n f\"Importing {iteration} existing datapoints. \"\n f\"This could take a while...\"\n )\n opt.tell(\n X,\n y,\n noise_vector=noise,\n gp_burnin=settings.get(\"gp_initial_burnin\", gp_initial_burnin),\n gp_samples=settings.get(\"gp_initial_samples\", gp_initial_samples),\n n_samples=settings.get(\"n_samples\", 1),\n progress=True,\n )\n root_logger.info(\"Importing finished.\")\n\n # 4. Main optimization loop:\n while True:\n root_logger.info(\"Starting iteration {}\".format(iteration))\n result_every_n = settings.get(\"result_every\", result_every)\n if (\n result_every_n > 0\n and iteration % result_every_n == 0\n and opt.gp.chain_ is not None\n ):\n result_object = create_result(Xi=X, yi=y, space=opt.space, models=[opt.gp])\n try:\n best_point, best_value = expected_ucb(result_object, alpha=0.0)\n best_point_dict = dict(zip(param_ranges.keys(), best_point))\n with opt.gp.noise_set_to_zero():\n _, best_std = opt.gp.predict(\n opt.space.transform([best_point]), return_std=True\n )\n root_logger.info(f\"Current optimum:\\n{best_point_dict}\")\n root_logger.info(\n f\"Estimated Elo: {np.around(-best_value * 100, 4)} +- \"\n f\"{np.around(best_std * 100, 4).item()}\"\n )\n confidence_val = settings.get(\"confidence\", confidence)\n confidence_mult = erfinv(confidence_val) * np.sqrt(2)\n lower_bound = np.around(\n -best_value * 100 - confidence_mult * best_std * 100, 4\n ).item()\n upper_bound = np.around(\n -best_value * 100 + confidence_mult * best_std * 100, 4\n ).item()\n root_logger.info(\n f\"{confidence_val * 100}% confidence interval of the Elo value: \"\n f\"({lower_bound}, \"\n f\"{upper_bound})\"\n )\n confidence_out = confidence_intervals(\n optimizer=opt,\n param_names=list(param_ranges.keys()),\n hdi_prob=confidence_val,\n opt_samples=1000,\n multimodal=False,\n )\n root_logger.info(\n f\"{confidence_val * 100}% confidence intervals of the parameters:\"\n f\"\\n{confidence_out}\"\n )\n except ValueError:\n root_logger.info(\n \"Computing current optimum was not successful. \"\n \"This can happen in rare cases and running the \"\n \"tuner again usually works.\"\n )\n plot_every_n = settings.get(\"plot_every\", plot_every)\n if (\n plot_every_n > 0\n and iteration % plot_every_n == 0\n and opt.gp.chain_ is not None\n ):\n if opt.space.n_dims == 1:\n root_logger.warning(\n \"Plotting for only 1 parameter is not supported yet.\"\n )\n else:\n root_logger.debug(\"Starting to compute the next plot.\")\n result_object = create_result(\n Xi=X, yi=y, space=opt.space, models=[opt.gp]\n )\n plt.style.use(\"dark_background\")\n fig, ax = plt.subplots(\n nrows=opt.space.n_dims,\n ncols=opt.space.n_dims,\n figsize=(3 * opt.space.n_dims, 3 * opt.space.n_dims),\n )\n fig.patch.set_facecolor(\"#36393f\")\n for i in range(opt.space.n_dims):\n for j in range(opt.space.n_dims):\n ax[i, j].set_facecolor(\"#36393f\")\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n plot_objective(\n result_object, dimensions=list(param_ranges.keys()), fig=fig, ax=ax\n )\n plotpath = pathlib.Path(settings.get(\"plot_path\", plot_path))\n plotpath.mkdir(parents=True, exist_ok=True)\n full_plotpath = plotpath / f\"{timestr}-{iteration}.png\"\n plt.savefig(\n full_plotpath, dpi=300, facecolor=\"#36393f\",\n )\n root_logger.info(f\"Saving a plot to {full_plotpath}.\")\n plt.close(fig)\n point = opt.ask()\n point_dict = dict(zip(param_ranges.keys(), point))\n root_logger.info(\"Testing {}\".format(point_dict))\n\n engine_json = prepare_engines_json(commands=commands, fixed_params=fixed_params)\n root_logger.debug(f\"engines.json is prepared:\\n{engine_json}\")\n write_engines_json(engine_json, point_dict)\n root_logger.info(\"Start experiment\")\n now = datetime.now()\n settings[\"debug_mode\"] = settings.get(\n \"debug_mode\", False if verbose <= 1 else True\n )\n out_exp = []\n for output_line in run_match(**settings):\n root_logger.debug(output_line.rstrip())\n out_exp.append(output_line)\n out_exp = \"\".join(out_exp)\n later = datetime.now()\n difference = (later - now).total_seconds()\n root_logger.info(f\"Experiment finished ({difference}s elapsed).\")\n\n score, error_variance = parse_experiment_result(out_exp, **settings)\n root_logger.info(\n \"Got Elo: {} +- {}\".format(-score * 100, np.sqrt(error_variance) * 100)\n )\n root_logger.info(\"Updating model\")\n while True:\n try:\n now = datetime.now()\n # We fetch kwargs manually here to avoid collisions:\n n_samples = settings.get(\"acq_function_samples\", acq_function_samples)\n gp_burnin = settings.get(\"gp_burnin\", gp_burnin)\n gp_samples = settings.get(\"gp_samples\", gp_samples)\n if opt.gp.chain_ is None:\n gp_burnin = settings.get(\"gp_initial_burnin\", gp_initial_burnin)\n gp_samples = settings.get(\"gp_initial_samples\", gp_initial_samples)\n opt.tell(\n point,\n score,\n noise_vector=error_variance,\n n_samples=n_samples,\n gp_samples=gp_samples,\n gp_burnin=gp_burnin,\n )\n later = datetime.now()\n difference = (later - now).total_seconds()\n root_logger.info(f\"GP sampling finished ({difference}s)\")\n root_logger.debug(f\"GP kernel: {opt.gp.kernel_}\")\n if warp_inputs and hasattr(opt.gp, \"warp_alphas_\"):\n warp_params = dict(\n zip(\n param_ranges.keys(),\n zip(\n np.around(np.exp(opt.gp.warp_alphas_), 3),\n np.around(np.exp(opt.gp.warp_betas_), 3),\n ),\n )\n )\n root_logger.debug(\n f\"Input warping was applied using the following parameters for \"\n f\"the beta distributions:\\n\"\n f\"{warp_params}\"\n )\n except ValueError:\n root_logger.warning(\n \"Error encountered during fitting. Trying to sample chain a bit. \"\n \"If this problem persists, restart the tuner to reinitialize.\"\n )\n opt.gp.sample(n_burnin=11, priors=opt.gp_priors)\n else:\n break\n X.append(point)\n y.append(score)\n noise.append(error_variance)\n iteration = len(X)\n\n with AtomicWriter(data_path, mode=\"wb\", overwrite=True).open() as f:\n np.savez_compressed(f, np.array(X), np.array(y), np.array(noise))\n with AtomicWriter(model_path, mode=\"wb\", overwrite=True).open() as f:\n dill.dump(opt, f)\n\n\nif __name__ == \"__main__\":\n sys.exit(cli()) # pragma: no cover\n","sub_path":"tune/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":21515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"521686939","text":"from bs4 import BeautifulSoup\nfrom nltk.stem import PorterStemmer\nimport nltk\nimport operator\nimport numpy as np\nimport spacy as sp\nfrom spacy import displacy\nfrom spacy.matcher import Matcher\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\n#Generate random integer values\nfrom random import seed\nfrom random import randint\n\nimport math\nimport re\n\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tag import StanfordPOSTagger\n\n# Tagger\ntagger = \"/Users/administrador/Downloads/stanford-tagger-4.0.0/models/spanish-ud.tagger\"\njar = \"/Users/administrador/Downloads/stanford-tagger-4.0.0/stanford-postagger.jar\"\n# Spacy tagger\nnlp = sp.load(\"es_core_news_sm\")\nmatcher = Matcher(nlp.vocab)\n#Functions:\n################################################\n# Functions Related to the Representation #\n################################################\n#The following function receives a string indicating the path to follow and\n#and returns the data in the file.\ndef openFile(s):\n file = open(s)\n data_file = file.read()\n file.close()\n return data_file\n#The following function receives the data from the stop word's file, splits them\n#and extends them.\ndef cleanStop(sw):\n csw = [ts for ts in sw.split()]\n esw = ['.', ',',';',':', '/','\"', '?', '!', '¡']\n csw.extend(esw)\n return csw\n# The following function utilizes BS to obtain the sections from the thesis. It\n# returns a dictionary (section-text) for the text of each section.\ndef obtainSections(s):\n print(\"Entrance to obtainSections function.\")\n soup = BeautifulSoup(s, 'html.parser')\n section = ['titulo', 'problema','objetivo', 'preguntas', 'hipotesis', 'justificacion', 'metodologia', 'resultados']\n sec_dictionary = {}\n titulo = soup.titulo\n problema = soup.problema\n objetivo = soup.objetivo\n preguntas = soup.preguntas\n hipotesis = soup.hipostesis\n justificacion = soup.justificacion\n metodologia = soup.metodologia\n resultados = soup.resultados\n sections = [titulo, problema, objetivo, preguntas, hipotesis, justificacion, metodologia, resultados]\n for secA, secB in zip(section, sections):\n if secB:\n sec_dictionary[secA] = secB.string\n else:\n sec_dictionary[secA] = None\n return sec_dictionary\n#The folowing function receives a dictionary (section-text). It returns a dictionary\n#with the structure section-paragraph-text.\ndef obtainParagraphs(sec_dictionary):\n paragraph_dictionary = {}\n for sec, text in sec_dictionary.items():\n paragraph_vector = {}\n counter = 1\n if text:\n print(\"Normal text: \", text)\n print(\"Split text: \", text.split('.\\n'))\n text_list = text.split('.\\n')\n for par in text_list:\n paragraph_vector[\"par\" + str(counter)] = par\n counter += 1\n paragraph_dictionary[sec] = paragraph_vector\n else:\n paragraph_dictionary[sec] = None\n return paragraph_dictionary\n\n #paragraph_dictionary[sec] =\n# The following function receives a dictionary (section-text) with the section\n# and its corresponding text. It returns a dictionary with the same structure\n# but it separates the text into a list of the sentences that comprises it.\ndef separateSentences(sections):\n sentence_dictionary = {}\n for sec, text in sections.items():\n print(\"Texto: \", text)\n if text:\n sentence_dictionary[sec] = sent_tokenize(text)\n else:\n sentence_dictionary[sec] = None\n return sentence_dictionary\n# The following function receives a dictionary (section-par-text) and returns\n# a dictionary with the structure: (section-par-sentence-text).\ndef obtainSentences(paragraph_dictionary):\n sentences_dictionary = {}\n for sec, paragraphs in paragraph_dictionary.items():\n sentences_vector = {}\n if paragraphs:\n for paragraph, text in paragraphs.items():\n sentence_vector = {}\n sentences_list = sent_tokenize(text)\n counter = 1\n for sen in sentences_list:\n sen = sen.replace('\\n', ' ')\n sentence_vector[\"sen\" + str(counter)] = sen\n counter += 1\n sentences_vector[paragraph] = sentence_vector\n sentences_dictionary[sec] = sentences_vector\n else:\n sentences_vector[sec] = None\n return sentences_dictionary\n\n# The following function utilizes POS tagger from Stanford to tag each one of\n# the sentences in each section. It returns a dictionary with the\n# Section-SentenceNumber-ListOfTaggs\ndef tag_sentences(sen_dict):\n etiquetador = StanfordPOSTagger(tagger, jar)\n tagg_dic = {}\n for sec, text in sen_dict.items():\n sentence_dic = {}\n if text:\n counter = 1\n for sen in text:\n sentence_dic[\"Sentence\" + str(counter)] = etiquetador.tag(sen.split())\n counter += 1\n tagg_dic[sec] = sentence_dic\n return tagg_dic\n\n# The following function utilizes Stanford POS tagger to tag each one of the\n# sentences. It receives a sentences_dictionary (sec-par-sen#-text) and returns\n# the same structure but with a leaf taggs added.\ndef tag_sentencesR(sen_dict):\n etiquetador = StanfordPOSTagger(tagger, jar)\n tagg_dic = {}\n for sec, paragraphs in sen_dict.items():\n tagg_par = {}\n if paragraphs:\n for par, sentences in paragraphs.items():\n tagg_sen = {}\n for sen, text in sentences.items():\n print(\"Text for tagging: \", text)\n tagg_list = etiquetador.tag(text.split())\n tagg_sen[sen] = tagg_list\n tagg_par[par] = tagg_sen\n tagg_dic[sec] = tagg_par\n else:\n tagg_dic[sec] = None\n return tagg_dic\n# The following function utilizes Spacy tagger to tag each one of the sentences\n# in each section. It receives a sentences_dictionary (sec-par-sen#-text) and\n# returns the same structure but with a leaf taggs added.\ndef tag_sentences_spacy(sen_dictionary):\n tagg_dict = {}\n for sec, paragraphs in sen_dictionary.items():\n tagg_par = {}\n if paragraphs:\n for par, sentences in paragraphs.items():\n tagg_sen = {}\n for sen, text in sentences.items():\n print(\"Text for tagging: \", text)\n tagg_list = get_list_of_tagged_tuples(text)\n tagg_sen[sen] = tagg_list\n tagg_par[par] = tagg_sen\n tagg_dict[sec] = tagg_par\n else:\n tagg_dict[sec] = None\n return tagg_dict\n# The following function receives a sentence and returns a list of tuples. The\n# tuples have the structure: (word, POS).\ndef get_list_of_tagged_tuples(text):\n tuples_list = []\n text_information = nlp(text)\n for token in text_information:\n tuples_list.append((token.text, token.pos_))\n return tuples_list\n# The following receives a tagger dictionary (sec-par-sen#-taggs) and creates\n# a dictionary sec-par-sen#-concepts. It also creates a set containing all the\n# concepts found in the document.\ndef get_concepts(tagg_dict):\n concept_dictionary = {}\n concepts = []\n for sec, paragraphs in tagg_dict.items():\n concept_par = {}\n if paragraphs:\n for par, sentences in paragraphs.items():\n concept_sen = {}\n for sen, tags in sentences.items():\n length = len(tags)\n concept_list = []\n if length > 1:\n for i in range(length-1):\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'NOUN'):\n #print(\"Adding a NN: \", tags[i][0] + \" \" + tags[i+1][0])\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0])\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'A'):\n #print(\"Adding a NA: \", tags[i][0] + \" \" + tags[i+1][0])\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0])\n if length>2:\n for i in range(length-2):\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'ADP' and tags[i+2][1] == 'NOUN'):\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0])\n if length>3:\n for i in range(length - 3):\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'ADP' and tags[i+2][1] == 'NOUN' and tags[i+3][1] == 'ADJ'):\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0])\n if length>4:\n for i in range(length - 4):\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'ADJ' and tags[i+2][1] == 'ADJ' and tags[i+3][1] == 'ADP' and tags[i+4][1] == 'NOUN'):\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0] + \" \" + tags[i+4][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0] + \" \" + tags[i+4][0])\n if length > 5:\n for i in range(length -5):\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'ADJ' and tags[i+2][1] == 'ADJ' and tags[i+3][1] == 'ADJ' and tags[i+4][1] == 'ADP' and tags[i+5][1] == 'NOUN'):\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0] + \" \" + tags[i+4][0] + \" \" + tags[i+5][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0] + \" \" + tags[i+4][0])\n if(tags[i][1] == 'NOUN' and tags[i+1][1] == 'ADJ' and tags[i+2][1] == 'ADP' and tags[i+3][1] == 'NOUN' and tags[i+4][1] == 'ADJ' and tags[i+5][1] == 'ADJ'):\n concept_list.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0] + \" \" + tags[i+4][0] + \" \" + tags[i+5][0])\n concepts.append(tags[i][0] + \" \" + tags[i+1][0] + \" \" + tags[i+2][0] + \" \" + tags[i+3][0] + \" \" + tags[i+4][0] + \" \" + tags[i+5][0])\n if concept_list:\n concept_sen[sen] = concept_list\n else:\n concept_sen[sen] = None\n concept_par[par] = concept_sen\n concept_dictionary[sec] = concept_par\n else:\n concept_dictionary[sec] = None\n\n return concept_dictionary, concepts\n# The following function receives a dictionary with the form: sec-par-sen and\n# creates a dictionary of the for sec-par-sen-concepts.\ndef get_concepts_by_matching(s_dictionary):\n concept_dictionary = {}\n concepts = []\n pattern1 = [{\"POS\": \"NOUN\"}, {\"POS\":\"NOUN\"}]\n pattern2 = [{\"POS\": \"NOUN\"}, {\"POS\": \"ADJ\"}]\n pattern3 = [{'POS': 'NOUN'},\n {'POS': 'ADJ', 'OP': '?'},\n {'POS': 'ADP'},\n {'POS': 'NOUN'},\n {'POS': 'ADJ', 'OP': '*'}]\n pattern4 = [{'POS': 'NOUN'},\n {'POS': 'ADJ', 'OP': '?'},\n {'POS': 'ADP'},\n {'POS': 'NOUN'},\n {'POS': 'ADJ', 'OP': '*'}]\n # pattern5 = [{'POS': 'NOUN'},\n # {'POS': 'ADJ', 'OP': '?'},\n # {'POS': 'ADP'},\n # {'POS': 'VERB'},\n # {'POS': 'NOUN', 'OP': {'POS': 'ADJ', 'OP': '*'}}]\n\n matcher.add(\"Concept1\", None, pattern1)\n matcher.add(\"Concept2\", None, pattern2)\n matcher.add(\"Concept3\", None, pattern3)\n matcher.add(\"Concept4\", None, pattern4)\n for sec, paragraphs in s_dictionary.items():\n concept_par = {}\n if paragraphs:\n for paragraph, sentences in paragraphs.items():\n concept_sen = {}\n for sentence, text in sentences.items():\n text = nlp(text)\n matches = matcher(text)\n concepts_list = []\n for match_id, start, end in matches:\n span = text[start:end]\n concepts_list.append(span.text)\n concepts.append(span.text)\n if concepts_list:\n concept_sen[sentence] = concepts_list\n else:\n concept_sen[sentence] = None\n concept_par[paragraph] = concept_sen\n concept_dictionary[sec] = concept_par\n else:\n concept_dictionary[sec] = None\n return concept_dictionary, concepts\n# The following function receives the ordered set of concepts in the document\n# and returns an ordered set of co-occurrences\ndef get_occurrences(concepts):\n list_of_cooccurrences = []\n for i in range(len(concepts)):\n for j in range(len(concepts)):\n if ((concepts[i] == concepts[j]) and (i!=j) and (concepts[i] not in list_of_cooccurrences)):\n list_of_cooccurrences.append(concepts[i])\n return list_of_cooccurrences\n# The following function receives the ordered set of concepts in the document\n# and returns a dictionary with the structure: paragraph-occurrences\ndef get_occurrences_per_paragraph(concepts_dictionary):\n paragraphs_occurrences_dictionary = {}\n counter = 1\n for section, paragraphs in concepts_dictionary.items():\n for paragraph, sentences in paragraphs.items():\n paragraph_concepts_list = []\n for sentence, concepts in sentences.items():\n if concepts:\n for con in concepts:\n paragraph_concepts_list.append(con)\n # else:\n # paragraph_concepts_list.append(None)\n paragraphs_occurrences_dictionary[\"Par\" + str(counter)] = paragraph_concepts_list\n counter += 1\n return paragraphs_occurrences_dictionary\n# The following function receives the dictionary with the structure:\n# paragraph-occurrences, and returns a dictionary with the form: paragraph-occurrences-weight\ndef get_paragraph_occurrences_weigth(paragraphs_occurrences):\n paragraphs_occurrences_weigth_dictionary = {}\n for paragraph, concept_list in paragraphs_occurrences.items():\n occurrence_weight_dictionary = {}\n for occurrence in concept_list:\n occurrence_weight_dictionary[occurrence] = occurrence_repetitions(occurrence, concept_list)\n paragraphs_occurrences_weigth_dictionary[paragraph] = occurrence_weight_dictionary\n return paragraphs_occurrences_weigth_dictionary\n# The following function receives and occurrence and returns the number of times\n# that occurrence appears on a list.\ndef occurrence_repetitions(occ, con_list):\n counter = 0\n for con in con_list:\n if occ == con:\n counter += 1\n return counter\n\n# The following function receives a concept and returns a list of dictionaries\n# with the structure: paragraph-weight.\ndef get_paragraphs_concepts_appearance(concept, paragraphs_occurrences_weighted):\n paragraphs_concepts_weight = {}\n for paragraph, occurences_weighted in paragraphs_occurrences_weighted.items():\n if concept in occurences_weighted.keys():\n paragraphs_concepts_weight[paragraph] = occurences_weighted[concept]\n return paragraphs_concepts_weight\n# The follwing function receives a dictionary with the structure:\n# paragraphs-weights and returns the paragraph where the weight is maximum\n# if the paragraphs have a tie, then the first paragraph they appear in will\n# be delimited as the best paragraph.\ndef get_best_paragraph(paragraphs_concept_appearance):\n ordered_list = []\n weight = 0\n for paragraph, paragraph_weight in paragraphs_concept_appearance.items():\n if paragraph_weight > weight:\n ordered_list.insert(0,paragraph)\n else:\n ordered_list.append(paragraph)\n return ordered_list\n\n\n# The following function receives the dictionary with the structure:\n# paragraph-occurrences-weigths , and returns a dictionary with the form:\n# concept-key_paragraph.\ndef get_key_paragraph(concepts,paragraphs_occurrences_weighted):\n concept_KeyParagraph_dictionary = {}\n for concept in concepts:\n #Choose the paragraphs the concept appears in\n paragraphs_concepts_appearance = get_paragraphs_concepts_appearance(concept, paragraphs_occurrences_weighted)\n #Choose the paragraph where the concept is repeated the most\n best_paragraph = get_best_paragraph(paragraphs_concepts_appearance)\n concept_KeyParagraph_dictionary[concept] = best_paragraph\n return concept_KeyParagraph_dictionary\n# The following function receives the set of concepts and a dictionary with the\n# structure: paragraphs_occurrences.\ndef get_related_concepts(concepts, paragraphs_occurrences):\n related_concepts_dictionary = {}\n for concept in concepts:\n related_concept_list = []\n for related_concept in concepts:\n if concept != related_concept:\n counter = 0\n for paragrah, occurrences in paragraphs_occurrences.items():\n if concept in occurrences and related_concept in occurrences:\n counter += 1\n if counter > 1:\n related_concept_list.append(related_concept)\n related_concepts_dictionary[concept] = related_concept_list\n return related_concepts_dictionary\n# The following function receives the set of concepts and a dictionary with the\n# structure: paragraphs_occurrences.\ndef get_document_concept_graph(concepts, paragraphs_occurrences):\n related_concepts_dictionary = {}\n for concept in concepts:\n related_concept_dictionary = {}\n for related_concept in concepts:\n if concept != related_concept:\n counter = 0\n for paragrah, occurrences in paragraphs_occurrences.items():\n if concept in occurrences and related_concept in occurrences:\n counter += 1\n if counter > 1:\n related_concept_dictionary[related_concept] = counter\n related_concepts_dictionary[concept] = related_concept_dictionary\n return related_concepts_dictionary\n#############################################\n# Functions for sequentiality measure #\n#############################################\n# The following function obtain the\n# The following function obtains the significance of a concept in a respective\n# paragraph\ndef get_significance(concept, paragraph, related_concepts_dictionary, paragraphs_concepts_weighted):\n frequency = get_frequency(concept, paragraph, paragraphs_concepts_weighted)\n #print(\"Frequency: \", frequency)\n related_concepts = get_number_of_related_concepts(concept, related_concepts_dictionary)\n #print(\"Number of related concepts: \", related_concepts)\n scalar_significance = frequency*related_concepts\n #print(\"Significance: \", scalar_significance)\n return scalar_significance\n# The following function receives a concept and a paragraph and returns the\n# frequency of the concept in the respective paragraph.\ndef get_frequency(concept, paragraph, paragraphs_occurrences_weighted):\n #print(\"Get frequency: \", paragraphs_occurrences_weighted[paragraph])\n if paragraphs_occurrences_weighted[paragraph]:\n if concept in paragraphs_occurrences_weighted[paragraph].keys():\n return paragraphs_occurrences_weighted[paragraph][concept]\n else:\n return 0\n else:\n return 0\n\n\n# The following function receives a concept and the related concepts dictionary\n# and returns the number of related concepts of the respective concept.\ndef get_number_of_related_concepts(concept, related_concepts_dictionary):\n if related_concepts_dictionary[concept]:\n return len(related_concepts_dictionary[concept])\n else:\n return 0\n# The following function receives the concept, the set of complete concepts,\n# the related concepts per concept dictionary and returns the key paragraph\n# of that concept.\ndef get_key_paragraph_for_significance(concept,\n set_of_concepts,\n related_concepts_dictionary,\n paragraphs_concepts_weighted):\n paragraphs_significance_dictionary = get_paragraphs_significance_dictionary(concept,\n paragraphs_occurrences,\n related_concepts_dictionary,\n paragraphs_concepts_weighted\n )\n best_paragraph = get_best_paragraph_significance(paragraphs_significance_dictionary)\n return best_paragraph\n# The following function receives the a concept, a dictionary containing the\n# paragraphs with their respective recurrences and another dictionary with the\n# structure: related-concepts-dictionary. It returns a dictionary with the\n# structure: paragraph-significance.\ndef get_paragraphs_significance_dictionary(concept, paragraphs_occurrences, related_concepts_dictionary, paragraphs_concepts_weighted):\n paragraphs_significance_dictionary = {}\n for paragraph in paragraphs_occurrences:\n paragraphs_significance_dictionary[paragraph] = get_significance(concept, paragraph, related_concepts_dictionary, paragraphs_concepts_weighted)\n return paragraphs_significance_dictionary\n# The following function receives a dictionary with the structure:\n# paragraphs-significance, and returns the first paragraph with the best\n# significance.\ndef get_best_paragraph_significance(paragraphs_significance_dictionary):\n return max(paragraphs_significance_dictionary.items(), key = operator.itemgetter(1))[0]\n# The following function receives a concept, the key paragraph, the related\n# concepts dictionary and the paragraphs, it returns the comprehension burden\n# for the concept.\ndef get_comprehension_burden_per_concept(concept, key_paragraph, related_concepts_dictionary, paragraph_recurrences, paragraphs_occurrences_weighted):\n sum_concept = 0\n for paragraph in paragraph_recurrences.keys():\n if paragraph != key_paragraph:\n sum_concept = sum_concept + get_significance(concept, key_paragraph, related_concepts_dictionary, paragraphs_occurrences_weighted)\n else:\n break\n return sum_concept\n# The following function returns the comprehension burden for all the concepts\n# in the document.\ndef get_document_comprehension_burden(set_of_concepts, related_concepts_dictionary, paragraphs_occurrences, paragraphs_occurrences_weighted):\n comprehension_burden = 0\n for concept in set_of_concepts:\n key_paragraph = get_key_paragraph_for_significance(concept,\n set_of_concepts,\n related_concepts_dictionary,\n paragraphs_occurrences_weighted)\n comprehension_burden = comprehension_burden + get_comprehension_burden_per_concept(concept, key_paragraph, related_concepts_dictionary, paragraphs_occurrences, paragraphs_occurrences_weighted)\n return comprehension_burden\n\n###########################################################\n# Connectivity Measure #\n###########################################################\n# The following function receives a concept node and returns the weight over all\n# of its edges connecting to the concept node. Remember that alpha is a constant\n# greater than one.\ndef get_connectivity(concept_node, document_concept_graph):\n sum = 0\n alpha = 1\n for value in document_concept_graph[concept_node].values():\n sum = sum + value\n sum = (sum)**alpha\n return sum\n# The following function receives the document concept graph and returns the\n# connectivity measure for the entire document.\ndef get_document_connectivity(document_concept_graph):\n document_connectivity = 0\n for concept_node in document_concept_graph.keys():\n document_connectivity = document_connectivity + get_connectivity(concept_node, document_concept_graph)\n if len(document_concept_graph.keys()) != 0:\n document_connectivity = document_connectivity/len(document_concept_graph.keys())\n return document_connectivity\n##########################################################\n# Dispersion Measure #\n##########################################################\n# The following function receives a concept and a set of concepts (with repetitions) and\n# returns a dictionary with the concept (no repetitions) and probability.\ndef get_concept_probability(concept, set_of_concepts):\n return set_of_concepts.count(concept)/len(set_of_concepts)\n# The following funciton receives a paragraph, the set of concepts, and the\n# dictionary with the structure: paragraphs_concepts_weighted, it returns the\n# entropy for the paragraph.\ndef get_paragraph_entropy(paragraph, set_of_concepts, paragraphs_occurrences_weighted):\n entropy = 0\n for concept in paragraphs_occurrences_weighted[paragraph].keys():\n entropy = entropy + get_concept_probability(concept, set_of_concepts)*np.log2(get_concept_probability(concept, set_of_concepts))\n return -entropy\n# The following function receives the set_of_concepts and the paragraphs_occurrences_weighted\n# returns the dispersion of the whole document.\ndef get_dispersion(set_of_concepts, paragraphs_occurrences_weighted):\n dispersion = 0\n for paragraph in paragraphs_occurrences_weighted.keys():\n dispersion = dispersion + get_paragraph_entropy(paragraph, set_of_concepts, paragraphs_occurrences_weighted)\n dispersion = dispersion/len(paragraphs_occurrences_weighted.keys())\n return dispersion\n############################################################\n# Results #\n############################################################\n# The following function receives a numpy array and returns the average and the\n# standard deviation for each of its columns.\ndef get_average_and_sd(results_array):\n # column mean\n col_mean = np.nanmean(results_array, axis = 0)\n # Find indices where nan value is present\n inds = np.where(np.isnan(results_array))\n # Replace inds with avg of column\n results_array[inds] = np.take(col_mean, inds[1])\n average = np.average(results_array, axis = 0)\n # With the average we can get the sd for each column\n sd = np.std(results_array, axis = 0)\n return average, sd\ndef get_avg_and_sd_handmade(results_array):\n avg = []\n std = []\n cb = []\n con = []\n dis = []\n for line in results_array:\n cb.append(line[0])\n con.append(line[1])\n dis.append(line[2])\n print(cb)\n print(con)\n print(dis)\n avg.append(average_handmade(cb))\n avg.append(average_handmade(con))\n avg.append(average_handmade(dis))\n print(avg)\n std.append(std_handame(cb, avg[0]))\n std.append(std_handame(con, avg[1]))\n std.append(std_handame(dis, avg[2]))\n print(std)\n #get_interesting_thesis(cb)\n\ndef average_handmade(l):\n sum = 0\n for element in l:\n sum = sum + element\n return sum/len(l)\ndef std_handame(l, average):\n sum = 0\n for element in l:\n sum = sum + (element-average)**2\n return np.sqrt((sum)/(len(l)-1))\ndef get_interesting_thesis(v):\n counter = 0\n for e in v:\n if e == 587707:\n print(\"The thesis of interest is: \", counter)\n counter += 1\n# The following function receives the set of concepts and returns a graph\n# dictionary with the following structure: concepts-edges-weights.\n# def create_document_graph(concepts, concepts_dictionary):\n# # Create the graph\n# document_concept_graph = {}\n# for section, paragraphs in concepts_dictionary.items():\n# for paragraph, sentences in paragraphs.items():\n# for sentence, concepts in sentences.items():\n\n\n\n\n\n\n\n\n#Main\n#Obtain the sections of the thesis\nhtml_doc = openFile(\"MaestriaCompleto.xml\")\n#html_doc_original = openFile(\"PruebaTesis.xml\")\n# print(\"HTML file: \")\nsoup = BeautifulSoup(html_doc, 'html.parser')\n# print(soup.prettify())\n# print(\"Title: \", soup.titulo.string)\n# print(\"Problem: \", soup.problema)\n# print(\"Objective: \", soup.objetivo.string)\n# print(\"Prguntas: \", soup.preguntas)\n# print(\"Hypothesis: \", soup.Hipotesis)\n# print(\"Justification: \", soup.justificacion)\nprint(\"Print all thesis: \")\ntheses = soup.find_all('tesis')\nprint(\"Primera tesis: \")\nprint(theses[1])\n# Obtain the first five thesis\ntheses_ten = []\nfor i in range(0, len(theses)):\n print(\"----------------------------------------------\")\n print(\"Thesis: \", i)\n print(theses[i])\n theses_ten.append(theses[i])\n# Obtain sections\nprint(\"---------------------------------------------------\")\nprint(\"theses_ten[0]: \")\nprint(theses_ten[0])\nsections = obtainSections(str(theses_ten[0]))\nprint(\"Sections: \", sections)\n# Check for the five selected theses\noutput_file = open(\"resultados_maestria.txt\", \"w+\")\ncounter = 1\noutput_string = \"\"\nresults = []\nfor thesis in theses:\n print(\"Working on thesis number: \", counter)\n print(\"================================\")\n # Obtain the sections\n sections = obtainSections(str(thesis))\n print(\"Sections\")\n print(sections)\n print(\"================================\")\n # Obtain the paragraphs per section\n paragraphs = obtainParagraphs(sections)\n print(\"Paragraphs: \")\n print(paragraphs)\n print(\"================================\")\n # Obtain the sentences for each paragraph\n sentences = obtainSentences(paragraphs)\n print(\"Sentences: \")\n print(sentences)\n print(\"================================\")\n # Tag the words for each sentence in the document\n taggs = tag_sentences_spacy(sentences)\n print(\"Taggs: \")\n print(taggs)\n print(\"================================\")\n # Obtain the concepts for the complete document\n concepts_dictionary, set_of_concepts = get_concepts_by_matching(sentences)\n print(\"Concepts dictionary: \")\n print(concepts_dictionary)\n print(\"================================\")\n print(\"Concepts found: \")\n print(set_of_concepts)\n print(\"================================\")\n # Obtain the occurrences in the document\n paragraphs_occurrences = get_occurrences_per_paragraph(concepts_dictionary)\n print(\"Paragraphs occurences dictionary: \")\n print(paragraphs_occurrences)\n print(\"================================\")\n # Obtain the occurrences with their respective weight per paragraph\n paragraphs_occurrences_weighted = get_paragraph_occurrences_weigth(paragraphs_occurrences)\n print(\"Paragraphs Occurrences Weighted\")\n print(paragraphs_occurrences_weighted)\n print(\"================================\")\n # Obtain the key paragraph for each concept\n occurrences_key_paragraph= get_key_paragraph(set_of_concepts, paragraphs_occurrences_weighted)\n print(\"Occurrences and key paragraph\")\n print(occurrences_key_paragraph)\n print(\"==============================\")\n # Obtain the related concepts (Two concepts are related if they co-occur in\n # more than one paragraph.)\n related_concepts_dictionary = get_related_concepts(set_of_concepts, paragraphs_occurrences)\n print(\"Related concepts dictionary\")\n print(related_concepts_dictionary)\n print(\"===============================\")\n # Obtain the Concept Graph for the whole document G = (V, E, W), where V represents\n # the concept nodes, E represents the edges between the concepts, and W represents\n # the edge weights.\n document_concept_graph = get_document_concept_graph(set_of_concepts, paragraphs_occurrences)\n print(\"Document Concept Graph: \")\n print(document_concept_graph)\n print(\"===============================\")\n # Check the function for significance\n # checking_significance = get_significance(\"cuestión de almacenamiento\", \"Par4\", related_concepts_dictionary, paragraphs_occurrences_weighted)\n # print(\"Checking significacne: \", checking_significance)\n # Check paragraphs significance dictionary\n # paragraphs_significance_dictionary = get_paragraphs_significance_dictionary(\"cuestión de almacenamiento\", paragraphs_occurrences, related_concepts_dictionary, paragraphs_occurrences_weighted)\n # print(\"Paragraphs significance dictionary for cuestión de almacenamiento: \")\n # print(paragraphs_significance_dictionary)\n # # Get the key paragraph for \"cuestión de almacenamiento\"\n # key_paragraph_for_next = get_key_paragraph_for_significance(\"cuestión de almacenamiento\",\n # set_of_concepts,\n # related_concepts_dictionary,\n # paragraphs_occurrences_weighted)\n # print(\"Key paragraph for cuestión de almacenamiento: \", key_paragraph_for_next)\n # Check the function for obtaining burden for a concept in specific\n # checking_cb = get_comprehension_burden_per_concept(\"cuestión de almacenamiento\", key_paragraph_for_next, related_concepts_dictionary, paragraphs_occurrences, paragraphs_occurrences_weighted)\n # print(\"Comprehension burden for cuestión de almacenamiento: \", checking_cb)\n print(\"################################################\")\n print(\"Results for thesis number: \", counter)\n # Get the comprehension burden for the whole document\n print(\"################################################\")\n document_cb = get_document_comprehension_burden(set_of_concepts, related_concepts_dictionary, paragraphs_occurrences, paragraphs_occurrences_weighted)\n print(\"Comprehension Burden for the complete document: \", document_cb)\n # Connectivity\n # Obtain the connectivity for the concept_node of \"cuestión de almacenamiento\".\n # check_connectivity = get_connectivity(\"cuestión de almacenamiento\", document_concept_graph)\n # print(\"Connectivity for cuestión de almacenamiento: \", check_connectivity)\n # Obtain the connectivity for the entire document\n print(\"################################################\")\n document_connectivity = get_document_connectivity(document_concept_graph)\n print(\"Connectivity for the complete document: \", document_connectivity)\n # Obtain the probability of \"cuestión de almacenamiento\"\n # check_probability = get_concept_probability(\"cuestión de almacenamiento\", set_of_concepts)\n # print(\"Probability for cuestión de almacenamiento: \", check_probability)\n # # Obtain the entropy for \"Par4\"\n # check_entropy = get_paragraph_entropy(\"Par4\", set_of_concepts, paragraphs_occurrences_weighted)\n # print(\"Entropy for Par4: \", check_entropy)\n # Obtain the dispersion for the entire document\n print(\"################################################\")\n dispersion = get_dispersion(set_of_concepts, paragraphs_occurrences_weighted)\n print(\"Dispersion of the complete document: \", dispersion)\n print(\"=================================================\")\n output_string = output_string + \"Thesis number: \" + str(counter) + \" CB: \" + str(document_cb) + \" Connectivity: \" + str(document_connectivity) + \" Dispersion: \" + str(dispersion) + \"\\n\"\n counter += 1\n vector = [document_cb, document_connectivity, dispersion]\n results.append(vector)\n\n# Results\nprint(\"Results before numpy array: \")\nfor v in results:\n print(v)\nresults = np.array(results)\navg, sd = get_average_and_sd(results)\nget_avg_and_sd_handmade(results)\nprint(\"The average results: \")\nprint(avg)\nprint(\"The standard deviation results: \")\nprint(sd)\noutput_string = output_string + \"Average results: \" + \"cb: \" + str(avg[0]) + \" connectivity: \" + str(avg[1]) + \" dispersion: \" + str(avg[2]) + \"\\n\"\noutput_string = output_string + \"Standard Deviation: \" + \"cb: \" + str(sd[0]) + \" connectivity: \" + str(sd[1]) + \" dispersion: \" + str(sd[2]) + \"\\n\"\noutput_file.write(output_string)\noutput_file.close()\n","sub_path":"Maestria/ComprensibilidadMaestria1.0.py","file_name":"ComprensibilidadMaestria1.0.py","file_ext":"py","file_size_in_byte":37334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"246946489","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom datetime import datetime\nfrom datetime import timedelta\n\n#@frappe.whitelist(allow_guest=True)\ndef calculate_gross_salary(self,method):\n if self.ind_company_accommodation==\"Yes\":\n self.ind_food_allowance=300\n else:\n self.ind_food_allowance=0\n\n if self.ind_housing_allowance_provided==\"Yes\":\n self.ind_hra=self.base*0.25\n else:\n self.ind_hra=0\n\n if self.ind_transportation_allowance_provided==\"Yes\":\n if self.base<15001:\n self.ind_transport_allowance=self.base*0.10\n else:\n self.ind_transport_allowance=1500\n else:\n self.ind_transport_allowance=0\n \n self.ind_gross_salary=self.base+self.ind_food_allowance+self.ind_hra+self.ind_transport_allowance+self.ind_mobile_allowance+self.ind_hardship_allowance\n# frappe.throw(_(\"Test\"))\n","sub_path":"indipco/hooks_call/salary_structure_assignment.py","file_name":"salary_structure_assignment.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"172749018","text":"\nimport numpy as np\nimport os\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nimport time\nimport datetime\nimport logging\nimport sys\nfrom flask import Flask\nlogging.basicConfig(stream= sys.stdout, level=logging.INFO)\n\n\n\n# while(True):\n \ntry:\n\n logging.info(\"Doing Analytics\")\n\n data = pd.read_csv('app/events.txt', delimiter = '\\n', header = None)\n\n data.rename(columns = {0:'Y'}, inplace = True) \n X_train = np.arange(1,len(data)+1).reshape(-1, 1)\n Y_train = data['Y'].values.reshape(-1, 1)\n\n regressor = LinearRegression() \n #training the algorithm\n regressor.fit(X_train, Y_train) \n\n X_test = [[len(data)+1]]\n y_pred = regressor.predict(X_test)\n ts = datetime.datetime.now().timestamp()\n os.chdir(\"../app\")\n f = open(\"results.txt\",\"a+\")\n f.write(str(ts))\n f.write(\" \")\n f.write(str(y_pred))\n f.write(\"\\n\")\n f.close() \n\n logging.info(y_pred)\n\nexcept: \n \n logging.info(\"No data recieved yet\")\n\n","sub_path":"app/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"356422326","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nno_load_speed = 122.2\r\nno_load_current = 0.12\r\nstall_torque = 2.7\r\ntorque_constant = 0.163\r\nV_in = 24\r\n\r\nslope = no_load_speed/stall_torque\r\nstall_current = no_load_current + (stall_torque/torque_constant)\r\n\r\nload = np.zeros(101)\r\nmotor_current = np.zeros(101)\r\nmotor_speed = np.zeros(101)\r\npower_output = np.zeros(101)\r\nefficiency = np.zeros(101)\r\n\r\nfor i in range(101):\r\n load[i] = stall_torque*i/100\r\n\r\n motor_current[i] = no_load_current + (load[i]/torque_constant)\r\n\r\n power_input = V_in * motor_current[i]\r\n\r\n motor_speed[i] = no_load_speed - slope*load[i]\r\n\r\n power_output[i] = load[i]*motor_speed[i]\r\n\r\n efficiency[i] = power_output[i]/power_input\r\n\r\nfig1 = plt.gcf()\r\nplt.plot(load, motor_current, color=\"#0095B6\", label='Current')\r\nplt.xlabel(r'Motor Load Torque $\\tau [N\\cdot m]$', fontsize=22)\r\nplt.ylabel(r'Motor Current $[A]$', fontsize=22)\r\nplt.xticks(np.arange(0,stall_torque+0.1, 0.1))\r\nplt.yticks(np.arange(0, max(motor_current)+0.5, 0.5))\r\nplt.title('Current as a function of Motor Torque', fontsize=22)\r\nmng = plt.get_current_fig_manager()\r\nmng.window.showMaximized()\r\nplt.grid()\r\nplt.show()\r\nfig1.savefig('current.png')\r\n\r\nfig2 = plt.gcf()\r\nplt.plot(load, motor_speed, color=\"#F47789\", label='Speed')\r\nplt.xlabel(r'Motor Load Torque $\\tau [N\\cdot m]$', fontsize=22)\r\nplt.ylabel(r'Motor Speed $[rad/sec]$', fontsize=22)\r\nplt.xticks(np.arange(0,stall_torque+0.1, 0.1))\r\nplt.yticks(np.arange(0, motor_speed[0]+10, 5))\r\nplt.title('Speed as a function of Motor Torque', fontsize=22)\r\nmng = plt.get_current_fig_manager()\r\nmng.window.showMaximized()\r\nplt.grid()\r\nplt.show()\r\nfig2.savefig('speed.png')\r\n\r\nfig3 = plt.gcf()\r\nplt.plot(load, efficiency, color=\"#ED4F46\", label='Efficiency')\r\nplt.xlabel(r'Motor Load Torque $\\tau [N\\cdot m]$', fontsize=22)\r\nplt.ylabel(r'Efficiency', fontsize=22)\r\nplt.xticks(np.arange(0,stall_torque+0.1, 0.1))\r\nplt.yticks(np.arange(0, max(efficiency)+0.1, 0.05))\r\nplt.title('Efficiency as a function of Motor Torque', fontsize=22)\r\nmng = plt.get_current_fig_manager()\r\nmng.window.showMaximized()\r\nplt.grid()\r\nplt.show()\r\nfig3.savefig('efficiency.png')\r\n\r\nfig4 = plt.gcf()\r\nplt.plot(load, power_output, color=\"#F38D29\", label='Power')\r\nplt.xlabel(r'Motor Load Torque $\\tau [N\\cdot m]$', fontsize=22)\r\nplt.ylabel(r'Output Power $[W]$', fontsize=22)\r\nplt.xticks(np.arange(0,stall_torque+0.1, 0.1))\r\nplt.yticks(np.arange(0, max(power_output)+10, 5))\r\nplt.title('Output Power as a function of Motor Torque', fontsize=22)\r\nmng = plt.get_current_fig_manager()\r\nmng.window.showMaximized()\r\nplt.grid()\r\nplt.show()\r\nfig4.savefig('power.png')\r\n","sub_path":"Assignment1/torque_speed_curves.py","file_name":"torque_speed_curves.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"538283728","text":"from CRABClient.UserUtilities import config # getUsernameFromSiteDB\nconfig = config()\n\nconfig.General.requestName = 'MVA_DY2018'\nconfig.General.workArea = 'crab_projects'\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = True\nconfig.JobType.allowUndistributedCMSSW = True\n\n\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = '/home/t3-ku/janguian/CMSSW_10_6_11_patch1/src/KUSoftMVA/MuonAnalysis/test/miniNtuple_cfg.py'\n\n#config.Data.inputDataset = '/DYJetsToLL_M-50_HT-70to100_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM'\nconfig.Data.inputDataset = '/DYJetsToLL_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_new_pmx_94X_mc2017_realistic_v14-v1/MINIAODSIM'\n#/JpsiToMuMu_JpsiPt8_TuneCP5_13TeV-pythia8/RunIIFall17MiniAODv2-PU2017RECOSIMstep_12Apr2018_94X_mc2017_realistic_v14-v1/MINIAODSIM\nconfig.Data.useParent = True\nconfig.Data.inputDBS = 'global'\nconfig.Data.splitting = 'FileBased'\n#config.Data.splitting = 'Automatic'\nconfig.Data.unitsPerJob = 3\nconfig.Data.outLFNDirBase = '/store/user/jsingera/MVA/'\nconfig.Data.publication = True\nconfig.Data.outputDatasetTag = 'MVA_DY2018'\n\nconfig.Site.storageSite = 'T2_US_Nebraska'\n","sub_path":"MuonAnalysis/test/crab/dy2018mc_crab.py","file_name":"dy2018mc_crab.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"556160957","text":"\"\"\"\"connection_forwarder.py: SF connection object.\"\"\"\n\nimport logging\nimport socket\nimport threading\nfrom codecs import encode\n\nfrom six import BytesIO\n\nfrom moteconnection.connection_events import ConnectionEvents\nfrom moteconnection.utils import split_in_two\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\n__author__ = \"Raido Pahtma\"\n__license__ = \"MIT\"\n\n\nclass SfConnection(threading.Thread):\n\n PROTOCOL_VERSION = b\"U \"\n\n def __init__(self, event_queue, host_and_port):\n super(SfConnection, self).__init__()\n self._queue = event_queue\n\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n host, port = split_in_two(host_and_port, \":\")\n if len(port) > 0:\n port = int(port)\n else:\n port = 9002\n\n self._server_address = (host, port)\n\n self._alive = threading.Event()\n self._alive.set()\n\n self._connected = threading.Event()\n self._connected.clear()\n\n self._recv_length = 0\n self._recv_buf = None\n\n self.start()\n\n def send(self, packet):\n data = packet.serialize()\n acked = False\n if self._connected.isSet():\n try:\n self._socket.sendall(chr(len(data)).encode())\n self._socket.sendall(data)\n acked = True\n log.debug(\"snt %s\", encode(data, \"hex\"))\n except socket.error:\n self._disconnected()\n else:\n log.debug(\"drop %s\", encode(data, \"hex\"))\n\n if packet.callback:\n packet.callback(packet, acked)\n\n def join(self, timeout=None):\n self._alive.clear()\n self._socket.close()\n threading.Thread.join(self, timeout)\n\n def _disconnected(self):\n log.debug(\"disconnected\")\n self._connected.clear()\n self._queue.put((ConnectionEvents.EVENT_DISCONNECTED, None))\n self._socket.close()\n\n def _connect(self):\n self._socket.connect(self._server_address)\n\n log.debug(\"socket connected\")\n\n self._socket.sendall(self.PROTOCOL_VERSION)\n log.debug(\"handshake sent\")\n\n buf = BytesIO()\n while len(buf.getvalue()) < 2:\n data = self._socket.recv(2 - len(buf.getvalue()))\n if data:\n buf.write(data)\n else:\n raise socket.error(\"no data received\")\n\n if buf.getvalue() == b\"U \":\n log.debug(\"handshake success\")\n self._connected.set()\n self._queue.put((ConnectionEvents.EVENT_CONNECTED, None))\n else:\n raise socket.error(\"handshake mismatch {!s} != {!s}\".format(self.PROTOCOL_VERSION, buf.getvalue()))\n\n def _receive(self):\n try:\n if self._recv_length == 0:\n length = self._socket.recv(1)\n if length:\n self._recv_length = ord(length)\n self._recv_buf = BytesIO()\n else:\n raise socket.error(\"no data received\")\n else:\n data = self._socket.recv(self._recv_length - len(self._recv_buf.getvalue()))\n if data:\n self._recv_buf.write(data)\n if self._recv_length == len(self._recv_buf.getvalue()):\n log.debug(\"rcv %s\", encode(self._recv_buf.getvalue(), \"hex\"))\n self._recv_length = 0\n return self._recv_buf.getvalue()\n else:\n raise socket.error(\"no data received\")\n\n except socket.timeout:\n pass # timeouts are normal\n\n return None\n\n def run(self):\n try:\n self._connect()\n self._socket.settimeout(0.1)\n while self._alive.isSet():\n data = self._receive()\n if data is not None:\n self._queue.put((ConnectionEvents.MESSAGE_INCOMING, data))\n except socket.error as e:\n log.error(\"socket.error: %s\", e.args)\n finally:\n self._disconnected()\n","sub_path":"moteconnection/connection_forwarder.py","file_name":"connection_forwarder.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"89460340","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#tutorial: https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html\n\n#create a series by passing a list of values, using a default integer index\ns = pd.Series([1, 3, 5, np.nan, 6, 8])\n\n#use dates to index a data frame that has been created with a numpy array\ndates = pd.date_range('20130101', periods=6)\ndf = pd.DataFrame(np.random.randn(6,4), index=dates, columns = list('ABCD'))\n\n#creating a data frame by passing a dict of objects\n#seems like if you give it a scalar, it knows to repeat that over and over\ndf2 = pd.DataFrame({'A': 1.,\n 'B': pd.Timestamp('20130102'),\n 'C': pd.Series(1, index=list(range(4)), dtype='float32'),\n 'D': np.array([3] * 4, dtype='int32'),\n 'E': pd.Categorical([\"test\", \"train\", \"test\", \"train\"]),\n 'F': 'foo'})\n\n#to view the top and bottom rows:\ndf.head()\ndf.tail()\n\n#to view the index:\ndf.index\n\n#to view the columns:\ndf.columns\n\n#to see quick statistics:\ndf.describe()\n\n#to transpose:\ndf.T\n\n#sorting...\n#by axis:\ndf.sort_index(axis=1, ascending=False)\ndf.sort_values(by='B')\n\n#selecting...\n#use column name to select\ndf['A']\n\n#a selection of certain rows\ndf[0:3]\ndf['20130102':'20130104']\n\n#you can get all information that corresponds to a particular label\ndf.loc[dates[0]]\n\n#or all data in some columns:\ndf.loc[:, ['A', 'B']]\n\n#combining the above, notice that both endpoints are included:\ndf.loc['20130102':'20130104', ['A', 'B']]\n\n#using a column's values to select data\ndf[df['A']>0]\n\n#copying/adding\ndf3 = df.copy()\ndf3['E'] = ['one', 'one', 'two', 'three', 'four', 'three']\ndf3\ndf3[df3['E'].isin(['two', 'four'])]\n\n#IMDB example: https://medium.com/datactw/imdb-dataset-visualization-data-analytics-using-pandas-97b5c6f03c6d\n\nimdb_1000_data_url = r'imdb_1000.csv'\nmovies = pd.read_csv(imdb_1000_data_url)\n#the head:\nmovies.head()\n#the shape and data type:\nmovies.shape\nmovies.dtypes\n\n#plotting and analytics\n#movies.duration.plot(kind='hist')\n#movies[['content_rating', 'title']].groupby('content_rating').count().plot(kind='bar', title='Content Rating')\n#plt.xlabel('Content Rating')\n#plt.ylabel('Title Count')\n#plt.show()\n\nprint('Avg. star rating for movies 2 hours or longer: ', movies[movies['duration'] >= 120]['star_rating'].mean(),\n '\\nAvg. star rating for movies shorter than 2 hours: ', movies[movies['duration'] < 120]['star_rating'].mean())\n\n# calculate the average duration for each genre\nmovies[['duration','genre']].groupby('genre').mean()\n\n# calculate the average star rating for each genre, but only include genres with at least 10 movies\ngenres = movies['genre'].value_counts()[movies['genre'].value_counts() > 10].index\nprint('the mean rating of genres with 10 or more movies are\\n', movies[movies['genre'].isin(genres)].groupby('genre')['star_rating'].mean())","sub_path":"pandas_training.py","file_name":"pandas_training.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"34980499","text":"import trimesh\nimport torch\nimport pyembree\nimport PIL.Image\nimport cv2\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\n\nfrom utils.utils import mesh_init, get_peeled_intersections\n\n\ndef get_scf_per_intersection(mesh_curr, mesh_next, intersections, ray_param, ray_intersect_param):\n \n #rotate = trimesh.transformations.rotation_matrix(angle=np.radians(90), direction=[0,0,1])\n #rotate2 = trimesh.transformations.rotation_matrix(angle=np.radians(180), direction=[0,1,0])\n #extrinsic = rotate2[:3, :3] @ rotate[:3, :3]\n \n \n scf_per_vertex = np.array(mesh_next.vertices) - np.array(mesh_curr.vertices)\n \n #scf_per_vertex = (extrinsic @ (np.array(mesh_next.vertices) - np.array(mesh_curr.vertices)).T).T\n \n \"\"\"\n Calculate the intersection points for the mesh in the current frame.\n \"\"\"\n \n #intersections, ray_param, ray_intersect_param, scene = get_peeled_intersections(mesh_curr)\n \n first, second, third, fourth = intersections\n _, _, pixels = ray_param\n locations, index_ray, index_tri = ray_intersect_param\n \n \"\"\"\n Map scf_per_vertex to scf_per_intersection using barycentric interpolation\n \"\"\"\n scf_per_intersection = np.zeros((len(locations), 3))\n n = len(locations) #number of interections\n \n for i in range(n):\n v1_i, v2_i, v3_i = np.array(mesh_curr.faces[index_tri[i]])\n v1, v2, v3 = mesh_curr.vertices[[v1_i, v2_i, v3_i]]\n \n v2v1 = v2 - v1\n v2v1_d = v2v1/np.linalg.norm(v2v1)\n v3v1 = v3 - v1 \n v3v1_d = v3v1/np.linalg.norm(v3v1)\n v3v2 = v3 - v2\n v3v2_d = v3v2/np.linalg.norm(v3v2)\n pv1 = locations[i] - v1\n pv2 = locations[i] - v2\n \n p_to_v1v2 = np.linalg.norm(pv1 - pv1.dot(v2v1)* v2v1_d)\n p_to_v1v3 = np.linalg.norm(pv1 - pv1.dot(v3v1)* v3v1_d)\n p_to_v3v2 = np.linalg.norm(pv2 - pv1.dot(v3v2)* v3v2_d)\n c = 1/(p_to_v1v2 + p_to_v1v3 + p_to_v3v2)\n \n t = c*p_to_v3v2\n u = c*p_to_v1v3\n v = 1-u-t\n \n #bary_coor = np.array([t,u,v]).T\n scf_per_intersection[i] = t*scf_per_vertex[v1_i] + u*t*scf_per_vertex[v2_i] + t*scf_per_vertex[v3_i]\n \n return scf_per_intersection\n\n\n\ndef get_scf_peels(root, frame_curr, frame_next, intersections, ray_param, ray_intersect_param, scene, out_dir, is_smpl=True):\n \n mesh_curr = mesh_init(root, frame_curr, is_smpl)\n mesh_next = mesh_init(root, frame_next, is_smpl)\n \n scf_per_intersection = get_scf_per_intersection(mesh_curr, mesh_next,\n intersections, ray_param,\n ray_intersect_param)\n \n \"\"\"\n rotate = trimesh.transformations.rotation_matrix(angle=np.radians(90), direction=[0,0,1])\n rotate2 = trimesh.transformations.rotation_matrix(angle=np.radians(180), direction=[0,1,0])\n extrinsic = rotate2[:3, :3] @ rotate[:3, :3]\n scf_per_intersection = (extrinsic @ scf_per_intersection.T).T\n \"\"\"\n first, second, third, fourth = intersections\n _, _, pixels = ray_param\n _, index_ray, _ = ray_intersect_param\n \n \"\"\" Pixel coordinates for each layer \"\"\"\n pixel_ray_1 = pixels[index_ray[first]]\n pixel_ray_2 = pixels[index_ray[second]]\n pixel_ray_3 = pixels[index_ray[third]]\n pixel_ray_4 = pixels[index_ray[fourth]]\n \n \"\"\" Scene flow \"\"\"\n scf_1_arr = np.zeros((scene.camera.resolution[0], scene.camera.resolution[1], 3), dtype=np.float32)\n scf_2_arr = np.zeros((scene.camera.resolution[0], scene.camera.resolution[1], 3), dtype=np.float32)\n scf_3_arr = np.zeros((scene.camera.resolution[0], scene.camera.resolution[1], 3), dtype=np.float32)\n scf_4_arr = np.zeros((scene.camera.resolution[0], scene.camera.resolution[1], 3), dtype=np.float32)\n \n \n scf_1 = scf_per_intersection[first]\n scf_2 = scf_per_intersection[second]\n scf_3 = scf_per_intersection[third]\n scf_4 = scf_per_intersection[fourth]\n \n temp = [(pixel_ray_1, scf_1, scf_1_arr), (pixel_ray_2, scf_2, scf_2_arr),\n (pixel_ray_3, scf_3, scf_3_arr), (pixel_ray_4, scf_4, scf_4_arr)]\n \n for (a, b, c) in temp:\n ind0 = np.where(a[:, 0] < scene.camera.resolution[0])\n ind1 = np.where(a[:, 1] < scene.camera.resolution[0])\n ind = np.intersect1d(ind0, ind1)\n a = a[ind]\n \n c[a[:, 0], a[:, 1], :] = b[ind]\n \n #name = path_curr.split('/')[0]\n #print(name)\n #os.makedirs(root+'/PeeledMaps/'+name, exist_ok=True)\n mesh_folder = out_dir\n \n np.savez_compressed(mesh_folder + '/' + 'scf_1', a = scf_1_arr)\n np.savez_compressed(mesh_folder + '/' + 'scf_2', a = scf_2_arr)\n np.savez_compressed(mesh_folder + '/' + 'scf_3', a = scf_3_arr)\n np.savez_compressed(mesh_folder + '/' + 'scf_4', a = scf_4_arr)\n \n return [scf_1_arr, scf_2_arr, scf_3_arr, scf_4_arr], scene.camera.K\n","sub_path":"utils/sceneflow.py","file_name":"sceneflow.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"605126302","text":"from math import radians\nfrom django.http.response import JsonResponse\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom django.http import HttpResponse\nimport io\nimport base64\n\n\ndef plot_data(red_r,angle_r):\n angle_ref = angle_r\n lux_ref= red_r\n angle=angle_ref\n print(\"redious\",red_r)\n print(\"angle\",angle_ref)\n\n angle = [radians(a) for a in angle]\n\n lux=lux_ref\n\n # plt.clf()\n sp,(ax1, ax2) = plt.subplots(1, 2, subplot_kw=dict(projection='polar'))\n sp,ax1.set_theta_zero_location('E')\n sp,ax1.set_theta_direction(1)\n sp,ax2.set_theta_zero_location('W')\n sp,ax2.set_theta_direction(-1)\n ax1.plot(angle, lux)\n ax2.plot(angle, lux)\n ax1.set_yticklabels([])\n ax2.set_yticklabels([])\n ax1.set_xticklabels([])\n ax2.set_xticklabels([])\n ax1.grid(False)\n ax2.grid(False)\n ax1.spines['polar'].set_visible(False)\n ax2.spines['polar'].set_visible(False)\n plt.subplots_adjust(left=0.0,\n bottom=0.0, \n right=0.9, \n top=0.9, \n wspace=0.0, \n hspace=0.0)\n # plt.show()\n f = io.BytesIO()\n sp.savefig(f, format=\"png\", facecolor=(0.95, 0.95, 0.95))\n encoded_img = base64.b64encode(f.getvalue()).decode('utf-8').replace('\\n', '')\n context = {\"shape\":encoded_img}\n # f.close()\n # return JsonResponse(' ' % encoded_img, safe=False)\n # response = HttpResponse(content_type = 'image/png')\n # canvas = FigureCanvasAgg(sp)\n # canvas.print_png(response)\n return context","sub_path":"accounts/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"631227070","text":"#Python Dictionaries Example \nstudent = {\n \"name\": \"Mark\", #Key = \"Name\", Value = \"Mark\"\n \"student_id\": \"15163\",\n \"feedback\": None\n}\n\n#List of Dictionaries\nall_students = [\n {\"name\": \"Mark\", \"student_id\": 15163},\n {\"name\": \"Katarina\", \"student_id\": 63112},\n {\"name\": \"Jessica\", \"student_id\": 30021}\n]\n\n#Retriving Dictionary Data\nstudent[\"Name\"] #Will return \"Mark\"\n\n#Key Errors\nstudent[\"last_name\"] #Will return KeyError\n\n#Avoid KeyErrors\nstudent.get(\"last_name\", \"Unknown\") #Will return \"Unknown\"\n\n#Will return all the keys in the dictionary\nstudent.keys()\n\n#Will return all of the values.\nstudent.values() \n\n#Change Value of a key pair\nstudent[\"name\"] = \"James\"\n\n#Delete the key value pair completely from the dictionary\ndel student [\"name\"]\n\n","sub_path":"Pluralsight - Python Courses/Python - Getting Started/Types, Statements, and Other Goodies/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"597277486","text":"#!/usr/bin/python\r\n# coding=utf-8\r\n\r\nimport os\r\nimport platform\r\nimport sys\r\nfrom types import BooleanType, FloatType, IntType, ListType, StringType\r\n\r\nfrom PyQt4.QtGui import (QAction, QActionGroup, QColor, QDialog, QFileDialog,\r\n QFileSystemModel,\r\n QMainWindow, QMenu, QMessageBox, QTextCursor,\r\n QTreeWidgetItem)\r\nfrom PyQt4.QtCore import (QDir, QPoint, QSettings, QSize, QStringList, QVariant,\r\n pyqtSlot, Qt)\r\nfrom PyQt4.QtCore import PYQT_VERSION, QT_VERSION\r\n\r\nfrom gedagui.resources.ui_mainWindow import Ui_MainWindow\r\nfrom gedagui.resources.ui_configdialog import Ui_configDialog\r\nfrom gedagui.utilities import (APP_RESOURCES, USER_DATA_DIR, VERSION)\r\n\r\n#------------------------------------------------------------------------------\r\n# Main graphical components\r\n\r\n\r\nclass MainWindow(QMainWindow, Ui_MainWindow):\r\n '''Mainwindow for initial loading of different plugins.\r\n '''\r\n\r\n def __init__(self):\r\n QMainWindow.__init__(self)\r\n self.setupUi(self)\r\n self.statusbar.showMessage(\"gedaGUI ready\", 4000)\r\n\r\n self.actionGet_selected.setEnabled(False)\r\n\r\n self.file_treeView.doubleClicked.connect(self.update_selected)\r\n self.file_treeView.doubleClicked.connect(self.update_controls)\r\n\r\n # Try to load previous session\r\n settings = QSettings()\r\n self.saveSettings = settings.value(\"Settings/saveSettings\",\r\n QVariant(True)).toBool()\r\n\r\n # Dict to hold the actual paths to selected zips\r\n self.selected = {}\r\n # Location where the files are to be downloaded\r\n self.destination = \"\"\r\n\r\n # Hold the location for various recent places\r\n recent_locations = settings.value(\"Settings/recentLocations\").toList()\r\n if self.saveSettings and recent_locations:\r\n self.recent_locations = recent_locations\r\n else:\r\n self.recent_locations = []\r\n\r\n # Hold the location for various recent places\r\n recent_destinations = settings.value(\"Settings/recentDestinations\").toList()\r\n if self.saveSettings and recent_destinations:\r\n self.recent_destinations = recent_destinations\r\n else:\r\n self.recent_destinations = []\r\n\r\n def closeEvent(self, event):\r\n settings = QSettings()\r\n if self.saveSettings:\r\n if self.recent_locations:\r\n settings.setValue(\"Settings/recentLocations\",\r\n QVariant(self.recent_locations))\r\n if self.recent_destinations:\r\n settings.setValue(\"Settings/recentDestinations\",\r\n QVariant(self.recent_destinations))\r\n settings.setValue(\"Settings/saveSettings\",\r\n QVariant(self.saveSettings))\r\n\r\n @pyqtSlot()\r\n def on_actionAbout_triggered(self):\r\n QMessageBox.about(self, \"About gedaGUI\",\r\n \"\"\"Global Environmental Decision Analysis GUI \r\n Copyright © 2011 Joona Lehtomaki\r\n . \\n\r\n All rights reserved, released under MIT license.\r\n Support for GEDA project.
\r\n Python %s - Qt %s - PyQt %s on %s
\"\"\" % (VERSION, platform.python_version(),\r\n QT_VERSION,\r\n PYQT_VERSION, platform.system()))\r\n\r\n @pyqtSlot()\r\n def on_actionGet_selected_triggered(self):\r\n pass\r\n\r\n @pyqtSlot()\r\n def on_actionOpen_location_triggered(self):\r\n # Get the locations\r\n path = QFileDialog.getExistingDirectory(self, 'Open location', '~')\r\n if path:\r\n self.update_dirmodel(path)\r\n self.recent_locations.append(path)\r\n\r\n @pyqtSlot()\r\n def on_actionOpen_preferences_triggered(self):\r\n dialog = ConfigDialog(self.saveSettings, parent=self)\r\n if dialog.exec_():\r\n self.saveSettings = dialog.saveload_previous()\r\n\r\n @pyqtSlot()\r\n def on_removeSelected_pushButton_clicked(self):\r\n pass\r\n\r\n @pyqtSlot()\r\n def on_setDestination_toolButton_clicked(self):\r\n path = QFileDialog.getExistingDirectory(self, 'Set destination', '~')\r\n self.path_lineEdit.setText(path)\r\n self.destination = path\r\n self.recent_destinations.append(path)\r\n self.update_controls()\r\n\r\n def update_controls(self):\r\n if self.selected_listWidget.count() > 0:\r\n # FIXME: this should only be enabled if there are selected items\r\n self.removeSelected_pushButton.setEnabled(True)\r\n if self.destination != \"\":\r\n self.actionGet_selected.setEnabled(True)\r\n self.getSelected_pushButton.setEnabled(True)\r\n else:\r\n self.removeSelected_pushButton.setEnabled(False)\r\n self.actionGet_selected.setEnabled(False)\r\n self.getSelected_pushButton.setEnabled(False)\r\n\r\n def update_dirmodel(self, path):\r\n dirmodel = QFileSystemModel()\r\n dirmodel.setFilter(QDir.NoDotAndDotDot | QDir.AllEntries)\r\n filefilter = [\"*.zip\"]\r\n dirmodel.setNameFilters(filefilter)\r\n dirmodel.sort(0, Qt.AscendingOrder)\r\n self.file_treeView.setModel(dirmodel)\r\n self.file_treeView.header().setResizeMode(3)\r\n self.file_treeView.model().setRootPath(path)\r\n self.file_treeView.setRootIndex(self.file_treeView.model().index(path))\r\n\r\n def update_selected(self, index):\r\n fileinfo = self.file_treeView.model().fileInfo(index)\r\n filename = fileinfo.fileName()\r\n all_items = self.selected_listWidget.findItems(filename, Qt.MatchRegExp)\r\n if not all_items and filename.endsWith('zip'):\r\n self.selected_listWidget.addItem(fileinfo.fileName())\r\n print(fileinfo.filePath())\r\n self.selected[filename] = fileinfo.filePath()\r\n\r\n\r\nclass ConfigDialog(QDialog, Ui_configDialog):\r\n\r\n def __init__(self, saveload, parent=None):\r\n super(ConfigDialog, self).__init__(parent)\r\n self.setupUi(self)\r\n self.saveloadCheckBox.setChecked(saveload)\r\n\r\n def accept(self):\r\n QDialog.accept(self)\r\n\r\n def saveload_previous(self):\r\n return self.saveloadCheckBox.isChecked()\r\n\r\n#------------------------------------------------------------------------------\r\n# Run the main GUI\r\n\r\n\r\ndef main():\r\n\r\n # Init the Qt basics\r\n from PyQt4.QtGui import QApplication\r\n app = QApplication(sys.argv)\r\n app.setApplicationName(\"gedaGUI\")\r\n app.setApplicationVersion(VERSION)\r\n app.setOrganizationName(\"HU\")\r\n app.setOrganizationDomain(\"MRG\")\r\n window = MainWindow()\r\n window.showMaximized()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"src/gedagui/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"363984060","text":"#Author:Chris.chen\nimport re,xlwt\nimport urllib.request,urllib.parse\nfrom bs4 import BeautifulSoup\n\ndef save_img(auctioniteminfo):\n for k, v in auctioniteminfo.items():\n auctionitemimageurl_list = v[4]\n img_num = 0\n for i in auctionitemimageurl_list:\n print(i)\n if img_num == 0:\n urllib.request.urlretrieve(i, 'C:\\\\auction\\\\%s.jpg' % k.strip())\n img_num += 1\n else:\n urllib.request.urlretrieve(i, 'C:\\\\auction\\\\%s_%s.jpg' %(k.strip(),img_num) )\n img_num += 1\n\ndef save_excel(auctioniteminfo):\n #k lotnum k0 itemtile k1 itemdes k2 estimatelow k3 estimatehigh\n excel_init_file = xlwt.Workbook(encoding='utf-8')\n table = excel_init_file.add_sheet('auction',cell_overwrite_ok=True)\n row_num = 0\n for k, v in auctioniteminfo.items():\n table.write(row_num, 0, k)\n table.write(row_num, 1, str(v[0]))\n table.write(row_num, 2, str(v[1]))\n table.write(row_num, 3, str(v[2]))\n table.write(row_num, 4, str(v[3]))\n row_num += 1\n excel_init_file.save('C:\\\\auction\\\\auction.xls')\n\n\ndef get_auctioniteminfo(auctioniteminfo):\n mainurl = \"http://catalogue.drouot.com/html/d/\"\n page_num = 3\n # page_num = input(\"请输入总页码:\")\n auctionitemurllist = []\n for pn in range(1,page_num+1):\n url = \"http://catalogue.drouot.com/html/d/index.jsp?aff=1&ordre=1&npp=100&id=88827&lng=fr&np=%s\" % pn\n print(url)\n response = urllib.request.urlopen(url)\n data = response.read().decode('utf-8')\n soup = BeautifulSoup(data, 'html5lib')\n\n auctionitemurllist_res = re.findall('fiche.jsp\\?id=\\d+&np=%s&lng=fr&npp=100&ordre=1&aff=1&r='%pn,data)\n auctionitemurllist_page = auctionitemurllist_res[::2]\n auctionitemurllist.extend(auctionitemurllist_page)\n # break\n\n for itemurl in auctionitemurllist:\n itemurl = mainurl + itemurl\n print(itemurl)\n headers = {\n 'Cookie': 'JSESSIONID=12159F393906197C47FFDDD3F7543459; __uzma=49c5aefd-d592-4ed1-a5f1-1a57ecbb3b487656; __uzmb=1512969624; __utma=30049642.1027598601.1512969626.1512969626.1512969626.1; __utmc=30049642; __utmz=30049642.1512969626.1.1.utmccn=(direct)|utmcsr=(direct)|utmcmd=(none); __uzmc=444682269135; __uzmd=1512971662; __utmb=30049642',\n 'Host': 'catalogue.drouot.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'\n }\n data = {}\n params = urllib.parse.urlencode(data).encode(encoding='UTF8')\n req = urllib.request.Request(itemurl, params,headers)\n response = urllib.request.urlopen(req)\n data = response.read().decode('utf-8')\n soup = BeautifulSoup(data, 'html5lib')\n # print(soup)\n #lotnum\n lotnum = re.findall('LOT n°(\\w*\\d*\\w*)',data)[0]\n # print(lotnum)\n #获取估价\n try:\n estimate = re.findall('Estimation : (.*?)',data)[0]\n estimatelow,estimatehigh = str(estimate).split('-')\n estimatelow = re.sub(' ','',estimatelow).strip()\n estimatehigh = re.sub(' ','',re.sub(' ','',estimatehigh).strip())\n # print('估价下限:',estimatelow)\n # print('估价上限:',estimatehigh)\n except:\n estimatelow = ''\n estimatehigh = ''\n\n # #获取起拍价\n # try:\n # startingprice = soup.find('div', {'class', 'start-price___30jex'}).text\n #\n # startingprice = re.sub('Starting Bid', '', startingprice)\n # startingprice = re.sub(',', '', startingprice).strip()\n # # print(startingprice)\n # except :\n # startingprice = ''\n\n #获取拍品标题\n try:\n itemtile = re.findall('(.*?) ',data)[0]\n except:\n itemtile = ''\n #获取拍品描述\n itemdes = soup.find_all('p',{'class','Style5 style6'})[0].text.strip()\n if not itemtile:\n itemtile = itemdes\n # print(itemtile)\n # print(itemdes)\n\n #获取图片地址\n httphead = \"http://\"\n auctionitemimageurl_list = []\n auctionitemimageurl_res = re.findall(\"drouotstatic.zonesecure.org/images/perso/zoomsrc/LOT/\\d+/\\d+/\\w*\\d+\\w*-*\\w*\\d*\\w*\\.\\w+\",data)\n for i in auctionitemimageurl_res:\n auctionitemimageurl_list.append(httphead+i)\n # print('auctionitemimageurl_list:',auctionitemimageurl_list)\n\n print(lotnum + '\\n',itemtile + '\\n',itemdes + '\\n',estimatelow + '\\n',estimatehigh + '\\n')\n\n auctioniteminfo[lotnum]=[]\n auctioniteminfo[lotnum].append(itemtile)\n auctioniteminfo[lotnum].append(itemdes)\n auctioniteminfo[lotnum].append(estimatelow)\n auctioniteminfo[lotnum].append(estimatehigh)\n auctioniteminfo[lotnum].append(auctionitemimageurl_list)\n # break\n return auctioniteminfo\n\n\n\nauctioniteminfo = {}\nauctioniteminfo=get_auctioniteminfo(auctioniteminfo)\nprint(len(auctioniteminfo))\nprint(auctioniteminfo)\nsave_excel(auctioniteminfo)\nsave_img(auctioniteminfo)\n\n","sub_path":"tools/Crawler/爬虫www.drouotlive.com.py","file_name":"爬虫www.drouotlive.com.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"631309767","text":"# -*- coding: utf-8 -*-\n# Python 2.7.15\n\nimport json\nimport codecs\nimport struct\n\nfilename = 'Obi3-T13-f10-20120914.bsm'\n\nfp = open(filename,'r')\ndata = fp.read()\nprint(type(data))\n\n\nstr1 = data[0:6].decode('utf8')\n\nngraml = 2\nclassn=13\nfreqsize = len(struct.pack('f', '0.0')) * classn\nbsize = ngraml + len(struct.pack('I', '1')) + freqsize\n\n#str[offset+ngraml, bsize-ngraml].unpack('Ie*')\nfreq = data[7:]\n\nprint(str1)\n","sub_path":"obi3/unpack.py","file_name":"unpack.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"392468707","text":"#A\nn = int(input())\nl = []\n\nfor i in range(n):\n l.append(str(input()))\n \ndi = {\"purple\":\"Power\",\"green\":\"Time\",\"blue\":\"Space\",\"red\":\"Reality\",\"orange\":\"Soul\",\"yellow\":\"Mind\"}\n\nfor i in l:\n del di[i]\n\nprint(6-n)\nif n != 6:\n for i in di.keys():\n print(di[i])\n","sub_path":"Codefoces/485/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"292688333","text":"import random\n\nfrom django.conf import settings\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib.auth.models import Group\nfrom django.db import transaction\nfrom django.utils.timezone import get_current_timezone, make_aware\nfrom django_ilmoitin.models import NotificationTemplate\nfrom guardian.shortcuts import assign_perm\n\nfrom profiles.enums import AddressType, EmailType, PhoneType\nfrom profiles.models import Address, Email, Phone, Profile\nfrom services.enums import ServiceType\nfrom services.models import AllowedDataField, Service, ServiceConnection\nfrom users.models import User\nfrom youths.enums import NotificationType, YouthLanguage\nfrom youths.models import YouthProfile\n\nDATA_FIELD_VALUES = [\n {\n \"field_name\": \"name\",\n \"translations\": [\n {\"code\": \"en\", \"label\": \"Name\"},\n {\"code\": \"fi\", \"label\": \"Nimi\"},\n {\"code\": \"sv\", \"label\": \"Namn\"},\n ],\n },\n {\n \"field_name\": \"email\",\n \"translations\": [\n {\"code\": \"en\", \"label\": \"Email\"},\n {\"code\": \"fi\", \"label\": \"Sähköposti\"},\n {\"code\": \"sv\", \"label\": \"Epost\"},\n ],\n },\n {\n \"field_name\": \"address\",\n \"translations\": [\n {\"code\": \"en\", \"label\": \"Address\"},\n {\"code\": \"fi\", \"label\": \"Osoite\"},\n {\"code\": \"sv\", \"label\": \"Adress\"},\n ],\n },\n {\n \"field_name\": \"phone\",\n \"translations\": [\n {\"code\": \"en\", \"label\": \"Phone\"},\n {\"code\": \"fi\", \"label\": \"Puhelinnumero\"},\n {\"code\": \"sv\", \"label\": \"Telefonnummer\"},\n ],\n },\n {\n \"field_name\": \"ssn\",\n \"translations\": [\n {\"code\": \"en\", \"label\": \"Social Security Number\"},\n {\"code\": \"fi\", \"label\": \"Henkilötunnus\"},\n {\"code\": \"sv\", \"label\": \"Personnnumer\"},\n ],\n },\n]\n\n\n@transaction.atomic\ndef generate_data_fields():\n \"\"\"Create data fields if they don't exist.\"\"\"\n for value in DATA_FIELD_VALUES:\n if not AllowedDataField.objects.filter(\n field_name=value.get(\"field_name\")\n ).exists():\n data_field = AllowedDataField.objects.create(\n field_name=value.get(\"field_name\")\n )\n for translation in value.get(\"translations\"):\n data_field.set_current_language(translation[\"code\"])\n data_field.label = translation[\"label\"]\n data_field.save()\n\n\nSERVICE_TRANSLATIONS = {\n ServiceType.BERTH: {\n \"title\": {\"en\": \"Boat berths\", \"fi\": \"Venepaikka\", \"sv\": \"Båtplatser\"},\n \"description\": {\n \"en\": \"Boat berths in Helsinki city boat harbours.\",\n \"fi\": \"Venepaikat helsingin kaupungin venesatamissa.\",\n \"sv\": \"Båtplatser i Helsingfors båthamnar.\",\n },\n },\n ServiceType.YOUTH_MEMBERSHIP: {\n \"title\": {\n \"en\": \"Youth service membership\",\n \"fi\": \"Nuorisopalveluiden jäsenkortti\",\n \"sv\": \"Ungdomstjänstmedlemskap\",\n },\n \"description\": {\n \"en\": (\n \"With youth service membership you get to participate in all activities offered by Helsinki city \"\n \"community centers.\"\n ),\n \"fi\": (\n \"Nuorisopalveluiden Jäsenkortilla pääset mukaan nuorisotalojen toimintaan. Saat etuja kaupungin \"\n \"kulttuuritapahtumissa ja paikoissa.\"\n ),\n \"sv\": (\n \"Med medlemskap i ungdomstjänsten får du delta i alla aktiviteter som erbjuds av Helsingfors \"\n \"ungdomscenter.\"\n ),\n },\n },\n ServiceType.GODCHILDREN_OF_CULTURE: {\n \"title\": {\n \"en\": \"Culture Kids\",\n \"fi\": \"Kulttuurin kummilapset\",\n \"sv\": \"Kulturens fadderbarn\",\n },\n \"description\": {\n \"en\": \"Culture kids -service provides free cultural experiences for children born in Helsinki in 2020.\",\n \"fi\": (\n \"Kulttuurin kummilapset -palvelu tarjoaa ilmaisia kulttuurielämyksiä vuodesta 2020 alkaen \",\n \"Helsingissä syntyville lapsille.\",\n ),\n \"sv\": \"Kulturens fadderbarn - tjänsten ger gratis kulturupplevelser för barn födda i Helsingfors 2020.\",\n },\n },\n}\n\n\n@transaction.atomic\ndef generate_services():\n \"\"\"Create services unless they already exist.\n\n Also assigns allowed data fields for each created service.\n \"\"\"\n services = []\n for service_type in ServiceType:\n service = Service.objects.filter(service_type=service_type).first()\n if not service:\n service = Service(service_type=service_type, title=service_type.name)\n if service_type in SERVICE_TRANSLATIONS:\n for language in [\"fi\", \"en\", \"sv\"]:\n service.set_current_language(language)\n service.title = SERVICE_TRANSLATIONS[service_type][\"title\"][\n language\n ]\n service.description = SERVICE_TRANSLATIONS[service_type][\n \"description\"\n ][language]\n service.save()\n for field in AllowedDataField.objects.all():\n if (\n field.field_name != \"ssn\"\n or service.service_type == ServiceType.BERTH\n ):\n service.allowed_data_fields.add(field)\n services.append(service)\n return services\n\n\n@transaction.atomic\ndef generate_groups_for_services(services=tuple()):\n \"\"\"Create groups for given services unless they already exist.\"\"\"\n groups = []\n for service in services:\n group, created = Group.objects.get_or_create(name=service.service_type.value)\n groups.append(group)\n return groups\n\n\ndef assign_permissions(groups=tuple()):\n \"\"\"Assigns all service permissions for a group for development purposes.\n\n Assumes that a Service exists with the same group name.\n \"\"\"\n available_permissions = [item[0] for item in Service._meta.permissions]\n for group in groups:\n service = Service.objects.get(service_type=group.name)\n if service:\n for permission in available_permissions:\n assign_perm(permission, group, service)\n\n\ndef create_user(username=\"\", faker=None):\n \"\"\"Creates a fake user for development purposes.\"\"\"\n\n def get_random_username():\n while True:\n name = faker.user_name()\n if not User.objects.filter(username=name).exists():\n return name\n\n return User(\n first_name=faker.first_name(),\n last_name=faker.last_name(),\n username=username if username else get_random_username(),\n email=faker.email(),\n password=make_password(\"password\"),\n is_active=True,\n is_staff=True,\n date_joined=make_aware(\n faker.date_time_between(start_date=\"-10y\", end_date=\"now\"),\n get_current_timezone(),\n is_dst=False,\n ),\n )\n\n\ndef generate_group_admins(groups=tuple(), faker=None):\n \"\"\"Creates fake development group admins for development purposes.\"\"\"\n\n def create_user_and_add_to_group(group=None):\n user = create_user(username=\"{}_user\".format(group.name.lower()), faker=faker)\n user.save()\n user.groups.add(group)\n return user\n\n return [create_user_and_add_to_group(group) for group in groups]\n\n\ndef generate_profiles(k=50, faker=None):\n \"\"\"Create fake profiles and users for development purposes.\"\"\"\n for i in range(k):\n user = create_user(faker=faker)\n user.save()\n profile = Profile.objects.create(\n user=user,\n language=random.choice(settings.LANGUAGES)[0],\n contact_method=random.choice(settings.CONTACT_METHODS)[0],\n )\n Email.objects.create(\n profile=profile,\n primary=True,\n email_type=EmailType.NONE,\n email=faker.email(),\n )\n Phone.objects.create(\n profile=profile,\n primary=True,\n phone_type=PhoneType.NONE,\n phone=faker.phone_number(),\n )\n Address.objects.create(\n profile=profile,\n primary=True,\n address=faker.street_address(),\n city=faker.city(),\n postal_code=faker.postcode(),\n country_code=faker.country_code(),\n address_type=AddressType.NONE,\n )\n\n\ndef generate_service_connections(youth_profile_percentage=0.2):\n \"\"\"Create fake service connections for development purposes.\"\"\"\n profiles = Profile.objects.all()\n number_of_youth_profiles_to_generate = int(\n profiles.count() * youth_profile_percentage\n )\n\n youth_service = Service.objects.get(service_type=ServiceType.YOUTH_MEMBERSHIP)\n other_services = Service.objects.exclude(pk=youth_service.pk)\n\n for index, profile in enumerate(profiles):\n if index < number_of_youth_profiles_to_generate:\n ServiceConnection.objects.create(profile=profile, service=youth_service)\n else:\n ServiceConnection.objects.create(\n profile=profile, service=random.choice(other_services)\n )\n\n\ndef generate_youth_profiles(faker=None):\n \"\"\"Create fake youth membership profiles for development purposes.\"\"\"\n profiles = Profile.objects.filter(\n service_connections__service__service_type=ServiceType.YOUTH_MEMBERSHIP\n )\n\n for profile in profiles:\n approved = bool(random.getrandbits(1))\n YouthProfile.objects.create(\n profile=profile,\n birth_date=make_aware(\n faker.date_time_between(start_date=\"-17y\", end_date=\"-13y\"),\n get_current_timezone(),\n is_dst=False,\n ),\n language_at_home=random.choice(list(YouthLanguage)),\n approver_first_name=faker.first_name() if approved else \"\",\n approver_last_name=profile.last_name if approved else \"\",\n approved_time=make_aware(\n faker.date_time_between(\n start_date=profile.user.date_joined, end_date=\"now\"\n ),\n get_current_timezone(),\n is_dst=False,\n )\n if approved\n else None,\n photo_usage_approved=bool(random.getrandbits(1)) if approved else False,\n )\n\n\n@transaction.atomic\ndef generate_notifications():\n \"\"\"Creates Youth Profile notifications if they don't already exist.\"\"\"\n\n if not NotificationTemplate.objects.filter(\n type=NotificationType.YOUTH_PROFILE_CONFIRMATION_NEEDED.value\n ).exists():\n template = NotificationTemplate(\n type=NotificationType.YOUTH_PROFILE_CONFIRMATION_NEEDED.value\n )\n fi_subject = \"Vahvista nuorisojäsenyys\"\n fi_html = (\n \"Hei {{ youth_profile.approver_first_name }}, {{ youth_profile.profile.first_name }} on \"\n \"pyytänyt sinua vahvistamaan nuorisojäsenyytensä. Käy antamassa vahvistus Jässäri-palvelussa käyttäen \"\n \"tätä linkkiä:\"\n ''\n \"https://jassari.test.kuva.hel.ninja/approve/{{ youth_profile.approval_token }} Tämä \"\n \"viesti on lähetetty järjestelmistä automaattisesti. Älä vastaa tähän viestiin, sillä vastauksia ei \"\n \"käsitellä. \"\n )\n fi_text = (\n \"Hei {{ youth_profile.approver_first_name }},\\r\\n\\r\\n{{ youth_profile.profile.first_name }} on pyytänyt \"\n \"sinua vahvistamaan nuorisojäsenyytensä. Käy antamassa vahvistus Jässäri-palvelussa käyttäen tätä linkkiä:\"\n \"\\r\\n\\r\\nhttps://jassari.test.kuva.hel.ninja/approve/{{ youth_profile.approval_token }}\\r\\n\\r\\nTämä viesti \"\n \"on lähetetty järjestelmistä automaattisesti. Älä vastaa tähän viestiin, sillä vastauksia ei käsitellä.\"\n )\n template.set_current_language(\"fi\")\n template.subject = fi_subject\n template.body_html = fi_html\n template.body_text = fi_text\n template.set_current_language(\"sv\")\n template.subject = fi_subject + \" SV TRANSLATION NEEDED\"\n template.body_html = fi_html + \"SV TRANSLATION NEEDED
\"\n template.body_text = fi_text + \"SV TRANSLATION NEEDED
\"\n template.set_current_language(\"en\")\n template.subject = fi_subject + \" EN TRANSLATION NEEDED\"\n template.body_html = fi_html + \"EN TRANSLATION NEEDED
\"\n template.body_text = fi_text + \"EN TRANSLATION NEEDED
\"\n template.save()\n\n if not NotificationTemplate.objects.filter(\n type=NotificationType.YOUTH_PROFILE_CONFIRMED.value\n ).exists():\n template = NotificationTemplate(\n type=NotificationType.YOUTH_PROFILE_CONFIRMED.value\n )\n fi_subject = \"Nuorisojäsenyys vahvistettu\"\n fi_html = (\n \"Hei {{ youth_profile.profile.first_name }},\\r\\n \\r\\n{{ youth_profile.approver_first_name }} on \"\n \"vahvistanut nuorisojäsenyytesi. Kirjaudu Jässäri-palveluun nähdäksesi omat tietosi:\\r\\n \\r\\n\"\n 'https://jassari.test.kuva.hel.ninja \\r\\n \\r\\n'\n \"Tämä viesti on lähetetty järjestelmästä automaattisesti. Älä vastaa tähän viestiin, sillä vastauksia \"\n \"ei käsitellä. \"\n )\n fi_text = (\n \"Hei {{ youth_profile.profile.first_name }},\\r\\n\\r\\n{{ youth_profile.approver_first_name }} on vahvistanut \"\n \"nuorisojäsenyytesi. Kirjaudu Jässäri-palveluun nähdäksesi omat tietosi:\\r\\n\\r\\n\"\n \"https://jassari.test.kuva.hel.ninja\\r\\n\\r\\nTämä viesti on lähetetty järjestelmästä automaattisesti. Älä \"\n \"vastaa tähän viestiin, sillä vastauksia ei käsitellä.\"\n )\n template.set_current_language(\"fi\")\n template.subject = fi_subject\n template.body_html = fi_html\n template.body_text = fi_text\n template.set_current_language(\"sv\")\n template.subject = fi_subject + \" SV TRANSLATION NEEDED\"\n template.body_html = fi_html + \"SV TRANSLATION NEEDED
\"\n template.body_text = fi_text + \"SV TRANSLATION NEEDED
\"\n template.set_current_language(\"en\")\n template.subject = fi_subject + \" EN TRANSLATION NEEDED\"\n template.body_html = fi_html + \"EN TRANSLATION NEEDED
\"\n template.body_text = fi_text + \"EN TRANSLATION NEEDED
\"\n template.save()\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"212198746","text":"from datetime import datetime\nfrom .utils import to_timezone\n\nclass Alert():\n __slots__ = (\n 'description',\n 'expires',\n 'expiresLocal',\n 'regions',\n 'severity',\n 'time',\n 'timeLocal',\n 'title',\n 'uri'\n )\n def __init__(self, forecast, **data):\n # since we have defined what we expect in slots,\n # we cant add more information without editing slots\n for k, v in data.items():\n if k in ('expires', 'time'):\n v = datetime.utcfromtimestamp(v)\n setattr(self, k + 'Local', to_timezone(v, forecast.timezone))\n\n setattr(self, k, v)","sub_path":"darksky_async/alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"579271031","text":"import requests\nimport random\ndef searchpic():\n URL = \"http://mandm.plearnjai.com/API/id_nameMovie.php?key=mandm\"\n r = requests.get(url=URL)\n data = r.json()\n q = []\n for movie in data:\n if movie['idIMDb'] !='tt':\n y = (\"https://imagemovie.herokuapp.com/\" + movie['idIMDb'] + '.jpg')\n q.append(y)\n t = random.choice(q)\n return t","sub_path":"searchpic.py","file_name":"searchpic.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"160382169","text":"import json\nimport requests\n\n# Example of a Python implementation for a continuous authentication client.\n# It's necessary to :\n# - update APPLICATION_ID\n# - update request_url at the end of the script\n# unique application id : you can find this in the curl's command to generate jwt token or with Base64(consumer-key:consumer-secret) keys application\nAPPLICATION_ID = 'Zk8zYURrVjB5QkdBM0Y4ZjlaYkd3M0xob3IwYTpSUU52R0NIMWZUdktNOGt5R1lXYURYcjROUDhh'\n# url to obtain acces token\nTOKEN_URL = \"https://portail-api.meteofrance.fr/private/nativeAPIs/token\"\nclass Client_APIM_MF (object):\n '''\n Client fourni par MF pour renouveler régulièrement le token d'acces à l'API WCS.\n Cette procédure est obligatoire depuis le 12 janvier 2022.\n Voir mail de Cédric Legal (MF/DP/Services) du 11 mars 2021 qui explique comment obtenir\n la valeur de APPLICATION_ID ci-dessus.\n voir \"https://portail-api.meteofrance.fr/devportal/apis\"\n et \"https://portail-api.meteofrance.fr/authenticationendpoint/aide_fr.do\"\n pour le code Python de ce client. \n '''\n def __init__(self):\n self.session = requests.Session()\n def request(self, method, url, **kwargs):\n # First request will always need to obtain a token first\n if 'Authorization' not in self.session.headers:\n self.obtain_token()\n # Optimistically attempt to dispatch reqest\n response = self.session.request(method, url, **kwargs)\n if self.token_has_expired(response):\n # We got an 'Access token expired' response => refresh token\n self.obtain_token()\n # Re-dispatch the request that previously failed\n response = self.session.request(method, url, **kwargs)\n return response\n def token_has_expired(self, response):\n status = response.status_code\n content_type = response.headers['Content-Type']\n if status == 401 and 'application/json' in content_type:\n if 'expired' in response.headers['WWW-Authenticate']:\n return True\n return False\n def obtain_token(self):\n # Obtain new token\n data = {'grant_type': 'client_credentials'}\n headers = {'Authorization': 'Basic ' + APPLICATION_ID}\n access_token_response = requests.post(TOKEN_URL, data=data, verify=True, allow_redirects=False, headers=headers)\n #print (access_token_response.json())\n token = access_token_response.json()['access_token']\n # Update session with fresh token\n self.session.headers.update({'Authorization': 'Bearer %s' % token})\n\"\"\" \ndef main():\n client = Client_APIM_MF()\n # Issue a series of API requests an example. For use this test, you must first subscribe to the arome api with your application\n client.session.headers.update({'Accept': 'application/json'})\n for i in range(1):\n response = client.request('GET', 'https://public-api.meteofrance.fr/public/arome/1.0/wms/MF-NWP-HIGHRES-AROME-001-FRANCE-WMS/GetCapabilities?service=WMS&version=1.3.0', verify=False)\n print(response.status_code)\n print (response.content)\n time.sleep(5)\nif __name__ == '__main__':\n main()\n\"\"\"","sub_path":"Client_APIM_MF.py","file_name":"Client_APIM_MF.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"193188009","text":"from __future__ import division\nfrom Hamamatsu_ScopeFoundry.CameraMeasurement import HamamatsuMeasurement\nfrom ScopeFoundry.helper_funcs import sibling_path, load_qt_ui_file\nfrom SyncreadoutTriggerMeasurement import SyncreadoutTriggerMeasurement\nfrom ScopeFoundry import h5_io\nimport pyqtgraph as pg\nimport numpy as np\nimport os\nimport time\nfrom datetime import datetime\nimport pandas as pd\n\nclass SyncreadoutTriggerCounterMeasurement(HamamatsuMeasurement):\n \n name = \"SyncreadoutTriggerCounterMeasurement\"\n \n def update_cell_counter(self,frame,th): #th potrebbe essere un valore da inserire\n \n frame_mean=np.mean(frame)\n t1 = time.time\n delta_t = t1-self.t0\n self.cell_list.append([t1, delta_t, frame_mean])\n \n if frame_mean > th and not self.detection :\n self.counter+=1\n self.detection=1\n self.delta_t = delta_t \n self.t0 = t1\n elif frame_mean < th and self.detection :\n self.detection=0 \n \n def getNumberOfCells(self):\n \n return self.counter\n \n def get_delta_t(self):\n \n return self.delta_t\n \n def getMeanImage1(self):\n \n return self.frame_mean\n \n def setup(self):\n \n super().setup()\n \n self.settings.New('counter_threshold', dtype=float, initial=5000, spinbox_step = 1000, spinbox_decimals = 0)\n self.settings.New('number_of_cells', dtype=float, initial=0, spinbox_step = 1, spinbox_decimals = 0, hardware_read_func = self.getNumberOfCells)\n self.settings.New('delta_t', dtype=float, initial=0, spinbox_step = 1, spinbox_decimals = 0, hardware_read_func = self.get_delta_t)\n self.settings.New('mean_image_1', dtype=float, initial=0, spinbox_step = 1, hardware_read_func = self.getMeanImage1, spinbox_decimals = 0)\n \n \n def run(self):\n \n self.eff_subarrayh = int(self.camera.subarrayh.val/self.camera.binning.val)\n self.eff_subarrayv = int(self.camera.subarrayv.val/self.camera.binning.val)\n \n self.image = np.zeros((self.eff_subarrayv,self.eff_subarrayh),dtype=np.uint16)\n self.image[0,0] = 1 #Otherwise we get the \"all zero pixels\" error (we should modify pyqtgraph...)\n try:\n \n self.camera.read_from_hardware()\n\n self.camera.hamamatsu.startAcquisition()\n \n index = 0\n \n if self.camera.acquisition_mode.val == \"fixed_length\":\n \n if self.settings['save_h5']:\n self.initH5()\n print(\"\\n \\n ******* \\n \\n Saving :D !\\n \\n *******\")\n \n while index < self.camera.hamamatsu.number_image_buffers:\n \n # Get frames.\n #The camera stops acquiring once the buffer is terminated (in snapshot mode)\n [frames, dims] = self.camera.hamamatsu.getFrames()\n \n # Save frames.\n for aframe in frames:\n \n self.np_data = aframe.getData() \n self.image = np.reshape(self.np_data,(self.eff_subarrayv, self.eff_subarrayh)) \n if self.settings['save_h5']:\n self.image_h5[index,:,:] = self.image # saving to the h5 dataset\n self.h5file.flush() # maybe is not necessary\n \n if self.interrupt_measurement_called:\n break\n index+=1\n print(index)\n \n if self.interrupt_measurement_called:\n break \n #index = index + len(frames)\n #np_data.tofile(bin_fp)\n self.settings['progress'] = index*100./self.camera.hamamatsu.number_image_buffers\n \n elif self.camera.acquisition_mode.val == \"run_till_abort\":\n \n save = True\n self.counter=0\n self.detection=0\n self.cell_list=[]\n self.t0=time.time()\n self.delta_t=0\n \n while not self.interrupt_measurement_called:\n \n [frame, dims] = self.camera.hamamatsu.getLastFrame() \n self.np_data = frame.getData()\n self.image = np.reshape(self.np_data,(self.eff_subarrayv, self.eff_subarrayh))\n \n self.update_cell_counter(self.np_data,self.settings.counter_threshold.val)\n self.settings.number_of_cells.read_from_hardware()\n print(self.cell_list)\n if self.settings['record']:\n self.camera.hamamatsu.stopAcquisition()\n self.camera.hamamatsu.startRecording()\n self.camera.hamamatsu.stopRecording()\n self.interrupt() \n \n if self.settings['save_h5']:\n \n if save:\n self.initH5()\n save = False #at next cycle, we don't do initH5 again (we have already created the file)\n \n mean_value = np.mean(self.np_data)\n last_frame_index = self.camera.hamamatsu.buffer_index\n #print(self.camera.hamamatsu.last_frame_number)\n if self.debug:\n print(\"The mean is: \", mean_value)\n#===============================================================================\n# if mean_value > self.settings['threshold']:\n# print(\"\\n \\n ******* \\n \\n Saving :D !\\n \\n *******\")\n# j = 0\n# \n# #while(len(frames)) < self.camera.number_frames.val: #we want 200 frames\n# while j < self.camera.number_frames.val: \n# upgraded_last_frame_index, upgraded_frame_number = self.camera.hamamatsu.getTransferInfo() #we upgrade the transfer information\n# \n# if last_frame_index < upgraded_last_frame_index: #acquisition has not reached yet the end of the buffer \n# j = self.getThresholdH5(last_frame_index, upgraded_last_frame_index + 1, j)\n# # for i in range(last_frame_index, upgraded_last_frame_index + 1):\n# # \n# # if j < self.camera.number_frames.val:\n# # frame = self.camera.hamamatsu.getRequiredFrame(i)[0]\n# # self.np_data = frame.getData() #-1 takes the last element\n# # self.image = np.reshape(self.np_data,(int(self.camera.subarrayv.val), int(self.camera.subarrayh.val))).T\n# # self.image_h5[j,:,:] = self.image # saving to the h5 dataset\n# # j+=1\n# # self.settings['progress'] = j*100./self.camera.hamamatsu.number_image_buffers\n# \n# else: #acquisition has reached the end of the buffer\n# j = self.getThresholdH5(last_frame_index+1, 3*self.camera.hamamatsu.number_image_buffers + 1, j)\n# j = self.getThresholdH5(0, upgraded_last_frame_index, j)\n# \n# last_frame_index = upgraded_last_frame_index\n# \n# \n# \n# \n# self.interrupt()\n# print(self.camera.hamamatsu.last_frame_number)\n#===============================================================================\n \n if mean_value > self.settings['threshold']:\n \n print(\"\\n \\n ******* \\n \\n Saving :D !\\n \\n *******\")\n j = 0\n #starting_index=last_frame_index\n stalking_number = 0\n remaining = False\n while j < self.camera.number_frames.val: \n \n self.get_and_save_Frame(j,last_frame_index)\n last_frame_index = self.updateIndex(last_frame_index)\n \n if self.debug:\n print(\"The last_frame_index is: \", last_frame_index)\n \n j+=1\n \n if not remaining:\n upgraded_last_frame_index = self.camera.hamamatsu.getTransferInfo()[0] # upgrades the transfer information\n #The stalking_number represents the relative steps the camera has made in acquisition with respect to the saving.\n stalking_number = stalking_number + self.camera.hamamatsu.backlog - 1\n \n if self.debug:\n print('upgraded_last_frame_index: ' , upgraded_last_frame_index)\n print('stalking_number: ' , stalking_number)\n print('The camera is at {} passes from you'.format(self.camera.hamamatsu.number_image_buffers - stalking_number))\n \n if stalking_number + self.camera.hamamatsu.backlog > self.camera.hamamatsu.number_image_buffers: \n self.camera.hamamatsu.stopAcquisitionNotReleasing() #stop acquisition when we know that at next iteration, some images may be rewritten\n remaining = True #if the buffer reach us, we execute the \"while\" without the \"if not remaining\" block.\n \n self.interrupt()\n self.camera.hamamatsu.stopAcquisition()\n if self.debug:\n print(\"The last_frame_number is: \", self.camera.hamamatsu.last_frame_number)\n \n finally:\n \n self.camera.hamamatsu.stopAcquisition()\n\n if self.settings['save_h5']:\n self.h5file.close() # close h5 file \n self.settings.save_h5.update_value(new_val = False)\n ","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":11295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"52901182","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\n\r\nCreated for JPM Assignment - Super Simple Stock Market for (Beverage Trading Companies)\r\n\r\n\r\n\r\nFriday 18th of May 2018 : \r\n\r\n\r\n@author: MILAN GOHIL\r\n\"\"\"\r\n\r\n\r\n\r\n\r\nimport time\r\n\r\nimport csv\r\n\r\n\r\n\r\nDEFAULT_TIME_MINUTES = 5\r\n\r\n\r\n\r\nclass stockFactor:\r\n\r\n symbol_=\"\"\r\n\r\n properties_={} # key:value array\r\n\r\n \r\n\r\n def __init__(self, symbol,properties):\r\n\r\n self.symbol_ = symbol\r\n\r\n self.properties_ = properties \r\n\r\n \r\n\r\nclass Trade:\r\n\r\n symbol_=\"\"\r\n\r\n timestamp_= None\r\n\r\n quantity_= None\r\n\r\n buy_sell_= None\r\n\r\n price_= None\r\n\r\n\r\n\r\n def __init__(self, symbol, timestamp, quantity, buy_sell, price):\r\n\r\n self.symbol_ = symbol\r\n\r\n self.timestamp_ = timestamp\r\n\r\n self.quantity_ = quantity\r\n\r\n self.buy_sell_ = buy_sell\r\n\r\n self.price_ = price \r\n\r\n \r\n\r\ndef calculateDividendYield(stockFactor, price):\r\n\r\n #Receives a stockFactor object and a price\r\n\r\n #Returns the value of Dividend Yield \r\n\r\n if stockFactor.properties_['Type'] == \"Common\":\r\n\r\n divYield = float(stockFactor.properties_['Last Dividend'])/price\r\n\r\n else:\r\n\r\n divYield = float(stockFactor.properties_['Fixed Dividend'].strip(\"%\"))*float(stockFactor.properties_['Par Value'])/(100*price)\r\n\r\n return divYield\r\n\r\n\r\n\r\ndef calculatePERatio(stockFactor, price):\r\n\r\n #Receives a stockFactor object and a price\r\n\r\n #Returns the value of P/E Ratio \r\n\r\n if float(stockFactor.properties_['Last Dividend']) == 0:\r\n\r\n return (\"Last Dividend Is 0, P/E Ratio Cannot Be Computed\\n\")\r\n\r\n else: \r\n\r\n return price/float(stockFactor.properties_['Last Dividend'])\r\n\r\n \r\n\r\ndef calculateVWSP(tradesArray):\r\n\r\n #Receives an array with Trade objects\r\n\r\n #Returns the value of the VWSP\r\n\r\n num = den = 0 \r\n\r\n for i in range(0,len(tradesArray)):\r\n\r\n num+= tradesArray[i].quantity_*tradesArray[i].price_\r\n\r\n den+= tradesArray[i].quantity_\r\n\r\n return num/den \r\n\r\n \r\n\r\ndef calculateGBCE(volumesArray):\r\n\r\n #Receives an array\r\n\r\n #Returns the geometric mean \r\n\r\n n = len(volumesArray)\r\n\r\n p = 1\r\n\r\n for i in range(0, n):\r\n\r\n p *= volumesArray[i]\r\n\r\n return pow(p,1/n) \r\n\r\n \r\n\r\ndef loadingStocks():\r\n\r\n #Loads stock file path and calls parseLines() to save stocks to an array\r\n\r\n #Returns stockArray\r\n\r\n #Symbol's name is assumed to be 'Stock Symbol'\r\n\r\n stockArrayAux = [] \r\n\r\n StockIn=input(\"\\nPlease Select Path For Stocks CSV file \\n(Locate And Select File: './Your_Location/SSSM.csv'): \")\r\n\r\n with open(StockIn, 'r') as inputFile:\r\n\r\n inputRows = csv.DictReader(inputFile, delimiter=';')\r\n \r\n for row in inputRows:\r\n\r\n symbol=row['Stock Symbol']\r\n\r\n symbolArray.append(symbol)\r\n\r\n del row['Stock Symbol']\r\n\r\n stockArrayAux.append(stockFactor(symbol, dict(row))) \r\n\r\n print(\"Beverage Stocks Have Been Loaded Succesfully\\n\") \r\n\r\n return stockArrayAux \r\n\r\n \r\n\r\ndef extractTradesSymbol(tradesArray, stock):\r\n\r\n #Receives an array of Trade objects and a stock's symbol\r\n\r\n #Returns an array of Trades corresponding to that stock's symbol \r\n\r\n tradesAux = []\r\n\r\n for i in range(0, len(tradesArray)):\r\n\r\n if tradesArray[i].symbol_ == stock:\r\n\r\n tradesAux.append(tradesArray[i]) \r\n\r\n return tradesAux \r\n\r\n\r\n\r\ndef extractTradesTime(tradesArray, minutes):\r\n\r\n #Receives an array of Trade objects and a time in minutes\r\n\r\n #Returns an array of Trades in the last 'x' minutes \r\n\r\n tradesAux = []\r\n\r\n for i in range(0, len(tradesArray)):\r\n\r\n if (time.time() - tradesArray[i].timestamp_) <= minutes*60 :\r\n\r\n tradesAux.append(tradesArray[i]) \r\n\r\n return tradesAux \r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n\r\n \r\n\r\n #Data arrays for stocks\r\n\r\n stockArray = []\r\n\r\n symbolArray = []\r\n\r\n tradeRecordArray = [] \r\n\r\n # Main Menu for SSSM\r\n\r\n mainMenu = {'1': \"Load Beverage Stocks.\", '2': \"Select Stock To Engage.\", '3': \"Calculate (GBCE) All Share Index.\", '4': \"Exit Stock Market\"}\r\n\r\n while True:\r\n\r\n options=list(mainMenu.keys())\r\n\r\n options.sort()\r\n\r\n print(\"\\n\") \r\n\r\n for entry in options: \r\n\r\n print (entry, mainMenu[entry])\r\n\r\n selection=input(\"\\nPlease Select An Option : \")\r\n\r\n if selection =='1':\r\n\r\n #Loading Stock Information \r\n\r\n if len(stockArray) == 0: \r\n\r\n stockArray=loadingStocks() \r\n\r\n else:\r\n\r\n answer=input(\"Loading A New File Will Erase Previous Information, Are You Sure You Wish To Continue? (y/n): \")\r\n\r\n while answer not in ['y','n']:\r\n\r\n answer = input(\"Please enter only 'y' or 'n' for YES or NO: \")\r\n\r\n if answer=='y':\r\n\r\n stockArray=loadingStocks()\r\n\r\n elif selection == '2': \r\n\r\n #Select a stock to operate with \r\n\r\n if len(stockArray) == 0:\r\n\r\n print (\"\\nPlease Load Stocks FIRST To Engage With\\n\")\r\n\r\n else:\r\n\r\n selectedStock=input(\"\\nSelect A Stock To Engage On\\n(Select A Stock (Upper Case) : TEA | POP | ALE | GIN | JOE): \")\r\n\r\n if selectedStock in symbolArray:\r\n\r\n operationsMenu = {'1': \"Calculate Dividend Yield.\", '2': \"Calculate P/E Ratio.\",\r\n '3': \"Initiate A BUY/SELL Trade.\", '4': \"Calculate Volume Weighted Stock Price.\", '5': \"Go back.\"}\r\n\r\n while True:\r\n\r\n options=list(operationsMenu.keys())\r\n\r\n options.sort()\r\n\r\n print(\"\\n\")\r\n\r\n for entry in options: \r\n\r\n print (entry, operationsMenu[entry])\r\n\r\n selection=input(\"\\nPlease Select A Function From The Menu: \")\r\n\r\n if selection == '1':\r\n\r\n #Calculating dividend yield\r\n\r\n for i in range(0,len(stockArray)):\r\n\r\n if selectedStock == stockArray[i].symbol_:\r\n\r\n stockFactorAux = stockArray[i]\r\n\r\n break\r\n\r\n while True:\r\n\r\n try:\r\n\r\n price = float(input(\"Enter Price For The Dividend Yield Calculation: \")) \r\n\r\n except ValueError:\r\n\r\n print(\"Please Verify Quantity You Are Entering Is A Float \")\r\n\r\n continue\r\n\r\n else:\r\n\r\n if price <= 0:\r\n\r\n print(\"Please Enter A Positive Price\") \r\n\r\n continue\r\n\r\n else:\r\n\r\n break \r\n\r\n print(\"Result Is: \",calculateDividendYield(stockFactorAux, float(price)))\r\n\r\n elif selection == '2':\r\n\r\n #Calculating P/E Ratio = Price Earnings Ratio\r\n\r\n for i in range(0,len(stockArray)):\r\n\r\n if selectedStock == stockArray[i].symbol_:\r\n\r\n stockFactorAux = stockArray[i]\r\n\r\n break\r\n\r\n while True:\r\n\r\n try:\r\n\r\n price = float(input(\"Enter Price For P/E Ratio Calculation: \")) \r\n\r\n except ValueError:\r\n\r\n print(\"Please Verify Quantity You Are Entering Is A Float \")\r\n\r\n continue\r\n\r\n else:\r\n\r\n if price <= 0:\r\n\r\n print(\"Please Enter A Positive Price\") \r\n\r\n continue\r\n\r\n else:\r\n\r\n break \r\n\r\n print(\"Result Is: \",calculatePERatio(stockFactorAux, float(price)))\r\n\r\n elif selection == '3':\r\n\r\n #Recording and confirming a BUY or SELL of a trade\r\n\r\n buy_sell = input(\"Select 'b' for BUY or 's' for SELL: \")\r\n\r\n while buy_sell not in ['b','s']:\r\n\r\n buy_sell = input(\"Please Enter Only 'b' or 's' For BUY or SELL: \") \r\n\r\n while True:\r\n\r\n try:\r\n\r\n quantity = int(input(\"Enter The Quantity To BUY Or SELL: \")) \r\n\r\n except ValueError:\r\n\r\n print(\"Please Verify The Quantity You Are Entering Is An Integer \")\r\n\r\n continue\r\n\r\n else:\r\n\r\n if quantity <= 0:\r\n\r\n print(\"Please Verify The Quantity You Are Entering Is Positive\")\r\n\r\n continue\r\n\r\n else:\r\n\r\n break\r\n\r\n while True:\r\n\r\n try:\r\n\r\n price = float(input(\"Enter Price Of The Operation: \")) \r\n\r\n except ValueError:\r\n\r\n print(\"Please Verify The Quantity You Are Entering Is A Float \")\r\n\r\n continue\r\n\r\n else:\r\n\r\n if price <= 0:\r\n\r\n print(\"Please Enter A Positive Price\") \r\n\r\n continue\r\n\r\n else:\r\n\r\n break \r\n\r\n tradeRecordArray.append(Trade(selectedStock, time.time(), quantity, buy_sell, price))\r\n\r\n tradeAux = Trade(selectedStock, time.time(), quantity, buy_sell, price) \r\n\r\n print(\"Your BUY/SELL Trade Has Been Recorded Succesfully\\n\")\r\n\r\n elif selection == '4': \r\n\r\n #Calculating VWSP = Volume Weighted Stock Price\r\n\r\n tradesAux = extractTradesSymbol(tradeRecordArray, selectedStock)\r\n\r\n if len(tradesAux) == 0:\r\n\r\n print(\"Please Record Trades For This Stock Before Trying To Obtain VWSP\")\r\n\r\n else:\r\n\r\n tradesAux = extractTradesTime(tradesAux,DEFAULT_TIME_MINUTES)\r\n\r\n if len(tradesAux) == 0:\r\n\r\n print(\"There Are No Records In Under 5 Minutes For This Stock\")\r\n\r\n else:\r\n\r\n print(\"VWSP is: \", calculateVWSP(tradesAux)) \r\n\r\n elif selection == '5':\r\n\r\n break \r\n\r\n else:\r\n\r\n print(\"Error, Unknown Option Selected!\") \r\n\r\n else:\r\n\r\n print(\"Entered Stock Is Not An Option. Please Try Again\") \r\n\r\n elif selection == '3':\r\n\r\n #Calculate GBCE\r\n\r\n if len(stockArray) == 0:\r\n\r\n print (\"\\nFirst You Must Load Stocks To Operate With\\n\")\r\n\r\n else:\r\n\r\n if len(tradeRecordArray) == 0:\r\n\r\n print(\"\\nFirst You Must Initiate BUY/SELL Trades\\n\")\r\n\r\n else: \r\n\r\n volumeWSP = [] \r\n\r\n for i in range(0, len(stockArray)):\r\n\r\n tradesAux = extractTradesSymbol(tradeRecordArray, stockArray[i].symbol_)\r\n\r\n if len(tradesAux) > 0:\r\n\r\n volumeWSP.append(calculateVWSP(tradesAux)) \r\n\r\n print(\"(GBCE) All Share Index Is: \", calculateGBCE(volumeWSP))\r\n\r\n elif selection == '4': \r\n\r\n #End program \r\n\r\n print(\"Stock Market Program Has Ended\")\r\n\r\n break\r\n\r\n else: \r\n\r\n print (\"Error, Unknown Option Selected! - Please Try Again!\")\r\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":10970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"199788277","text":"import sys\n\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom mptt.models import MPTTModel, TreeForeignKey\n\nfrom core.models import BaseModel\n\n\nclass Category(BaseModel, MPTTModel):\n \"\"\"\n Category Model\n \"\"\"\n channel = models.ForeignKey(\n 'channels.Channel',\n verbose_name=_('Channel'),\n related_name='categories',\n on_delete=models.CASCADE\n )\n\n parent = TreeForeignKey(\n 'self',\n verbose_name=_('Parent'),\n related_name='children',\n on_delete=models.CASCADE,\n db_index=True,\n null=True,\n blank=True\n )\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n\n if self.parent:\n self.slug = f'{self.parent.slug}-{self.slug}'\n\n super(Category, self).save()\n\n @staticmethod\n def import_categories(channel, categories_list):\n parent = None\n count = 0\n\n for category in categories_list:\n parent, created = Category.objects.get_or_create(\n name=category,\n channel=channel,\n parent=parent\n )\n if created:\n sys.stdout.write(f'{category} created!\\n')\n count += 1\n\n return count\n\n class MPTTMeta:\n order_insertion_by = ['name']\n","sub_path":"work-at-olist/categories/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"597753785","text":"import tkinter as tk\n\nclass Pages():\n\t\"\"\"mother class of pages\"\"\"\n\n\tbg_color = \"#AECADD\"\n\tgrey_color = \"#737373\"\n\tmatte_blue = \"#4289A5\"\n\tblack_color = \"#252525\"\n\tpink_color = \"#EFCFC9\"\n\tactive_server = False\n\n\tdef __init__(self, master):\n\t\tself.master = master\n\t\tself.master.title(\"Skyper Bitches\")\n\t\tself.master.geometry(\"400x400\")\n\t\tself.master.configure(background=self.bg_color)\n\n\n\tdef new_window(self):\n\t\t# self.master = master\n\t\tself.newWindow = tk.Toplevel(self.master)\n\t\tself.app = SkypeWindow(self.newWindow)\n\t\tprint(self.__class__)\n\n\nclass SkypeWindow(Pages):\n\tdef __init__(self, master):\n\t\tpass\n\n\nclass LoginPage(Pages):\n\t\"\"\"initial page seen when you launch the interface\"\"\"\n\n\tdef __init__(self, *args, **kwargs):\n\t\t# Pages.__init__(self, *args, **kwargs)\n\t\tPages.__init__(self, *args, **kwargs)\n\t\tself.master.configure(background=\"#000000\")\n\t\tself.frame = tk.Frame(self.master)\n\n\t\t# self.label = tk.Label(self.frame, text=\"This is my first program thing\")\n\t\t# self.label.pack()\n\t\t# # self.label.grid(row=2, column=2, columnspan=2, sticky=W+E)\n\t\t#\n\t\t# self.username_label = tk.Label(self.frame, text=\"Username\", fg=\"#5B6B77\", bg=self.bg_color)\n\t\t# self.username_field = tk.Entry(self.frame)\n\t\t# self.username_label.pack()\n\t\t# self.username_field.pack()\n\n\t\tself.hello = tk.Button(self.frame, text=\"Connect To Chat\", command=self.new_window(), fg=self.bg_color,\n\t\t\t\t\t\t\t bg=self.black_color)\n\t\tself.hello.pack()\n\t\tself.frame.pack()\n\t\tprint('shit')\n\n\t\tself.button1 = tk.Button(self.frame, text='New Window', width=25, command=Pages.new_window(self.master))\n\t\tself.button1.pack()\n\t\tself.frame.pack()\n\n\tdef new_window(self):\n\t\t# self.newWindow =\n\t\t# tk.Toplevel(self.master)\n\t\tprint(self.__class__)\n\n\n\ndef main():\n\troot = tk.Tk()\n\tapp = LoginPage(root)\n\troot.mainloop()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"attempted_classes.py","file_name":"attempted_classes.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"75084956","text":"\"\"\"This tutorial introduces restricted boltzmann machines (RBM) using Theano.\n\nBoltzmann Machines (BMs) are a particular form of energy-based model which\ncontain hidden variables. Restricted Boltzmann Machines further restrict BMs\nto those without visible-visible and hidden-hidden connections.\n\"\"\"\n\nimport numpy\nimport theano\nimport theano.tensor as T\nfrom bpfe.dl_dbn.constants import DTYPES\n\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom theano.tensor.sharedvar import SharedVariable\n\n\n# noinspection PyCallingNonCallable\nfrom bpfe.dl_dbn.hidden_layer import HiddenLayer\n\n\nclass RBM(object):\n \"\"\"Restricted Boltzmann Machine (RBM) \"\"\"\n def __init__(\n self,\n input_vector=None,\n n_in=784,\n n_hidden=500,\n hidden_layer=None,\n vbias=None,\n lmbda=0.1,\n momentum=0.9,\n weight_decay_cost=0.001,\n numpy_seed=None\n ):\n \"\"\"\n RBM constructor. Defines the parameters of the model along with\n basic operations for inferring hidden from visible (and vice-versa),\n as well as for performing CD updates.\n\n :param input_vector: None for standalone RBMs or symbolic variable if\n RBM is\n part of a larger graph.\n\n :param n_in: number of visible units\n\n :param n_hidden: number of hidden units\n\n :param W: None for standalone RBMs or symbolic variable pointing to a\n shared weight matrix in case RBM is part of a DBN network; in a DBN,\n the weights are shared between RBMs and layers of a MLP\n\n :param hbias: None for standalone RBMs or symbolic variable pointing\n to a shared hidden units bias vector in case RBM is part of a\n different network\n\n :param vbias: None for standalone RBMs or a symbolic variable\n pointing to a shared visible units bias\n \"\"\"\n\n self.n_visible = n_in\n self.n_hidden = n_hidden\n self.lmbda = lmbda\n self.weight_decay_cost = weight_decay_cost\n\n numpy_rng = numpy.random.RandomState(numpy_seed)\n theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))\n\n self.theano_rng = theano_rng\n self.monitoring_cost = None\n self.updates = None\n\n if hidden_layer is None:\n # W is initialized with `initial_W` which is uniformely\n # sampled from -4*sqrt(6./(n_visible+n_hidden)) and\n # 4*sqrt(6./(n_hidden+n_visible)) the output of uniform if\n # converted using asarray to dtype DTYPES.FLOATX so\n # that the code is runable on GPU\n initial_W = numpy.asarray(\n numpy_rng.uniform(\n # TODO figure out initial weights\n # low=-.01,\n # high=.01,\n # low=-1./numpy.sqrt(n_in),\n # high=1./numpy.sqrt(n_in),\n low=-4 * numpy.sqrt(6. / (n_hidden + n_in)),\n high=4 * numpy.sqrt(6. / (n_hidden + n_in)),\n size=(n_in, n_hidden)\n ),\n dtype=DTYPES.FLOATX\n )\n # theano shared variables for weights and biases\n W = theano.shared(value=initial_W, name='W', borrow=True)\n\n # create shared variable for hidden units bias\n hbias = theano.shared(\n value=numpy.zeros(\n n_hidden,\n dtype=DTYPES.FLOATX\n ),\n name='hbias',\n borrow=True\n )\n self.hidden_layer = HiddenLayer(\n numpy_rng, None, None, None, W=W, b=hbias)\n else:\n self.hidden_layer = hidden_layer\n\n # track momentums\n self.Ms = []\n\n # track mean squareds\n self.MSs = []\n\n self.momentum = momentum\n\n if vbias is None:\n # create shared variable for visible units bias\n vbias = theano.shared(\n value=numpy.zeros(\n n_in,\n dtype=DTYPES.FLOATX\n ),\n name='vbias',\n borrow=True\n )\n\n # initialize input layer for standalone RBM or layer0 of DBN\n self.input = input_vector\n if not input_vector:\n self.input = T.matrix('input')\n\n self.vbias = vbias\n # **** WARNING: It is not a good idea to put things in this list\n # other than shared variables created in this function.\n self.params = [self.hidden_layer.W, self.hidden_layer.b, self.vbias]\n\n def free_energy(self, v_sample):\n \"\"\" Function to compute the free energy \"\"\"\n wx_b = T.dot(v_sample, self.hidden_layer.W) + self.hidden_layer.b\n vbias_term = T.dot(v_sample, self.vbias)\n hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)\n return -hidden_term - vbias_term\n\n def propup(self, vis):\n \"\"\"\n This function propagates the visible units activation upwards to\n the hidden units\n\n Note that we return also the pre-sigmoid activation of the\n layer. As it will turn out later, due to how Theano deals with\n optimizations, this symbolic variable will be needed to write\n down a more stable computational graph (see details in the\n reconstruction cost function)\n \"\"\"\n pre_sigmoid_activation = \\\n T.dot(vis, self.hidden_layer.W) + self.hidden_layer.b\n return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]\n\n def sample_h_given_v(self, v0_sample):\n \"\"\" This function infers state of hidden units given visible units \"\"\"\n\n # compute the activation of the hidden units given a sample of\n # the visibles\n pre_sigmoid_h1, h1_mean = self.propup(v0_sample)\n\n # get a sample of the hiddens given their activation\n # Note that theano_rng.binomial returns a symbolic sample of dtype\n # int64 by default. If we want to keep our computations in floatX\n # for the GPU we need to specify to return the dtype floatX\n h1_sample = self.theano_rng.binomial(\n size=h1_mean.shape,\n n=1,\n p=h1_mean,\n dtype=DTYPES.FLOATX\n )\n return [pre_sigmoid_h1, h1_mean, h1_sample]\n\n def propdown(self, hid):\n \"\"\"\n This function propagates the hidden units activation downwards to\n the visible units\n\n Note that we return also the pre_sigmoid_activation of the\n layer. As it will turn out later, due to how Theano deals with\n optimizations, this symbolic variable will be needed to write\n down a more stable computational graph (see details in the\n reconstruction cost function)\n \"\"\"\n pre_sigmoid_activation = \\\n T.dot(hid, self.hidden_layer.W.T) + self.vbias\n return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]\n\n def sample_v_given_h(self, h0_sample):\n \"\"\" This function infers state of visible units given hidden units \"\"\"\n\n # compute the activation of the visible given the hidden sample\n pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)\n\n # get a sample of the visible given their activation\n # Note that theano_rng.binomial returns a symbolic sample of dtype\n # int64 by default. If we want to keep our computations in floatX\n # for the GPU we need to specify to return the dtype floatX\n v1_sample = self.theano_rng.binomial(\n size=v1_mean.shape,\n n=1,\n p=v1_mean,\n dtype=DTYPES.FLOATX\n )\n return [pre_sigmoid_v1, v1_mean, v1_sample]\n\n def gibbs_hvh(self, h0_sample):\n \"\"\"\n This function implements one step of Gibbs sampling, starting from the\n hidden state\n \"\"\"\n pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)\n pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)\n return [pre_sigmoid_v1, v1_mean, v1_sample,\n pre_sigmoid_h1, h1_mean, h1_sample]\n\n def gibbs_vhv(self, v0_sample):\n \"\"\"\n This function implements one step of Gibbs sampling, starting from the\n visible state\n \"\"\"\n pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)\n pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)\n return [pre_sigmoid_h1, h1_mean, h1_sample,\n pre_sigmoid_v1, v1_mean, v1_sample]\n\n def get_cost_updates(self, lr=0.1, k=1):\n \"\"\"\n This functions implements one step of CD-k or PCD-k\n\n :param lr: learning rate used to train the RBM\n :param k: number of Gibbs steps to do in CD-k/PCD-k\n\n Returns a proxy for the cost and the updates dictionary. The\n dictionary contains the update rules for weights and biases but\n also an update of the shared variable used to store the persistent\n chain, if one is used.\n \"\"\"\n\n # compute positive phase\n pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)\n\n # decide how to initialize persistent chain:\n # for CD, we use the newly generate hidden sample\n chain_start = ph_sample\n\n # perform actual negative phase\n # in order to implement CD-k/PCD-k we need to scan over the\n # function that implements one gibbs step k times.\n # Read Theano tutorial on scan for more information :\n # http://deeplearning.net/software/theano/library/scan.html\n # the scan will return the entire Gibbs chain\n\n # noinspection PyCallingNonCallable, PyUnusedLocal\n (\n [\n pre_sigmoid_nvs,\n nv_means,\n nv_samples,\n pre_sigmoid_nhs,\n nh_means,\n nh_samples\n ],\n updates\n ) = theano.scan(\n self.gibbs_hvh,\n # the None are place holders, saying that\n # chain_start is the initial state corresponding to the\n # 6th output\n outputs_info=[None, None, None, None, None, chain_start],\n n_steps=k\n )\n # determine gradients on RBM parameters\n # note that we only need the sample at the end of the chain\n chain_end = nv_samples[-1]\n\n cost = \\\n T.mean(self.free_energy(self.input)) - \\\n T.mean(self.free_energy(chain_end))\n\n regularization = \\\n (self.lmbda / 2.) * \\\n (T.mean(T.sum(T.sqr(self.hidden_layer.W), axis=1)))\n\n cost += regularization\n\n learning_rate = T.cast(0.01, dtype=DTYPES.FLOATX)\n\n # noinspection PyUnusedLocal\n def regulared(pidx, param):\n # We must not compute the gradient through the gibbs sampling\n grad = T.grad(cost, param, consider_constant=[chain_end])\n current_cost = learning_rate * grad\n updates[param] = param - current_cost\n\n def momentumed(pidx, param):\n if len(self.Ms) < pidx + 1:\n # initialize momentum for this element to zeros\n self.Ms.append(theano.shared(\n param.get_value() * 0.,\n broadcastable=param.broadcastable\n ))\n # self.Ms.append(theano.shared(0.))\n\n M_update = self.Ms[pidx]\n\n momentum = T.cast(self.momentum, dtype=DTYPES.FLOATX)\n mom = momentum * M_update\n\n grad = T.grad(\n cost,\n param,\n consider_constant=[chain_end]\n )\n\n v_prime = mom - learning_rate * grad\n w_prime = param + v_prime\n updates[M_update] = v_prime\n updates[param] = w_prime\n\n # noinspection PyUnusedLocal\n def rmsproped(pidx, param):\n # NOTE : matrix of learning rates\n if len(self.MSs) < pidx + 1:\n self.MSs.append(theano.shared(\n (param.get_value() * 0.) + 1,\n broadcastable=param.broadcastable\n ))\n\n # # NOTE: single scalar\n # if len(self.MSs) < pidx + 1:\n # self.MSs.append(theano.shared(1.))\n\n MS_update = self.MSs[pidx]\n\n grad = T.grad(cost, param, consider_constant=[chain_end])\n\n decay = T.cast(.9, dtype=DTYPES.FLOATX)\n one_minus_decay = T.cast(.1, dtype=DTYPES.FLOATX)\n\n # NOTE: matrix of learning weights\n current_rmsprop = \\\n (decay * MS_update) + \\\n (one_minus_decay * T.sqrt(T.sum(T.sqr(grad))))\n\n # # NOTE: single scalar\n # current_rmsprop = \\\n # (decay * MS_update) + \\\n # (one_minus_decay * T.mean(T.sqr(grad)))\n\n updates[MS_update] = current_rmsprop\n\n learning_rate = .1 / current_rmsprop\n # learning_rate = T.sqrt(\n # T.cast(current_rmsprop, dtype=DTYPES.FLOATX)\n # )\n # if True:\n # learning_rate = theano.printing.Print('lr')(learning_rate)\n\n updates[param] = param - (learning_rate * grad)\n # updates[param] = param - (grad / learning_rate)\n\n # constructs the update dictionary\n for pidx, param in enumerate(self.params):\n # regulared(pidx, param)\n momentumed(pidx, param)\n # rmsproped(pidx, param)\n\n # reconstruction cross-entropy is a better proxy for CD\n monitoring_cost = self.get_reconstruction_cost(\n pre_sigmoid_nvs[-1]\n )\n\n self.monitoring_cost = monitoring_cost\n self.updates = updates\n self.learning_rate = lr\n\n def get_reconstruction_cost(self, pre_sigmoid_nv):\n \"\"\"Approximation to the reconstruction error\n\n Note that this function requires the pre-sigmoid activation as\n input. To understand why this is so you need to understand a\n bit about how Theano works. Whenever you compile a Theano\n function, the computational graph that you pass as input gets\n optimized for speed and stability. This is done by changing\n several parts of the subgraphs with others. One such\n optimization expresses terms of the form log(sigmoid(x)) in\n terms of softplus. We need this optimization for the\n cross-entropy since sigmoid of numbers larger than 30. (or\n even less then that) turn to 1. and numbers smaller than\n -30. turn to 0 which in terms will force theano to compute\n log(0) and therefore we will get either -inf or NaN as\n cost. If the value is expressed in terms of softplus we do not\n get this undesirable behaviour. This optimization usually\n works fine, but here we have a special case. The sigmoid is\n applied inside the scan op, while the log is\n outside. Therefore Theano will only see log(scan(..)) instead\n of log(sigmoid(..)) and will not apply the wanted\n optimization. We can not go and replace the sigmoid in scan\n with something else also, because this only needs to be done\n on the last step. Therefore the easiest and more efficient way\n is to get also the pre-sigmoid activation as an output of\n scan, and apply both the log and sigmoid outside scan such\n that Theano can catch and optimize the expression.\n\n \"\"\"\n y = self.input\n a = T.nnet.sigmoid(pre_sigmoid_nv)\n\n cross_entropy = -T.mean(\n T.sum(y * T.log(a) + (1 - y) * T.log(1 - a), axis=1)\n )\n\n return cross_entropy\n","sub_path":"bpfe/dl_dbn/rbm.py","file_name":"rbm.py","file_ext":"py","file_size_in_byte":15691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"484905634","text":"\"\"\"\r\nThe :mod:`scadnano` Python module is a library for describing synthetic DNA nanostructures\r\n(e.g., DNA origami).\r\nInstallation instructions are at the\r\n`GitHub repository `_.\r\n\r\nThe scadnano project is developed and maintained by the UC Davis Molecular Computing group.\r\nNote that `cadnano `_ is a separate project,\r\ndeveloped and maintained by the `Douglas lab `_ at UCSF.\r\n\r\nThis module is used to write Python scripts creating files readable\r\nby `scadnano `_, a web application useful for displaying\r\nand manually editing synthetic DNA nanostructures.\r\nThe purpose of this module is to help automate some of the task of creating DNA designs,\r\nas well as making large-scale changes to them that are easier to describe programmatically than\r\nto do by hand in scadnano.\r\n\r\nIf you find scadnano useful in a scientific project, please cite its associated paper:\r\n\r\n | scadnano: A browser-based, scriptable tool for designing DNA nanostructures.\r\n | David Doty, Benjamin L Lee, and Tristan Stérin.\r\n | DNA 2020: *Proceedings of the 26th International Conference on DNA Computing and Molecular Programming*\r\n | [ `paper `_ | `BibTeX `_ ]\r\n\r\nThis document describes the API for the scadnano Python package,\r\nsee the `repository `_\r\nfor additional documentation, such as installation instructions.\r\nThere is separate documentation for the\r\n`scadnano web interface `_.\r\n\r\nThis library uses typing hints from the Python typing library.\r\n(https://docs.python.org/3/library/typing.html)\r\nEach function and method indicate intended types of the parameters.\r\nHowever, due to Python's design, these types are not enforced at runtime.\r\nIt is suggested to use a static analysis tool such as that provided by an IDE such as PyCharm\r\n(https://www.jetbrains.com/pycharm/)\r\nto see warnings when the typing rules are violated. \r\nSuch warnings probably indicate an erroneous usage.\r\n\r\nMost of the classes in this module are Python dataclasses\r\n(https://docs.python.org/3/library/dataclasses.html)\r\nwhose fields show up in the documentation.\r\nTheir types are listed in parentheses after the name of the class;\r\nfor example :any:`Color` has ``int`` fields :py:data:`Color.r`, :py:data:`Color.g`, :py:data:`Color.b`.\r\nIn general it is safe to read these fields directly, but not to write to them directly.\r\nSetter methods (named ``set_``) are provided for fields where it makes sense to set it to another\r\nvalue than it had originally.\r\nHowever, due to Python naming conventions for dataclass fields and property setters,\r\nit is not straightforward to enforce that the fields cannot be written, \r\nso the user must take care not to set them.\r\n\"\"\"\r\n\r\n# needed to use forward annotations: https://docs.python.org/3/whatsnew/3.7.html#whatsnew37-pep563\r\n# commented out for now to support Py3.6, which does not support this feature\r\n# from __future__ import annotations\r\n\r\n__version__ = \"0.15.1\" # version line; WARNING: do not remove or change this line or comment\r\n\r\nimport dataclasses\r\nfrom abc import abstractmethod, ABC, ABCMeta\r\nimport json\r\nimport enum\r\nimport itertools\r\nimport re\r\nfrom builtins import ValueError\r\nfrom dataclasses import dataclass, field, InitVar, replace\r\nfrom typing import Tuple, List, Iterable, Set, Dict, Union, Optional, FrozenSet, Type, cast, Any, \\\r\n TypeVar, Generic, Callable\r\nfrom collections import defaultdict, OrderedDict, Counter\r\nimport sys\r\nimport os.path\r\n\r\ndefault_scadnano_file_extension = 'sc'\r\n\r\nVStrands = Dict[int, Dict[str, Any]]\r\n\r\n\r\ndef _pairwise(iterable: Iterable) -> Iterable:\r\n \"\"\"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\"\"\r\n a, b = itertools.tee(iterable)\r\n next(b, None)\r\n return zip(a, b)\r\n\r\n\r\n# for putting evaluated expressions in docstrings\r\n# https://stackoverflow.com/questions/10307696/how-to-put-a-variable-into-python-docstring\r\ndef _docstring_parameter(*sub: Any, **kwargs: Any) -> Any:\r\n def dec(obj: Any) -> Any:\r\n obj.__doc__ = obj.__doc__.format(*sub, **kwargs)\r\n return obj\r\n\r\n return dec\r\n\r\n\r\n##############################################################################\r\n# JSON serialization\r\n# There are external libraries to handle JSON\r\n# in Python, but I want this to be a simple, single-file library, so we just\r\n# implement what we need below.\r\n\r\n\r\nclass _JSONSerializable(ABC):\r\n\r\n @abstractmethod\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Any:\r\n raise NotImplementedError()\r\n\r\n\r\ndef _json_encode(obj: _JSONSerializable, suppress_indent: bool = True) -> str:\r\n encoder = _SuppressableIndentEncoder if suppress_indent else json.JSONEncoder\r\n serializable = obj.to_json_serializable(suppress_indent=suppress_indent)\r\n return json.dumps(serializable, cls=encoder, indent=2)\r\n\r\n\r\nclass NoIndent:\r\n # Value wrapper. Placing a value in this will stop it from being indented when converting to JSON\r\n # using _SuppressableIndentEncoder\r\n\r\n def __init__(self, value: Any) -> None:\r\n self.value = value\r\n\r\n\r\nclass _SuppressableIndentEncoder(json.JSONEncoder):\r\n def __init__(self, *args: Any, **kwargs: Any) -> None:\r\n self.unique_id = 0\r\n super(_SuppressableIndentEncoder, self).__init__(*args, **kwargs)\r\n self.kwargs = dict(kwargs)\r\n del self.kwargs['indent']\r\n self._replacement_map: Dict[int, Any] = {}\r\n\r\n def default(self, obj: Any) -> Any:\r\n if isinstance(obj, NoIndent):\r\n # key = uuid.uuid1().hex # this caused problems with Brython.\r\n key = self.unique_id\r\n self.unique_id += 1\r\n self._replacement_map[key] = json.dumps(obj.value, **self.kwargs)\r\n return f\"@@{key}@@\"\r\n else:\r\n return super().default(obj)\r\n\r\n def encode(self, obj: Any) -> Any:\r\n result = super().encode(obj)\r\n for k, v in self._replacement_map.items():\r\n result = result.replace(f'\"@@{k}@@\"', v)\r\n return result\r\n\r\n\r\n#\r\n# END JSON serialization\r\n##############################################################################\r\n\r\n\r\n##############################################################################\r\n# Colors\r\n# As with JSON serialization, there are external libraries to handle colors\r\n# in Python, but I want this to be a simple, single-file library, so we just\r\n# implement what we need below.\r\n\r\n@dataclass\r\nclass Color(_JSONSerializable):\r\n r: Optional[int] = None\r\n \"\"\"\r\n Red component: 0-255.\r\n \r\n Optional if :py:data:`Color.hex` is given.\"\"\"\r\n\r\n g: Optional[int] = None\r\n \"\"\"Green component: 0-255.\r\n \r\n Optional if :py:data:`Color.hex` is given.\"\"\"\r\n\r\n b: Optional[int] = None\r\n \"\"\"Blue component: 0-255.\r\n \r\n Optional if :py:data:`Color.hex` is given.\"\"\"\r\n\r\n hex_string: InitVar[str] = None\r\n \"\"\"Hex color preceded by # sign, e.g., \"#ff0000\" is red.\r\n \r\n Optional if :py:data:`Color.r`, :py:data:`Color.g`, :py:data:`Color.b` are all given.\"\"\"\r\n\r\n def __post_init__(self, hex_string: str) -> None:\r\n if hex_string is None:\r\n assert (self.r is not None and self.g is not None and self.b is not None)\r\n else:\r\n assert (self.r is None and self.g is None and self.b is None)\r\n hex_string = hex_string.lstrip('#')\r\n self.r = int(hex_string[0:2], 16)\r\n self.g = int(hex_string[2:4], 16)\r\n self.b = int(hex_string[4:6], 16)\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> str:\r\n # Return object representing this Color that is JSON serializable.\r\n # return NoIndent(self.__dict__) if suppress_indent else self.__dict__\r\n return f'#{self.r:02x}{self.g:02x}{self.b:02x}'\r\n\r\n def to_cadnano_v2_int_hex(self) -> int:\r\n return int(f'{self.r:02x}{self.g:02x}{self.b:02x}', 16)\r\n\r\n @classmethod\r\n def from_cadnano_v2_int_hex(cls, hex_int: int) -> 'Color':\r\n hex_str = \"0x{:06x}\".format(hex_int)\r\n return Color(hex_string=hex_str[2:])\r\n\r\n\r\n# https://medium.com/@rjurney/kellys-22-colours-of-maximum-contrast-58edb70c90d1\r\n_kelly_colors = [ # 'F2F3F4', #almost white so it's no good\r\n '222222', 'F3C300', '875692', 'F38400', 'A1CAF1', 'BE0032', 'C2B280', '848482',\r\n '008856', 'E68FAC', '0067A5', 'F99379', '604E97', 'F6A600', 'B3446C', 'DCD300', '882D17',\r\n '8DB600', '654522', 'E25822', '2B3D26']\r\n\r\n\r\nclass ColorCycler:\r\n \"\"\"\r\n Calling ``next(color_cycler)`` on a ColorCycler named ``color_cycler``\r\n returns a the next :any:`Color` from a fixed size list,\r\n cycling after reaching the end of the list.\r\n\r\n To choose new colors, set ``color_cycler.colors`` to a new list of :any:`Color`'s.\r\n \"\"\"\r\n\r\n # These are copied from cadnano:\r\n # https://github.com/sdouglas/cadnano2/blob/master/views/styles.py#L97\r\n _colors: List[Color] = [Color(50, 184, 108),\r\n Color(204, 0, 0),\r\n Color(247, 67, 8),\r\n Color(247, 147, 30),\r\n Color(170, 170, 0),\r\n Color(87, 187, 0),\r\n Color(0, 114, 0),\r\n Color(3, 182, 162),\r\n # Color(23, 0, 222), # don't like this because it looks too much like scaffold\r\n Color(50, 0, 150), # this one is better contrast with scaffold\r\n Color(184, 5, 108),\r\n Color(51, 51, 51),\r\n Color(115, 0, 222),\r\n Color(136, 136, 136)]\r\n \"\"\"List of colors to cycle through.\"\"\"\r\n\r\n # _colors = [Color(hex_string=kelly_color) for kelly_color in _kelly_colors]\r\n # \"\"\"List of colors to cycle through.\"\"\"\r\n\r\n def __init__(self) -> None:\r\n self._current_color_idx = 0\r\n # random order\r\n order = [3, 11, 0, 12, 8, 1, 10, 6, 5, 9, 4, 7, 2]\r\n # order = range(len(self._colors))\r\n colors_shuffled: List[Color] = list(self._colors)\r\n for i, color in zip(order, self._colors):\r\n colors_shuffled[i] = color\r\n self._colors: List[Color] = colors_shuffled\r\n\r\n def __iter__(self) -> 'ColorCycler':\r\n # need to make ColorCycler an iterator\r\n return self\r\n\r\n def __next__(self) -> Color:\r\n color = self.current_color()\r\n self._current_color_idx = (self._current_color_idx + 1) % len(self._colors)\r\n return color\r\n\r\n def current_color(self) -> Color:\r\n return self._colors[self._current_color_idx]\r\n\r\n def __hash__(self) -> int:\r\n return hash(self.current_color())\r\n\r\n def __eq__(self, other: Any) -> bool:\r\n if not isinstance(other, ColorCycler):\r\n return False\r\n return self._current_color_idx == other._current_color_idx\r\n\r\n def __str__(self) -> str:\r\n return repr(self)\r\n\r\n def __repr__(self) -> str:\r\n return f'ColorCycler({self.current_color()})'\r\n\r\n @property\r\n def colors(self) -> List[Color]:\r\n \"\"\"The colors that are cycled through when calling ``next()`` on some :any:`ColorCycler`.\"\"\"\r\n return list(self._colors)\r\n\r\n @colors.setter\r\n def colors(self, newcolors: Iterable[Color]) -> None:\r\n self._colors = list(newcolors)\r\n self._current_color_idx = 0\r\n\r\n\r\ndefault_scaffold_color = Color(0, 102, 204)\r\n\"\"\"Default color for scaffold strand(s).\"\"\"\r\n\r\ndefault_strand_color = Color(0, 0, 0)\r\n\"\"\"Default color for non-scaffold strand(s).\"\"\"\r\n\r\ndefault_cadnano_strand_color = Color(hex_string='#BFBFBF')\r\n\r\n\r\n#\r\n# END Colors\r\n##############################################################################\r\n\r\n\r\n@enum.unique\r\nclass Grid(str, enum.Enum):\r\n \"\"\"\r\n Represents default patterns for laying out helices in the side view.\r\n Each :any:`Grid` except :py:data:`Grid.none` has an interpretation of a \"grid position\",\r\n which is a 2D integer coordinate (`h`, `v`).\r\n (scadnano also allows a 3rd coordinate (`h`, `v`, `b`) specifying a \"base offset\" at which to position\r\n the start of the :any:`Helix`, which is not relevant for the side view but will eventually be\r\n supported to adjust the main view.)\r\n \"\"\"\r\n\r\n square = \"square\"\r\n \"\"\"\r\n Square lattice. \r\n Increasing `h` moves right and increasing `v` moves down. \r\n (i.e., \"computer screen coordinates\" rather than Cartesian coordinates where positive `y` is up.)\r\n \"\"\"\r\n\r\n hex = \"hex\"\r\n \"\"\"\r\n Hexagonal lattice. Uses the *\"odd-q horizontal layout\"* coordinate system described here: \r\n https://www.redblobgames.com/grids/hexagons/. \r\n Incrementing `v` moves down.\r\n Incrementing `h` moves down and to the right if `h` is even, \r\n and moves up and to the right if `h` is odd.\r\n \"\"\"\r\n\r\n honeycomb = \"honeycomb\"\r\n \"\"\"\r\n Honeycomb lattice. This consists of all the hex lattice positions except where \r\n honeycomb lattice disallows grid positions (`h`, `v`) with \r\n `v` even and `h` a multiple of 3 or\r\n `v` odd and `h` = 1 + a multiple of 3. \r\n \r\n However, we use the same convention as cadnano for encoding hex coordinates see `misc/cadnano-format-specs/v2.txt`.\r\n That convention is different from simply excluding coordinates from the hex lattice.\r\n\r\n \"\"\"\r\n\r\n none = \"none\"\r\n \"\"\"No fixed grid.\"\"\"\r\n\r\n\r\n# convenience names for users\r\nsquare = Grid.square\r\nhexagonal = Grid.hex # should not use identifier \"hex\" because that's a Python built-in function\r\nhoneycomb = Grid.honeycomb\r\n\r\n##########################################################################\r\n# constants\r\n\r\ndefault_idt_scale = \"25nm\"\r\ndefault_idt_purification = \"STD\"\r\n\r\n\r\ndef default_major_tick_distance(grid: Grid) -> int:\r\n return 7 if grid in (Grid.hex, Grid.honeycomb) else 8\r\n\r\n\r\ndefault_pitch: float = 0.0\r\ndefault_roll: float = 0.0\r\ndefault_yaw: float = 0.0\r\n\r\ndefault_group_name = 'default_group'\r\n\r\n# XXX: code below related to SVG positions is not currently needed in the scripting library,\r\n# but I want to make sure these conventions are documented somewhere, so they are just commented out for now.\r\n#\r\n# base_width_svg: float = 10.0\r\n# \"\"\"Width of a single base in the SVG main view of scadnano.\"\"\"\r\n#\r\n# base_height_svg: float = 10.0\r\n# \"\"\"Height of a single base in the SVG main view of scadnano.\"\"\"\r\n#\r\n# distance_between_helices_nm: float = 2.5\r\n# \"\"\"Distance between centers of helices in nanometers.\r\n# See :py:data:`distance_between_helices_svg` for explanation of this value.\"\"\"\r\n#\r\n# base_width_nm: float = 0.34\r\n# \"\"\"Width of a single DNA base in nanometers.\"\"\"\r\n#\r\n# distance_between_helices_svg: float = base_width_svg * distance_between_helices_nm / base_width_nm\r\n# \"\"\"Distance between tops of two consecutive helices (using default positioning rules).\r\n#\r\n# This is set to (:const:`base_width_svg` * 2.5/0.34) based on the following calculation,\r\n# to attempt to make the DNA structure appear to scale in 2D drawings:\r\n# The width of one base pair of double-stranded DNA bp is 0.34 nm. In a DNA origami,\r\n# AFM images let us estimate that the average distance between adjacent double helices is 2.5 nm.\r\n# (A DNA double-helix is only 2 nm wide, but the helices electrostatically repel each other so the spacing\r\n# in a DNA origami or an other DNA nanostructure with many parallel DNA helices---e.g., single-stranded tile\r\n# lattices---is larger than 2 nm.)\r\n# Thus the distance between the helices is 2.5/0.34 ~ 7.5 times the width of a single DNA base.\r\n# \"\"\"\r\n\r\nDNA_base_wildcard: str = '?'\r\n\"\"\"Symbol to insert when a DNA sequence has been assigned to a strand through complementarity, but\r\nsome regions of the strand are not bound to the strand that was just assigned. Also used in case the\r\nDNA sequence assigned to a strand is too short; the sequence is padded with :any:`DNA_base_wildcard` to \r\nmake its length the same as the length of the strand.\"\"\"\r\n\r\n\r\ndef _rotate_string(string: str, rotation: int) -> str:\r\n rotation = rotation % len(string)\r\n return string[rotation:] + string[:rotation]\r\n\r\n\r\nclass M13Variant(enum.Enum):\r\n \"\"\"Variants of M13mp18 viral genome. \"Standard\" variant is p7249. Other variants are longer.\"\"\"\r\n\r\n p7249 = \"p7249\"\r\n \"\"\"\"Standard\" variant of M13mp18; 7249 bases long, available from, for example\r\n \r\n https://www.tilibit.com/collections/scaffold-dna/products/single-stranded-scaffold-dna-type-p7249\r\n \r\n https://www.neb.com/products/n4040-m13mp18-single-stranded-dna\r\n \r\n http://www.bayoubiolabs.com/biochemicat/vectors/pUCM13/\r\n \"\"\"\r\n\r\n p7560 = \"p7560\"\r\n \"\"\"Variant of M13mp18 that is 7560 bases long. Available from, for example\r\n \r\n https://www.tilibit.com/collections/scaffold-dna/products/single-stranded-scaffold-dna-type-p7560\r\n \"\"\"\r\n\r\n p8064 = \"p8064\"\r\n \"\"\"Variant of M13mp18 that is 8064 bases long. Available from, for example\r\n \r\n https://www.tilibit.com/collections/scaffold-dna/products/single-stranded-scaffold-dna-type-p8064\r\n \"\"\"\r\n\r\n\r\ndef m13(rotation: int = 5587, variant: M13Variant = M13Variant.p7249) -> str:\r\n \"\"\"\r\n The M13mp18 DNA sequence (commonly called simply M13).\r\n \r\n By default, starts from cyclic rotation 5587 \r\n (with 0-based indexing; commonly this is called rotation 5588, which assumes that indexing begins at 1), \r\n as defined in\r\n `GenBank `_.\r\n \r\n By default, returns the \"standard\" variant of consisting of 7249 bases, sold by companies such as \r\n `Tilibit `_.\r\n and\r\n `New England Biolabs `_\r\n \r\n The actual M13 DNA strand itself is circular, \r\n so assigning this sequence to the scaffold :any:`Strand` in a :any:`Design`\r\n means that the \"5' end\" of the scaffold :any:`Strand` \r\n (which is a fiction since the actual circular DNA strand has no endpoint) \r\n will have the sequence starting at position 5587 starting at the displayed 5' in scadnano,\r\n assigned until the displayed 3' end. \r\n Assuming the displayed scaffold :any:`Strand` has length :math:`n < 7249`, then a loopout of length \r\n :math:`7249 - n` consisting of the undisplayed bases will be present in the actual DNA structure.\r\n For a more detailed discussion of why this particular rotation of M13 is chosen,\r\n see \r\n `Supplementary Note S8 `_ \r\n in\r\n [`Folding DNA to create nanoscale shapes and patterns. Paul W. K. Rothemund, Nature 440:297-302 (2006) `_].\r\n \r\n :param rotation: rotation of circular strand. Valid values are 0 through length-1.\r\n :param variant: variant of M13 strand to use\r\n :return: M13 strand sequence\r\n \"\"\" # noqa (suppress PEP warning)\r\n seq = _m13_variants[variant]\r\n return _rotate_string(seq, rotation)\r\n\r\n\r\n_7249 = re.sub(r'\\s', '', '''\r\nAATGCTACTACTATTAGTAGAATTGATGCCACCTTTTCAGCTCGCGCCCCAAATGAAAATATAGCTAAACAGGTTATTGACCATTTGCGAAATGTATCTA\r\nATGGTCAAACTAAATCTACTCGTTCGCAGAATTGGGAATCAACTGTTATATGGAATGAAACTTCCAGACACCGTACTTTAGTTGCATATTTAAAACATGT\r\nTGAGCTACAGCATTATATTCAGCAATTAAGCTCTAAGCCATCCGCAAAAATGACCTCTTATCAAAAGGAGCAATTAAAGGTACTCTCTAATCCTGACCTG\r\nTTGGAGTTTGCTTCCGGTCTGGTTCGCTTTGAAGCTCGAATTAAAACGCGATATTTGAAGTCTTTCGGGCTTCCTCTTAATCTTTTTGATGCAATCCGCT\r\nTTGCTTCTGACTATAATAGTCAGGGTAAAGACCTGATTTTTGATTTATGGTCATTCTCGTTTTCTGAACTGTTTAAAGCATTTGAGGGGGATTCAATGAA\r\nTATTTATGACGATTCCGCAGTATTGGACGCTATCCAGTCTAAACATTTTACTATTACCCCCTCTGGCAAAACTTCTTTTGCAAAAGCCTCTCGCTATTTT\r\nGGTTTTTATCGTCGTCTGGTAAACGAGGGTTATGATAGTGTTGCTCTTACTATGCCTCGTAATTCCTTTTGGCGTTATGTATCTGCATTAGTTGAATGTG\r\nGTATTCCTAAATCTCAACTGATGAATCTTTCTACCTGTAATAATGTTGTTCCGTTAGTTCGTTTTATTAACGTAGATTTTTCTTCCCAACGTCCTGACTG\r\nGTATAATGAGCCAGTTCTTAAAATCGCATAAGGTAATTCACAATGATTAAAGTTGAAATTAAACCATCTCAAGCCCAATTTACTACTCGTTCTGGTGTTT\r\nCTCGTCAGGGCAAGCCTTATTCACTGAATGAGCAGCTTTGTTACGTTGATTTGGGTAATGAATATCCGGTTCTTGTCAAGATTACTCTTGATGAAGGTCA\r\nGCCAGCCTATGCGCCTGGTCTGTACACCGTTCATCTGTCCTCTTTCAAAGTTGGTCAGTTCGGTTCCCTTATGATTGACCGTCTGCGCCTCGTTCCGGCT\r\nAAGTAACATGGAGCAGGTCGCGGATTTCGACACAATTTATCAGGCGATGATACAAATCTCCGTTGTACTTTGTTTCGCGCTTGGTATAATCGCTGGGGGT\r\nCAAAGATGAGTGTTTTAGTGTATTCTTTTGCCTCTTTCGTTTTAGGTTGGTGCCTTCGTAGTGGCATTACGTATTTTACCCGTTTAATGGAAACTTCCTC\r\nATGAAAAAGTCTTTAGTCCTCAAAGCCTCTGTAGCCGTTGCTACCCTCGTTCCGATGCTGTCTTTCGCTGCTGAGGGTGACGATCCCGCAAAAGCGGCCT\r\nTTAACTCCCTGCAAGCCTCAGCGACCGAATATATCGGTTATGCGTGGGCGATGGTTGTTGTCATTGTCGGCGCAACTATCGGTATCAAGCTGTTTAAGAA\r\nATTCACCTCGAAAGCAAGCTGATAAACCGATACAATTAAAGGCTCCTTTTGGAGCCTTTTTTTTGGAGATTTTCAACGTGAAAAAATTATTATTCGCAAT\r\nTCCTTTAGTTGTTCCTTTCTATTCTCACTCCGCTGAAACTGTTGAAAGTTGTTTAGCAAAATCCCATACAGAAAATTCATTTACTAACGTCTGGAAAGAC\r\nGACAAAACTTTAGATCGTTACGCTAACTATGAGGGCTGTCTGTGGAATGCTACAGGCGTTGTAGTTTGTACTGGTGACGAAACTCAGTGTTACGGTACAT\r\nGGGTTCCTATTGGGCTTGCTATCCCTGAAAATGAGGGTGGTGGCTCTGAGGGTGGCGGTTCTGAGGGTGGCGGTTCTGAGGGTGGCGGTACTAAACCTCC\r\nTGAGTACGGTGATACACCTATTCCGGGCTATACTTATATCAACCCTCTCGACGGCACTTATCCGCCTGGTACTGAGCAAAACCCCGCTAATCCTAATCCT\r\nTCTCTTGAGGAGTCTCAGCCTCTTAATACTTTCATGTTTCAGAATAATAGGTTCCGAAATAGGCAGGGGGCATTAACTGTTTATACGGGCACTGTTACTC\r\nAAGGCACTGACCCCGTTAAAACTTATTACCAGTACACTCCTGTATCATCAAAAGCCATGTATGACGCTTACTGGAACGGTAAATTCAGAGACTGCGCTTT\r\nCCATTCTGGCTTTAATGAGGATTTATTTGTTTGTGAATATCAAGGCCAATCGTCTGACCTGCCTCAACCTCCTGTCAATGCTGGCGGCGGCTCTGGTGGT\r\nGGTTCTGGTGGCGGCTCTGAGGGTGGTGGCTCTGAGGGTGGCGGTTCTGAGGGTGGCGGCTCTGAGGGAGGCGGTTCCGGTGGTGGCTCTGGTTCCGGTG\r\nATTTTGATTATGAAAAGATGGCAAACGCTAATAAGGGGGCTATGACCGAAAATGCCGATGAAAACGCGCTACAGTCTGACGCTAAAGGCAAACTTGATTC\r\nTGTCGCTACTGATTACGGTGCTGCTATCGATGGTTTCATTGGTGACGTTTCCGGCCTTGCTAATGGTAATGGTGCTACTGGTGATTTTGCTGGCTCTAAT\r\nTCCCAAATGGCTCAAGTCGGTGACGGTGATAATTCACCTTTAATGAATAATTTCCGTCAATATTTACCTTCCCTCCCTCAATCGGTTGAATGTCGCCCTT\r\nTTGTCTTTGGCGCTGGTAAACCATATGAATTTTCTATTGATTGTGACAAAATAAACTTATTCCGTGGTGTCTTTGCGTTTCTTTTATATGTTGCCACCTT\r\nTATGTATGTATTTTCTACGTTTGCTAACATACTGCGTAATAAGGAGTCTTAATCATGCCAGTTCTTTTGGGTATTCCGTTATTATTGCGTTTCCTCGGTT\r\nTCCTTCTGGTAACTTTGTTCGGCTATCTGCTTACTTTTCTTAAAAAGGGCTTCGGTAAGATAGCTATTGCTATTTCATTGTTTCTTGCTCTTATTATTGG\r\nGCTTAACTCAATTCTTGTGGGTTATCTCTCTGATATTAGCGCTCAATTACCCTCTGACTTTGTTCAGGGTGTTCAGTTAATTCTCCCGTCTAATGCGCTT\r\nCCCTGTTTTTATGTTATTCTCTCTGTAAAGGCTGCTATTTTCATTTTTGACGTTAAACAAAAAATCGTTTCTTATTTGGATTGGGATAAATAATATGGCT\r\nGTTTATTTTGTAACTGGCAAATTAGGCTCTGGAAAGACGCTCGTTAGCGTTGGTAAGATTCAGGATAAAATTGTAGCTGGGTGCAAAATAGCAACTAATC\r\nTTGATTTAAGGCTTCAAAACCTCCCGCAAGTCGGGAGGTTCGCTAAAACGCCTCGCGTTCTTAGAATACCGGATAAGCCTTCTATATCTGATTTGCTTGC\r\nTATTGGGCGCGGTAATGATTCCTACGATGAAAATAAAAACGGCTTGCTTGTTCTCGATGAGTGCGGTACTTGGTTTAATACCCGTTCTTGGAATGATAAG\r\nGAAAGACAGCCGATTATTGATTGGTTTCTACATGCTCGTAAATTAGGATGGGATATTATTTTTCTTGTTCAGGACTTATCTATTGTTGATAAACAGGCGC\r\nGTTCTGCATTAGCTGAACATGTTGTTTATTGTCGTCGTCTGGACAGAATTACTTTACCTTTTGTCGGTACTTTATATTCTCTTATTACTGGCTCGAAAAT\r\nGCCTCTGCCTAAATTACATGTTGGCGTTGTTAAATATGGCGATTCTCAATTAAGCCCTACTGTTGAGCGTTGGCTTTATACTGGTAAGAATTTGTATAAC\r\nGCATATGATACTAAACAGGCTTTTTCTAGTAATTATGATTCCGGTGTTTATTCTTATTTAACGCCTTATTTATCACACGGTCGGTATTTCAAACCATTAA\r\nATTTAGGTCAGAAGATGAAATTAACTAAAATATATTTGAAAAAGTTTTCTCGCGTTCTTTGTCTTGCGATTGGATTTGCATCAGCATTTACATATAGTTA\r\nTATAACCCAACCTAAGCCGGAGGTTAAAAAGGTAGTCTCTCAGACCTATGATTTTGATAAATTCACTATTGACTCTTCTCAGCGTCTTAATCTAAGCTAT\r\nCGCTATGTTTTCAAGGATTCTAAGGGAAAATTAATTAATAGCGACGATTTACAGAAGCAAGGTTATTCACTCACATATATTGATTTATGTACTGTTTCCA\r\nTTAAAAAAGGTAATTCAAATGAAATTGTTAAATGTAATTAATTTTGTTTTCTTGATGTTTGTTTCATCATCTTCTTTTGCTCAGGTAATTGAAATGAATA\r\nATTCGCCTCTGCGCGATTTTGTAACTTGGTATTCAAAGCAATCAGGCGAATCCGTTATTGTTTCTCCCGATGTAAAAGGTACTGTTACTGTATATTCATC\r\nTGACGTTAAACCTGAAAATCTACGCAATTTCTTTATTTCTGTTTTACGTGCAAATAATTTTGATATGGTAGGTTCTAACCCTTCCATTATTCAGAAGTAT\r\nAATCCAAACAATCAGGATTATATTGATGAATTGCCATCATCTGATAATCAGGAATATGATGATAATTCCGCTCCTTCTGGTGGTTTCTTTGTTCCGCAAA\r\nATGATAATGTTACTCAAACTTTTAAAATTAATAACGTTCGGGCAAAGGATTTAATACGAGTTGTCGAATTGTTTGTAAAGTCTAATACTTCTAAATCCTC\r\nAAATGTATTATCTATTGACGGCTCTAATCTATTAGTTGTTAGTGCTCCTAAAGATATTTTAGATAACCTTCCTCAATTCCTTTCAACTGTTGATTTGCCA\r\nACTGACCAGATATTGATTGAGGGTTTGATATTTGAGGTTCAGCAAGGTGATGCTTTAGATTTTTCATTTGCTGCTGGCTCTCAGCGTGGCACTGTTGCAG\r\nGCGGTGTTAATACTGACCGCCTCACCTCTGTTTTATCTTCTGCTGGTGGTTCGTTCGGTATTTTTAATGGCGATGTTTTAGGGCTATCAGTTCGCGCATT\r\nAAAGACTAATAGCCATTCAAAAATATTGTCTGTGCCACGTATTCTTACGCTTTCAGGTCAGAAGGGTTCTATCTCTGTTGGCCAGAATGTCCCTTTTATT\r\nACTGGTCGTGTGACTGGTGAATCTGCCAATGTAAATAATCCATTTCAGACGATTGAGCGTCAAAATGTAGGTATTTCCATGAGCGTTTTTCCTGTTGCAA\r\nTGGCTGGCGGTAATATTGTTCTGGATATTACCAGCAAGGCCGATAGTTTGAGTTCTTCTACTCAGGCAAGTGATGTTATTACTAATCAAAGAAGTATTGC\r\nTACAACGGTTAATTTGCGTGATGGACAGACTCTTTTACTCGGTGGCCTCACTGATTATAAAAACACTTCTCAGGATTCTGGCGTACCGTTCCTGTCTAAA\r\nATCCCTTTAATCGGCCTCCTGTTTAGCTCCCGCTCTGATTCTAACGAGGAAAGCACGTTATACGTGCTCGTCAAAGCAACCATAGTACGCGCCCTGTAGC\r\nGGCGCATTAAGCGCGGCGGGTGTGGTGGTTACGCGCAGCGTGACCGCTACACTTGCCAGCGCCCTAGCGCCCGCTCCTTTCGCTTTCTTCCCTTCCTTTC\r\nTCGCCACGTTCGCCGGCTTTCCCCGTCAAGCTCTAAATCGGGGGCTCCCTTTAGGGTTCCGATTTAGTGCTTTACGGCACCTCGACCCCAAAAAACTTGA\r\nTTTGGGTGATGGTTCACGTAGTGGGCCATCGCCCTGATAGACGGTTTTTCGCCCTTTGACGTTGGAGTCCACGTTCTTTAATAGTGGACTCTTGTTCCAA\r\nACTGGAACAACACTCAACCCTATCTCGGGCTATTCTTTTGATTTATAAGGGATTTTGCCGATTTCGGAACCACCATCAAACAGGATTTTCGCCTGCTGGG\r\nGCAAACCAGCGTGGACCGCTTGCTGCAACTCTCTCAGGGCCAGGCGGTGAAGGGCAATCAGCTGTTGCCCGTCTCACTGGTGAAAAGAAAAACCACCCTG\r\nGCGCCCAATACGCAAACCGCCTCTCCCCGCGCGTTGGCCGATTCATTAATGCAGCTGGCACGACAGGTTTCCCGACTGGAAAGCGGGCAGTGAGCGCAAC\r\nGCAATTAATGTGAGTTAGCTCACTCATTAGGCACCCCAGGCTTTACACTTTATGCTTCCGGCTCGTATGTTGTGTGGAATTGTGAGCGGATAACAATTTC\r\nACACAGGAAACAGCTATGACCATGATTACGAATTCGAGCTCGGTACCCGGGGATCCTCTAGAGTCGACCTGCAGGCATGCAAGCTTGGCACTGGCCGTCG\r\nTTTTACAACGTCGTGACTGGGAAAACCCTGGCGTTACCCAACTTAATCGCCTTGCAGCACATCCCCCTTTCGCCAGCTGGCGTAATAGCGAAGAGGCCCG\r\nCACCGATCGCCCTTCCCAACAGTTGCGCAGCCTGAATGGCGAATGGCGCTTTGCCTGGTTTCCGGCACCAGAAGCGGTGCCGGAAAGCTGGCTGGAGTGC\r\nGATCTTCCTGAGGCCGATACTGTCGTCGTCCCCTCAAACTGGCAGATGCACGGTTACGATGCGCCCATCTACACCAACGTGACCTATCCCATTACGGTCA\r\nATCCGCCGTTTGTTCCCACGGAGAATCCGACGGGTTGTTACTCGCTCACATTTAATGTTGATGAAAGCTGGCTACAGGAAGGCCAGACGCGAATTATTTT\r\nTGATGGCGTTCCTATTGGTTAAAAAATGAGCTGATTTAACAAAAATTTAATGCGAATTTTAACAAAATATTAACGTTTACAATTTAAATATTTGCTTATA\r\nCAATCTTCCTGTTTTTGGGGCTTTTCTGATTATCAACCGGGGTACATATGATTGACATGCTAGTTTTACGATTACCGTTCATCGATTCTCTTGTTTGCTC\r\nCAGACTCTCAGGCAATGACCTGATAGCCTTTGTAGATCTCTCAAAAATAGCTACCCTCTCCGGCATTAATTTATCAGCTAGAACGGTTGAATATCATATT\r\nGATGGTGATTTGACTGTCTCCGGCCTTTCTCACCCTTTTGAATCTTTACCTACACATTACTCAGGCATTGCATTTAAAATATATGAGGGTTCTAAAAATT\r\nTTTATCCTTGCGTTGAAATAAAGGCTTCTCCCGCAAAAGTATTACAGGGTCATAATGTTTTTGGTACAACCGATTTAGCTTTATGCTCTGAGGCTTTATT\r\nGCTTAATTTTGCTAATTCTTTGCCTTGCCTGTATGATTTATTGGATGTT\r\n''')\r\n\r\n_7560 = re.sub(r'\\s', '', '''\r\nAGCTTGGCACTGGCCGTCGTTTTACAACGTCGTGACTGGGAAAACCCTGGCGTTACCCAACTTAATCGCCTTGCAGCACATCCCCCTTTCGCCAGCTGGC \r\nGTAATAGCGAAGAGGCCCGCACCGATCGCCCTTCCCAACAGTTGCGCAGCCTGAATGGCGAATGGCGCTTTGCCTGGTTTCCGGCACCAGAAGCGGTGCC\r\nGGAAAGCTGGCTGGAGTGCGATCTTCCTGAGGCCGATACTGTCGTCGTCCCCTCAAACTGGCAGATGCACGGTTACGATGCGCCCATCTACACCAACGTG\r\nACCTATCCCATTACGGTCAATCCGCCGTTTGTTCCCACGGAGAATCCGACGGGTTGTTACTCGCTCACATTTAATGTTGATGAAAGCTGGCTACAGGAAG\r\nGCCAGACGCGAATTATTTTTGATGGCGTTCCTATTGGTTAAAAAATGAGCTGATTTAACAAAAATTTAATGCGAATTTTAACAAAATATTAACGTTTACA\r\nATTTAAATATTTGCTTATACAATCTTCCTGTTTTTGGGGCTTTTCTGATTATCAACCGGGGTACATATGATTGACATGCTAGTTTTACGATTACCGTTCA\r\nTCGATTCTCTTGTTTGCTCCAGACTCTCAGGCAATGACCTGATAGCCTTTGTAGATCTCTCAAAAATAGCTACCCTCTCCGGCATTAATTTATCAGCTAG\r\nAACGGTTGAATATCATATTGATGGTGATTTGACTGTCTCCGGCCTTTCTCACCCTTTTGAATCTTTACCTACACATTACTCAGGCATTGCATTTAAAATA\r\nTATGAGGGTTCTAAAAATTTTTATCCTTGCGTTGAAATAAAGGCTTCTCCCGCAAAAGTATTACAGGGTCATAATGTTTTTGGTACAACCGATTTAGCTT\r\nTATGCTCTGAGGCTTTATTGCTTAATTTTGCTAATTCTTTGCCTTGCCTGTATGATTTATTGGATGTTAATGCTACTACTATTAGTAGAATTGATGCCAC\r\nCTTTTCAGCTCGCGCCCCAAATGAAAATATAGCTAAACAGGTTATTGACCATTTGCGAAATGTATCTAATGGTCAAACTAAATCTACTCGTTCGCAGAAT\r\nTGGGAATCAACTGTTATATGGAATGAAACTTCCAGACACCGTACTTTAGTTGCATATTTAAAACATGTTGAGCTACAGCATTATATTCAGCAATTAAGCT\r\nCTAAGCCATCCGCAAAAATGACCTCTTATCAAAAGGAGCAATTAAAGGTACTCTCTAATCCTGACCTGTTGGAGTTTGCTTCCGGTCTGGTTCGCTTTGA\r\nAGCTCGAATTAAAACGCGATATTTGAAGTCTTTCGGGCTTCCTCTTAATCTTTTTGATGCAATCCGCTTTGCTTCTGACTATAATAGTCAGGGTAAAGAC\r\nCTGATTTTTGATTTATGGTCATTCTCGTTTTCTGAACTGTTTAAAGCATTTGAGGGGGATTCAATGAATATTTATGACGATTCCGCAGTATTGGACGCTA\r\nTCCAGTCTAAACATTTTACTATTACCCCCTCTGGCAAAACTTCTTTTGCAAAAGCCTCTCGCTATTTTGGTTTTTATCGTCGTCTGGTAAACGAGGGTTA\r\nTGATAGTGTTGCTCTTACTATGCCTCGTAATTCCTTTTGGCGTTATGTATCTGCATTAGTTGAATGTGGTATTCCTAAATCTCAACTGATGAATCTTTCT\r\nACCTGTAATAATGTTGTTCCGTTAGTTCGTTTTATTAACGTAGATTTTTCTTCCCAACGTCCTGACTGGTATAATGAGCCAGTTCTTAAAATCGCATAAG\r\nGTAATTCACAATGATTAAAGTTGAAATTAAACCATCTCAAGCCCAATTTACTACTCGTTCTGGTGTTTCTCGTCAGGGCAAGCCTTATTCACTGAATGAG\r\nCAGCTTTGTTACGTTGATTTGGGTAATGAATATCCGGTTCTTGTCAAGATTACTCTTGATGAAGGTCAGCCAGCCTATGCGCCTGGTCTGTACACCGTTC\r\nATCTGTCCTCTTTCAAAGTTGGTCAGTTCGGTTCCCTTATGATTGACCGTCTGCGCCTCGTTCCGGCTAAGTAACATGGAGCAGGTCGCGGATTTCGACA\r\nCAATTTATCAGGCGATGATACAAATCTCCGTTGTACTTTGTTTCGCGCTTGGTATAATCGCTGGGGGTCAAAGATGAGTGTTTTAGTGTATTCTTTTGCC\r\nTCTTTCGTTTTAGGTTGGTGCCTTCGTAGTGGCATTACGTATTTTACCCGTTTAATGGAAACTTCCTCATGAAAAAGTCTTTAGTCCTCAAAGCCTCTGT\r\nAGCCGTTGCTACCCTCGTTCCGATGCTGTCTTTCGCTGCTGAGGGTGACGATCCCGCAAAAGCGGCCTTTAACTCCCTGCAAGCCTCAGCGACCGAATAT\r\nATCGGTTATGCGTGGGCGATGGTTGTTGTCATTGTCGGCGCAACTATCGGTATCAAGCTGTTTAAGAAATTCACCTCGAAAGCAAGCTGATAAACCGATA\r\nCAATTAAAGGCTCCTTTTGGAGCCTTTTTTTTGGAGATTTTCAACGTGAAAAAATTATTATTCGCAATTCCTTTAGTTGTTCCTTTCTATTCTCACTCCG\r\nCTGAAACTGTTGAAAGTTGTTTAGCAAAATCCCATACAGAAAATTCATTTACTAACGTCTGGAAAGACGACAAAACTTTAGATCGTTACGCTAACTATGA\r\nGGGCTGTCTGTGGAATGCTACAGGCGTTGTAGTTTGTACTGGTGACGAAACTCAGTGTTACGGTACATGGGTTCCTATTGGGCTTGCTATCCCTGAAAAT\r\nGAGGGTGGTGGCTCTGAGGGTGGCGGTTCTGAGGGTGGCGGTTCTGAGGGTGGCGGTACTAAACCTCCTGAGTACGGTGATACACCTATTCCGGGCTATA\r\nCTTATATCAACCCTCTCGACGGCACTTATCCGCCTGGTACTGAGCAAAACCCCGCTAATCCTAATCCTTCTCTTGAGGAGTCTCAGCCTCTTAATACTTT\r\nCATGTTTCAGAATAATAGGTTCCGAAATAGGCAGGGGGCATTAACTGTTTATACGGGCACTGTTACTCAAGGCACTGACCCCGTTAAAACTTATTACCAG\r\nTACACTCCTGTATCATCAAAAGCCATGTATGACGCTTACTGGAACGGTAAATTCAGAGACTGCGCTTTCCATTCTGGCTTTAATGAGGATTTATTTGTTT\r\nGTGAATATCAAGGCCAATCGTCTGACCTGCCTCAACCTCCTGTCAATGCTGGCGGCGGCTCTGGTGGTGGTTCTGGTGGCGGCTCTGAGGGTGGTGGCTC\r\nTGAGGGTGGCGGTTCTGAGGGTGGCGGCTCTGAGGGAGGCGGTTCCGGTGGTGGCTCTGGTTCCGGTGATTTTGATTATGAAAAGATGGCAAACGCTAAT\r\nAAGGGGGCTATGACCGAAAATGCCGATGAAAACGCGCTACAGTCTGACGCTAAAGGCAAACTTGATTCTGTCGCTACTGATTACGGTGCTGCTATCGATG\r\nGTTTCATTGGTGACGTTTCCGGCCTTGCTAATGGTAATGGTGCTACTGGTGATTTTGCTGGCTCTAATTCCCAAATGGCTCAAGTCGGTGACGGTGATAA\r\nTTCACCTTTAATGAATAATTTCCGTCAATATTTACCTTCCCTCCCTCAATCGGTTGAATGTCGCCCTTTTGTCTTTGGCGCTGGTAAACCATATGAATTT\r\nTCTATTGATTGTGACAAAATAAACTTATTCCGTGGTGTCTTTGCGTTTCTTTTATATGTTGCCACCTTTATGTATGTATTTTCTACGTTTGCTAACATAC\r\nTGCGTAATAAGGAGTCTTAATCATGCCAGTTCTTTTGGGTATTCCGTTATTATTGCGTTTCCTCGGTTTCCTTCTGGTAACTTTGTTCGGCTATCTGCTT\r\nACTTTTCTTAAAAAGGGCTTCGGTAAGATAGCTATTGCTATTTCATTGTTTCTTGCTCTTATTATTGGGCTTAACTCAATTCTTGTGGGTTATCTCTCTG\r\nATATTAGCGCTCAATTACCCTCTGACTTTGTTCAGGGTGTTCAGTTAATTCTCCCGTCTAATGCGCTTCCCTGTTTTTATGTTATTCTCTCTGTAAAGGC\r\nTGCTATTTTCATTTTTGACGTTAAACAAAAAATCGTTTCTTATTTGGATTGGGATAAATAATATGGCTGTTTATTTTGTAACTGGCAAATTAGGCTCTGG\r\nAAAGACGCTCGTTAGCGTTGGTAAGATTCAGGATAAAATTGTAGCTGGGTGCAAAATAGCAACTAATCTTGATTTAAGGCTTCAAAACCTCCCGCAAGTC\r\nGGGAGGTTCGCTAAAACGCCTCGCGTTCTTAGAATACCGGATAAGCCTTCTATATCTGATTTGCTTGCTATTGGGCGCGGTAATGATTCCTACGATGAAA\r\nATAAAAACGGCTTGCTTGTTCTCGATGAGTGCGGTACTTGGTTTAATACCCGTTCTTGGAATGATAAGGAAAGACAGCCGATTATTGATTGGTTTCTACA\r\nTGCTCGTAAATTAGGATGGGATATTATTTTTCTTGTTCAGGACTTATCTATTGTTGATAAACAGGCGCGTTCTGCATTAGCTGAACATGTTGTTTATTGT\r\nCGTCGTCTGGACAGAATTACTTTACCTTTTGTCGGTACTTTATATTCTCTTATTACTGGCTCGAAAATGCCTCTGCCTAAATTACATGTTGGCGTTGTTA\r\nAATATGGCGATTCTCAATTAAGCCCTACTGTTGAGCGTTGGCTTTATACTGGTAAGAATTTGTATAACGCATATGATACTAAACAGGCTTTTTCTAGTAA\r\nTTATGATTCCGGTGTTTATTCTTATTTAACGCCTTATTTATCACACGGTCGGTATTTCAAACCATTAAATTTAGGTCAGAAGATGAAATTAACTAAAATA\r\nTATTTGAAAAAGTTTTCTCGCGTTCTTTGTCTTGCGATTGGATTTGCATCAGCATTTACATATAGTTATATAACCCAACCTAAGCCGGAGGTTAAAAAGG\r\nTAGTCTCTCAGACCTATGATTTTGATAAATTCACTATTGACTCTTCTCAGCGTCTTAATCTAAGCTATCGCTATGTTTTCAAGGATTCTAAGGGAAAATT\r\nAATTAATAGCGACGATTTACAGAAGCAAGGTTATTCACTCACATATATTGATTTATGTACTGTTTCCATTAAAAAAGGTAATTCAAATGAAATTGTTAAA\r\nTGTAATTAATTTTGTTTTCTTGATGTTTGTTTCATCATCTTCTTTTGCTCAGGTAATTGAAATGAATAATTCGCCTCTGCGCGATTTTGTAACTTGGTAT\r\nTCAAAGCAATCAGGCGAATCCGTTATTGTTTCTCCCGATGTAAAAGGTACTGTTACTGTATATTCATCTGACGTTAAACCTGAAAATCTACGCAATTTCT\r\nTTATTTCTGTTTTACGTGCAAATAATTTTGATATGGTAGGTTCTAACCCTTCCATTATTCAGAAGTATAATCCAAACAATCAGGATTATATTGATGAATT\r\nGCCATCATCTGATAATCAGGAATATGATGATAATTCCGCTCCTTCTGGTGGTTTCTTTGTTCCGCAAAATGATAATGTTACTCAAACTTTTAAAATTAAT\r\nAACGTTCGGGCAAAGGATTTAATACGAGTTGTCGAATTGTTTGTAAAGTCTAATACTTCTAAATCCTCAAATGTATTATCTATTGACGGCTCTAATCTAT\r\nTAGTTGTTAGTGCTCCTAAAGATATTTTAGATAACCTTCCTCAATTCCTTTCAACTGTTGATTTGCCAACTGACCAGATATTGATTGAGGGTTTGATATT\r\nTGAGGTTCAGCAAGGTGATGCTTTAGATTTTTCATTTGCTGCTGGCTCTCAGCGTGGCACTGTTGCAGGCGGTGTTAATACTGACCGCCTCACCTCTGTT\r\nTTATCTTCTGCTGGTGGTTCGTTCGGTATTTTTAATGGCGATGTTTTAGGGCTATCAGTTCGCGCATTAAAGACTAATAGCCATTCAAAAATATTGTCTG\r\nTGCCACGTATTCTTACGCTTTCAGGTCAGAAGGGTTCTATCTCTGTTGGCCAGAATGTCCCTTTTATTACTGGTCGTGTGACTGGTGAATCTGCCAATGT\r\nAAATAATCCATTTCAGACGATTGAGCGTCAAAATGTAGGTATTTCCATGAGCGTTTTTCCTGTTGCAATGGCTGGCGGTAATATTGTTCTGGATATTACC\r\nAGCAAGGCCGATAGTTTGAGTTCTTCTACTCAGGCAAGTGATGTTATTACTAATCAAAGAAGTATTGCTACAACGGTTAATTTGCGTGATGGACAGACTC\r\nTTTTACTCGGTGGCCTCACTGATTATAAAAACACTTCTCAGGATTCTGGCGTACCGTTCCTGTCTAAAATCCCTTTAATCGGCCTCCTGTTTAGCTCCCG\r\nCTCTGATTCTAACGAGGAAAGCACGTTATACGTGCTCGTCAAAGCAACCATAGTACGCGCCCTGTAGCGGCGCATTAAGCGCGGCGGGTGTGGTGGTTAC\r\nGCGCAGCGTGACCGCTACACTTGCCAGCGCCCTAGCGCCCGCTCCTTTCGCTTTCTTCCCTTCCTTTCTCGCCACGTTCGCCGGCTTTCCCCGTCAAGCT\r\nCTAAATCGGGGGCTCCCTTTAGGGTTCCGATTTAGTGCTTTACGGCACCTCGACCCCAAAAAACTTGATTTGGGTGATGGTTCACGTAGTGGGCCATCGC\r\nCCTGATAGACGGTTTTTCGCCCTTTGACGTTGGAGTCCACGTTCTTTAATAGTGGACTCTTGTTCCAAACTGGAACAACACTCAACCCTATCTCGGGCTA\r\nTTCTTTTGATTTATAAGGGATTTTGCCGATTTCGGAACCACCATCAAACAGGATTTTCGCCTGCTGGGGCAAACCAGCGTGGACCGCTTGCTGCAACTCT\r\nCTCAGGGCCAGGCGGTGAAGGGCAATCAGCTGTTGCCCGTCTCACTGGTGAAAAGAAAAACCACCCTGGCGCCCAATACGCAAACCGCCTCTCCCCGCGC\r\nGTTGGCCGATTCATTAATGCAGCTGGCACGACAGGTTTCCCGACTGGAAAGCGGGCAGTGAGCGCAACGCAATTAATGTGAGTTAGCTCACTCATTAGGC\r\nACCCCAGGCTTTACACTTTATGCTTCCGGCTCGTATGTTGTGTGGAATTGTGAGCGGATAACAATTTCACACAGGAAACAGCTATGACCATGATTACGAA\r\nTTCGAGCTCGGTACCCGGGGATCCTCCGTCTTTATCGAGGTAACAAGCACCACGTAGCTTAAGCCCTGTTTACTCATTACACCAACCAGGAGGTCAGAGT\r\nTCGGAGAAATGATTTATGTGAAATGCGTCAGCCGATTCAAGGCCCCTATATTCGTGCCCACCGACGAGTTGCTTACAGATGGCAGGGCCGCACTGTCGGT\r\nATCATAGAGTCACTCCAGGGCGAGCGTAAATAGATTAGAAGCGGGGTTATTTTGGCGGGACATTGTCATAAGGTTGACAATTCAGCACTAAGGACACTTA\r\nAGTCGTGCGCATGAATTCACAACCACTTAGAAGAACATCCACCCTGGCTTCTCCTGAGAA\r\n''')\r\n\r\n_8064 = re.sub(r'\\s', '', '''\r\nGGCAATGACCTGATAGCCTTTGTAGATCTCTCAAAAATAGCTACCCTCTCCGGCATTAATTTATCAGCTAGAACGGTTGAATATCATATTGATGGTGATT\r\nTGACTGTCTCCGGCCTTTCTCACCCTTTTGAATCTTTACCTACACATTACTCAGGCATTGCATTTAAAATATATGAGGGTTCTAAAAATTTTTATCCTTG\r\nCGTTGAAATAAAGGCTTCTCCCGCAAAAGTATTACAGGGTCATAATGTTTTTGGTACAACCGATTTAGCTTTATGCTCTGAGGCTTTATTGCTTAATTTT\r\nGCTAATTCTTTGCCTTGCCTGTATGATTTATTGGATGTTAATGCTACTACTATTAGTAGAATTGATGCCACCTTTTCAGCTCGCGCCCCAAATGAAAATA\r\nTAGCTAAACAGGTTATTGACCATTTGCGAAATGTATCTAATGGTCAAACTAAATCTACTCGTTCGCAGAATTGGGAATCAACTGTTATATGGAATGAAAC\r\nTTCCAGACACCGTACTTTAGTTGCATATTTAAAACATGTTGAGCTACAGCATTATATTCAGCAATTAAGCTCTAAGCCATCCGCAAAAATGACCTCTTAT\r\nCAAAAGGAGCAATTAAAGGTACTCTCTAATCCTGACCTGTTGGAGTTTGCTTCCGGTCTGGTTCGCTTTGAAGCTCGAATTAAAACGCGATATTTGAAGT\r\nCTTTCGGGCTTCCTCTTAATCTTTTTGATGCAATCCGCTTTGCTTCTGACTATAATAGTCAGGGTAAAGACCTGATTTTTGATTTATGGTCATTCTCGTT\r\nTTCTGAACTGTTTAAAGCATTTGAGGGGGATTCAATGAATATTTATGACGATTCCGCAGTATTGGACGCTATCCAGTCTAAACATTTTACTATTACCCCC\r\nTCTGGCAAAACTTCTTTTGCAAAAGCCTCTCGCTATTTTGGTTTTTATCGTCGTCTGGTAAACGAGGGTTATGATAGTGTTGCTCTTACTATGCCTCGTA\r\nATTCCTTTTGGCGTTATGTATCTGCATTAGTTGAATGTGGTATTCCTAAATCTCAACTGATGAATCTTTCTACCTGTAATAATGTTGTTCCGTTAGTTCG\r\nTTTTATTAACGTAGATTTTTCTTCCCAACGTCCTGACTGGTATAATGAGCCAGTTCTTAAAATCGCATAAGGTAATTCACAATGATTAAAGTTGAAATTA\r\nAACCATCTCAAGCCCAATTTACTACTCGTTCTGGTGTTTCTCGTCAGGGCAAGCCTTATTCACTGAATGAGCAGCTTTGTTACGTTGATTTGGGTAATGA\r\nATATCCGGTTCTTGTCAAGATTACTCTTGATGAAGGTCAGCCAGCCTATGCGCCTGGTCTGTACACCGTTCATCTGTCCTCTTTCAAAGTTGGTCAGTTC\r\nGGTTCCCTTATGATTGACCGTCTGCGCCTCGTTCCGGCTAAGTAACATGGAGCAGGTCGCGGATTTCGACACAATTTATCAGGCGATGATACAAATCTCC\r\nGTTGTACTTTGTTTCGCGCTTGGTATAATCGCTGGGGGTCAAAGATGAGTGTTTTAGTGTATTCTTTTGCCTCTTTCGTTTTAGGTTGGTGCCTTCGTAG\r\nTGGCATTACGTATTTTACCCGTTTAATGGAAACTTCCTCATGAAAAAGTCTTTAGTCCTCAAAGCCTCTGTAGCCGTTGCTACCCTCGTTCCGATGCTGT\r\nCTTTCGCTGCTGAGGGTGACGATCCCGCAAAAGCGGCCTTTAACTCCCTGCAAGCCTCAGCGACCGAATATATCGGTTATGCGTGGGCGATGGTTGTTGT\r\nCATTGTCGGCGCAACTATCGGTATCAAGCTGTTTAAGAAATTCACCTCGAAAGCAAGCTGATAAACCGATACAATTAAAGGCTCCTTTTGGAGCCTTTTT\r\nTTTGGAGATTTTCAACGTGAAAAAATTATTATTCGCAATTCCTTTAGTTGTTCCTTTCTATTCTCACTCCGCTGAAACTGTTGAAAGTTGTTTAGCAAAA\r\nTCCCATACAGAAAATTCATTTACTAACGTCTGGAAAGACGACAAAACTTTAGATCGTTACGCTAACTATGAGGGCTGTCTGTGGAATGCTACAGGCGTTG\r\nTAGTTTGTACTGGTGACGAAACTCAGTGTTACGGTACATGGGTTCCTATTGGGCTTGCTATCCCTGAAAATGAGGGTGGTGGCTCTGAGGGTGGCGGTTC\r\nTGAGGGTGGCGGTTCTGAGGGTGGCGGTACTAAACCTCCTGAGTACGGTGATACACCTATTCCGGGCTATACTTATATCAACCCTCTCGACGGCACTTAT\r\nCCGCCTGGTACTGAGCAAAACCCCGCTAATCCTAATCCTTCTCTTGAGGAGTCTCAGCCTCTTAATACTTTCATGTTTCAGAATAATAGGTTCCGAAATA\r\nGGCAGGGGGCATTAACTGTTTATACGGGCACTGTTACTCAAGGCACTGACCCCGTTAAAACTTATTACCAGTACACTCCTGTATCATCAAAAGCCATGTA\r\nTGACGCTTACTGGAACGGTAAATTCAGAGACTGCGCTTTCCATTCTGGCTTTAATGAGGATTTATTTGTTTGTGAATATCAAGGCCAATCGTCTGACCTG\r\nCCTCAACCTCCTGTCAATGCTGGCGGCGGCTCTGGTGGTGGTTCTGGTGGCGGCTCTGAGGGTGGTGGCTCTGAGGGTGGCGGTTCTGAGGGTGGCGGCT\r\nCTGAGGGAGGCGGTTCCGGTGGTGGCTCTGGTTCCGGTGATTTTGATTATGAAAAGATGGCAAACGCTAATAAGGGGGCTATGACCGAAAATGCCGATGA\r\nAAACGCGCTACAGTCTGACGCTAAAGGCAAACTTGATTCTGTCGCTACTGATTACGGTGCTGCTATCGATGGTTTCATTGGTGACGTTTCCGGCCTTGCT\r\nAATGGTAATGGTGCTACTGGTGATTTTGCTGGCTCTAATTCCCAAATGGCTCAAGTCGGTGACGGTGATAATTCACCTTTAATGAATAATTTCCGTCAAT\r\nATTTACCTTCCCTCCCTCAATCGGTTGAATGTCGCCCTTTTGTCTTTGGCGCTGGTAAACCATATGAATTTTCTATTGATTGTGACAAAATAAACTTATT\r\nCCGTGGTGTCTTTGCGTTTCTTTTATATGTTGCCACCTTTATGTATGTATTTTCTACGTTTGCTAACATACTGCGTAATAAGGAGTCTTAATCATGCCAG\r\nTTCTTTTGGGTATTCCGTTATTATTGCGTTTCCTCGGTTTCCTTCTGGTAACTTTGTTCGGCTATCTGCTTACTTTTCTTAAAAAGGGCTTCGGTAAGAT\r\nAGCTATTGCTATTTCATTGTTTCTTGCTCTTATTATTGGGCTTAACTCAATTCTTGTGGGTTATCTCTCTGATATTAGCGCTCAATTACCCTCTGACTTT\r\nGTTCAGGGTGTTCAGTTAATTCTCCCGTCTAATGCGCTTCCCTGTTTTTATGTTATTCTCTCTGTAAAGGCTGCTATTTTCATTTTTGACGTTAAACAAA\r\nAAATCGTTTCTTATTTGGATTGGGATAAATAATATGGCTGTTTATTTTGTAACTGGCAAATTAGGCTCTGGAAAGACGCTCGTTAGCGTTGGTAAGATTC\r\nAGGATAAAATTGTAGCTGGGTGCAAAATAGCAACTAATCTTGATTTAAGGCTTCAAAACCTCCCGCAAGTCGGGAGGTTCGCTAAAACGCCTCGCGTTCT\r\nTAGAATACCGGATAAGCCTTCTATATCTGATTTGCTTGCTATTGGGCGCGGTAATGATTCCTACGATGAAAATAAAAACGGCTTGCTTGTTCTCGATGAG\r\nTGCGGTACTTGGTTTAATACCCGTTCTTGGAATGATAAGGAAAGACAGCCGATTATTGATTGGTTTCTACATGCTCGTAAATTAGGATGGGATATTATTT\r\nTTCTTGTTCAGGACTTATCTATTGTTGATAAACAGGCGCGTTCTGCATTAGCTGAACATGTTGTTTATTGTCGTCGTCTGGACAGAATTACTTTACCTTT\r\nTGTCGGTACTTTATATTCTCTTATTACTGGCTCGAAAATGCCTCTGCCTAAATTACATGTTGGCGTTGTTAAATATGGCGATTCTCAATTAAGCCCTACT\r\nGTTGAGCGTTGGCTTTATACTGGTAAGAATTTGTATAACGCATATGATACTAAACAGGCTTTTTCTAGTAATTATGATTCCGGTGTTTATTCTTATTTAA\r\nCGCCTTATTTATCACACGGTCGGTATTTCAAACCATTAAATTTAGGTCAGAAGATGAAATTAACTAAAATATATTTGAAAAAGTTTTCTCGCGTTCTTTG\r\nTCTTGCGATTGGATTTGCATCAGCATTTACATATAGTTATATAACCCAACCTAAGCCGGAGGTTAAAAAGGTAGTCTCTCAGACCTATGATTTTGATAAA\r\nTTCACTATTGACTCTTCTCAGCGTCTTAATCTAAGCTATCGCTATGTTTTCAAGGATTCTAAGGGAAAATTAATTAATAGCGACGATTTACAGAAGCAAG\r\nGTTATTCACTCACATATATTGATTTATGTACTGTTTCCATTAAAAAAGGTAATTCAAATGAAATTGTTAAATGTAATTAATTTTGTTTTCTTGATGTTTG\r\nTTTCATCATCTTCTTTTGCTCAGGTAATTGAAATGAATAATTCGCCTCTGCGCGATTTTGTAACTTGGTATTCAAAGCAATCAGGCGAATCCGTTATTGT\r\nTTCTCCCGATGTAAAAGGTACTGTTACTGTATATTCATCTGACGTTAAACCTGAAAATCTACGCAATTTCTTTATTTCTGTTTTACGTGCAAATAATTTT\r\nGATATGGTAGGTTCTAACCCTTCCATTATTCAGAAGTATAATCCAAACAATCAGGATTATATTGATGAATTGCCATCATCTGATAATCAGGAATATGATG\r\nATAATTCCGCTCCTTCTGGTGGTTTCTTTGTTCCGCAAAATGATAATGTTACTCAAACTTTTAAAATTAATAACGTTCGGGCAAAGGATTTAATACGAGT\r\nTGTCGAATTGTTTGTAAAGTCTAATACTTCTAAATCCTCAAATGTATTATCTATTGACGGCTCTAATCTATTAGTTGTTAGTGCTCCTAAAGATATTTTA\r\nGATAACCTTCCTCAATTCCTTTCAACTGTTGATTTGCCAACTGACCAGATATTGATTGAGGGTTTGATATTTGAGGTTCAGCAAGGTGATGCTTTAGATT\r\nTTTCATTTGCTGCTGGCTCTCAGCGTGGCACTGTTGCAGGCGGTGTTAATACTGACCGCCTCACCTCTGTTTTATCTTCTGCTGGTGGTTCGTTCGGTAT\r\nTTTTAATGGCGATGTTTTAGGGCTATCAGTTCGCGCATTAAAGACTAATAGCCATTCAAAAATATTGTCTGTGCCACGTATTCTTACGCTTTCAGGTCAG\r\nAAGGGTTCTATCTCTGTTGGCCAGAATGTCCCTTTTATTACTGGTCGTGTGACTGGTGAATCTGCCAATGTAAATAATCCATTTCAGACGATTGAGCGTC\r\nAAAATGTAGGTATTTCCATGAGCGTTTTTCCTGTTGCAATGGCTGGCGGTAATATTGTTCTGGATATTACCAGCAAGGCCGATAGTTTGAGTTCTTCTAC\r\nTCAGGCAAGTGATGTTATTACTAATCAAAGAAGTATTGCTACAACGGTTAATTTGCGTGATGGACAGACTCTTTTACTCGGTGGCCTCACTGATTATAAA\r\nAACACTTCTCAGGATTCTGGCGTACCGTTCCTGTCTAAAATCCCTTTAATCGGCCTCCTGTTTAGCTCCCGCTCTGATTCTAACGAGGAAAGCACGTTAT\r\nACGTGCTCGTCAAAGCAACCATAGTACGCGCCCTGTAGCGGCGCATTAAGCGCGGCGGGTGTGGTGGTTACGCGCAGCGTGACCGCTACACTTGCCAGCG\r\nCCCTAGCGCCCGCTCCTTTCGCTTTCTTCCCTTCCTTTCTCGCCACGTTCGCCGGCTTTCCCCGTCAAGCTCTAAATCGGGGGCTCCCTTTAGGGTTCCG\r\nATTTAGTGCTTTACGGCACCTCGACCCCAAAAAACTTGATTTGGGTGATGGTTCACGTAGTGGGCCATCGCCCTGATAGACGGTTTTTCGCCCTTTGACG\r\nTTGGAGTCCACGTTCTTTAATAGTGGACTCTTGTTCCAAACTGGAACAACACTCAACCCTATCTCGGGCTATTCTTTTGATTTATAAGGGATTTTGCCGA\r\nTTTCGGAACCACCATCAAACAGGATTTTCGCCTGCTGGGGCAAACCAGCGTGGACCGCTTGCTGCAACTCTCTCAGGGCCAGGCGGTGAAGGGCAATCAG\r\nCTGTTGCCCGTCTCACTGGTGAAAAGAAAAACCACCCTGGCGCCCAATACGCAAACCGCCTCTCCCCGCGCGTTGGCCGATTCATTAATGCAGCTGGCAC\r\nGACAGGTTTCCCGACTGGAAAGCGGGCAGTGAGCGCAACGCAATTAATGTGAGTTAGCTCACTCATTAGGCACCCCAGGCTTTACACTTTATGCTTCCGG\r\nCTCGTATGTTGTGTGGAATTGTGAGCGGATAACAATTTCACACAGGAAACAGCTATGACCATGATTACGAATTCGAGCTCGGTACCCGGGGATCCTCAAC\r\nTGTGAGGAGGCTCACGGACGCGAAGAACAGGCACGCGTGCTGGCAGAAACCCCCGGTATGACCGTGAAAACGGCCCGCCGCATTCTGGCCGCAGCACCAC\r\nAGAGTGCACAGGCGCGCAGTGACACTGCGCTGGATCGTCTGATGCAGGGGGCACCGGCACCGCTGGCTGCAGGTAACCCGGCATCTGATGCCGTTAACGA\r\nTTTGCTGAACACACCAGTGTAAGGGATGTTTATGACGAGCAAAGAAACCTTTACCCATTACCAGCCGCAGGGCAACAGTGACCCGGCTCATACCGCAACC\r\nGCGCCCGGCGGATTGAGTGCGAAAGCGCCTGCAATGACCCCGCTGATGCTGGACACCTCCAGCCGTAAGCTGGTTGCGTGGGATGGCACCACCGACGGTG\r\nCTGCCGTTGGCATTCTTGCGGTTGCTGCTGACCAGACCAGCACCACGCTGACGTTCTACAAGTCCGGCACGTTCCGTTATGAGGATGTGCTCTGGCCGGA\r\nGGCTGCCAGCGACGAGACGAAAAAACGGACCGCGTTTGCCGGAACGGCAATCAGCATCGTTTAACTTTACCCTTCATCACTAAAGGCCGCCTGTGCGGCT\r\nTTTTTTACGGGATTTTTTTATGTCGATGTACACAACCGCCCAACTGCTGGCGGCAAATGAGCAGAAATTTAAGTTTGATCCGCTGTTTCTGCGTCTCTTT\r\nTTCCGTGAGAGCTATCCCTTCACCACGGAGAAAGTCTATCTCTCACAAATTCCGGGACTGGTAAACATGGCGCTGTACGTTTCGCCGATTGTTTCCGGTG\r\nAGGTTATCCGTTCCCGTGGCGGCTCCACCTCTGAAAGCTTGGCACTGGCCGTCGTTTTACAACGTCGTGACTGGGAAAACCCTGGCGTTACCCAACTTAA\r\nTCGCCTTGCAGCACATCCCCCTTTCGCCAGCTGGCGTAATAGCGAAGAGGCCCGCACCGATCGCCCTTCCCAACAGTTGCGCAGCCTGAATGGCGAATGG\r\nCGCTTTGCCTGGTTTCCGGCACCAGAAGCGGTGCCGGAAAGCTGGCTGGAGTGCGATCTTCCTGAGGCCGATACTGTCGTCGTCCCCTCAAACTGGCAGA\r\nTGCACGGTTACGATGCGCCCATCTACACCAACGTGACCTATCCCATTACGGTCAATCCGCCGTTTGTTCCCACGGAGAATCCGACGGGTTGTTACTCGCT\r\nCACATTTAATGTTGATGAAAGCTGGCTACAGGAAGGCCAGACGCGAATTATTTTTGATGGCGTTCCTATTGGTTAAAAAATGAGCTGATTTAACAAAAAT\r\nTTAATGCGAATTTTAACAAAATATTAACGTTTACAATTTAAATATTTGCTTATACAATCTTCCTGTTTTTGGGGCTTTTCTGATTATCAACCGGGGTACA\r\nTATGATTGACATGCTAGTTTTACGATTACCGTTCATCGATTCTCTTGTTTGCTCCAGACTCTCA\r\n''')\r\n\r\n_m13_variants = {\r\n M13Variant.p7249: _7249,\r\n M13Variant.p7560: _7560,\r\n M13Variant.p8064: _8064,\r\n}\r\n\r\n##################\r\n# keys\r\n\r\n# Design keys\r\nversion_key = 'version'\r\ngrid_key = 'grid'\r\nhelices_key = 'helices'\r\nstrands_key = 'strands'\r\nscaffold_key = 'scaffold'\r\nhelices_view_order_key = 'helices_view_order'\r\nis_origami_key = 'is_origami'\r\ndesign_modifications_key = 'modifications_in_design'\r\ngeometry_key = 'geometry'\r\ngroups_key = 'groups'\r\n\r\n# Geometry keys\r\nrise_per_base_pair_key = 'rise_per_base_pair'\r\nlegacy_rise_per_base_pair_keys = ['z_step']\r\nhelix_radius_key = 'helix_radius'\r\nbases_per_turn_key = 'bases_per_turn'\r\nminor_groove_angle_key = 'minor_groove_angle'\r\ninter_helix_gap_key = 'inter_helix_gap'\r\n\r\n# Helix keys\r\nidx_on_helix_key = 'idx'\r\nmax_offset_key = 'max_offset'\r\nmin_offset_key = 'min_offset'\r\ngrid_position_key = 'grid_position'\r\nposition_key = 'position'\r\nlegacy_position_keys = ['origin']\r\nmajor_tick_distance_key = 'major_tick_distance'\r\nmajor_ticks_key = 'major_ticks'\r\nmajor_tick_start_key = 'major_tick_start'\r\nmajor_tick_periodic_distances_key = 'major_tick_periodic_distances'\r\ngroup_key = 'group'\r\n\r\n# Position keys\r\nposition_x_key = 'x'\r\nposition_y_key = 'y'\r\nposition_z_key = 'z'\r\npitch_key = 'pitch'\r\nroll_key = 'roll'\r\nyaw_key = 'yaw'\r\nposition_origin_key = 'origin'\r\n\r\n# Strand keys\r\nstrand_name_key = 'name'\r\ncircular_key = 'circular'\r\ncolor_key = 'color'\r\ndna_sequence_key = 'sequence'\r\nlegacy_dna_sequence_keys = ['dna_sequence'] # support legacy names for these ideas\r\ndomains_key = 'domains'\r\nlegacy_domains_keys = ['substrands'] # support legacy names for these ideas\r\nidt_key = 'idt'\r\nis_scaffold_key = 'is_scaffold'\r\nmodification_5p_key = '5prime_modification'\r\nmodification_3p_key = '3prime_modification'\r\nmodifications_int_key = 'internal_modifications'\r\nstrand_label_key = 'label'\r\n\r\n# Domain keys\r\ndomain_name_key = 'name'\r\nhelix_idx_key = 'helix'\r\nforward_key = 'forward'\r\nlegacy_forward_keys = ['right'] # support legacy names for these ideas\r\nstart_key = 'start'\r\nend_key = 'end'\r\ndeletions_key = 'deletions'\r\ninsertions_key = 'insertions'\r\ndomain_label_key = 'label'\r\n\r\n# Loopout keys\r\nloopout_key = 'loopout'\r\n\r\n# Modification keys\r\nmod_location_key = 'location'\r\nmod_display_text_key = 'display_text'\r\nmod_id_key = 'id'\r\nmod_idt_text_key = 'idt_text'\r\nmod_font_size_key = 'font_size'\r\nmod_display_connector_key = 'display_connector'\r\nmod_allowed_bases_key = 'allowed_bases'\r\n\r\n# IDT keys\r\nidt_scale_key = 'scale'\r\nidt_purification_key = 'purification'\r\nidt_plate_key = 'plate'\r\nidt_well_key = 'well'\r\n# legacy; not written anymore as part of idt, but may be read from older versions of the JSON if\r\n# the Strand has no name but the IDT field does have a name\r\nidt_name_key = 'name'\r\n\r\n# end keys\r\n##################\r\n\r\n# end constants\r\n##########################################################################\r\n\r\n\r\n##########################################################################\r\n# modification abstract base classes\r\n\r\n_default_modification_id = \"WARNING: no id assigned to modification\"\r\n\r\n\r\n@dataclass(frozen=True, eq=True)\r\nclass Modification(_JSONSerializable):\r\n \"\"\"Base class of modifications (to DNA sequences, e.g., biotin or Cy3).\r\n Use :any:`Modification3Prime`, :any:`Modification5Prime`, or :any:`ModificationInternal`\r\n to instantiate.\"\"\"\r\n\r\n display_text: str\r\n \"\"\"Short text to display in the web interface as an \"icon\"\r\n visually representing the modification, e.g., ``'B'`` for biotin or ``'Cy3'`` for Cy3.\"\"\"\r\n\r\n id: str = _default_modification_id\r\n \"\"\"\r\n Representation as a string; used to write in :any:`Strand` json representation,\r\n while the full description of the modification is written under a global key in the :any:`Design`.\r\n If not specified, but :py:data:`Modification.idt_text` is specified, then it will be set equal to that.\r\n \"\"\"\r\n\r\n idt_text: Optional[str] = None\r\n \"\"\"IDT text string specifying this modification (e.g., '/5Biosg/' for 5' biotin). optional\"\"\"\r\n\r\n def __post_init__(self) -> None:\r\n if self.id == _default_modification_id and self.idt_text is not None:\r\n object.__setattr__(self, 'id', self.idt_text)\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n ret = {mod_display_text_key: self.display_text}\r\n if self.idt_text is not None:\r\n ret[mod_idt_text_key] = self.idt_text\r\n ret[mod_display_connector_key] = False # type: ignore\r\n return ret\r\n\r\n @staticmethod\r\n def from_json(\r\n json_map: Dict[str, Any]) -> 'Modification': # remove quotes when Py3.6 support dropped\r\n location = json_map[mod_location_key]\r\n if location == \"5'\":\r\n return Modification5Prime.from_json(json_map)\r\n elif location == \"3'\":\r\n return Modification3Prime.from_json(json_map)\r\n elif location == \"internal\":\r\n return ModificationInternal.from_json(json_map)\r\n else:\r\n raise IllegalDesignError(f'unknown Modification location \"{location}\"')\r\n\r\n\r\n@dataclass(frozen=True, eq=True)\r\nclass Modification5Prime(Modification):\r\n \"\"\"5' modification of DNA sequence, e.g., biotin or Cy3.\"\"\"\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n ret = super().to_json_serializable(suppress_indent)\r\n ret[mod_location_key] = \"5'\"\r\n return ret\r\n\r\n # remove quotes when Py3.6 support dropped\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'Modification5Prime':\r\n display_text = json_map[mod_display_text_key]\r\n location = json_map[mod_location_key]\r\n assert location == \"5'\"\r\n idt_text = json_map.get(mod_idt_text_key)\r\n return Modification5Prime(display_text=display_text, idt_text=idt_text)\r\n\r\n\r\n@dataclass(frozen=True, eq=True)\r\nclass Modification3Prime(Modification):\r\n \"\"\"3' modification of DNA sequence, e.g., biotin or Cy3.\"\"\"\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n ret = super().to_json_serializable(suppress_indent)\r\n ret[mod_location_key] = \"3'\"\r\n return ret\r\n\r\n # remove quotes when Py3.6 support dropped\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'Modification3Prime':\r\n display_text = json_map[mod_display_text_key]\r\n location = json_map[mod_location_key]\r\n assert location == \"3'\"\r\n idt_text = json_map.get(mod_idt_text_key)\r\n return Modification3Prime(display_text=display_text, idt_text=idt_text)\r\n\r\n\r\n@dataclass(frozen=True, eq=True)\r\nclass ModificationInternal(Modification):\r\n \"\"\"Internal modification of DNA sequence, e.g., biotin or Cy3.\"\"\"\r\n\r\n allowed_bases: Optional[FrozenSet[str]] = None\r\n \"\"\"If None, then this is an internal modification that goes between bases. \r\n If instead it is a list of bases, then this is an internal modification that attaches to a base,\r\n and this lists the allowed bases for this internal modification to be placed at. \r\n For example, internal biotins for IDT must be at a T. If any base is allowed, it should be\r\n ``['A','C','G','T']``.\"\"\"\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n ret = super().to_json_serializable(suppress_indent)\r\n ret[mod_location_key] = \"internal\"\r\n if self.allowed_bases is not None:\r\n ret[mod_allowed_bases_key] = NoIndent(\r\n list(self.allowed_bases)) if suppress_indent else list(self.allowed_bases)\r\n return ret\r\n\r\n # remove quotes when Py3.6 support dropped\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'ModificationInternal':\r\n display_text = json_map[mod_display_text_key]\r\n location = json_map[mod_location_key]\r\n assert location == \"internal\"\r\n idt_text = json_map.get(mod_idt_text_key)\r\n allowed_bases_list = json_map.get(mod_allowed_bases_key)\r\n allowed_bases = frozenset(allowed_bases_list) if allowed_bases_list is not None else None\r\n return ModificationInternal(display_text=display_text, idt_text=idt_text, allowed_bases=allowed_bases)\r\n\r\n\r\n# end modification abstract base classes\r\n##########################################################################\r\n\r\n\r\n@dataclass\r\nclass Position3D(_JSONSerializable):\r\n \"\"\"\r\n Position (x,y,z) in 3D space.\r\n \"\"\"\r\n\r\n x: float = 0\r\n \"\"\"x-coordinate of position. \r\n Increasing `x` moves right in the side view and out of the screen in the main view.\"\"\"\r\n\r\n y: float = 0\r\n \"\"\"y-coordinate of position.\r\n Increasing `y` moves down in the side and main views, i.e., \"screen coordinates\".\r\n (though this can be rotated to Cartesian coordinates, where y goes up, \r\n by selecting \"invert y/z axes\" in the View menu of scadnano.)\"\"\"\r\n\r\n z: float = 0\r\n \"\"\"z-coordinate of position.\r\n Increasing `z` moves right in the main view and into the screen in the side view.\"\"\"\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n dct: Dict[str, Any] = dict(self.__dict__)\r\n # return NoIndent(dct) if suppress_indent else dct\r\n return dct\r\n\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'Position3D': # remove quotes when Py3.6 support dropped\r\n if position_origin_key in json_map:\r\n origin_ = json_map[position_origin_key]\r\n x = origin_[position_x_key]\r\n y = origin_[position_y_key]\r\n z = origin_[position_z_key]\r\n else:\r\n x = json_map[position_x_key]\r\n y = json_map[position_y_key]\r\n z = json_map[position_z_key]\r\n return Position3D(x=x, y=y, z=z)\r\n\r\n\r\norigin: Position3D = Position3D(x=0, y=0, z=0)\r\n\r\n\r\n@dataclass\r\nclass HelixGroup(_JSONSerializable):\r\n \"\"\"\r\n Represents a set of properties to apply to a specific group of :any:`Helix`'s in the :any:`Design`.\r\n\r\n A :any:`HelixGroup` is useful for grouping together helices that should all be in parallel,\r\n as part of a design where different groups are not parallel. In particular, each :any:`HelixGroup`\r\n can be given its own 3D position and pitch/yaw/roll orientation angles. Each :any:`HelixGroup` does\r\n not actually *contain* its helices; they are associated through the field `Helix.group`, which is\r\n a string representing a key in the dict ``groups`` specified in the constructor for :any:`Design`.\r\n\r\n If there are :any:`HelixGroup`'s explicitly specified, then the field :py:data:`Design.grid` is ignored.\r\n Each :any:`HelixGroup` has its own grid, and the fields :py:data:`Helix.position` or\r\n :py:data:`Helix.grid_position` are considered relative to the origin of that :any:`HelixGroup`\r\n (i.e., the value :py:data:`HelixGroup.position`). Although it is possible to assign a :any:`Helix`\r\n in a :any:`HelixGroup` a non-zero :py:data:`Helix.pitch` or :py:data:`Helix.yaw`,\r\n the most common use case is that all helices in a group are parallel, so they all have these angles\r\n equal to 0 (since they are unrotated relative to each other along the pitch and yaw planes).\r\n \"\"\"\r\n\r\n position: Position3D = origin\r\n \"\"\"The \"origin\" of this :any:`HelixGroup`.\"\"\"\r\n\r\n pitch: float = 0\r\n \"\"\"Same meaning as :py:data:`Helix.pitch`, applied to every :any:`Helix` in the group.\"\"\"\r\n\r\n yaw: float = 0\r\n \"\"\"Same meaning as :py:data:`Helix.yaw`, applied to every :any:`Helix` in the group.\"\"\"\r\n\r\n roll: float = 0\r\n \"\"\"Same meaning as :py:data:`Helix.roll`, applied to every :any:`Helix` in the group.\"\"\"\r\n\r\n helices_view_order: Optional[List[int]] = None\r\n \"\"\"Same meaning as :py:data:`Design.helices_view_order`, \r\n applied to only the :any:`Helix`'s in the group.\"\"\"\r\n\r\n grid: Grid = Grid.none\r\n \"\"\"Same meaning as :py:data:`Design.grid`, enforced only on the :any:`Helix`'s in the group.\"\"\"\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n dct: Dict[str, Any] = dict()\r\n\r\n helix_idxs: List[int] = kwargs['helix_idxs']\r\n\r\n pos = self.position.to_json_serializable(suppress_indent)\r\n dct[position_key] = NoIndent(pos) if suppress_indent else pos\r\n\r\n if not _is_close(self.pitch, default_pitch):\r\n dct[pitch_key] = self.pitch\r\n if not _is_close(self.roll, default_roll):\r\n dct[roll_key] = self.roll\r\n if not _is_close(self.yaw, default_yaw):\r\n dct[yaw_key] = self.yaw\r\n\r\n dct[grid_key] = str(self.grid)[5:] # remove prefix 'Grid.'\r\n\r\n default_helices_view_order = sorted(helix_idxs)\r\n if self.helices_view_order != default_helices_view_order:\r\n dct[helices_view_order_key] = NoIndent(self.helices_view_order)\r\n\r\n return dct\r\n\r\n def _assign_default_helices_view_order(self, helices_in_group: Dict[int, 'Helix']) -> None:\r\n if self.helices_view_order is not None:\r\n raise AssertionError('should not call _assign_default_helices_view_order if '\r\n 'HelixGroup.helices_view_order is not None, but it is '\r\n f'{self.helices_view_order}')\r\n helix_idxs = list(helices_in_group.keys())\r\n self.helices_view_order = _check_helices_view_order_and_return(self.helices_view_order, helix_idxs)\r\n\r\n @staticmethod\r\n def from_json(json_map: dict, **kwargs: Any) -> 'HelixGroup': # remove quotes when Py3.6 support dropped\r\n grid = optional_field(Grid.none, json_map, grid_key)\r\n\r\n num_helices: int = kwargs['num_helices']\r\n helices_view_order = json_map.get(helices_view_order_key)\r\n if helices_view_order is not None:\r\n if len(helices_view_order) != num_helices:\r\n raise IllegalDesignError(f'length of helices ({num_helices}) does not match '\r\n f'length of helices_view_order ({len(helices_view_order)})')\r\n\r\n position_map = mandatory_field(HelixGroup, json_map, position_key, *legacy_position_keys)\r\n position = Position3D.from_json(position_map)\r\n\r\n pitch = json_map.get(pitch_key, default_pitch)\r\n roll = json_map.get(roll_key, default_roll)\r\n yaw = json_map.get(yaw_key, default_yaw)\r\n\r\n return HelixGroup(position=position,\r\n pitch=pitch,\r\n yaw=yaw,\r\n roll=roll,\r\n helices_view_order=helices_view_order,\r\n grid=grid)\r\n\r\n def helices_view_order_inverse(self, idx: int) -> int:\r\n \"\"\"\r\n Given a :py:data:`Helix.idx` in this :any:`HelixGroup`, return its view order.\r\n\r\n :param idx: index of :any:`Helix` in this :any:`HelixGroup`\r\n :return: view order of the :any:`Helix`\r\n :raises ValueError: if `idx` is not the index of a :any:`Helix` in this :any:`HelixGroup`\r\n \"\"\"\r\n if self.helices_view_order is None:\r\n raise ValueError('cannot access helices_view_order_inverse until helices_view_order is set')\r\n return self.helices_view_order.index(idx)\r\n\r\n\r\n# def in_browser() -> bool:\r\n# \"\"\"Test if this code is running in the browser.\r\n#\r\n# Checks for existence of package \"pyodide\" used in pyodide. If present it is assumed the code is\r\n# running in the browser.\"\"\"\r\n# try:\r\n# import pyodide # type: ignore\r\n# return True\r\n# except ImportError:\r\n# return False\r\n\r\n\r\n@dataclass\r\nclass Helix(_JSONSerializable):\r\n \"\"\"\r\n Represents a \"helix\" where :any:`Domain`'s could go. Technically a :any:`Helix` can contain no\r\n :any:`Domain`'s. More commonly, some partial regions of it may have only 1 or 0 :any:`Domain`'s.\r\n So it is best thought of as a \"potential\" double-helix.\r\n\r\n It has a 1-dimensional integer coordinate system given by \"offsets\", integers between\r\n :py:data:`Helix.min_offset` (inclusive) and :py:data:`Helix.max_offset` (exclusive).\r\n At any valid offset for this :any:`Helix`, at most two :any:`Domain`'s may share that offset\r\n on this :any:`Helix`, and if there are exactly two, then one must have\r\n :py:data:`Domain.forward` = ``true`` and the other must have\r\n :py:data:`Domain.forward` = ``false``.\r\n\r\n Once part of a :any:`Design`, a :any:`Helix` has an index (accessible via :py:meth:`Helix.idx`\r\n once the :any:`Design` is created)\r\n representing its order in the list of all :any:`Helix`'s. This index is how a :any:`Domain` is\r\n associated to the :any:`Helix` via the integer index :any:`Domain.helix`.\r\n \"\"\"\r\n\r\n max_offset: Optional[int] = None # type: ignore\r\n \"\"\"Maximum offset (exclusive) of :any:`Domain` that can be drawn on this :any:`Helix`. \r\n \r\n Optional field.\r\n If unspecified, it is calculated when the :any:`Design` is instantiated as \r\n the largest :any:`Domain.end` offset of any :any:`Domain` in the design.\r\n \"\"\"\r\n\r\n min_offset: int = 0\r\n \"\"\"Minimum offset (inclusive) of :any:`Domain` that can be drawn on this :any:`Helix`. \r\n \r\n Optional field. Default value 0.\r\n \"\"\"\r\n\r\n major_tick_start: Optional[int] = None # type: ignore\r\n \"\"\"Offset of first major tick when not specifying :py:data:`Helix.major_ticks`. \r\n Used in combination with either \r\n :py:data:`Helix.major_tick_distance` or \r\n :py:data:`Helix.major_tick_periodic_distances`.\r\n \r\n Optional field. \r\n If not specified, is initialized to value :py:data:`Helix.min_offset`.\"\"\"\r\n\r\n major_tick_distance: Optional[int] = None\r\n \"\"\"Distance between major ticks (bold) delimiting boundaries between bases. Major ticks will appear\r\n in the visual interface at positions \r\n\r\n Optional field.\r\n If 0 then no major ticks are drawn.\r\n If not specified then the default value is assumed.\r\n If the grid is :any:`Grid.square` then the default value is 8.\r\n If the grid is :any:`Grid.hex` or :any:`Grid.honeycomb` then the default value is 7.\"\"\"\r\n\r\n major_tick_periodic_distances: Optional[List[int]] = None\r\n \"\"\"Periodic distances between major ticks. For example, setting \r\n :py:data:`Helix.major_tick_periodic_distances` = [2, 3] and \r\n :py:data:`Helix.major_tick_start` = 10 means that major ticks will appear at\r\n 12, \r\n 15,\r\n 17,\r\n 20,\r\n 22, \r\n 25,\r\n 27,\r\n 30, ...\r\n \r\n Optional field.\r\n :py:data:`Helix.major_tick_distance` is equivalent to \r\n the setting :py:data:`Helix.major_tick_periodic_distances` = [:py:data:`Helix.major_tick_distance`].\r\n \"\"\"\r\n\r\n major_ticks: Optional[List[int]] = None # type: ignore\r\n \"\"\"If not ``None``, overrides :any:`Helix.major_tick_distance`\r\n to specify a list of offsets at which to put major ticks.\"\"\"\r\n\r\n grid_position: Optional[Tuple[int, int]] = None # type: ignore\r\n \"\"\"`(h,v)` position of this helix in the side view grid,\r\n if :const:`Grid.square`, :const:`Grid.hex` , or :const:`Grid.honeycomb` is used\r\n in the :any:`Design` containing this helix.\r\n `h` and `v` are in units of \"helices\": incrementing `h` moves right one helix in the grid\r\n and incrementing `v` moves down one helix in the grid. \r\n In the case of the hexagonal lattice, \r\n The convention is that incrementing `v` moves down and to the right if h is even, \r\n and moves down and to the left if `h` is odd.\r\n This is the \"odd-q\" coordinate system here: \r\n https://www.redblobgames.com/grids/hexagons/)\r\n However, the default y position in the main view for helices does not otherwise depend on grid_position.\r\n The default is to list the y-coordinates in order by helix idx.\r\n \r\n Default is `h` = 0, `v` = index of :any:`Helix` in :py:data:`Design.helices`.\r\n \r\n In the case of the honeycomb lattice, we use the same convention as cadnano for encoding hex coordinates,\r\n see `misc/cadnano-format-specs/v2.txt`.\r\n That convention is different from simply excluding coordinates from the hex lattice.\r\n \"\"\"\r\n\r\n position: Optional[Position3D] = None # type: ignore\r\n \"\"\"Position (x,y,z) of this :any:`Helix` in 3D space.\r\n \r\n Must be None if :py:data:`Helix.grid_position` is specified.\"\"\"\r\n\r\n pitch: float = 0\r\n \"\"\"Angle in the main view plane; 0 means pointing to the right (min_offset on left, max_offset on right).\r\n Rotation is clockwise in the main view.\r\n See https://en.wikipedia.org/wiki/Aircraft_principal_axes\r\n Units are degrees.\"\"\"\r\n\r\n roll: float = 0\r\n \"\"\"Angle around the center of the helix; 0 means pointing straight up in the side view.\r\n Rotation is clockwise in the side view.\r\n See https://en.wikipedia.org/wiki/Aircraft_principal_axes\r\n Units are degrees.\"\"\"\r\n\r\n yaw: float = 0\r\n \"\"\"Third angle for orientation besides :py:data:`Helix.pitch` and :py:data:`Helix.roll`.\r\n Not visually displayed in scadnano, but here to support more general 3D applications.\r\n See https://en.wikipedia.org/wiki/Aircraft_principal_axes\r\n Units are degrees.\"\"\"\r\n\r\n idx: Optional[int] = None\r\n \"\"\"Index of this :any:`Helix`.\r\n \r\n Optional if no other :any:`Helix` specifies a value for *idx*.\r\n Default is the order of the :any:`Helix` is listed in constructor for :any:`Design`.\"\"\"\r\n\r\n group: str = default_group_name # type: ignore\r\n \"\"\"Name of the :any:`HelixGroup` to which this :any:`Helix` belongs.\"\"\"\r\n\r\n # for optimization; list of domains on that Helix\r\n _domains: List['Domain'] = field(default_factory=list)\r\n\r\n def __post_init__(self) -> None:\r\n if self.major_tick_start is None: # type: ignore\r\n self.major_tick_start = self.min_offset # type: ignore\r\n if self.grid_position is not None and self.position is not None:\r\n raise IllegalDesignError('exactly one of grid_position or position must be specified, '\r\n 'but both are specified')\r\n if self.major_ticks is not None and self.max_offset is not None and self.min_offset is not None:\r\n for major_tick in self.major_ticks:\r\n if major_tick > self.max_offset - self.min_offset:\r\n raise IllegalDesignError(f'major tick {major_tick} in list {self.major_ticks} is '\r\n f'outside the range of available offsets since max_offset = '\r\n f'{self.max_offset}')\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n dct: Any = dict()\r\n\r\n grid: Grid = kwargs['grid']\r\n\r\n # if we have major ticks or position, it's harder to read Helix on one line,\r\n # so don't wrap it in NoIndent, but still wrap longer sub-objects in them\r\n # use_no_indent_helix: bool = not (self.major_ticks is not None or self.position is not None)\r\n use_no_indent_helix: bool = not (self.major_ticks is not None)\r\n\r\n if self.group != default_group_name:\r\n dct[group_key] = self.group\r\n\r\n if not self.min_offset_is_default():\r\n dct[min_offset_key] = self.min_offset\r\n\r\n if not self.major_tick_start_is_default():\r\n dct[major_tick_start_key] = self.major_tick_start\r\n\r\n dct[max_offset_key] = self.max_offset\r\n\r\n if self.position is None:\r\n if grid == Grid.none:\r\n raise IllegalDesignError('cannot have Helix.position == None when grid is None')\r\n dct[grid_position_key] = NoIndent(\r\n self.grid_position) if suppress_indent and not use_no_indent_helix else self.grid_position\r\n else:\r\n if grid != Grid.none:\r\n raise IllegalDesignError('cannot have Helix.position != None when grid is not None')\r\n pos = self.position.to_json_serializable(suppress_indent)\r\n dct[position_key] = NoIndent(pos) if suppress_indent and not use_no_indent_helix else pos\r\n\r\n if not _is_close(self.pitch, default_pitch):\r\n dct[pitch_key] = self.pitch\r\n if not _is_close(self.roll, default_roll):\r\n dct[roll_key] = self.roll\r\n if not _is_close(self.yaw, default_yaw):\r\n dct[yaw_key] = self.yaw\r\n\r\n if not self.major_tick_distance_is_default(grid):\r\n dct[major_tick_distance_key] = self.major_tick_distance\r\n\r\n if not self.major_tick_periodic_distances_is_default():\r\n dct[major_tick_periodic_distances_key] = NoIndent(\r\n self.major_tick_periodic_distances) if suppress_indent and not use_no_indent_helix else \\\r\n self.major_tick_periodic_distances\r\n\r\n if not self.major_ticks_is_default():\r\n dct[major_ticks_key] = NoIndent(\r\n self.major_ticks) if suppress_indent and not use_no_indent_helix else self.major_ticks\r\n\r\n dct[idx_on_helix_key] = self.idx\r\n\r\n return NoIndent(dct) if suppress_indent and use_no_indent_helix else dct\r\n\r\n @staticmethod\r\n def from_json(json_map: dict) -> 'Helix': # remove quotes when Py3.6 support dropped\r\n grid_position: Optional[Tuple[int, int]] = None\r\n if grid_position_key in json_map:\r\n gp_list = json_map[grid_position_key]\r\n if len(gp_list) == 3:\r\n gp_list = gp_list[:2]\r\n if len(gp_list) != 2:\r\n raise IllegalDesignError(\"list of grid_position coordinates must be length 2, \"\r\n f\"but this is the list: {gp_list}\")\r\n grid_position = (gp_list[0], gp_list[1])\r\n\r\n major_tick_distance = json_map.get(major_tick_distance_key)\r\n major_ticks = json_map.get(major_ticks_key)\r\n major_tick_start = json_map.get(major_tick_start_key)\r\n major_tick_periodic_distances = json_map.get(major_tick_periodic_distances_key)\r\n min_offset = optional_field(0, json_map, min_offset_key)\r\n max_offset = json_map.get(max_offset_key)\r\n idx = json_map.get(idx_on_helix_key)\r\n\r\n position_map = optional_field(None, json_map, position_key, *legacy_position_keys)\r\n position = Position3D.from_json(position_map) if position_map is not None else None\r\n\r\n pitch = json_map.get(pitch_key, default_pitch)\r\n roll = json_map.get(roll_key, default_roll)\r\n yaw = json_map.get(yaw_key, default_yaw)\r\n\r\n group = json_map.get(group_key, default_group_name)\r\n\r\n return Helix(\r\n major_tick_distance=major_tick_distance,\r\n major_ticks=major_ticks,\r\n major_tick_start=major_tick_start,\r\n major_tick_periodic_distances=major_tick_periodic_distances,\r\n grid_position=grid_position,\r\n min_offset=min_offset,\r\n max_offset=max_offset,\r\n position=position,\r\n pitch=pitch,\r\n roll=roll,\r\n yaw=yaw,\r\n idx=idx,\r\n group=group,\r\n )\r\n\r\n def default_grid_position(self) -> Tuple[int, int]:\r\n if self.idx is None:\r\n raise AssertionError('cannot call default_grid_position when idx is None')\r\n return 0, self.idx\r\n\r\n def calculate_major_ticks(self, grid: Grid) -> List[int]:\r\n \"\"\"\r\n Calculates full list of major tick marks, whether using `default_major_tick_distance` (from\r\n :any:`Design`), :py:data:`Helix.major_tick_distance`, or :py:data:`Helix.major_ticks`.\r\n They are used in reverse order to determine precedence. (e.g., :py:data:`Helix.major_ticks`\r\n overrides :py:data:`Helix.major_tick_distance`, which overrides\r\n `default_major_tick_distance` from :any:`Design`.\r\n \"\"\"\r\n if self.max_offset is None:\r\n raise ValueError('cannot calculate major ticks if max_offset is not specified')\r\n if self.major_tick_start is None:\r\n raise AssertionError('major_tick_start should never be None')\r\n if self.major_ticks is not None:\r\n return self.major_ticks\r\n elif self.major_tick_distance is not None:\r\n return list(range(self.major_tick_start, self.max_offset + 1, self.major_tick_distance))\r\n elif self.major_tick_periodic_distances is not None:\r\n ticks = []\r\n tick = self.major_tick_start\r\n idx_period = 0\r\n while tick <= self.max_offset:\r\n ticks.append(tick)\r\n tick += self.major_tick_periodic_distances[idx_period]\r\n idx_period = (idx_period + 1) % len(self.major_tick_periodic_distances)\r\n return ticks\r\n else:\r\n distance = default_major_tick_distance(grid)\r\n return list(range(self.major_tick_start, self.max_offset + 1, distance))\r\n\r\n @property\r\n def domains(self) -> List['Domain']:\r\n \"\"\"\r\n Return :any:`Domain`'s on this :any:`Helix`.\r\n Assigned when a :any:`Design` is created using this :any:`Helix`.\r\n\r\n :return: :any:`Domain`'s on this helix\r\n \"\"\"\r\n return self._domains\r\n\r\n def min_offset_is_default(self) -> bool:\r\n return self.min_offset == 0\r\n\r\n def major_tick_start_is_default(self) -> bool:\r\n return self.major_tick_start == self.min_offset\r\n\r\n def major_tick_distance_is_default(self, grid: Grid) -> bool:\r\n return (self.major_tick_distance is None\r\n or default_major_tick_distance(grid) == self.major_tick_distance)\r\n\r\n def major_tick_periodic_distances_is_default(self) -> bool:\r\n return self.major_tick_periodic_distances is None\r\n\r\n def major_ticks_is_default(self) -> bool:\r\n return self.major_ticks is None\r\n\r\n\r\ndef _is_close(x1: float, x2: float) -> bool:\r\n return abs(x1 - x2) < 0.00000001\r\n\r\n\r\nDomainLabel = TypeVar('DomainLabel')\r\nStrandLabel = TypeVar('StrandLabel')\r\n\r\n\r\n@dataclass\r\nclass Domain(_JSONSerializable, Generic[DomainLabel]):\r\n \"\"\"\r\n A maximal portion of a :any:`Strand` that is continguous on a single :any:`Helix`.\r\n A :any:`Strand` contains a list of :any:`Domain`'s (and also potentially :any:`Loopout`'s).\r\n \"\"\"\r\n\r\n helix: int\r\n \"\"\"index of the :any:`Helix` on which this :any:`Domain` resides.\"\"\"\r\n\r\n forward: bool\r\n \"\"\"Whether the strand \"points\" forward (i.e., its 3' end has a larger offset than its 5' end).\r\n If :any:`Domain.forward` is ``True``, then \r\n :any:`Domain.start` is the 5' end of the :any:`Domain` and \r\n :any:`Domain.end` is the 3' end of the :any:`Domain`.\r\n If :any:`Domain.forward` is ``False``, these roles are reversed.\"\"\"\r\n\r\n start: int\r\n \"\"\"\r\n The smallest offset position of any base on this Domain\r\n (3' end if :any:`Domain.forward` = ``False``,\r\n 5' end if :any:`Domain.forward` = ``True``).\r\n \"\"\"\r\n\r\n end: int\r\n \"\"\"\r\n 1 plus the largest offset position of any base on this Domain\r\n (5' end if :any:`Domain.forward` = ``False``,\r\n 3' end if :any:`Domain.forward` = ``True``).\r\n Note that the set of base offsets occupied by this Domain is {start, start+1, ..., end-1},\r\n i.e., inclusive for :py:data:`Strand.start` but exclusive for :py:data:`Strand.end`,\r\n the same convention used in Python for slices of lists and strings.\r\n (e.g., :samp:`\"abcdef\"[1:3] == \"bc\"`)\r\n \r\n Some methods (such as :py:meth:`Domain.dna_sequence_in`) use the convention of being inclusive on \r\n both ends and are marked with the word \"INCLUSIVE\".\r\n (Such a convention is easier to reason about when there are insertions and deletions.)\r\n \"\"\"\r\n\r\n deletions: List[int] = field(default_factory=list)\r\n \"\"\"List of positions of deletions on this Domain.\"\"\"\r\n\r\n insertions: List[Tuple[int, int]] = field(default_factory=list)\r\n \"\"\"List of (position,num_insertions) pairs on this Domain.\r\n \r\n This is the number of *extra* bases in addition to the base already at this position. \r\n The total number of bases at this offset is num_insertions+1.\"\"\"\r\n\r\n name: Optional[str] = None\r\n \"\"\"Optional name to give this :any:`Domain`. \r\n \r\n This is used to interoperate with the dsd DNA sequence design package.\"\"\"\r\n\r\n label: Optional[DomainLabel] = None\r\n \"\"\"\r\n Generic \"label\" object to associate to this :any:`Domain`.\r\n\r\n Useful for associating extra information with the :any:`Domain` that will be serialized, for example,\r\n for DNA sequence design. It must be an object (e.g., a dict or primitive type such as str or int) \r\n that is naturally JSON serializable. (Calling \r\n `json.dumps `_\r\n on the object should succeed without having to specify a custom encoder.)\r\n \"\"\"\r\n\r\n # not serialized; for efficiency\r\n # remove quotes when Py3.6 support dropped\r\n _parent_strand: Optional['Strand'] = field(init=False, repr=False, compare=False, default=None)\r\n\r\n def __post_init__(self) -> None:\r\n self._check_start_end()\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True,\r\n **kwargs: Any) -> Union[NoIndent, Dict[str, Any]]:\r\n dct: Dict[str, Any] = OrderedDict()\r\n if self.name is not None:\r\n dct[domain_name_key] = self.name\r\n dct[helix_idx_key] = self.helix\r\n dct[forward_key] = self.forward\r\n dct[start_key] = self.start\r\n dct[end_key] = self.end\r\n if len(self.deletions) > 0:\r\n dct[deletions_key] = self.deletions\r\n if len(self.insertions) > 0:\r\n dct[insertions_key] = self.insertions\r\n if self.label is not None:\r\n dct[domain_label_key] = self.label\r\n return NoIndent(dct) if suppress_indent else dct\r\n\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'Domain': # remove quotes when Py3.6 support dropped\r\n helix = mandatory_field(Domain, json_map, helix_idx_key)\r\n forward = mandatory_field(Domain, json_map, forward_key, *legacy_forward_keys)\r\n start = mandatory_field(Domain, json_map, start_key)\r\n end = mandatory_field(Domain, json_map, end_key)\r\n deletions = json_map.get(deletions_key, [])\r\n insertions = cast(List[Tuple[int, int]], # noqa\r\n list(map(tuple, json_map.get(insertions_key, [])))) # type: ignore\r\n name = json_map.get(domain_name_key)\r\n label = json_map.get(domain_label_key)\r\n return Domain(\r\n helix=helix,\r\n forward=forward,\r\n start=start,\r\n end=end,\r\n deletions=deletions,\r\n insertions=insertions,\r\n name=name,\r\n label=label,\r\n )\r\n\r\n def __repr__(self) -> str:\r\n rep = (f'Domain(' +\r\n (f'name={self.name}' if self.name is not None else '') +\r\n f', helix={self.helix}'\r\n f', forward={self.forward}'\r\n f', start={self.start}'\r\n f', end={self.end}') + \\\r\n (f', deletions={self.deletions}' if len(self.deletions) > 0 else '') + \\\r\n (f', insertions={self.insertions}' if len(self.insertions) > 0 else '') + \\\r\n ')'\r\n return rep\r\n\r\n def __str__(self) -> str:\r\n return repr(self) if self.name is None else self.name\r\n\r\n def strand(self) -> 'Strand': # remove quotes when Py3.6 support dropped\r\n \"\"\"\r\n :return: The :any:`Strand` that contains this :any:`Loopout`.\r\n \"\"\"\r\n if self._parent_strand is None:\r\n raise ValueError('_parent_strand has not yet been set')\r\n return self._parent_strand\r\n\r\n def set_name(self, name: str) -> None:\r\n \"\"\"Sets name of this :any:`Domain`.\"\"\"\r\n self.name = name\r\n\r\n def set_label(self, label: DomainLabel) -> None:\r\n \"\"\"Sets label of this :any:`Domain`.\"\"\"\r\n self.label = label\r\n\r\n def _check_start_end(self) -> None:\r\n if self.start >= self.end:\r\n if self._parent_strand is None:\r\n raise ValueError(f'start = {self.start} must be less than end = {self.end}\\n'\r\n f'_parent_strand has not yet been set')\r\n raise StrandError(self._parent_strand,\r\n f'start = {self.start} must be less than end = {self.end}')\r\n\r\n # @staticmethod\r\n # def is_loopout() -> bool:\r\n # \"\"\"Indicates if this is a :any:`Loopout` (always false)\r\n # Useful when object could be either :any:`Loopout` or :any:`Domain`.\"\"\"\r\n # return False\r\n #\r\n # @staticmethod\r\n # def is_domain() -> bool:\r\n # \"\"\"Indicates if this is a :any:`Domain` (always true)\r\n # Useful when object could be either :any:`Loopout` or :any:`Domain`.\"\"\"\r\n # return True\r\n\r\n def set_start(self, new_start: int) -> None:\r\n self.start = new_start\r\n self._check_start_end()\r\n\r\n def set_end(self, new_end: int) -> None:\r\n self.end = new_end\r\n self._check_start_end()\r\n\r\n def offset_5p(self) -> int:\r\n \"\"\"5' offset of this :any:`Domain`, INCLUSIVE.\"\"\"\r\n if self.forward:\r\n return self.start\r\n else:\r\n return self.end - 1\r\n # return self.start if self.forward else self.end - 1\r\n\r\n def offset_3p(self) -> int:\r\n \"\"\"3' offset of this :any:`Domain`, INCLUSIVE.\"\"\"\r\n return self.end - 1 if self.forward else self.start\r\n\r\n def _num_insertions(self) -> int:\r\n # total number of insertions in this Domain\r\n return sum(insertion[1] for insertion in self.insertions)\r\n\r\n def contains_offset(self, offset: int) -> bool:\r\n \"\"\"Indicates if `offset` is the offset of a base on this :any:`Domain`.\r\n\r\n Note that offsets refer to visual portions of the displayed grid for the Helix.\r\n If for example, this Domain starts at position 0 and ends at 10, and it has 5 deletions,\r\n then it contains the offset 7 even though there is no base 7 positions from the start.\"\"\"\r\n return self.start <= offset < self.end\r\n\r\n def __len__(self) -> int:\r\n \"\"\"Same as :meth:`Domain.dna_length`.\r\n\r\n See also :meth:`Domain.visual_length`.\"\"\"\r\n return self.dna_length()\r\n\r\n def dna_length(self) -> int:\r\n \"\"\"Number of bases in this Domain.\"\"\"\r\n return self.end - self.start - len(self.deletions) + self._num_insertions()\r\n\r\n def dna_length_in(self, left: int, right: int) -> int:\r\n \"\"\"Number of bases in this Domain between offsets `left` and `right` (INCLUSIVE).\"\"\"\r\n if not left <= right + 1:\r\n raise ValueError(f'left = {left} and right = {right} but we should have left <= right + 1')\r\n if not self.start <= left:\r\n raise ValueError(f'left = {left} should be at least self.start = {self.start}')\r\n if not right < self.end:\r\n raise ValueError(f'right = {right} should be at most self.end - 1 = {self.end - 1}')\r\n num_deletions = sum(1 for offset in self.deletions if left <= offset <= right)\r\n num_insertions = sum(length for (offset, length) in self.insertions if left <= offset <= right)\r\n return (right - left + 1) - num_deletions + num_insertions\r\n\r\n def visual_length(self) -> int:\r\n \"\"\"Distance between :any:`Domain.start` offset and :any:`Domain.end` offset.\r\n\r\n This can be more or less than the :meth:`Domain.dna_length` due to insertions and deletions.\"\"\"\r\n return self.end - self.start\r\n\r\n def dna_sequence(self) -> Optional[str]:\r\n \"\"\"Return DNA sequence of this Domain, or ``None`` if no DNA sequence has been assigned\r\n to this :any:`Domain`'s :any:`Strand`.\"\"\"\r\n return self.dna_sequence_in(self.start, self.end - 1)\r\n\r\n def dna_sequence_in(self, offset_left: int, offset_right: int) -> Optional[str]:\r\n \"\"\"Return DNA sequence of this Domain in the interval of offsets given by\r\n [`offset_left`, `offset_right`], INCLUSIVE, or ``None`` if no DNA sequence has been assigned\r\n to this :any:`Domain`'s :any:`Strand`.\r\n\r\n WARNING: This is inclusive on both ends,\r\n unlike other parts of this API where the right endpoint is exclusive.\r\n This is to make the notion well-defined when one of the endpoints is on an offset with a\r\n deletion or insertion.\"\"\"\r\n strand_seq = self._parent_strand.dna_sequence if self._parent_strand is not None else None\r\n if strand_seq is None:\r\n return None\r\n\r\n # if on a deletion, move inward until we are off of it\r\n while offset_left in self.deletions:\r\n offset_left += 1\r\n while offset_right in self.deletions:\r\n offset_right -= 1\r\n\r\n if offset_left > offset_right:\r\n return ''\r\n if offset_left >= self.end:\r\n return ''\r\n if offset_right < 0:\r\n return ''\r\n\r\n str_idx_left = self.domain_offset_to_strand_dna_idx(offset_left, self.forward)\r\n str_idx_right = self.domain_offset_to_strand_dna_idx(offset_right, not self.forward)\r\n if not self.forward: # these will be out of order if strand is left\r\n str_idx_left, str_idx_right = str_idx_right, str_idx_left\r\n subseq = strand_seq[str_idx_left:str_idx_right + 1]\r\n return subseq\r\n\r\n def get_seq_start_idx(self) -> int:\r\n \"\"\"Starting DNA subsequence index for first base of this :any:`Domain` on its\r\n Parent :any:`Strand`'s DNA sequence.\"\"\"\r\n if self._parent_strand is None:\r\n raise ValueError('should not call this method if a Strand has not be associated to this Domain')\r\n domains = self._parent_strand.domains\r\n # index of self in parent strand's list of domains\r\n self_domain_idx = domains.index(self)\r\n # index of self's position within the DNA sequence of parent strand\r\n self_seq_idx_start = sum(prev_domain.dna_length()\r\n for prev_domain in domains[:self_domain_idx])\r\n return self_seq_idx_start\r\n\r\n def domain_offset_to_strand_dna_idx(self, offset: int, offset_closer_to_5p: bool) -> int:\r\n \"\"\" Convert from offset on this :any:`Domain`'s :any:`Helix`\r\n to string index on the parent :any:`Strand`'s DNA sequence.\r\n\r\n If `offset_closer_to_5p` is ``True``, (this only matters if `offset` contains an insertion)\r\n then the only leftmost string index corresponding to this offset is included,\r\n otherwise up to the rightmost string index (including all insertions) is included.\"\"\"\r\n if offset in self.deletions:\r\n raise ValueError(f'offset {offset} illegally contains a deletion from {self.deletions}')\r\n\r\n # length adjustment for insertions depends on whether this is a left or right offset\r\n len_adjust = self._net_ins_del_length_increase_from_5p_to(offset, offset_closer_to_5p)\r\n\r\n # get string index assuming this Domain is first on Strand\r\n if self.forward:\r\n offset += len_adjust # account for insertions and deletions\r\n domain_str_idx = offset - self.start\r\n else:\r\n # account for insertions and deletions\r\n offset -= len_adjust # account for insertions and deletions\r\n domain_str_idx = self.end - 1 - offset\r\n\r\n # correct for existence of previous Domains on this Strand\r\n return domain_str_idx + self.get_seq_start_idx()\r\n\r\n def _net_ins_del_length_increase_from_5p_to(self, offset_edge: int, offset_closer_to_5p: bool) -> int:\r\n \"\"\"Net number of insertions from 5'/3' end to offset_edge,\r\n INCLUSIVE on 5'/3' end, EXCLUSIVE on offset_edge.\r\n\r\n Set `five_p` ``= False`` to test from 3' end to `offset_edge`.\"\"\"\r\n length_increase = 0\r\n for deletion in self.deletions:\r\n if self._between_5p_and_offset(deletion, offset_edge):\r\n length_increase -= 1\r\n for (insertion_offset, insertion_length) in self.insertions:\r\n if self._between_5p_and_offset(insertion_offset, offset_edge):\r\n length_increase += insertion_length\r\n # special case for when offset_edge is an endpoint closer to the 3' end,\r\n # we add its extra insertions also in this case\r\n if not offset_closer_to_5p:\r\n insertion_map: Dict[int, int] = dict(self.insertions)\r\n if offset_edge in insertion_map:\r\n insertion_length = insertion_map[offset_edge]\r\n length_increase += insertion_length\r\n return length_increase\r\n\r\n def _between_5p_and_offset(self, offset_to_test: int, offset_edge: int) -> bool:\r\n return ((self.forward and self.start <= offset_to_test < offset_edge) or\r\n (not self.forward and offset_edge < offset_to_test < self.end))\r\n\r\n # def _between_3p_and_offset(self, offset_to_test: int, offset_edge: int) -> bool:\r\n # return ((self.direction == Direction.left and self.start <= offset_to_test < offset_edge) or\r\n # (self.direction == Direction.forward and offset_edge < offset_to_test < self.end))\r\n\r\n # The type hint 'Domain' must be in quotes since Domain is not yet defined.\r\n # This is a \"forward reference\": https://www.python.org/dev/peps/pep-0484/#forward-references\r\n # remove quotes when Py3.6 support dropped\r\n # def overlaps(self, other: Domain) -> bool:\r\n def overlaps(self, other: 'Domain') -> bool:\r\n r\"\"\"Indicates if this :any:`Domain`'s set of offsets (the set\r\n :math:`\\{x \\in \\mathbb{N} \\mid`\r\n ``self.start``\r\n :math:`\\leq x \\leq`\r\n ``self.end``\r\n :math:`\\}`)\r\n has nonempty intersection with those of `other`,\r\n and they appear on the same helix,\r\n and they point in opposite directions.\"\"\" # noqa (suppress PEP warning)\r\n return (self.helix == other.helix and\r\n self.forward == (not other.forward) and\r\n self.compute_overlap(other)[0] >= 0)\r\n\r\n # remove quotes when Py3.6 support dropped\r\n # def overlaps_illegally(self, other: Domain):\r\n def overlaps_illegally(self, other: 'Domain') -> bool:\r\n r\"\"\"Indicates if this :any:`Domain`'s set of offsets (the set\r\n :math:`\\{x \\in \\mathbb{N} \\mid`\r\n ``self.start``\r\n :math:`\\leq x \\leq`\r\n ``self.end``\r\n :math:`\\}`)\r\n has nonempty intersection with those of `other`,\r\n and they appear on the same helix,\r\n and they point in the same direction.\"\"\" # noqa (suppress PEP warning)\r\n return (self.helix == other.helix and\r\n self.forward == other.forward and\r\n self.compute_overlap(other)[0] >= 0)\r\n\r\n # remove quotes when Py3.6 support dropped\r\n # def compute_overlap(self, other: Domain) -> Tuple[int, int]:\r\n def compute_overlap(self, other: 'Domain') -> Tuple[int, int]:\r\n \"\"\"Return [left,right) offset indicating overlap between this Domain and `other`.\r\n\r\n Return ``(-1,-1)`` if they do not overlap (different helices, or non-overlapping regions\r\n of the same helix).\"\"\"\r\n overlap_start = max(self.start, other.start)\r\n overlap_end = min(self.end, other.end)\r\n if overlap_start >= overlap_end: # overlap is empty\r\n return -1, -1\r\n return overlap_start, overlap_end\r\n\r\n def insertion_offsets(self) -> List[int]:\r\n \"\"\"Return offsets of insertions (but not their lengths).\"\"\"\r\n return [ins_off for (ins_off, _) in self.insertions]\r\n\r\n\r\n@dataclass\r\nclass Loopout(_JSONSerializable, Generic[DomainLabel]):\r\n \"\"\"Represents a single-stranded loopout on a :any:`Strand`.\r\n\r\n One could think of a :any:`Loopout` as a type of :any:`Domain`, but none of the fields of\r\n :any:`Domain` make sense for :any:`Loopout`, so they are not related to each other in the type\r\n hierarchy. It is interpreted that a :any:`Loopout` is a single-stranded region bridging two\r\n :any:`Domain`'s that are connected to :any:`Helix`'s.\r\n It is illegal for two consecutive :any:`Domain`'s to both\r\n be :any:`Loopout`'s,\r\n or for a :any:`Loopout` to occur on either end of the :any:`Strand`\r\n (i.e., each :any:`Strand` must begin and end with a :any:`Domain`).\r\n\r\n For example, one use of a loopout is to describe a hairpin (a.k.a.,\r\n `stem-loop `_).\r\n The following creates a :any:`Strand` that represents a hairpin with a stem length of 10 and a loop\r\n length of 5.\r\n\r\n .. code-block:: Python\r\n\r\n import scadnano as sc\r\n\r\n domain_f = sc.Domain(helix=0, forward=True, start=0, end=10)\r\n loop = sc.Loopout(length=5)\r\n domain_r = sc.Domain(helix=0, forward=False, start=0, end=10)\r\n hairpin = sc.Strand([domain_f, loop, domain_r])\r\n\r\n It can also be created with chained method calls\r\n\r\n .. code-block:: Python\r\n\r\n import scadnano as sc\r\n\r\n design = sc.Design(helices=[sc.Helix(max_offset=10)])\r\n design.strand(0,0).move(10).loopout(0,5).move(-10)\r\n \"\"\"\r\n\r\n length: int\r\n \"\"\"Length (in DNA bases) of this :any:`Loopout`.\"\"\"\r\n\r\n name: Optional[str] = None\r\n \"\"\"\r\n Optional name to give this :any:`Loopout`.\r\n\r\n This is used to interoperate with the dsd DNA sequence design package.\r\n \"\"\"\r\n\r\n label: Optional[DomainLabel] = None\r\n \"\"\"\r\n Generic \"label\" object to associate to this :any:`Loopout`.\r\n\r\n Useful for associating extra information with the :any:`Loopout` that will be serialized, for example,\r\n for DNA sequence design. It must be an object (e.g., a dict or primitive type such as str or int) \r\n that is naturally JSON serializable. (Calling \r\n `json.dumps `_\r\n on the object should succeed without having to specify a custom encoder.)\r\n \"\"\"\r\n\r\n # not serialized; for efficiency\r\n # remove quotes when Py3.6 support dropped\r\n _parent_strand: Optional['Strand'] = field(init=False, repr=False, compare=False, default=None)\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True,\r\n **kwargs: Any) -> Union[Dict[str, Any], NoIndent]:\r\n dct: Dict[str, Any] = {loopout_key: self.length}\r\n if self.name is not None:\r\n dct[domain_name_key] = self.name\r\n if self.label is not None:\r\n dct[domain_label_key] = self.label\r\n return NoIndent(dct) if suppress_indent else dct\r\n\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'Loopout': # remove quotes when Py3.6 support dropped\r\n # XXX: this should never fail since we detect whether to call this from_json by the presence\r\n # of a length key in json_map\r\n length_str = mandatory_field(Loopout, json_map, loopout_key)\r\n length = int(length_str)\r\n name = json_map.get(domain_name_key)\r\n label = json_map.get(domain_label_key)\r\n return Loopout(length=length, name=name, label=label)\r\n\r\n def strand(self) -> 'Strand': # remove quotes when Py3.6 support dropped\r\n \"\"\"\r\n :return: The :any:`Strand` that contains this :any:`Loopout`.\r\n \"\"\"\r\n if self._parent_strand is None:\r\n raise ValueError('_parent_strand has not yet been set')\r\n return self._parent_strand\r\n\r\n def __repr__(self) -> str:\r\n return f'Loopout(' + \\\r\n (f'{self.name}, ' if self.name is not None else '') + \\\r\n f'{self.length}, ' + \\\r\n (f'{self.label}, ' if self.label is not None else '') + \\\r\n f')'\r\n\r\n def __str__(self) -> str:\r\n return repr(self) if self.name is None else self.name\r\n\r\n def set_name(self, name: str) -> None:\r\n \"\"\"Sets name of this :any:`Loopout`.\"\"\"\r\n self.name = name\r\n\r\n def set_label(self, label: Optional[DomainLabel]) -> None:\r\n \"\"\"Sets label of this :any:`Loopout`.\"\"\"\r\n self.label = label\r\n\r\n def __len__(self) -> int:\r\n \"\"\"Same as :any:`Loopout.dna_length`\"\"\"\r\n return self.dna_length()\r\n\r\n def dna_length(self) -> int:\r\n \"\"\"Length of this :any:`Loopout`; same as field :py:data:`Loopout.length`.\"\"\"\r\n return self.length\r\n\r\n def dna_sequence(self) -> Optional[str]:\r\n \"\"\"Return DNA sequence of this :any:`Loopout`, or ``None`` if no DNA sequence has been assigned\r\n to the :any:`Strand` of this :any:`Loopout`.\"\"\"\r\n if self._parent_strand is None:\r\n raise ValueError('_parent_strand has not been set')\r\n strand_seq = self._parent_strand.dna_sequence\r\n if strand_seq is None:\r\n return None\r\n\r\n str_idx_left = self.get_seq_start_idx()\r\n str_idx_right = str_idx_left + self.length # EXCLUSIVE (unlike similar code for Domain)\r\n subseq = strand_seq[str_idx_left:str_idx_right]\r\n return subseq\r\n\r\n def get_seq_start_idx(self) -> int:\r\n \"\"\"Starting DNA subsequence index for first base of this :any:`Loopout` on its\r\n :any:`Strand`'s DNA sequence.\"\"\"\r\n if self._parent_strand is None:\r\n raise ValueError('_parent_strand has not been set')\r\n domains = self._parent_strand.domains\r\n # index of self in parent strand's list of domains\r\n self_domain_idx = domains.index(self)\r\n # index of self's position within the DNA sequence of parent strand\r\n self_seq_idx_start = sum(prev_domain.dna_length()\r\n for prev_domain in domains[:self_domain_idx])\r\n return self_seq_idx_start\r\n\r\n\r\n_wctable = str.maketrans('ACGTacgt', 'TGCAtgca')\r\n\r\n\r\ndef wc(seq: str) -> str:\r\n \"\"\"Return reverse Watson-Crick complement of `seq`.\"\"\"\r\n return seq.translate(_wctable)[::-1]\r\n\r\n\r\n@dataclass\r\nclass IDTFields(_JSONSerializable):\r\n \"\"\"Data required when ordering DNA strands from the synthesis company\r\n `IDT (Integrated DNA Technologies) `_.\r\n This data is used when automatically generating files used to order DNA from IDT.\r\n\r\n When exporting to IDT files via :py:meth:`Design.write_idt_plate_excel_file`\r\n or :py:meth:`Design.write_idt_bulk_input_file`, the field :py:data:`Strand.name` is used for the\r\n name if it exists, otherwise a reasonable default is chosen.\"\"\"\r\n\r\n scale: str = default_idt_scale\r\n \"\"\"Synthesis scale at which to synthesize the strand (third field in IDT bulk input:\r\n https://www.idtdna.com/site/order/oligoentry).\r\n Choices supplied by IDT at the time this was written: \r\n ``\"25nm\"``, ``\"100nm\"``, ``\"250nm\"``, ``\"1um\"``, ``\"5um\"``, \r\n ``\"10um\"``, ``\"4nmU\"``, ``\"20nmU\"``, ``\"PU\"``, ``\"25nmS\"``.\r\n \r\n Optional field.\r\n \"\"\"\r\n\r\n purification: str = default_idt_purification\r\n \"\"\"Purification options (fourth field in IDT bulk input:\r\n https://www.idtdna.com/site/order/oligoentry). \r\n Choices supplied by IDT at the time this was written: \r\n ``\"STD\"``, ``\"PAGE\"``, ``\"HPLC\"``, ``\"IEHPLC\"``, ``\"RNASE\"``, ``\"DUALHPLC\"``, ``\"PAGEHPLC\"``.\r\n \r\n Optional field.\r\n \"\"\"\r\n\r\n plate: Optional[str] = None\r\n \"\"\"Name of plate in case this strand will be ordered on a 96-well or 384-well plate.\r\n \r\n Optional field, but non-optional if :py:data:`IDTField.well` is not ``None``.\r\n \"\"\"\r\n\r\n well: Optional[str] = None\r\n \"\"\"Well position on plate in case this strand will be ordered on a 96-well or 384-well plate.\r\n \r\n Optional field, but non-optional if :py:data:`IDTField.plate` is not ``None``.\r\n \"\"\"\r\n\r\n def __post_init__(self) -> None:\r\n _check_idt_string_not_none_or_empty(self.scale, 'scale')\r\n _check_idt_string_not_none_or_empty(self.purification, 'purification')\r\n if self.plate is None and self.well is not None:\r\n raise IllegalDesignError(f'IDTFields.plate cannot be None if IDTFields.well is not None\\n'\r\n f'IDTFields.well = {self.well}')\r\n if self.plate is not None and self.well is None:\r\n raise IllegalDesignError(f'IDTFields.well cannot be None if IDTFields.plate is not None\\n'\r\n f'IDTFields.plate = {self.plate}')\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True,\r\n **kwargs: Any) -> Union[NoIndent, Dict[str, Any]]:\r\n dct: Dict[str, Any] = dict(self.__dict__)\r\n if self.plate is None:\r\n del dct['plate']\r\n if self.well is None:\r\n del dct['well']\r\n return NoIndent(dct)\r\n\r\n @staticmethod\r\n def from_json(json_map: Dict[str, Any]) -> 'IDTFields':\r\n scale = mandatory_field(IDTFields, json_map, idt_scale_key)\r\n purification = mandatory_field(IDTFields, json_map, idt_purification_key)\r\n plate = json_map.get(idt_plate_key)\r\n well = json_map.get(idt_well_key)\r\n return IDTFields(scale=scale, purification=purification, plate=plate, well=well)\r\n\r\n\r\ndef _check_idt_string_not_none_or_empty(value: str, field_name: str) -> None:\r\n if value is None:\r\n raise IllegalDesignError(f'field {field_name} in IDTFields cannot be None')\r\n if len(value) == 0:\r\n raise IllegalDesignError(f'field {field_name} in IDTFields cannot be empty')\r\n\r\n\r\nclass StrandBuilder(Generic[StrandLabel, DomainLabel]):\r\n \"\"\"\r\n Represents a :any:`Strand` that is being built in an existing :any:`Design`.\r\n\r\n This is an intermediate object created when using chained method building by calling\r\n :py:meth:`Design.strand`, for example\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 0).to(10).cross(1).to(5).with_modification_5p(mod.biotin_5p).as_scaffold()\r\n\r\n :any:`StrandBuilder` should generally not be created directly.\r\n Although it is convenient to use chained method calls, it is also sometimes useful to assign the\r\n :any:`StrandBuilder` object into a variable and then call the methods on that variable. For example,\r\n this code is equivalent to the above line:\r\n\r\n .. code-block:: Python\r\n\r\n strand_builder = design.strand(0, 0)\r\n strand_builder.to(10)\r\n strand_builder.cross(1)\r\n strand_builder.to(5)\r\n strand_builder.with_modification_5p(mod.biotin_5p)\r\n strand_builder.as_scaffold()\r\n \"\"\"\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def __init__(self, design: 'Design[StrandLabel, DomainLabel]', helix: int, offset: int):\r\n self.design: Design[StrandLabel, DomainLabel] = design\r\n self.current_helix: int = helix\r\n self.current_offset: int = offset\r\n # self.loopout_length: Optional[int] = None\r\n self._strand: Optional[Strand[StrandLabel, DomainLabel]] = None\r\n self.just_moved_to_helix: bool = True\r\n self.last_domain: Optional[Domain[DomainLabel]] = None\r\n\r\n @property\r\n def strand(self) -> 'Strand[StrandLabel, DomainLabel]':\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n return self._strand\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def cross(self, helix: int, offset: Optional[int] = None, move: Optional[int] = None) \\\r\n -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Add crossover. To have any effect, must be followed by call to :py:meth:`StrandBuilder.to`\r\n or :py:meth:`StrandBuilder.move`.\r\n\r\n :param helix: :any:`Helix` to crossover to\r\n :param offset: new offset on `helix`. If not specified, defaults to current offset.\r\n (i.e., a \"vertical\" crossover)\r\n Mutually excusive with `move`.\r\n :param move:\r\n Relative distance to new offset on `helix` from current offset.\r\n If not specified, defaults to using parameter `offset`.\r\n Mutually excusive with `offset`.\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n if move is not None and offset is not None:\r\n raise IllegalDesignError('move and offset cannot both be specified:\\n'\r\n f'move: {move}\\n'\r\n f'offset: {offset}')\r\n self.last_domain = None\r\n self.current_helix = helix\r\n if offset is not None:\r\n self.current_offset = offset\r\n elif move is not None:\r\n self.current_offset += move\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def loopout(self, helix: int, length: int, offset: Optional[int] = None, move: Optional[int] = None) \\\r\n -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Like :py:meth:`StrandBuilder.cross`, but creates a :any:`Loopout` instead of a crossover.\r\n\r\n :param helix: :any:`Helix` to crossover to\r\n :param length: length of :any:`Loopout` to add\r\n :param offset: new offset on `helix`. If not specified, defaults to current offset.\r\n (i.e., a \"vertical\" loopout)\r\n Mutually excusive with `move`.\r\n :param move:\r\n Relative distance to new offset on `helix` from current offset.\r\n If not specified, defaults to using parameter `offset`.\r\n Mutually excusive with `offset`.\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self.cross(helix, offset=offset, move=move)\r\n self.design.append_domain(self._strand, Loopout(length))\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def move(self, delta: int) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Extends this :any:`StrandBuilder` on the current helix to offset given by the current offset\r\n plus `delta`, which adds a new :any:`Domain` to the :any:`Strand` being built. This is a\r\n \"relative move\", whereas :py:meth:`StrandBuilder.to` and :py:meth:`StrandBuilder.update_to`\r\n are \"absolute moves\".\r\n\r\n This updates the underlying :any:`Design` with a new :any:`Domain`,\r\n and if :py:meth:`StrandBuilder.loopout` was last called on this :any:`StrandBuilder`,\r\n also a new :any:`Loopout`.\r\n\r\n If two instances of :py:meth:`StrandBuilder.move` are chained together, this creates two domains\r\n on the same helix. The two offsets must move in the same direction. In other words, if we call\r\n ``.move(o1).move(o2)``, then ``o1`` and ``o2`` must be either both negative or both positive.\r\n\r\n :param delta:\r\n Distance to new offset to extend to, compared to current offset.\r\n If less than current offset, the new :any:`Domain` is reverse, otherwise it is forward.\r\n :return: self\r\n \"\"\"\r\n return self.to(self.current_offset + delta)\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def to(self, offset: int) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Extends this :any:`StrandBuilder` on the current helix to offset `offset`,\r\n which adds a new :any:`Domain` to the :any:`Strand` being built. This is an\r\n \"absolute move\", whereas :py:meth:`StrandBuilder.move` is a \"relative move\".\r\n\r\n This updates the underlying :any:`Design` with a new :any:`Domain`,\r\n and if :py:meth:`StrandBuilder.loopout` was last called on this :any:`StrandBuilder`,\r\n also a new :any:`Loopout`.\r\n\r\n If two instances of :py:meth:`StrandBuilder.to` are chained together, this creates two domains\r\n on the same helix. The two offsets must move in the same direction. In other words, if the starting\r\n offset is ``s``, and we call ``.to(o1).to(o2)``, then either ``s < o1 < o2`` or ``o2 < o1 < s``\r\n must be true.\r\n\r\n To simply change the current offset after calling :py:meth:`StrandBuilder.to`, without creating\r\n a new Domain, call :py:meth:`StrandBuilder.update_to` instead.\r\n\r\n :param offset: new offset to extend to. If less than current offset,\r\n the new :any:`Domain` is reverse, otherwise it is forward.\r\n :return: self\r\n \"\"\"\r\n if self.last_domain and ((self.last_domain.forward and offset < self.current_offset) or (\r\n not self.last_domain.forward and offset > self.current_offset)):\r\n raise IllegalDesignError('offsets must be monotonic '\r\n '(strictly increasing or strictly decreasing) '\r\n 'when calling to() twice in a row')\r\n\r\n if offset > self.current_offset:\r\n forward = True\r\n start = self.current_offset\r\n end = offset\r\n elif offset < self.current_offset:\r\n forward = False\r\n start = offset\r\n end = self.current_offset\r\n else:\r\n raise IllegalDesignError(f'offset {offset} cannot be equal to current offset')\r\n\r\n domain: Domain[DomainLabel] = Domain(helix=self.current_helix, forward=forward, start=start, end=end)\r\n self.last_domain = domain\r\n if self._strand is not None:\r\n self.design.append_domain(self._strand, domain)\r\n else:\r\n self._strand = Strand(domains=[domain])\r\n self.design.add_strand(self._strand)\r\n\r\n self.current_offset = offset\r\n\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def update_to(self, offset: int) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Like :py:meth:`StrandBuilder.to`, but changes the current offset without creating\r\n a new :any:`Domain`. So unlike :py:meth:`StrandBuilder.to`, several consecutive calls to\r\n :py:meth:`StrandBuilder.update_to` are equivalent to only making the final call. This is an\r\n \"absolute move\", whereas :py:meth:`StrandBuilder.move` is a \"relative move\".\r\n\r\n If :py:meth:`StrandBuilder.cross` or :py:meth:`StrandBuilder.loopout` was just called,\r\n then :py:meth:`StrandBuilder.to` and :py:meth:`StrandBuilder.update_to` have the same effect.\r\n\r\n :param offset: new offset to extend to. If less than offset of the last call to\r\n :py:meth:`StrandBuilder.cross` or :py:meth:`StrandBuilder.loopout`,\r\n the new :any:`Domain` is reverse, otherwise it is forward.\r\n :return: self\r\n \"\"\"\r\n if not self.last_domain:\r\n return self.to(offset)\r\n\r\n domain = self.last_domain\r\n if (self.last_domain.forward and offset < self.current_offset) or (\r\n not self.last_domain.forward and offset > self.current_offset):\r\n raise IllegalDesignError(f'when calling ')\r\n\r\n if domain.forward:\r\n domain.set_end(offset)\r\n else:\r\n domain.set_start(offset)\r\n\r\n self.current_offset = offset\r\n\r\n return self\r\n\r\n def as_circular(self) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Makes :any:`Strand` being built circular.\r\n\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_circular()\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def as_scaffold(self) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Makes :any:`Strand` being built a scaffold.\r\n\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_scaffold(True)\r\n return self\r\n\r\n def with_idt(self, scale: str = default_idt_scale,\r\n purification: str = default_idt_purification,\r\n plate: Optional[str] = None, well: Optional[str] = None) \\\r\n -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Gives :any:`IDTFields` value to :any:`Strand` being built.\r\n Only a name is required; other fields are given reasonable default values.\r\n\r\n :param scale:\r\n see :py:data:`IDTFields.scale`\r\n :param purification:\r\n see :py:data:`IDTFields.purification`\r\n :param plate:\r\n see :py:data:`IDTFields.plate`\r\n :param well:\r\n see :py:data:`IDTFields.well`\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.idt = IDTFields(scale=scale, purification=purification,\r\n plate=plate, well=well)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_modification_5p(self, mod: Modification5Prime) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Sets Strand being built to have given 5' modification.\r\n\r\n :param mod: 5' modification\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_modification_5p(mod)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_modification_3p(self, mod: Modification3Prime) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Sets Strand being built to have given 3' modification.\r\n\r\n :param mod: 3' modification\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_modification_3p(mod)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_modification_internal(self, idx: int, mod: ModificationInternal, warn_on_no_dna: bool) \\\r\n -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Sets Strand being built to have given internal modification.\r\n\r\n :param idx: idx along DNA sequence of internal modification\r\n :param mod: internal modification\r\n :param warn_on_no_dna: whether to print warning to screen if DNA has not been assigned\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_modification_internal(idx, mod, warn_on_no_dna)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_color(self, color: Color) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Sets Strand being built to have given color.\r\n\r\n :param color: color to set for Strand\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_color(color)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_sequence(self, sequence: str, assign_complement: bool = True) \\\r\n -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Assigns `sequence` as DNA sequence of the :any:`Strand` being built.\r\n This should be done after the :any:`Strand`'s structure is done being built, e.g.,\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 0).to(10).cross(1).to(5).with_sequence('AAAAAAAAAACGCGC')\r\n\r\n :param sequence: the DNA sequence to assign to the :any:`Strand`\r\n :param assign_complement: whether to automatically assign the complement to existing :any:`Strand`'s\r\n bound to this :any:`Strand`. This has the same meaning as the parameter `assign_complement` in\r\n :py:meth:`Design.assign_dna`.\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self.design.assign_dna(strand=self._strand, sequence=sequence, assign_complement=assign_complement)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_domain_sequence(self, sequence: str, assign_complement: bool = True) \\\r\n -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Assigns `sequence` as DNA sequence of the most recently created :any:`Domain` in\r\n the :any:`Strand` being built. This should be called immediately after a :any:`Domain` is created\r\n via a call to\r\n :py:meth:`StrandBuilder.to`,\r\n :py:meth:`StrandBuilder.update_to`,\r\n or\r\n :py:meth:`StrandBuilder.loopout`, e.g.,\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 5).to(8).with_domain_sequence('AAA')\\\\\r\n .cross(1).to(5).with_domain_sequence('TTT')\\\\\r\n .loopout(2, 4).with_domain_sequence('CCCC')\\\\\r\n .to(10).with_domain_sequence('GGGGG')\r\n\r\n :param sequence: the DNA sequence to assign to the :any:`Domain`\r\n :param assign_complement: whether to automatically assign the complement to existing :any:`Strand`'s\r\n bound to this :any:`Strand`. This has the same meaning as the parameter `assign_complement` in\r\n :py:meth:`Design.assign_dna`.\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n last_domain = self._strand.domains[-1]\r\n self.design.assign_dna(strand=self._strand, sequence=sequence, domain=last_domain,\r\n assign_complement=assign_complement)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_name(self, name: str) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Assigns `name` as name of the :any:`Strand` being built.\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 0).to(10).cross(1).to(5).with_name('scaffold')\r\n\r\n :param name: name to assign to the :any:`Strand`\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n self._strand.set_name(name)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_label(self, label: StrandLabel) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Assigns `label` as label of the :any:`Strand` being built.\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 0).to(10).cross(1).to(5).with_label('scaffold')\r\n\r\n :param label: label to assign to the :any:`Strand`\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise AssertionError('_strand cannot be None')\r\n self._strand.set_label(label)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_domain_name(self, name: str) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Assigns `name` as of the most recently created :any:`Domain` or :any:`Loopout` in\r\n the :any:`Strand` being built. This should be called immediately after a :any:`Domain` is created\r\n via a call to\r\n :py:meth:`StrandBuilder.to`,\r\n :py:meth:`StrandBuilder.update_to`,\r\n or\r\n :py:meth:`StrandBuilder.loopout`, e.g.,\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 0).to(10).with_domain_name('dom1*').cross(1).to(5).with_domain_name('dom1')\r\n\r\n :param name: name to assign to the most recently created :any:`Domain` or :any:`Loopout`\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n last_domain = self._strand.domains[-1]\r\n last_domain.set_name(name)\r\n return self\r\n\r\n # remove quotes when Py3.6 support dropped\r\n def with_domain_label(self, label: DomainLabel) -> 'StrandBuilder[StrandLabel, DomainLabel]':\r\n \"\"\"\r\n Assigns `label` as label of the most recently created :any:`Domain` or :any:`Loopout` in\r\n the :any:`Strand` being built. This should be called immediately after a :any:`Domain` is created\r\n via a call to\r\n :py:meth:`StrandBuilder.to`,\r\n :py:meth:`StrandBuilder.update_to`,\r\n or\r\n :py:meth:`StrandBuilder.loopout`, e.g.,\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 5).to(8).with_domain_label('domain 1')\\\\\r\n .cross(1).to(5).with_domain_label('domain 2')\\\\\r\n .loopout(2, 4).with_domain_label('domain 3')\\\\\r\n .to(10).with_domain_label('domain 4')\r\n\r\n :param label: label to assign to the :any:`Domain` or :any:`Loopout`\r\n :return: self\r\n \"\"\"\r\n if self._strand is None:\r\n raise ValueError('no Strand created yet; make at least one domain first')\r\n last_domain = self._strand.domains[-1]\r\n last_domain.set_label(label)\r\n return self\r\n\r\n\r\n@dataclass\r\nclass Strand(_JSONSerializable, Generic[StrandLabel, DomainLabel]):\r\n \"\"\"\r\n Represents a single strand of DNA.\r\n\r\n Each maximal portion that is continguous on a single :any:`Helix` is a :any:`Domain`.\r\n Crossovers from one :any:`Helix` to another are implicitly from the 3' end of one of this\r\n Strand's :any:`Domain`'s to the 5' end of the next :any:`Domain`.\r\n\r\n A portion of the :any:`Strand` not associated to any :any:`Helix` is represented by a :any:`Loopout`.\r\n Two :any:`Loopout`'s cannot occur consecutively on a :any:`Strand`, nor can a :any:`Strand`\r\n contain only a :any:`Loopout` but no :any:`Domain`.\r\n\r\n\r\n One can set the strand to be a scaffold in the constructor:\r\n\r\n .. code-block:: Python\r\n\r\n import scadnano as sc\r\n\r\n scaffold_domains = [ ... ]\r\n scaffold_strand = sc.Strand(domains=scaffold_domains, is_scaffold=True)\r\n\r\n or by calling :py:meth:`Strand.set_scaffold` on the :any:`Strand` object:\r\n\r\n .. code-block:: Python\r\n\r\n import scadnano as sc\r\n\r\n scaffold_domains = [ ... ]\r\n scaffold_strand = sc.Strand(domains=scaffold_domains)\r\n scaffold_strand.set_scaffold()\r\n\r\n Both will give the strand the same color that\r\n `cadnano `_\r\n uses for the scaffold.\r\n \"\"\"\r\n\r\n domains: List[Union[Domain[DomainLabel], Loopout[DomainLabel]]]\r\n \"\"\":any:`Domain`'s (or :any:`Loopout`'s) composing this Strand. \r\n Each :any:`Domain` is contiguous on a single :any:`Helix` \r\n and could be either single-stranded or double-stranded, \r\n whereas each :any:`Loopout` is single-stranded and has no associated :any:`Helix`.\"\"\"\r\n\r\n circular: bool = False\r\n \"\"\"If True, this :any:`Strand` is circular and has no 5' or 3' end. Although there is still a \r\n first and last :any:`Domain`, we interpret there to be a crossover from the 3' end of the last domain\r\n to the 5' end of the first domain, and any circular permutation of :py:data:`Strand.domains` \r\n should result in a functionally equivalent :any:`Strand`. It is illegal to have a \r\n :any:`Modification5Prime` or :any:`Modification3Prime` on a circular :any:`Strand`.\"\"\"\r\n\r\n dna_sequence: Optional[str] = None\r\n \"\"\"Do not assign directly to this field. Always use :any:`Design.assign_dna` \r\n (for complementarity checking) or :any:`Strand.set_dna_sequence` \r\n (without complementarity checking, to allow mismatches).\"\"\"\r\n\r\n color: Optional[Color] = None\r\n \"\"\"Color to show this strand in the main view. If not specified in the constructor,\r\n a color is assigned by cycling through a list of defaults given by \r\n :meth:`ColorCycler.colors`\"\"\"\r\n\r\n idt: Optional[IDTFields] = None\r\n \"\"\"Fields used when ordering strands from the synthesis company IDT \r\n (Integrated DNA Technologies, Coralville, IA). If present (i.e., not equal to :const:`None`)\r\n then the method :py:meth:`Design.write_idt_bulk_input_file` can be called to automatically\r\n generate an text file for ordering strands in test tubes: \r\n https://www.idtdna.com/site/order/oligoentry,\r\n as can the method :py:meth:`Design.write_idt_plate_excel_file` for writing a Microsoft Excel \r\n file that can be uploaded to IDT's website for describing DNA sequences to be ordered in 96-well\r\n or 384-well plates.\"\"\"\r\n\r\n is_scaffold: bool = False\r\n \"\"\"Indicates whether this :any:`Strand` is a scaffold for a DNA origami. If any :any:`Strand` in a\r\n :any:`Design` is a scaffold, then the design is considered a DNA origami design.\"\"\"\r\n\r\n modification_5p: Optional[Modification5Prime] = None\r\n \"\"\"\r\n 5' modification; None if there is no 5' modification. \r\n Illegal to have if :py:data:`Strand.circular` is True.\r\n \"\"\"\r\n\r\n modification_3p: Optional[Modification3Prime] = None\r\n \"\"\"\r\n 3' modification; None if there is no 3' modification. \r\n Illegal to have if :py:data:`Strand.circular` is True.\r\n \"\"\"\r\n\r\n modifications_int: Dict[int, ModificationInternal] = field(default_factory=dict)\r\n \"\"\":any:`Modification`'s to the DNA sequence (e.g., biotin, Cy3/Cy5 fluorphores). Maps offset to \r\n modification. If the internal modification is attached to a base \r\n (e.g., internal biotin, /iBiodT/ from IDT), \r\n then the offset is that of the base.\r\n If it goes between two bases \r\n (e.g., internal Cy3, /iCy3/ from IDT),\r\n then the offset is that of the previous base, \r\n e.g., to put a Cy3 between bases at offsets 3 and 4, the offset should be 3. \r\n So for an internal modified base on a sequence of length n, the allowed offsets are 0,...,n-1,\r\n and for an internal modification that goes between bases, the allowed offsets are 0,...,n-2.\"\"\"\r\n\r\n name: Optional[str] = None\r\n \"\"\"\r\n Optional name to give the strand. If specified it is shown on mouseover in the scadnano web interface.\r\n \r\n This is used to interoperate with the dsd DNA sequence design package.\r\n \"\"\"\r\n\r\n label: Optional[StrandLabel] = None\r\n \"\"\"Generic \"label\" object to associate to this :any:`Strand`.\r\n \r\n Useful for associating extra information with the Strand that will be serialized, for example,\r\n for DNA sequence design. It must be an object (e.g., a dict or primitive type such as str or int) \r\n that is naturally JSON serializable. (Calling ``json.dumps`` on the object should succeed without\r\n having to specify a custom encoder.)\r\n \"\"\"\r\n\r\n # not serialized; efficient way to see a list of all domains on a given helix\r\n _helix_idx_domain_map: Dict[int, List[Domain[DomainLabel]]] = field(\r\n init=False, repr=False, compare=False, default_factory=dict)\r\n\r\n def __post_init__(self) -> None:\r\n self._helix_idx_domain_map = defaultdict(list)\r\n\r\n self.set_domains(self.domains)\r\n\r\n self._ensure_modifications_legal()\r\n self._ensure_domains_nonoverlapping()\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n dct: Dict[str, Any] = OrderedDict()\r\n if self.name is not None:\r\n dct[strand_name_key] = self.name\r\n if self.circular:\r\n dct[circular_key] = self.circular\r\n if self.color is not None:\r\n dct[color_key] = self.color.to_json_serializable(suppress_indent)\r\n if self.dna_sequence is not None:\r\n dct[dna_sequence_key] = self.dna_sequence\r\n if self.idt is not None:\r\n dct[idt_key] = self.idt.to_json_serializable(suppress_indent)\r\n dct[domains_key] = [domain.to_json_serializable(suppress_indent) for domain in self.domains]\r\n if hasattr(self, is_scaffold_key) and self.is_scaffold:\r\n dct[is_scaffold_key] = self.is_scaffold\r\n\r\n if self.modification_5p is not None:\r\n dct[modification_5p_key] = self.modification_5p.id\r\n if self.modification_3p is not None:\r\n dct[modification_3p_key] = self.modification_3p.id\r\n if len(self.modifications_int) > 0:\r\n mods_dict = {}\r\n for offset, mod in self.modifications_int.items():\r\n mods_dict[f\"{offset}\"] = mod.id\r\n dct[modifications_int_key] = NoIndent(mods_dict) if suppress_indent else mods_dict\r\n\r\n if self.label is not None:\r\n dct[strand_label_key] = NoIndent(self.label) if suppress_indent else self.label\r\n\r\n return dct\r\n\r\n @staticmethod\r\n def from_json(json_map: dict) -> 'Strand': # remove quotes when Py3.6 support dropped\r\n domain_jsons = mandatory_field(Strand, json_map, domains_key, *legacy_domains_keys)\r\n if len(domain_jsons) == 0:\r\n raise IllegalDesignError(f'{domains_key} list cannot be empty')\r\n\r\n domains: List[Union[Domain, Loopout]] = []\r\n for domain_json in domain_jsons:\r\n if loopout_key in domain_json:\r\n domains.append(Loopout.from_json(domain_json))\r\n else:\r\n domains.append(Domain.from_json(domain_json))\r\n if isinstance(domains[0], Loopout):\r\n raise IllegalDesignError('Loopout at beginning of Strand not supported')\r\n if isinstance(domains[-1], Loopout):\r\n raise IllegalDesignError('Loopout at end of Strand not supported')\r\n\r\n is_scaffold = json_map.get(is_scaffold_key, False)\r\n circular = json_map.get(circular_key, False)\r\n\r\n dna_sequence = optional_field(None, json_map, dna_sequence_key, *legacy_dna_sequence_keys)\r\n\r\n color_str = json_map.get(color_key,\r\n default_scaffold_color if is_scaffold else default_strand_color)\r\n if isinstance(color_str, int):\r\n def decimal_int_to_hex(d: int) -> str:\r\n return \"#\" + \"{0:#08x}\".format(d, 8)[2:] # type: ignore\r\n\r\n color_str = decimal_int_to_hex(color_str)\r\n color = Color(hex_string=color_str)\r\n\r\n label = json_map.get(strand_label_key)\r\n\r\n name = json_map.get(strand_name_key)\r\n\r\n idt_dict: Optional[dict] = json_map.get(idt_key)\r\n idt = None if idt_dict is None else IDTFields.from_json(idt_dict)\r\n # legacy:\r\n # if no name is specified, but there's a name field in idt, then use that as the Strand's name\r\n if name is None and idt_dict is not None and idt_name_key in idt_dict:\r\n name = idt_dict[idt_name_key]\r\n\r\n return Strand(\r\n domains=domains,\r\n dna_sequence=dna_sequence,\r\n circular=circular,\r\n color=color,\r\n idt=idt,\r\n is_scaffold=is_scaffold,\r\n name=name,\r\n label=label,\r\n )\r\n\r\n def __eq__(self, other: Any) -> bool: # remove quotes when Py3.6 support dropped\r\n if not isinstance(other, Strand):\r\n return False\r\n return self.domains == other.domains\r\n\r\n def __hash__(self) -> int:\r\n return hash(self.domains)\r\n\r\n def __str__(self) -> str:\r\n return repr(self) if self.name is None else self.name\r\n\r\n def rotate_domains(self, rotation: int, forward: bool = True) -> None:\r\n \"\"\"\r\n \"Rotates\" the strand by replacing domains with a circular rotation, e.g., if the domains are\r\n\r\n A, B, C, D, E, F\r\n\r\n then ``strand.rotate_domains(2)`` makes the :any:`Strand` have the same domains, but in this order:\r\n \r\n E, F, A, B, C, D\r\n \r\n and ``strand.rotate_domains(2, forward=False)`` makes\r\n \r\n C, D, E, F, A, B\r\n\r\n :param rotation:\r\n Amount to rotate domains.\r\n :param forward:\r\n Whether to move domains forward (wrapping off 3' end back to 5' end) or backward (wrapping off\r\n 5' end back to 3' end).\r\n \"\"\"\r\n idx = rotation if not forward else len(self.domains) - rotation\r\n self.domains = self.domains[idx:] + self.domains[:idx]\r\n\r\n def set_scaffold(self, is_scaf: bool = True) -> None:\r\n \"\"\"Sets this :any:`Strand` as a scaffold. Alters color to default scaffold color.\r\n\r\n If `is_scaf` == ``False``, sets this strand as not a scaffold, and leaves the color alone.\"\"\"\r\n self.is_scaffold = is_scaf\r\n if is_scaf:\r\n self.color = default_scaffold_color\r\n\r\n def set_name(self, name: str) -> None:\r\n \"\"\"Sets name of this :any:`Strand`.\"\"\"\r\n self.name = name\r\n\r\n def set_label(self, label: Any) -> None:\r\n \"\"\"Sets label of this :any:`Strand`.\"\"\"\r\n self.label = label\r\n\r\n def set_color(self, color: Color) -> None:\r\n \"\"\"Sets color of this :any:`Strand`.\"\"\"\r\n self.color = color\r\n\r\n def set_circular(self, circular: bool = True) -> None:\r\n \"\"\"\r\n Sets this to be a circular :any:`Strand` (or non-circular if optional parameter is False).\r\n\r\n :param circular:\r\n whether to make this :any:`Strand` circular (True) or linear (False)\r\n :raises StrandError:\r\n if this :any:`Strand` has a 5' or 3' modification\r\n \"\"\"\r\n if circular and self.modification_5p is not None:\r\n raise StrandError(self, \"cannot have a 5' modification on a circular strand\")\r\n if circular and self.modification_3p is not None:\r\n raise StrandError(self, \"cannot have a 3' modification on a circular strand\")\r\n self.circular = circular\r\n\r\n def set_linear(self) -> None:\r\n \"\"\"\r\n Makes this a linear (non-circular) :any:`Strand`. Equivalent to calling\r\n `self.set_circular(False)`.\r\n \"\"\"\r\n self.set_circular(False)\r\n\r\n def set_domains(self, domains: Iterable[Union[Domain[DomainLabel], Loopout[DomainLabel]]]) -> None:\r\n \"\"\"\r\n Sets the :any:`Domain`'s/:any:`Loopout`'s of this :any:`Strand` to be `domains`,\r\n which can contain a mix of :any:`Domain`'s and :any:`Loopout`'s,\r\n just like the field :py:data:`Strand.domains`.\r\n\r\n :param domains:\r\n The new sequence of :any:`Domain`'s/:any:`Loopout`'s to use for this :any:`Strand`.\r\n :raises StrandError:\r\n if domains has two consecutive :any:`Loopout`'s, consists of just a single :any:`Loopout`'s,\r\n or starts or ends with a :any:`Loopout`\r\n \"\"\"\r\n self.domains = domains if isinstance(domains, list) else list(domains)\r\n\r\n for domain in self.domains:\r\n if isinstance(domain, Domain):\r\n self._helix_idx_domain_map[domain.helix].append(domain)\r\n\r\n for domain in self.domains:\r\n domain._parent_strand = self\r\n\r\n if len(self.domains) == 1:\r\n if isinstance(self.domains[0], Loopout):\r\n raise StrandError(self, 'strand cannot have a single Loopout as its only domain')\r\n\r\n if len(self.domains) == 0:\r\n raise StrandError(self, 'domains cannot be empty')\r\n\r\n for domain1, domain2 in _pairwise(self.domains):\r\n if isinstance(domain1, Loopout) and isinstance(domain2, Loopout):\r\n raise StrandError(self, 'cannot have two consecutive Loopouts in a strand')\r\n\r\n if isinstance(self.domains[0], Loopout):\r\n raise StrandError(self, 'strand cannot begin with a loopout')\r\n\r\n if isinstance(self.domains[-1], Loopout):\r\n raise StrandError(self, 'strand cannot end with a loopout')\r\n\r\n def idt_export_name(self, unique_names: bool = False) -> str:\r\n \"\"\"\r\n :param unique_names:\r\n If True and default name is used,\r\n enforces that strand names must be unique by encoding the forward/reverse Boolean\r\n into the name.\r\n If False (the default), uses cadnano's exact naming convention, which allows two strands\r\n to have the same default name, if they begin and end at the same (helix,offset) pair (but\r\n point in opposite directions at each).\r\n Has no effect if :py:data:`Strand.idt` or :py:data:`Strand.name` are defined;\r\n if those are used, they must be explicitly set to be unique.\r\n :return:\r\n If :py:data:`Strand.name` is not None, return :py:data:`Strand.name`,\r\n otherwise return the result of :py:meth:`Strand.default_export_name`\r\n with parameter `unique_names`.\r\n \"\"\"\r\n return self.name if self.name is not None else self.default_export_name(unique_names)\r\n\r\n def default_export_name(self, unique_names: bool = False) -> str:\r\n \"\"\"\r\n Returns a default name to use when exporting the DNA sequence.\r\n Uses cadnano's naming\r\n convention of, for example `'ST2[5]4[10]'` to indicate a strand that starts at helix 2, offset 5,\r\n and ends at helix 4, offset 10. Note that this naming convention is not unique: two strands in\r\n the system could share this name. To ensure it is unique, set the parameter `unique_names` to True,\r\n which will modify the name with forward/reverse information from the first domain that uniquely\r\n identifies the strand, e.g., `'ST2[5]F4[10]'` or `'ST2[5]R4[10]'`.\r\n\r\n If the strand is a scaffold (i.e., if :py:data:`Strand.is_scaffold` is True),\r\n then the name will begin with `'SCAF'` instead of `'ST'`.\r\n\r\n :param unique_names:\r\n If True, enforces that strand names must be unique by encoding the forward/reverse Boolean\r\n into the name.\r\n If False (the default), uses cadnano's exact naming convention, which allows two strands\r\n to have the same default name, if they begin and end at the same (helix,offset) pair (but\r\n point in opposite directions at each).\r\n :return:\r\n default name to export\r\n (used, for example, by idt DNA export methods :py:meth:`Design.write_idt_plate_excel_file`\r\n and :py:meth:`Design.write_idt_bulk_input_file`\r\n if :py:data:`Strand.name` and :py:data:`Strand.idt.name` are both not set)\r\n \"\"\"\r\n start_helix = self.first_bound_domain().helix\r\n end_helix = self.last_bound_domain().helix\r\n start_offset = self.first_bound_domain().offset_5p()\r\n end_offset = self.last_bound_domain().offset_3p()\r\n forward_str = 'F' if self.first_bound_domain().forward else 'R'\r\n if not unique_names:\r\n forward_str = ''\r\n name = f'{start_helix}[{start_offset}]{forward_str}{end_helix}[{end_offset}]'\r\n return f'SCAF{name}' if self.is_scaffold else f'ST{name}'\r\n\r\n def set_modification_5p(self, mod: Modification5Prime = None) -> None:\r\n \"\"\"Sets 5' modification to be `mod`. `mod` cannot be non-None if :any:`Strand.circular` is True.\"\"\"\r\n if self.circular and mod is not None:\r\n raise StrandError(self, \"cannot have a 5' modification on a circular strand\")\r\n self.modification_5p = mod\r\n\r\n def set_modification_3p(self, mod: Modification3Prime = None) -> None:\r\n \"\"\"Sets 3' modification to be `mod`. `mod` cannot be non-None if :any:`Strand.circular` is True.\"\"\"\r\n if self.circular and mod is not None:\r\n raise StrandError(self, \"cannot have a 3' modification on a circular strand\")\r\n self.modification_3p = mod\r\n\r\n def remove_modification_5p(self) -> None:\r\n \"\"\"Removes 5' modification.\"\"\"\r\n self.modification_5p = None\r\n\r\n def remove_modification_3p(self) -> None:\r\n \"\"\"Removes 3' modification.\"\"\"\r\n self.modification_3p = None\r\n\r\n def set_modification_internal(self, idx: int, mod: ModificationInternal,\r\n warn_on_no_dna: bool = True) -> None:\r\n \"\"\"Adds internal modification `mod` at given DNA index `idx`.\"\"\"\r\n if idx < 0:\r\n raise IllegalDesignError('idx of modification must be nonnegative')\r\n if idx >= self.dna_length():\r\n raise IllegalDesignError(f'idx of modification must be at most length of DNA: '\r\n f'{self.dna_length()}')\r\n if self.dna_sequence is not None:\r\n if mod.allowed_bases is not None and self.dna_sequence[idx] not in mod.allowed_bases:\r\n raise IllegalDesignError(f'only bases {\",\".join(mod.allowed_bases)} are allowed at '\r\n f'index {idx}, but sequence has base {self.dna_sequence[idx]} '\r\n f'\\nDNA sequence: {self.dna_sequence}'\r\n f'\\nmodification: {mod}')\r\n elif warn_on_no_dna:\r\n print('WARNING: no DNA sequence has been assigned, so certain error checks on the internal '\r\n 'modification were not done. To be safe, first assign DNA, then add the modifications.')\r\n self.modifications_int[idx] = mod\r\n\r\n def remove_modification_internal(self, idx: int) -> None:\r\n \"\"\"Removes internal modification at given DNA index `idx`.\"\"\"\r\n if idx in self.modifications_int:\r\n del self.modifications_int[idx]\r\n\r\n def first_domain(self) -> Domain:\r\n \"\"\"First domain on this :any:`Strand`.\"\"\"\r\n domain = self.domains[0]\r\n if isinstance(domain, Loopout):\r\n raise StrandError(self, 'cannot have loopout as first domain on strand')\r\n return domain\r\n\r\n def last_domain(self) -> Domain:\r\n \"\"\"Last domain on this :any:`Strand`.\"\"\"\r\n domain = self.domains[-1]\r\n if isinstance(domain, Loopout):\r\n raise StrandError(self, 'cannot have loopout as last domain on strand')\r\n return domain\r\n\r\n def set_dna_sequence(self, sequence: str) -> None:\r\n \"\"\"Set this :any:`Strand`'s DNA sequence to `seq`\r\n WITHOUT checking for complementarity with overlapping\r\n :any:`Strand`'s or automatically assigning their sequences.\r\n To assign a sequence to a :any:`Strand` and have the overlapping\r\n :any:`Strand`'s automatically have the appropriate Watson-Crick complements assigned,\r\n use :any:`Design.assign_dna`.\r\n\r\n All whitespace in `sequence` is removed,\r\n and lowercase bases 'a', 'c', 'g', 't' are converted to uppercase.\r\n\r\n `sequence`, after all whitespace is removed, must be exactly the same length as\r\n :py:meth:`Strand.dna_length`.\r\n Wildcard symbols (:py:const:`DNA_case_wildcard`) are allowed to leave part of the DNA unassigned.\r\n \"\"\"\r\n trimmed_seq = _remove_whitespace_and_uppercase(sequence)\r\n if len(trimmed_seq) != self.dna_length():\r\n domain = self.first_domain()\r\n raise StrandError(self, f\"strand starting at helix {domain.helix} offset {domain.offset_5p()} \"\r\n f\"has length {self.dna_length()}, but you attempted to assign a \"\r\n f\"DNA sequence of length {len(trimmed_seq)}: {sequence}\")\r\n self.dna_sequence = trimmed_seq\r\n\r\n def dna_length(self) -> int:\r\n \"\"\"Return sum of DNA length of :any:`Domain`'s and :any:`Loopout`'s of this :any:`Strand`.\"\"\"\r\n acc = 0\r\n for domain in self.domains:\r\n acc += domain.dna_length()\r\n return acc\r\n\r\n def bound_domains(self) -> List[Domain]:\r\n \"\"\":any:`Domain`'s of this :any:`Strand` that are not :any:`Loopout`'s.\"\"\"\r\n return [domain for domain in self.domains if isinstance(domain, Domain)]\r\n\r\n def offset_5p(self) -> int:\r\n \"\"\"5' offset of this entire :any:`Strand`, INCLUSIVE.\"\"\"\r\n return self.first_domain().offset_5p()\r\n\r\n def offset_3p(self) -> int:\r\n \"\"\"3' offset of this entire :any:`Strand`, INCLUSIVE.\"\"\"\r\n return self.last_domain().offset_3p()\r\n\r\n def overlaps(self, other: 'Strand') -> bool: # remove quotes when Py3.6 support dropped\r\n \"\"\"Indicates whether `self` overlaps `other_strand`, meaning that the set of offsets occupied\r\n by `self` has nonempty intersection with those occupied by `other_strand`.\"\"\"\r\n for domain_self in self.bound_domains():\r\n for domain_other in other.bound_domains():\r\n if domain_self.overlaps(domain_other):\r\n return True\r\n return False\r\n\r\n def assign_dna_complement_from(self, other: 'Strand') -> None: # remove quotes when Py3.6 support dropped\r\n \"\"\"Assuming a DNA sequence has been assigned to `other`, assign its Watson-Crick\r\n complement to the portions of this Strand that are bound to `other`.\r\n\r\n Generally this is not called directly; use :py:meth:`Design.assign_dna` to assign\r\n a DNA sequence to a :any:`Strand`. The method :py:meth:`Design.assign_dna` will calculate\r\n which other :any:`Strand`'s need\r\n to be assigned via :py:meth:`Strand.assign_dna_complement_from`.\r\n\r\n However, it is permitted to assign the field :py:data:`Strand.dna_sequence` directly\r\n via the method :py:meth:`Strand.set_dna_sequence`.\r\n This is used, for instance, to assign a DNA sequence to a :any:`Strand` bound to another\r\n :any:`Strand`\r\n with an assigned DNA sequence where they overlap. In this case no error checking\r\n about sequence complementarity is done. This can be used to intentionally assign *mismatching*\r\n DNA sequences to :any:`Strand`'s that are bound on a :any:`Helix`.\"\"\"\r\n\r\n already_assigned = self.dna_sequence is not None\r\n\r\n # put DNA sequences to assign to domains in List, one position per domain\r\n strand_complement_builder: List[str] = []\r\n if already_assigned:\r\n for domain in self.domains:\r\n domain_seq = domain.dna_sequence()\r\n if domain_seq is None:\r\n raise ValueError(f'no DNA sequence has been assigned to {self}')\r\n strand_complement_builder.append(domain_seq)\r\n else:\r\n for domain in self.domains:\r\n wildcards = DNA_base_wildcard * domain.dna_length()\r\n strand_complement_builder.append(wildcards)\r\n\r\n for (domain_idx, domain_self) in enumerate(self.domains):\r\n if isinstance(domain_self, Loopout):\r\n domain_self_dna_sequence = DNA_base_wildcard * domain_self.dna_length()\r\n else:\r\n helix = domain_self.helix\r\n\r\n # for helix, domains_on_helix_self in self._helix_idx_domain_map.items():\r\n domains_on_helix_other = other._helix_idx_domain_map[helix]\r\n # for domain_self in domains_on_helix_self:\r\n overlaps = []\r\n for domain_other in domains_on_helix_other:\r\n if domain_self != domain_other and domain_self.overlaps(domain_other):\r\n overlap = domain_self.compute_overlap(domain_other)\r\n overlaps.append((overlap, domain_other))\r\n\r\n overlaps.sort()\r\n\r\n domain_complement_builder = []\r\n start_idx = domain_self.start\r\n # repeatedly insert wildcards into gaps, then reverse WC complement\r\n for ((overlap_left, overlap_right), domain_other) in overlaps:\r\n # wildcards = DNA_base_wildcard * (overlap_left - start_idx)\r\n num_wildcard_bases = domain_self.dna_length_in(start_idx, overlap_left - 1)\r\n wildcards = DNA_base_wildcard * num_wildcard_bases\r\n\r\n other_seq = domain_other.dna_sequence_in(overlap_left, overlap_right - 1)\r\n if other_seq is None:\r\n raise ValueError(f'no DNA sequence has been assigned to strand {other}')\r\n overlap_complement = wc(other_seq)\r\n domain_complement_builder.append(wildcards)\r\n domain_complement_builder.append(overlap_complement)\r\n start_idx = overlap_right\r\n\r\n # last wildcard for gap between last overlap and end\r\n # last_wildcards = DNA_base_wildcard * (domain_self.end - start_idx)\r\n num_wildcard_bases = domain_self.dna_length_in(start_idx, domain_self.end - 1)\r\n last_wildcards = DNA_base_wildcard * num_wildcard_bases\r\n\r\n domain_complement_builder.append(last_wildcards)\r\n\r\n # If pointing left, each individual overlap sequence was reverse orientation in wc(),\r\n # but not the list of all of them put together until now.\r\n if not domain_self.forward:\r\n domain_complement_builder.reverse()\r\n\r\n domain_self_dna_sequence = ''.join(domain_complement_builder)\r\n\r\n # merge with existing pre-assigned sequence\r\n existing_domain_self_dna_sequence = strand_complement_builder[domain_idx]\r\n merged_domain_self_dna_sequence = _string_merge_wildcard(domain_self_dna_sequence,\r\n existing_domain_self_dna_sequence,\r\n DNA_base_wildcard)\r\n strand_complement_builder[domain_idx] = merged_domain_self_dna_sequence\r\n\r\n strand_complement = ''.join(strand_complement_builder)\r\n new_dna_sequence = strand_complement\r\n if self.dna_sequence is not None:\r\n try:\r\n new_dna_sequence = _string_merge_wildcard(self.dna_sequence, new_dna_sequence,\r\n DNA_base_wildcard)\r\n except ValueError:\r\n domain_self = self.first_domain()\r\n domain_other = other.first_domain()\r\n msg = f'strand starting at helix {domain_self.helix}, offset {domain_self.offset_5p()} ' \\\r\n f'has length ' \\\r\n f'{self.dna_length()} and already has a partial DNA sequence assignment of length ' \\\r\n f'{len(self.dna_sequence)}, which is \\n' \\\r\n f'{self.dna_sequence}, ' \\\r\n f'but you tried to assign sequence of length {len(new_dna_sequence)} to it, which ' \\\r\n f'is\\n{new_dna_sequence} (this assignment was indirect, since you assigned directly ' \\\r\n f'to a strand bound to this one). This occurred while directly assigning a DNA ' \\\r\n f'sequence to the strand whose 5\\' end is at helix {domain_other.helix}, and is of ' \\\r\n f'length {other.dna_length()}.'\r\n raise IllegalDesignError(msg)\r\n\r\n self.set_dna_sequence(new_dna_sequence)\r\n # self.dna_sequence = _pad_dna(new_dna_sequence, self.dna_length())\r\n\r\n def insert_domain(self, order: int, domain: Union[Domain, Loopout]) -> None:\r\n # Only intended to be called by Design.insert_domain\r\n self.domains.insert(order, domain)\r\n domain._parent_strand = self\r\n if isinstance(domain, Domain):\r\n self._helix_idx_domain_map[domain.helix].append(domain)\r\n\r\n # add wildcard symbols to DNA sequence to maintain its length\r\n if self.dna_sequence is not None:\r\n start_idx = self.dna_index_start_domain(domain)\r\n end_idx = start_idx + domain.dna_length()\r\n prefix = self.dna_sequence[:start_idx]\r\n suffix = self.dna_sequence[start_idx:]\r\n new_wildcards = DNA_base_wildcard * (end_idx - start_idx)\r\n self.dna_sequence = prefix + new_wildcards + suffix\r\n\r\n def remove_domain(self, domain: Union[Domain, Loopout]) -> None:\r\n # Only intended to be called by Design.remove_domain\r\n\r\n # remove relevant portion of DNA sequence to maintain its length\r\n if self.dna_sequence is not None:\r\n start_idx = self.dna_index_start_domain(domain)\r\n end_idx = start_idx + domain.dna_length()\r\n prefix = self.dna_sequence[:start_idx]\r\n suffix = self.dna_sequence[end_idx:]\r\n self.dna_sequence = prefix + suffix\r\n\r\n self.domains.remove(domain)\r\n domain._parent_strand = None\r\n if isinstance(domain, Domain):\r\n self._helix_idx_domain_map[domain.helix].remove(domain)\r\n\r\n def dna_index_start_domain(self, domain: Union[Domain, Loopout]) -> int:\r\n \"\"\"\r\n Returns index in DNA sequence of domain, e.g., if there are five domains\r\n\r\n 012 3 45 678 9\r\n AAA-C-GG-TTT-ACGT\r\n\r\n Then their indices, respectively in order, are 0, 3, 4, 6, 9.\r\n\r\n :param domain: :any: to find the start DNA index of\r\n :return: index (within DNA sequence string) of substring of DNA starting with given :any:`Domain`\r\n \"\"\"\r\n domain_order = self.domains.index(domain)\r\n idx = sum(self.domains[i].dna_length() for i in range(domain_order))\r\n return idx\r\n\r\n def contains_loopouts(self) -> bool:\r\n for domain in self.domains:\r\n if isinstance(domain, Loopout):\r\n return True\r\n return False\r\n\r\n def first_bound_domain(self) -> Domain:\r\n \"\"\"First :any:`Domain` (i.e., not a :any:`Loopout`) on this :any:`Strand`.\r\n\r\n Currently the first and last strand must not be :any:`Loopout`'s, so this should return the same\r\n domain as :py:meth:`Strand.first_domain`, but in case an initial or final :any:`Loopout` is\r\n supported in the future, this method is provided.\"\"\"\r\n for domain in self.domains:\r\n if isinstance(domain, Domain):\r\n return domain\r\n raise StrandError(self, 'should not be able to have a Strand with no (bound) Domains')\r\n\r\n def last_bound_domain(self) -> Domain:\r\n \"\"\"Last :any:`Domain` (i.e., not a :any:`Loopout`) on this :any:`Strand`.\r\n\r\n Currently the first and last strand must not be :any:`Loopout`'s, so this should return the same\r\n domain as :py:meth:`Strand.first_domain`, but in case an initial or final :any:`Loopout` is\r\n supported in the future, this method is provided.\"\"\"\r\n domain_rev = list(self.domains)\r\n domain_rev.reverse()\r\n for domain in domain_rev:\r\n if isinstance(domain, Domain):\r\n return domain\r\n raise AssertionError('should not be able to have a Strand with no (bound) Domains')\r\n\r\n def reverse(self) -> None:\r\n \"\"\"\r\n Reverses \"polarity\" of this :any:`Strand`.\r\n\r\n Does NOT check whether this keeps the :any:`Design` legal, so be cautious in calling this method\r\n directly. To reverse every :any:`Strand`, called :py:meth:`Design.reverse_all`.\r\n If the design was legal before, it will be legal after calling that method.\r\n \"\"\"\r\n self.domains.reverse()\r\n for domain in self.bound_domains():\r\n domain.forward = not domain.forward\r\n\r\n def _ensure_modifications_legal(self, check_offsets_legal: bool = False) -> None:\r\n if check_offsets_legal:\r\n if self.dna_sequence is None:\r\n raise IllegalDesignError(f\"must assign DNA sequence first\")\r\n mod_i_offsets_list = list(self.modifications_int.keys())\r\n min_offset = min(mod_i_offsets_list) if len(mod_i_offsets_list) > 0 else None\r\n max_offset = max(mod_i_offsets_list) if len(mod_i_offsets_list) > 0 else None\r\n if min_offset is not None and min_offset < 0:\r\n raise IllegalDesignError(f\"smallest offset is {min_offset} but must be nonnegative: \"\r\n f\"{self.modifications_int}\")\r\n if max_offset is not None and max_offset > len(self.dna_sequence):\r\n raise IllegalDesignError(f\"largeest offset is {max_offset} but must be at most \"\r\n f\"{len(self.dna_sequence)}: \"\r\n f\"{self.modifications_int}\")\r\n\r\n def _ensure_domains_nonoverlapping(self) -> None:\r\n for d1, d2 in itertools.combinations(self.domains, 2):\r\n if isinstance(d1, Domain) and isinstance(d2, Domain) and d1.overlaps_illegally(d2):\r\n raise StrandError(self, f'two domains on strand overlap:'\r\n f'\\n{d1}'\r\n f'\\n{d2}')\r\n\r\n def idt_dna_sequence(self) -> str:\r\n self._ensure_modifications_legal(check_offsets_legal=True)\r\n\r\n if self.dna_sequence is None:\r\n raise ValueError('DNA sequence has not been assigned yet')\r\n\r\n ret_list: List[str] = []\r\n if self.modification_5p is not None and self.modification_5p.idt_text is not None:\r\n ret_list.append(self.modification_5p.idt_text)\r\n\r\n for offset, base in enumerate(self.dna_sequence):\r\n ret_list.append(base)\r\n if offset in self.modifications_int: # if internal mod attached to base, replace base\r\n mod = self.modifications_int[offset]\r\n if mod.idt_text is not None:\r\n if mod.allowed_bases is not None:\r\n if base not in mod.allowed_bases:\r\n msg = f'internal modification {mod} can only replace one of these bases: ' \\\r\n f'{\",\".join(mod.allowed_bases)}, but the base at offset {offset} is {base}'\r\n raise IllegalDesignError(msg)\r\n ret_list[-1] = mod.idt_text # replace base with modified base\r\n else:\r\n ret_list.append(mod.idt_text) # append modification between two bases\r\n\r\n if self.modification_3p is not None and self.modification_3p.idt_text is not None:\r\n ret_list.append(self.modification_3p.idt_text)\r\n\r\n return ''.join(ret_list)\r\n\r\n def no_modifications_version(self) -> 'Strand':\r\n \"\"\"\r\n :return: version of this :any:`Strand` with no DNA modifications.\r\n \"\"\"\r\n strand_nomods = replace(self, modification_3p=None, modification_5p=None, modifications_int={})\r\n return strand_nomods\r\n\r\n\r\n# for defining comparables for strongly typed sorting\r\nclass Comparable(metaclass=ABCMeta):\r\n @abstractmethod\r\n def __lt__(self, other: Any) -> bool: ...\r\n\r\n\r\nT = TypeVar('T')\r\n# CT = TypeVar('CT', bound=Comparable)\r\n\r\n# KeyFunction = Callable[[T], CT]\r\nKeyFunction = Callable[[T], Any]\r\n\r\n\r\nclass StrandOrder(enum.Enum):\r\n \"\"\"\r\n Which part of a :any:`Strand` to use for sorting in the\r\n `key function `_\r\n returned by :py:meth:`strand_order_key_function`.\r\n \"\"\"\r\n five_prime = 0\r\n \"\"\"5' end of the strand\"\"\"\r\n\r\n three_prime = 1\r\n \"\"\"3' end of the strand\"\"\"\r\n\r\n five_or_three_prime = 2\r\n \"\"\"Either 5' end or 3' end is used, whichever is first according to the sort order.\"\"\"\r\n\r\n top_left_domain = 3\r\n \"\"\"The start offset of the \"top-left\" :any:`Domain` of the :any:`Strand`: the :any:`Domain` whose\r\n :py:data:`Domain.helix` is minimal, and, among all such :any:`Domain`'s, the one with \r\n minimal :py:data:`Domain.start`.\"\"\"\r\n\r\n\r\ndef strand_order_key_function(*, column_major: bool = True, strand_order: StrandOrder) -> KeyFunction[Strand]:\r\n \"\"\"\r\n Returns a `key function `_\r\n indicating a sorted order for :any:`Strand`'s. Useful as a parameter for\r\n :py:meth:`Design.`.\r\n\r\n :param column_major:\r\n If true, column major order is used: ordered by base offset first, then by helix.\r\n Otherwise row-major order is used: ordered by helix first, then by base offset.\r\n :param strand_order:\r\n Which part of the strand to use as a key for the sorted order.\r\n See :any:`StrandOrder` for definitions.\r\n :return:\r\n A `key function `_ that can be\r\n passed to :py:meth:`Design.` to specify a sorted order for the :any:`Strand`'s.\r\n \"\"\"\r\n\r\n def key(strand: Strand) -> Tuple[int, int]:\r\n # we'll return a tuple (helix_idx, offset) for row-major or (offset, helix_idx) for col-major.\r\n helix_idx: int\r\n offset: int\r\n\r\n if strand_order == StrandOrder.five_prime:\r\n helix_idx = strand.first_bound_domain().helix\r\n offset = strand.first_bound_domain().offset_5p()\r\n elif strand_order == StrandOrder.three_prime:\r\n helix_idx = strand.last_bound_domain().helix\r\n offset = strand.last_bound_domain().offset_3p()\r\n elif strand_order == StrandOrder.five_or_three_prime:\r\n helix_idx_5p = strand.first_bound_domain().helix\r\n offset_5p = strand.first_bound_domain().offset_5p()\r\n helix_idx_3p = strand.last_bound_domain().helix\r\n offset_3p = strand.last_bound_domain().offset_3p()\r\n if column_major:\r\n offset, helix_idx = min((offset_5p, helix_idx_5p), (offset_3p, helix_idx_3p))\r\n else:\r\n helix_idx, offset = min((helix_idx_5p, offset_5p), (helix_idx_3p, offset_3p))\r\n elif strand_order == StrandOrder.top_left_domain:\r\n helix_idx = strand.first_bound_domain().helix\r\n offset = strand.first_bound_domain().start\r\n for domain in strand.bound_domains():\r\n if (helix_idx, offset) > (domain.helix, domain.start):\r\n helix_idx, offset = domain.helix, domain.start\r\n else:\r\n raise ValueError(f'{strand_order} is not a valid StrandOrder')\r\n\r\n if column_major:\r\n return offset, helix_idx\r\n else:\r\n return helix_idx, offset\r\n\r\n return key\r\n\r\n\r\ndef _pad_and_remove_whitespace_and_uppercase(sequence: str, strand: Strand, start: int = 0) -> str:\r\n sequence = _remove_whitespace_and_uppercase(sequence)\r\n padded_sequence = _pad_dna(sequence, strand.dna_length(), start)\r\n return padded_sequence\r\n\r\n\r\ndef _remove_whitespace_and_uppercase(sequence: str) -> str:\r\n sequence = re.sub(r'\\s*', '', sequence)\r\n sequence = sequence.upper()\r\n return sequence\r\n\r\n\r\ndef _pad_dna(sequence: str, length: int, start: int = 0) -> str:\r\n \"\"\"Return `sequence` modified to have length `length`.\r\n\r\n If len(sequence) < length, pad with :py:data:`DNA_base_wildcard`.\r\n\r\n If len(sequence) > length, remove extra symbols, from 0 up to `start`, and at the end.\r\n\r\n :param sequence: sequence to pad\r\n :param length: final length of padded sequence\r\n :param start: index at which to start padding. If not specified, defaults to 0\r\n :return: padded sequence\r\n \"\"\"\r\n if start < 0:\r\n raise ValueError(f'cannot pad DNA with negative start, but start = {start}')\r\n elif start >= length:\r\n raise ValueError(f'cannot pad DNA with start >= length, but start = {start} and '\r\n f'length = {length}')\r\n if len(sequence) > length:\r\n sequence = sequence[start:start + length]\r\n elif len(sequence) < length:\r\n prefix = DNA_base_wildcard * start\r\n suffix = DNA_base_wildcard * (length - len(sequence) - start)\r\n sequence = prefix + sequence + suffix\r\n return sequence\r\n\r\n\r\ndef _string_merge_wildcard(s1: str, s2: str, wildcard: str) -> str:\r\n \"\"\"Takes a \"union\" of two equal-length strings `s1` and `s2`.\r\n Whenever one has a symbol `wildcard` and the other does not, the result has the non-wildcard symbol.\r\n\r\n Raises :py:class:`ValueError` if `s1` and `s2` are not the same length or do not agree on non-wildcard\r\n symbols at any position.\"\"\"\r\n if len(s1) != len(s2):\r\n raise ValueError(f'\\ns1={s1} and\\ns2={s2}\\nare not the same length.')\r\n union_builder = []\r\n for i in range(len(s1)):\r\n c1, c2 = s1[i], s2[i]\r\n if c1 == wildcard:\r\n union_builder.append(c2)\r\n elif c2 == wildcard:\r\n union_builder.append(c1)\r\n elif c1 != c2:\r\n raise ValueError(f's1={s1} and s2={s2} have unequal symbols {c1} and {c2} at position {i}.')\r\n elif c1 == c2:\r\n union_builder.append(c1)\r\n else:\r\n raise AssertionError('should be unreachable')\r\n return ''.join(union_builder)\r\n\r\n\r\nclass IllegalDesignError(ValueError):\r\n \"\"\"Indicates that some aspect of the :any:`Design` object is illegal.\"\"\"\r\n\r\n def __init__(self, the_cause: str) -> None:\r\n self.cause = the_cause\r\n\r\n # __str__ is to print() the value\r\n def __str__(self) -> str:\r\n return repr(self.cause)\r\n\r\n\r\nclass StrandError(IllegalDesignError):\r\n \"\"\"Indicates that the :any:`Design` is illegal due to some specific :any:`Strand`.\r\n Information about the :any:`Strand` is embedded in the error message when this exception is\r\n raised that helps to identify which :any:`Strand` caused the problem.\"\"\"\r\n\r\n def __init__(self, strand: Strand, the_cause: str) -> None:\r\n # need to avoid calling first_bound_domain here to avoid infinite mutual recursion\r\n first_domain: Optional[Domain]\r\n last_domain: Optional[Domain]\r\n if len(strand.domains) > 0 and isinstance(strand.domains[0], Domain):\r\n first_domain = strand.domains[0]\r\n else:\r\n first_domain = None\r\n if len(strand.domains) > 0 and isinstance(strand.domains[-1], Domain):\r\n last_domain = strand.domains[-1]\r\n else:\r\n last_domain = None\r\n\r\n msg = (f'''{the_cause}\r\n strand length = {strand.dna_length()}\r\n DNA length = {len(strand.dna_sequence) if strand.dna_sequence else \"N/A\"}\r\n DNA sequence = {strand.dna_sequence}\r\n strand 5' helix = {first_domain.helix if first_domain is not None else 'N/A'}\r\n strand 5' end offset = {first_domain.offset_5p() if first_domain is not None else 'N/A'}\r\n strand 3' helix = {last_domain.helix if last_domain is not None else 'N/A'}\r\n strand 3' end offset = {last_domain.offset_3p() if last_domain is not None else 'N/A'}\\n''')\r\n\r\n super().__init__(msg)\r\n # super(IllegalDesignError, self).__init__(msg)\r\n\r\n\r\n# def _plates(idt_strands) -> List[str]:\r\n# plates: Set[str] = set()\r\n# for strand in idt_strands:\r\n# if strand.idt is not None and strand.idt.plate is not None:\r\n# plates.add(strand.idt.plate)\r\n# return list(plates)\r\n\r\n\r\n_96WELL_PLATE_ROWS: List[str] = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\r\n_96WELL_PLATE_COLS: List[int] = list(range(1, 13))\r\n\r\n_384WELL_PLATE_ROWS: List[str] = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\r\n 'O',\r\n 'P']\r\n_384WELL_PLATE_COLS: List[int] = list(range(1, 25))\r\n\r\n\r\n@enum.unique\r\nclass PlateType(int, enum.Enum):\r\n \"\"\"Represents two different types of plates in which DNA sequences can be ordered.\"\"\"\r\n\r\n wells96 = 96\r\n \"\"\"96-well plate.\"\"\"\r\n\r\n wells384 = 384\r\n \"\"\"384-well plate.\"\"\"\r\n\r\n def rows(self) -> List[str]:\r\n return _96WELL_PLATE_ROWS if self is PlateType.wells96 else _384WELL_PLATE_ROWS\r\n\r\n def cols(self) -> List[int]:\r\n return _96WELL_PLATE_COLS if self is PlateType.wells96 else _384WELL_PLATE_COLS\r\n\r\n\r\nclass _PlateCoordinate:\r\n\r\n def __init__(self, plate_type: PlateType) -> None:\r\n self._plate_type = plate_type\r\n self._plate: int = 1\r\n self._row_idx: int = 0\r\n self._col_idx: int = 0\r\n\r\n def increment(self) -> None:\r\n self._row_idx += 1\r\n if self._row_idx == len(self._plate_type.rows()):\r\n self._row_idx = 0\r\n self._col_idx += 1\r\n if self._col_idx == len(self._plate_type.cols()):\r\n self._col_idx = 0\r\n self._plate += 1\r\n\r\n def plate(self) -> int:\r\n return self._plate\r\n\r\n def row(self) -> str:\r\n return self._plate_type.rows()[self._row_idx]\r\n\r\n def col(self) -> int:\r\n return self._plate_type.cols()[self._col_idx]\r\n\r\n def well(self) -> str:\r\n return f'{self.row()}{self.col()}'\r\n\r\n\r\ndef remove_helix_idxs_if_default(helices: List[Dict]) -> None:\r\n # removes indices from each helix if they are the default (order of appearance in list)\r\n default = True\r\n for expected_idx, helix in enumerate(helices):\r\n idx = helix[idx_on_helix_key]\r\n if idx != expected_idx:\r\n default = False\r\n break\r\n\r\n if default:\r\n for helix in helices:\r\n del helix[idx_on_helix_key]\r\n\r\n\r\ndef add_quotes(string: str) -> str:\r\n # adds quotes around a string\r\n return f'\"{string}\"'\r\n\r\n\r\ndef mandatory_field(ret_type: Type, json_map: Dict[str, Any], main_key: str, *legacy_keys: str) -> Any:\r\n # should be called from function whose return type is the type being constructed from JSON, e.g.,\r\n # Design or Strand, given by ret_type. This helps give a useful error message\r\n for key in (main_key,) + legacy_keys:\r\n if key in json_map:\r\n return json_map[key]\r\n ret_type_name = ret_type.__name__\r\n msg_about_keys = f'the key \"{main_key}\"'\r\n if len(legacy_keys) > 0:\r\n msg_about_keys += f\" (or any of the following legacy keys: {', '.join(map(add_quotes, legacy_keys))})\"\r\n msg = f'I was looking for {msg_about_keys} in the JSON encoding of a {ret_type_name}, ' \\\r\n f'but I did not find it.' \\\r\n f'\\n\\nThis occurred when reading this JSON object:\\n{json_map}'\r\n raise IllegalDesignError(msg)\r\n\r\n\r\ndef optional_field(default_value: Any, json_map: Dict[str, Any], main_key: str, *legacy_keys: str) -> Any:\r\n # like dict.get, except that it checks for multiple keys\r\n for key in (main_key,) + legacy_keys:\r\n if key in json_map:\r\n return json_map[key]\r\n return default_value\r\n\r\n\r\n@dataclass\r\nclass Geometry(_JSONSerializable):\r\n \"\"\"Parameters controlling some geometric visualization/physical aspects of Design.\"\"\"\r\n\r\n rise_per_base_pair: float = 0.332\r\n \"\"\"Distance in nanometers between two adjacent base pairs along the length of a DNA double helix.\"\"\"\r\n\r\n helix_radius: float = 1.0\r\n \"\"\"Radius of a DNA helix in nanometers.\"\"\"\r\n\r\n bases_per_turn: float = 10.5\r\n \"\"\"Number of DNA base pairs in a full turn of DNA.\"\"\"\r\n\r\n minor_groove_angle: float = 150.0\r\n \"\"\"Minor groove angle in degrees.\"\"\"\r\n\r\n inter_helix_gap: float = 1.0\r\n \"\"\"Gap between helices in nanometers (due to electrostatic repulsion; needed to display to scale).\"\"\"\r\n\r\n def distance_between_helices(self) -> float:\r\n return 2 * self.helix_radius + self.inter_helix_gap\r\n\r\n def is_default(self) -> bool:\r\n return self == _default_geometry\r\n\r\n @staticmethod\r\n def from_json(json_map: dict) -> 'Geometry': # remove quotes when Py3.6 support dropped\r\n geometry = Geometry()\r\n geometry.rise_per_base_pair = optional_field(_default_geometry.rise_per_base_pair, json_map,\r\n rise_per_base_pair_key, *legacy_rise_per_base_pair_keys)\r\n geometry.helix_radius = optional_field(_default_geometry.helix_radius, json_map, helix_radius_key)\r\n geometry.bases_per_turn = optional_field(_default_geometry.bases_per_turn, json_map,\r\n bases_per_turn_key)\r\n geometry.minor_groove_angle = optional_field(_default_geometry.minor_groove_angle, json_map,\r\n minor_groove_angle_key)\r\n geometry.inter_helix_gap = optional_field(_default_geometry.inter_helix_gap, json_map,\r\n inter_helix_gap_key)\r\n return geometry\r\n\r\n @staticmethod\r\n def keys() -> List[str]:\r\n return [rise_per_base_pair_key, helix_radius_key, bases_per_turn_key, minor_groove_angle_key,\r\n inter_helix_gap_key]\r\n\r\n def values(self) -> List[float]:\r\n return [self.rise_per_base_pair, self.helix_radius, self.bases_per_turn, self.minor_groove_angle,\r\n self.inter_helix_gap]\r\n\r\n @staticmethod\r\n def default_values() -> List[float]:\r\n return _default_geometry.values()\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n dct: Dict[str, Any] = OrderedDict()\r\n for name, val, val_default in zip(Geometry.keys(), self.values(), Geometry.default_values()):\r\n if val != val_default:\r\n dct[name] = val\r\n return dct\r\n\r\n\r\n_default_geometry = Geometry()\r\n\r\n\r\ndef _check_helices_view_order_and_return(\r\n helices_view_order: Optional[List[int]], helix_idxs: Iterable[int]) -> List[int]:\r\n if helices_view_order is None:\r\n identity = sorted(helix_idxs)\r\n helices_view_order = identity\r\n else:\r\n _check_helices_view_order_is_bijection(helices_view_order, helix_idxs)\r\n return helices_view_order\r\n\r\n\r\ndef _check_helices_grid_legal(grid: Grid, helices: Iterable[Helix]) -> None:\r\n for helix in helices:\r\n if grid == Grid.none and helix.grid_position is not None:\r\n raise IllegalDesignError(\r\n f'grid is none, but Helix {helix.idx} has grid_position = {helix.grid_position}')\r\n elif grid != Grid.none and helix.position is not None:\r\n raise IllegalDesignError(\r\n f'grid is not none, but Helix {helix.idx} has position = ${helix.position}')\r\n\r\n\r\ndef _check_helices_view_order_is_bijection(helices_view_order: List[int], helix_idxs: Iterable[int]) -> None:\r\n if not (sorted(helices_view_order) == sorted(helix_idxs)):\r\n raise IllegalDesignError(\r\n f\"The specified helices view order: {helices_view_order}\\n \"\r\n f\"is not a bijection on helices indices: {helix_idxs}.\")\r\n\r\n\r\n@dataclass\r\nclass Design(_JSONSerializable, Generic[StrandLabel, DomainLabel]):\r\n \"\"\"Object representing the entire design of the DNA structure.\"\"\"\r\n\r\n strands: List[Strand[StrandLabel, DomainLabel]]\r\n \"\"\"All of the :any:`Strand`'s in this :any:`Design`.\r\n \r\n Required field.\"\"\"\r\n\r\n helices: Dict[int, Helix] = None # type: ignore\r\n \"\"\"All of the :any:`Helix`'s in this :any:`Design`. \r\n This is a dictionary mapping index to the :any:`Helix` with that index; if helices have indices \r\n 0, 1, ..., num_helices-1, then this can be used as a list of Helices. \r\n \r\n Optional field. If not specified, then the number of helices will be just large enough to store the\r\n largest index :py:data:`Domain.helix` \r\n stored in any :any:`Domain` \r\n in :py:data:`Design.strands`.\"\"\"\r\n\r\n groups: Dict[str, HelixGroup] = None # type: ignore\r\n \"\"\":any:`HelixGroup`'s in this :any:`Design`.\"\"\"\r\n\r\n geometry: Geometry = field(default_factory=lambda: Geometry())\r\n \"\"\"Controls some geometric/physical aspects of this :any:`Design`.\"\"\"\r\n\r\n automatically_assign_color: bool = field(repr=False, default=True)\r\n \"\"\"If `automatically_assign_color` = ``False``, then for any :any:`Strand` such that\r\n `Strand.color` = ``None``, do not automatically assign a :any:`Color` to it. \r\n In this case color will be set to its default of ``None`` and will not be\r\n written to the JSON with :py:meth:`Design.write_scadnano_file` or :py:meth:`Design.to_json`.\"\"\"\r\n\r\n color_cycler: ColorCycler = field(default_factory=lambda: ColorCycler(), init=False)\r\n\r\n def __init__(self, *,\r\n helices: Optional[Union[List[Helix], Dict[int, Helix]]] = None,\r\n groups: Optional[Dict[str, HelixGroup]] = None,\r\n strands: List[Strand] = None,\r\n grid: Optional[Grid] = None,\r\n helices_view_order: List[int] = None,\r\n geometry: Geometry = None) -> None:\r\n \"\"\"\r\n :param helices: List of :any:`Helix`'s; if missing, set based on `strands`.\r\n :param groups: Dict mapping group name to :any:`HelixGroup`.\r\n Mutually exclusive with `helices_view_order`, and any non-none :any:`Grid`. If set, then each\r\n :any:`Helix` must have its :py:data:`Helix.group` field set to a group name that is one of the\r\n keys of this dict.\r\n :param strands: List of :any:`Strand`'s. If missing, will be empty.\r\n :param grid: :any:`Grid` to use.\r\n :param helices_view_order: order in which to view helices from top to bottom in web interface\r\n main view.\r\n Mutually exclusive with `groups`.\r\n This list must contain each :py:data:`Helix.idx` exactly once.\r\n If no :py:data:`Helix.idx` is explicitly specified, then this should be a permutation of the\r\n list [0,1,...,len(helices)-1].\r\n default is to display in increasing order of :py:data:`Helix.idx`.\r\n :param geometry: geometric physical parameters for visualization.\r\n If not specified, a default set of parameters from the literature are used.\r\n \"\"\"\r\n using_groups = groups is not None\r\n\r\n if helices_view_order is not None and using_groups:\r\n raise IllegalDesignError('Design.helices_view_order and Design.groups are mutually exclusive. '\r\n 'Set at most one of them.')\r\n\r\n if grid is not None and using_groups:\r\n raise IllegalDesignError('Design.grid and Design.groups are mutually exclusive. '\r\n 'Set at most one of them.')\r\n\r\n if grid is None and not using_groups:\r\n # make sure we only set this if groups are not being used\r\n grid = Grid.none\r\n\r\n self.strands = [] if strands is None else strands\r\n self.color_cycler = ColorCycler()\r\n self.geometry = Geometry() if geometry is None else geometry\r\n\r\n if groups is None:\r\n self.groups = {default_group_name: HelixGroup()}\r\n else:\r\n self.groups = groups\r\n if grid is not None:\r\n raise IllegalDesignError('cannot use a non-none grid for whole Design when helix groups are '\r\n 'used; only the HelixGroups can have non-none grids in this case')\r\n if helices_view_order is not None:\r\n raise IllegalDesignError('cannot use helices_view_order for whole Design when helix groups '\r\n 'are used; only the HelixGroups can have helices_view_order '\r\n 'in this case')\r\n\r\n if helices is None:\r\n if len(self.strands) > 0:\r\n max_helix_idx = max(\r\n domain.helix for strand in self.strands for domain in strand.bound_domains())\r\n helices = {idx: Helix(idx=idx) for idx in range(max_helix_idx + 1)}\r\n else:\r\n helices = {}\r\n elif not (isinstance(helices, dict) or isinstance(helices, list)):\r\n raise IllegalDesignError('type of parameter helices must be list of helices, '\r\n f'dict mapping int to helices, or None, but it is {type(helices)}')\r\n\r\n self.helices = Design._normalize_helices_as_dict(helices)\r\n\r\n # set up helices_view_order in groups\r\n uses_default_group = self._has_default_groups()\r\n for name, group in self.groups.items():\r\n helix_idxs_in_group = self.helices_idxs_in_group(name)\r\n if uses_default_group:\r\n helices_view_order_for_group = helices_view_order\r\n grid_for_group = grid\r\n else:\r\n helices_view_order_for_group = group.helices_view_order\r\n grid_for_group = group.grid\r\n group.helices_view_order = _check_helices_view_order_and_return(helices_view_order_for_group,\r\n helix_idxs_in_group)\r\n if grid_for_group is None:\r\n raise AssertionError()\r\n group.grid = grid_for_group\r\n helices_in_group = [self.helices[idx] for idx in helix_idxs_in_group]\r\n _check_helices_grid_legal(group.grid, helices_in_group)\r\n\r\n self.__post_init__()\r\n\r\n def __post_init__(self) -> None:\r\n # XXX: exact order of these calls is important\r\n self._ensure_helices_distinct_objects()\r\n self._ensure_strands_distinct_objects()\r\n self._set_helices_grid_positions_or_positions()\r\n self._build_domains_on_helix_lists()\r\n self._set_helices_min_max_offsets(update=False)\r\n self._ensure_helix_groups_exist()\r\n self._assign_default_helices_view_orders_to_groups()\r\n self._check_legal_design()\r\n\r\n if self.automatically_assign_color:\r\n self._assign_colors_to_strands()\r\n\r\n @property\r\n def helices_view_order(self) -> List[int]:\r\n \"\"\"\r\n Return helices_view_order of this :any:`Design` if no :any:`HelixGroup`'s are being used, otherwise\r\n raise a ValueError.\r\n\r\n :return: helices_view_order of this :any:`Design`\r\n \"\"\"\r\n group = self._get_default_group()\r\n if group.helices_view_order is None:\r\n raise ValueError(f'group {group} does not have helices_view_order defined')\r\n return group.helices_view_order\r\n\r\n @property\r\n def grid(self) -> Grid:\r\n \"\"\"\r\n Return grid of this :any:`Design` if no :any:`HelixGroup`'s are being used, otherwise\r\n raise a ValueError.\r\n\r\n :return: grid of this :any:`Design`\r\n \"\"\"\r\n group = self._get_default_group()\r\n return group.grid\r\n\r\n def set_grid(self, grid: Grid) -> None:\r\n \"\"\"\r\n Sets the grid of the default :any:`HelixGroup`, if the default is being used,\r\n otherwise raises an exception.\r\n\r\n :param grid:\r\n new grid to set for the (only) :any:`HelixGroup` in this :any:`Design`\r\n :raises IllegalDesignError:\r\n if there is more than one :any:`HelixGroup` in this :any:`Design`\r\n \"\"\"\r\n group = self._get_default_group()\r\n group.grid = grid\r\n\r\n def _get_default_group(self) -> HelixGroup:\r\n # Gets default group and raise exception if default group is not being used\r\n if not self._has_default_groups():\r\n raise ValueError('The default group is not being used for this design.')\r\n if self.groups is None:\r\n raise AssertionError('Design.groups should not be None by this point')\r\n groups: List[HelixGroup] = list(self.groups.values())\r\n group: HelixGroup = groups[0]\r\n return group\r\n\r\n def helices_idxs_in_group(self, group_name: str) -> List[int]:\r\n \"\"\"\r\n Indexes of :any:`Helix`'s in this group. Must be associated with a :any:`Design` for this to work.\r\n\r\n :param group_name: name of group\r\n :return: list of indices of :any:`Helix`'s in this :any:`HelixGroup`\r\n \"\"\"\r\n return [idx for idx, helix in self.helices.items() if helix.group == group_name]\r\n\r\n def _assign_colors_to_strands(self) -> None:\r\n # if color not specified, pick one by cycling through list of staple colors,\r\n # unless caller specified not to\r\n for strand in self.strands:\r\n self._assign_color_to_strand(strand)\r\n\r\n def _assign_color_to_strand(self, strand: Strand) -> None:\r\n if strand.color is None and self.automatically_assign_color:\r\n if strand.is_scaffold:\r\n strand.color = default_scaffold_color\r\n else:\r\n strand.color = next(self.color_cycler)\r\n\r\n @staticmethod\r\n def from_scadnano_file(filename: str) -> 'Design': # remove quotes when Py3.6 support dropped\r\n \"\"\"\r\n Loads a :any:`Design` from the file with the given name.\r\n\r\n :param filename: name of the file with the design. Should be a JSON file ending in .dna\r\n :return: Design described in the file\r\n \"\"\"\r\n with open(filename) as f:\r\n json_str = f.read()\r\n return Design.from_scadnano_json_str(json_str)\r\n\r\n @staticmethod\r\n def from_scadnano_json_str(json_str: str) -> 'Design': # remove quotes when Py3.6 support dropped\r\n \"\"\"\r\n Loads a :any:`Design` from the given JSON string.\r\n\r\n :param json_str: JSON description of the :any:`Design`\r\n :return: Design described in the JSON string\r\n \"\"\"\r\n json_map = json.loads(json_str)\r\n try:\r\n design = Design.from_scadnano_json_map(json_map)\r\n return design\r\n except KeyError as e:\r\n raise IllegalDesignError(f'I was expecting a JSON key but did not find it: {e}')\r\n\r\n @staticmethod\r\n def _check_mutually_exclusive_fields(json_map: dict) -> None:\r\n exclusive_pairs = [\r\n (grid_key, groups_key),\r\n (helices_view_order_key, groups_key),\r\n ]\r\n for key1, key2 in exclusive_pairs:\r\n if key1 in json_map and key2 in json_map:\r\n raise IllegalDesignError(f'cannot specify both \"{key1}\" and \"{key2}\" in Design JSON')\r\n\r\n @staticmethod\r\n def from_scadnano_json_map(\r\n json_map: dict) -> 'Design': # remove quotes when Py3.6 support dropped\r\n \"\"\"\r\n Loads a :any:`Design` from the given JSON object (i.e., Python object obtained by calling\r\n json.loads(json_str) from a string representing contents of a JSON file.\r\n\r\n :param json_map: map describing the :any:`Design`;\r\n should be JSON serializable via ``encode(json_map)``\r\n :return: :any:`Design` described in the object\r\n \"\"\"\r\n # version = json_map.get(version_key, initial_version) # not sure what to do with version\r\n\r\n Design._check_mutually_exclusive_fields(json_map)\r\n\r\n grid = optional_field(None, json_map, grid_key)\r\n grid_is_none = grid == Grid.none\r\n\r\n using_groups = groups_key in json_map\r\n\r\n helices = []\r\n deserialized_helices_list = json_map[helices_key]\r\n num_helices = len(deserialized_helices_list)\r\n\r\n # create Helices\r\n idx_default = 0\r\n for helix_json in deserialized_helices_list:\r\n helix = Helix.from_json(helix_json)\r\n if not using_groups and grid_is_none and grid_position_key in helix_json:\r\n raise IllegalDesignError(\r\n f'grid is none, but Helix {idx_default} has grid_position = {helix_json[grid_position_key]}')\r\n elif not using_groups and not grid_is_none and position_key in helix_json:\r\n raise IllegalDesignError(\r\n f'grid is not none, but Helix {idx_default} has position = ${helix_json[position_key]}')\r\n helices.append(helix)\r\n idx_default += 1\r\n\r\n # helix groups\r\n groups = None\r\n if groups_key in json_map:\r\n groups_json = json_map[groups_key]\r\n groups = {}\r\n for name, group_json in groups_json.items():\r\n num_helices_in_group = sum(1 for helix in helices if helix.group == name)\r\n groups[name] = HelixGroup.from_json(group_json, num_helices=num_helices_in_group)\r\n\r\n # view order of helices\r\n helices_view_order = json_map.get(helices_view_order_key)\r\n if helices_view_order is not None:\r\n helix_idxs = [helix.idx for helix in helices]\r\n if len(helices_view_order) != num_helices:\r\n raise IllegalDesignError(f'length of helices ({num_helices}) does not match '\r\n f'length of helices_view_order ({len(helices_view_order)})')\r\n if sorted(helices_view_order) != sorted(helix_idxs):\r\n raise IllegalDesignError(f'helices_view_order = {helices_view_order} is not a '\r\n f'permutation of the set of helices {helix_idxs}')\r\n\r\n # strands\r\n strands = []\r\n strand_jsons = mandatory_field(Design, json_map, strands_key)\r\n for strand_json in strand_jsons:\r\n strand = Strand.from_json(strand_json)\r\n strands.append(strand)\r\n\r\n # modifications in whole design\r\n if design_modifications_key in json_map:\r\n all_mods_json = json_map[design_modifications_key]\r\n all_mods = {}\r\n for mod_key, mod_json in all_mods_json.items():\r\n mod = Modification.from_json(mod_json)\r\n mod = dataclasses.replace(mod, id=mod_key)\r\n all_mods[mod_key] = mod\r\n Design.assign_modifications_to_strands(strands, strand_jsons, all_mods)\r\n\r\n geometry = None\r\n if geometry_key in json_map:\r\n geometry = Geometry.from_json(json_map[geometry_key])\r\n\r\n return Design(\r\n helices=helices,\r\n groups=groups,\r\n strands=strands,\r\n grid=grid,\r\n helices_view_order=helices_view_order,\r\n geometry=geometry,\r\n )\r\n\r\n def to_json_serializable(self, suppress_indent: bool = True, **kwargs: Any) -> Dict[str, Any]:\r\n dct: Any = OrderedDict()\r\n dct[version_key] = __version__\r\n\r\n if self._has_default_groups():\r\n dct[grid_key] = str(self.grid)[5:] # remove prefix 'Grid.'\r\n\r\n if not self.geometry.is_default():\r\n dct[geometry_key] = self.geometry.to_json_serializable(suppress_indent)\r\n\r\n if not self._has_default_groups():\r\n group_map = {}\r\n for name, group in self.groups.items():\r\n helix_idxs_in_group = [helix.idx for helix in self.helices.values() if\r\n helix.group == name]\r\n group_map[name] = group.to_json_serializable(suppress_indent,\r\n helix_idxs=helix_idxs_in_group)\r\n dct[groups_key] = group_map\r\n\r\n helices_json = []\r\n for helix in self.helices.values():\r\n group = self.groups[helix.group]\r\n helix_json = helix.to_json_serializable(suppress_indent, grid=group.grid)\r\n helices_json.append(helix_json)\r\n dct[helices_key] = helices_json\r\n\r\n # remove idx key from list of helices if they have the default index\r\n unwrapped_helices = list(dct[helices_key])\r\n if len(unwrapped_helices) > 0:\r\n for i, wrapped in enumerate(unwrapped_helices):\r\n if isinstance(wrapped, NoIndent):\r\n unwrapped_helices[i] = wrapped.value\r\n remove_helix_idxs_if_default(unwrapped_helices)\r\n\r\n if self._has_default_groups():\r\n default_helices_view_order = sorted(self.helices.keys())\r\n if self.helices_view_order != default_helices_view_order:\r\n dct[helices_view_order_key] = NoIndent(\r\n self.helices_view_order) if suppress_indent else self.helices_view_order\r\n\r\n # modifications\r\n mods = self._all_modifications()\r\n if len(mods) > 0:\r\n mods_dict = {}\r\n for mod in mods:\r\n if mod.id not in mods_dict:\r\n mods_dict[mod.id] = mod.to_json_serializable(suppress_indent)\r\n dct[design_modifications_key] = mods_dict\r\n\r\n dct[strands_key] = [strand.to_json_serializable(suppress_indent) for strand in self.strands]\r\n\r\n for helix_list_order, helix in enumerate(self.helices.values()):\r\n helix_json_maybe: Union[NoIndent, Dict[str, Any]] = dct[helices_key][helix_list_order]\r\n if isinstance(helix_json_maybe, NoIndent):\r\n helix_json = helix_json_maybe.value # get past NoIndent surrounding helix, if it is there\r\n else:\r\n helix_json = helix_json_maybe\r\n # XXX: no need to check here because key was already deleted by Helix.to_json_serializable\r\n # max_offset still needs to be checked here since it requires global knowledge of Strands\r\n # if 0 == helix_json[min_offset_key]:\r\n # del helix_json[min_offset_key]\r\n max_offset = max((domain.end for strand in self.strands for domain in strand.bound_domains()),\r\n default=-1)\r\n if max_offset == helix_json[max_offset_key] or helix_json[max_offset_key] is None:\r\n del helix_json[max_offset_key]\r\n\r\n return dct\r\n\r\n @property\r\n def scaffold(self) -> Optional[Strand]:\r\n \"\"\"Returns the first scaffold in this :any:`Design`, if there is one, or ``None`` otherwise.\"\"\"\r\n for strand in self.strands:\r\n if strand.is_scaffold:\r\n return strand\r\n return None\r\n\r\n @staticmethod\r\n def _normalize_helices_as_dict(helices: Union[List[Helix], Dict[int, Helix]]) -> Dict[int, Helix]:\r\n def idx_of(helix: Helix, order: int) -> int:\r\n return order if helix.idx is None else helix.idx\r\n\r\n if isinstance(helices, list):\r\n indices = [idx_of(helix, idx) for idx, helix in enumerate(helices)]\r\n if len(set(indices)) < len(indices):\r\n duplicates = [index for index, count in Counter(indices).items() if count > 1]\r\n raise IllegalDesignError(\r\n 'No two helices can share an index, but these indices appear on '\r\n f'multiple helices: {\", \".join(map(str, duplicates))}')\r\n helices = {idx_of(helix, idx): helix for idx, helix in enumerate(helices)}\r\n\r\n for idx, helix in helices.items():\r\n helix.idx = idx\r\n\r\n return helices\r\n\r\n @staticmethod\r\n def assign_modifications_to_strands(strands: List[Strand], strand_jsons: List[dict],\r\n all_mods: Dict[str, Modification]) -> None:\r\n for strand, strand_json in zip(strands, strand_jsons):\r\n if modification_5p_key in strand_json:\r\n mod_name = strand_json[modification_5p_key]\r\n strand.modification_5p = cast(Modification5Prime, all_mods[mod_name])\r\n if modification_3p_key in strand_json:\r\n mod_name = strand_json[modification_3p_key]\r\n strand.modification_3p = cast(Modification3Prime, all_mods[mod_name])\r\n if modifications_int_key in strand_json:\r\n mod_names_by_offset = strand_json[modifications_int_key]\r\n for offset_str, mod_name in mod_names_by_offset.items():\r\n offset = int(offset_str)\r\n strand.modifications_int[offset] = cast(ModificationInternal, all_mods[mod_name])\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_find_5_end(vstrands: VStrands, strand_type: str, helix_num: int, base_id: int,\r\n id_from: int,\r\n base_from: int) -> Tuple[int, int, bool]:\r\n \"\"\" Routine which finds the 5' end of a strand in a cadnano v2 import. It returns the\r\n helix and the base of the 5' end.\r\n \"\"\"\r\n id_from_before = helix_num # 'id' stands for helix id\r\n base_from_before = base_id\r\n\r\n circular_seen = {}\r\n is_circular = False\r\n\r\n while not (id_from == -1 and base_from == -1):\r\n if (id_from, base_from) in circular_seen:\r\n is_circular = True\r\n break\r\n circular_seen[(id_from, base_from)] = True\r\n id_from_before = id_from\r\n base_from_before = base_from\r\n id_from, base_from, _, _ = vstrands[id_from][strand_type][base_from]\r\n return id_from_before, base_from_before, is_circular\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_find_strand_color(vstrands: VStrands, strand_type: str, strand_5_end_base: int,\r\n strand_5_end_helix: int) -> Color:\r\n \"\"\"Routine that finds the color of a cadnano v2 strand.\"\"\"\r\n color: Color = default_cadnano_strand_color\r\n\r\n if strand_type == 'scaf':\r\n return default_scaffold_color\r\n\r\n if strand_type == 'stap':\r\n base_id: int\r\n stap_color: int\r\n\r\n for base_id, stap_color in vstrands[strand_5_end_helix]['stap_colors']:\r\n if base_id == strand_5_end_base:\r\n color = Color.from_cadnano_v2_int_hex(stap_color)\r\n break\r\n return color\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_extract_deletions(skip_table: Dict[int, Any], start: int, end: int) -> List[int]:\r\n \"\"\" Routines which converts cadnano skips to scadnano deletions \"\"\"\r\n to_return: List[int] = []\r\n for base_id in range(start, end):\r\n if skip_table[base_id] == -1:\r\n to_return.append(base_id)\r\n return to_return\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_extract_insertions(loop_table: Dict[int, Any],\r\n start: int, end: int) -> List[Tuple[int, int]]:\r\n \"\"\" Routines which converts cadnano skips to scadnano insertions \"\"\"\r\n to_return: List[Tuple[int, int]] = []\r\n for base_id in range(start, end):\r\n if loop_table[base_id] != 0:\r\n to_return.append((base_id, loop_table[base_id]))\r\n return to_return\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_explore_domains(vstrands: VStrands, seen: Dict[Tuple[int, int], bool],\r\n strand_type: str,\r\n strand_5_end_base: int,\r\n strand_5_end_helix: int) -> List[Domain]:\r\n \"\"\"Finds all domains of a cadnano v2 strand. \"\"\"\r\n curr_helix = strand_5_end_helix\r\n curr_base = strand_5_end_base\r\n domains: List[Domain] = []\r\n\r\n direction_forward = (strand_type == 'scaf' and curr_helix % 2 == 0) or (\r\n (strand_type == 'stap' and curr_helix % 2 == 1))\r\n start, end = -1, -1\r\n if direction_forward:\r\n start = curr_base\r\n else:\r\n end = curr_base\r\n\r\n circular_seen = {}\r\n while not (curr_helix == -1 and curr_base == -1):\r\n if (curr_helix, curr_base) in circular_seen:\r\n break\r\n circular_seen[(curr_helix, curr_base)] = True\r\n\r\n old_helix = curr_helix\r\n old_base = curr_base\r\n seen[(curr_helix, curr_base)] = True\r\n curr_helix, curr_base = vstrands[curr_helix][strand_type][curr_base][2:]\r\n # Add crossover\r\n # We have a crossover when we jump helix or when order is broken on same helix\r\n # Or circular strand\r\n if curr_helix != old_helix or (not direction_forward and curr_base > old_base) or (\r\n direction_forward and curr_base < old_base) or (\r\n curr_helix == strand_5_end_helix and curr_base == strand_5_end_base):\r\n\r\n if direction_forward:\r\n end = old_base\r\n else:\r\n start = old_base\r\n\r\n domains.append(\r\n Domain(old_helix, direction_forward, min(start, end), max(start, end) + 1,\r\n deletions=Design._cadnano_v2_import_extract_deletions(\r\n vstrands[old_helix]['skip'], start, end),\r\n insertions=Design._cadnano_v2_import_extract_insertions(\r\n vstrands[old_helix]['loop'], start, end)))\r\n\r\n direction_forward = (strand_type == 'scaf' and curr_helix % 2 == 0) or (\r\n (strand_type == 'stap' and curr_helix % 2 == 1))\r\n start, end = -1, -1\r\n if direction_forward:\r\n start = curr_base\r\n else:\r\n end = curr_base\r\n\r\n return domains\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_circular_strands_merge_first_last_domains(domains: List[Domain]) -> None:\r\n \"\"\" When we create domains for circular strands in the cadnano import routine, we may end up\r\n with a fake crossover if first and last domain are on same helix, we have to merge them \r\n if it is the case.\r\n \"\"\"\r\n if domains[0].helix != domains[-1].helix:\r\n return\r\n\r\n domains[0].start = min(domains[0].start, domains[-1].start)\r\n domains[0].end = max(domains[0].end, domains[-1].end)\r\n\r\n del domains[-1]\r\n\r\n @staticmethod\r\n def _cadnano_v2_import_explore_strand(vstrands: VStrands,\r\n strand_type: str, seen: Dict[Tuple[int, int], bool],\r\n helix_num: int,\r\n base_id: int) -> Optional[Strand]:\r\n \"\"\" Routine that will follow a cadnano v2 strand accross helices and create\r\n cadnano domains and strand accordingly.\r\n \"\"\"\r\n\r\n seen[(helix_num, base_id)] = True\r\n id_from, base_from, id_to, base_to = vstrands[helix_num][strand_type][base_id]\r\n\r\n if (id_from, base_from, id_to, base_to) == (-1, -1, -1, -1):\r\n return None\r\n\r\n strand_5_end_helix, strand_5_end_base, is_circular = Design._cadnano_v2_import_find_5_end(vstrands,\r\n strand_type,\r\n helix_num,\r\n base_id,\r\n id_from,\r\n base_from)\r\n\r\n strand_color = Design._cadnano_v2_import_find_strand_color(vstrands, strand_type,\r\n strand_5_end_base,\r\n strand_5_end_helix)\r\n domains: List[Domain] = Design._cadnano_v2_import_explore_domains(vstrands, seen, strand_type,\r\n strand_5_end_base,\r\n strand_5_end_helix)\r\n # merge first and last domain if circular\r\n if is_circular:\r\n Design._cadnano_v2_import_circular_strands_merge_first_last_domains(domains)\r\n domains_loopouts = cast(List[Union[Domain, Loopout]], # noqa\r\n domains) # type: ignore\r\n strand: Strand = Strand(domains=domains_loopouts,\r\n is_scaffold=(strand_type == 'scaf'), color=strand_color, circular=is_circular)\r\n\r\n return strand\r\n\r\n # remove quotes when Py3.6 support dropped\r\n @staticmethod\r\n def from_cadnano_v2(directory: Optional[str] = None, filename: Optional[str] = None,\r\n json_dict: Optional[dict] = None) -> 'Design':\r\n \"\"\"\r\n Creates a Design from a cadnano v2 file.\r\n \"\"\"\r\n\r\n if json_dict is None and filename is not None and directory is not None:\r\n file_path = os.path.join(directory, filename)\r\n f = open(file_path, 'r')\r\n cadnano_v2_design = json.load(f)\r\n f.close()\r\n elif json_dict is not None:\r\n cadnano_v2_design = json_dict\r\n else:\r\n raise ValueError('must have json_dict None and filename/directory not None, or vice versa')\r\n\r\n num_bases = len(cadnano_v2_design['vstrands'][0]['scaf'])\r\n grid_type = Grid.square\r\n if num_bases % 21 == 0:\r\n grid_type = Grid.honeycomb\r\n\r\n min_row, min_col = None, None\r\n for cadnano_helix in cadnano_v2_design['vstrands']:\r\n col, row = cadnano_helix['col'], cadnano_helix['row']\r\n min_row = row if min_row is None else min_row\r\n min_col = col if min_col is None else min_col\r\n min_row = row if row < min_row else min_row\r\n min_col = col if col < min_col else min_col\r\n\r\n helices = OrderedDict({})\r\n for cadnano_helix in cadnano_v2_design['vstrands']:\r\n col, row = cadnano_helix['col'], cadnano_helix['row']\r\n num = cadnano_helix['num']\r\n helix = Helix(idx=num, max_offset=num_bases, grid_position=(col, row))\r\n helices[num] = helix\r\n\r\n # We do a DFS on strands\r\n seen: Dict[str, dict] = {'scaf': {}, 'stap': {}}\r\n strands: List[Strand] = []\r\n cadnano_helices = OrderedDict({})\r\n for cadnano_helix in cadnano_v2_design['vstrands']:\r\n helix_num = cadnano_helix['num']\r\n cadnano_helices[helix_num] = cadnano_helix\r\n\r\n for cadnano_helix in cadnano_v2_design['vstrands']:\r\n helix_num = cadnano_helix['num']\r\n for strand_type in ['scaf', 'stap']:\r\n for base_id in range(num_bases):\r\n if (helix_num, base_id) in seen[strand_type]:\r\n continue\r\n\r\n strand = Design._cadnano_v2_import_explore_strand(cadnano_helices,\r\n strand_type,\r\n seen[strand_type], helix_num,\r\n base_id)\r\n if strand is not None:\r\n strands.append(strand)\r\n\r\n design: Design = Design(grid=grid_type, helices=helices, strands=strands)\r\n # DD: Tristan, I commented this out because I think it's unnecessary given the way the Design\r\n # constructor works, and because I'm now implementing this feature:\r\n # https://github.com/UC-Davis-molecular-computing/scadnano-python-package/issues/121\r\n # which means we may not have a well-defined helices_view_order on the whole design if groups\r\n # are used\r\n # design.set_helices_view_order([num for num in helices])\r\n\r\n return design\r\n\r\n def _all_modifications(self) -> Set[Modification]:\r\n # List of all modifications.\r\n mods_5p = {strand.modification_5p for strand in self.strands if\r\n strand.modification_5p is not None}\r\n mods_3p = {strand.modification_3p for strand in self.strands if\r\n strand.modification_3p is not None}\r\n mods_int = {mod for strand in self.strands for mod in strand.modifications_int.values()}\r\n return mods_5p | mods_3p | mods_int\r\n\r\n def strand(self, helix: int, offset: int) -> StrandBuilder:\r\n \"\"\"Used for chained method building by calling\r\n :py:meth:`Design.strand` to build the :any:`Strand` domain by domain, in order from 5' to 3'.\r\n For example\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 7).to(10).cross(1).to(5).cross(2).to(15)\r\n\r\n This creates a :any:`Strand` in this :any:`Design` equivalent to\r\n\r\n .. code-block:: Python\r\n\r\n design.add_strand(Strand([\r\n sc.Domain(0, True, 7, 10),\r\n sc.Domain(1, False, 5, 10),\r\n sc.Domain(2, True, 5, 15),\r\n ]))\r\n\r\n Loopouts can also be included:\r\n\r\n .. code-block:: Python\r\n\r\n design.strand(0, 7).to(10).cross(1).to(5).loopout(2, 3).to(15)\r\n\r\n This creates a :any:`Strand` in this :any:`Design` equivalent to\r\n\r\n .. code-block:: Python\r\n\r\n design.add_strand(Strand([\r\n sc.Domain(0, True, 7, 10),\r\n sc.Domain(1, False, 5, 10),\r\n sc.Loopout(3),\r\n sc.Domain(2, True, 5, 15),\r\n ]))\r\n\r\n Each call to\r\n :py:meth:`Design.strand`,\r\n :py:meth:`StrandBuilder.cross`,\r\n :py:meth:`StrandBuilder.loopout`,\r\n :py:meth:`StrandBuilder.to`\r\n :py:meth:`StrandBuilder.update_to`,\r\n returns a :any:`StrandBuilder` object.\r\n\r\n Each call to\r\n :py:meth:`StrandBuilder.to`,\r\n :py:meth:`StrandBuilder.update_to`,\r\n or\r\n :py:meth:`StrandBuilder.loopout`\r\n modifies the :any:`Design` by replacing the Strand with an updated version.\r\n\r\n See the documentation for :any:`StrandBuilder` for the methods available to call in this way.\r\n\r\n :param helix: starting :any:`Helix`\r\n :param offset: starting offset on `helix`\r\n :return: :any:`StrandBuilder` object representing the partially completed :any:`Strand`\r\n \"\"\"\r\n return StrandBuilder(self, helix, offset)\r\n\r\n def assign_m13_to_scaffold(self, rotation: int = 5587, variant: M13Variant = M13Variant.p7249) -> None:\r\n \"\"\"Assigns the scaffold to be the sequence of M13: :py:func:`m13` with the given `rotation`\r\n and :any:`M13Variant`.\r\n\r\n Raises :any:`IllegalDesignError` if the number of scaffolds is not exactly 1.\r\n \"\"\"\r\n scaffold = None\r\n num_scafs = 0\r\n for strand in self.strands:\r\n if strand.is_scaffold:\r\n num_scafs += 1\r\n if scaffold is None:\r\n scaffold = strand\r\n if num_scafs == 0:\r\n raise IllegalDesignError(\r\n 'Tried to assign DNA to scaffold, but there is no scaffold strand.\\n'\r\n 'You must set strand.is_scaffold to True for exactly one strand.')\r\n elif num_scafs > 1:\r\n raise IllegalDesignError(\r\n 'Tried to assign DNA to scaffold, but there are multiple scaffold strands.\\n'\r\n 'You must set strand.is_scaffold to True for exactly one strand.')\r\n if scaffold is None:\r\n raise AssertionError('we counted; there is exactly one scaffold')\r\n self.assign_dna(scaffold, m13(rotation, variant))\r\n\r\n @staticmethod\r\n def _get_multiple_of_x_sup_closest_to_y(x: int, y: int) -> int:\r\n return y if y % x == 0 else y + (x - y % x)\r\n\r\n @staticmethod\r\n def _cadnano_v2_place_strand_segment(helix_dct: Dict[str, Any], domain: Domain,\r\n strand_type: str = 'scaf') -> None:\r\n \"\"\"Converts a strand region with no crossover to cadnano v2.\r\n \"\"\"\r\n # Insertions and deletions\r\n for deletion in domain.deletions:\r\n helix_dct['skip'][deletion] = -1\r\n for loop_where, loop_nb in domain.insertions:\r\n helix_dct['loop'][loop_where] = loop_nb\r\n\r\n start, end, forward = domain.start, domain.end, domain.forward\r\n strand_helix = helix_dct['num']\r\n\r\n for i_base in range(start, end):\r\n if forward:\r\n from_helix, from_base = strand_helix, i_base - 1\r\n to_helix, to_base = strand_helix, i_base + 1\r\n else:\r\n from_helix, from_base = strand_helix, i_base + 1\r\n to_helix, to_base = strand_helix, i_base - 1\r\n\r\n if i_base == start:\r\n if forward:\r\n helix_dct[strand_type][i_base][2:] = [to_helix, to_base]\r\n else:\r\n helix_dct[strand_type][i_base][:2] = [from_helix, from_base]\r\n elif i_base < end - 1:\r\n helix_dct[strand_type][i_base] = [from_helix, from_base, to_helix, to_base]\r\n else:\r\n if forward:\r\n helix_dct[strand_type][i_base][:2] = [from_helix, from_base]\r\n else:\r\n helix_dct[strand_type][i_base][2:] = [to_helix, to_base]\r\n return\r\n\r\n @staticmethod\r\n def _cadnano_v2_place_crossover(helix_from_dct: Dict[str, Any], helix_to_dct: Dict[str, Any],\r\n domain_from: Domain, domain_to: Domain,\r\n strand_type: str = 'scaf') -> None:\r\n \"\"\"Converts a crossover to cadnano v2 format.\r\n Returns a conversion table from ids in the structure self.helices to helices ids\r\n as given by helix.idx.\r\n \"\"\"\r\n\r\n helix_from = helix_from_dct['num']\r\n start_from, end_from, forward_from = domain_from.start, domain_from.end, domain_from.forward\r\n\r\n helix_to = helix_to_dct['num']\r\n start_to, end_to = domain_to.start, domain_to.end\r\n\r\n if forward_from:\r\n helix_from_dct[strand_type][end_from - 1][2:] = [helix_to, end_to - 1]\r\n helix_to_dct[strand_type][end_to - 1][:2] = [helix_from, end_from - 1]\r\n else:\r\n helix_from_dct[strand_type][start_from][2:] = [helix_to, start_to]\r\n helix_to_dct[strand_type][start_to][:2] = [helix_from, start_from]\r\n\r\n @staticmethod\r\n def _cadnano_v2_color_of_stap(color: Color, domain: Domain) -> List[int]:\r\n base_id = domain.start if domain.forward else domain.end - 1\r\n cadnano_color = color.to_cadnano_v2_int_hex()\r\n return [base_id, cadnano_color]\r\n\r\n def _cadnano_v2_place_strand(self, strand: Strand, dct: dict,\r\n helices_ids_reverse: Dict[int, int]) -> None:\r\n \"\"\"Place a scadnano strand in cadnano v2.\r\n \"\"\"\r\n strand_type = 'stap'\r\n if hasattr(strand, is_scaffold_key) and strand.is_scaffold:\r\n strand_type = 'scaf'\r\n\r\n for i, domain in enumerate(strand.domains):\r\n if isinstance(domain, Loopout):\r\n raise ValueError(f'cannot convert Strand {strand} to cadnanov2 format, since it has Loopouts')\r\n\r\n which_helix_id = helices_ids_reverse[domain.helix]\r\n which_helix = dct['vstrands'][which_helix_id]\r\n\r\n if strand_type == 'stap':\r\n color = strand.color if strand.color is not None else Color(0, 0, 0)\r\n which_helix['stap_colors'].append(self._cadnano_v2_color_of_stap(color, domain))\r\n\r\n self._cadnano_v2_place_strand_segment(which_helix, domain, strand_type)\r\n\r\n if i != len(strand.domains) - 1:\r\n next_domain = strand.domains[i + 1]\r\n if isinstance(next_domain, Loopout):\r\n raise ValueError(\r\n f'cannot convert Strand {strand} to cadnanov2 format, since it has Loopouts')\r\n next_helix_id = helices_ids_reverse[next_domain.helix]\r\n next_helix = dct['vstrands'][next_helix_id]\r\n self._cadnano_v2_place_crossover(which_helix, next_helix,\r\n domain, next_domain, strand_type)\r\n\r\n # if the strand is circular, we need to close the loop\r\n if strand.circular:\r\n first_domain = strand.first_bound_domain()\r\n first_helix = dct['vstrands'][first_domain.helix]\r\n first_start, first_end, first_forward = first_domain.start, first_domain.end, first_domain.forward\r\n\r\n last_domain = strand.last_bound_domain()\r\n last_helix = dct['vstrands'][last_domain.helix]\r\n last_start, last_end, last_forward = last_domain.start, last_domain.end, last_domain.forward\r\n\r\n the_base_from = last_end - 1\r\n the_base_to = first_start\r\n\r\n if not last_forward:\r\n the_base_from = last_start\r\n\r\n if not first_forward:\r\n the_base_to = first_end - 1\r\n\r\n if first_helix[strand_type][the_base_to][:2] == [-1, -1]:\r\n first_helix[strand_type][the_base_to][:2] = [last_helix['num'], the_base_from]\r\n else:\r\n first_helix[strand_type][the_base_to][2:] = [last_helix['num'], the_base_from]\r\n\r\n if last_helix[strand_type][the_base_from][:2] == [-1, -1]:\r\n last_helix[strand_type][the_base_from][:2] = [first_helix['num'], the_base_to]\r\n else:\r\n last_helix[strand_type][the_base_from][2:] = [first_helix['num'], the_base_to]\r\n\r\n def _cadnano_v2_fill_blank(self, dct: dict, num_bases: int, design_grid: Grid) -> Dict[int, int]:\r\n \"\"\"Creates blank cadnanov2 helices in and initialized all their fields.\r\n \"\"\"\r\n helices_ids_reverse = {}\r\n for i, helix in self.helices.items():\r\n helix_dct: Dict[str, Any] = OrderedDict()\r\n helix_dct['num'] = helix.idx\r\n\r\n if design_grid == Grid.square or design_grid == Grid.honeycomb:\r\n assert helix.grid_position is not None\r\n helix_dct['row'] = helix.grid_position[1]\r\n helix_dct['col'] = helix.grid_position[0]\r\n\r\n helix_dct['scaf'] = []\r\n helix_dct['stap'] = []\r\n helix_dct['loop'] = []\r\n helix_dct['skip'] = []\r\n\r\n for _ in range(num_bases):\r\n helix_dct['scaf'].append([-1, -1, -1, -1])\r\n helix_dct['stap'].append([-1, -1, -1, -1])\r\n helix_dct['loop'].append(0)\r\n helix_dct['skip'].append(0)\r\n\r\n helix_dct['stap_colors'] = []\r\n helix_dct['scafLoop'] = []\r\n helix_dct['stap_loop'] = []\r\n\r\n helices_ids_reverse[helix_dct['num']] = i\r\n dct['vstrands'].append(helix_dct)\r\n return helices_ids_reverse\r\n\r\n def to_cadnano_v2(self) -> Dict[str, Any]:\r\n \"\"\"Converts the design to the cadnano v2 format.\r\n Please see the spec `misc/cadnano-format-specs/v2.txt` for more info on that format.\r\n \"\"\"\r\n dct: Dict[str, Any] = OrderedDict()\r\n dct['vstrands'] = []\r\n\r\n '''Check if helix group are used or if only one grid is used'''\r\n if self._has_default_groups():\r\n design_grid = self.grid\r\n else:\r\n grid_used = {}\r\n assert len(self.groups) > 0\r\n grid_type = Grid.none\r\n for group_name in self.groups:\r\n grid_used[self.groups[group_name].grid] = True\r\n grid_type = self.groups[group_name].grid\r\n if len(grid_used) > 1:\r\n raise ValueError('Designs using helix groups can be exported to cadnano v2 \\\r\n only if all groups share the same grid type.')\r\n else:\r\n design_grid = grid_type\r\n\r\n '''Figuring out the type of grid.\r\n In cadnano v2, all helices have the same max offset \r\n called `num_bases` and the type of grid is determined as follows:\r\n if num_bases % 32 == 0: then we are on grid square\r\n if num_bases % 21 == 0: then we are on grid honey\r\n '''\r\n num_bases = 0\r\n for helix in self.helices.values():\r\n if helix.max_offset is None:\r\n raise ValueError('must have helix.max_offset set')\r\n num_bases = max(num_bases, helix.max_offset)\r\n\r\n if design_grid == Grid.square:\r\n num_bases = self._get_multiple_of_x_sup_closest_to_y(32, num_bases)\r\n elif design_grid == Grid.honeycomb:\r\n num_bases = self._get_multiple_of_x_sup_closest_to_y(21, num_bases)\r\n else:\r\n raise NotImplementedError('We can export to cadnano v2 `square` and `honeycomb` grids only.')\r\n\r\n '''Figuring out if helices numbers have good parity.\r\n In cadnano v2, only even helices have the scaffold go forward, only odd helices\r\n have the scaffold go backward.\r\n\r\n '''\r\n for strand in self.strands:\r\n for domain in strand.domains:\r\n if isinstance(domain, Loopout):\r\n raise ValueError(\r\n 'We cannot handle designs with Loopouts as it is not a cadnano v2 concept')\r\n right_direction: bool\r\n if hasattr(strand, is_scaffold_key) and strand.is_scaffold:\r\n right_direction = (domain.helix % 2 == int(not domain.forward))\r\n else:\r\n right_direction = not (domain.helix % 2 == int(not domain.forward))\r\n\r\n if not right_direction:\r\n raise ValueError('We can only convert designs where even helices have the scaffold'\r\n 'going forward and odd helices have the scaffold going backward see '\r\n f'the spec v2.txt Note 4. {domain}')\r\n\r\n '''Filling the helices with blank.\r\n '''\r\n helices_ids_reverse = self._cadnano_v2_fill_blank(dct, num_bases, design_grid)\r\n '''Putting the scaffold in place.\r\n '''\r\n\r\n for strand in self.strands:\r\n self._cadnano_v2_place_strand(strand, dct, helices_ids_reverse)\r\n\r\n return dct\r\n\r\n def set_helices_view_order(self, helices_view_order: List[int]) -> None:\r\n \"\"\"\r\n Sets helices_view_order.\r\n\r\n :param helices_view_order: new view order of helices\r\n \"\"\"\r\n if not self._has_default_groups():\r\n raise ValueError('cannot call set_helices_view_order on a Design that uses HelixGroups')\r\n group = self._default_group()\r\n group.helices_view_order = helices_view_order\r\n _check_helices_view_order_is_bijection(helices_view_order,\r\n self.helices_idxs_in_group(default_group_name))\r\n\r\n def _default_group(self) -> HelixGroup:\r\n if not self._has_default_groups():\r\n raise ValueError('cannot call _default_group on a Design that uses HelixGroups')\r\n groups_list = list(self.groups.values())\r\n return groups_list[0]\r\n\r\n def _set_helices_grid_positions_or_positions(self) -> None:\r\n for name, group in self.groups.items():\r\n for idx in self.helices_idxs_in_group(name):\r\n helix = self.helices[idx]\r\n if group.grid != Grid.none and helix.grid_position is None:\r\n helix.grid_position = (0, group.helices_view_order_inverse(idx))\r\n elif group.grid == Grid.none and helix.position is None:\r\n y_delta = self.geometry.distance_between_helices()\r\n y = y_delta * group.helices_view_order_inverse(idx)\r\n helix.position = Position3D(x=0, y=y, z=0)\r\n\r\n def _set_helices_min_max_offsets(self, update: bool) -> None:\r\n \"\"\"update = whether to overwrite existing Helix.max_offset and Helix.min_offset.\r\n Don't do this when Design is first created, but do it later when updating.\"\"\"\r\n for helix in self.helices.values():\r\n\r\n if update or helix.max_offset is None:\r\n max_offset = None if len(helix.domains) == 0 else helix.domains[0].end\r\n for domain in helix.domains:\r\n max_offset = max(max_offset, domain.end)\r\n helix.max_offset = max_offset\r\n\r\n if update or helix.min_offset is None:\r\n min_offset = None if len(helix.domains) == 0 else helix.domains[0].start\r\n for domain in helix.domains:\r\n min_offset = min(min_offset, domain.start)\r\n if min_offset is None or min_offset > 0:\r\n min_offset = 0\r\n helix.min_offset = min_offset\r\n\r\n def strands_starting_on_helix(self, helix: int) -> List[Strand]:\r\n \"\"\"Return list of :any:`Strand`'s that begin (have their 5' end)\r\n on the :any:`Helix` with index `helix`.\"\"\"\r\n return [strand for strand in self.strands\r\n if isinstance(strand.domains[0], Domain) and strand.domains[0].helix == helix]\r\n\r\n def strands_ending_on_helix(self, helix: int) -> List[Strand]:\r\n \"\"\"Return list of :any:`Strand`'s that finish (have their 3' end)\r\n on the :any:`Helix` with index `helix`.\"\"\"\r\n return [strand for strand in self.strands\r\n if isinstance(strand.domains[-1], Domain) and strand.domains[-1].helix == helix]\r\n\r\n def _check_legal_design(self) -> None:\r\n self._check_helix_offsets()\r\n self._check_strands_reference_helices_legally()\r\n self._check_loopouts_not_consecutive_or_singletons_or_zero_length()\r\n self._check_loopouts_not_first_or_last_substrand()\r\n self._check_strands_overlap_legally()\r\n self._warn_if_strand_names_not_unique()\r\n\r\n # TODO: come up with reasonable default behavior when no strands are on helix and max_offset not given\r\n def _check_helix_offsets(self) -> None:\r\n for helix in self.helices.values():\r\n if helix.min_offset is not None \\\r\n and helix.max_offset is not None \\\r\n and helix.min_offset >= helix.max_offset:\r\n err_msg = f'for helix {helix.idx}, ' \\\r\n f'helix.min_offset = {helix.min_offset} must be strictly less than ' \\\r\n f'helix.max_offset = {helix.max_offset}'\r\n raise IllegalDesignError(err_msg)\r\n\r\n def _check_strands_overlap_legally(self, domain_to_check: Optional[Domain] = None) -> None:\r\n \"\"\"If `Domain_to_check` is None, check all.\r\n Otherwise only check pairs where one is domain_to_check.\"\"\"\r\n\r\n def err_msg(d1: Domain, d2: Domain, h_idx: int) -> str:\r\n return f\"two domains overlap on helix {h_idx}: \" \\\r\n f\"\\n{d1}\\n and\\n{d2}\\n but have the same direction\"\r\n\r\n # ensure that if two strands overlap on the same helix,\r\n # they point in opposite directions\r\n for helix_idx, domains in enumerate(helix.domains for helix in self.helices.values()):\r\n if domain_to_check is not None and domain_to_check.helix != helix_idx:\r\n # TODO: if necessary, we can be more efficient by only checking this one domain\r\n continue\r\n\r\n if len(domains) == 0:\r\n continue\r\n\r\n # check all consecutive domains on the same helix, sorted by start/end indices\r\n offsets_data = []\r\n for domain in domains:\r\n offsets_data.append((domain.start, True, domain))\r\n offsets_data.append((domain.end, False, domain))\r\n offsets_data.sort(key=lambda offset_data: offset_data[0])\r\n\r\n current_domains: List[Domain] = []\r\n for offset, is_start, domain in offsets_data:\r\n if is_start:\r\n if len(current_domains) >= 2:\r\n if offset >= current_domains[1].end:\r\n del current_domains[1]\r\n if len(current_domains) >= 1:\r\n if offset >= current_domains[0].end:\r\n del current_domains[0]\r\n current_domains.append(domain)\r\n if len(current_domains) > 2:\r\n domain0, domain1, domain2 = current_domains[0:3]\r\n for d_first, d_second in [(domain0, domain1), (domain1, domain2), (domain0, domain2)]:\r\n if d_first.forward == d_second.forward:\r\n raise IllegalDesignError(err_msg(d_first, d_second, helix_idx))\r\n raise AssertionError(\r\n f\"since current_domains = {current_domains} has at least three domains, \"\r\n f\"I expected to find a pair of illegally overlapping domains\")\r\n elif len(current_domains) == 2:\r\n d_first, d_second = current_domains\r\n if d_first.forward == d_second.forward:\r\n raise IllegalDesignError(err_msg(d_first, d_second, helix_idx))\r\n\r\n def _check_loopouts_not_consecutive_or_singletons_or_zero_length(self) -> None:\r\n for strand in self.strands:\r\n Design._check_loopout_not_singleton(strand)\r\n Design._check_two_consecutive_loopouts(strand)\r\n Design._check_loopouts_length(strand)\r\n\r\n def _check_loopouts_not_first_or_last_substrand(self) -> None:\r\n for strand in self.strands:\r\n if isinstance(strand.first_domain(), Loopout):\r\n raise StrandError(strand, 'strand cannot have a Loopout as its first domain')\r\n if isinstance(strand.last_domain(), Loopout):\r\n raise StrandError(strand, 'strand cannot have a Loopout as its last domain')\r\n\r\n @staticmethod\r\n def _check_loopout_not_singleton(strand: Strand) -> None:\r\n if len(strand.domains) == 1 and isinstance(strand.first_domain(), Loopout):\r\n raise StrandError(strand, 'strand cannot have a single Loopout as its only domain')\r\n\r\n @staticmethod\r\n def _check_two_consecutive_loopouts(strand: Strand) -> None:\r\n for domain1, domain2 in _pairwise(strand.domains):\r\n if isinstance(domain1, Loopout) and isinstance(domain2, Loopout):\r\n raise StrandError(strand, 'cannot have two consecutive Loopouts in a strand')\r\n\r\n @staticmethod\r\n def _check_loopouts_length(strand: Strand) -> None:\r\n for loopout in strand.domains:\r\n if isinstance(loopout, Loopout) and loopout.length <= 0:\r\n raise StrandError(strand, f'loopout length must be positive but is {loopout.length}')\r\n\r\n def _check_strands_reference_helices_legally(self) -> None:\r\n # ensure each strand refers to an existing helix\r\n for strand in self.strands:\r\n self._check_strand_references_legal_helices(strand)\r\n self._check_strand_has_legal_offsets_in_helices(strand)\r\n\r\n def _check_strand_references_legal_helices(self, strand: Strand) -> None:\r\n for domain in strand.domains:\r\n if isinstance(domain, Domain) and domain.helix not in self.helices:\r\n err_msg = f\"domain {domain} refers to nonexistent Helix index {domain.helix}; \" \\\r\n f\"here is the list of valid helices: {self._helices_to_string()}\"\r\n raise StrandError(strand, err_msg)\r\n\r\n def _check_strand_has_legal_offsets_in_helices(self, strand: Strand) -> None:\r\n for domain in strand.domains:\r\n if isinstance(domain, Domain):\r\n helix = self.helices[domain.helix]\r\n if helix.min_offset is not None and domain.start < helix.min_offset:\r\n err_msg = f\"domain {domain} has start offset {domain.start}, \" \\\r\n f\"beyond the end of \" \\\r\n f\"Helix {domain.helix} that has min_offset = {helix.min_offset}\"\r\n raise StrandError(strand, err_msg)\r\n if helix.max_offset is not None and domain.end > helix.max_offset:\r\n err_msg = f\"domain {domain} has end offset {domain.end}, \" \\\r\n f\"beyond the end of \" \\\r\n f\"Helix {domain.helix} that has max_offset = {helix.max_offset}\"\r\n raise StrandError(strand, err_msg)\r\n\r\n # ensure helix_idx's are never negative twice in a row\r\n for domain1, domain2 in _pairwise(strand.domains):\r\n if isinstance(domain1, Loopout) and isinstance(domain2, Loopout):\r\n err_msg = f\"Loopouts {domain1} and {domain2} are consecutive on strand {strand}. \" \\\r\n f\"At least one of any consecutive pair must be a Domain, not a Loopout.\"\r\n raise StrandError(strand, err_msg)\r\n\r\n def set_helix_idx(self, old_idx: int, new_idx: int) -> None:\r\n if new_idx in self.helices:\r\n raise IllegalDesignError(f'cannot assign idx {new_idx} to helix {old_idx}; '\r\n 'another helix already has that index')\r\n helix: Helix = self.helices[old_idx]\r\n del self.helices[old_idx]\r\n self.helices[new_idx] = helix\r\n helix.idx = new_idx\r\n for domain in helix.domains:\r\n domain.helix = new_idx\r\n\r\n def domain_at(self, helix: int, offset: int, forward: bool) -> Optional[Domain]:\r\n \"\"\"\r\n Return :any:`Domain` that overlaps `offset` on helix with idx `helix` and has\r\n :py:data:`Domain.forward` = ``True``, or ``None`` if there is no such :any:`Domain`.\r\n\r\n :param helix: TODO\r\n :param offset: TODO\r\n :param forward: TODO\r\n :return: TODO\r\n \"\"\"\r\n for domain in self.domains_at(helix, offset):\r\n if domain.forward == forward:\r\n return domain\r\n return None\r\n\r\n def domains_at(self, helix: int, offset: int) -> List[Domain]:\r\n \"\"\"Return list of :any:`Domain`'s that overlap `offset` on helix with idx `helix`.\r\n\r\n If constructed properly, this list should have 0, 1, or 2 elements.\"\"\"\r\n domains_on_helix = self.helices[helix].domains\r\n # TODO: replace this with a faster algorithm using binary search\r\n domains_on_helix = [domain for domain in domains_on_helix if\r\n domain.contains_offset(offset)]\r\n if len(domains_on_helix) not in [0, 1, 2]:\r\n raise AssertionError(f'There should be at most 2 domains on helix {helix}, '\r\n f'but there are {len(domains_on_helix)}:\\n{domains_on_helix}')\r\n return domains_on_helix\r\n\r\n # TODO: add_strand and insert_domain should check for existing deletions/insertion parallel strands\r\n def add_strand(self, strand: Strand) -> None:\r\n \"\"\"Add `strand` to this design.\"\"\"\r\n self._check_strand_references_legal_helices(strand)\r\n self.strands.append(strand)\r\n for domain in strand.domains:\r\n if isinstance(domain, Domain):\r\n self.helices[domain.helix].domains.append(domain)\r\n self._check_strands_overlap_legally(domain_to_check=domain)\r\n if self.automatically_assign_color:\r\n self._assign_color_to_strand(strand)\r\n\r\n def remove_strand(self, strand: Strand) -> None:\r\n \"\"\"Remove `strand` from this design.\"\"\"\r\n self.strands.remove(strand)\r\n for domain in strand.domains:\r\n if isinstance(domain, Domain):\r\n self.helices[domain.helix].domains.remove(domain)\r\n\r\n def append_domain(self, strand: Strand, domain: Union[Domain, Loopout]) -> None:\r\n \"\"\"\r\n Same as :any:`Design.insert_domain`, but inserts at end.\r\n\r\n :param strand: strand to append `domain` to\r\n :param domain: :any:`Domain` or :any:`Loopout` to append to :any:`Strand`\r\n \"\"\"\r\n self.insert_domain(strand, len(strand.domains), domain)\r\n\r\n def insert_domain(self, strand: Strand, order: int, domain: Union[Domain, Loopout]) -> None:\r\n \"\"\"Insert `Domain` into `strand` at index given by `order`. Uses same indexing as Python lists,\r\n e.g., ``design.insert_domain(strand, domain, 0)``\r\n inserts ``domain`` as the new first :any:`Domain`.\"\"\"\r\n if isinstance(domain, Domain) and domain.helix not in self.helices:\r\n err_msg = f\"domain {domain} refers to nonexistent Helix index {domain.helix}; \" \\\r\n f\"here is the list of valid helices: {self._helices_to_string()}\"\r\n raise StrandError(strand, err_msg)\r\n\r\n assert strand in self.strands\r\n strand.insert_domain(order, domain)\r\n self._check_strand_references_legal_helices(strand)\r\n self._check_loopouts_not_consecutive_or_singletons_or_zero_length()\r\n if isinstance(domain, Domain):\r\n self.helices[domain.helix].domains.append(domain)\r\n self._check_strands_overlap_legally(domain_to_check=domain)\r\n\r\n def remove_domain(self, strand: Strand, domain: Union[Domain, Loopout]) -> None:\r\n \"\"\"Remove `Domain` from `strand`.\"\"\"\r\n assert strand in self.strands\r\n strand.remove_domain(domain)\r\n if isinstance(domain, Domain):\r\n self.helices[domain.helix].domains.remove(domain)\r\n\r\n def _build_domains_on_helix_lists(self) -> None:\r\n for helix in self.helices.values():\r\n helix._domains = []\r\n for strand in self.strands:\r\n for domain in strand.domains:\r\n if isinstance(domain, Domain):\r\n if domain.helix in self.helices:\r\n self.helices[domain.helix].domains.append(domain)\r\n else:\r\n msg = f\"domain's helix is {domain.helix} but no helix has that index; here \" \\\r\n f\"is the list of helix indices: {self._helices_to_string()}\"\r\n raise StrandError(strand=strand, the_cause=msg)\r\n\r\n def _helices_to_string(self) -> str:\r\n return ', '.join(map(str, self.helices.keys()))\r\n\r\n @_docstring_parameter(default_extension=default_scadnano_file_extension)\r\n def to_json(self, suppress_indent: bool = True) -> str:\r\n \"\"\"Return string representing this Design, suitable for reading by scadnano if written to\r\n a JSON file ending in extension .{default_extension}\"\"\"\r\n # if isinstance(self, DNAOrigamiDesign):\r\n # scaf = None\r\n # for strand in self.strands:\r\n # if strand.is_scaffold == True:\r\n # scaf = strand\r\n # break\r\n # if self.scaffold is None:\r\n # msg = 'No scaffold specified for Design. You can delay assigning the scaffold ' \\\r\n # 'until after creating the Design object, but you must assign a scaffold ' \\\r\n # 'using the method Strand.set_scaffold() before calling to_json().'\r\n # if scaf is not None:\r\n # msg += f'There is a strand marked as a scaffold. Try calling set_scaffold with it as ' \\\r\n # f'a parameter:\\n{scaf}'\r\n # raise IllegalDesignError(msg)\r\n return _json_encode(self, suppress_indent)\r\n\r\n # TODO: create version of add_deletion and add_insertion that simply changes the major tick distance\r\n # on the helix at that position, as well as updating the end offset of the domain (and subsequent\r\n # domains on the same helix)\r\n\r\n def add_deletion(self, helix: int, offset: int) -> None:\r\n \"\"\"Adds a deletion to every :class:`scadnano.Strand` at the given helix and base offset.\"\"\"\r\n domains = self.domains_at(helix, offset)\r\n if len(domains) == 0:\r\n raise IllegalDesignError(f\"no domains are at helix {helix} offset {offset}\")\r\n for domain in domains:\r\n if domain.contains_offset(offset):\r\n domain.deletions.append(offset)\r\n\r\n def add_insertion(self, helix: int, offset: int, length: int) -> None:\r\n \"\"\"Adds an insertion with the given length to every :class:`scadnano.Strand`\r\n at the given helix and base offset, with the given length.\"\"\"\r\n domains = self.domains_at(helix, offset)\r\n if len(domains) == 0:\r\n raise IllegalDesignError(f\"no domains are at helix {helix} offset {offset}\")\r\n for domain in domains:\r\n if domain.contains_offset(offset):\r\n domain.insertions.append((offset, length))\r\n\r\n def set_start(self, domain: Domain, start: int) -> None:\r\n \"\"\"Sets ``Domain.start`` to `start`.\"\"\"\r\n assert domain in (domain for strand in self.strands for domain in strand.domains)\r\n domain.set_start(start)\r\n self._check_strands_overlap_legally(domain)\r\n\r\n def set_end(self, domain: Domain, end: int) -> None:\r\n \"\"\"Sets ``Domain.end`` to `end`.\"\"\"\r\n assert domain in (domain for strand in self.strands for domain in strand.domains)\r\n domain.set_end(end)\r\n self._check_strands_overlap_legally(domain)\r\n\r\n def move_strand_offsets(self, delta: int) -> None:\r\n \"\"\"Moves all strands backward (if `delta` < 0) or forward (if `delta` > 0) by `delta`.\"\"\"\r\n for strand in self.strands:\r\n for domain in strand.bound_domains():\r\n domain.start += delta\r\n domain.end += delta\r\n self._check_strands_overlap_legally()\r\n\r\n def move_strands_on_helices(self, delta: int) -> None:\r\n \"\"\"Moves all strands up (if `delta` < 0) or down (if `delta` > 0) by the number of helices given by\r\n `delta`.\"\"\"\r\n for strand in self.strands:\r\n for domain in strand.bound_domains():\r\n domain.helix += delta\r\n self._check_strands_reference_helices_legally()\r\n\r\n def assign_dna(self, strand: Strand, sequence: str, assign_complement: bool = True,\r\n domain: Union[Domain, Loopout] = None, check_length: bool = False) -> None:\r\n \"\"\"\r\n Assigns `sequence` as DNA sequence of `strand`.\r\n\r\n If any :class:`scadnano.Strand` is bound to `strand`,\r\n it is assigned the reverse Watson-Crick complement of the relevant portion,\r\n and any remaining portions of the other strand that have not already been assigned a DNA sequence\r\n are assigned to be the symbol :py:data:`DNA_base_wildcard`.\r\n\r\n Before assigning, `sequence` is first forced to be the same length as `strand` as follows:\r\n If `sequence` is longer, it is truncated.\r\n If `sequence` is shorter, it is padded with :py:data:`DNA_base_wildcard`'s.\r\n This can be disabled by setting `check_length` to True, in which case the method raises an\r\n :any:`IllegalDesignError` if the lengths do not match.\r\n\r\n All whitespace in `sequence` is removed, and lowercase bases\r\n 'a', 'c', 'g', 't' are converted to uppercase.\r\n\r\n :param strand:\r\n :any:`Strand` to assign DNA sequence to\r\n :param sequence:\r\n string of DNA bases to assign\r\n :param assign_complement:\r\n Whether to assign the complement DNA sequence to any :any:`Strand` that\r\n is bound to this one (default True)\r\n :param domain:\r\n :any:`Domain` on `strand` to assign. If ``None``, then the whole :any:`Strand` is\r\n given a DNA sequence. Otherwise, only `domain` is assigned, and the rest of the :any:`Domain`'s\r\n on `strand` are left alone (either keeping their DNA sequence, or being assigned\r\n :py:const:`DNA_base_wildcard` if no DNA sequence was previously assigned.)\r\n If `domain` is specified, then ``len(sequence)`` must be least than or equal to the number\r\n of bases on `domain`. (i.e., ``domain.dna_length()``)\r\n :param check_length:\r\n If True, raises :any:`IllegalDesignError` if length of :any:`Strand` or :any:`Domain` being\r\n assigned to does not match the length of the DNA sequence.\r\n :raises IllegalDesignError:\r\n If `check_length` is True and the length of :any:`Strand` or :any:`Domain` being\r\n assigned to does not match the length of the DNA sequence.\r\n \"\"\"\r\n start = 0\r\n if domain is not None:\r\n pos = strand.domains.index(domain)\r\n start = sum(prev_dom.dna_length() for prev_dom in strand.domains[:pos])\r\n if domain.dna_length() < len(sequence):\r\n raise IllegalDesignError(f'cannot assign sequence {sequence} to strand domain '\r\n f'\\n{domain}\\n'\r\n f'The number of bases on the domain is {domain.dna_length()} '\r\n f'but the length of the sequence is {len(sequence)}. The length of '\r\n f'the sequence must be at most the number of bases on the domain.')\r\n elif domain.dna_length() > len(sequence) and check_length:\r\n raise IllegalDesignError(f'cannot assign sequence {sequence} to strand domain '\r\n f'\\n{domain}\\n'\r\n f'The number of bases on the domain is {domain.dna_length()} '\r\n f'but the length of the sequence is {len(sequence)}. Since the '\r\n f'parameter `check_length` is set to True, these lengths must '\r\n f'be exactly equal.')\r\n elif check_length and len(sequence) != strand.dna_length():\r\n raise IllegalDesignError(f'cannot assign sequence {sequence} to strand '\r\n f'\\n{strand}\\n'\r\n f'The number of bases on the strand is {strand.dna_length()} '\r\n f'but the length of the sequence is {len(sequence)}. Since the '\r\n f'parameter `check_length` is set to True, these lengths must '\r\n f'be exactly equal.')\r\n\r\n padded_sequence = _pad_and_remove_whitespace_and_uppercase(sequence, strand, start)\r\n\r\n if strand is None:\r\n raise IllegalDesignError('strand cannot be None to assign DNA to it')\r\n if strand not in self.strands:\r\n raise StrandError(strand, 'strand is not in the given Design')\r\n\r\n if strand.dna_sequence is None:\r\n merged_sequence = padded_sequence\r\n else:\r\n try:\r\n merged_sequence = _string_merge_wildcard(strand.dna_sequence, padded_sequence,\r\n DNA_base_wildcard)\r\n except ValueError:\r\n first_domain = strand.first_domain()\r\n msg = f'strand starting at helix {first_domain.helix}, offset {first_domain.offset_5p()} has ' \\\r\n f'length ' \\\r\n f'{strand.dna_length()} and already has a DNA sequence assignment of length ' \\\r\n f'{len(strand.dna_sequence)}, which is \\n' \\\r\n f'{strand.dna_sequence}, ' \\\r\n f'but you tried to assign a different sequence of length {len(padded_sequence)} to ' \\\r\n f'it, which is\\n{padded_sequence}.'\r\n raise IllegalDesignError(msg)\r\n\r\n strand.set_dna_sequence(merged_sequence)\r\n\r\n if not assign_complement:\r\n return\r\n\r\n for other_strand in self.strands:\r\n # note that possibly strand==other_strand; it might bind to itself at some point and we want to\r\n # allow a partial assignment to one domain to automatically assign the complement to the\r\n # bound domain.\r\n # However, if there are no wildcards in the assigned sequence we can safely skip strand.\r\n if strand == other_strand \\\r\n and strand.dna_sequence is not None \\\r\n and DNA_base_wildcard not in strand.dna_sequence:\r\n continue\r\n if other_strand.overlaps(strand):\r\n # we do this even if other_strand has a complete DNA sequence,\r\n # because we get complementarity checks this way\r\n other_strand.assign_dna_complement_from(strand)\r\n\r\n def to_idt_bulk_input_format(self,\r\n delimiter: str = ',',\r\n key: Optional[KeyFunction[Strand]] = None,\r\n warn_duplicate_name: bool = False,\r\n only_strands_with_idt: bool = False,\r\n export_scaffold: bool = False,\r\n export_non_modified_strand_version: bool = False) -> str:\r\n \"\"\"Called by :py:meth:`Design.write_idt_bulk_input_file` to determine what string to write to\r\n the file. This function can be used to get the string directly without creating a file.\r\n\r\n Parameters have the same meaning as in :py:meth:`Design.write_idt_bulk_input_file`.\r\n\r\n :return:\r\n string that is written to the file in the method :py:meth:`Design.write_idt_bulk_input_file`.\r\n \"\"\"\r\n strands_to_export = self._idt_strands_to_export(key=key, warn_duplicate_name=warn_duplicate_name,\r\n only_strands_with_idt=only_strands_with_idt,\r\n export_scaffold=export_scaffold,\r\n export_non_modified_strand_version=export_non_modified_strand_version)\r\n print('hello')\r\n idt_lines: List[str] = []\r\n for strand in strands_to_export:\r\n if strand.idt is None and only_strands_with_idt:\r\n raise AssertionError(f'cannot export strand {strand} to IDT because it has no IDT field; '\r\n f'since only_strands_with_idt is True, '\r\n f'this strand should have been filtered out by _idt_strands_to_export')\r\n if strand.idt is not None:\r\n scale = strand.idt.scale\r\n purification = strand.idt.scale\r\n else:\r\n scale = default_idt_scale\r\n purification = default_idt_purification\r\n idt_lines.append(delimiter.join(\r\n [strand.idt_export_name(), strand.idt_dna_sequence(),\r\n scale, purification]\r\n ))\r\n\r\n idt_string = '\\n'.join(idt_lines)\r\n return idt_string\r\n\r\n def _idt_strands_to_export(self, *,\r\n key: Optional[KeyFunction[Strand]] = None, # for sorting strands\r\n warn_duplicate_name: bool,\r\n only_strands_with_idt: bool = False,\r\n export_scaffold: bool = False,\r\n export_non_modified_strand_version: bool = False) -> List[Strand]:\r\n # gets list of strands to export for IDT export functions\r\n added_strands: Dict[str, Strand] = {} # dict: name -> strand\r\n for strand in self.strands:\r\n # skip scaffold unless requested to export\r\n if strand.is_scaffold and not export_scaffold:\r\n continue\r\n\r\n # skip strands with no IDT field unless requested to export\r\n if strand.idt is None and only_strands_with_idt:\r\n continue\r\n\r\n # figure out what name to export\r\n name = strand.idt_export_name()\r\n\r\n if name in added_strands:\r\n existing_strand = added_strands[name]\r\n self._check_strands_with_same_name_agree_on_other_idt_fields(strand, existing_strand, name,\r\n warn_duplicate_name)\r\n\r\n added_strands[name] = strand\r\n if export_non_modified_strand_version:\r\n added_strands[name + '_nomods'] = strand.no_modifications_version()\r\n\r\n strands = list(added_strands.values())\r\n if key is not None:\r\n strands.sort(key=key)\r\n return strands\r\n\r\n @staticmethod\r\n def _check_strands_with_same_name_agree_on_other_idt_fields(strand: Strand, existing_strand: Strand,\r\n name: str, warn_duplicate_name: bool) -> None:\r\n # Handle the case that two strands being exported, strand and existing_strand\r\n # (the latter was encountered first) have the same name\r\n # This is allowed in case one wants to draw multiple copies of the same strand\r\n # in scadnano without having to worry about setting their idt fields differently.\r\n # But then we need to check that they agree on everything being exported.\r\n if existing_strand.name is not None:\r\n assert existing_strand.name == name\r\n domain = strand.first_domain()\r\n existing_domain = existing_strand.first_domain()\r\n if warn_duplicate_name:\r\n print(\r\n f'WARNING: two strands with same IDT name {name}:\\n'\r\n f' strand 1: helix {domain.helix}, 5\\' end at offset {domain.offset_5p()}\\n'\r\n f' strand 2: helix {existing_domain.helix}, 5\\' end at offset '\r\n f'{existing_domain.offset_5p()}\\n')\r\n if strand.dna_sequence != existing_strand.dna_sequence:\r\n raise IllegalDesignError(\r\n f'two strands with same IDT name {name} but different sequences:\\n'\r\n f' strand 1: helix {domain.helix}, 5\\' end at offset {domain.offset_5p()}, '\r\n f'sequence: {strand.dna_sequence}\\n'\r\n f' strand 2: helix {existing_domain.helix}, 5\\' end at offset '\r\n f'{existing_domain.offset_5p()}, '\r\n f'sequence: {existing_strand.dna_sequence}\\n')\r\n elif strand.idt is not None \\\r\n and existing_strand.idt is not None:\r\n if strand.idt.scale != existing_strand.idt.scale:\r\n raise IllegalDesignError(\r\n f'two strands with same name {name} but different IDT scales:\\n'\r\n f' strand 1: helix {domain.helix}, 5\\' end at offset {domain.offset_5p()}, '\r\n f'scale: {strand.idt.scale}\\n'\r\n f' strand 2: helix {existing_domain.helix}, 5\\' end at offset '\r\n f'{existing_domain.offset_5p()}, '\r\n f'scale: {existing_strand.idt.scale}\\n')\r\n elif strand.idt.purification != existing_strand.idt.purification:\r\n raise IllegalDesignError(\r\n f'two strands with same name {name} but different purifications:\\n'\r\n f' strand 1: helix {domain.helix}, 5\\' end at offset {domain.offset_5p()}, '\r\n f'purification: {strand.idt.purification}\\n'\r\n f' strand 2: helix {existing_domain.helix}, 5\\' end at offset '\r\n f'{existing_domain.offset_5p()}, '\r\n f'purification: {existing_strand.idt.purification}\\n')\r\n\r\n def write_idt_bulk_input_file(self, *, directory: str = '.', filename: str = None,\r\n key: Optional[KeyFunction[Strand]] = None,\r\n extension: Optional[str] = None,\r\n delimiter: str = ',',\r\n warn_duplicate_name: bool = True,\r\n only_strands_with_idt: bool = False,\r\n export_scaffold: bool = False,\r\n export_non_modified_strand_version: bool = False) -> None:\r\n \"\"\"Write ``.idt`` text file encoding the strands of this :any:`Design` with the field\r\n :any:`Strand.idt`, suitable for pasting into the \"Bulk Input\" field of IDT\r\n (Integrated DNA Technologies, Coralville, IA, https://www.idtdna.com/),\r\n with the output file having the same name as the running script but with ``.py`` changed to ``.idt``,\r\n unless `filename` is explicitly specified.\r\n For instance, if the script is named ``my_origami.py``,\r\n then the sequences will be written to ``my_origami.idt``.\r\n If `filename` is not specified but `extension` is, then that extension is used instead of ``idt``.\r\n At least one of `filename` or `extension` must be ``None``.\r\n\r\n The string written is that returned by :meth:`Design.to_idt_bulk_input_format`.\r\n\r\n :param directory:\r\n specifies a directory in which to place the file, either absolute or relative to\r\n the current working directory. Default is the current working directory.\r\n :param filename:\r\n optinoal custom filename to use (instead of currently running script)\r\n :param key:\r\n `key function `_ used to determine\r\n order in which to output strand sequences. Some useful defaults are provided by\r\n :py:meth:`strand_order_key_function`\r\n :param extension:\r\n alternate filename extension to use (instead of idt)\r\n :param delimiter:\r\n is the symbol to delimit the four IDT fields name,sequence,scale,purification.\r\n :param warn_duplicate_name:\r\n if ``True`` prints a warning when two different :any:`Strand`'s have the same\r\n :py:attr:`IDTField.name` and the same :any:`Strand.dna_sequence`. An :any:`IllegalDesignError` is\r\n raised (regardless of the value of this parameter)\r\n if two different :any:`Strand`'s have the same name but different sequences, IDT scales, or IDT\r\n purifications.\r\n :param only_strands_with_idt:\r\n If False (the default), all non-scaffold sequences are output, with reasonable default values\r\n chosen if the field :py:data:`Strand.idt` is missing.\r\n (though scaffold is included if `export_scaffold` is True).\r\n If True, then strands lacking the field :any:`Strand.idt` will not be exported.\r\n :param export_scaffold:\r\n If False (the default), scaffold sequences are not exported.\r\n If True, scaffold sequences on strands output according to `only_strands_with_idt`\r\n (i.e., scaffolds will be exported, unless they lack the field :any:`Strand.idt` and\r\n `only_strands_with_idt` is True).\r\n :param export_non_modified_strand_version:\r\n For any :any:`Strand` with a :any:`Modification`, also export a version of the :any:`Strand`\r\n without any modifications. The name for this :any:`Strand` is the original name with\r\n '_nomods' appended to it.\r\n \"\"\"\r\n contents = self.to_idt_bulk_input_format(delimiter=delimiter,\r\n key=key,\r\n warn_duplicate_name=warn_duplicate_name,\r\n only_strands_with_idt=only_strands_with_idt,\r\n export_scaffold=export_scaffold,\r\n export_non_modified_strand_version=export_non_modified_strand_version)\r\n if extension is None:\r\n extension = 'idt'\r\n _write_file_same_name_as_running_python_script(contents, extension, directory, filename)\r\n\r\n def write_idt_plate_excel_file(self, *, directory: str = '.', filename: str = None,\r\n key: Optional[KeyFunction[Strand]] = None,\r\n warn_duplicate_name: bool = False,\r\n only_strands_with_idt: bool = False,\r\n export_scaffold: bool = False,\r\n use_default_plates: bool = True, warn_using_default_plates: bool = True,\r\n plate_type: PlateType = PlateType.wells96,\r\n export_non_modified_strand_version: bool = False) -> None:\r\n \"\"\"\r\n Write ``.xls`` (Microsoft Excel) file encoding the strands of this :any:`Design` with the field\r\n :py:data:`Strand.idt`, suitable for uploading to IDT\r\n (Integrated DNA Technologies, Coralville, IA, https://www.idtdna.com/)\r\n to describe a 96-well or 384-well plate\r\n (https://www.idtdna.com/site/order/plate/index/dna/),\r\n with the output file having the same name as the running script but with ``.py`` changed to ``.xls``,\r\n unless `filename` is explicitly specified.\r\n For instance, if the script is named ``my_origami.py``,\r\n then the sequences will be written to ``my_origami.xls``.\r\n\r\n :param directory:\r\n specifies a directory in which to place the file, either absolute or relative to\r\n the current working directory. Default is the current working directory.\r\n :param filename:\r\n custom filename if default (explained above) is not desired\r\n :param key:\r\n `key function `_ used to determine\r\n order in which to output strand sequences. Some useful defaults are provided by\r\n :py:meth:`strand_order_key_function`\r\n :param warn_duplicate_name:\r\n if ``True`` prints a warning when two different :any:`Strand`'s have the same\r\n :py:attr:`IDTField.name` and the same :any:`Strand.dna_sequence`. An :any:`IllegalDesignError` is\r\n raised (regardless of the value of this parameter)\r\n if two different :any:`Strand`'s have the same name but different sequences, IDT scales, or IDT\r\n purifications.\r\n :param only_strands_with_idt:\r\n If False (the default), all non-scaffold sequences are output, with reasonable default values\r\n chosen if the field :py:data:`Strand.idt` is missing.\r\n (though scaffold is included if `export_scaffold` is True).\r\n If True, then strands lacking the field :any:`Strand.idt` will not be exported.\r\n If False, then `use_default_plates` must be True.\r\n :param export_scaffold:\r\n If False (the default), scaffold sequences are not exported.\r\n If True, scaffold sequences on strands output according to `only_strands_with_idt`\r\n (i.e., scaffolds will be exported, unless they lack the field :any:`Strand.idt` and\r\n `only_strands_with_idt` is True).\r\n :param use_default_plates:\r\n Use default values for plate and well (ignoring those in idt fields, which may be None).\r\n If False, each Strand to export must have the field :py:data:`Strand.idt`, so in particular\r\n the parameter `only_strands_with_idt` must be True.\r\n :param warn_using_default_plates:\r\n specifies whether, if `use_default_plates` is True, to print a warning for strands whose\r\n :py:data:`Strand.idt` has the fields :py:data:`IDTFields.plate` and :py:data:`IDTFields.well`,\r\n since `use_default_plates` directs these fields to be ignored.\r\n :param plate_type:\r\n a :any:`PlateType` specifying whether to use a 96-well plate or a 384-well plate\r\n if the `use_default_plates` parameter is ``True``.\r\n Ignored if `use_default_plates` is ``False``, because in that case the wells are explicitly set\r\n by the user, who is free to use coordinates for either plate type.\r\n :param export_non_modified_strand_version:\r\n For any :any:`Strand` with a :any:`Modification`, also export a version of the :any:`Strand`\r\n without any modifications. The name for this :any:`Strand` is the original name with\r\n '_nomods' appended to it.\r\n \"\"\"\r\n\r\n strands_to_export = self._idt_strands_to_export(key=key, warn_duplicate_name=warn_duplicate_name,\r\n only_strands_with_idt=only_strands_with_idt,\r\n export_scaffold=export_scaffold,\r\n export_non_modified_strand_version=export_non_modified_strand_version)\r\n\r\n if not use_default_plates:\r\n if not only_strands_with_idt:\r\n raise ValueError('parameters use_default_plates and only_strands_with_idt '\r\n 'cannot both be False')\r\n self._write_plates_assuming_explicit_plates_in_each_strand(directory, filename, strands_to_export)\r\n else:\r\n self._write_plates_default(directory=directory, filename=filename,\r\n strands_to_export=strands_to_export,\r\n plate_type=plate_type,\r\n warn_using_default_plates=warn_using_default_plates)\r\n\r\n def _write_plates_assuming_explicit_plates_in_each_strand(self, directory: str, filename: Optional[str],\r\n strands_to_export: List[Strand]) -> None:\r\n plates = list({strand.idt.plate for strand in strands_to_export if strand.idt is not None if\r\n strand.idt.plate is not None})\r\n if len(plates) == 0:\r\n raise ValueError('Cannot write a a plate file since no plate data exists in any Strands '\r\n 'in the design.\\n'\r\n 'Set the option use_default_plates=True in '\r\n \"Design.write_idt_plate_excel_file\\nif you don't want to enter plate \"\r\n 'and well positions for each Strand you wish to write to the Excel file.')\r\n plates.sort()\r\n filename_plate, workbook = self._setup_excel_file(directory, filename)\r\n for plate in plates:\r\n worksheet = self._add_new_excel_plate_sheet(plate, workbook)\r\n\r\n strands_in_plate = [strand for strand in strands_to_export if\r\n strand.idt is not None and strand.idt.plate == plate]\r\n\r\n strands_in_plate.sort(key=lambda s: (int(s.idt.well[1:]), s.idt.well[0])) # type: ignore\r\n\r\n for row, strand in enumerate(strands_in_plate):\r\n if strand.idt is None:\r\n raise ValueError(f'cannot export strand {strand} to IDT because it has no idt field')\r\n worksheet.write(row + 1, 0, strand.idt.well)\r\n worksheet.write(row + 1, 1, strand.idt_export_name())\r\n worksheet.write(row + 1, 2, strand.idt_dna_sequence())\r\n\r\n workbook.save(filename_plate)\r\n\r\n @staticmethod\r\n def _add_new_excel_plate_sheet(plate_name: str, workbook: Any) -> Any:\r\n worksheet = workbook.add_sheet(plate_name)\r\n worksheet.write(0, 0, 'Well Position')\r\n worksheet.write(0, 1, 'Name')\r\n worksheet.write(0, 2, 'Sequence')\r\n return worksheet\r\n\r\n @staticmethod\r\n def _setup_excel_file(directory: str, filename: Optional[str]) -> Tuple[str, Any]:\r\n import xlwt # type: ignore\r\n plate_extension = f'xls'\r\n if filename is None:\r\n filename_plate = _get_filename_same_name_as_running_python_script(\r\n directory, plate_extension, filename)\r\n else:\r\n filename_plate = _create_directory_and_set_filename(directory, filename)\r\n workbook = xlwt.Workbook()\r\n return filename_plate, workbook\r\n\r\n def _write_plates_default(self, directory: str, filename: Optional[str], strands_to_export: List[Strand],\r\n plate_type: PlateType = PlateType.wells96,\r\n warn_using_default_plates: bool = True) -> None:\r\n plate_coord = _PlateCoordinate(plate_type=plate_type)\r\n plate = 1\r\n excel_row = 1\r\n filename_plate, workbook = self._setup_excel_file(directory, filename)\r\n worksheet = self._add_new_excel_plate_sheet(f'plate{plate}', workbook)\r\n\r\n for strand in strands_to_export:\r\n if strand.idt is not None:\r\n if warn_using_default_plates and strand.idt.plate is not None:\r\n print(\r\n f\"WARNING: strand {strand} has plate entry {strand.idt.plate}, \"\r\n f\"which is being ignored since we are using default plate/well addressing\")\r\n if warn_using_default_plates and strand.idt.well is not None:\r\n print(\r\n f\"WARNING: strand {strand} has well entry {strand.idt.well}, \"\r\n f\"which is being ignored since we are using default plate/well addressing\")\r\n\r\n well = plate_coord.well()\r\n worksheet.write(excel_row, 0, well)\r\n worksheet.write(excel_row, 1, strand.idt_export_name())\r\n worksheet.write(excel_row, 2, strand.idt_dna_sequence())\r\n plate_coord.increment()\r\n if plate != plate_coord.plate():\r\n workbook.save(filename_plate)\r\n plate = plate_coord.plate()\r\n worksheet = self._add_new_excel_plate_sheet(f'plate{plate}', workbook)\r\n excel_row = 1\r\n else:\r\n excel_row += 1\r\n\r\n workbook.save(filename_plate)\r\n\r\n @_docstring_parameter(default_extension=default_scadnano_file_extension)\r\n def write_scadnano_file(self, directory: str = '.', filename: str = None, extension: str = None) -> None:\r\n \"\"\"Write ``.{default_extension}`` file representing this :any:`Design`,\r\n suitable for reading by scadnano,\r\n with the output file having the same name as the running script but with ``.py`` changed to\r\n ``.{default_extension}``,\r\n unless `filename` is explicitly specified.\r\n For instance, if the script is named ``my_origami.py``,\r\n then the design will be written to ``my_origami.{default_extension}``.\r\n If `extension` is specified (but `filename` is not), then the design will be written to\r\n ``my_origami.``\r\n\r\n `directory` specifies a directory in which to place the file, either absolute or relative to\r\n the current working directory. Default is the current working directory.\r\n\r\n The string written is that returned by :meth:`Design.to_json`.\r\n\r\n :param directory: directory in which to put file (default: current working directory)\r\n :param filename: filename (default: name of script with ``.py`` replaced by\r\n ``.{default_extension}``).\r\n Mutually exclusive with `extension`\r\n :param extension: extension for filename (default: ``.{default_extension}``)\r\n Mutually exclusive with `filename`\r\n \"\"\"\r\n self._check_legal_design()\r\n contents = self.to_json()\r\n if filename is not None and extension is not None:\r\n raise ValueError('at least one of filename or extension must be None')\r\n if extension is None:\r\n extension = default_scadnano_file_extension\r\n _write_file_same_name_as_running_python_script(contents, extension, directory, filename)\r\n\r\n def export_cadnano_v2(self, directory: str = '.', filename: Optional[str] = None) -> None:\r\n \"\"\"Write ``.json`` file representing this :any:`Design`, suitable for reading by cadnano v2,\r\n with the output file having the same name as the running script but with ``.py`` changed to ``.json``,\r\n unless `filename` is explicitly specified.\r\n For instance, if the script is named ``my_origami.py``,\r\n then the design will be written to ``my_origami.json``.\r\n\r\n `directory` specifies a directory in which to place the file, either absolute or relative to\r\n the current working directory. Default is the current working directory.\r\n\r\n The string written is that returned by :meth:`Design.to_cadnano_v2`.\r\n \"\"\"\r\n content_serializable = OrderedDict({})\r\n content_serializable['name'] = _get_filename_same_name_as_running_python_script(directory, 'json',\r\n filename)\r\n content_serializable_final = self.to_cadnano_v2()\r\n content_serializable.update(content_serializable_final)\r\n\r\n encoder = _SuppressableIndentEncoder\r\n contents = json.dumps(content_serializable, cls=encoder, indent=2)\r\n\r\n _write_file_same_name_as_running_python_script(contents, 'json', directory, filename)\r\n\r\n def add_nick(self, helix: int, offset: int, forward: bool, new_color: bool = True) -> None:\r\n \"\"\"Add nick to :any:`Domain` on :any:`Helix` with index `helix`,\r\n in direction given by `forward`, at offset `offset`. The two :any:`Domain`'s created by this nick\r\n will have 5'/3' ends at offsets `offset` and `offset-1`.\r\n\r\n For example, if there is a :any:`Domain` with\r\n :py:data:`Domain.helix` = ``0``,\r\n :py:data:`Domain.forward` = ``True``,\r\n :py:data:`Domain.start` = ``0``,\r\n :py:data:`Domain.end` = ``10``,\r\n then calling ``add_nick(helix=0, offset=5, forward=True)`` will split it into two :any:`Domain`'s,\r\n with one domains having the fields\r\n :py:data:`Domain.helix` = ``0``,\r\n :py:data:`Domain.forward` = ``True``,\r\n :py:data:`Domain.start` = ``0``,\r\n :py:data:`Domain.end` = ``5``,\r\n (recall that :py:data:`Domain.end` is exclusive, meaning that the largest offset on this\r\n :any:`Domain` is 4 = ``offset-1``)\r\n and the other domain having the fields\r\n :py:data:`Domain.helix` = ``0``,\r\n :py:data:`Domain.forward` = ``True``,\r\n :py:data:`Domain.start` = ``5``,\r\n :py:data:`Domain.end` = ``10``.\r\n\r\n If the :any:`Strand` is circular, then it will be made linear with the 5' and 3' ends at the\r\n nick position, modified in place. Otherwise, this :any:`Strand` will be deleted from the design,\r\n and two new :any:`Strand`'s will be added.\r\n\r\n :param helix: index of helix where nick will occur\r\n :param offset: offset to nick (nick will be between offset and offset-1)\r\n :param forward: forward or reverse :any:`Domain` on `helix` at `offset`?\r\n :param new_color: whether to assign a new color to one of the :any:`Strand`'s resulting from the\r\n nick.\r\n If False, both :any:`Strand`'s created have the same color as the original\r\n If True, one :any:`Strand` keeps the same color as the original and the other\r\n is assigned a new color\r\n \"\"\"\r\n for domain_to_remove in self.domains_at(helix, offset):\r\n if domain_to_remove.forward == forward:\r\n break\r\n else:\r\n raise IllegalDesignError(f'no domain at helix {helix} in direction '\r\n f'{\"forward\" if forward else \"reverse\"} at offset {offset}')\r\n strand = domain_to_remove.strand()\r\n domains = strand.domains\r\n order = domains.index(domain_to_remove)\r\n domains_before = domains[:order]\r\n domains_after = domains[order + 1:]\r\n domain_left: Domain[DomainLabel] = Domain(helix, forward, domain_to_remove.start, offset)\r\n domain_right: Domain[DomainLabel] = Domain(helix, forward, offset, domain_to_remove.end)\r\n\r\n # \"before\" and \"after\" mean in the 5' --> 3' direction, i.e., if a reverse domain:\r\n # <--------]\r\n # nicked like this:\r\n # <---]<---]\r\n # The before domain is on the right and the after domain is on the left.\r\n #\r\n # If nicking a forward domain:\r\n # [-------->\r\n # nicked like this:\r\n # [--->[--->\r\n # The before domain is on the left and the after domain is on the right.\r\n if domain_to_remove.forward:\r\n domain_to_add_before = domain_left\r\n domain_to_add_after = domain_right\r\n else:\r\n domain_to_add_before = domain_right\r\n domain_to_add_after = domain_left\r\n\r\n seq_before_whole: Optional[str]\r\n seq_after_whole: Optional[str]\r\n if strand.dna_sequence is not None:\r\n seq_before: str = ''.join(domain.dna_sequence() for domain in domains_before) # type: ignore\r\n seq_after: str = ''.join(domain.dna_sequence() for domain in domains_after) # type: ignore\r\n seq_on_domain_left: str = domain_to_remove.dna_sequence_in( # type: ignore\r\n domain_to_remove.start,\r\n offset - 1)\r\n seq_on_domain_right: str = domain_to_remove.dna_sequence_in(offset, # type: ignore\r\n domain_to_remove.end - 1)\r\n if domain_to_remove.forward:\r\n seq_on_domain_before = seq_on_domain_left\r\n seq_on_domain_after = seq_on_domain_right\r\n else:\r\n seq_on_domain_before = seq_on_domain_right\r\n seq_on_domain_after = seq_on_domain_left\r\n seq_before_whole = seq_before + seq_on_domain_before\r\n seq_after_whole = seq_on_domain_after + seq_after\r\n else:\r\n seq_before_whole = None\r\n seq_after_whole = None\r\n\r\n domains_before = domains_before + cast(List[Union[Domain, Loopout]], [domain_to_add_before]) # noqa\r\n domains_after = cast(List[Union[Domain, Loopout]], [domain_to_add_after]) + domains_after # noqa\r\n\r\n if strand.circular:\r\n # if strand is circular, we modify its domains in place\r\n domains = domains_after + domains_before\r\n strand.set_domains(domains)\r\n\r\n # DNA sequence was rotated, so re-assign it\r\n if seq_before_whole is not None and seq_after_whole is not None:\r\n seq = seq_before_whole + seq_after_whole\r\n strand.set_dna_sequence(seq)\r\n\r\n strand.set_linear()\r\n\r\n else:\r\n # if strand is not circular, we delete it and create two new strands\r\n self.strands.remove(strand)\r\n\r\n idt_present = strand.idt is not None\r\n strand_before: Strand[StrandLabel, DomainLabel] = Strand(\r\n domains=domains_before,\r\n dna_sequence=seq_before_whole,\r\n color=strand.color,\r\n idt=strand.idt if idt_present else None,\r\n )\r\n\r\n color_after = next(self.color_cycler) if new_color else strand.color\r\n strand_after: Strand[StrandLabel, DomainLabel] = Strand(\r\n domains=domains_after,\r\n dna_sequence=seq_after_whole,\r\n color=color_after,\r\n )\r\n\r\n self.strands.extend([strand_before, strand_after])\r\n\r\n helix_domains = self.helices[helix].domains\r\n idx_domain_to_remove = helix_domains.index(domain_to_remove)\r\n helix_domains[idx_domain_to_remove] = domain_left\r\n helix_domains.insert(idx_domain_to_remove + 1, domain_right)\r\n\r\n def ligate(self, helix: int, offset: int, forward: bool) -> None:\r\n \"\"\"\r\n Reverse operation of :py:meth:`Design.add_nick`.\r\n \"Ligates\" a nick between two adjacent :any:`Domain`'s in the same direction on a :any:`Helix`\r\n with index `helix`,\r\n in direction given by `forward`, at offset `offset`.\r\n\r\n For example, if there are a :any:`Domain`'s with\r\n :py:data:`Domain.helix` = ``0``,\r\n :py:data:`Domain.forward` = ``True``,\r\n :py:data:`Domain.start` = ``0``,\r\n :py:data:`Domain.end` = ``5``,\r\n (recall that :py:data:`Domain.end` is exclusive, meaning that the largest offset on this\r\n :any:`Domain` is 4 = ``offset-1``)\r\n and the other domain having the fields\r\n :py:data:`Domain.helix` = ``0``,\r\n :py:data:`Domain.forward` = ``True``,\r\n :py:data:`Domain.start` = ``5``,\r\n :py:data:`Domain.end` = ``10``.\r\n then calling ``ligate(helix=0, offset=5, forward=True)`` will combine them into one :any:`Domain`,\r\n having the fields\r\n :py:data:`Domain.helix` = ``0``,\r\n :py:data:`Domain.forward` = ``True``,\r\n :py:data:`Domain.start` = ``0``,\r\n :py:data:`Domain.end` = ``10``.\r\n\r\n If the :any:`Domain`'s are on the same :any:`Strand` (i.e., they are the 5' and 3' ends of that\r\n :any:`Strand`, which is necessarily linear), then the :any:`Strand` is made is circular in place,\r\n Otherwise, the two :any:`Strand`'s of each :any:`Domain` will be joined into one,\r\n replacing the previous strand on the 5'-most side of the nick (i.e., the one whose 3' end\r\n terminated at the nick), and deleting the other strand.\r\n\r\n :param helix: index of helix where nick will be ligated\r\n :param offset: offset to ligate (nick to ligate must be between offset and offset-1)\r\n :param forward: forward or reverse :any:`Domain` on `helix` at `offset`?\r\n \"\"\"\r\n for dom_right in self.domains_at(helix, offset):\r\n if dom_right.forward == forward:\r\n break\r\n else:\r\n raise IllegalDesignError(f'no domain at helix {helix} in direction '\r\n f'{\"forward\" if forward else \"reverse\"} at offset {offset}')\r\n for dom_left in self.domains_at(helix, offset - 1):\r\n if dom_left.forward == forward:\r\n break\r\n else:\r\n raise IllegalDesignError(f'no domain at helix {helix} in direction '\r\n f'{\"forward\" if forward else \"reverse\"} at offset {offset}')\r\n if dom_right.start != offset:\r\n raise IllegalDesignError(f'to ligate at offset {offset}, it must be the start offset of a domain,'\r\n f'but there is no domain at helix {helix} in direction '\r\n f'{\"forward\" if forward else \"reverse\"} with start offset = {offset}')\r\n if dom_left.end != offset:\r\n raise IllegalDesignError(f'to ligate at offset {offset}, it must be the end offset of a domain,'\r\n f'but there is no domain at helix {helix} in direction '\r\n f'{\"forward\" if forward else \"reverse\"} with end offset = {offset}')\r\n\r\n strand_left = dom_left.strand()\r\n strand_right = dom_right.strand()\r\n\r\n dom_new: Domain[DomainLabel] = Domain(helix=helix, forward=forward, start=dom_left.start,\r\n end=dom_right.end,\r\n deletions=dom_left.deletions + dom_right.deletions,\r\n insertions=dom_left.insertions + dom_right.insertions,\r\n name=dom_left.name, label=dom_left.label)\r\n\r\n # normalize 5'/3' distinction; below refers to which Strand has the 5'/3' end that will be ligated\r\n # So strand_5p is the one whose 3' end will be the 3' end of the whole new Strand\r\n # strand_5p and dom_5p are the ones on the 5' side of the nick,\r\n # CAUTION: they are on the 3' side of the nick,\r\n # i.e., the 3' end of strand_5p will be the 3' end of the new strand\r\n # e.g.,\r\n #\r\n # strand_3p strand_5p\r\n # [--------->[--------->\r\n # dom_3p dom_5p\r\n #\r\n # or\r\n #\r\n # strand_5p strand_3p\r\n # <---------]<---------]\r\n # dom_5p dom_3p\r\n if not forward:\r\n dom_5p = dom_left\r\n dom_3p = dom_right\r\n strand_5p = strand_left\r\n strand_3p = strand_right\r\n else:\r\n dom_5p = dom_right\r\n dom_3p = dom_left\r\n strand_5p = strand_right\r\n strand_3p = strand_left\r\n\r\n if strand_left is strand_right:\r\n # join domains and make strand circular\r\n strand = strand_left\r\n assert strand.first_bound_domain() is dom_5p\r\n assert strand.last_bound_domain() is dom_3p\r\n strand.domains[0] = dom_new # set first domain equal to new joined domain\r\n strand.domains.pop() # remove last domain\r\n strand.set_circular()\r\n for domain in strand.domains:\r\n domain._parent_strand = strand\r\n\r\n else:\r\n # join strands\r\n strand_3p.domains.pop()\r\n strand_3p.domains.append(dom_new)\r\n strand_3p.domains.extend(strand_5p.domains[1:])\r\n strand_3p.is_scaffold = strand_left.is_scaffold or strand_right.is_scaffold\r\n strand_3p.set_modification_3p(strand_5p.modification_3p)\r\n for idx, mod in strand_5p.modifications_int.items():\r\n new_idx = idx + strand_3p.dna_length()\r\n strand_3p.set_modification_internal(new_idx, mod)\r\n if strand_3p.dna_sequence is not None and strand_5p.dna_sequence is not None:\r\n strand_3p.dna_sequence += strand_5p.dna_sequence\r\n if strand_5p.is_scaffold and not strand_3p.is_scaffold and strand_5p.color is not None:\r\n strand_3p.set_color(strand_5p.color)\r\n self.strands.remove(strand_5p)\r\n for domain in strand_3p.domains:\r\n domain._parent_strand = strand_3p\r\n\r\n helix_domains = self.helices[helix].domains\r\n idx_domain_to_remove_left = helix_domains.index(dom_left)\r\n helix_domains[idx_domain_to_remove_left] = dom_new\r\n helix_domains.remove(dom_right)\r\n\r\n def add_half_crossover(self, helix: int, helix2: int, offset: int, forward: bool,\r\n offset2: int = None, forward2: bool = None) -> None:\r\n \"\"\"\r\n Add a half crossover from helix `helix` at offset `offset` to `helix2`, on the strand\r\n with :py:data:`Strand.forward` = `forward`.\r\n\r\n Unlike :py:meth:`Design.add_full_crossover`, which automatically adds a nick between the two\r\n half-crossovers, to call this method, there must *already* be nicks adjacent to the given\r\n offsets on the given helices. (either on the left or right side)\r\n\r\n If the crossover is within a :any:`Strand`, i.e., between its 5' and ' ends, the :any:`Strand`\r\n will simply be made circular, modifying it in place. Otherwise, the old two :any:`Strand`'s will be\r\n deleted, and a new :any:`Strand` added.\r\n\r\n :param helix: index of one helix of half crossover\r\n :param helix2: index of other helix of half crossover\r\n :param offset: offset on `helix` at which to add half crossover\r\n :param forward: direction of :any:`Strand` on `helix` to which to add half crossover\r\n :param offset2: offset on `helix2` at which to add half crossover.\r\n If not specified, defaults to `offset`\r\n :param forward2: direction of :any:`Strand` on `helix2` to which to add half crossover.\r\n If not specified, defaults to the negation of `forward`\r\n \"\"\"\r\n if offset2 is None:\r\n offset2 = offset\r\n if forward2 is None:\r\n forward2 = not forward\r\n domain1 = self.domain_at(helix, offset, forward)\r\n domain2 = self.domain_at(helix2, offset2, forward2)\r\n if domain1 is None:\r\n raise IllegalDesignError(\r\n f\"Cannot add half crossover at (helix={helix}, offset={offset}). \"\r\n f\"There is no Domain there.\")\r\n if domain2 is None:\r\n raise IllegalDesignError(\r\n f\"Cannot add half crossover at (helix={helix2}, offset={offset2}). \"\r\n f\"There is no Domain there.\")\r\n strand1 = domain1.strand()\r\n strand2 = domain2.strand()\r\n\r\n if strand1 == strand2:\r\n strand1.set_circular()\r\n return\r\n # raise IllegalDesignError(f\"Cannot add crossover from \"\r\n # f\"(helix={helix}, offset={offset}) to \"\r\n # f\"(helix={helix2}, offset={offset2}) \"\r\n # f\"because that would join two Domains \"\r\n # f\"already on the same Strand! \"\r\n # f\"Currently circular Strands are not supported. \"\r\n # f\"Instead, try adding nicks first, or rearrange the order of \"\r\n # f\"crossover addition, to ensure that all strands are \"\r\n # f\"non-circular, even in intermediate stages.\")\r\n\r\n if domain1.offset_3p() == offset and domain2.offset_5p() == offset2:\r\n strand_first = strand1\r\n strand_last = strand2\r\n elif domain1.offset_5p() == offset and domain2.offset_3p() == offset2:\r\n strand_first = strand2\r\n strand_last = strand1\r\n else:\r\n raise IllegalDesignError(\"Cannot add half crossover. Must have one domain have its \"\r\n \"5' end at the given offset and the other with its 3' end at the \"\r\n \"given offset, but this is not the case.\")\r\n\r\n new_domains = strand_first.domains + strand_last.domains\r\n if strand_first.dna_sequence is None and strand_last.dna_sequence is None:\r\n new_dna = None\r\n elif strand_first.dna_sequence is not None and strand_last.dna_sequence is not None:\r\n new_dna = strand_first.dna_sequence + strand_last.dna_sequence\r\n else:\r\n raise IllegalDesignError(\r\n 'cannot add crossover between two strands if one has a DNA sequence '\r\n 'and the other does not')\r\n new_strand: Strand[StrandLabel, DomainLabel] = Strand(domains=new_domains, color=strand_first.color,\r\n dna_sequence=new_dna, idt=strand_first.idt,\r\n is_scaffold=strand1.is_scaffold or strand2.is_scaffold)\r\n\r\n # put new strand in place where strand_first was\r\n strand_first_idx = self.strands.index(strand_first)\r\n self.strands[strand_first_idx] = new_strand\r\n self.strands.remove(strand_last)\r\n\r\n def add_full_crossover(self, helix: int, helix2: int, offset: int, forward: bool,\r\n offset2: int = None, forward2: bool = None) -> None:\r\n \"\"\"\r\n Adds two half-crossovers, one at `offset` and another at `offset`-1.\r\n Other arguments have the same meaning as in :py:meth:`Design.add_half_crossover`.\r\n A nick is automatically added on helix `helix` between\r\n `offset` and `offset`-1 if one is not already present,\r\n and similarly for `offset2` on helix `helix2`.\r\n\r\n :param helix: index of one helix of half crossover\r\n :param helix2: index of other helix of half crossover\r\n :param offset: offset on `helix` at which to add half crossover\r\n :param forward: direction of :any:`Strand` on `helix` to which to add half crossover\r\n :param offset2: offset on `helix2` at which to add half crossover.\r\n If not specified, defaults to `offset`\r\n :param forward2: direction of :any:`Strand` on `helix2` to which to add half crossover.\r\n If not specified, defaults to the negation of `forward`\r\n \"\"\"\r\n if offset2 is None:\r\n offset2 = offset\r\n if forward2 is None:\r\n forward2 = not forward\r\n for helix_, forward_, offset_ in [(helix, forward, offset), (helix2, forward2, offset2)]:\r\n self._prepare_nicks_for_full_crossover(helix_, forward_, offset_)\r\n self.add_half_crossover(helix=helix, helix2=helix2, offset=offset - 1, offset2=offset2 - 1,\r\n forward=forward, forward2=forward2)\r\n self.add_half_crossover(helix=helix, helix2=helix2, offset=offset, offset2=offset2,\r\n forward=forward, forward2=forward2)\r\n\r\n def _prepare_nicks_for_full_crossover(self, helix: int, forward: bool, offset: int) -> None:\r\n domain_right = self.domain_at(helix, offset, forward)\r\n if domain_right is None:\r\n raise IllegalDesignError(f'You tried to create a full crossover at '\r\n f'(helix={helix}, offset={offset}) '\r\n f'but there is no Strand there.')\r\n domain_left = self.domain_at(helix, offset - 1, forward)\r\n if domain_left is None:\r\n raise IllegalDesignError(f'You tried to create a full crossover at '\r\n f'(helix={helix}, offset={offset}) '\r\n f'but there is no Strand at offset {offset - 1}.')\r\n if domain_left == domain_right:\r\n self.add_nick(helix, offset, forward)\r\n else:\r\n assert domain_left.end == domain_right.start\r\n\r\n def inline_deletions_insertions(self) -> None:\r\n \"\"\"\r\n Converts deletions and insertions by \"inlining\" them. Insertions and deletions are removed,\r\n and their domains have their lengths altered. Also, major tick marks on the helices will be\r\n shifted to preserve their adjacency to bases already present. For example, if there are major\r\n tick marks at 0, 8, 18, 24, and a deletion between 0 and 8:\r\n\r\n .. code-block:: none\r\n\r\n 0 8 18 24 30\r\n |--X---|---------|-----|------\r\n\r\n then the domain is shortened by 1,\r\n the tick marks become 0, 7, 15, 23,\r\n and the helix's maximum offset is shrunk by 1:\r\n\r\n .. code-block:: none\r\n\r\n 0 7 17 23 29\r\n |-----|---------|-----|------\r\n\r\n Similarly, if there are insertions (in the example below, the \"2\" represents an insertion of length 2,\r\n which represents 3 total bases), they are expanded\r\n\r\n .. code-block:: none\r\n\r\n 0 8 18 24 30\r\n |--2---|---------|-----|------\r\n\r\n then the domain is lengthened by 3:\r\n\r\n .. code-block:: none\r\n\r\n 0 10 20 26 32\r\n |--------|---------|-----|------\r\n\r\n And it works if there are both insertions and deletions:\r\n\r\n .. code-block:: none\r\n\r\n 0 8 18 24 30\r\n |--2---|---------|--X--|------\r\n\r\n then the domain is lengthened by 3:\r\n\r\n .. code-block:: none\r\n\r\n 0 10 20 25 31\r\n |--------|---------|----|------\r\n\r\n We assume that a major tick mark appears just to the LEFT of the offset it encodes,\r\n so the minimum and maximum offsets for tick marks are respectively the helix's minimum offset\r\n and 1 plus its maximum offset, the latter being just to the right of the last offset on the helix.\r\n \"\"\"\r\n for helix in self.helices.values():\r\n self._inline_deletions_insertions_on_helix(helix)\r\n\r\n def _inline_deletions_insertions_on_helix(self, helix: Helix) -> None:\r\n ###################################################\r\n # first gather information before changing anything\r\n\r\n # gather all mods on helix\r\n deletions = [deletion for domain in helix.domains for deletion in domain.deletions]\r\n insertions = [insertion for domain in helix.domains for insertion in domain.insertions]\r\n\r\n # change max offset\r\n delta_length = sum(length for (offset, length) in insertions) - len(deletions)\r\n\r\n # combined collection of deletions/insertions into one dict mapping offset --> None/len, where\r\n # value of -1 indicates deletion, and otherwise is length of insertion\r\n dels_ins = dict()\r\n for deletion in deletions:\r\n dels_ins[deletion] = -1\r\n for insertion in insertions:\r\n dels_ins[insertion[0]] = insertion[1]\r\n\r\n # put offsets in sorted order\r\n dels_ins_offsets_sorted = sorted(dels_ins.keys())\r\n\r\n # fix helix major ticks\r\n group = self.groups[helix.group]\r\n major_ticks = sorted(helix.calculate_major_ticks(group.grid))\r\n\r\n ###################################################\r\n # now that info is gathered, start changing things\r\n\r\n if helix.max_offset is not None:\r\n helix.max_offset += delta_length\r\n\r\n if len(major_ticks) > 0:\r\n major_tick_idx = 0\r\n delta_acc = 0 # accumulated delta; insertions add to this and deletions subtract from it\r\n for offset in dels_ins_offsets_sorted:\r\n # go to first major tick great than offset, updating passed ones by delta_acc\r\n while major_tick_idx < len(major_ticks) and major_ticks[major_tick_idx] <= offset:\r\n major_ticks[major_tick_idx] += delta_acc\r\n major_tick_idx += 1\r\n delta_acc += dels_ins[offset]\r\n # if necessary, update major ticks beyond last ins/del\r\n while major_tick_idx < len(major_ticks):\r\n major_ticks[major_tick_idx] += delta_acc\r\n major_tick_idx += 1\r\n # TODO: check if regularly spaced and reaching both ends, and if so set helix.major_tick_distance\r\n helix.major_ticks = major_ticks\r\n\r\n # fix domain start/end offsets\r\n domains = sorted(helix.domains, key=lambda domain: domain.start)\r\n delta_acc = 0\r\n for domain in domains:\r\n domain.start += delta_acc\r\n delta_acc += domain.dna_length() - domain.visual_length()\r\n domain.end += delta_acc\r\n domain.deletions = []\r\n domain.insertions = []\r\n\r\n def reverse_all(self) -> None:\r\n \"\"\"\r\n Reverses \"polarity\" of every :any:`Strand` in this :any:`Design`.\r\n\r\n No attempt is made to make any assigned DNA sequences match by reversing or rearranging them.\r\n Every :any:`Strand` keeps the same DNA sequence it had before (unreversed), if one was assigned.\r\n It is recommended to assign/reassign DNA sequences *after* doing this operation.\r\n \"\"\"\r\n for strand in self.strands:\r\n strand.reverse()\r\n\r\n def set_major_tick_distance(self, major_tick_distance: int) -> None:\r\n for helix in self.helices.values():\r\n helix.major_tick_distance = major_tick_distance\r\n\r\n def _ensure_helices_distinct_objects(self) -> None:\r\n pair = _find_index_pair_same_object(self.helices)\r\n if pair is not None:\r\n i, j = pair\r\n raise IllegalDesignError('helices must all be distinct objects, but those at indices '\r\n f'{i} and {j} are the same object')\r\n\r\n def _ensure_strands_distinct_objects(self) -> None:\r\n pair = _find_index_pair_same_object(self.strands)\r\n if pair is not None:\r\n i, j = pair\r\n raise IllegalDesignError('strands must all be distinct objects, but those at indices '\r\n f'{i} and {j} are the same object')\r\n\r\n def _ensure_helix_groups_exist(self) -> None:\r\n for helix in self.helices.values():\r\n if helix.group not in self.groups.keys():\r\n raise IllegalDesignError(f'helix {helix.idx} has group {helix.group}, which does not '\r\n f'exist in the design. The valid groups are '\r\n f'{\", \".join(self.groups.keys())}')\r\n\r\n def _has_default_groups(self) -> bool:\r\n return len(self.groups) == 1 and default_group_name in self.groups\r\n\r\n def _assign_default_helices_view_orders_to_groups(self) -> None:\r\n for name, group in self.groups.items():\r\n if group.helices_view_order is None:\r\n helices_in_group = {idx: helix for idx, helix in self.helices.items() if helix.group == name}\r\n group._assign_default_helices_view_order(helices_in_group) # noqa\r\n\r\n def _warn_if_strand_names_not_unique(self) -> None:\r\n names = [strand.name for strand in self.strands if strand.name is not None]\r\n if len(names) > len(set(names)):\r\n for name1, name2 in itertools.combinations(names, 2):\r\n if name1 == name2:\r\n print(f'WARNING: there are two strands with name {name1}')\r\n\r\n def strand_with_name(self, name: str) -> Optional[Strand]:\r\n \"\"\"\r\n :param name: name of a :any:`Strand`.\r\n :return: the :any:`Strand` with name `name`, or None if no :any:`Strand` in the :any:`Design` has\r\n that name.\r\n \"\"\"\r\n for strand in self.strands:\r\n if strand.name == name:\r\n return strand\r\n return None\r\n\r\n\r\ndef _find_index_pair_same_object(elts: Union[List, Dict]) -> Optional[Tuple]:\r\n # return pair of indices representing same object in elts, or None if they do not exist\r\n # input can be list or dict; if dict, returns pair of keys mapping to same object\r\n if isinstance(elts, list):\r\n elts = dict(enumerate(elts))\r\n for i, j in itertools.combinations(elts.keys(), 2):\r\n if elts[i] is elts[j]:\r\n return i, j\r\n return None\r\n\r\n\r\ndef _name_of_this_script() -> str:\r\n \"\"\"Return name of the currently running script, WITHOUT the .py extension.\"\"\"\r\n return os.path.basename(sys.argv[0])[:-3]\r\n\r\n\r\ndef _write_file_same_name_as_running_python_script(contents: str, extension: str, directory: str = '.',\r\n filename: Optional[str] = None) -> None:\r\n relative_filename = _get_filename_same_name_as_running_python_script(directory, extension, filename)\r\n with open(relative_filename, 'w') as out_file:\r\n out_file.write(contents)\r\n\r\n\r\ndef _get_filename_same_name_as_running_python_script(directory: str, extension: str,\r\n filename: Optional[str]) -> str:\r\n if filename is None:\r\n filename = _name_of_this_script() + f'.{extension}'\r\n relative_filename = _create_directory_and_set_filename(directory, filename)\r\n return relative_filename\r\n\r\n\r\ndef _create_directory_and_set_filename(directory: str, filename: str) -> str:\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n relative_filename = os.path.join(directory, filename)\r\n return relative_filename\r\n","sub_path":"scadnano/scadnano.py","file_name":"scadnano.py","file_ext":"py","file_size_in_byte":300381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"3303089","text":"from datetime import datetime\nimport traceback\nimport requests\nimport json\n\nfrom .. import app\n\nfrom flask import Blueprint, render_template, session, jsonify, redirect, \\\n url_for, request\n\nfrom ..controllers import document, hand, layertreebank, message, user, \\\n userdocument\n\nfrom ..utils import log\n\nDocument = document.Document\nHand = hand.Hand\nLayertreebank = layertreebank.Layertreebank\nMessage = message.Message\nUser = user.User\nUserdocument = userdocument.Userdocument\nLog = log.Log\n\nedit = Blueprint('edit', __name__, static_folder='/static')\n\n@edit.route('/')\ndef index(id):\n layertreebank = Layertreebank.get(id)\n if layertreebank:\n hand = layertreebank.hand\n document = hand.document\n my = Userdocument.get_editable(document.id, session['user_id'])\n my_messages = Message.get_my()\n return render_template('pages/edit.html', ltb=layertreebank, \n doc=document, my=my, hand=hand, \n my_messages=my_messages)\n else:\n return redirect(url_for('documents.index'))\n\n@edit.route('/add_treebank', methods=['POST'])\ndef add_treebank():\n id = request.form.get('id')\n file = request.files['file']\n return jsonify(Layertreebank.add_treebank(id, file))\n\n@edit.route('/save_paths', methods=['POST'])\ndef save_paths():\n settings = request.form.get('paths').strip()\n id = request.form.get('id');\n return jsonify(Layertreebank.update_settings(id, settings))\n\n@edit.route('/get_treebank', methods=['POST'])\ndef get_treebank():\n id = request.form.get('id').strip()\n return jsonify(Layertreebank.get_treebank(id))\n\n@edit.route('/delete_treebank', methods=['POST'])\ndef delete_treebank():\n id = request.form.get('id').strip()\n return jsonify(Layertreebank.delete_treebank(id))\n\n@edit.route('/store_plaintext', methods=['POST'])\ndef store_plaintext():\n id = request.form.get('id').strip()\n text = request.form.get('text').strip()\n return jsonify(Layertreebank.store_plaintext(id, text))\n\n@edit.route('/update_arethusa_ids', methods=['POST'])\ndef update_arethusa_ids():\n id = request.form.get('id').strip()\n arethusa_id = request.form.get('arethusa_id').strip()\n arethusa_publication_id = request.form.get('arethusa_publication_id').strip()\n return jsonify(Layertreebank.update_arethusa(id, arethusa_id, \n arethusa_publication_id))\n\n@edit.route('/post_treebank', methods=['POST'])\ndef post_treebank():\n xml = request.form.get('xml').strip().encode('utf-8')\n if ('perseids' in session):\n access_token = session['perseids']\n\n url = 'https://sosol.perseids.org/sosol/api/v1/xmlitems/TreebankCite'\n headers = {\n 'Content-Type': 'application/xml; charset=UTF-8',\n 'Accept': 'application/json',\n 'Authorization': 'Bearer '+access_token\n }\n r = requests.post(url, headers=headers, data=xml)\n r_json = r.json()\n if ('publication' in r_json):\n pubid = str(r_json['publication'])\n\n url = 'https://sosol.perseids.org/sosol/api/v1/publications/'+pubid\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Authorization': 'Bearer '+access_token\n }\n data = {\n 'community_name': app.app.config['PERSEIDS_COMMUNITY_NAME'],\n }\n r = requests.put(url, headers=headers, data=json.dumps(data))\n Log.p(app.app.config['PERSEIDS_COMMUNITY_NAME'])\n if r.status_code == 200:\n return jsonify({'id': r_json['id'], 'pubid':pubid})\n return 'false'\n\n@edit.route('/perseids_get', methods=['POST'])\ndef perseids_get():\n item_id = request.form.get('id').strip()\n if ('perseids' in session):\n access_token = session['perseids']\n\n url = 'https://sosol.perseids.org/sosol/api/v1/items/'+item_id+'/peek'\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Authorization': 'Bearer '+access_token\n }\n\n r = requests.get(url, headers=headers)\n Log.p(r.json())\n return jsonify(r.json())\n return 'false'","sub_path":"sematia/views/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"5302379","text":"#!/usr/bin/python3\n\"\"\"\nSquare class with basic printing function\n\"\"\"\n\n\nclass Square:\n def __init__(self, size=0, position=(0, 0)):\n \"\"\"\n initialize square with size and pos\n \"\"\"\n if not isinstance(size, int):\n raise TypeError('size must be an integer')\n if size < 0:\n raise ValueError('size must be >= 0')\n errormsg = 'position must be a tuple of 2 positive integers'\n if type(position) is not tuple:\n raise TypeError(errormsg)\n if len(position) != 2:\n raise TypeError(errormsg)\n for i in position:\n if type(i) is not int or i < 0:\n raise TypeError(errormsg)\n self.__size = size\n self.__position = position\n\n def area(self):\n return (self.__size ** 2)\n\n @property\n def size(self):\n return (self.__size)\n\n @size.setter\n def size(self, value):\n if not isinstance(value, int):\n raise TypeError('size must be an integer')\n if value < 0:\n raise ValueError('size must be >=0')\n self.__size = value\n\n @property\n def position(self):\n return (self.__position)\n\n @size.setter\n def size(self, value):\n errormsg = 'position must be a tuple of 2 positive integers'\n if type(value) is not tuple:\n raise TypeError(errormsg)\n if len(value) != 2:\n raise TypeError(errormsg)\n for i in value:\n if type(i) is not int or i < 0:\n raise TypeError(errormsg)\n self.__position = value\n\n def my_print(self):\n if self.__size == 0:\n print(\"\")\n else:\n print(\"\\n\" * self.__position[1], end=\"\")\n print(\"\\n\".join([\" \" * self.position[0] + \"#\" * self.__size\n for i in range(self.__size)]))\n","sub_path":"0x07-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"143086355","text":"import itertools\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport gzip\nimport argparse\nimport sklearn\nimport copy\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\n\nimport keras as ke\nfrom keras import backend as K\nfrom keras.layers import Input, Dense, Dropout, Activation\nfrom keras.optimizers import SGD, Adam, RMSprop, Adadelta\nfrom keras.models import Sequential, Model, model_from_json, model_from_yaml\nfrom keras.utils import np_utils, multi_gpu_model\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping\n\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, roc_auc_score, confusion_matrix, balanced_accuracy_score, classification_report\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler\n\n\n'''\nNew models must follow the scikit learn api and implement the following methods:\n\nfit(X, y[, sample_weight]): fit the model to the input features and target.\npredict(X): predict the value of the input features.\nscore(X, y): returns target metric given test features and test targets.\ndecision_function(X) (optional): return class probabilities, distance to decision boundaries, or other metric that can be used by margin sampler as a measure of uncertainty.\n\n'''\n\nclass AggAtnnBin(object):\n\n def __init__(self,\n random_state=1,\n epochs=100,\n batch_size=32,\n solver='rmsprop',\n learning_rate=0.001,\n lr_decay=0.\n ):\n\n # params\n self.solver = solver\n self.epochs = epochs\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.lr_decay = lr_decay\n # data\n self.encode_map = None\n self.decode_map = None\n self.model = None\n self.random_state = random_state\n\n\n def build_model(self, PS=6212, DR=0.2):\n inputs = Input(shape=(PS,))\n x = Dense(1000, activation='relu')(inputs)\n a = Dense(1000, activation='relu')(x)\n b = Dense(1000, activation='softmax')(x)\n x = ke.layers.multiply([a,b])\n x = Dense(500, activation='relu')(x)\n x = Dropout(DR)(x)\n x = Dense(250, activation='relu')(x)\n x = Dropout(DR)(x)\n x = Dense(125, activation='relu')(x)\n x = Dropout(DR)(x)\n x = Dense(60, activation='relu')(x)\n x = Dropout(DR)(x)\n x = Dense(30, activation='relu')(x)\n x = Dropout(DR)(x)\n outputs = Dense(2, activation='softmax')(x)\n model = Model(inputs=inputs, outputs=outputs)\n model.summary()\n\n model.compile(loss='categorical_crossentropy',\n optimizer=SGD(lr=0.00001, momentum=0.9),\n metrics=['acc', self.r2])\n\n # Save initial weights so that model can be retrained with same initialization\n self.initial_weights = copy.deepcopy(model.get_weights())\n self.model = model\n\n # According to the active-learning documentation:\n # fit(X, y[, sample_weight]): fit the model to the input features and target.\n def fit(self, X_train, y_train, sample_weight=None, class_weight=None, validation_data=None):\n if self.model is None:\n self.build_model()\n\n\n # We don't want incremental fit so reset learning rate and weights\n K.set_value(self.model.optimizer.lr, self.learning_rate)\n self.model.set_weights(self.initial_weights)\n\n # We want to compensate for unbalanced class labels\n if class_weight is None:\n print(\"shape of y_train is: \", y_train.shape)\n print(\"y_train: \", y_train)\n class_weights = compute_class_weight('balanced', np.unique(y_train), y_train)\n print (\"class weights: \", class_weights)\n d_class_weights = dict(enumerate(class_weights))\n print (\"class weights dict: \", d_class_weights)\n class_weight = d_class_weights\n\n # create y matrix\n y_mat = np.reshape(y_train, (len(y_train), 1))\n y_mat = ke.utils.to_categorical(y_mat, 2)\n\n _X_train, _X_test, _y_train, _y_test = sklearn.model_selection.train_test_split(\n X_train, y_mat, test_size=.2)\n\n\n checkpointer = ModelCheckpoint(filepath='Agg_attn_bin.autosave.model.h5',\n verbose=1,\n save_weights_only=False,\n save_best_only=True)\n\n csv_logger = CSVLogger('Agg_attn_bin.training.log')\n\n reduce_lr = ReduceLROnPlateau(monitor='val_acc',\n factor=0.20,\n patience=40,\n verbose=1,\n mode='auto',\n min_delta=0.0001,\n cooldown=3,\n min_lr=0.000000001)\n\n early_stop = EarlyStopping(monitor='val_loss',\n patience=200,\n verbose=1,\n mode='auto')\n\n print(\"calling fit on X_train and y_mat with shapes: \")\n print(X_train.shape, \" \", X_train)\n print(y_mat.shape, \" \", y_mat)\n\n self.model.fit(\n _X_train,\n _y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n shuffle=True,\n class_weight=class_weight,\n verbose=1,\n validation_data=(_X_test, _y_test),\n # validation_split=0.2,\n callbacks = [checkpointer, csv_logger, reduce_lr, early_stop])\n\n # Put any final metrics you want to evaluate on the model here before\n # returning to the framework.\n _y_predict = self.model.predict(_X_test)\n\n threshold = 0.5\n _y_pred_int = (_y_predict[:,0] < threshold).astype(np.int)\n _y_test_int = (_y_test[:,0] < threshold).astype(np.int)\n\n print(sklearn.metrics.roc_auc_score(_y_test_int, _y_pred_int))\n print(sklearn.metrics.balanced_accuracy_score(_y_test_int, _y_pred_int))\n print(sklearn.metrics.classification_report(_y_test_int, _y_pred_int))\n print(sklearn.metrics.confusion_matrix(_y_test_int, _y_pred_int))\n\n\n # According to the active-learning documentation, must implement:\n # predict(X): predict the value of the input features.\n def predict(self, X_val):\n predicted = self.model.predict(X_val)\n return predicted\n\n def score(self, X_val, val_y):\n # y_mat = self.create_y_mat(val_y)\n # create y matrix\n y_mat = np.reshape(val_y, (len(val_y), 1))\n y_mat = ke.utils.to_categorical(y_mat, 2)\n\n val_acc = self.model.evaluate(X_val, y_mat)[1]\n return val_acc\n\n # In Rick's code, this is done at data load time.\n # It's needed for the score function above.\n def create_y_mat(self, y):\n y_encode = self.encode_y(y)\n y_encode = np.reshape(y_encode, (len(y_encode), 1))\n y_mat = keras.utils.to_categorical(y_encode, self.n_classes)\n return y_mat\n\n\n @staticmethod\n def r2(y_true, y_pred):\n SS_res = K.sum(K.square(y_true - y_pred))\n SS_tot = K.sum(K.square(y_true - K.mean(y_true)))\n return (1 - SS_res/(SS_tot + K.epsilon()))\n\n\n def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\n\n\n\ndef load_data(data_path, PL, nb_classes):\n df = (pd.read_csv(data_path,skiprows=1).values).astype('float32')\n df_y = df[:,0].astype('int')\n df_x = df[:, 1:PL].astype(np.float32)\n\n # scaler = MaxAbsScaler()\n scaler = StandardScaler()\n df_x = scaler.fit_transform(df_x)\n X_train, X_test, Y_train, Y_test = train_test_split(df_x, df_y, test_size= 0.20, random_state=42)\n print('x_train shape:', X_train.shape)\n print('x_test shape:', X_test.shape)\n\n Y_train = np_utils.to_categorical(Y_train, nb_classes)\n Y_test = np_utils.to_categorical(Y_test, nb_classes)\n\n return X_train, X_test, Y_train, Y_test\n\n\ndef run(args):\n data_path=args['in']\n epochs = args['ep']\n PL = 6213\n\n model = AggAtnnBin()\n X_train, X_test, Y_train, Y_test = load_data(data_path, PL, 2)\n\n history = model.fit(X_train, Y_train, \n validation_data=(X_test, Y_test))\n\n\n score = model.model.evaluate(model.X_test, model.Y_test, verbose=0)\n print(\"score: \", score)\n return score\n\n\n\n#if __name__ == '__main__':\n#\n# psr = argparse.ArgumentParser(description='input agg csv file')\n# psr.add_argument('--in', default='in_file')\n# psr.add_argument('--ep', type=int, default=400)\n# args=vars(psr.parse_args())\n# print(args)\n#\n# run(args)\n# try:\n# K.clear_session()\n# except AttributeError: # theano does not have this function\n# pass\n\n\ndef graphs(X_test):\n\n Y_predict = model.predict(X_test)\n threshold = 0.5\n Y_pred_int = (Y_predict[:,0] < threshold).astype(np.int)\n Y_test_int = (Y_test[:,0] < threshold).astype(np.int)\n\n class_names=[\"Non-Response\",\"Response\"]\n \n # Compute confusion matrix\n cnf_matrix = sklearn.metrics.confusion_matrix(Y_test_int, Y_pred_int)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n #plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\n plt.savefig('Agg_attn_bin.confusion_without_norm.pdf', bbox_inches='tight')\n\n plt.close()\n\n # Plot normalized confusion matrix\n #plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n plt.savefig('Agg_attn_bin.confusion_with_norm.pdf', bbox_inches='tight')\n\n plt.close()\n\n print(sklearn.metrics.roc_auc_score(Y_test_int, Y_pred_int))\n print(sklearn.metrics.balanced_accuracy_score(Y_test_int, Y_pred_int))\n print(sklearn.metrics.classification_report(Y_test_int, Y_pred_int))\n print(sklearn.metrics.confusion_matrix(Y_test_int, Y_pred_int))\n print(\"score\")\n print(score)\n\n #exit()\n\n # summarize history for accuracy \n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model Accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.savefig('Agg_attn_bin.accuracy.png', bbox_inches='tight')\n plt.savefig('Agg_attn_bin.accuracy.pdf', bbox_inches='tight')\n\n plt.close()\n\n # summarize history for loss \n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.savefig('Agg_attn_bin.loss.png', bbox_inches='tight')\n plt.savefig('Agg_attn_bin.loss.pdf', bbox_inches='tight')\n\n\n print('Test val_loss:', score[0])\n print('Test accuracy:', score[1])\n\n # serialize model to JSON \n model_json = model.to_json()\n with open(\"Agg_attn_bin.model.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize model to YAML \n model_yaml = model.to_yaml()\n with open(\"Agg_attn_bin.model.yaml\", \"w\") as yaml_file:\n yaml_file.write(model_yaml)\n\n\n # serialize weights to HDF5 \n model.save_weights(\"Agg_attn_bin.model.h5\")\n print(\"Saved model to disk\")\n\n # load json and create model \n json_file = open('Agg_attn_bin.model.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model_json = model_from_json(loaded_model_json)\n\n\n # load yaml and create model \n yaml_file = open('Agg_attn_bin.model.yaml', 'r')\n loaded_model_yaml = yaml_file.read()\n yaml_file.close()\n loaded_model_yaml = model_from_yaml(loaded_model_yaml)\n\n\n # load weights into new model \n loaded_model_json.load_weights(\"Agg_attn_bin.model.h5\")\n print(\"Loaded json model from disk\")\n\n # evaluate json loaded model on test data \n loaded_model_json.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)\n\n print('json Validation loss:', score_json[0])\n print('json Validation accuracy:', score_json[1])\n\n print(\"json %s: %.2f%%\" % (loaded_model_json.metrics_names[1], score_json[1]*100))\n\n\n # load weights into new model \n loaded_model_yaml.load_weights(\"Agg_attn_bin.model.h5\")\n print(\"Loaded yaml model from disk\")\n\n # evaluate loaded model on test data\n loaded_model_yaml.compile(loss='binary_crossentropy', optimizer='SGD', metrics=['accuracy'])\n score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)\n\n print('yaml Validation loss:', score_yaml[0])\n print('yaml Validation accuracy:', score_yaml[1])\n print(\"yaml %s: %.2f%%\" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))\n\n # predict using loaded yaml model on test and training data\n predict_yaml_train = loaded_model_yaml.predict(X_train)\n predict_yaml_test = loaded_model_yaml.predict(X_test)\n\n print('Yaml_train_shape:', predict_yaml_train.shape)\n print('Yaml_test_shape:', predict_yaml_test.shape)\n\n predict_yaml_train_classes = np.argmax(predict_yaml_train, axis=1)\n predict_yaml_test_classes = np.argmax(predict_yaml_test, axis=1)\n\n np.savetxt(\"Agg_attn_bin_predict_yaml_train.csv\", predict_yaml_train, delimiter=\",\", fmt=\"%.3f\")\n np.savetxt(\"Agg_attn_bin_predict_yaml_test.csv\", predict_yaml_test, delimiter=\",\", fmt=\"%.3f\")\n np.savetxt(\"Agg_attn_bin_predict_yaml_train_classes.csv\", predict_yaml_train_classes, delimiter=\",\",fmt=\"%d\")\n np.savetxt(\"Agg_attn_bin_predict_yaml_test_classes.csv\", predict_yaml_test_classes, delimiter=\",\",fmt=\"%d\")\n","sub_path":"utils/agg_atnn_bin.py","file_name":"agg_atnn_bin.py","file_ext":"py","file_size_in_byte":15601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"142755023","text":"import pandas as pd\nimport numpy as np\nimport re\nimport sys\nfrom StyleFrame import StyleFrame, Styler\nfrom elasticsearch import Elasticsearch\n\nes = Elasticsearch()\n\ndef skill_names_builder():\n res = es.search(index='ipma', doc_type='skill', body={\n \"_source\": ['name'],\n \"size\": 47,\n \"query\":{\n \"match_all\":{}\n },\n \"sort\":[\n {\"_id\": \"asc\"} \n ]\n })\n skills_list = []\n for hit in res['hits']['hits']:\n skills_list.append((hit['_source']['name'])) \n return(skills_list)\n\n\ndef top_10(skills_names):\n top10 = []\n for skill_nb, skill in enumerate(skills_names):\n top10.append({})\n top10[skill_nb]['name'] = skill\n top10[skill_nb]['top10_stats'] = []\n res= es.search(index='classifier', doc_type='sentences', body={\n \"size\" : 10,\n \"query\":{\n \"bool\":{\n \"must\":[\n {\"match\":{\"skill\": skill }} \n ],\n \"must_not\":[\n {\"match\":{\"skill\": \"Aucune compétence\"}} \n ]\n }\n },\n \"sort\":[\n {\"proba_skill\":\"desc\"} \n ]\n })\n \n for rank, hit in enumerate(res['hits']['hits']):\n top10[skill_nb]['top10_stats'].append({})\n top10[skill_nb]['top10_stats'][rank]['sentence{}'.format(str(rank+1))] = hit['_source']['sentence']\n # For later\n top10[skill_nb]['top10_stats'][rank]['search_results'] = [] \n top10[skill_nb]['top10_stats'][rank]['correctness'] = 'False'\n top10[skill_nb]['top10_stats'][rank]['accuracy'] = 0\n print('----------{}-----------'.format(skill))\n print(top10[skill_nb]['top10_stats'][0]['sentence1'])\n print(top10[skill_nb]['top10_stats'][1]['sentence2'])\n return top10\n\ndef skill_query(top10):\n top10_filled = top10\n correctness_count = 0\n for id_skill, skill in enumerate(top10_filled):\n skill_score = 0\n for rank, sentence_stats in enumerate(top10_filled[id_skill]['top10_stats']):\n sentence = sentence_stats['sentence{}'.format(str(rank+1))]\n res = es.search(index='ipma', doc_type='skill', body={\n \"size\" : 5,\n \"_source\": ['name'],\n \"query\":{\n \"match\": {\n \"content\": \"{}\".format(sentence)\n }\n }\n })\n for hit in res['hits']['hits']:\n if (hit['_source']['name'] == skill['name']):\n sentence_stats[\"correctness\"] = 'True'\n correctness_count += 1\n skill_score += 1\n sentence_stats['search_results'].append(hit['_source']['name'])\n sentence_stats['accuracy'] = skill_score/10*100\n print('-----------{}-----------'.format(skill['name']))\n accuracy = '{}%'.format(int(round(correctness_count/470*100)))\n return (top10_filled, accuracy)\n\nsearch_results, accuracy = skill_query(top_10(skill_names_builder()))\n\ndef df_builder(search_results):\n columns = []\n index_names = []\n for id_skill, skill in enumerate(search_results):\n columns.append([])\n index_names.append(skill['name'])\n for rank, sentence_results in enumerate(search_results[id_skill]['top10_stats']):\n result = sentence_results[\"sentence{}\".format(rank+1)]\n result += '\\n#'\n result += '\\n#'.join(sentence_results['search_results'])\n result += '\\n Correctness: {}'.format(sentence_results['correctness'])\n columns[id_skill].append(result)\n columns[id_skill].append(sentence_results['accuracy'])\n df = pd.DataFrame(data = columns, index = index_names, columns = ['#1', '#2', '#3', '#4', '#5', '#6', '#7', '#8', '#9', '#10', 'Score (%)'])\n return df\n\ndf_search = df_builder(search_results) \n\ndef excel_builder(df_search, accuracy):\n number_rows = len(df_search.index)\n writer = pd.ExcelWriter('{}{}.xlsx'.format(sys.argv[1], accuracy), engine='xlsxwriter')\n df_search.to_excel(writer, sheet_name='study1')\n workbook = writer.book\n worksheet = writer.sheets['study1']\n # Define the ranges for the colors formatting\n cols = ['B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']\n color_ranges = []\n for col in cols:\n color_ranges.append(\"{}2:{}{}\".format(col, col, number_rows+1))\n # Add a format. Green fill with dark green text.\n green_format = workbook.add_format({'bg_color': '#C6EFCE', 'font_color': '#006100'})\n # Add a format. Light red fill with dark red text.\n red_format = workbook.add_format({'bg_color': '#FFC7CE', 'font_color': '#9C0006'})\n for ranges in color_ranges:\n worksheet.conditional_format(ranges, {'type': 'text', 'criteria': 'ends with', 'value': 'True', 'format': green_format})\n worksheet.conditional_format(ranges, {'type': 'text', 'criteria': 'ends with', 'value': 'False', 'format': red_format})\n writer.save()\n\nexcel_builder(df_search, accuracy)\n","sub_path":"PREPROCESSING/DATA_MANAGER/Elasticsearch/Study/query/query_test.py","file_name":"query_test.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"496796595","text":"#!/usr/bin/env python\n#\n# ----------------------------------------------------------------------\n#\n# Brad T. Aagaard, U.S. Geological Survey\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ----------------------------------------------------------------------\n#\n\n## @file spatialdata/spatialdb/SpatialDBObj.py\n##\n## @brief Python abstract base class for spatial database.\n##\n## Factory: spatial_database\n\nfrom pyre.components.Component import Component\nfrom spatialdb import SpatialDB as ModuleSpatialDB\n\n# Validator for label\ndef validateLabel(value):\n \"\"\"\n Validate label for spatial database.\n \"\"\"\n if 0 == len(value):\n raise ValueError(\"Descriptive label for spatial database not specified.\")\n return value\n\n\n# SpatialDBObj class\nclass SpatialDBObj(Component, ModuleSpatialDB):\n \"\"\"\n Python abstract base class for spatial database.\n\n Factory: spatial_database\n \"\"\"\n\n # INVENTORY //////////////////////////////////////////////////////////\n\n class Inventory(Component.Inventory):\n \"\"\"\n Python object for managing SpatialDBObj facilities and properties.\n \"\"\"\n\n ## @class Inventory\n ## Python object for managing SpatialDBObj facilities and properties.\n ##\n ## \\b Properties\n ## @li \\b label Descriprive label for database.\n ##\n ## \\b Facilities\n ## @li None\n\n import pyre.inventory\n\n label = pyre.inventory.str(\"label\", default=\"\",\n validator=validateLabel)\n label.meta['tip'] = \"Descriptive label for database.\"\n\n\n # PUBLIC METHODS /////////////////////////////////////////////////////\n\n def __init__(self, name=\"spatialdb\"):\n \"\"\"\n Constructor.\n \"\"\"\n Component.__init__(self, name, facility=\"spatial_database\")\n self._createModuleObj()\n return\n\n\n # PRIVATE METHODS ////////////////////////////////////////////////////\n\n def _configure(self):\n \"\"\"\n Set attributes based on inventory.\n \"\"\"\n Component._configure(self)\n ModuleSpatialDB.label(self, self.inventory.label)\n return\n \n\n def _createModuleObj(self):\n \"\"\"\n Create Python module object.\n \"\"\"\n raise NotImplementedError(\"_createModuleObj() not implemented.\")\n return\n\n\n# FACTORIES ////////////////////////////////////////////////////////////\n\ndef spatial_database():\n \"\"\"\n Factory associated with SpatialDB.\n \"\"\"\n # Abstract object (so return None).\n return None\n\n\n# End of file \n","sub_path":"spatialdata/spatialdb/SpatialDBObj.py","file_name":"SpatialDBObj.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"355364618","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nStep-3-csv :Basic Object-Orientation Abstraction and Data Representation of The Ideal Rankine Cycle\r\n \r\n object-oriented programming and data files(csv)\r\n\r\nThe ideal rankine cycle as \r\n \r\n ┌─── Node 0 ── Turbine ── Node 1 ──┐\r\n │ │\r\n Boiler Condenser\r\n │ │\r\n └─── Node 3 ── Pump ── Node 2 ──┘ \r\n\r\n Michael J . Moran. Fundamentals of Engineering Thermodynamics(7th Edition). John Wiley & Sons, Inc. 2011\r\n Chapter 8 : Vapour Power Systems \r\n Example 8.1:Analyzing an Ideal Rankine Cycle Page 438\r\n\r\nLicense: this code is in the public domain\r\n\r\nAuthor:Cheng Maohua\r\nEmail: cmh@seu.edu.cn\r\n\r\n\"\"\"\r\nimport csv\r\n\r\nimport node\r\nimport turbine\r\nimport pump\r\nimport condenser\r\nimport boiler\r\n\r\n\r\ndef read_nodesfile(filename):\r\n \"\"\" nodes in the csv file\"\"\"\r\n countNodes = len(open(filename, 'r').readlines()) - 1\r\n nodes = [None for i in range(countNodes)]\r\n csvfile = open(filename, 'r')\r\n reader = csv.DictReader(csvfile)\r\n for line in reader:\r\n i = int(line['NID'])\r\n nodes[i] = node.Node(line['NAME'], i)\r\n try:\r\n nodes[i].p = float(line['p'])\r\n except:\r\n nodes[i].p = None\r\n try:\r\n nodes[i].t = float(line['t'])\r\n except:\r\n nodes[i].t = None\r\n try:\r\n nodes[i].x = float(line['x'])\r\n except:\r\n nodes[i].x = None\r\n\r\n if line['p'] != '' and line['t'] != '':\r\n nodes[i].pt()\r\n elif line['p'] != '' and line['x'] != '':\r\n nodes[i].px()\r\n elif line['t'] != '' and line['x'] != '':\r\n nodes[i].tx()\r\n\r\n return nodes, countNodes\r\n\r\n\r\ndef read_devicefile(filename):\r\n devFile = open(filename, 'r')\r\n discardHeader = devFile.readline()\r\n Comps = {}\r\n i = 0\r\n begId = 2\r\n for line in devFile:\r\n dev = line.split(',')\r\n if dev[1] == \"TURBINE\":\r\n Comps[dev[0]] = turbine.Turbine(\r\n dev[0], int(dev[begId]), int(dev[begId + 1]))\r\n elif dev[1] == \"BOILER\":\r\n Comps[dev[0]] = boiler.Boiler(\r\n dev[0], int(dev[begId]), int(dev[begId + 1]))\r\n elif dev[1] == \"CONDENSER\":\r\n Comps[dev[0]] = condenser.Condenser(\r\n dev[0], int(dev[begId]), int(dev[begId + 1]))\r\n elif dev[1] == \"PUMP\":\r\n Comps[dev[0]] = pump.Pump(dev[0], int(\r\n dev[begId]), int(dev[begId + 1]))\r\n\r\n i = i + 1\r\n\r\n DevNum = i\r\n return Comps, DevNum\r\n\r\n\r\nclass RankineCycle(object):\r\n\r\n def __init__(self, name, nodefilename, devfilename):\r\n self.name = name\r\n self.nodes = []\r\n self.devs = {}\r\n self.nodes, self.NodeNum = read_nodesfile(nodefilename)\r\n self.devs, self.DevNum = read_devicefile(devfilename)\r\n\r\n def state(self):\r\n for key in self.devs:\r\n self.devs[key].state(self.nodes)\r\n\r\n def simulate(self):\r\n for key in self.devs:\r\n self.devs[key].simulate(self.nodes)\r\n\r\n self.bwr = self.devs['Pump'].workRequired / \\\r\n self.devs['Turbine'].workExtracted\r\n self.efficiency = (self.devs['Turbine'].workExtracted - self.devs[\r\n 'Pump'].workRequired) / (self.devs['Boiler'].heatAdded)\r\n\r\n def spower_simulate(self, Wcycledot):\r\n self.Wcycledot = Wcycledot\r\n self.mdot = Wcycledot * 1000.0 * 3600.0 / \\\r\n (self.devs['Turbine'].workExtracted -\r\n self.devs['Pump'].workRequired)\r\n for key in self.devs:\r\n self.devs[key].mdotenergy(self.mdot)\r\n\r\n def cw_simulate(self):\r\n \"\"\" Circulating water system:Condenser Cooling Water\"\"\"\r\n self.nodew = []\r\n self.nodew.append(node.Node('CW-Inlet', 0))\r\n self.nodew.append(node.Node('CW-Outlet', 1))\r\n\r\n self.nodew[0].t = 15\r\n self.nodew[0].x = 0\r\n self.nodew[1].t = 35\r\n self.nodew[1].x = 0\r\n self.nodew[0].tx()\r\n self.nodew[1].tx()\r\n\r\n self.devs['Condenser'].cw_nodes(0, 1)\r\n self.devs['Condenser'].cw_simulate(self.nodew)\r\n\r\n def export(self):\r\n print(\" \\n -------- %s ----------------------------------\" % self.name)\r\n print(\"The net power output: \", self.Wcycledot, \"MW\")\r\n print(\"Efficiency: \", '%.2f' % (self.efficiency * 100), \"%\")\r\n print(\"The back work ratio: \", '%.2f' % (self.bwr * 100), \"%\")\r\n print(\"The mass flow rate: \", '%.2f' % self.mdot, \"kg/h\")\r\n print('The rate of heat transfer as the fluid passes the boiler: ',\r\n '%.2f' % self.devs['Boiler'].Qindot, 'MW')\r\n print(\" \\n ------- Circulating Water System --------------\")\r\n print(\"Cooling water enters the condenser T:\", self.nodew[0].t, u'°C')\r\n print(\"Cooling water exits the condenser T:\", self.nodew[1].t, u'°C')\r\n print('The rate of heat transfer from the condensing steam: ',\r\n '%.2f' % self.devs['Condenser'].Qoutdot, 'MW')\r\n print('The mass flow rate of the condenser cooling water: ', '%.2f' %\r\n self.devs['Condenser'].mcwdot, 'kg/h')\r\n print(\" \\n -------- NODES -----------------------------------\")\r\n print(\"\\nNodeID\\tName\\tP\\tT\\tH\\tS\\tV\\tX\")\r\n for inode in self.nodes:\r\n print(inode)\r\n\r\n\r\nif __name__ == '__main__':\r\n nds_filename = 'rankine81-nds.csv'\r\n dev_filename = 'rankine81-dev.csv'\r\n c81 = RankineCycle(\"Rankine81\", nds_filename, dev_filename)\r\n c81.state()\r\n c81.simulate()\r\n # Specified Net Output Power\r\n Wcycledot = 100\r\n c81.spower_simulate(Wcycledot)\r\n c81.cw_simulate()\r\n c81.export()\r\n","sub_path":"step3/step3-csv/rankine.py","file_name":"rankine.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"499073686","text":"#unzip file\nimport tarfile\ntf = tarfile.open('/Users/HEIDI/Documents/19advinformatics/lab2/lab2data.tar.gz')\ntf.extractall()\n\n#make lists for input and output\nimport glob\ninput = glob.glob('/lab2data/data/*')\noutput = glob.glob('/lab2data/output/*')\n\n#select out the variable that distinguishes each file (.# and .a/b)\n#join the # and a or b together \nimport re\nf = lambda x: ''.join(re.findall('datafile\\.(.*)\\.(.*)\\..*',x)[0])\ndata_input = map(f, input)\nf = lambda x: ''.join(re.findall('outfile\\.(.*)\\.(.*)\\..*',x)[0])\ndata_output = map(f, output)\n\n#find out those missing in the output folder\nmissing = set(data_input).difference(data_output)\n\n#number of missing files\nprint(len(missing))\n\n#write out the names of the missing files in \"missing.txt\"\nwith open('missing.txt','w') as f:\n for x in missing:\n f.write('outfile.%s.%s.out\\n' % (x[:-1],x[-1]))\n f.close()\n\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"548636766","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom models.trivia import db, Categoria, Pregunta, Respuesta\r\n\r\nh=Categoria.query.filter_by(nombre='Historia').first()\r\na=Categoria.query.filter_by(nombre='Arte').first()\r\n\r\ntry:\r\n p2_historia_4 = Pregunta(text=\"Quién fue el Primer Presidente de Uruguay\", categoria=h)\r\n r2_historia_4 = Respuesta(text=\"José Fructuoso Rivera\", es_correcta=True, pregunta=p2_historia_4)\r\n r2_historia_4_2 = Respuesta(text=\"Venancio Flores Barrios\", es_correcta=False, pregunta=p2_historia_4)\r\n r2_historia_4_3 = Respuesta(text=\"Manuel Oribe\", es_correcta=False, pregunta=p2_historia_4)\r\n db.session.add(p2_historia_4)\r\n db.session.add(r2_historia_4)\r\n db.session.add(r2_historia_4_2)\r\n db.session.add(r2_historia_4_3)\r\n \r\n p2_historia_3 = Pregunta(text='El Renacimiento marcó el inicio de la Edad...', categoria=h)\r\n r2_historia_3 = Respuesta(text=\"Moderna\", es_correcta=True, pregunta=p2_historia_3)\r\n r2_historia_3_2 = Respuesta(text=\"Media\", es_correcta=False, pregunta=p2_historia_3)\r\n r2_historia_3_3 = Respuesta(text=\"Contemporánea\", es_correcta=False, pregunta=p2_historia_3)\r\n db.session.add(p2_historia_3)\r\n db.session.add(r2_historia_3)\r\n db.session.add(r2_historia_3_2)\r\n db.session.add(r2_historia_3_3)\r\n \r\n p2_historia_2 = Pregunta(text='De qué color es el humo que informa a los creyentes de que se ha elegido un Papa nuevo', categoria=h)\r\n r2_historia_2 = Respuesta(text=\"Blanco\", es_correcta=True, pregunta=p2_historia_2)\r\n r2_historia_2_2 = Respuesta(text=\"Amarillo\", es_correcta=False, pregunta=p2_historia_2)\r\n r2_historia_2_3 = Respuesta(text=\"Rojo\", es_correcta=False, pregunta=p2_historia_2)\r\n db.session.add(p2_historia_2)\r\n db.session.add(r2_historia_2)\r\n db.session.add(r2_historia_2_2)\r\n db.session.add(r2_historia_2_3)\r\n \r\n p2_arte_2 = Pregunta(text='De que estaba fabricado originalmente el maquillaje blanco de las Geishas', categoria=a)\r\n r2_arte_2 = Respuesta(text=\"Arroz molido\", es_correcta=False, pregunta=p2_arte_2)\r\n r2_arte_2_2 = Respuesta(text=\"Flores de loto\", es_correcta=False, pregunta=p2_arte_2)\r\n r2_arte_2_3 = Respuesta(text=\"Plomo\", es_correcta=True, pregunta=p2_arte_2)\r\n db.session.add(p2_arte_2)\r\n db.session.add(r2_arte_2)\r\n db.session.add(r2_arte_2_2)\r\n db.session.add(r2_arte_2_3)\r\n \r\n p2_arte_3 = Pregunta(text='La Mona Lisa fue pintada por...', categoria=a)\r\n r2_arte_3 = Respuesta(text=\"Picasso\", es_correcta=False, pregunta=p2_arte_3)\r\n r2_arte_3_2 = Respuesta(text=\"Manuel Blanes\", es_correcta=False, pregunta=p2_arte_3)\r\n r2_arte_3_3 = Respuesta(text=\"Leonardo da Vinci\", es_correcta=True, pregunta=p2_arte_3)\r\n db.session.add(p2_arte_3)\r\n db.session.add(r2_arte_3)\r\n db.session.add(r2_arte_3_2)\r\n db.session.add(r2_arte_3_3)\r\n \r\n p2_arte_4 = Pregunta(text='Quién fue impulsor del Fovismo', categoria=a)\r\n r2_arte_4 = Respuesta(text=\"Picasso\", es_correcta=False, pregunta=p2_arte_4)\r\n r2_arte_4_2 = Respuesta(text=\"Torres García\", es_correcta=False, pregunta=p2_arte_4)\r\n r2_arte_4_3 = Respuesta(text=\"Henri Matisse\", es_correcta=True, pregunta=p2_arte_4)\r\n db.session.add(p2_arte_4)\r\n db.session.add(r2_arte_4)\r\n db.session.add(r2_arte_4_2)\r\n db.session.add(r2_arte_4_3)\r\n \r\n p2_arte_5 = Pregunta(text='Cuál de estos pintores no es Uruguayo', categoria=a)\r\n r2_arte_5 = Respuesta(text=\"Francisco de Goya\", es_correcta=False, pregunta=p2_arte_5)\r\n r2_arte_5_2 = Respuesta(text=\"Torres García\", es_correcta=False, pregunta=p2_arte_5)\r\n r2_arte_5_3 = Respuesta(text=\"Juan Manuel Blanes\", es_correcta=True, pregunta=p2_arte_5)\r\n db.session.add(p2_arte_5)\r\n db.session.add(r2_arte_5)\r\n db.session.add(r2_arte_5_2)\r\n db.session.add(r2_arte_5_3)\r\n \r\n # APLICAR CAMBIOS:\r\n db.session.commit()\r\n print(\"DB OK\")\r\n # MOSTRAR TABLAS:\r\n print(Pregunta.query.all())\r\n print(Respuesta.query.all())\r\nexcept Exception as e:\r\n db.session.rollback()\r\n print(str(e)+\" \"+str(type(e)))","sub_path":"src/models/populate2.py","file_name":"populate2.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"194229034","text":"import webapp.database as db\nimport eventlet\nimport time\nimport uuid\nimport sys\nimport queue\nimport random\n\nfrom webapp import socketio\n\nfrom webapp.config import lobby_clients, lobby_clients_active, logger, live_games, SERVER_START_TIME, TIMEOUT_AGE\nfrom webapp.config import GameData, MATCH_MODE, MatchMode, agent_clients, agent_clients_active, CURRENT_MATCH_MODE, LOGGED_OVER_CAPACITY\n\n\ndef maintain_lobby():\n while True:\n remove_ghost_sessions()\n\n match_players()\n\n eventlet.sleep(0.1)\n\n\ndef match_human_players():\n if lobby_clients.qsize() >= 2:\n try:\n client_1 = lobby_clients.get()\n except queue.Empty as e:\n logger.debug(\"Couldn't get client 1 in human-human match, \" + str(e))\n return\n\n try:\n client_2 = lobby_clients.get()\n except queue.Empty as e:\n logger.debug(\"Couldn't get client 2 in human-human match, \" + str(e))\n lobby_clients.put(client_1)\n return\n\n # If one of them isn't there anymore, remove (put the other one back)\n if not (lobby_clients_active[client_1[1].sid] and lobby_clients_active[client_2[1].sid]):\n logger.debug(\"Pair between \" + client_1[1].sid + \" and \" + client_2[1].sid +\n \" didn't work; removing one or both of them\")\n if lobby_clients_active[client_1[1].sid]:\n lobby_clients.put(client_1)\n elif lobby_clients_active[client_2[1].sid]:\n lobby_clients.put(client_2)\n return\n\n if client_1[1].sid == client_2[1].sid:\n # It is possible they can match with themselves in the case\n # that they time out and restart from the same page. Avoid this\n # by removing a client if it has the same sid.\n\n # This only happens because the player joins the lobby twice from the\n # same sid (we don't make them reload the page to return to the\n # lobby). lobby_clients_active is updated to True for the second\n # time they join the lobby, so for both clients in the lobby, they\n # are considered active (the only thing different between the two\n # clients is the time they entered, which we don't want to require\n # for lookups in lobby_clients_active). Return the one to the\n # queue that has waited shorter (i.e., the one that just joined).\n lobby_clients.put(client_2)\n return\n\n client_1_waittime = time.time() - client_1[0] - SERVER_START_TIME\n client_2_waittime = time.time() - client_2[0] - SERVER_START_TIME\n\n client_1 = client_1[1]\n client_2 = client_2[1]\n\n lobby_clients_active[client_1.sid] = False\n lobby_clients_active[client_2.sid] = False\n\n db.write_connection_event(client_1.sid, \"Matched With Partner\")\n db.write_connection_event(client_2.sid, \"Matched With Partner\")\n\n # pair them up\n # guaranteed to be free of collisions and we don't need ordering on\n # game IDs so just use uuid\n game_id = str(uuid.uuid4().hex)\n game_seed = random.randint(0, pow(2, 30))\n print('game seed: ' + str(game_seed))\n logger.debug('game seed: ' + str(game_seed))\n\n num_cards = 21\n\n character_1 = \"Human\" if game_seed % 2 == 0 else \"Agent\"\n character_2 = \"Agent\" if character_1 == \"Human\" else \"Human\"\n if character_1 == \"Human\":\n game = GameData(game_seed,\n game_id,\n num_cards,\n human=client_1,\n agent=client_2,\n using_agent=False)\n else:\n game = GameData(game_seed,\n game_id,\n num_cards,\n human=client_2,\n agent=client_1,\n using_agent=False)\n live_games[game_id] = game\n logger.debug(\"starting game #\" + str(game_id) + \" with seed \" + str(game_seed) + \" and worker ids \"\n + client_1.worker_id + \" / \" + client_2.worker_id)\n logger.debug(\"client 1 (\" + str(client_1) + \" waited for \" + str(client_1_waittime) + \" s\")\n logger.debug(\"client 2 (\" + str(client_2) + \" waited for \" + str(client_2_waittime) + \" s\")\n\n # Initialize the game for both players, and also say that the lobby\n # is ready (players matched) -- this will play the audio reminder\n\n # For now, each player is in a room corresponding to their sid.\n # But later they will go into a room corresponding to the game, so\n # movement communication is easier.\n socketio.emit('initGame',\n {'character': character_1,\n 'gameId': game_id,\n 'seed': game_seed,\n 'num_cards': num_cards},\n room=client_1.sid)\n socketio.emit('lobbyReady',\n room=client_1.sid)\n\n socketio.emit('initGame',\n {'character': character_2,\n 'gameId': game_id,\n 'seed': game_seed,\n 'num_cards': num_cards},\n room=client_2.sid)\n socketio.emit('lobbyReady',\n room=client_2.sid)\n\n db.write_game_start(game)\n\n # Switch the mode\n if MATCH_MODE == MatchMode.HUMAN_AND_AGENT:\n CURRENT_MATCH_MODE.use_human = False\n logger.debug('Switching to matching with human: ' + str(CURRENT_MATCH_MODE.use_human))\n\n\ndef match_human_with_agent():\n # Don't allow more than N concurrent games\n if len(live_games) >= 30:\n if lobby_clients.qsize() > LOGGED_OVER_CAPACITY.num_connected:\n logger.debug(\"New maximum connections reached: %r\" % lobby_clients.qsize())\n LOGGED_OVER_CAPACITY.num_connected = lobby_clients.qsize()\n elif lobby_clients.qsize() < LOGGED_OVER_CAPACITY.num_connected:\n logger.debug(\"Number of connections lowered: %r\" % lobby_clients.qsize())\n LOGGED_OVER_CAPACITY.num_connected = lobby_clients.qsize()\n\n return\n\n if lobby_clients.qsize() >= 1 and agent_clients.qsize() >= 1:\n try:\n human_client = lobby_clients.get()\n except queue.Empty as e:\n logger.debug(\"Couldn't get human in human-agent match, \" + str(e))\n return\n\n try:\n agent_client = agent_clients.get()\n except queue.Empty as e:\n logger.debug(\"Couldn't get agent in human-agent match, \" + str(e))\n lobby_clients.put(human_client)\n return\n\n # If one of them isn't there anymore, remove (put the other one back)\n if not (lobby_clients_active[human_client[1].sid] and agent_clients_active[agent_client[1].sid]):\n logger.debug(\"Pair between \" + human_client[1].sid + \" and \" + agent_client[1].sid +\n \" didn't work; removing one or both of them\")\n if lobby_clients_active[human_client[1].sid]:\n lobby_clients.put(human_client)\n elif agent_clients_active[agent_client[1].sid]:\n agent_clients.put(agent_client)\n return\n\n if human_client[1].sid == agent_client[1].sid:\n raise ValueError('Agent and human client sid should never be the same!' + str(human_client[1].sid))\n\n client_1_waittime = time.time() - human_client[0] - SERVER_START_TIME\n client_2_waittime = time.time() - agent_client[0] - SERVER_START_TIME\n\n human_client = human_client[1]\n agent_client = agent_client[1]\n\n lobby_clients_active[human_client.sid] = False\n agent_clients_active[agent_client.sid] = False\n\n db.write_connection_event(human_client.sid, \"Matched With Partner\")\n db.write_connection_event(agent_client.sid, \"Matched With Partner\")\n\n # pair them up\n # guaranteed to be free of collisions and we don't need ordering on\n # game IDs so just use uuid\n game_id = str(uuid.uuid4().hex)\n game_seed = random.randint(0, pow(2, 30))\n num_cards = 21\n\n game = GameData(game_seed,\n game_id,\n num_cards,\n human=human_client,\n agent=agent_client,\n using_agent=True)\n live_games[game_id] = game\n logger.debug(\"starting game #\" + str(game_id) + \" with seed \" + str(game_seed) + \" and worker ids \"\n + human_client.worker_id + \" / \" + agent_client.worker_id)\n logger.debug(\"client 1 (\" + str(human_client) + \" waited for \" + str(client_1_waittime) + \" s\")\n logger.debug(\"client 2 (\" + str(agent_client) + \" waited for \" + str(client_2_waittime) + \" s\")\n\n # Initialize the game for both players, and also say that the lobby\n # is ready (players matched) -- this will play the audio reminder\n\n # For now, each player is in a room corresponding to their sid.\n # But later they will go into a room corresponding to the game, so\n # movement communication is easier.\n socketio.emit('initGame',\n {'character': 'Human',\n 'gameId': game_id,\n 'seed': game_seed,\n 'num_cards': num_cards},\n room=human_client.sid)\n socketio.emit('lobbyReady',\n room=human_client.sid)\n\n socketio.emit('initGame',\n {'character': 'Agent',\n 'gameId': game_id,\n 'seed': game_seed,\n 'num_cards': num_cards},\n room=agent_client.sid)\n socketio.emit('lobbyReady',\n room=agent_client.sid)\n\n db.write_game_start(game)\n\n # Switch the mode\n if MATCH_MODE == MatchMode.HUMAN_AND_AGENT:\n CURRENT_MATCH_MODE.use_human = True\n logger.debug('Switching to matching with human: ' + str(CURRENT_MATCH_MODE.use_human))\n\n\ndef match_players():\n if CURRENT_MATCH_MODE.use_human:\n match_human_players()\n else:\n match_human_with_agent()\n\n\ndef remove_ghost_sessions():\n cur_age = sys.maxsize\n ghost_start_time = time.time() - SERVER_START_TIME\n while cur_age > TIMEOUT_AGE and not lobby_clients.empty():\n client_entered_time, client = lobby_clients.get()\n\n client_age = ghost_start_time - client_entered_time\n\n if client_age <= TIMEOUT_AGE:\n # If hasn't been waiting long, put it back and break\n lobby_clients.put((client_entered_time, client))\n else:\n lobby_clients_active[client.sid] = False\n\n socketio.emit('reset', room=client.sid)\n logger.debug('marked inactive client ' + str(client) + '; waited for ' + str(client_age) + ' s')\n db.write_connection_event(client.sid, \"Removed Ghost Session\")\n\n cur_age = client_age\n\n","sub_path":"webapp/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":11124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"506572130","text":"\"\"\"\nFaceEditedフォルダにある画像の2割をテストフォルダに移行するプログラムである、img_split.pyを並列処理する。\n\n作成者 185725J, 185718F(8:2)\n\"\"\"\nimport time\nfrom pathlib import Path\nimport cv2\nimport subprocess\nIMAGE_PATH = \"./arashi_image\"\n\nimport glob\ndef main():\n #img_dir = \"./arashi_image/\"\n members = [\"二宮和也\",\"櫻井翔\",\"相葉雅紀\",\"大野智\",\"松本潤\"]\n\t\n #files = glob.glob(IMAGE_PATH+\"/*\")\n #print(files)\n \n #files = Path(IMAGE_PATH + \"/\").glob(\"*\")\n procs = []\n N = 5 #メモリ不足にならないようにNを適切に設定する必要がある\n #print(files)\n for member in members:\n \n proc = subprocess.Popen([\"python3\", \"img_split.py\",member])\n procs.append(proc)\n\n if len(procs) == N:\n # メモリ不足で実行に失敗するので、\n # 子プロセスの数がNになったら、一旦全ての子プロセスの終了を待つ\n for proc in procs:\n proc.communicate()\n procs.clear()\n for proc in procs:\n proc.communicate()\n\n\nif __name__ == \"__main__\":\n start = time.time()\n main()\n end = time.time()\n print(\"Finished in {} seconds.\".format(end-start))","sub_path":"learning/main_img_split.py","file_name":"main_img_split.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"358773706","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport utils\nimport torchvision as tv\n\nfrom attention import Attention\nfrom classifier import SimpleClassifier\nfrom fc import FCNet\n\nclass vgg16_modified(nn.Module):\n def __init__(self):\n super(vgg16_modified, self).__init__()\n vgg = tv.models.vgg16(pretrained=True)\n self.vgg_features = vgg.features\n self.out_features = vgg.classifier[6].in_features\n features = list(vgg.classifier.children())[:-1] # Remove last layer\n self.vgg_classifier = nn.Sequential(*features) # Replace the model classifier\n #print(self.vgg_classifier)\n\n def rep_size(self):\n return 1024\n\n def base_size(self):\n return 512\n\n def forward(self,x):\n #return self.dropout2(self.relu2(self.lin2(self.dropout1(self.relu1(self.lin1(self.vgg_features(x).view(-1, 512*7*7)))))))\n feat = self.vgg_features(x)\n y = self.vgg_classifier(feat.view(-1, 512*7*7))\n #print('y size :', y.size())\n return feat, y\n\nclass verb_module(nn.Module):\n def __init__(\n self,\n n_verbs,\n mlp_hidden=512\n ):\n super(verb_module, self).__init__()\n self.conv = vgg16_modified()\n\n self.verb = nn.Sequential(\n nn.Linear(mlp_hidden*8, mlp_hidden*2),\n nn.BatchNorm1d(mlp_hidden*2),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(mlp_hidden*2, n_verbs),\n )\n\n def forward(self, image):\n\n _, conv = self.conv(image)\n\n #verb pred\n verb_pred = self.verb(conv)\n\n return verb_pred\n\nclass role_module(nn.Module):\n def __init__(self, encoder,\n gpu_mode,\n embed_hidden=300,\n mlp_hidden=512):\n super(role_module, self).__init__()\n\n self.encoder = encoder\n self.gpu_mode = gpu_mode\n self.n_roles = self.encoder.get_num_roles()\n self.n_verbs = self.encoder.get_num_verbs()\n self.vocab_size = self.encoder.get_num_labels()\n self.max_role_count = self.encoder.get_max_role_count()\n self.n_role_q_vocab = len(self.encoder.question_words)\n\n self.conv = vgg16_modified()\n self.verb_lookup = nn.Embedding(self.n_verbs, embed_hidden)\n self.w_emb = nn.Embedding(self.n_role_q_vocab + 1, embed_hidden, padding_idx=self.n_role_q_vocab)\n self.q_emb = nn.LSTM(embed_hidden, mlp_hidden,\n batch_first=True, bidirectional=True)\n self.q_prep = FCNet([mlp_hidden, mlp_hidden])\n self.lstm_proj = nn.Linear(mlp_hidden * 2, mlp_hidden)\n self.verb_transform = nn.Linear(embed_hidden, mlp_hidden)\n self.v_att = Attention(mlp_hidden, mlp_hidden, mlp_hidden)\n self.q_net = FCNet([mlp_hidden, mlp_hidden])\n self.v_net = FCNet([mlp_hidden, mlp_hidden])\n self.classifier = SimpleClassifier(\n mlp_hidden, 2 * mlp_hidden, self.vocab_size, 0.5)\n\n self.conv_hidden = self.conv.base_size()\n self.mlp_hidden = mlp_hidden\n self.embed_hidden = embed_hidden\n\n def forward(self, img, verb, role_q):\n\n img_features, _ = self.conv(img)\n batch_size, n_channel, conv_h, conv_w = img_features.size()\n role_q = role_q.view(batch_size*self.max_role_count, -1)\n img = img_features.view(batch_size, n_channel, -1)\n img = img.permute(0, 2, 1)\n w_emb = self.w_emb(role_q)\n\n lstm_out, (h, _) = self.q_emb(w_emb)\n\n q_emb = h.permute(1, 0, 2).contiguous().view(batch_size*self.max_role_count, -1)\n q_emb = self.lstm_proj(q_emb)\n verb_embd = self.verb_transform(self.verb_lookup(verb))\n verb_embed_expand = verb_embd.expand(self.max_role_count, verb_embd.size(0), verb_embd.size(1))\n verb_embed_expand = verb_embed_expand.transpose(0,1)\n verb_embed_expand = verb_embed_expand.contiguous().view(-1, self.mlp_hidden)\n q_emb = self.q_prep(q_emb * verb_embed_expand)\n\n img = img.expand(self.max_role_count,img.size(0), img.size(1), img.size(2))\n img = img.transpose(0,1)\n img = img.contiguous().view(batch_size* self.max_role_count, -1, self.mlp_hidden)\n\n att = self.v_att(img, q_emb)\n v_emb = (att * img).sum(1) # [batch, v_dim]\n\n q_repr = self.q_net(q_emb)\n v_repr = self.v_net(v_emb)\n joint_repr = q_repr * v_repr\n logits = self.classifier(joint_repr)\n\n role_label_pred = logits.contiguous().view(batch_size, -1, self.vocab_size)\n return role_label_pred\n\n\nclass BaseModel(nn.Module):\n def __init__(\n self,\n encoder,\n gpu_mode,\n ):\n super(BaseModel, self).__init__()\n\n self.normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n self.train_transform = tv.transforms.Compose([\n tv.transforms.RandomRotation(10),\n tv.transforms.RandomResizedCrop(224),\n tv.transforms.RandomHorizontalFlip(),\n tv.transforms.ToTensor(),\n self.normalize,\n ])\n\n self.dev_transform = tv.transforms.Compose([\n tv.transforms.Resize(224),\n tv.transforms.CenterCrop(224),\n tv.transforms.ToTensor(),\n self.normalize,\n ])\n\n self.encoder = encoder\n self.gpu_mode = gpu_mode\n self.n_verbs = self.encoder.get_num_verbs()\n\n self.verb = verb_module(self.n_verbs)\n self.roles = role_module(self.encoder, self.gpu_mode)\n\n def train_preprocess(self):\n return self.train_transform\n\n def dev_preprocess(self):\n return self.dev_transform\n\n def forward(self, image):\n\n verb_pred = self.verb(image)\n\n sorted_idx = torch.sort(verb_pred, 1, True)[1]\n verb = sorted_idx[:,0]\n role_qs = self.encoder.get_role_questions_batch(verb)\n if self.gpu_mode >= 0:\n role_qs = role_qs.to(torch.device('cuda'))\n\n role_pred = self.roles(image, verb, role_qs)\n\n return verb_pred, role_pred\n\n\n def calculate_loss(self, verb_pred, gt_verbs):\n\n batch_size = verb_pred.size()[0]\n loss = 0\n #print('eval pred verbs :', pred_verbs)\n for i in range(batch_size):\n verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])\n loss += verb_loss\n '''for index in range(gt_labels.size()[1]):\n frame_loss = 0\n verb_loss = utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])\n\n\n #frame_loss += verb_loss\n #print('frame loss', frame_loss)\n loss += verb_loss'''\n\n\n final_loss = loss/batch_size\n #print('loss :', final_loss)\n return final_loss\n\n\n\n","sub_path":"model_verbmlp_roletd_new.py","file_name":"model_verbmlp_roletd_new.py","file_ext":"py","file_size_in_byte":6787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"652759207","text":"import multiprocessing\nfrom time import sleep\n\n\nclass MiClase(object):\n\n def __init__(self, name):\n self.name = name\n\n def do_something(self):\n proc_name = multiprocessing.current_process().name\n print('Estoy en el proceso {} y me pasan estos datos: {}'.format(proc_name, self.name))\n sleep(5)\n print('Ya he acabado')\n\n\ndef worker(q):\n obj = q.get()\n obj.do_something()\n\n\nif __name__ == '__main__':\n queue = multiprocessing.Queue()\n\n p = multiprocessing.Process(target=worker, args=(queue,))\n p.start()\n\n\n queue.put(MiClase('Cacahuete'))\n\n # Cerramos la cola y esperamos al hilo de background, asegurándonos de que los datos se han flusheado\n queue.close()\n queue.join_thread()\n\n # Esperamos a que termine el proceso\n p.join()\n","sub_path":"notebooks/multiprocessing_queue_test.py","file_name":"multiprocessing_queue_test.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"355512289","text":"from time import time\nimport numpy as np\ndef timer(func):\n def inner(*args,**kargs):\n time1=time()\n ret=func(*args,**kargs)\n print(f'The runtime of the function is {time()-time1}')\n return ret\n return inner\n\n@timer\ndef my_sum(*args,**kargs):\n result=0\n for i in args:\n result+=sum(i)\n return result\n\nmy_sum(list((range(100))))\n","sub_path":"week08/homework_3.py","file_name":"homework_3.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}
+{"seq_id":"414578831","text":"import smtplib\nimport time\nimport urllib\nimport urllib.request\nimport re\nimport json\n\n\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\n\n\n\ndef email(html=\"\",mail_to='yzxm365@163.com'):\n\t#创建实例,构造MIMEMulipart对象为根容器\n\tmsg=MIMEMultipart('alternative')\n\t#正文\n\tmail_boby='hello.this is the mail content'\n\t#发信邮箱\n\tmail_from='yzxm365@sina.cn'\n\t#收信邮箱\n\t#mail_to=['yzxm365@163.com']\n\n\t#msg=MIMEText(mail_boby)\n\t#定义标题\n\tmsg['Subject']=u'this is the title'\n\t#定义发信人\n\tmsg['From']=mail_from\n\tmsg['To']=';'.join(mail_to)\n\tmsg[\"date\"]=time.strftime('%a, %d %b %Y %H:%M:%S %z')\n\ttime.strftime('%a, %d %b %Y %H:%M:%S %z')\n\tmsg[\"date\"]=time.strftime('%a, %d %b %Y %H:%M:%S %z')\n\n\t#定义正文\n\ttxt=MIMEText(u'中文内容','plain','utf-8')\n\tmsg.attach(txt)\n\t#以下附件\n\t# picfiles=[r'C:\\Users\\te3030\\Desktop\\33\\1.png'] #多文件\n\t# for file in picfiles:\n\t# f=open(file,'rb')\n\t# img=MIMEImage(f.read())\n\t# f.close()\n\t# msg.attach(img)\n\n\t#附件文件\n\t# text=\"Hello!\\nHow are you?\\nHere is the link you wanted:\\nhttp://www.python.org\"\n\t# part1=MIMEText(text,'plain')\n\t\n\tpart2=MIMEText(html,'html')\n\t# msg.attach(part1)\n\tmsg.attach(part2)\n\n\tsmtp=smtplib.SMTP()\n\tsmtp.connect(\"smtp.sina.cn\")\n\tsmtp.login('yzxm365@sina.cn','zm520Y')\n\tsmtp.sendmail(mail_from,mail_to,msg.as_string())\n\tsmtp.quit()\n\tprint ('ok')\n\treturn\n\n\ndef caiyun():\n\ttianqi = {\n\t\t'CLEAR_DAY':'晴天',\n\t\t'CLEAR_NIGHT':'晴夜',\n\t\t'PARTLY_CLOUDY_DAY':'多云',\n\t\t'PARTLY_CLOUDY_NIGHT':'多云',\n\t\t'CLOUDY':'阴',\n\t\t'RAIN':'雨',\n\t\t'SNOW':'雪',\n\t\t'WIND':'风',\n\t\t'FOG':'雾',\n\t\t'HAZE':'霾',\n\t\t'SLEET':'冻雨',\n\t}\n\n\tReturn ={}\n\thtml = caiyun_gei()\n\t#print(html)\n\tregx = r'skycon\":.*?\"cloudrate\"'\n\tpattern = re.compile(regx)\n\tget_skycon = re.findall(pattern,repr(html))\n\ti = 0\n\t#print(get_skycon[1])\n\tdq_time = ''\n\tfor text in get_skycon:\n\t\tif i ==0:\n\t\t\tget_skycon[i] = text[9:len(get_skycon)-14]\n\t\t\tregx = r'{.*?}'\n\t\t\tpattern = re.compile(regx)\n\t\t\tget_text = re.findall(pattern,repr(get_skycon[i]))\n\t\t\tdq_timeFT = False\n\t\t\tii=0\n\t\t\tdq_time = time.strftime(\"%Y-%m-%d %H:00\",time.localtime(time.time()))\n\t\t\tfor value in get_text:\n\t\t\t\tdata = json.loads(value)\n\t\t\t\tdatavalue = tianqi[data['value']]\n\t\t\t\tdatetime = data['datetime']\n\t\t\t\tif dq_timeFT:\n\t\t\t\t\tReturn[str(ii)] = datavalue\n\t\t\t\t\tReturn['0'+str(ii)] = datetime[5:len(datetime)-3]\n\t\t\t\t\t#print(Return[str(ii)],str(ii))\n\t\t\t\t\tii +=1\n\t\t\t\telse:\n\t\t\t\t\t#print(dq_timeFT,dq_time,datetime)\n\t\t\t\t\tif dq_time == datetime:\n\t\t\t\t\t\tdq_timeFT = True\n\t\t\t\t\t\tReturn[str(ii)] = datavalue\n\t\t\t\t\t\tReturn['0'+str(ii)] = datetime[5:len(datetime)-3]\n\t\t\t\t\t\t#print(Return[str(ii)],str(ii))\n\t\t\t\t\t\tii +=1\n\t\t\t\t#print(tianqi[data['value']])\n\t\t\t\t#print(data['datetime'])\n\t\telse:\n\t\t\tprint()\n\t\t\t#print('\\n\\n-------------------------------\\n\\n')\n\t\ti +=1\n\treturn Return\n\ndef caiyun_gei():\n\turl = 'https://api.caiyunapp.com/v2/CHtAyZ96OioEHqGe/116.3,40.1/forecast'\n\ta= ''\n\ttry:\n\t\trequest = urllib.request.Request(url)\n\t\tresponse = urllib.request.urlopen(request)\n\t\treturn response.read()\n\texcept Exception:\n\t\tprint(22)\n\t\treturn ''\n\treturn ''\n\n\ndef qidong():\n\ttianqi = {\n\t\t'CLEAR_DAY':'晴天',\n\t\t'CLEAR_NIGHT':'晴夜',\n\t\t'PARTLY_CLOUDY_DAY':'多云',\n\t\t'PARTLY_CLOUDY_NIGHT':'多云',\n\t\t'CLOUDY':'阴',\n\t\t'RAIN':'雨',\n\t\t'SNOW':'雪',\n\t\t'WIND':'风',\n\t\t'FOG':'雾',\n\t\t'HAZE':'霾',\n\t\t'SLEET':'冻雨',\n\t}\n\tdata = caiyun()\n\ttou = '''Hello!
时间 天气 '''\n\tneirong = ''\n\twei='
'\n\ti=0\n\tc = ''\n\twhile i < len(data)/2:\n\t\tneirong = neirong+'' + data['0'+str(i)] +' '+ data[str(i)] +' '\n\t\ti+=1\n\thtml = tou+neirong+wei\n\temail(html,'yzxm365@163.com')\n\tprint('ok')\n\t#print(tou+neirong+wei)\n\t#print('------------------------\\n',data)\n\n\nwhile 1:\n\tqidong()\n\ttime.sleep(60)\n\n#print(a)","sub_path":"demo/email_fujian_html.py","file_name":"email_fujian_html.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"98"}